Merge branch 'master_14.1.1' into solutions

Conflicts:
	cave/com.raytheon.uf.viz.xy.crosssection/src/com/raytheon/uf/viz/xy/crosssection/rsc/CrossSectionVectorResource.java
	cave/com.raytheon.uf.viz.xy.timeseries/META-INF/MANIFEST.MF
	cave/com.raytheon.uf.viz.xy.varheight/META-INF/MANIFEST.MF
	cave/com.raytheon.uf.viz.xy.varheight/src/com/raytheon/uf/viz/xy/varheight/rsc/VarHeightResource.java
	cave/com.raytheon.viz.gfe/src/com/raytheon/viz/gfe/core/DataManager.java
	cave/com.raytheon.viz.gfe/src/com/raytheon/viz/gfe/rsc/GFEResource.java
	cave/com.raytheon.viz.grid/src/com/raytheon/viz/grid/rsc/general/GeneralGridData.java
	edexOsgi/com.raytheon.edex.plugin.gfe/src/com/raytheon/edex/plugin/gfe/server/GridParmManager.java
	edexOsgi/com.raytheon.edex.plugin.gfe/src/com/raytheon/edex/plugin/gfe/server/database/IFPGridDatabase.java
	edexOsgi/com.raytheon.uf.edex.archive/utility/common_static/base/archiver/purger/PROCESSED_DATA.xml
	edexOsgi/com.raytheon.uf.edex.database/src/com/raytheon/uf/edex/database/dao/CoreDao.java

Change-Id: I8c5fabf90d864db89a0db7d5f3f6f54c1e5f44a5
Former-commit-id: 9f121ec921a2f8635c48f005ddb71787ad6bf166

Commit 62c445d872: 86 changed files with 5309 additions and 4232 deletions
Binary file not shown.

@@ -120,7 +120,7 @@ function copyVizShutdownUtilIfNecessary()
function getPidsOfMyRunningCaves()
{
local user=`whoami`
local caveProcs=`ps -ef | grep "/awips2/cave/cave " | grep -v "grep" | grep $user`
local caveProcs=`ps -ef | grep -E "(/awips2/cave|/usr/local/viz)/cave " | grep -v "grep" | grep $user`

# preserve IFS and set it to line feed only
local PREV_IFS=$IFS
@@ -4,6 +4,7 @@ import java.io.File;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

@@ -48,6 +49,8 @@ import com.raytheon.uf.common.time.util.TimeUtil;
* Jul 24, 2013 #2220 rferrel Change to get all data sizes only one time.
* Aug 02, 2013 #2224 rferrel Changes for new configuration files.
* Aug 06, 2013 #2222 rferrel Changes to display all selected data.
* Dec 11, 2013 #2603 rferrel Selected list changed to a Set.
* Dec 11, 2013 #2624 rferrel Clear display variables when recomputing sizes.
*
* </pre>
*

@@ -231,6 +234,8 @@ public class SizeJob extends Job {
*/
public void recomputeSize() {
clearQueue();
displayArchive = null;
displayCategory = null;
for (ArchiveInfo archiveInfo : archiveInfoMap.values()) {
for (String categoryName : archiveInfo.getCategoryNames()) {
CategoryInfo categoryInfo = archiveInfo.get(categoryName);

@@ -300,19 +305,19 @@ public class SizeJob extends Job {
for (String archiveName : getArchiveNames()) {
ArchiveInfo archiveInfo = get(archiveName);
for (String categoryName : archiveInfo.getCategoryNames()) {
List<String> selectionsList = selections.getSelectedList(
Set<String> selectionsSet = selections.getSelectedSet(
archiveName, categoryName);
MissingData missingData = removeMissingData(archiveName,
categoryName);
if (missingData != null) {
missingData.setSelectedList(selectionsList);
missingData.setSelectedSet(selectionsSet);
addMissingData(missingData);
} else {
CategoryInfo categoryInfo = archiveInfo.get(categoryName);
for (DisplayData displayData : categoryInfo
.getDisplayDataList()) {
String displayLabel = displayData.getDisplayLabel();
boolean selected = selectionsList
boolean selected = selectionsSet
.contains(displayLabel);
if (selected != displayData.isSelected()) {
setSelect(displayData, selected);

@@ -506,10 +511,10 @@ public class SizeJob extends Job {

visibleList = manager.getDisplayData(displayArchive, displayCategory,
false);
List<String> selectedList = selections.getSelectedList(displayArchive,
Set<String> selectedSet = selections.getSelectedSet(displayArchive,
displayCategory);
for (DisplayData displayData : visibleList) {
displayData.setSelected(selectedList.contains(displayData
displayData.setSelected(selectedSet.contains(displayData
.getDisplayLabel()));
}

@@ -528,10 +533,10 @@ public class SizeJob extends Job {
schedule();
}
} else {
selectedList = selections.getSelectedList(archiveName,
selectedSet = selections.getSelectedSet(archiveName,
categoryName);
MissingData missingData = new MissingData(archiveName,
categoryName, selectedList);
categoryName, selectedSet);
missingDataQueue.add(missingData);
}
}

@@ -658,14 +663,11 @@ public class SizeJob extends Job {
break mainLoop;
}

// System.out.println("+++SizeJob: " + currentDisplayData);

List<File> files = manager.getDisplayFiles(currentDisplayData,
startCal, endCal);

// Size no longer needed.
if (currentDisplayData != sizeQueue.peek()) {
// System.out.println("---SizeJob: " + currentDisplayData);
continue mainLoop;
}

@@ -682,7 +684,6 @@ public class SizeJob extends Job {

// Skip when size no longer needed.
if (stopComputeSize) {
// System.out.println("---SizeJob: " + currentDisplayData);
continue mainLoop;
}
}

@@ -692,7 +693,6 @@ public class SizeJob extends Job {
displayQueue.add(currentDisplayData);
}

// System.out.println("xxxSizeJob: OK_STATUS");
shutdownDisplayTimer.set(true);
return Status.OK_STATUS;
}

@@ -748,15 +748,10 @@ public class SizeJob extends Job {
displayQueue.size());
displayQueue.drainTo(list);

// for (DisplayData displayData : list) {
// System.out.println("== " + displayData);
// }
//
for (IUpdateListener listener : listeners) {
listener.update(list);
}
} else if (shutdownDisplayTimer.get()) {
// System.out.println("xxx updateDisplayTimer canceled");
displayTimer.cancel();
displayTimer = null;
}

@@ -773,7 +768,6 @@ public class SizeJob extends Job {
*/
@Override
protected void canceling() {
// System.err.println("canceling SizeJob");
clearQueue();
missingDataQueue.clear();
missingDataJob.cancel();

@@ -789,28 +783,28 @@ public class SizeJob extends Job {

protected final String category;

protected final List<String> selectedList;
protected final Set<String> selectedSet;

protected boolean visiable = false;

public MissingData(String archive, String category,
List<String> selectedList) {
Set<String> selectedSet) {
this.archive = archive;
this.category = category;
this.selectedList = new ArrayList<String>(selectedList);
this.selectedSet = new HashSet<String>(selectedSet);
}

public boolean isSelected() {
return !selectedList.isEmpty();
return !selectedSet.isEmpty();
}

public void setVisiable(boolean state) {
this.visiable = state;
}

public void setSelectedList(List<String> selectedList) {
this.selectedList.clear();
this.selectedList.addAll(selectedList);
public void setSelectedSet(Set<String> selectedSet) {
this.selectedSet.clear();
this.selectedSet.addAll(selectedSet);
}

@Override

@@ -861,8 +855,7 @@ public class SizeJob extends Job {

String archiveName = currentMissingData.archive;
String categoryName = currentMissingData.category;
// System.out.println("== missingData: " + currentMissingData);
List<String> selectedList = currentMissingData.selectedList;
Set<String> selectedSet = currentMissingData.selectedSet;
List<DisplayData> displayDatas = manager.getDisplayData(
archiveName, categoryName, false);
if (shutdown.get()) {

@@ -870,7 +863,7 @@ public class SizeJob extends Job {
}

for (DisplayData displayData : displayDatas) {
displayData.setSelected(selectedList.contains(displayData
displayData.setSelected(selectedSet.contains(displayData
.getDisplayLabel()));
sizeQueue.add(displayData);
}

@@ -883,13 +876,11 @@ public class SizeJob extends Job {
}
}

// System.out.println("xxx missingData");
return Status.OK_STATUS;
}

@Override
protected void canceling() {
// System.err.println("canceling MissingDataJob");
shutdown.set(true);
}
}
@@ -76,6 +76,7 @@ import com.raytheon.viz.ui.dialogs.CaveSWTDialog;
* Aug 01, 2013 2221 rferrel Changes for select configuration.
* Aug 06, 2013 2222 rferrel Changes to display all selected data.
* Nov 14, 2013 2549 rferrel Get category data moved off the UI thread.
* Dec 11, 2013 2624 rferrel No longer clear table prior to populating.
* </pre>
*
* @author bgonzale

@@ -131,6 +132,10 @@ public abstract class AbstractArchiveDlg extends CaveSWTDialog implements
/** Which table is being displayed. */
private boolean showingSelected = true;

private String previousSelectedArchive = null;

private String previousSelectedCategory = null;

/**
* @param parentShell
*/

@@ -386,7 +391,11 @@ public abstract class AbstractArchiveDlg extends CaveSWTDialog implements
* Method invoked when archive combo selection is changed.
*/
protected void archiveComboSelection() {
populateCategoryCbo();
String selectedArchvieName = getSelectedArchiveName();
if (!selectedArchvieName.equals(previousSelectedArchive)) {
previousSelectedArchive = selectedArchvieName;
populateCategoryCbo();
}
}

/**

@@ -412,7 +421,14 @@ public abstract class AbstractArchiveDlg extends CaveSWTDialog implements
* Method invoked when the category combo selection is changed.
*/
protected void categoryComboSelection() {
populateTableComp();
String archiveName = getSelectedArchiveName();
String categoryName = getSelectedCategoryName();
if (!archiveName.equals(previousSelectedArchive)
|| !categoryName.equals(previousSelectedCategory)) {
previousSelectedArchive = archiveName;
previousSelectedCategory = categoryName;
populateTableComp();
}
}

/**

@@ -463,9 +479,6 @@ public abstract class AbstractArchiveDlg extends CaveSWTDialog implements
setCursorBusy(true);

setShowingSelected(false);
tableComp.populateTable(archiveName, categoryName,
new ArrayList<DisplayData>(0));
tableComp.refresh();

Job job = new Job("populate category table") {
Binary file not shown. (11 binary files)
@@ -71,6 +71,7 @@ import com.vividsolutions.jts.geom.Coordinate;
* ------------ ---------- ----------- --------------------------
* Nov 29, 2007 njensen Initial creation
* 02/17/09 njensen Refactored to new rsc architecture
* Dec 11, 2013 DR 16795 D. Friedman Transform pixel coordinate in inspect
*
* </pre>
*

@@ -293,13 +294,15 @@ public class CrossSectionImageResource extends AbstractCrossSectionResource

IExtent extent = descriptor.getGraph(this).getExtent();

double val = Double.NaN;
if (extent.contains(new double[] { coord.getObject().x,
coord.getObject().y })) {
double[] worldCoord = descriptor.pixelToWorld(new double[] {
coord.getObject().x, coord.getObject().y });
if (extent.contains(worldCoord)) {
try {

DirectPosition2D dp = new DirectPosition2D(coord.getObject().x,
coord.getObject().y);
DirectPosition2D dp = new DirectPosition2D(worldCoord[0],
worldCoord[1]);
descriptor.getGridGeometry().getGridToCRS().transform(dp, dp);
val = reproj.reprojectedGridCell(sampler, (int) dp.x,
(int) dp.y);
@@ -54,11 +54,12 @@ import com.vividsolutions.jts.geom.Coordinate;
* <pre>
*
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* Date Ticket# Engineer Description
* ------------- -------- ----------- --------------------------
* Jun 15, 2010 bsteffen Initial creation
* Feb 14, 2011 8244 bkowal enabled magnification capability.
* Jun 15, 2010 bsteffen Initial creation
* Feb 14, 2011 8244 bkowal enabled magnification capability.
* Sep 23, 2013 2363 bsteffen Add more vector configuration options.
* Dec 11, 2013 DR 16795 D. Friedman Transform pixel coordinate in inspect
*
* </pre>
*

@@ -176,7 +177,7 @@ public class CrossSectionVectorResource extends AbstractCrossSectionResource {
String s = null;
Coordinate c = coord.getObject();
DataTime time = descriptor.getTimeForResource(this);
double[] values = descriptor.getGraph(this).getGridLocation(c.x, c.y);
double[] values = descriptor.pixelToWorld(new double[] { c.x, c.y });

// if geometry has not been created yet dont sample
if (geometry == null) {
@@ -62,6 +62,7 @@ import com.vividsolutions.jts.geom.Coordinate;
* ------------ ---------- ----------- --------------------------
* Dec 4, 2007 njensen Initial creation
* Feb 20, 2009 njensen Refactored to new rsc architecture
* Dec 11, 2013 DR 16795 D. Friedman Transform pixel coordinate in inspect
*
* </pre>
*

@@ -278,12 +279,13 @@ public class TimeHeightImageResource extends AbstractTimeHeightResource
IExtent extent = descriptor.getGraph(this).getExtent();

double val = Double.NaN;
if (extent.contains(new double[] { coord.getObject().x,
coord.getObject().y })) {
double[] worldCoord = descriptor.pixelToWorld(new double[] {
coord.getObject().x, coord.getObject().y });
if (extent.contains(worldCoord)) {
try {

DirectPosition2D dp = new DirectPosition2D(coord.getObject().x,
coord.getObject().y);
DirectPosition2D dp = new DirectPosition2D(worldCoord[0],
worldCoord[1]);
descriptor.getGridGeometry().getGridToCRS().transform(dp, dp);
val = reproj.reprojectedGridCell(sampler, (int) dp.x,
(int) dp.y);
@@ -17,7 +17,8 @@ Require-Bundle: com.raytheon.uf.common.dataplugin,
com.raytheon.viz.ui,
javax.measure,
org.eclipse.ui,
org.eclipse.core.runtime
org.eclipse.core.runtime,
org.geotools
Import-Package: com.raytheon.viz.core.rsc
Export-Package: com.raytheon.uf.viz.xy.timeseries,
com.raytheon.uf.viz.xy.timeseries.adapter,
@ -96,6 +96,7 @@ import com.vividsolutions.jts.geom.Geometry;
|
|||
* Feb 10, 2011 8244 bkowal enabled the magnification
|
||||
* capability.
|
||||
* Feb 14, 2011 8244 bkowal enabled magnification for wind barbs.
|
||||
* Dec 19, 2013 DR 16795 D. Friedman Transform pixel coordinate in inspect
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -582,7 +583,10 @@ public class TimeSeriesResource extends
|
|||
@Override
|
||||
public String inspect(ReferencedCoordinate coord) throws VizException {
|
||||
String inspect = null;
|
||||
Coordinate c = descriptor.getGraphCoordiante(this, coord.getObject());
|
||||
double[] worldCoord = descriptor.pixelToWorld(
|
||||
new double[] { coord.getObject().x, coord.getObject().y });
|
||||
Coordinate c = descriptor.getGraphCoordiante(this,
|
||||
new Coordinate(worldCoord[0], worldCoord[1]));
|
||||
if (c != null && data != null) {
|
||||
double[] vals = data.inspectXY(c);
|
||||
NumberFormat nf = NumberFormat.getInstance();
|
||||
|
|
|
@ -22,7 +22,11 @@ package com.raytheon.uf.viz.xy.timeseries.util;
|
|||
import java.util.Stack;
|
||||
|
||||
import org.eclipse.swt.widgets.Event;
|
||||
import org.geotools.geometry.DirectPosition2D;
|
||||
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.uf.viz.core.IDisplayPaneContainer;
|
||||
import com.raytheon.uf.viz.core.drawables.IRenderableDisplay;
|
||||
import com.raytheon.uf.viz.xy.AbstractGraphInputHandler;
|
||||
|
@ -42,6 +46,7 @@ import com.vividsolutions.jts.geom.Coordinate;
|
|||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Oct 16, 2009 mschenke Initial creation
|
||||
* Dec 11, 2013 DR 16795 D. Friedman Transform pixel coordinate for zoom
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -51,6 +56,9 @@ import com.vividsolutions.jts.geom.Coordinate;
|
|||
|
||||
public class TimeSeriesZoomHandler extends AbstractGraphInputHandler {
|
||||
|
||||
private static final transient IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(TimeSeriesZoomHandler.class);
|
||||
|
||||
private MousePreferenceManager prefManager = MousePreferenceManager
|
||||
.getInstance();
|
||||
|
||||
|
@ -103,7 +111,7 @@ public class TimeSeriesZoomHandler extends AbstractGraphInputHandler {
|
|||
|
||||
private boolean zoomIn(int x, int y) {
|
||||
IDisplayPaneContainer editor = display.getContainer();
|
||||
Coordinate grid = editor.translateClick(x, y);
|
||||
Coordinate grid = translateClick(x, y);
|
||||
if (grid == null) {
|
||||
return false;
|
||||
}
|
||||
|
@ -129,7 +137,7 @@ public class TimeSeriesZoomHandler extends AbstractGraphInputHandler {
|
|||
|
||||
private boolean zoomOut(int x, int y) {
|
||||
IDisplayPaneContainer editor = display.getContainer();
|
||||
Coordinate grid = editor.translateClick(x, y);
|
||||
Coordinate grid = translateClick(x, y);
|
||||
if (grid == null) {
|
||||
return false;
|
||||
}
|
||||
|
@ -153,4 +161,28 @@ public class TimeSeriesZoomHandler extends AbstractGraphInputHandler {
|
|||
return true;
|
||||
}
|
||||
|
||||
private Coordinate translateClick(int x, int y) {
|
||||
IDisplayPaneContainer editor = display.getContainer();
|
||||
XyGraphDescriptor desc = (XyGraphDescriptor) editor
|
||||
.getActiveDisplayPane().getDescriptor();
|
||||
Coordinate grid = editor.translateClick(x, y);
|
||||
if (grid == null) {
|
||||
return null;
|
||||
}
|
||||
/* Convert from the overall display coordinate space to the coordinate
|
||||
* space for our resource.
|
||||
*/
|
||||
DirectPosition2D dp = new DirectPosition2D(grid.x, grid.y);
|
||||
try {
|
||||
desc.getGridGeometry().getGridToCRS().transform(dp, dp);
|
||||
} catch (Exception e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Error converting coordinate", e);
|
||||
}
|
||||
grid.x = dp.x;
|
||||
grid.y = dp.y;
|
||||
grid.z = 0;
|
||||
return grid;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -18,7 +18,8 @@ Require-Bundle: com.raytheon.uf.common.dataplugin,
com.raytheon.viz.ui,
javax.measure,
org.eclipse.core.runtime,
org.eclipse.ui
org.eclipse.ui,
org.geotools
Import-Package: com.raytheon.viz.core.map,
com.raytheon.viz.core.rsc
Export-Package: com.raytheon.uf.viz.xy.varheight,
@ -76,12 +76,12 @@ import com.vividsolutions.jts.geom.Geometry;
|
|||
* <pre>
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
* Date Ticket# Engineer Description
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------- -------- ----------- --------------------------
|
||||
* Nov 23, 2009 mschenke Initial creation
|
||||
* Feb 10, 2011 8344 bkowal enabled the magnification capability.
|
||||
* Sep 23, 2013 2363 bsteffen Add more vector configuration options.
|
||||
*
|
||||
* Nov 23, 2009 mschenke Initial creation
|
||||
* Feb 10, 2011 8344 bkowal enabled the magnification capability.
|
||||
* Sep 23, 2013 2363 bsteffen Add more vector configuration options.
|
||||
* Dec 19, 2013 DR 16795 D. Friedman Transform pixel coordinate in inspect
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -551,9 +551,13 @@ public class VarHeightResource extends
|
|||
@Override
|
||||
public String inspect(ReferencedCoordinate coord) throws VizException {
|
||||
Coordinate object = coord.getObject();
|
||||
object = descriptor.getGraphCoordiante(this, object);
|
||||
if (object != null) {
|
||||
return object.x + ", " + object.y;
|
||||
double[] worldCoord = descriptor.pixelToWorld(
|
||||
new double[] { object.x, object.y });
|
||||
Coordinate c = new Coordinate(worldCoord[0], worldCoord[1]);
|
||||
|
||||
c = descriptor.getGraphCoordiante(this, c);
|
||||
if (c != null) {
|
||||
return c.x + ", " + c.y;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
|
|
@ -20,7 +20,11 @@
|
|||
package com.raytheon.uf.viz.xy.varheight.util;
|
||||
|
||||
import org.eclipse.swt.widgets.Event;
|
||||
import org.geotools.geometry.DirectPosition2D;
|
||||
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.uf.viz.core.IDisplayPaneContainer;
|
||||
import com.raytheon.uf.viz.core.drawables.IRenderableDisplay;
|
||||
import com.raytheon.uf.viz.core.drawables.ResourcePair;
|
||||
|
@ -44,6 +48,7 @@ import com.vividsolutions.jts.geom.Coordinate;
|
|||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Jul 3, 2010 bsteffen Initial creation
|
||||
* Dec 11, 2013 DR 16795 D. Friedman Transform pixel coordinate for zoom
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -52,6 +57,9 @@ import com.vividsolutions.jts.geom.Coordinate;
|
|||
*/
|
||||
public class VarHeightZoomHandler extends AbstractGraphInputHandler {
|
||||
|
||||
private static final transient IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(VarHeightZoomHandler.class);
|
||||
|
||||
private MousePreferenceManager prefManager = MousePreferenceManager
|
||||
.getInstance();
|
||||
|
||||
|
@ -113,12 +121,24 @@ public class VarHeightZoomHandler extends AbstractGraphInputHandler {
|
|||
&& zoomIndex < ZoomMenuAction.ZOOM_LEVELS.length - 1) {
|
||||
zoomIndex += 1;
|
||||
}
|
||||
|
||||
/* Convert from the overall display coordinate space to the coordinate
|
||||
* space for our resource.
|
||||
*/
|
||||
DirectPosition2D dp = new DirectPosition2D(grid.x, grid.y);
|
||||
try {
|
||||
desc.getGridGeometry().getGridToCRS().transform(dp, dp);
|
||||
} catch (Exception e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Error converting coordinate for zoom", e);
|
||||
}
|
||||
|
||||
for (ResourcePair rsc : desc.getResourceList()) {
|
||||
if (rsc.getResource() instanceof IGraphableResource<?, ?>) {
|
||||
IGraph graph = desc.getGraph((IGraphableResource<?, ?>) rsc
|
||||
.getResource());
|
||||
if (graph.getExtent().contains(new double[] { grid.x, grid.y })) {
|
||||
graph.zoom((int) Math.pow(2, zoomIndex), grid);
|
||||
if (graph.getExtent().contains(new double[] { dp.x, dp.y })) {
|
||||
graph.zoom((int) Math.pow(2, zoomIndex), new Coordinate(dp.x, dp.y));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -2,7 +2,7 @@
from com.raytheon.viz.gfe import GFEPreference

Options = [
('*visual', 'truecolor'),
# ('*visual', 'truecolor'),
('*background' , 'gray65'),
('*activeBackground' , 'gray83'),
('*blinkingHighlightColor' , 'CornSilk'),
@ -29,8 +29,6 @@ import com.raytheon.uf.common.status.IUFStatusHandler;
|
|||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.viz.gfe.dialogs.GFEConfigDialog;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureJob;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolJob;
|
||||
|
||||
/**
|
||||
* The activator class controls the plug-in life cycle
|
||||
|
@ -43,6 +41,8 @@ import com.raytheon.viz.gfe.smarttool.script.SmartToolJob;
|
|||
* ------------ ---------- ----------- --------------------------
|
||||
* Initial creation
|
||||
* Oct 30, 2012 1298 rferrel Must be a blocking dialog.
|
||||
* Dec 09, 2013 #2367 dgilling Remove shutdown of ProcedureJob and
|
||||
* SmartToolJob.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -92,8 +92,6 @@ public class Activator extends AbstractUIPlugin implements BundleActivator {
|
|||
@Override
|
||||
public void stop(BundleContext context) throws Exception {
|
||||
plugin = null;
|
||||
ProcedureJob.shutdown();
|
||||
SmartToolJob.shutdown();
|
||||
super.stop(context);
|
||||
}
|
||||
|
||||
|
|
|
@ -38,8 +38,6 @@ import com.raytheon.viz.gfe.core.parm.Parm;
|
|||
import com.raytheon.viz.gfe.dialogs.KillJobsOnExitDialog;
|
||||
import com.raytheon.viz.gfe.dialogs.SaveParameterDialog;
|
||||
import com.raytheon.viz.gfe.gridmanager.GridManager;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureJob;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolJob;
|
||||
import com.raytheon.viz.ui.DetachedViewListener;
|
||||
import com.raytheon.viz.ui.color.BackgroundColor;
|
||||
import com.raytheon.viz.ui.color.IBackgroundColorChangedListener.BGColorMode;
|
||||
|
@ -56,6 +54,7 @@ import com.raytheon.viz.ui.color.IBackgroundColorChangedListener.BGColorMode;
|
|||
* adding cancel capability and if error on
|
||||
* save then the close is cancelled.
|
||||
* 10/30/2012 #1298 rferrel Must keep blocking dialogs to work with eclipse plugins.
|
||||
* 12/10/2013 #2367 dgilling Use new ProcedureJobePool and SmartToolJobPool.
|
||||
* </pre>
|
||||
*
|
||||
* @author dfitch
|
||||
|
@ -138,11 +137,12 @@ public class GridManagerView extends ViewPart implements ISaveablePart2 {
|
|||
@Override
|
||||
public int promptToSaveOnClose() {
|
||||
// Check for any running/queued jobs.
|
||||
if (ProcedureJob.haveJobs() || SmartToolJob.haveJobs()) {
|
||||
if (dataManager.getProcedureJobPool().isActive()
|
||||
|| dataManager.getSmartToolJobPool().isActive()) {
|
||||
Shell shell = PlatformUI.getWorkbench().getActiveWorkbenchWindow()
|
||||
.getShell();
|
||||
|
||||
KillJobsOnExitDialog dialog = new KillJobsOnExitDialog(shell);
|
||||
KillJobsOnExitDialog dialog = new KillJobsOnExitDialog(shell,
|
||||
dataManager);
|
||||
// Must keep modal and blocking in order to work with eclipse
|
||||
// plugins.
|
||||
dialog.setBlockOnOpen(true);
|
||||
|
@ -187,13 +187,10 @@ public class GridManagerView extends ViewPart implements ISaveablePart2 {
|
|||
|
||||
@Override
|
||||
public boolean isDirty() {
|
||||
if ((dataManager != null && dataManager.getParmManager()
|
||||
.getModifiedParms().length > 0)
|
||||
|| SmartToolJob.haveJobs()
|
||||
|| ProcedureJob.haveJobs()) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
return ((dataManager != null) && (dataManager.getParmManager()
|
||||
.getModifiedParms().length > 0))
|
||||
|| dataManager.getProcedureJobPool().isActive()
|
||||
|| dataManager.getSmartToolJobPool().isActive();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -32,7 +32,7 @@ import com.raytheon.uf.common.status.IUFStatusHandler;
|
|||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.viz.gfe.core.DataManager;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureJob;
|
||||
import com.raytheon.viz.gfe.core.DataManagerUIFactory;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureRequest;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureSelectionDlg;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureUtil;
|
||||
|
@ -47,8 +47,9 @@ import com.raytheon.viz.gfe.ui.runtimeui.SelectionDlg;
|
|||
* SOFTWARE HISTORY
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Nov 4, 2008 njensen Initial creation
|
||||
* Nov 15, 2012 1298 rferrel Changes for non-blocking ProcedureSelectionDlg.
|
||||
* Nov 04, 2008 njensen Initial creation
|
||||
* Nov 15, 2012 #1298 rferrel Changes for non-blocking ProcedureSelectionDlg.
|
||||
* Dec 09, 2013 #2367 dgilling Use new ProcedureJobPool.
|
||||
* </pre>
|
||||
*
|
||||
* @author njensen
|
||||
|
@ -69,11 +70,11 @@ public class RunProcedureAction extends AbstractHandler {
|
|||
@Override
|
||||
public Object execute(ExecutionEvent event) throws ExecutionException {
|
||||
String procedureName = event.getParameter("name");
|
||||
DataManager dm = DataManager.getCurrentInstance();
|
||||
DataManager dm = DataManagerUIFactory.getCurrentInstance();
|
||||
try {
|
||||
List<FieldDefinition> varList = dm.getProcedureInterface()
|
||||
.getVarDictWidgets(procedureName);
|
||||
if (varList == null || varList.size() == 0) {
|
||||
if (varList == null || varList.isEmpty()) {
|
||||
// no VariableList found on procedure, just run it
|
||||
PreviewInfo pi = ProcedureUtil.checkAndBuildPreview(dm,
|
||||
procedureName);
|
||||
|
@ -81,7 +82,7 @@ public class RunProcedureAction extends AbstractHandler {
|
|||
ProcedureRequest req = ProcedureUtil.buildProcedureRequest(
|
||||
procedureName, dm);
|
||||
if (req != null) {
|
||||
ProcedureJob.enqueue(dm, req);
|
||||
dm.getProcedureJobPool().schedule(req);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -67,10 +67,12 @@ import com.raytheon.viz.gfe.core.parm.ParmOp;
|
|||
import com.raytheon.viz.gfe.gridmanager.IGridManager;
|
||||
import com.raytheon.viz.gfe.jobs.AutoSaveJob;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureFactory;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureJobPool;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureUIController;
|
||||
import com.raytheon.viz.gfe.smarttool.EditActionProcessor;
|
||||
import com.raytheon.viz.gfe.smarttool.GridCycler;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolFactory;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolJobPool;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolUIController;
|
||||
import com.raytheon.viz.gfe.textformatter.TextProductManager;
|
||||
|
||||
|
@ -97,6 +99,7 @@ import com.raytheon.viz.gfe.textformatter.TextProductManager;
|
|||
* 08/27/2013 2302 randerso Code cleanup for AutoSaveJob
|
||||
* 09/05/2013 2307 dgilling Use better PythonScript constructor.
|
||||
* 09/16/2013 2033 dgilling Remove unused IToolController.
|
||||
* 12/09/2013 2367 dgilling Instantiate ProcedureJobPool here.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -193,6 +196,10 @@ public class DataManager {
|
|||
|
||||
private List<String> allSites;
|
||||
|
||||
private final ProcedureJobPool procJobPool;
|
||||
|
||||
private final SmartToolJobPool toolJobPool;
|
||||
|
||||
public IISCDataAccess getIscDataAccess() {
|
||||
return iscDataAccess;
|
||||
}
|
||||
|
@ -226,6 +233,8 @@ public class DataManager {
|
|||
strInitJob.schedule();
|
||||
|
||||
initializeScriptControllers();
|
||||
procJobPool = new ProcedureJobPool(4, 4, this);
|
||||
toolJobPool = new SmartToolJobPool(3, 3, this);
|
||||
|
||||
this.weGroupManager = new WEGroupManager(this);
|
||||
this.editActionProcessor = new EditActionProcessor(this);
|
||||
|
@ -295,6 +304,28 @@ public class DataManager {
|
|||
procedureInterface.dispose();
|
||||
}
|
||||
|
||||
// by moving the the pools' cancel calls to another thread, we prevent
|
||||
// GFE shutdown from freezing the UI thread until all jobs have
|
||||
// completed. The unfortunate side effect is that we get that annoying
|
||||
// "Job found still running after platform shutdown" warning from
|
||||
// Eclipse.
|
||||
Runnable killJobPools = new Runnable() {
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
if (toolJobPool != null) {
|
||||
toolJobPool.cancel();
|
||||
}
|
||||
|
||||
if (procJobPool != null) {
|
||||
procJobPool.cancel();
|
||||
}
|
||||
}
|
||||
};
|
||||
Thread killPoolsThread = new Thread(killJobPools, "shutdown-gfe-pools");
|
||||
killPoolsThread.setDaemon(false);
|
||||
killPoolsThread.start();
|
||||
|
||||
NotificationManagerJob.removeObserver("edex.alerts.gfe", router);
|
||||
}
|
||||
|
||||
|
@ -675,4 +706,11 @@ public class DataManager {
|
|||
return textProductMgr;
|
||||
}
|
||||
|
||||
public ProcedureJobPool getProcedureJobPool() {
|
||||
return procJobPool;
|
||||
}
|
||||
|
||||
public SmartToolJobPool getSmartToolJobPool() {
|
||||
return toolJobPool;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -30,8 +30,7 @@ import org.eclipse.swt.widgets.Control;
|
|||
import org.eclipse.swt.widgets.Label;
|
||||
import org.eclipse.swt.widgets.Shell;
|
||||
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureJob;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolJob;
|
||||
import com.raytheon.viz.gfe.core.DataManager;
|
||||
import com.raytheon.viz.ui.dialogs.CaveJFACEDialog;
|
||||
|
||||
/**
|
||||
|
@ -44,6 +43,8 @@ import com.raytheon.viz.ui.dialogs.CaveJFACEDialog;
|
|||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Jun 13, 2011 rferrel Initial creation
|
||||
* Dec 10, 2013 #2367 dgilling Rewrite to use new ProcedureJobPool and
|
||||
* SmartToolJobPool.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -54,13 +55,16 @@ public class KillJobsOnExitDialog extends CaveJFACEDialog {
|
|||
|
||||
private Composite top;
|
||||
|
||||
private final DataManager dataMgr;
|
||||
|
||||
/**
|
||||
* Use defaults of -240, minimum and 240 max.
|
||||
*/
|
||||
public KillJobsOnExitDialog(Shell parent) {
|
||||
public KillJobsOnExitDialog(Shell parent, DataManager dataMgr) {
|
||||
super(parent);
|
||||
int style = this.getShellStyle() | SWT.MODELESS | SWT.TITLE | SWT.CLOSE;
|
||||
this.setShellStyle(style);
|
||||
this.dataMgr = dataMgr;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -77,9 +81,9 @@ public class KillJobsOnExitDialog extends CaveJFACEDialog {
|
|||
|
||||
private void initializeComponents() {
|
||||
|
||||
int cnt[] = ProcedureJob.getJobCount();
|
||||
int cnt[] = dataMgr.getProcedureJobPool().getWorkRemaining();
|
||||
GridData data = null;
|
||||
if (cnt[0] > 0 || cnt[1] > 0) {
|
||||
if ((cnt[0] > 0) || (cnt[1] > 0)) {
|
||||
Label lab = new Label(top, SWT.NONE);
|
||||
lab.setText(String
|
||||
.format("Have %d procedure(s) running and %d procedures(s) pending",
|
||||
|
@ -88,8 +92,8 @@ public class KillJobsOnExitDialog extends CaveJFACEDialog {
|
|||
lab.setLayoutData(data);
|
||||
}
|
||||
|
||||
cnt = SmartToolJob.getJobCount();
|
||||
if (cnt[0] > 0 || cnt[1] > 0) {
|
||||
cnt = dataMgr.getSmartToolJobPool().getWorkRemaining();
|
||||
if ((cnt[0] > 0) || (cnt[1] > 0)) {
|
||||
Label lab = new Label(top, SWT.NONE);
|
||||
lab.setText(String
|
||||
.format("Have %d Smart tool(s) running and %d Smart tool(s) pending",
|
||||
|
|
|
@ -60,9 +60,7 @@ import com.raytheon.viz.gfe.core.DataManagerUIFactory;
|
|||
import com.raytheon.viz.gfe.core.GFEMapRenderableDisplay;
|
||||
import com.raytheon.viz.gfe.core.ISpatialDisplayManager;
|
||||
import com.raytheon.viz.gfe.core.internal.GFESpatialDisplayManager;
|
||||
import com.raytheon.viz.gfe.procedures.ProcedureJob;
|
||||
import com.raytheon.viz.gfe.rsc.GFELegendResourceData;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolJob;
|
||||
import com.raytheon.viz.gfe.statusline.ISCSendEnable;
|
||||
import com.raytheon.viz.ui.EditorUtil;
|
||||
import com.raytheon.viz.ui.cmenu.ZoomMenuAction;
|
||||
|
@ -88,6 +86,8 @@ import com.raytheon.viz.ui.perspectives.VizPerspectiveListener;
|
|||
* Jul 7, 2011 #9897 ryu close formatters on perspective close/reset
|
||||
* Aug 20,2012 #1077 randerso Added support for bgColor setting
|
||||
* Oct 23, 2012 #1287 rferrel Changes for non-blocking FormattrLauncherDialog.
|
||||
* Dec 09, 2013 #2367 dgilling Remove shutdown of ProcedureJob and
|
||||
* SmartToolJob.
|
||||
* </pre>
|
||||
*
|
||||
* @author randerso
|
||||
|
@ -235,15 +235,6 @@ public class GFEPerspectiveManager extends AbstractCAVEPerspectiveManager {
|
|||
|
||||
DataManagerUIFactory.dispose(perspectiveWindow);
|
||||
|
||||
// Put on own thread so close is not slowed down.
|
||||
new Thread(new Runnable() {
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
ProcedureJob.shutdown();
|
||||
SmartToolJob.shutdown();
|
||||
}
|
||||
}).start();
|
||||
FormatterlauncherAction.closeDialog();
|
||||
}
|
||||
|
||||
|
|
|
@ -1,449 +0,0 @@
|
|||
/**
|
||||
* This software was developed and / or modified by Raytheon Company,
|
||||
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
*
|
||||
* U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
* This software product contains export-restricted data whose
|
||||
* export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
* to non-U.S. persons whether in the United States or abroad requires
|
||||
* an export license or other authorization.
|
||||
*
|
||||
* Contractor Name: Raytheon Company
|
||||
* Contractor Address: 6825 Pine Street, Suite 340
|
||||
* Mail Stop B8
|
||||
* Omaha, NE 68106
|
||||
* 402.291.0100
|
||||
*
|
||||
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
* further licensing information.
|
||||
**/
|
||||
package com.raytheon.viz.gfe.procedures;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import jep.JepException;
|
||||
|
||||
import org.eclipse.core.runtime.IProgressMonitor;
|
||||
import org.eclipse.core.runtime.IStatus;
|
||||
import org.eclipse.core.runtime.Status;
|
||||
import org.eclipse.core.runtime.jobs.Job;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.gfe.StatusConstants;
|
||||
import com.raytheon.uf.common.dataplugin.gfe.reference.ReferenceData;
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.uf.common.time.TimeRange;
|
||||
import com.raytheon.uf.viz.core.jobs.AbstractQueueJob;
|
||||
import com.raytheon.viz.gfe.Activator;
|
||||
import com.raytheon.viz.gfe.GFEException;
|
||||
import com.raytheon.viz.gfe.core.DataManager;
|
||||
import com.raytheon.viz.gfe.jobs.AsyncProgressJob;
|
||||
|
||||
/**
|
||||
* Job for running GFE procedures. Since JEP/JNI requires that the thread that
|
||||
* initialized the python interpreter is the same one that runs it, this job
|
||||
* initializes an interpreter for procedures and then sleeps until a request is
|
||||
* enqueued.
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Oct 8, 2009 njensen Initial creation
|
||||
* Jan 8, 2013 1486 dgilling Support changes to BaseGfePyController.
|
||||
* Jan 18, 2013 1509 njensen Garbage collect after running procedure
|
||||
* Apr 03, 2013 1855 njensen Never dispose interpreters until shutdown and
|
||||
* reuse interpreter if called from procedure
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
* @author njensen
|
||||
* @version 1.0
|
||||
*/
|
||||
|
||||
public class ProcedureJob extends AbstractQueueJob<ProcedureRequest> {
|
||||
/**
|
||||
* Maximum number of jobs to keep for a given Data Manager.
|
||||
*/
|
||||
private final static int maxJobs = 4;
|
||||
|
||||
/**
|
||||
* Index of job with the queue. Will break code if not zero.
|
||||
*/
|
||||
private final static int QUEUE_JOB_INDEX = 0;
|
||||
|
||||
private static final transient IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(ProcedureJob.class);
|
||||
|
||||
private static Map<DataManager, List<ProcedureJob>> instanceMap = null;
|
||||
|
||||
private ProcedureController python;
|
||||
|
||||
private DataManager dataMgr;
|
||||
|
||||
private ProcedureRequest request;
|
||||
|
||||
protected ProcedureJob(DataManager dataMgr) {
|
||||
super("GFE Procedures Job");
|
||||
this.dataMgr = dataMgr;
|
||||
}
|
||||
|
||||
private void getRequest() throws InterruptedException {
|
||||
if (instanceMap == null) {
|
||||
request = null;
|
||||
return;
|
||||
}
|
||||
|
||||
List<ProcedureJob> jobList = instanceMap.get(dataMgr);
|
||||
if (jobList == null || jobList.size() == 0
|
||||
|| jobList.get(QUEUE_JOB_INDEX).queue == null) {
|
||||
request = null;
|
||||
} else {
|
||||
request = jobList.get(QUEUE_JOB_INDEX).queue.poll(1000L,
|
||||
TimeUnit.MILLISECONDS);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
* @seeorg.eclipse.core.runtime.jobs.Job#run(org.eclipse.core.runtime.
|
||||
* IProgressMonitor)
|
||||
*/
|
||||
@Override
|
||||
protected IStatus run(IProgressMonitor monitor) {
|
||||
try {
|
||||
python = ProcedureFactory.buildController(dataMgr);
|
||||
} catch (JepException e) {
|
||||
ProcedureJob.removeJob(dataMgr, this);
|
||||
return new Status(IStatus.ERROR, StatusConstants.PLUGIN_ID,
|
||||
"Error initializing procedure python", e);
|
||||
}
|
||||
|
||||
try {
|
||||
while (monitor.isCanceled() == false) {
|
||||
// ProcedureRequest request;
|
||||
try {
|
||||
getRequest();
|
||||
} catch (InterruptedException e) {
|
||||
continue;
|
||||
}
|
||||
// May have been canceled while waiting.
|
||||
if (monitor.isCanceled()) {
|
||||
break;
|
||||
}
|
||||
synchronized (this) {
|
||||
try {
|
||||
if (request != null) {
|
||||
python.processFileUpdates();
|
||||
processRequest(request);
|
||||
if (request != null) {
|
||||
request.requestComplete(null);
|
||||
}
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Error running procedure ", t);
|
||||
if (request != null) {
|
||||
request.requestComplete(t);
|
||||
}
|
||||
} finally {
|
||||
request = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
if (python != null) {
|
||||
python.dispose();
|
||||
python = null;
|
||||
}
|
||||
}
|
||||
|
||||
return Status.OK_STATUS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a job from the Data Manger's job list.
|
||||
*
|
||||
* @param dataMgr
|
||||
* - The job's data manager
|
||||
* @param job
|
||||
* - The job to remove
|
||||
*/
|
||||
private static synchronized void removeJob(DataManager dataMgr,
|
||||
ProcedureJob job) {
|
||||
if (instanceMap == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
List<ProcedureJob> jobList = instanceMap.get(dataMgr);
|
||||
|
||||
if (jobList != null) {
|
||||
jobList.remove(job);
|
||||
|
||||
// Removing job with queue remove job list so next request will set
|
||||
// up new queue.
|
||||
if (job.queue != null) {
|
||||
jobList.clear();
|
||||
instanceMap.remove(dataMgr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void processRequest(ProcedureRequest request) {
|
||||
this.execute(python, request.getProcedureName(), request.getRefSet(),
|
||||
request.getTimeRange(), request.getVarDict());
|
||||
this.dataMgr.getEditActionProcessor().wrapUpExecute(
|
||||
request.getPreview(), false);
|
||||
}
|
||||
|
||||
/**
|
||||
* This manages the scheduling of jobs to service a Data Manger's requests.
|
||||
*
|
||||
* @param dataMgr
|
||||
* - Data Manger for the request
|
||||
* @param request
|
||||
* - The request to service
|
||||
* @return state - true when job available to process request otherwise
|
||||
* false and request is queued to wait for next available job
|
||||
*/
|
||||
public static synchronized boolean enqueue(DataManager dataMgr,
|
||||
ProcedureRequest request) {
|
||||
if (instanceMap == null) {
|
||||
instanceMap = new HashMap<DataManager, List<ProcedureJob>>();
|
||||
}
|
||||
|
||||
Thread currentThread = Thread.currentThread();
|
||||
List<ProcedureJob> jobList = instanceMap.get(dataMgr);
|
||||
if (jobList == null) {
|
||||
jobList = new ArrayList<ProcedureJob>();
|
||||
// Add the first job which contains the queue used by all jobs in
|
||||
// the list.
|
||||
ProcedureJob job = new ProcedureJob(dataMgr);
|
||||
jobList.add(job);
|
||||
instanceMap.put(dataMgr, jobList);
|
||||
job.setSystem(true);
|
||||
job.schedule();
|
||||
}
|
||||
boolean jobAvailable = false;
|
||||
ProcedureJob alreadyOnThread = null;
|
||||
for (ProcedureJob job : jobList) {
|
||||
Thread jobThread = job.getThread();
|
||||
if (currentThread == jobThread) {
|
||||
// this occurs when a running procedure uses
|
||||
// SmartScript.callProcedure()
|
||||
// for efficiency we want to just stay on this thread
|
||||
alreadyOnThread = job;
|
||||
jobAvailable = true;
|
||||
break;
|
||||
} else if (job.request == null) {
|
||||
jobAvailable = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// All jobs for data manager are busy, add another if we haven't
|
||||
// reached the limit.
|
||||
if (alreadyOnThread == null && !jobAvailable
|
||||
&& jobList.size() < maxJobs) {
|
||||
ProcedureJob job = new ProcedureJob(dataMgr);
|
||||
job.setSystem(true);
|
||||
jobList.add(job);
|
||||
// Never used additional job's queue
|
||||
job.queue = null;
|
||||
job.schedule();
|
||||
jobAvailable = true;
|
||||
}
|
||||
|
||||
if (alreadyOnThread != null) {
|
||||
try {
|
||||
alreadyOnThread.processRequest(request);
|
||||
request.requestComplete(null);
|
||||
} catch (Throwable t) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Error running procedure ", t);
|
||||
request.requestComplete(t);
|
||||
}
|
||||
} else {
|
||||
jobList.get(QUEUE_JOB_INDEX).enqueue(request);
|
||||
}
|
||||
return jobAvailable;
|
||||
}
|
||||
|
||||
/**
|
||||
* This returns an array of two integers the first is the number of
|
||||
* Procedure Tool Jobs being processed and the second is the number in the
|
||||
* queue waiting to be processed.
|
||||
*
|
||||
* @return cnts
|
||||
*/
|
||||
public static int[] getJobCount() {
|
||||
int[] cnt = new int[] { 0, 0 };
|
||||
if (instanceMap != null) {
|
||||
for (List<ProcedureJob> jobList : instanceMap.values()) {
|
||||
cnt[1] += jobList.get(QUEUE_JOB_INDEX).queue.size();
|
||||
for (ProcedureJob job : jobList) {
|
||||
if (job.request != null) {
|
||||
++cnt[0];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if there are any Procedure Tool Jobs queued and/or being
|
||||
* processed.
|
||||
*
|
||||
* @return true when there are job(s)s queued or being processed otherwise
|
||||
* false
|
||||
*/
|
||||
public static boolean haveJobs() {
|
||||
boolean result = false;
|
||||
|
||||
if (instanceMap != null) {
|
||||
for (List<ProcedureJob> jobList : instanceMap.values()) {
|
||||
// Any pending requests.
|
||||
if (jobList.get(QUEUE_JOB_INDEX).queue.size() > 0) {
|
||||
result = true;
|
||||
break;
|
||||
}
|
||||
|
||||
// Any requests being processed.
|
||||
for (ProcedureJob job : jobList) {
|
||||
if (job.request != null) {
|
||||
result = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* This terminates all the Data Managers' jobs.
|
||||
*/
|
||||
public static synchronized void shutdown() {
|
||||
// TODO This currently joins with a job waiting for it to finish which
|
||||
// can take a long time and may even be waiting for user to input. Must
|
||||
// find a wait to kill any GUI associated with a request and if python
|
||||
// running a way to terminate it so no waiting is involved.
|
||||
if (instanceMap != null) {
|
||||
for (List<ProcedureJob> jobList : instanceMap.values()) {
|
||||
jobList.get(QUEUE_JOB_INDEX).queue.clear();
|
||||
|
||||
// Do in reverse order so last job cancel is the one with the
|
||||
// queue.
|
||||
for (int index = jobList.size() - 1; index >= 0; --index) {
|
||||
jobList.get(index).cancel();
|
||||
}
|
||||
}
|
||||
|
||||
for (List<ProcedureJob> jobList : instanceMap.values()) {
|
||||
for (ProcedureJob job : jobList) {
|
||||
synchronized (job) {
|
||||
try {
|
||||
if (job.getState() != Job.NONE) {
|
||||
job.join();
|
||||
}
|
||||
} catch (InterruptedException ex) {
|
||||
System.err.println("here SmartToolJob");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (List<ProcedureJob> jobList : instanceMap.values()) {
|
||||
jobList.clear();
|
||||
}
|
||||
|
||||
instanceMap.clear();
|
||||
instanceMap = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a procedure
|
||||
*
|
||||
* @param procedureName
|
||||
* the name of the procedure
|
||||
* @param refSet
|
||||
* the edit area to run the procedure against
|
||||
* @param timeRange
|
||||
* the time range to run the procedure against
|
||||
* @param varDict
|
||||
* the cached varDict for the procedure, or null if there is none
|
||||
* (should be null unless called from within another procedure)
|
||||
*/
|
||||
private void execute(ProcedureController controller, String procedureName,
|
||||
ReferenceData refSet, TimeRange timeRange, String varDict) {
|
||||
|
||||
Job progressJob = new AsyncProgressJob(procedureName, this);
|
||||
IStatus pjStatus = Status.CANCEL_STATUS;
|
||||
try {
|
||||
List<String> argNames = controller.getMethodArguments(
|
||||
procedureName, "execute");
|
||||
Map<String, Object> argMap = getArgValues(argNames, refSet,
|
||||
timeRange);
|
||||
controller.setVarDict(varDict);
|
||||
progressJob.schedule();
|
||||
controller.executeProcedure(procedureName, argMap);
|
||||
pjStatus = Status.OK_STATUS;
|
||||
} catch (Exception e) {
|
||||
pjStatus = new Status(IStatus.WARNING, Activator.PLUGIN_ID,
|
||||
"Error in procedure " + procedureName, e);
|
||||
statusHandler.handle(Priority.PROBLEM, "Error executing procedure "
|
||||
+ procedureName, e);
|
||||
} catch (JepException e) {
|
||||
pjStatus = new Status(IStatus.WARNING, Activator.PLUGIN_ID,
|
||||
"Error in procedure " + procedureName, e);
|
||||
statusHandler.handle(Priority.PROBLEM, "Error executing procedure "
|
||||
+ procedureName, e);
|
||||
} finally {
|
||||
controller.garbageCollect();
|
||||
progressJob.done(pjStatus);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Maps a procedure's execute's argument name to an object
|
||||
*
|
||||
* @param args
|
||||
* the name of the objects
|
||||
* @param refSet
|
||||
* the edit area to run the procedure on
|
||||
* @param timeRange
|
||||
* the time range to run the procedure on
|
||||
* @return a map of argument names to objects
|
||||
* @throws GFEException
|
||||
*/
|
||||
private Map<String, Object> getArgValues(List<String> args,
|
||||
ReferenceData refSet, TimeRange timeRange) throws GFEException {
|
||||
Map<String, Object> argValueMap = new HashMap<String, Object>();
|
||||
// For each argument in args, append a value to the argValueList
|
||||
for (String arg : args) {
|
||||
if (arg.equals("varDict")) {
|
||||
argValueMap.put("varDict", null);
|
||||
} else if (arg.equals("editArea")) {
|
||||
argValueMap.put("editArea", refSet);
|
||||
} else if (arg.equals("timeRange")) {
|
||||
argValueMap.put("timeRange", timeRange);
|
||||
} else if (arg.equals("self")) {
|
||||
// skip
|
||||
} else {
|
||||
throw new GFEException("Unknown argument " + arg);
|
||||
}
|
||||
|
||||
}
|
||||
return argValueMap;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,432 @@
|
|||
/**
|
||||
* This software was developed and / or modified by Raytheon Company,
|
||||
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
*
|
||||
* U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
* This software product contains export-restricted data whose
|
||||
* export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
* to non-U.S. persons whether in the United States or abroad requires
|
||||
* an export license or other authorization.
|
||||
*
|
||||
* Contractor Name: Raytheon Company
|
||||
* Contractor Address: 6825 Pine Street, Suite 340
|
||||
* Mail Stop B8
|
||||
* Omaha, NE 68106
|
||||
* 402.291.0100
|
||||
*
|
||||
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
* further licensing information.
|
||||
**/
|
||||
package com.raytheon.viz.gfe.procedures;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import jep.JepException;
|
||||
|
||||
import org.eclipse.core.runtime.IProgressMonitor;
|
||||
import org.eclipse.core.runtime.IStatus;
|
||||
import org.eclipse.core.runtime.Status;
|
||||
import org.eclipse.core.runtime.jobs.Job;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.gfe.StatusConstants;
|
||||
import com.raytheon.uf.common.dataplugin.gfe.reference.ReferenceData;
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.uf.common.time.TimeRange;
|
||||
import com.raytheon.uf.common.time.util.TimeUtil;
|
||||
import com.raytheon.viz.gfe.Activator;
|
||||
import com.raytheon.viz.gfe.GFEException;
|
||||
import com.raytheon.viz.gfe.core.DataManager;
|
||||
import com.raytheon.viz.gfe.jobs.AsyncProgressJob;
|
||||
|
||||
/**
|
||||
* Job pool for running GFE procedures. Since JEP/JNI requires that the thread
|
||||
* that initialized the python interpreter is the same one that runs it, this
|
||||
* pool initializes an interpreter for procedures and then sleeps until a
|
||||
* request is enqueued.
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Dec 09, 2013 #2367 dgilling Initial creation
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
* @author dgilling
|
||||
* @version 1.0
|
||||
*/
|
||||
|
||||
public class ProcedureJobPool {
|
||||
|
||||
protected LinkedBlockingQueue<ProcedureRequest> workQueue = new LinkedBlockingQueue<ProcedureRequest>();
|
||||
|
||||
protected LinkedBlockingQueue<Job> jobQueue = new LinkedBlockingQueue<Job>();
|
||||
|
||||
protected List<Job> jobList;
|
||||
|
||||
protected boolean cancel = false;
|
||||
|
||||
protected Object cancelLock = new Object();
|
||||
|
||||
protected Object joinLock = new Object();
|
||||
|
||||
private final DataManager dataMgr;
|
||||
|
||||
private final int poolMaxSize;
|
||||
|
||||
/**
|
||||
* Creates a new ProcedureJobPool with the specified size parameters.
|
||||
*
|
||||
* @param corePoolSize
|
||||
* The minimum size of the job pool--will always have at least
|
||||
* this many Jobs ready to execute.
|
||||
* @param poolMaxSize
|
||||
* The maximum size of the job pool.
|
||||
* @param dataMgr
|
||||
* DataManager instance.
|
||||
*/
|
||||
public ProcedureJobPool(int corePoolSize, int poolMaxSize,
|
||||
DataManager dataMgr) {
|
||||
this.dataMgr = dataMgr;
|
||||
this.poolMaxSize = poolMaxSize;
|
||||
for (int i = 0; i < corePoolSize; i++) {
|
||||
Job job = new ProcedureJob(this.dataMgr);
|
||||
jobQueue.add(job);
|
||||
}
|
||||
this.jobList = new CopyOnWriteArrayList<Job>();
|
||||
}
|
||||
|
||||
/**
|
||||
* Enqueue the specified request into the job pool's request queue. Will be
|
||||
* worked by first available job. If calling from an existing thread in the
|
||||
* job pool, that thread will be reused to execute the request.
|
||||
*
|
||||
* @param request
|
||||
* ProcedureRequest containing information on procedure to
|
||||
* execute.
|
||||
*/
|
||||
public void schedule(ProcedureRequest request) {
|
||||
ProcedureJob reuseJob = null;
|
||||
|
||||
// do not schedule while canceling(cancel should be fast).
|
||||
synchronized (cancelLock) {
|
||||
if (cancel) {
|
||||
return;
|
||||
}
|
||||
// do not schedule while joining, join might be slow but the javaDoc
|
||||
// warns others.
|
||||
synchronized (joinLock) {
|
||||
boolean jobAvailable = false;
|
||||
Thread currentThread = Thread.currentThread();
|
||||
for (Job job : jobList) {
|
||||
Thread jobThread = job.getThread();
|
||||
ProcedureJob procJob = (ProcedureJob) job;
|
||||
if (currentThread == jobThread) {
|
||||
// this occurs when a running procedure uses
|
||||
// SmartScript.callProcedure()
|
||||
// for efficiency we want to just stay on this thread
|
||||
reuseJob = procJob;
|
||||
jobAvailable = true;
|
||||
break;
|
||||
} else if (!procJob.isRunning()) {
|
||||
jobAvailable = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (reuseJob == null) {
|
||||
if (!jobAvailable) {
|
||||
Job job = jobQueue.poll();
|
||||
if ((job == null) && (jobList.size() < poolMaxSize)) {
|
||||
job = new ProcedureJob(dataMgr);
|
||||
}
|
||||
if (job != null) {
|
||||
job.schedule();
|
||||
jobList.add(job);
|
||||
}
|
||||
}
|
||||
workQueue.offer(request);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (reuseJob != null) {
|
||||
reuseJob.processRequest(request);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Join on the Jobs in the pool. Attempting to schedule other Jobs will
|
||||
* block until join has returned so be careful when calling
|
||||
*/
|
||||
public void join() {
|
||||
synchronized (joinLock) {
|
||||
for (Job j : jobList) {
|
||||
try {
|
||||
j.join();
|
||||
} catch (InterruptedException e) {
|
||||
// Ignore interrupt
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancel the job pool, will clear out the workQueue then join on all jobs
|
||||
* running. Once canceled all future calls to schedule will be ignored.
|
||||
*/
|
||||
public void cancel() {
|
||||
cancel(true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancel the job pool, will clear out the workQueue and optionally join
|
||||
* running jobs. Once canceled all future calls to schedule will be ignored.
|
||||
*
|
||||
* @param join
|
||||
* true if you want to join before returning.
|
||||
*/
|
||||
public void cancel(boolean join) {
|
||||
synchronized (cancelLock) {
|
||||
cancel = true;
|
||||
workQueue.clear();
|
||||
for (Job j : jobList) {
|
||||
j.cancel();
|
||||
}
|
||||
}
|
||||
if (join) {
|
||||
join();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancels the specified request. Returns true if the provided request was
|
||||
* waiting to be run but now is not. Returns false if the provided request
|
||||
* is already running or if it was not enqueued to begin with.
|
||||
*
|
||||
* @param request
|
||||
* The request to cancel.
|
||||
* @return True, if the request was in the queue. False, if it was already
|
||||
* being worked by the pool or if it was not in the queue.
|
||||
*/
|
||||
public boolean cancel(ProcedureRequest request) {
|
||||
return workQueue.remove(request);
|
||||
}
|
||||
|
||||
/**
|
||||
* A job pool is considered active if any of the jobs it contains are
|
||||
* servicing a request or there are still requests to be worked off in the
|
||||
* queue.
|
||||
*
|
||||
* @return If any jobs are working off a request or there are requests still
|
||||
* in the work queue.
|
||||
*/
|
||||
public boolean isActive() {
|
||||
if (!workQueue.isEmpty()) {
|
||||
return true;
|
||||
}
|
||||
for (Job job : jobList) {
|
||||
ProcedureJob procJob = (ProcedureJob) job;
|
||||
if (procJob.isRunning()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of requests remaining in the queue and the number of jobs in
|
||||
* the pool currently working off a request.
|
||||
*
|
||||
* @return The number of requests remaining in the queue and the number of jobs
|
||||
* in the pool currently working off a request.
|
||||
*/
|
||||
public int[] getWorkRemaining() {
|
||||
int jobsRunning = 0;
|
||||
for (Job job : jobList) {
|
||||
ProcedureJob procJob = (ProcedureJob) job;
|
||||
if (procJob.isRunning()) {
|
||||
jobsRunning++;
|
||||
}
|
||||
}
|
||||
|
||||
return new int[] { jobsRunning, workQueue.size() };
|
||||
}
|
||||
|
||||
protected class ProcedureJob extends Job {
|
||||
|
||||
private final IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(ProcedureJob.class);
|
||||
|
||||
private ProcedureController python;
|
||||
|
||||
private final DataManager dataMgr;
|
||||
|
||||
private volatile boolean running;
|
||||
|
||||
public ProcedureJob(DataManager dataMgr) {
|
||||
super("GFE Procedures Job");
|
||||
this.dataMgr = dataMgr;
|
||||
this.running = false;
|
||||
setSystem(true);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IStatus run(IProgressMonitor monitor) {
|
||||
try {
|
||||
python = ProcedureFactory.buildController(dataMgr);
|
||||
} catch (JepException e) {
|
||||
jobList.remove(this);
|
||||
statusHandler.error("Error initializing procedure python", e);
|
||||
return new Status(IStatus.ERROR, StatusConstants.PLUGIN_ID,
|
||||
"Error initializing procedure python", e);
|
||||
}
|
||||
|
||||
IStatus statusCode = Status.OK_STATUS;
|
||||
try {
|
||||
while (!monitor.isCanceled()) {
|
||||
try {
|
||||
ProcedureRequest request = null;
|
||||
try {
|
||||
request = workQueue.poll(
|
||||
TimeUtil.MILLIS_PER_SECOND,
|
||||
TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
statusCode = Status.CANCEL_STATUS;
|
||||
break;
|
||||
}
|
||||
|
||||
if (monitor.isCanceled()) {
|
||||
statusCode = Status.CANCEL_STATUS;
|
||||
break;
|
||||
}
|
||||
|
||||
if (request != null) {
|
||||
running = true;
|
||||
|
||||
python.processFileUpdates();
|
||||
if (monitor.isCanceled()) {
|
||||
statusCode = Status.CANCEL_STATUS;
|
||||
break;
|
||||
}
|
||||
|
||||
processRequest(request);
|
||||
running = false;
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
statusHandler.error(
|
||||
"Unhandled exception in ProcedureJob.", t);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
if (python != null) {
|
||||
python.dispose();
|
||||
python = null;
|
||||
}
|
||||
}
|
||||
|
||||
return statusCode;
|
||||
}
|
||||
|
||||
protected void processRequest(ProcedureRequest request) {
|
||||
Object retVal = null;
|
||||
try {
|
||||
execute(python, request);
|
||||
retVal = null;
|
||||
} catch (Throwable t) {
|
||||
statusHandler
|
||||
.handle(Priority.PROBLEM, "Error running procedure "
|
||||
+ request.getProcedureName(), t);
|
||||
retVal = t;
|
||||
} finally {
|
||||
dataMgr.getEditActionProcessor().wrapUpExecute(
|
||||
request.getPreview(), false);
|
||||
request.requestComplete(retVal);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a procedure
|
||||
*
|
||||
* @param controller
*            the controller used to run the procedure
|
||||
* @param request
|
||||
* the request containing data on the procedure to run.
|
||||
* @throws Exception
|
||||
* @throws JepException
|
||||
*/
|
||||
private void execute(ProcedureController controller,
|
||||
ProcedureRequest request) throws Exception, JepException {
|
||||
String procedureName = request.getProcedureName();
|
||||
Job progressJob = new AsyncProgressJob(procedureName, this);
|
||||
IStatus pjStatus = Status.CANCEL_STATUS;
|
||||
progressJob.schedule();
|
||||
|
||||
try {
|
||||
List<String> argNames = controller.getMethodArguments(
|
||||
procedureName, "execute");
|
||||
Map<String, Object> argMap = getArgValues(argNames,
|
||||
request.getRefSet(), request.getTimeRange());
|
||||
controller.setVarDict(request.getVarDict());
|
||||
controller.executeProcedure(procedureName, argMap);
|
||||
pjStatus = Status.OK_STATUS;
|
||||
} catch (Exception e) {
|
||||
pjStatus = new Status(IStatus.WARNING, Activator.PLUGIN_ID,
|
||||
"Error in procedure " + procedureName, e);
|
||||
throw e;
|
||||
} catch (JepException e) {
|
||||
pjStatus = new Status(IStatus.WARNING, Activator.PLUGIN_ID,
|
||||
"Error in procedure " + procedureName, e);
|
||||
throw e;
|
||||
} finally {
|
||||
controller.garbageCollect();
|
||||
progressJob.done(pjStatus);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Maps a procedure's execute() argument names to objects
|
||||
*
|
||||
* @param args
|
||||
* the name of the objects
|
||||
* @param refSet
|
||||
* the edit area to run the procedure on
|
||||
* @param timeRange
|
||||
* the time range to run the procedure on
|
||||
* @return a map of argument names to objects
|
||||
* @throws GFEException
|
||||
*/
|
||||
private Map<String, Object> getArgValues(List<String> args,
|
||||
ReferenceData refSet, TimeRange timeRange) throws GFEException {
|
||||
Map<String, Object> argValueMap = new HashMap<String, Object>();
|
||||
// For each argument in args, append a value to the argValueList
|
||||
for (String arg : args) {
|
||||
if (arg.equals("varDict")) {
|
||||
argValueMap.put("varDict", null);
|
||||
} else if (arg.equals("editArea")) {
|
||||
argValueMap.put("editArea", refSet);
|
||||
} else if (arg.equals("timeRange")) {
|
||||
argValueMap.put("timeRange", timeRange);
|
||||
} else if (arg.equals("self")) {
|
||||
// skip
|
||||
} else {
|
||||
throw new GFEException("Unknown argument " + arg);
|
||||
}
|
||||
|
||||
}
|
||||
return argValueMap;
|
||||
}
|
||||
|
||||
public boolean isRunning() {
|
||||
return running;
|
||||
}
|
||||
}
|
||||
}
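/*
 * A minimal usage sketch of the pool above, assuming a DataManager "dm" that
 * exposes getProcedureJobPool() (as wired elsewhere in this change) and an
 * already-built ProcedureRequest "req"; illustrative only, not part of the
 * committed file.
 *
 *     ProcedureJobPool pool = new ProcedureJobPool(1, 3, dm);
 *     pool.schedule(req);                    // picked up by the first idle job
 *     int[] work = pool.getWorkRemaining();  // {jobs running, requests queued}
 *     // ... on shutdown:
 *     pool.cancel(true);                     // clear the queue and join the jobs
 */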
|
|
@ -36,7 +36,8 @@ import com.raytheon.viz.gfe.ui.runtimeui.SelectionDlg;
|
|||
* SOFTWARE HISTORY
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Feb 9, 2010 njensen Initial creation
|
||||
* Feb 09, 2010 njensen Initial creation
|
||||
* Dec 09, 2013 #2367 dgilling Use new ProcedureJobPool.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -67,8 +68,7 @@ public class ProcedureSelectionDlg extends SelectionDlg {
|
|||
.transformVarDict(getValues());
|
||||
req.setVarDict(varDict);
|
||||
req.setPreview(pi);
|
||||
// ProcedureJob.getInstance(dataMgr).enqueue(req);
|
||||
ProcedureJob.enqueue(dataMgr, req);
|
||||
dataMgr.getProcedureJobPool().schedule(req);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -44,8 +44,9 @@ import com.raytheon.viz.gfe.smarttool.PreviewInfo;
|
|||
* SOFTWARE HISTORY
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Feb 9, 2010 njensen Initial creation
|
||||
* 4/26/2012 14748 ryu Use edit area and time range from preview info
|
||||
* Feb 09, 2010 njensen Initial creation
|
||||
* Apr 26, 2012 14748 ryu Use edit area and time range from preview info
|
||||
* Dec 09, 2013 #2367 dgilling Use new ProcedureJobPool.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -123,7 +124,7 @@ public class ProcedureUtil {
|
|||
});
|
||||
}
|
||||
|
||||
ProcedureJob.enqueue(dm, req);
|
||||
dm.getProcedureJobPool().schedule(req);
|
||||
return req.getResult();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -154,7 +154,7 @@ import com.vividsolutions.jts.geom.Envelope;
|
|||
*
|
||||
* SOFTWARE HISTORY
|
||||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------- -------- ----------- --------------------------
|
||||
* Mar 01, 2008 chammack Initial Creation.
|
||||
* Aug 20, 2008 dglazesk Update for the ColorMap interface change
|
||||
|
@ -165,6 +165,7 @@ import com.vividsolutions.jts.geom.Envelope;
|
|||
* Aug 27, 2013 2287 randerso Fixed scaling and direction of wind arrows
|
||||
* Sep 23, 2013 2363 bsteffen Add more vector configuration options.
|
||||
* Oct 31, 2013 2508 randerso Change to use DiscreteGridSlice.getKeys()
|
||||
* Dec 11, 2013 2621 randerso Removed conditional from getParm so it never returns null
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -354,11 +355,7 @@ public class GFEResource extends
|
|||
* @return Returns the parm associated with the GFE Resource
|
||||
*/
|
||||
public Parm getParm() {
|
||||
Parm retVal = null;
|
||||
if (this.getStatus() != ResourceStatus.DISPOSED) {
|
||||
retVal = this.parm;
|
||||
}
|
||||
return retVal;
|
||||
return this.parm;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -36,14 +36,13 @@ import com.raytheon.viz.gfe.core.DataManager;
|
|||
import com.raytheon.viz.gfe.core.parm.Parm;
|
||||
import com.raytheon.viz.gfe.smartscript.FieldDefinition;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolBlockingSelectionDlg;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolJob;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolRequest;
|
||||
import com.raytheon.viz.gfe.smarttool.script.SmartToolSelectionDlg;
|
||||
import com.raytheon.viz.gfe.ui.runtimeui.SelectionDlg;
|
||||
|
||||
/**
|
||||
* Utilities for smart tools
|
||||
*
|
||||
*
|
||||
* <pre>
|
||||
* SOFTWARE HISTORY
|
||||
* Date Ticket# Engineer Description
|
||||
|
@ -52,9 +51,10 @@ import com.raytheon.viz.gfe.ui.runtimeui.SelectionDlg;
|
|||
* Dec 1, 2009 1426 ryu Add time range warning
|
||||
* Nov 15, 2012 1298 rferrel Changes for non-blocking procedures.
|
||||
* Jun 25, 2013 16065 ryu Passing outerLevel to smart tool job.
|
||||
*
|
||||
* Dec 10, 2013 #2367 dgilling Use new SmartToolJobPool.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
*
|
||||
* @author njensen
|
||||
* @version 1.0
|
||||
*/
|
||||
|
@ -67,7 +67,7 @@ public class SmartUtil {
|
|||
* Checks if LD_PRELOAD is set in the environment. If not, jep may have
|
||||
* issues importing modules. (Note that this presumes LD_PRELOAD was set
|
||||
* correctly to point at the python .so file).
|
||||
*
|
||||
*
|
||||
* @return if LD_PRELOAD is set
|
||||
*/
|
||||
public static boolean isLdPreloadSet() {
|
||||
|
@ -118,7 +118,7 @@ public class SmartUtil {
|
|||
if (pi != null) {
|
||||
SmartToolRequest req = buildSmartToolRequest(dm, pi, true);
|
||||
if (req != null) {
|
||||
SmartToolJob.enqueue(dm, req);
|
||||
dm.getSmartToolJobPool().schedule(req);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -145,8 +145,8 @@ public class SmartUtil {
|
|||
timeRange, editArea, emptyEditAreaFlag,
|
||||
MissingDataMode.valueFrom(missingDataMode));
|
||||
PreviewInfo pi = new PreviewInfo(editAction, passErrors, parm);
|
||||
final SmartToolRequest req = SmartUtil.
|
||||
buildSmartToolRequest(dm, pi, false);
|
||||
final SmartToolRequest req = SmartUtil.buildSmartToolRequest(dm, pi,
|
||||
false);
|
||||
|
||||
if (varDict != null) {
|
||||
req.setVarDict(varDict);
|
||||
|
@ -195,7 +195,7 @@ public class SmartUtil {
|
|||
});
|
||||
}
|
||||
|
||||
SmartToolJob.enqueue(dm, req);
|
||||
dm.getSmartToolJobPool().schedule(req);
|
||||
return req.getResult();
|
||||
}
|
||||
}
|
||||
@ -1,378 +0,0 @@
|
|||
/**
|
||||
* This software was developed and / or modified by Raytheon Company,
|
||||
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
*
|
||||
* U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
* This software product contains export-restricted data whose
|
||||
* export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
* to non-U.S. persons whether in the United States or abroad requires
|
||||
* an export license or other authorization.
|
||||
*
|
||||
* Contractor Name: Raytheon Company
|
||||
* Contractor Address: 6825 Pine Street, Suite 340
|
||||
* Mail Stop B8
|
||||
* Omaha, NE 68106
|
||||
* 402.291.0100
|
||||
*
|
||||
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
* further licensing information.
|
||||
**/
|
||||
package com.raytheon.viz.gfe.smarttool.script;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import jep.JepException;
|
||||
|
||||
import org.eclipse.core.runtime.IProgressMonitor;
|
||||
import org.eclipse.core.runtime.IStatus;
|
||||
import org.eclipse.core.runtime.Status;
|
||||
import org.eclipse.core.runtime.jobs.Job;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.gfe.StatusConstants;
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.uf.viz.core.jobs.AbstractQueueJob;
|
||||
import com.raytheon.viz.gfe.Activator;
|
||||
import com.raytheon.viz.gfe.core.DataManager;
|
||||
import com.raytheon.viz.gfe.jobs.AsyncProgressJob;
|
||||
import com.raytheon.viz.gfe.smarttool.EditAction;
|
||||
import com.raytheon.viz.gfe.smarttool.SmartToolException;
|
||||
import com.raytheon.viz.gfe.smarttool.Tool;
|
||||
|
||||
/**
|
||||
* Job for running smart tools off the UI thread
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Jan 19, 2010 njensen Initial creation
|
||||
* Jan 18, 2013 1509 njensen Garbage collect after running tool
|
||||
* Apr 03, 2013 1855 njensen Never dispose interpreters until shutdown
|
||||
* Jun 25, 2013 16065 ryu Clear undo parms list before tool execution
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
* @author njensen
|
||||
* @version 1.0
|
||||
*/
|
||||
|
||||
public class SmartToolJob extends AbstractQueueJob<SmartToolRequest> {
|
||||
|
||||
/**
|
||||
* Maximum number of jobs to keep for a given Data Manager.
|
||||
*/
|
||||
private final static int maxJobs = 3;
|
||||
|
||||
/**
|
||||
* Index of the job that owns the queue. Code will break if this is not zero.
|
||||
*/
|
||||
private final static int QUEUE_JOB_INDEX = 0;
|
||||
|
||||
private static final transient IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(SmartToolJob.class);
|
||||
|
||||
private static Map<DataManager, List<SmartToolJob>> instanceMap = null;
|
||||
|
||||
private DataManager dataMgr;
|
||||
|
||||
/**
|
||||
* The request being processed.
|
||||
*/
|
||||
private SmartToolRequest request = null;
|
||||
|
||||
protected SmartToolJob(DataManager dataMgr) {
|
||||
super("GFE Smart Tool Job");
|
||||
this.dataMgr = dataMgr;
|
||||
}
|
||||
|
||||
private void getRequest() throws InterruptedException {
|
||||
if (instanceMap == null) {
|
||||
request = null;
|
||||
return;
|
||||
}
|
||||
|
||||
List<SmartToolJob> jobList = instanceMap.get(dataMgr);
|
||||
if (jobList == null || jobList.size() == 0
|
||||
|| jobList.get(QUEUE_JOB_INDEX).queue == null) {
|
||||
request = null;
|
||||
} else {
|
||||
request = jobList.get(QUEUE_JOB_INDEX).queue.poll(1000L,
|
||||
TimeUnit.MILLISECONDS);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IStatus run(IProgressMonitor monitor) {
|
||||
SmartToolController python = null;
|
||||
try {
|
||||
python = SmartToolFactory.buildController(dataMgr);
|
||||
} catch (JepException e) {
|
||||
SmartToolJob.removeJob(dataMgr, this);
|
||||
return new Status(IStatus.ERROR, StatusConstants.PLUGIN_ID,
|
||||
"Error initializing smart tool python", e);
|
||||
}
|
||||
|
||||
try {
|
||||
// Used req to wrap up request after leaving the synchronized
|
||||
// region.
|
||||
SmartToolRequest req = null;
|
||||
while (monitor.isCanceled() == false) {
|
||||
try {
|
||||
getRequest();
|
||||
|
||||
// May have been canceled while waiting.
|
||||
if (monitor.isCanceled()) {
|
||||
break;
|
||||
}
|
||||
|
||||
synchronized (this) {
|
||||
if (request != null) {
|
||||
python.processFileUpdates();
|
||||
EditAction ea = request.getPreview()
|
||||
.getEditAction();
|
||||
Job progressJob = new AsyncProgressJob(
|
||||
ea.getItemName(), this);
|
||||
progressJob.schedule();
|
||||
IStatus pjResult = Status.CANCEL_STATUS;
|
||||
try {
|
||||
if (request.getOuterLevel()) {
|
||||
dataMgr.getParmOp().clearUndoParmList();
|
||||
}
|
||||
Tool tool = new Tool(dataMgr.getParmManager(),
|
||||
request.getPreview().getParm(),
|
||||
ea.getItemName(), python);
|
||||
tool.execute(ea.getItemName(), request
|
||||
.getPreview().getParm(),
|
||||
ea.getRefSet(), ea.getTimeRange(),
|
||||
request.getVarDict(), ea
|
||||
.getMissingDataMode(), monitor);
|
||||
request.requestComplete(null);
|
||||
pjResult = Status.OK_STATUS;
|
||||
|
||||
} catch (SmartToolException e) {
|
||||
pjResult = new Status(IStatus.WARNING,
|
||||
Activator.PLUGIN_ID,
|
||||
"Error in smart tool", e);
|
||||
throw e;
|
||||
} finally {
|
||||
python.garbageCollect();
|
||||
progressJob.done(pjResult);
|
||||
req = request;
|
||||
request = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Smart tool thread interrupted", e);
|
||||
break;
|
||||
} catch (SmartToolException e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Error running tool ", e);
|
||||
if (req != null) {
|
||||
req.requestComplete(e);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Error running tool ", t);
|
||||
if (req != null) {
|
||||
req.requestComplete(t);
|
||||
}
|
||||
} finally {
|
||||
if (req != null && req.getPreview() != null) {
|
||||
this.dataMgr.getEditActionProcessor().wrapUpExecute(
|
||||
req.getPreview(), true);
|
||||
}
|
||||
req = null;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
System.err.println("Shutdown instance of SmartToolJob");
|
||||
if (python != null) {
|
||||
python.dispose();
|
||||
python = null;
|
||||
}
|
||||
}
|
||||
|
||||
return Status.OK_STATUS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a job from the Data Manager's job list.
|
||||
*
|
||||
* @param dataMgr
|
||||
* - The job's data manager
|
||||
* @param job
|
||||
* - The job to remove
|
||||
*/
|
||||
private static synchronized void removeJob(DataManager dataMgr,
|
||||
SmartToolJob job) {
|
||||
if (instanceMap == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
List<SmartToolJob> jobList = instanceMap.get(dataMgr);
|
||||
|
||||
if (jobList != null) {
|
||||
jobList.remove(job);
|
||||
|
||||
// Removing the job that owns the queue clears the job list so the
// next request will set up a new queue.
|
||||
if (job.queue != null) {
|
||||
jobList.clear();
|
||||
instanceMap.remove(dataMgr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This manages the scheduling of jobs to service a Data Manager's requests.
|
||||
*
|
||||
* @param dataMgr
|
||||
* - Data Manager for the request
|
||||
* @param request
|
||||
* - The request to service
|
||||
* @return state - true when job available to process request otherwise
|
||||
* false and request is queued to wait for next available job
|
||||
*/
|
||||
public static synchronized boolean enqueue(DataManager dataMgr,
|
||||
SmartToolRequest request) {
|
||||
if (instanceMap == null) {
|
||||
instanceMap = new HashMap<DataManager, List<SmartToolJob>>();
|
||||
}
|
||||
|
||||
List<SmartToolJob> jobList = instanceMap.get(dataMgr);
|
||||
if (jobList == null) {
|
||||
jobList = new ArrayList<SmartToolJob>();
|
||||
// Add the first job which contains the queue used by all jobs in
|
||||
// the list.
|
||||
SmartToolJob job = new SmartToolJob(dataMgr);
|
||||
jobList.add(job);
|
||||
instanceMap.put(dataMgr, jobList);
|
||||
job.setSystem(true);
|
||||
job.schedule();
|
||||
}
|
||||
boolean jobAvailable = false;
|
||||
for (SmartToolJob job : jobList) {
|
||||
if (job.request == null) {
|
||||
jobAvailable = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// All jobs for data manager are busy, add another if we haven't reached
|
||||
// the limit
|
||||
if (!jobAvailable && jobList.size() < maxJobs) {
|
||||
SmartToolJob job = new SmartToolJob(dataMgr);
|
||||
job.setSystem(true);
|
||||
jobList.add(job);
|
||||
// The additional job's queue is never used
|
||||
job.queue = null;
|
||||
job.schedule();
|
||||
jobAvailable = true;
|
||||
}
|
||||
|
||||
jobList.get(QUEUE_JOB_INDEX).enqueue(request);
|
||||
return jobAvailable;
|
||||
}
|
||||
|
||||
/**
|
||||
* This returns an array of two integers: the first is the number of Smart
* Tool Jobs being processed and the second is the number in the queue
* waiting to be processed.
|
||||
*
|
||||
* @return cnts
|
||||
*/
|
||||
public static int[] getJobCount() {
|
||||
int[] cnt = new int[] { 0, 0 };
|
||||
if (instanceMap != null) {
|
||||
for (List<SmartToolJob> jobList : instanceMap.values()) {
|
||||
cnt[1] += jobList.get(QUEUE_JOB_INDEX).queue.size();
|
||||
for (SmartToolJob job : jobList) {
|
||||
if (job.request != null) {
|
||||
++cnt[0];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if there are any Smart Tool Jobs queued and/or being processed.
|
||||
*
|
||||
* @return true when there are jobs queued or being processed, otherwise
|
||||
* false
|
||||
*/
|
||||
public static boolean haveJobs() {
|
||||
boolean result = false;
|
||||
|
||||
if (instanceMap != null) {
|
||||
for (List<SmartToolJob> jobList : instanceMap.values()) {
|
||||
// Any pending requests.
|
||||
if (jobList.get(QUEUE_JOB_INDEX).queue.size() > 0) {
|
||||
result = true;
|
||||
break;
|
||||
}
|
||||
|
||||
// Any requests being processed.
|
||||
for (SmartToolJob job : jobList) {
|
||||
if (job.request != null) {
|
||||
result = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* This terminates all the Data Managers' jobs.
|
||||
*/
|
||||
public static synchronized void shutdown() {
|
||||
// TODO This currently joins with a job waiting for it to finish, which
// can take a long time and may even be waiting for user input. Must
// find a way to kill any GUI associated with a request and, if python is
// running, a way to terminate it so no waiting is involved.
|
||||
if (instanceMap != null) {
|
||||
for (List<SmartToolJob> jobList : instanceMap.values()) {
|
||||
jobList.get(QUEUE_JOB_INDEX).queue.clear();
|
||||
|
||||
// Do in reverse order so last job cancel is the one with the
|
||||
// queue.
|
||||
for (int index = jobList.size() - 1; index >= 0; --index) {
|
||||
jobList.get(index).cancel();
|
||||
}
|
||||
}
|
||||
|
||||
for (List<SmartToolJob> jobList : instanceMap.values()) {
|
||||
for (SmartToolJob job : jobList) {
|
||||
synchronized (job) {
|
||||
try {
|
||||
if (job.getState() != Job.NONE) {
|
||||
job.join();
|
||||
}
|
||||
} catch (InterruptedException ex) {
|
||||
// System.err.println("here SmartToolJob");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (List<SmartToolJob> jobList : instanceMap.values()) {
|
||||
jobList.clear();
|
||||
}
|
||||
|
||||
instanceMap.clear();
|
||||
instanceMap = null;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,377 @@
|
|||
/**
|
||||
* This software was developed and / or modified by Raytheon Company,
|
||||
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
*
|
||||
* U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
* This software product contains export-restricted data whose
|
||||
* export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
* to non-U.S. persons whether in the United States or abroad requires
|
||||
* an export license or other authorization.
|
||||
*
|
||||
* Contractor Name: Raytheon Company
|
||||
* Contractor Address: 6825 Pine Street, Suite 340
|
||||
* Mail Stop B8
|
||||
* Omaha, NE 68106
|
||||
* 402.291.0100
|
||||
*
|
||||
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
* further licensing information.
|
||||
**/
|
||||
package com.raytheon.viz.gfe.smarttool.script;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import jep.JepException;
|
||||
|
||||
import org.eclipse.core.runtime.IProgressMonitor;
|
||||
import org.eclipse.core.runtime.IStatus;
|
||||
import org.eclipse.core.runtime.Status;
|
||||
import org.eclipse.core.runtime.jobs.Job;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.gfe.StatusConstants;
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.time.util.TimeUtil;
|
||||
import com.raytheon.viz.gfe.Activator;
|
||||
import com.raytheon.viz.gfe.core.DataManager;
|
||||
import com.raytheon.viz.gfe.jobs.AsyncProgressJob;
|
||||
import com.raytheon.viz.gfe.smarttool.EditAction;
|
||||
import com.raytheon.viz.gfe.smarttool.SmartToolException;
|
||||
import com.raytheon.viz.gfe.smarttool.Tool;
|
||||
|
||||
/**
|
||||
* Job pool for running smart tools off the UI thread.
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Dec 09, 2013 #2367 dgilling Initial creation
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
* @author dgilling
|
||||
* @version 1.0
|
||||
*/
|
||||
|
||||
public class SmartToolJobPool {
|
||||
|
||||
protected LinkedBlockingQueue<SmartToolRequest> workQueue = new LinkedBlockingQueue<SmartToolRequest>();
|
||||
|
||||
protected LinkedBlockingQueue<Job> jobQueue = new LinkedBlockingQueue<Job>();
|
||||
|
||||
protected List<Job> jobList;
|
||||
|
||||
protected boolean cancel = false;
|
||||
|
||||
protected Object cancelLock = new Object();
|
||||
|
||||
protected Object joinLock = new Object();
|
||||
|
||||
private final DataManager dataMgr;
|
||||
|
||||
private final int poolMaxSize;
|
||||
|
||||
/**
|
||||
* Creates a new SmartToolJobPool with the specified size parameters.
|
||||
*
|
||||
* @param corePoolSize
|
||||
* The minimum size of the job pool--will always have at least
|
||||
* this many Jobs ready to execute.
|
||||
* @param poolMaxSize
|
||||
* The maximum size of the job pool.
|
||||
* @param dataMgr
|
||||
* DataManager instance.
|
||||
*/
|
||||
public SmartToolJobPool(int corePoolSize, int poolMaxSize,
|
||||
DataManager dataMgr) {
|
||||
this.dataMgr = dataMgr;
|
||||
this.poolMaxSize = poolMaxSize;
|
||||
for (int i = 0; i < corePoolSize; i++) {
|
||||
Job job = new SmartToolJob(this.dataMgr);
|
||||
jobQueue.add(job);
|
||||
}
|
||||
this.jobList = new CopyOnWriteArrayList<Job>();
|
||||
}
|
||||
|
||||
/**
|
||||
* Enqueue the specified request into the job pool's request queue. Will be
|
||||
* worked by first available job.
|
||||
*
|
||||
* @param request
|
||||
* SmartToolRequest containing information on the smart tool to
* execute.
|
||||
*/
|
||||
public void schedule(SmartToolRequest request) {
|
||||
// do not schedule while canceling(cancel should be fast).
|
||||
synchronized (cancelLock) {
|
||||
if (cancel) {
|
||||
return;
|
||||
}
|
||||
// do not schedule while joining, join might be slow but the javaDoc
|
||||
// warns others.
|
||||
synchronized (joinLock) {
|
||||
if (!isJobAvailable()) {
|
||||
Job job = jobQueue.poll();
|
||||
if ((job == null) && (jobList.size() < poolMaxSize)) {
|
||||
job = new SmartToolJob(dataMgr);
|
||||
}
|
||||
if (job != null) {
|
||||
job.schedule();
|
||||
jobList.add(job);
|
||||
}
|
||||
}
|
||||
workQueue.offer(request);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isJobAvailable() {
|
||||
for (Job job : jobList) {
|
||||
SmartToolJob toolJob = (SmartToolJob) job;
|
||||
if (!toolJob.isRunning()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Join on the Jobs in the pool. Attempting to schedule other Jobs will
|
||||
* block until join has returned so be careful when calling
|
||||
*/
|
||||
public void join() {
|
||||
synchronized (joinLock) {
|
||||
for (Job j : jobList) {
|
||||
try {
|
||||
j.join();
|
||||
} catch (InterruptedException e) {
|
||||
// Ignore interrupt
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancel the job pool, will clear out the workQueue then join on all jobs
|
||||
* running. Once canceled all future calls to schedule will be ignored.
|
||||
*/
|
||||
public void cancel() {
|
||||
cancel(true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancel the job pool, will clear out the workQueue and optionally join
|
||||
* running jobs. Once canceled all future calls to schedule will be ignored.
|
||||
*
|
||||
* @param join
|
||||
* true if you want to join before returning.
|
||||
*/
|
||||
public void cancel(boolean join) {
|
||||
synchronized (cancelLock) {
|
||||
cancel = true;
|
||||
workQueue.clear();
|
||||
for (Job j : jobList) {
|
||||
j.cancel();
|
||||
}
|
||||
}
|
||||
if (join) {
|
||||
join();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancels the specified request. Returns true if the provided request was
|
||||
* waiting to be run but now is not. Returns false if the provided request
|
||||
* is already running or if it was not enqueued to begin with.
|
||||
*
|
||||
* @param request
|
||||
* The request to cancel.
|
||||
* @return True, if the request was in the queue. False, if it was already
|
||||
* being worked by the pool or if it was not in the queue.
|
||||
*/
|
||||
public boolean cancel(SmartToolRequest request) {
|
||||
return workQueue.remove(request);
|
||||
}
|
||||
|
||||
/**
|
||||
* A job pool is considered active if any of the jobs it contains are
|
||||
* servicing a request or there are still requests to be worked off in the
|
||||
* queue.
|
||||
*
|
||||
* @return If any jobs are working off a request or there are requests still
|
||||
* in the work queue.
|
||||
*/
|
||||
public boolean isActive() {
|
||||
if (!workQueue.isEmpty()) {
|
||||
return true;
|
||||
}
|
||||
for (Job job : jobList) {
|
||||
SmartToolJob toolJob = (SmartToolJob) job;
|
||||
if (toolJob.isRunning()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of requests remaining in the queue and the number of jobs in
|
||||
* the pool currently working off a request.
|
||||
*
|
||||
* @return The number of requests remaining in the queue and the number of jobs
|
||||
* in the pool currently working off a request.
|
||||
*/
|
||||
public int[] getWorkRemaining() {
|
||||
int jobsRunning = 0;
|
||||
for (Job job : jobList) {
|
||||
SmartToolJob toolJob = (SmartToolJob) job;
|
||||
if (toolJob.isRunning()) {
|
||||
jobsRunning++;
|
||||
}
|
||||
}
|
||||
|
||||
return new int[] { jobsRunning, workQueue.size() };
|
||||
}
|
||||
|
||||
protected class SmartToolJob extends Job {
|
||||
|
||||
private final IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(SmartToolJob.class);
|
||||
|
||||
private SmartToolController python;
|
||||
|
||||
private final DataManager dataMgr;
|
||||
|
||||
private volatile boolean running;
|
||||
|
||||
public SmartToolJob(DataManager dataMgr) {
|
||||
super("GFE Smart Tool Job");
|
||||
this.dataMgr = dataMgr;
|
||||
this.running = false;
|
||||
setSystem(true);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IStatus run(IProgressMonitor monitor) {
|
||||
try {
|
||||
python = SmartToolFactory.buildController(dataMgr);
|
||||
} catch (JepException e) {
|
||||
jobList.remove(this);
|
||||
statusHandler.error("Error initializing procedure python", e);
|
||||
return new Status(IStatus.ERROR, StatusConstants.PLUGIN_ID,
|
||||
"Error initializing procedure python", e);
|
||||
}
|
||||
|
||||
IStatus statusCode = Status.OK_STATUS;
|
||||
try {
|
||||
while (!monitor.isCanceled()) {
|
||||
try {
|
||||
SmartToolRequest request = null;
|
||||
try {
|
||||
request = workQueue.poll(
|
||||
TimeUtil.MILLIS_PER_SECOND,
|
||||
TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
statusCode = Status.CANCEL_STATUS;
|
||||
break;
|
||||
}
|
||||
|
||||
if (monitor.isCanceled()) {
|
||||
statusCode = Status.CANCEL_STATUS;
|
||||
break;
|
||||
}
|
||||
|
||||
if (request != null) {
|
||||
running = true;
|
||||
|
||||
python.processFileUpdates();
|
||||
if (monitor.isCanceled()) {
|
||||
statusCode = Status.CANCEL_STATUS;
|
||||
break;
|
||||
}
|
||||
|
||||
Object retVal = null;
|
||||
try {
|
||||
execute(python, request, monitor);
|
||||
retVal = null;
|
||||
} catch (Throwable t) {
|
||||
String toolName = request.getPreview()
|
||||
.getEditAction().getItemName();
|
||||
statusHandler.error("Error running smart tool "
|
||||
+ toolName, t);
|
||||
retVal = t;
|
||||
} finally {
|
||||
if (request.getPreview() != null) {
|
||||
dataMgr.getEditActionProcessor()
|
||||
.wrapUpExecute(
|
||||
request.getPreview(), true);
|
||||
}
|
||||
request.requestComplete(retVal);
|
||||
running = false;
|
||||
}
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
statusHandler.error(
|
||||
"Unhandled exception in SmartToolJob.", t);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
if (python != null) {
|
||||
python.dispose();
|
||||
python = null;
|
||||
}
|
||||
}
|
||||
|
||||
return statusCode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a smart tool.
|
||||
*
|
||||
* @param controller
|
||||
* @param request
|
||||
* @param monitor
|
||||
* @throws SmartToolException
|
||||
*/
|
||||
private void execute(SmartToolController controller,
|
||||
SmartToolRequest request, IProgressMonitor monitor)
|
||||
throws SmartToolException {
|
||||
EditAction ea = request.getPreview().getEditAction();
|
||||
String toolName = ea.getItemName();
|
||||
|
||||
Job progressJob = new AsyncProgressJob(toolName, this);
|
||||
progressJob.schedule();
|
||||
IStatus pjStatus = Status.CANCEL_STATUS;
|
||||
|
||||
try {
|
||||
if (request.getOuterLevel()) {
|
||||
dataMgr.getParmOp().clearUndoParmList();
|
||||
}
|
||||
Tool tool = new Tool(dataMgr.getParmManager(), request
|
||||
.getPreview().getParm(), ea.getItemName(), python);
|
||||
tool.execute(ea.getItemName(), request.getPreview().getParm(),
|
||||
ea.getRefSet(), ea.getTimeRange(),
|
||||
request.getVarDict(), ea.getMissingDataMode(), monitor);
|
||||
pjStatus = Status.OK_STATUS;
|
||||
} catch (SmartToolException e) {
|
||||
pjStatus = new Status(IStatus.WARNING, Activator.PLUGIN_ID,
|
||||
"Error in smart tool " + toolName, e);
|
||||
throw e;
|
||||
} finally {
|
||||
controller.garbageCollect();
|
||||
progressJob.done(pjStatus);
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isRunning() {
|
||||
return running;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -31,17 +31,18 @@ import com.raytheon.viz.gfe.ui.runtimeui.SelectionDlg;
|
|||
|
||||
/**
|
||||
* Dynamic GUI for showing smart tools' Variable Lists and running the tools
|
||||
*
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Feb 9, 2010 njensen Initial creation
|
||||
* Jun 25, 2013 16065 ryu Passing outerLevel to tool job
|
||||
*
|
||||
* Dec 10, 2013 #2367 dgilling Use new SmartToolJobPool.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
*
|
||||
* @author njensen
|
||||
* @version 1.0
|
||||
*/
|
||||
|
@ -55,20 +56,20 @@ public class SmartToolSelectionDlg extends SelectionDlg {
|
|||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
*
|
||||
* @see com.raytheon.viz.gfe.ui.runtimeui.SelectionDlg#run()
|
||||
*/
|
||||
@Override
|
||||
public void run() {
|
||||
PreviewInfo pi = SmartUtil.checkAndBuildPreview(dataMgr, name);
|
||||
if (pi != null) {
|
||||
SmartToolRequest req = SmartUtil.
|
||||
buildSmartToolRequest(dataMgr, pi, true);
|
||||
SmartToolRequest req = SmartUtil.buildSmartToolRequest(dataMgr, pi,
|
||||
true);
|
||||
if (req != null) {
|
||||
String varDict = dataMgr.getSmartToolInterface()
|
||||
.transformVarDict(getValues());
|
||||
req.setVarDict(varDict);
|
||||
SmartToolJob.enqueue(dataMgr, req);
|
||||
dataMgr.getSmartToolJobPool().schedule(req);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -56,11 +56,14 @@ import com.vividsolutions.jts.geom.Coordinate;
|
|||
* SOFTWARE HISTORY
|
||||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Mar 09, 2011 bsteffen Initial creation
|
||||
* Jul 17, 2013 2185 bsteffen Cache computed grid reprojections.
|
||||
* Aug 27, 2013 2287 randerso Removed 180 degree adjustment required by error
|
||||
* in Maputil.rotation
|
||||
* ------------- -------- ----------- --------------------------
|
||||
* Mar 09, 2011 bsteffen Initial creation
|
||||
* Jul 17, 2013 2185 bsteffen Cache computed grid reprojections.
|
||||
* Aug 27, 2013 2287 randerso Removed 180 degree adjustment required by error
|
||||
* in Maputil.rotation
|
||||
* Dec 09, 2013 2617 bsteffen Added 180 degree rotation into reproject
|
||||
* so wind direction is calculated as
|
||||
* direction wind is coming from.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -252,8 +255,29 @@ public class GeneralGridData {
|
|||
Coordinate ll = new Coordinate(dp.x, dp.y);
|
||||
double rot = MapUtil.rotation(ll, newGeom);
|
||||
double rot2 = MapUtil.rotation(ll, gridGeometry);
|
||||
double cos = Math.cos(Math.toRadians(rot - rot2));
|
||||
double sin = Math.sin(Math.toRadians(rot - rot2));
|
||||
/*
 * When code calls into this method, the observed state
 * of things is that u and v represent the direction
 * the vector is going while mag and dir represent
 * the direction the vector is coming from. The extra
 * 180 here makes everything consistently represent the
 * direction the vector is coming from so that when the
 * barbs or arrows are rendered the mag and dir are
 * calculated as expected. Overall this is a completely
 * ridiculous way of doing things. During construction
 * everything should be forced to represent the vector
 * consistently and we should only be keeping either
 * u/v or mag/dir to minimize memory consumption.
 * Unfortunately that is a significant change which is
 * made high risk by the fact no one documents which
 * areas expect vectors oriented to vs. from. So
 * for now I (bsteffen) have chosen to simply add in 180
 * so that the behavior will be exactly as it was before
 * 2287, because even though it is ridiculous it is a
 * well tested ridiculous (theoretically).
 */
|
||||
double cos = Math.cos(Math.toRadians(rot - rot2 + 180));
|
||||
double sin = Math.sin(Math.toRadians(rot - rot2 + 180));
|
||||
double u = udata[index];
|
||||
double v = vdata[index];
|
||||
udata[index] = (float) (cos * u - sin * v);
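/*
 * An illustrative sketch of the convention described in the comment above,
 * assuming standard meteorological components (u eastward, v northward);
 * not part of this change. With u/v oriented "toward" and mag/dir reported
 * as the direction the wind is coming from:
 *
 *     double mag = Math.hypot(u, v);
 *     double dirFrom = Math.toDegrees(Math.atan2(-u, -v)); // "toward" + 180
 *     if (dirFrom < 0) {
 *         dirFrom += 360;
 *     }
 */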
|
||||
@ -58,6 +58,7 @@ import com.vividsolutions.jts.geom.LineString;
|
|||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Aug 17, 2010 bsteffen Initial creation
|
||||
* Dec 11, 2013 DR 16795 D. Friedman Transform pixel coordinate in inspect
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -213,11 +214,13 @@ public class RadarXsectXYResource extends RadarXYResource implements
|
|||
DrawableImage image = images.get(displayedDate);
|
||||
try {
|
||||
Coordinate c = latLon.asLatLon();
|
||||
double[] worldCoord = descriptor.pixelToWorld(new double[] {
|
||||
c.x, c.y });
|
||||
IExtent extent = image.getCoverage().getExtent();
|
||||
// Convert the screen coordinate to a coordinate within the image.
|
||||
// 0,0 is the upper left and 1,1 is the lower right of the image.
|
||||
double xRat = (c.x - extent.getMinX()) / extent.getWidth();
|
||||
double yRat = (c.y - extent.getMinY()) / extent.getHeight();
|
||||
double xRat = (worldCoord[0] - extent.getMinX()) / extent.getWidth();
|
||||
double yRat = (worldCoord[1] - extent.getMinY()) / extent.getHeight();
|
||||
return super.inspect(new ReferencedCoordinate(new Coordinate(xRat,
|
||||
yRat)));
|
||||
} catch (Exception e) {
|
||||
|
|
|
@ -115,6 +115,11 @@
|
|||
<include>manualIngest-common.xml</include>
|
||||
<include>manualIngest-spring.xml</include>
|
||||
<include>shef-ingest.xml</include>
|
||||
<include>persist-ingest.xml</include>
|
||||
<include>obs-common.xml</include>
|
||||
<include>obs-ingest.xml</include>
|
||||
<include>metartohmdb-plugin.xml</include>
|
||||
<include>pointdata-common.xml</include>
|
||||
<include>shef-common.xml</include>
|
||||
<include>ohd-common.xml</include>
|
||||
<include>alarmWhfs-spring.xml</include>
|
||||
|
@ -136,6 +141,8 @@
|
|||
<include>q2FileProcessor-spring.xml</include>
|
||||
<include>satpre-spring.xml</include>
|
||||
<include>purge-logs.xml</include>
|
||||
<exclude>fssobs-ingest.xml</exclude>
|
||||
<exclude>fssobs-common.xml</exclude>
|
||||
</mode>
|
||||
<mode name="requestHydro">
|
||||
<include>ohd-common.xml</include>
|
||||
|
@ -144,6 +151,7 @@
|
|||
<include>alertviz-request.xml</include>
|
||||
<include>auth-common.xml</include>
|
||||
<include>auth-request.xml</include>
|
||||
<include>persist-request.xml</include>
|
||||
<include>menus-request.xml</include>
|
||||
<include>utility-request.xml</include>
|
||||
<include>management-common.xml</include>
|
||||
|
@ -226,6 +234,7 @@
|
|||
<include>fssobs-ingest.xml</include>
|
||||
<include>fssobs-common.xml</include>
|
||||
<include>ldadmesonet-common.xml</include>
|
||||
<include>manualIngest-common.xml</include>
|
||||
<include>dataaccess-common.xml</include>
|
||||
<exclude>nctext-common.xml</exclude>
|
||||
<includeMode>excludeDpaAndOgc</includeMode>
|
||||
|
|
|
@ -21,7 +21,12 @@ package com.raytheon.edex.plugin.bufrua.decoder;
|
|||
|
||||
import static com.raytheon.uf.edex.decodertools.bufr.packets.DataPacketTypes.RepSubList;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.bufrua.LayerTools;
|
||||
import com.raytheon.uf.common.dataplugin.bufrua.UAObs;
|
||||
|
@ -42,6 +47,7 @@ import com.raytheon.uf.edex.pointdata.PointDataPluginDao;
|
|||
* ------------- -------- ----------- --------------------------
|
||||
* Mar 03, 2008 969 jkorman Initial implementation.
|
||||
* Dec 05, 2013 2612 bsteffen Fix max wind decoding.
|
||||
* Dec 17, 2013 2639 bsteffen Validate mandatory level heights.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -50,6 +56,20 @@ import com.raytheon.uf.edex.pointdata.PointDataPluginDao;
|
|||
*/
|
||||
public class BUFRUAManLevelAdapter extends AbstractBUFRUAAdapter {
|
||||
|
||||
/** Mandatory pressure levels */
|
||||
private static final float[] VALID_PR = { 100000, 92500, 85000, 70000,
|
||||
50000, 40000, 30000, 25000, 20000, 15000, 10000, 5000 };
|
||||
|
||||
/** Reasonable height levels corresponding to VALID_PR */
|
||||
private static final float[] VALID_HT = { 100, 750, 1450, 3000, 5550, 7150,
|
||||
9150, 10350, 11800, 13600, 16150, 20000 };
|
||||
|
||||
/** Map VALID_PR to VALID_HT values. */
|
||||
private static final Map<Float, Float> VALID_HEIGHT_MAP = generateValidHeights();
|
||||
|
||||
/** Reasonable range for reasonable heights in VALID_HT */
|
||||
private static final float VALID_HEIGHT_RANGE = 1000;
|
||||
|
||||
/**
|
||||
*
|
||||
* @param pdd
|
||||
|
@ -98,7 +118,7 @@ public class BUFRUAManLevelAdapter extends AbstractBUFRUAAdapter {
|
|||
|
||||
int maxManLevels = -1;
|
||||
int maxTropLevels = -1;
|
||||
float sfcPressure = -9999;
|
||||
float sfcPressure = PDV_FILL_INT;
|
||||
|
||||
Dimension[] dims = getPointDataDescription().dimensions;
|
||||
for (Dimension d : dims) {
|
||||
|
@ -120,21 +140,21 @@ public class BUFRUAManLevelAdapter extends AbstractBUFRUAAdapter {
|
|||
List<IBUFRDataPacket> p = (List<IBUFRDataPacket>) packet
|
||||
.getValue();
|
||||
int sig = getInt(p.get(1), IDecoderConstants.VAL_MISSING);
|
||||
double pres = getDouble(p.get(0), -9999);
|
||||
double pres = getDouble(p.get(0), PDV_FILL_DBL);
|
||||
switch (sig) {
|
||||
|
||||
case LayerTools.TROP_LEVEL: { // Tropopause level
|
||||
if ((tropIdx < maxTropLevels) && (pres > 0)
|
||||
&& (pres != 99900.0)) {
|
||||
setViewData("prTrop", view, p.get(0), tropIdx);
|
||||
double t = getDouble(p.get(3), -9999);
|
||||
if (t < -9999) {
|
||||
t = -9999.0;
|
||||
double t = getDouble(p.get(3), PDV_FILL_DBL);
|
||||
if (t < PDV_FILL_DBL) {
|
||||
t = PDV_FILL_DBL;
|
||||
}
|
||||
view.setFloat("tpTrop", (float) t, tropIdx);
|
||||
t = getDouble(p.get(4), -9999);
|
||||
if (t < -9999) {
|
||||
t = -9999.0;
|
||||
t = getDouble(p.get(4), PDV_FILL_DBL);
|
||||
if (t < PDV_FILL_DBL) {
|
||||
t = PDV_FILL_DBL;
|
||||
}
|
||||
view.setFloat("tdTrop", (float) t, tropIdx);
|
||||
setViewData("wdTrop", view, p.get(5), tropIdx);
|
||||
|
@ -144,7 +164,7 @@ public class BUFRUAManLevelAdapter extends AbstractBUFRUAAdapter {
|
|||
break;
|
||||
}
|
||||
case LayerTools.SFC_LEVEL: {
|
||||
sfcPressure = (float) getDouble(p.get(0), -9999);
|
||||
sfcPressure = (float) getDouble(p.get(0), PDV_FILL_DBL);
|
||||
// fall through
|
||||
}
|
||||
case LayerTools.MANPRE_LEVEL: {
|
||||
|
@ -152,14 +172,14 @@ public class BUFRUAManLevelAdapter extends AbstractBUFRUAAdapter {
|
|||
if ((manIdx < maxManLevels) && (pres > 0)) {
|
||||
setViewData("prMan", view, p.get(0), manIdx);
|
||||
setViewData("htMan", view, p.get(2), manIdx);
|
||||
double t = getDouble(p.get(3), -9999);
|
||||
if (t < -9999) {
|
||||
t = -9999.0;
|
||||
double t = getDouble(p.get(3), PDV_FILL_DBL);
|
||||
if (t < PDV_FILL_DBL) {
|
||||
t = PDV_FILL_DBL;
|
||||
}
|
||||
view.setFloat("tpMan", (float) t, manIdx);
|
||||
t = getDouble(p.get(4), -9999);
|
||||
if (t < -9999) {
|
||||
t = -9999.0;
|
||||
t = getDouble(p.get(4), PDV_FILL_DBL);
|
||||
if (t < PDV_FILL_DBL) {
|
||||
t = PDV_FILL_DBL;
|
||||
}
|
||||
view.setFloat("tdMan", (float) t, manIdx);
|
||||
setViewData("wdMan", view, p.get(5), manIdx);
|
||||
|
@ -168,12 +188,13 @@ public class BUFRUAManLevelAdapter extends AbstractBUFRUAAdapter {
|
|||
}
|
||||
break;
|
||||
}
|
||||
// No default!
|
||||
// No default!
|
||||
} // switch
|
||||
} // for
|
||||
view.setInt("numMand", manIdx);
|
||||
view.setInt("numTrop", tropIdx);
|
||||
view.setFloat("sfcPressure", sfcPressure);
|
||||
removeInvalidHeights(view);
|
||||
}
|
||||
return pointData;
|
||||
}
|
||||
|
@ -209,7 +230,7 @@ public class BUFRUAManLevelAdapter extends AbstractBUFRUAAdapter {
|
|||
.getValue();
|
||||
int sig = getInt(p.get(1), IDecoderConstants.VAL_MISSING);
|
||||
if (sig == LayerTools.MAXWND_LEVEL) {
|
||||
double pres = getDouble(p.get(0), -9999);
|
||||
double pres = getDouble(p.get(0), PDV_FILL_DBL);
|
||||
if (pres > 0) {
|
||||
setViewData("prMaxW", view, p.get(0), maxWindIdx);
|
||||
setViewData("wdMaxW", view, p.get(2), maxWindIdx);
|
||||
|
@ -225,4 +246,77 @@ public class BUFRUAManLevelAdapter extends AbstractBUFRUAAdapter {
|
|||
}
|
||||
return pointData;
|
||||
}
|
||||
|
||||
/**
 * Check the heights for each reading, removing invalid readings. Check
 * that heights are within the range specified from the mean value and that
 * they are between the preceding and following values.
 * 
 * One reason this is needed is that there is a known error in the
 * encoded data when the height for the 250MB level is less than 10000. For
 * these cases the encoder is prepending a 1 so a height of 9990 becomes
 * 19990. It appears this may be an artifact of the compression used to
 * encode the heights. For this case it would be theoretically possible to
 * remove the extra 1 and treat the data as valid, but invalidating the
 * height is done because it is not clear if this would always be a safe
 * fix or if there are other possible errors to detect.
 * 
 * @param view
 *            {@link PointDataView} which will be modified to have invalid
 *            mandatory height data removed.
 */
|
||||
private void removeInvalidHeights(PointDataView view) {
|
||||
int numMand = view.getInt("numMand");
|
||||
if (numMand < 3) {
|
||||
return;
|
||||
}
|
||||
/* Convert pressure and height data into a map for easy access. */
|
||||
Number[] pr = view.getNumberAllLevels("prMan");
|
||||
Number[] ht = view.getNumberAllLevels("htMan");
|
||||
Map<Float, Float> heights = new HashMap<Float, Float>(numMand * 2);
|
||||
for (int i = 0; i < numMand; i += 1) {
|
||||
heights.put(pr[i].floatValue(), ht[i].floatValue());
|
||||
}
|
||||
/* Check each predefined level. */
|
||||
Set<Float> invalidPrLevels = new HashSet<Float>();
|
||||
for (int i = 1; i < VALID_PR.length - 1; i += 1) {
|
||||
float prLevel = VALID_PR[i];
|
||||
float validHt = VALID_HEIGHT_MAP.get(prLevel);
|
||||
float minHt = validHt - VALID_HEIGHT_RANGE;
|
||||
float maxHt = validHt + VALID_HEIGHT_RANGE;
|
||||
Float testHt = heights.get(prLevel);
|
||||
/* First detect values which don't look reasonable. */
|
||||
if (testHt != null && testHt > PDV_FILL_INT
|
||||
&& (minHt > testHt || maxHt < testHt)) {
|
||||
float prevPr = VALID_PR[i - 1];
|
||||
float nextPr = VALID_PR[i + 1];
|
||||
Float prevHt = heights.get(prevPr);
|
||||
Float nextHt = heights.get(nextPr);
|
||||
/* Next check if its at least ascending. */
|
||||
if (prevHt != null && prevHt > PDV_FILL_INT && nextHt != null
|
||||
&& nextHt > PDV_FILL_INT
|
||||
&& (testHt < prevHt || testHt > nextHt)) {
|
||||
invalidPrLevels.add(prLevel);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (invalidPrLevels.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (int i = 0; i < numMand; i += 1) {
|
||||
if (invalidPrLevels.contains(pr[i].floatValue())) {
|
||||
view.setFloat("htMan", PDV_FILL_INT, i);
|
||||
}
|
||||
}
|
||||
}
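/*
 * A standalone sketch of the validity-window check performed above, using
 * the constants defined in this file and a hypothetical decoded value;
 * illustrative only, not part of the committed file. For the 250MB level
 * (25000 Pa) the expected height is 10350 m, so a decoded height of 19990 m
 * (the known "prepended 1" encoding error) falls outside the +/- 1000 m
 * window and would be flagged:
 *
 *     float expected = VALID_HEIGHT_MAP.get(25000f);              // 10350
 *     boolean suspect = Math.abs(19990f - expected) > VALID_HEIGHT_RANGE;
 *     // suspect == true, so htMan for that level is set to PDV_FILL_INT
 */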
|
||||
|
||||
private static Map<Float, Float> generateValidHeights() {
|
||||
Map<Float, Float> validHeights = new HashMap<Float, Float>();
|
||||
for (int i = 0; i < VALID_HT.length; i += 1) {
|
||||
validHeights.put(VALID_PR[i], VALID_HT[i]);
|
||||
}
|
||||
return Collections.unmodifiableMap(validHeights);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -119,7 +119,6 @@ import com.raytheon.uf.edex.database.purge.PurgeLogger;
|
|||
* the same parm simultaneously.
|
||||
* Added code to check the purge times when publishing and not publish
|
||||
* data that is eligible to be purged.
|
||||
* 12/03/13 #2595 randerso Added check for null update time in commitGrid
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -173,7 +172,7 @@ public class GridParmManager {
|
|||
this.lockMgr.setGridParmMgr(this);
|
||||
|
||||
initializeManager();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Dispose the GridParmManager
|
||||
|
@ -200,7 +199,7 @@ public class GridParmManager {
|
|||
.debug("No matching GridDatabase for requested ParmID in createParm()");
|
||||
// TODO: should we return null?
|
||||
return new GridParm();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -331,10 +330,10 @@ public class GridParmManager {
|
|||
for (SaveGridRequest req : saveRequest) {
|
||||
ServerResponse<?> ssr = null;
|
||||
GridParm gp = null;
|
||||
gp = gridParm(req.getParmId());
|
||||
if (!gp.isValid()) {
|
||||
sr.addMessage("Unknown Parm: " + req.getParmId()
|
||||
+ " in saveGridData()");
|
||||
gp = gridParm(req.getParmId());
|
||||
if (!gp.isValid()) {
|
||||
sr.addMessage("Unknown Parm: " + req.getParmId()
|
||||
+ " in saveGridData()");
|
||||
statusHandler.error("Unknown Parm: " + req.getParmId()
|
||||
+ " in saveGridData()");
|
||||
continue;
|
||||
|
@ -456,27 +455,27 @@ public class GridParmManager {
|
|||
// for the source data
|
||||
ParmID sourceParmId = req.getParmId();
|
||||
GridParm sourceGP = gridParm(sourceParmId);
|
||||
if (!sourceGP.isValid()) {
|
||||
ssr.addMessage("Unknown Source Parm: " + req.getParmId()
|
||||
+ " in commitGrid()");
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
if (!sourceGP.isValid()) {
|
||||
ssr.addMessage("Unknown Source Parm: " + req.getParmId()
|
||||
+ " in commitGrid()");
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
|
||||
// for the destination data
|
||||
ParmID destParmId = new ParmID(req.getParmId().getParmName(),
|
||||
officialDBid, req.getParmId().getParmLevel());
|
||||
String destParmIdStr = destParmId.toString();
|
||||
GridParm destGP = null;
|
||||
destGP = gridParm(destParmId);
|
||||
if (!destGP.isValid()) {
|
||||
ssr.addMessage("Unknown Destination Parm: " + destGP
|
||||
+ " in commitGrid()");
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
destGP = gridParm(destParmId);
|
||||
if (!destGP.isValid()) {
|
||||
ssr.addMessage("Unknown Destination Parm: " + destGP
|
||||
+ " in commitGrid()");
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
|
||||
// verify that the source and destination are matched
|
||||
GridParmInfo sourceInfo, destInfo;
|
||||
|
@ -520,17 +519,17 @@ public class GridParmManager {
|
|||
publishTime.setStart(startTime);
|
||||
}
|
||||
|
||||
inventoryTimer.start();
|
||||
inventoryTimer.start();
|
||||
ServerResponse<List<TimeRange>> invSr = sourceGP
|
||||
.getGridInventory(publishTime);
|
||||
List<TimeRange> overlapInventory = invSr.getPayload();
|
||||
ssr.addMessages(invSr);
|
||||
if (!ssr.isOkay()) {
|
||||
ssr.addMessage("GetGridInventory for source for commitGrid() failure: "
|
||||
+ ssr.message());
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
}
|
||||
ssr.addMessages(invSr);
|
||||
if (!ssr.isOkay()) {
|
||||
ssr.addMessage("GetGridInventory for source for commitGrid() failure: "
|
||||
+ ssr.message());
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
}
|
||||
|
||||
// expand publish time to span overlapping inventory
|
||||
if (!overlapInventory.isEmpty()) {
|
||||
|
@ -547,174 +546,173 @@ public class GridParmManager {
|
|||
}
|
||||
|
||||
invSr = destGP.getGridInventory(publishTime);
|
||||
inventoryTimer.stop();
|
||||
List<TimeRange> destInventory = invSr.getPayload();
|
||||
ssr.addMessages(invSr);
|
||||
if (!ssr.isOkay()) {
|
||||
ssr.addMessage("GetGridInventory for destination for commitGrid() failure: "
|
||||
+ ssr.message());
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
inventoryTimer.stop();
|
||||
List<TimeRange> destInventory = invSr.getPayload();
|
||||
ssr.addMessages(invSr);
|
||||
if (!ssr.isOkay()) {
|
||||
ssr.addMessage("GetGridInventory for destination for commitGrid() failure: "
|
||||
+ ssr.message());
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
|
||||
// get the source grid data
|
||||
List<IGridSlice> sourceData = null;
|
||||
List<TimeRange> badGridTR = new ArrayList<TimeRange>();
|
||||
// get the source grid data
|
||||
List<IGridSlice> sourceData = null;
|
||||
List<TimeRange> badGridTR = new ArrayList<TimeRange>();
|
||||
|
||||
// System.out.println("overlapInventory initial size "
|
||||
// + overlapInventory.size());
|
||||
// System.out.println("overlapInventory initial size "
|
||||
// + overlapInventory.size());
|
||||
|
||||
historyRetrieveTimer.start();
|
||||
ServerResponse<Map<TimeRange, List<GridDataHistory>>> history = sourceGP
|
||||
.getGridHistory(overlapInventory);
|
||||
Map<TimeRange, List<GridDataHistory>> currentDestHistory = destGP
|
||||
.getGridHistory(overlapInventory).getPayload();
|
||||
historyRetrieveTimer.stop();
|
||||
historyRetrieveTimer.start();
|
||||
ServerResponse<Map<TimeRange, List<GridDataHistory>>> history = sourceGP
|
||||
.getGridHistory(overlapInventory);
|
||||
Map<TimeRange, List<GridDataHistory>> currentDestHistory = destGP
|
||||
.getGridHistory(overlapInventory).getPayload();
|
||||
historyRetrieveTimer.stop();
|
||||
|
||||
Map<TimeRange, List<GridDataHistory>> historyOnly = new HashMap<TimeRange, List<GridDataHistory>>();
|
||||
for (TimeRange tr : history.getPayload().keySet()) {
|
||||
// should only ever be one history for source grids
|
||||
Map<TimeRange, List<GridDataHistory>> historyOnly = new HashMap<TimeRange, List<GridDataHistory>>();
|
||||
for (TimeRange tr : history.getPayload().keySet()) {
|
||||
// should only ever be one history for source grids
|
||||
List<GridDataHistory> gdhList = history.getPayload()
|
||||
.get(tr);
|
||||
boolean doPublish = false;
|
||||
for (GridDataHistory gdh : gdhList) {
|
||||
// if update time is less than publish time, grid
|
||||
// has not changed since last published,
|
||||
// therefore only update history, do not publish
|
||||
if ((gdh.getUpdateTime() == null)
|
||||
|| (gdh.getPublishTime() == null)
|
||||
|| (gdh.getUpdateTime().getTime() > gdh
|
||||
.getPublishTime().getTime())
|
||||
// in service backup, times on srcHistory
|
||||
// could appear as not needing a publish,
|
||||
// even though dest data does not exist
|
||||
|| (currentDestHistory.get(tr) == null)
|
||||
|| (currentDestHistory.get(tr).size() == 0)) {
|
||||
doPublish = true;
|
||||
}
|
||||
}
|
||||
if (!doPublish) {
|
||||
historyOnly.put(tr, gdhList);
|
||||
overlapInventory.remove(tr);
|
||||
boolean doPublish = false;
|
||||
for (GridDataHistory gdh : gdhList) {
|
||||
// if update time is less than publish time, grid
|
||||
// has not changed since last published,
|
||||
// therefore only update history, do not publish
|
||||
if ((gdh.getPublishTime() == null)
|
||||
|| (gdh.getUpdateTime().getTime() > gdh
|
||||
.getPublishTime().getTime())
|
||||
// in service backup, times on srcHistory
|
||||
// could appear as not needing a publish,
|
||||
// even though dest data does not exist
|
||||
|| (currentDestHistory.get(tr) == null)
|
||||
|| (currentDestHistory.get(tr).size() == 0)) {
|
||||
doPublish = true;
|
||||
}
|
||||
}
|
||||
if (!doPublish) {
|
||||
historyOnly.put(tr, gdhList);
|
||||
overlapInventory.remove(tr);
|
||||
}
|
||||
}
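The loop above computes doPublish per time range: a grid is republished when its history has no publish time, when its update time is newer than its publish time (with the null-update-time guard added under #2595), or when the official/destination database has no history for that range (the service-backup case); otherwise only the history's publish time is refreshed. A rough Python sketch of that decision follows; the function and field names are illustrative only, not the GFE API.

# Rough sketch of the doPublish decision above; names are illustrative only.
def needs_publish(src_histories, dest_histories_for_range):
    """src_histories: dicts with 'update_time'/'publish_time' (timestamps or None)."""
    for h in src_histories:
        if h.get("update_time") is None or h.get("publish_time") is None:
            return True                 # never published, or update time missing (#2595 guard)
        if h["update_time"] > h["publish_time"]:
            return True                 # grid changed since the last publish
    if not dest_histories_for_range:
        return True                     # service backup: destination has no grid for this range
    return False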
|
||||
|
||||
retrieveTimer.start();
|
||||
ServerResponse<List<IGridSlice>> getSr = sourceGP.getGridData(
|
||||
new GetGridRequest(req.getParmId(), overlapInventory),
|
||||
badGridTR);
|
||||
retrieveTimer.stop();
|
||||
// System.out.println("Retrieved " + overlapInventory.size()
|
||||
// + " grids");
|
||||
sourceData = getSr.getPayload();
|
||||
ssr.addMessages(getSr);
|
||||
if (!ssr.isOkay()) {
|
||||
ssr.addMessage("GetGridData for source for commitGrid() failure: "
|
||||
+ ssr.message());
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
|
||||
// get list of official grids that overlap publish range and
|
||||
// aren't contained in the publish range, these have to be
|
||||
// included in the publish step. Then get the grids, shorten
|
||||
// and insert into sourceData.
|
||||
List<IGridSlice> officialData = new ArrayList<IGridSlice>();
|
||||
List<TimeRange> officialTR = new ArrayList<TimeRange>();
|
||||
for (int t = 0; t < destInventory.size(); t++) {
|
||||
if (!publishTime.contains(destInventory.get(t))) {
|
||||
officialTR.add(destInventory.get(t));
|
||||
}
|
||||
}
|
||||
|
||||
if (!officialTR.isEmpty()) {
|
||||
retrieveTimer.start();
|
||||
ServerResponse<List<IGridSlice>> getSr = sourceGP.getGridData(
|
||||
new GetGridRequest(req.getParmId(), overlapInventory),
|
||||
badGridTR);
|
||||
getSr = destGP.getGridData(new GetGridRequest(destParmId,
|
||||
officialTR), badGridTR);
|
||||
retrieveTimer.stop();
|
||||
// System.out.println("Retrieved " + overlapInventory.size()
|
||||
// + " grids");
|
||||
sourceData = getSr.getPayload();
|
||||
officialData = getSr.getPayload();
|
||||
ssr.addMessages(getSr);
|
||||
if (!ssr.isOkay()) {
|
||||
ssr.addMessage("GetGridData for source for commitGrid() failure: "
|
||||
ssr.addMessage("GetGridData for official for commidtGrid() failure: "
|
||||
+ ssr.message());
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
|
||||
// get list of official grids that overlap publish range and
|
||||
// aren't contained in the publish range, these have to be
|
||||
// included in the publish step. Then get the grids, shorten
|
||||
// and insert into sourceData.
|
||||
List<IGridSlice> officialData = new ArrayList<IGridSlice>();
|
||||
List<TimeRange> officialTR = new ArrayList<TimeRange>();
|
||||
for (int t = 0; t < destInventory.size(); t++) {
|
||||
if (!publishTime.contains(destInventory.get(t))) {
|
||||
officialTR.add(destInventory.get(t));
|
||||
}
|
||||
}
|
||||
|
||||
if (!officialTR.isEmpty()) {
|
||||
retrieveTimer.start();
|
||||
getSr = destGP.getGridData(new GetGridRequest(destParmId,
|
||||
officialTR), badGridTR);
|
||||
retrieveTimer.stop();
|
||||
officialData = getSr.getPayload();
|
||||
ssr.addMessages(getSr);
|
||||
if (!ssr.isOkay()) {
|
||||
ssr.addMessage("GetGridData for official for commidtGrid() failure: "
|
||||
+ ssr.message());
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
|
||||
// insert the grid into the "sourceGrid" list
|
||||
for (int t = 0; t < officialTR.size(); t++) {
|
||||
// before
|
||||
try {
|
||||
if (officialTR.get(t).getStart()
|
||||
.before(publishTime.getStart())) {
|
||||
// insert the grid into the "sourceGrid" list
|
||||
for (int t = 0; t < officialTR.size(); t++) {
|
||||
// before
|
||||
try {
|
||||
if (officialTR.get(t).getStart()
|
||||
.before(publishTime.getStart())) {
|
||||
|
||||
IGridSlice tempSlice = officialData.get(t)
|
||||
.clone();
|
||||
tempSlice.setValidTime(new TimeRange(officialTR
|
||||
.get(t).getStart(), publishTime
|
||||
.getStart()));
|
||||
sourceData.add(0, tempSlice);
|
||||
.getStart()));
|
||||
sourceData.add(0, tempSlice);
|
||||
publishTime.setStart(officialTR.get(t)
|
||||
.getStart());
|
||||
overlapInventory.add(tempSlice.getValidTime());
|
||||
}
|
||||
overlapInventory.add(tempSlice.getValidTime());
|
||||
}
|
||||
|
||||
// after
|
||||
if (officialTR.get(t).getEnd()
|
||||
.after(publishTime.getEnd())) {
|
||||
// after
|
||||
if (officialTR.get(t).getEnd()
|
||||
.after(publishTime.getEnd())) {
|
||||
IGridSlice tempSlice = officialData.get(t)
|
||||
.clone();
|
||||
tempSlice.setValidTime(new TimeRange(
|
||||
publishTime.getEnd(), officialTR.get(t)
|
||||
.getEnd()));
|
||||
sourceData.add(tempSlice);
|
||||
publishTime.setEnd(officialTR.get(t).getEnd());
|
||||
overlapInventory.add(tempSlice.getValidTime());
|
||||
}
|
||||
} catch (CloneNotSupportedException e) {
|
||||
sr.addMessage("Error cloning GridSlice "
|
||||
+ e.getMessage());
|
||||
sourceData.add(tempSlice);
|
||||
publishTime.setEnd(officialTR.get(t).getEnd());
|
||||
overlapInventory.add(tempSlice.getValidTime());
|
||||
}
|
||||
} catch (CloneNotSupportedException e) {
|
||||
sr.addMessage("Error cloning GridSlice "
|
||||
+ e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// save off the source grid history, to update the source
|
||||
// database modify the source grid data for the dest ParmID and
|
||||
// GridDataHistory
|
||||
Map<TimeRange, List<GridDataHistory>> histories = new HashMap<TimeRange, List<GridDataHistory>>();
|
||||
Date nowTime = new Date();
|
||||
// GridDataHistory
|
||||
Map<TimeRange, List<GridDataHistory>> histories = new HashMap<TimeRange, List<GridDataHistory>>();
|
||||
Date nowTime = new Date();
|
||||
|
||||
for (IGridSlice slice : sourceData) {
|
||||
GridDataHistory[] sliceHist = slice.getHistory();
|
||||
for (GridDataHistory hist : sliceHist) {
|
||||
hist.setPublishTime((Date) nowTime.clone());
|
||||
}
|
||||
slice.getGridInfo().resetParmID(destParmId);
|
||||
for (IGridSlice slice : sourceData) {
|
||||
GridDataHistory[] sliceHist = slice.getHistory();
|
||||
for (GridDataHistory hist : sliceHist) {
|
||||
hist.setPublishTime((Date) nowTime.clone());
|
||||
}
|
||||
slice.getGridInfo().resetParmID(destParmId);
|
||||
histories.put(slice.getValidTime(),
|
||||
Arrays.asList(sliceHist));
|
||||
}
|
||||
}
|
||||
|
||||
// update the history for publish time for grids that are
|
||||
// unchanged
|
||||
for (TimeRange tr : historyOnly.keySet()) {
|
||||
List<GridDataHistory> histList = historyOnly.get(tr);
|
||||
for (GridDataHistory hist : histList) {
|
||||
hist.setPublishTime((Date) nowTime.clone());
|
||||
}
|
||||
histories.put(tr, histList);
|
||||
for (TimeRange tr : historyOnly.keySet()) {
|
||||
List<GridDataHistory> histList = historyOnly.get(tr);
|
||||
for (GridDataHistory hist : histList) {
|
||||
hist.setPublishTime((Date) nowTime.clone());
|
||||
}
|
||||
histories.put(tr, histList);
|
||||
}
|
||||
|
||||
// update the publish times in the source database,
|
||||
// update the notifications
|
||||
historyUpdateTimer.start();
|
||||
sr.addMessages(sourceGP.updatePublishTime(histories.values(),
|
||||
(Date) nowTime.clone()));
|
||||
historyUpdateTimer.start();
|
||||
sr.addMessages(sourceGP.updatePublishTime(histories.values(),
|
||||
(Date) nowTime.clone()));
|
||||
// System.out.println("Updated " + histories.size() +
|
||||
// " histories");
|
||||
historyUpdateTimer.stop();
|
||||
historyUpdateTimer.stop();
|
||||
|
||||
List<TimeRange> historyTimes = new ArrayList<TimeRange>(
|
||||
histories.keySet());
|
||||
|
@ -725,56 +723,56 @@ public class GridParmManager {
|
|||
// update the histories of destination database for ones
|
||||
// that are not going to be saved since there hasn't been a
|
||||
// change
|
||||
List<TimeRange> historyOnlyList = new ArrayList<TimeRange>();
|
||||
historyOnlyList.addAll(historyOnly.keySet());
|
||||
List<TimeRange> historyOnlyList = new ArrayList<TimeRange>();
|
||||
historyOnlyList.addAll(historyOnly.keySet());
|
||||
|
||||
historyRetrieveTimer.start();
|
||||
Map<TimeRange, List<GridDataHistory>> destHistory = destGP
|
||||
.getGridHistory(historyOnlyList).getPayload();
|
||||
historyRetrieveTimer.stop();
|
||||
for (TimeRange tr : destHistory.keySet()) {
|
||||
List<GridDataHistory> srcHistList = histories.get(tr);
|
||||
List<GridDataHistory> destHistList = destHistory.get(tr);
|
||||
for (int i = 0; i < srcHistList.size(); i++) {
|
||||
destHistList.get(i).replaceValues(srcHistList.get(i));
|
||||
}
|
||||
historyRetrieveTimer.start();
|
||||
Map<TimeRange, List<GridDataHistory>> destHistory = destGP
|
||||
.getGridHistory(historyOnlyList).getPayload();
|
||||
historyRetrieveTimer.stop();
|
||||
for (TimeRange tr : destHistory.keySet()) {
|
||||
List<GridDataHistory> srcHistList = histories.get(tr);
|
||||
List<GridDataHistory> destHistList = destHistory.get(tr);
|
||||
for (int i = 0; i < srcHistList.size(); i++) {
|
||||
destHistList.get(i).replaceValues(srcHistList.get(i));
|
||||
}
|
||||
}
|
||||
|
||||
// only need to update the publish time on the destination
|
||||
// histories of grids that are not being saved (due to no
|
||||
// changes), because the saveGridSlices() call below will update
|
||||
// the publish time of the ones with changes
|
||||
historyUpdateTimer.start();
|
||||
destGP.updatePublishTime(destHistory.values(),
|
||||
(Date) nowTime.clone());
|
||||
historyUpdateTimer.stop();
|
||||
historyUpdateTimer.start();
|
||||
destGP.updatePublishTime(destHistory.values(),
|
||||
(Date) nowTime.clone());
|
||||
historyUpdateTimer.stop();
|
||||
|
||||
// save data directly to the official database (bypassing
|
||||
// the checks in Parm intentionally)
|
||||
storeTimer.start();
|
||||
ssr.addMessages(officialDBPtr.saveGridSlices(destParmId,
|
||||
publishTime, sourceData, requestorId, historyOnlyList));
|
||||
storeTimer.stop();
|
||||
// save data directly to the official database (bypassing
|
||||
// the checks in Parm intentionally)
|
||||
storeTimer.start();
|
||||
ssr.addMessages(officialDBPtr.saveGridSlices(destParmId,
|
||||
publishTime, sourceData, requestorId, historyOnlyList));
|
||||
storeTimer.stop();
|
||||
|
||||
// System.out.println("Published " + sourceData.size() +
|
||||
// " slices");
|
||||
if (!ssr.isOkay()) {
|
||||
ssr.addMessage("SaveGridData for official for commitGrid() failure: "
|
||||
+ ssr.message());
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
if (!ssr.isOkay()) {
|
||||
ssr.addMessage("SaveGridData for official for commitGrid() failure: "
|
||||
+ ssr.message());
|
||||
srDetailed.addMessages(ssr);
|
||||
failures.add(req);
|
||||
continue;
|
||||
}
|
||||
|
||||
// make the notification
|
||||
// make the notification
|
||||
GridUpdateNotification not = new GridUpdateNotification(
|
||||
destParmId, publishTime, histories, requestorId, siteID);
|
||||
changes.add(not);
|
||||
sr.getPayload().add(not);
|
||||
changes.add(not);
|
||||
sr.getPayload().add(not);
|
||||
|
||||
} finally {
|
||||
ClusterLockUtils.unlock(ct, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
perfLog.logDuration("Publish Grids: Acquiring cluster lock",
|
||||
|
@ -824,8 +822,8 @@ public class GridParmManager {
|
|||
this.dbMap.keySet());
|
||||
|
||||
sr.setPayload(databases);
|
||||
return sr;
|
||||
}
|
||||
return sr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a database if available
|
||||
|
@ -849,8 +847,8 @@ public class GridParmManager {
|
|||
if (status.isOkay()) {
|
||||
db = status.getPayload();
|
||||
createDbNotification(Arrays.asList(dbId), null);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (db != null) {
|
||||
this.addDB(db);
|
||||
|
@ -888,8 +886,8 @@ public class GridParmManager {
|
|||
return sr;
|
||||
}
|
||||
|
||||
return sr;
|
||||
}
|
||||
return sr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete database
|
||||
|
@ -946,9 +944,9 @@ public class GridParmManager {
|
|||
|
||||
if (db == null) {
|
||||
sr.addMessage("Database " + dbId
|
||||
+ " does not exist for getParmList()");
|
||||
+ " does not exist for getParmList()");
|
||||
return sr;
|
||||
}
|
||||
}
|
||||
|
||||
sr = db.getParmList();
|
||||
return sr;
|
||||
|
@ -990,7 +988,7 @@ public class GridParmManager {
|
|||
|
||||
// determine desired number of versions
|
||||
desiredVersions = this.config.desiredDbVersions(dbId);
|
||||
}
|
||||
}
|
||||
|
||||
// process the id and determine whether it should be purged
|
||||
count++;
|
||||
|
@ -1014,9 +1012,9 @@ public class GridParmManager {
|
|||
toRemove.removeAll(newInv);
|
||||
for (DatabaseID dbId : toRemove) {
|
||||
if (dbMap.remove(dbId) != null) {
|
||||
statusHandler
|
||||
.info("Synching GridParmManager with database inventory, removing "
|
||||
+ dbId);
|
||||
statusHandler
|
||||
.info("Synching GridParmManager with database inventory, removing "
|
||||
+ dbId);
|
||||
}
|
||||
|
||||
// add any removals to the deletions list
|
||||
|
@ -1075,14 +1073,14 @@ public class GridParmManager {
|
|||
List<LockNotification> lockNotify = new ArrayList<LockNotification>();
|
||||
GridParm gp = createParm(parmId);
|
||||
if (gp.isValid()) {
|
||||
ServerResponse<Integer> sr1 = gp.timePurge(purgeTime,
|
||||
ServerResponse<Integer> sr1 = gp.timePurge(purgeTime,
|
||||
gridNotify, lockNotify);
|
||||
sr.addMessages(sr1);
|
||||
purgedCount += sr1.getPayload();
|
||||
sr.addMessages(sr1);
|
||||
purgedCount += sr1.getPayload();
|
||||
|
||||
gridNotifications.addAll(gridNotify);
|
||||
lockNotifications.addAll(lockNotify);
|
||||
}
|
||||
gridNotifications.addAll(gridNotify);
|
||||
lockNotifications.addAll(lockNotify);
|
||||
}
|
||||
}
|
||||
|
||||
PurgeLogger.logInfo("Purge " + purgedCount + " items from " + dbId,
|
||||
|
@ -1121,7 +1119,7 @@ public class GridParmManager {
|
|||
|
||||
if (dbId.getRemovedDate() != null) {
|
||||
// mark database as not removed
|
||||
try {
|
||||
try {
|
||||
GFEDao gfeDao = new GFEDao();
|
||||
gfeDao.setDatabaseRemovedDate(dbId, null);
|
||||
statusHandler.info("Database " + dbId + " restored");
|
||||
|
@ -1129,7 +1127,7 @@ public class GridParmManager {
|
|||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Unable to mark database restored: " + dbId, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add to list of databases
|
||||
addDB(db);
|
||||
|
@ -1179,8 +1177,8 @@ public class GridParmManager {
|
|||
if (manID.getFormat().equals(DataType.GRID)
|
||||
&& !inventory.contains(manID)) {
|
||||
inventory.add(manID);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// create the databases (the list should now only contain GRID dbs)
|
||||
ServerResponse<GridDatabase> sr = new ServerResponse<GridDatabase>();
|
||||
|
@ -1285,7 +1283,7 @@ public class GridParmManager {
|
|||
if (db == null) {
|
||||
// New database
|
||||
db = D2DGridDatabase.getDatabase(config, d2dModelName, refTime);
|
||||
if (db == null) {
|
||||
if (db == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -1308,16 +1306,16 @@ public class GridParmManager {
|
|||
queue.queue(siteID, config, dbId, validTime, false,
|
||||
SmartInitRecord.LIVE_SMART_INIT_PRIORITY);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// send notifications;
|
||||
try {
|
||||
try {
|
||||
SendNotifications.send(guns);
|
||||
} catch (Exception e) {
|
||||
} catch (Exception e) {
|
||||
statusHandler.error("Unable to send grib ingest notifications", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @param records
|
||||
|
@ -1339,9 +1337,9 @@ public class GridParmManager {
|
|||
Date validTime = gun.getReplacementTimeRange().getStart();
|
||||
queue.queue(siteID, config, dbId, validTime, false,
|
||||
SmartInitRecord.LIVE_SMART_INIT_PRIORITY);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
SendNotifications.send(guns);
|
||||
|
@ -1349,7 +1347,7 @@ public class GridParmManager {
|
|||
statusHandler.error(
|
||||
"Unable to send satellite ingest notifications", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private Date purgeTime(DatabaseID id) {
|
||||
int numHours = this.config.gridPurgeAgeInHours(id);
|
||||
|
@ -1427,8 +1425,8 @@ public class GridParmManager {
|
|||
for (ParmID pid : parmList) {
|
||||
out.add(new CommitGridRequest(pid, req.getTimeRange(),
|
||||
req.isClientSendStatus()));
|
||||
}
|
||||
} else {
|
||||
}
|
||||
} else {
|
||||
sr.addMessage("Could not find database for "
|
||||
+ req.getDbId() + " in convertToParmReq()");
|
||||
}
|
||||
|
@ -1544,7 +1542,7 @@ public class GridParmManager {
|
|||
DatabaseID dbId = db.getDbId();
|
||||
statusHandler.info("addDB called, adding " + dbId);
|
||||
this.dbMap.put(dbId, db);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Process D2D grid data purge notification
|
||||
|
@ -1568,9 +1566,9 @@ public class GridParmManager {
|
|||
newInventory.addAll(dbIds);
|
||||
} catch (DataAccessLayerException e) {
|
||||
statusHandler.error(e.getLocalizedMessage(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DatabaseID satDbid = D2DSatDatabase.getDbId(siteID);
|
||||
|
||||
|
@ -1613,8 +1611,8 @@ public class GridParmManager {
|
|||
statusHandler.info("d2dGridDataPurged removing database: "
|
||||
+ dbid);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if ((added.size() > 0) || (deleted.size() > 0)) {
|
||||
// DBInvChangeNotification changed = new DBInvChangeNotification(
|
||||
|
@ -1624,8 +1622,8 @@ public class GridParmManager {
|
|||
deleted, siteID);
|
||||
|
||||
SendNotifications.send(changed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Process D2D satellite data purge notification
|
||||
|
|
|
@ -77,6 +77,7 @@ import com.raytheon.uf.common.status.IUFStatusHandler;
|
|||
import com.raytheon.uf.common.status.PerformanceStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.uf.common.time.SimulatedTime;
|
||||
import com.raytheon.uf.common.time.TimeRange;
|
||||
import com.raytheon.uf.common.time.util.ITimer;
|
||||
import com.raytheon.uf.common.time.util.TimeUtil;
|
||||
|
@ -103,8 +104,8 @@ import com.raytheon.uf.edex.database.DataAccessLayerException;
* Removed unnecessary conversion from Lists to/from arrays
* Added performance logging
* 02/12/13 #1608 randerso Changed to explicitly call deleteGroups
* 03/07/13 #1737 njensen Logged getGridData times
* 03/15/13 #1795 njensen Added updatePublishTime()
* 03/07/13 #1737 njensen Logged getGridData times
* 03/15/13 #1795 njensen Added updatePublishTime()
* 03/20/13 #1774 randerso Cleanup code to use proper constructors
* 04/08/13 #1949 rjpeter Updated to work with normalized database.
* 05/02/13 #1969 randerso Removed updateDbs from parent class
@ -112,6 +113,7 @@ import com.raytheon.uf.edex.database.DataAccessLayerException;
* 07/30/13 #2057 randerso Added a static deleteDatabase method
* 08/05/13 #1571 randerso Refactored to store GridParmInfo and ParmStorageinfo in postgres database
* 10/31/2013 #2508 randerso Change to use DiscreteGridSlice.getKeys()
* 12/10/13 #2611 randerso Change saveGridData to set update time when saving grids
*
* </pre>
*
@ -152,22 +154,22 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
this.valid = true;
|
||||
ServerResponse<Object> failResponse = new ServerResponse<Object>();
|
||||
|
||||
try {
|
||||
// lookup actual database id row from database
|
||||
// if it doesn't exist, it will be created at this point
|
||||
try {
|
||||
// lookup actual database id row from database
|
||||
// if it doesn't exist, it will be created at this point
|
||||
this.dao = new GFEDao();
|
||||
|
||||
// Make a DatabaseID and save it.
|
||||
this.dbId = dao.getDatabaseId(dbId);
|
||||
} catch (Exception e) {
|
||||
this.dbId = dao.getDatabaseId(dbId);
|
||||
} catch (Exception e) {
|
||||
String msg = "Unable to look up database id for ifp database: "
|
||||
+ dbId;
|
||||
statusHandler.handle(Priority.PROBLEM, msg, e);
|
||||
failResponse.addMessage(msg);
|
||||
}
|
||||
}
|
||||
if (!failInitCheck(failResponse)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Get the current database configuration and store the information
|
||||
// in private data _parmInfo, _parmStorageInfo, and _areaStorageInfo
|
||||
|
@ -218,7 +220,7 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
statusHandler.error("DatabaseFAIL: " + this.dbId + "\n"
|
||||
+ failResponse.getMessages());
|
||||
this.valid = false;
|
||||
}
|
||||
}
|
||||
return this.valid;
|
||||
}
|
||||
|
||||
|
@ -572,20 +574,20 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
* The list of parms to delete
|
||||
*/
|
||||
private void removeOldParms(List<String> parms) {
|
||||
for (String item : parms) {
|
||||
statusHandler.handle(Priority.INFO, "Removing: " + item
|
||||
+ " from the " + this.dbId + " database.");
|
||||
try {
|
||||
// Remove the entire data structure for the parm
|
||||
for (String item : parms) {
|
||||
statusHandler.handle(Priority.INFO, "Removing: " + item
|
||||
+ " from the " + this.dbId + " database.");
|
||||
try {
|
||||
// Remove the entire data structure for the parm
|
||||
dao.removeParm(parmStorageInfo.get(item).getParmID());
|
||||
// parmIdMap.remove(item);
|
||||
this.parmStorageInfo.remove(item);
|
||||
} catch (DataAccessLayerException e) {
|
||||
statusHandler.handle(Priority.PROBLEM, "Error removing: "
|
||||
+ item + " from the database");
|
||||
this.parmStorageInfo.remove(item);
|
||||
} catch (DataAccessLayerException e) {
|
||||
statusHandler.handle(Priority.PROBLEM, "Error removing: "
|
||||
+ item + " from the database");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ServerResponse<List<ParmID>> getParmList() {
|
||||
|
@ -792,6 +794,14 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
|
||||
// track merge with existing records or add to new list
|
||||
for (GFERecord recToSave : recordsToSave) {
|
||||
// modify update time for non ISC/Official db
|
||||
if (!this.dbId.getModelName().equals("ISC")
|
||||
&& !this.dbId.getModelName().equals("Official")) {
|
||||
Date nowTime = SimulatedTime.getSystemTime().getTime();
|
||||
for (GridDataHistory history : recToSave.getGridHistory()) {
|
||||
history.setUpdateTime(nowTime);
|
||||
}
|
||||
}
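Per #2611, saveGridData now stamps each grid history's update time with the current simulated time just before saving, but only for databases other than ISC and Official. A minimal sketch of that rule in Python; the names are illustrative placeholders, not the actual GFE classes.

# Hedged sketch of the update-time stamping above; not the GFE API.
import time

def stamp_update_times(record_histories, model_name, now=None):
    """Set update_time on every history entry unless the model is ISC or Official."""
    if model_name in ("ISC", "Official"):
        return record_histories
    now = time.time() if now is None else now
    for hist in record_histories:
        hist["update_time"] = now
    return record_histories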
|
||||
TimeRange tr = recToSave.getTimeRange();
|
||||
GFERecord existing = existingMap.get(tr);
|
||||
if (existing != null) {
|
||||
|
@ -1130,7 +1140,7 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
if (!glocUser.equals(glocDb)) {
|
||||
|
||||
// save/update the database GridLocation
|
||||
try {
|
||||
try {
|
||||
dao.saveOrUpdateGridLocation(glocUser);
|
||||
|
||||
// remap the actual gridded data to the new gridLocation
|
||||
|
@ -1169,7 +1179,7 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
ParmStorageInfo newPSI = parmStorageInfoUser.get(compositeName);
|
||||
if (newPSI == null) {
|
||||
continue; // this parm not in new database, so skip
|
||||
}
|
||||
}
|
||||
|
||||
GridParmInfo newGPI = newPSI.getGridParmInfo();
|
||||
|
||||
|
@ -1189,12 +1199,12 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
statusHandler.error("Unable to retrieve GFERecords for "
|
||||
+ compositeName, e);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// process each grid
|
||||
for (GFERecord rec : records) {
|
||||
List<TimeRange> times = new ArrayList<TimeRange>();
|
||||
times.add(rec.getTimeRange());
|
||||
for (GFERecord rec : records) {
|
||||
List<TimeRange> times = new ArrayList<TimeRange>();
|
||||
times.add(rec.getTimeRange());
|
||||
ServerResponse<List<IGridSlice>> ssr = this.getGridData(
|
||||
rec.getParmId(), times, oldGL);
|
||||
sr.addMessages(ssr);
|
||||
|
@ -1205,24 +1215,24 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
continue;
|
||||
}
|
||||
IGridSlice slice = ssr.getPayload().get(0);
|
||||
IGridSlice newSlice = null;
|
||||
try {
|
||||
switch (slice.getGridInfo().getGridType()) {
|
||||
case NONE:
|
||||
break;
|
||||
case SCALAR:
|
||||
ScalarGridSlice scalarSlice = (ScalarGridSlice) slice;
|
||||
IGridSlice newSlice = null;
|
||||
try {
|
||||
switch (slice.getGridInfo().getGridType()) {
|
||||
case NONE:
|
||||
break;
|
||||
case SCALAR:
|
||||
ScalarGridSlice scalarSlice = (ScalarGridSlice) slice;
|
||||
Grid2DFloat newGrid = remapper.remap(scalarSlice
|
||||
.getScalarGrid(), scalarSlice.getGridInfo()
|
||||
.getMinValue(), scalarSlice.getGridInfo()
|
||||
.getMaxValue(), scalarSlice.getGridInfo()
|
||||
.getMinValue(), scalarSlice.getGridInfo()
|
||||
.getMinValue());
|
||||
scalarSlice.setScalarGrid(newGrid);
|
||||
newSlice = scalarSlice;
|
||||
break;
|
||||
case VECTOR:
|
||||
VectorGridSlice vectorSlice = (VectorGridSlice) slice;
|
||||
.getMinValue());
|
||||
scalarSlice.setScalarGrid(newGrid);
|
||||
newSlice = scalarSlice;
|
||||
break;
|
||||
case VECTOR:
|
||||
VectorGridSlice vectorSlice = (VectorGridSlice) slice;
|
||||
Grid2DFloat magOutput = new Grid2DFloat(newGL.getNx(),
|
||||
newGL.getNy());
|
||||
Grid2DFloat dirOutput = new Grid2DFloat(newGL.getNx(),
|
||||
|
@ -1233,38 +1243,38 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
.getMaxValue(), vectorSlice.getGridInfo()
|
||||
.getMinValue(), vectorSlice.getGridInfo()
|
||||
.getMinValue(), magOutput, dirOutput);
|
||||
vectorSlice.setDirGrid(dirOutput);
|
||||
vectorSlice.setMagGrid(magOutput);
|
||||
newSlice = vectorSlice;
|
||||
break;
|
||||
case WEATHER:
|
||||
WeatherGridSlice weatherSlice = (WeatherGridSlice) slice;
|
||||
vectorSlice.setDirGrid(dirOutput);
|
||||
vectorSlice.setMagGrid(magOutput);
|
||||
newSlice = vectorSlice;
|
||||
break;
|
||||
case WEATHER:
|
||||
WeatherGridSlice weatherSlice = (WeatherGridSlice) slice;
|
||||
Grid2DByte newWeatherGrid = remapper.remap(
|
||||
weatherSlice.getWeatherGrid(), 0, 0);
|
||||
weatherSlice.setWeatherGrid(newWeatherGrid);
|
||||
newSlice = weatherSlice;
|
||||
break;
|
||||
case DISCRETE:
|
||||
DiscreteGridSlice discreteSlice = (DiscreteGridSlice) slice;
|
||||
weatherSlice.getWeatherGrid(), 0, 0);
|
||||
weatherSlice.setWeatherGrid(newWeatherGrid);
|
||||
newSlice = weatherSlice;
|
||||
break;
|
||||
case DISCRETE:
|
||||
DiscreteGridSlice discreteSlice = (DiscreteGridSlice) slice;
|
||||
Grid2DByte newDiscreteGrid = remapper.remap(
|
||||
discreteSlice.getDiscreteGrid(), 0, 0);
|
||||
discreteSlice.setDiscreteGrid(newDiscreteGrid);
|
||||
newSlice = discreteSlice;
|
||||
break;
|
||||
}
|
||||
discreteSlice.getDiscreteGrid(), 0, 0);
|
||||
discreteSlice.setDiscreteGrid(newDiscreteGrid);
|
||||
newSlice = discreteSlice;
|
||||
break;
|
||||
}
|
||||
newSlice.setGridInfo(newGPI);
|
||||
rec.setMessageData(newSlice);
|
||||
this.removeFromHDF5(rec);
|
||||
rec.setMessageData(newSlice);
|
||||
this.removeFromHDF5(rec);
|
||||
this.saveGridsToHdf5(Arrays.asList(rec), newPSI);
|
||||
} catch (Exception e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
} catch (Exception e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Error remapping data for record [" + rec + "]", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return sr;
|
||||
}
|
||||
}
|
||||
|
||||
private ServerResponse<?> getDBConfiguration() {
|
||||
ServerResponse<?> sr = new ServerResponse<Object>();
|
||||
|
@ -1289,9 +1299,9 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
+ e.getLocalizedMessage();
|
||||
statusHandler.error(msg, e);
|
||||
sr.addMessage(msg);
|
||||
}
|
||||
}
|
||||
return sr;
|
||||
}
|
||||
}
|
||||
|
||||
private void compareParmInfoWithDB(
|
||||
Map<String, ParmStorageInfo> parmStorageInfoUser,
|
||||
|
@ -1386,12 +1396,12 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
return null;
|
||||
} else {
|
||||
psi = this.gridDbConfig.getParmStorageInfo(nameLevel[0],
|
||||
nameLevel[1]);
|
||||
if (psi == null) {
|
||||
statusHandler.handle(Priority.DEBUG, compositeName
|
||||
+ " not found in ParmStorageInfo config");
|
||||
nameLevel[1]);
|
||||
if (psi == null) {
|
||||
statusHandler.handle(Priority.DEBUG, compositeName
|
||||
+ " not found in ParmStorageInfo config");
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
psi.getGridParmInfo().resetParmID(
|
||||
|
@ -1722,7 +1732,7 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
first = false;
|
||||
} else {
|
||||
sb.append(GfeUtil.KEY_SEPARATOR);
|
||||
}
|
||||
}
|
||||
sb.append(key.toString());
|
||||
}
|
||||
byte[] keyBytes = sb.toString().getBytes();
|
||||
|
|
|
@ -1,304 +1,304 @@
|
|||
##
|
||||
# This software was developed and / or modified by Raytheon Company,
|
||||
##
|
||||
# This software was developed and / or modified by Raytheon Company,
|
||||
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
#
|
||||
# U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
# This software product contains export-restricted data whose
|
||||
# export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
# to non-U.S. persons whether in the United States or abroad requires
|
||||
# an export license or other authorization.
|
||||
#
|
||||
# This software product contains export-restricted data whose
|
||||
# export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
# to non-U.S. persons whether in the United States or abroad requires
|
||||
# an export license or other authorization.
|
||||
#
|
||||
# Contractor Name: Raytheon Company
|
||||
# Contractor Address: 6825 Pine Street, Suite 340
|
||||
# Mail Stop B8
|
||||
# Omaha, NE 68106
|
||||
# 402.291.0100
|
||||
#
|
||||
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
# further licensing information.
|
||||
##
|
||||
|
||||
import string, getopt, sys, time, gzip, os, iscTime, stat
|
||||
import numpy
|
||||
import LogStream, fcntl
|
||||
|
||||
#
|
||||
# merges two grids and histories together, input gridA is merged into gridB
|
||||
# result is returned from mergeGrid. Grids are represented in the following
|
||||
# manner:
|
||||
# Scalar: (grid, history)
|
||||
# Vector: ((magGrid, dirGrid), history)
|
||||
# Weather: ((byteGrid, key), history)
|
||||
# Discrete: ((byteGrid, key), history)
|
||||
#
|
||||
# SOFTWARE HISTORY
|
||||
#
|
||||
# Date Ticket# Engineer Description
|
||||
# ------------ ---------- ----------- --------------------------
|
||||
# 07/06/09 1995 bphillip Initial Creation.
|
||||
# 11/05/13 2517 randerso Improve memory utilization
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
class MergeGrid:
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# Constructor
|
||||
# Takes creationtime - seconds since Jan 1, 1970, to be used
|
||||
# in the updating of the histories.
|
||||
# siteID = site identifier for input grid
|
||||
# inFillValue = input fill value indicator
|
||||
# outFillValue = output fill value indicator
|
||||
# areaMask = numerical mask of areas to merge from grid1 to grid2
|
||||
# gridType = 'SCALAR', 'VECTOR', 'WEATHER', 'DISCRETE'
|
||||
#---------------------------------------------------------------------
|
||||
def __init__(self, creationTime, siteID, inFillValue, outFillValue,
|
||||
areaMask, gridType, discreteKeys=None):
|
||||
self.__creationTime = creationTime
|
||||
self.__siteID = siteID
|
||||
self.__inFillV = inFillValue
|
||||
self.__outFillV = outFillValue
|
||||
self.__areaMask = areaMask
|
||||
self.__gridType = gridType
|
||||
self.__discreteKeys = discreteKeys
|
||||
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# find key
|
||||
# key = input key
|
||||
# keymap = existing key maps (updated on exit)
|
||||
# returns the index to use for the key.
|
||||
#---------------------------------------------------------------------
|
||||
def __findKey(self, key, keyMap):
|
||||
try:
|
||||
index = keyMap.index(key)
|
||||
return index
|
||||
except:
|
||||
keyMap.append(key)
|
||||
return len(keyMap) - 1
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# commonize key
|
||||
# wxA = input grid and key
|
||||
# wxB = input grid and key
|
||||
# returns a tuple (commonkey, gridA, gridB) where gridA and gridB
|
||||
# now use the commonkey
|
||||
#---------------------------------------------------------------------
|
||||
def __commonizeKey(self, wxA, wxB):
|
||||
# make common key and make data changes in B
|
||||
gridB = wxB[0]
|
||||
key = wxA[1]
|
||||
newGrid = numpy.zeros_like(gridB)
|
||||
|
||||
for k in range(len(wxB[1])):
|
||||
index = self.__findKey(wxB[1][k], key)
|
||||
newGrid[gridB == k] = index
|
||||
|
||||
return (key, wxA[0], newGrid)
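__commonizeKey re-indexes gridB's byte values into wxA's key list so both grids share one key table before merging. The same idea, shown with plain numpy and assumed toy keys (not real weather/discrete data):

# Illustration of the re-keying done by __commonizeKey (assumed toy data).
import numpy

key_a = ["Rain", "Snow"]                          # key list of grid A
grid_b = numpy.array([[0, 1], [1, 0]], dtype=numpy.int8)
key_b = ["Snow", "Fog"]                           # key list of grid B

common_key = list(key_a)
new_grid_b = numpy.zeros_like(grid_b)
for k, name in enumerate(key_b):
    if name not in common_key:
        common_key.append(name)
    new_grid_b[grid_b == k] = common_key.index(name)

# common_key -> ['Rain', 'Snow', 'Fog']; new_grid_b now indexes into common_key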
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# update history strings
|
||||
# historyA = history from input grid (None to delete history entry)
|
||||
# historyB = history from base grid, list (None for no old grid.)
|
||||
# returns an updated list of strings, each string is an encoded history
|
||||
# returns None if no history is present.
|
||||
#---------------------------------------------------------------------
|
||||
def __updateHistoryStrings(self, historyA, historyB):
|
||||
|
||||
out = []
|
||||
|
||||
# removal any old entry
|
||||
if historyB is not None:
|
||||
for h in historyB:
|
||||
index = string.find(h, ":" + self.__siteID + "_GRID")
|
||||
if index == -1:
|
||||
out.append(h)
|
||||
|
||||
# if add mode, add in new entries
|
||||
if historyA is not None:
|
||||
for h in historyA:
|
||||
out.append(h)
|
||||
|
||||
if len(out) > 0:
|
||||
return out
|
||||
else:
|
||||
return None
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge scalar grid
|
||||
# Note: gridA can be None, which indicates that the data
|
||||
# is to be blanked out, i.e., made invalid. gridB can also be
|
||||
# none, which indicates that there is no destination grid and one must
|
||||
# be created.
|
||||
#---------------------------------------------------------------------
|
||||
    def __mergeScalarGrid(self, gridA, gridB):
        if gridA is None and gridB is None:
            return None

        # merge the grids
        if gridA is not None:
            mask = numpy.not_equal(gridA, self.__inFillV)
            numpy.logical_and(mask, self.__areaMask, mask)

            if gridB is None:
                return numpy.where(mask, gridA, self.__outFillV)
            else:
                return numpy.where(mask, gridA, gridB)

        # blank out the data
        else:
            return numpy.where(self.__areaMask, self.__outFillV, gridB)
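The scalar merge is a masked overlay: points of gridA that are valid (not the input fill value) and inside the area mask win; everywhere else the existing gridB (or the output fill value) is kept. For example, with toy arrays rather than real GFE grids:

# Toy example of the masked scalar merge above.
import numpy

in_fill = -99999.0
area_mask = numpy.array([[True, True], [False, True]])
grid_a = numpy.array([[1.0, in_fill], [3.0, 4.0]])
grid_b = numpy.array([[10.0, 20.0], [30.0, 40.0]])

mask = numpy.not_equal(grid_a, in_fill)
numpy.logical_and(mask, area_mask, mask)
merged = numpy.where(mask, grid_a, grid_b)
# merged == [[1.0, 20.0], [30.0, 4.0]]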
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge vector grid
|
||||
# Note: gridA can be None, which indicates that the data
|
||||
# is to be blanked out, i.e., made invalid. gridB can also be
|
||||
# none, which indicates that there is no destination grid and one must
|
||||
# be created.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeVectorGrid(self, gridA, gridB):
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
mask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask, mask)
|
||||
|
||||
if gridB is None:
|
||||
magGrid = numpy.where(mask, gridA[0], self.__outFillV)
|
||||
dirGrid = numpy.where(mask, gridA[1], 0.0)
|
||||
else:
|
||||
magGrid = numpy.where(mask, gridA[0], gridB[0])
|
||||
dirGrid = numpy.where(mask, gridA[1], gridB[1])
|
||||
return (magGrid, dirGrid)
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
magGrid = numpy.where(self.__areaMask, self.__outFillV, gridB[0])
|
||||
dirGrid = numpy.where(self.__areaMask, 0.0, gridB[1])
|
||||
return (magGrid, dirGrid)
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge weather grid
|
||||
#
|
||||
# Note the outFillV is ignored for now, all out-of-bounds points will
|
||||
# get the <NoWx> value.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeWeatherGrid(self, gridA, gridB):
|
||||
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
noWx = "<NoCov>:<NoWx>:<NoInten>:<NoVis>:"
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
mask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask, mask)
|
||||
|
||||
if gridB is None: #make an empty grid
|
||||
noWxKeys = []
|
||||
noWxGrid = numpy.empty_like(gridA[0])
|
||||
noWxGrid.fill(self.__findKey(noWx, noWxKeys))
|
||||
gridB = (noWxGrid, noWxKeys)
|
||||
(commonkey, remapG, dbG) = self.__commonizeKey(gridA, gridB)
|
||||
mergedGrid = numpy.where(mask, remapG, dbG)
|
||||
return (mergedGrid, commonkey)
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
blankGrid = numpy.empty_like(gridB[0])
|
||||
blankGrid.fill(self.__findKey(noWx, gridB[1]))
|
||||
key = gridB[1]
|
||||
grid = numpy.where(self.__areaMask, blankGrid, gridB[0])
|
||||
return (grid, key)
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge discrete grid
|
||||
#
|
||||
# Note the outFillV is ignored for now, all out-of-bounds points will
|
||||
# get the first value in the discrete key.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeDiscreteGrid(self, gridA, gridB):
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
noKey = self.__discreteKeys[0]
|
||||
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
mask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask)
|
||||
|
||||
if gridB is None: #make an empty grid
|
||||
noKeys = []
|
||||
noGrid = numpy.empty_like(gridA[0])
|
||||
noGrid.fill(self.__findKey(noKey, noKeys))
|
||||
gridB = (noGrid, noKeys)
|
||||
|
||||
(commonkey, remapG, dbG) = \
|
||||
self.__commonizeKey(gridA, gridB)
|
||||
mergedGrid = numpy.where(mask, remapG, dbG)
|
||||
return (mergedGrid, commonkey)
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
blankGrid = numpy.empty_like(gridB[0])
|
||||
blankGrid.fill(self.__findKey(noKey, gridB[1]))
|
||||
key = gridB[1]
|
||||
grid = numpy.where(self.__areaMask, blankGrid, gridB[0])
|
||||
return (grid, key)
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# mergeGrid
|
||||
# Merges the grid
|
||||
# Scalar: (grid, history)
|
||||
# Vector: ((magGrid, dirGrid), history)
|
||||
# Weather: ((byteGrid, key), history)
|
||||
# Discrete: ((byteGrid, key), history)
|
||||
# gridA = input remapped grid, contains inFillV to denote invalid
|
||||
# gridB = grid to have gridA mosaic'd into
|
||||
# Note: gridA can be None, which indicates that the data
|
||||
# is to be blanked out, i.e., made invalid. gridB can also be
|
||||
# none, which indicates that there is no destination grid and one must
|
||||
# be created.
|
||||
#---------------------------------------------------------------------
|
||||
def mergeGrid(self, gridAIn, gridBIn):
|
||||
# merge the grids
|
||||
if gridAIn is not None:
|
||||
gridA = gridAIn[0]
|
||||
historyA = gridAIn[1]
|
||||
else:
|
||||
gridA = None
|
||||
historyA = None
|
||||
if gridBIn is not None:
|
||||
gridB = gridBIn[0]
|
||||
historyB = gridBIn[1]
|
||||
else:
|
||||
gridB = None
|
||||
historyB = None
|
||||
|
||||
if self.__gridType == 'SCALAR':
|
||||
mergedGrid = self.__mergeScalarGrid(gridA, gridB)
|
||||
|
||||
elif self.__gridType == 'VECTOR':
|
||||
mergedGrid = self.__mergeVectorGrid(gridA, gridB)
|
||||
|
||||
elif self.__gridType == 'WEATHER':
|
||||
mergedGrid = self.__mergeWeatherGrid(gridA, gridB)
|
||||
|
||||
elif self.__gridType == 'DISCRETE':
|
||||
mergedGrid = self.__mergeDiscreteGrid(gridA, gridB)
|
||||
|
||||
else:
|
||||
mergedGrid = None
|
||||
|
||||
# merge History
|
||||
history = self.__updateHistoryStrings(historyA, historyB)
|
||||
|
||||
return (mergedGrid, history)
|
||||
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
# further licensing information.
|
||||
##
|
||||
|
||||
import string, getopt, sys, time, gzip, os, iscTime, stat
|
||||
import numpy
|
||||
import LogStream, fcntl
|
||||
|
||||
#
|
||||
# merges two grids and histories together, input gridA is merged into gridB
|
||||
# result is returned from mergeGrid. Grids are represented in the following
|
||||
# manner:
|
||||
# Scalar: (grid, history)
|
||||
# Vector: ((magGrid, dirGrid), history)
|
||||
# Weather: ((byteGrid, key), history)
|
||||
# Discrete: ((byteGrid, key), history)
|
||||
#
|
||||
# SOFTWARE HISTORY
|
||||
#
|
||||
# Date Ticket# Engineer Description
|
||||
# ------------ ---------- ----------- --------------------------
|
||||
# 07/06/09 1995 bphillip Initial Creation.
|
||||
# 11/05/13 2517 randerso Improve memory utilization
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
class MergeGrid:
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# Constructor
|
||||
# Takes creationtime - seconds since Jan 1, 1970, to be used
|
||||
# in the updating of the histories.
|
||||
# siteID = site identifier for input grid
|
||||
# inFillValue = input fill value indicator
|
||||
# outFillValue = output fill value indicator
|
||||
# areaMask = numerical mask of areas to merge from grid1 to grid2
|
||||
# gridType = 'SCALAR', 'VECTOR', 'WEATHER', 'DISCRETE'
|
||||
#---------------------------------------------------------------------
|
||||
def __init__(self, creationTime, siteID, inFillValue, outFillValue,
|
||||
areaMask, gridType, discreteKeys=None):
|
||||
self.__creationTime = creationTime
|
||||
self.__siteID = siteID
|
||||
self.__inFillV = inFillValue
|
||||
self.__outFillV = outFillValue
|
||||
self.__areaMask = areaMask
|
||||
self.__gridType = gridType
|
||||
self.__discreteKeys = discreteKeys
|
||||
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# find key
|
||||
# key = input key
|
||||
# keymap = existing key maps (updated on exit)
|
||||
# returns the index to use for the key.
|
||||
#---------------------------------------------------------------------
|
||||
def __findKey(self, key, keyMap):
|
||||
try:
|
||||
index = keyMap.index(key)
|
||||
return index
|
||||
except:
|
||||
keyMap.append(key)
|
||||
return len(keyMap) - 1
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# commonize key
|
||||
# wxA = input grid and key
|
||||
# wxB = input grid and key
|
||||
# returns a tuple (commonkey, gridA, gridB) where gridA and gridB
|
||||
# now use the commonkey
|
||||
#---------------------------------------------------------------------
|
||||
def __commonizeKey(self, wxA, wxB):
|
||||
# make common key and make data changes in B
|
||||
gridB = wxB[0]
|
||||
key = wxA[1]
|
||||
newGrid = numpy.zeros_like(gridB)
|
||||
|
||||
for k in range(len(wxB[1])):
|
||||
index = self.__findKey(wxB[1][k], key)
|
||||
newGrid[gridB == k] = index
|
||||
|
||||
return (key, wxA[0], newGrid)
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# update history strings
|
||||
# historyA = history from input grid (None to delete history entry)
|
||||
# historyB = history from base grid, list (None for no old grid.)
|
||||
# returns an updated list of strings, each string is an encoded history
|
||||
# returns None if no history is present.
|
||||
#---------------------------------------------------------------------
|
||||
def __updateHistoryStrings(self, historyA, historyB):
|
||||
|
||||
out = []
|
||||
|
||||
# removal any old entry
|
||||
if historyB is not None:
|
||||
for h in historyB:
|
||||
index = string.find(h, ":" + self.__siteID + "_GRID")
|
||||
if index == -1:
|
||||
out.append(h)
|
||||
|
||||
# if add mode, add in new entries
|
||||
if historyA is not None:
|
||||
for h in historyA:
|
||||
out.append(h)
|
||||
|
||||
if len(out) > 0:
|
||||
return out
|
||||
else:
|
||||
return None
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge scalar grid
|
||||
# Note: gridA can be None, which indicates that the data
|
||||
# is to be blanked out, i.e., made invalid. gridB can also be
|
||||
# none, which indicates that there is no destination grid and one must
|
||||
# be created.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeScalarGrid(self, gridA, gridB):
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
mask = numpy.not_equal(gridA, self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask, mask)
|
||||
|
||||
if gridB is None:
|
||||
return numpy.where(mask, gridA, self.__outFillV)
|
||||
else:
|
||||
return numpy.where(mask, gridA, gridB)
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
return numpy.where(self.__areaMask, self.__outFillV, gridB)
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge vector grid
|
||||
# Note: gridA can be None, which indicates that the data
|
||||
# is to be blanked out, i.e., made invalid. gridB can also be
|
||||
# none, which indicates that there is no destination grid and one must
|
||||
# be created.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeVectorGrid(self, gridA, gridB):
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
mask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask, mask)
|
||||
|
||||
if gridB is None:
|
||||
magGrid = numpy.where(mask, gridA[0], self.__outFillV)
|
||||
dirGrid = numpy.where(mask, gridA[1], 0.0)
|
||||
else:
|
||||
magGrid = numpy.where(mask, gridA[0], gridB[0])
|
||||
dirGrid = numpy.where(mask, gridA[1], gridB[1])
|
||||
return (magGrid, dirGrid)
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
magGrid = numpy.where(self.__areaMask, self.__outFillV, gridB[0])
|
||||
dirGrid = numpy.where(self.__areaMask, 0.0, gridB[1])
|
||||
return (magGrid, dirGrid)
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge weather grid
|
||||
#
|
||||
# Note the outFillV is ignored for now, all out-of-bounds points will
|
||||
# get the <NoWx> value.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeWeatherGrid(self, gridA, gridB):
|
||||
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
noWx = "<NoCov>:<NoWx>:<NoInten>:<NoVis>:"
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
mask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask, mask)
|
||||
|
||||
if gridB is None: #make an empty grid
|
||||
noWxKeys = []
|
||||
noWxGrid = numpy.empty_like(gridA[0])
|
||||
noWxGrid.fill(self.__findKey(noWx, noWxKeys))
|
||||
gridB = (noWxGrid, noWxKeys)
|
||||
(commonkey, remapG, dbG) = self.__commonizeKey(gridA, gridB)
|
||||
mergedGrid = numpy.where(mask, remapG, dbG)
|
||||
return (mergedGrid, commonkey)
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
blankGrid = numpy.empty_like(gridB[0])
|
||||
blankGrid.fill(self.__findKey(noWx, gridB[1]))
|
||||
key = gridB[1]
|
||||
grid = numpy.where(self.__areaMask, blankGrid, gridB[0])
|
||||
return (grid, key)
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge discrete grid
|
||||
#
|
||||
# Note the outFillV is ignored for now, all out-of-bounds points will
|
||||
# get the first value in the discrete key.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeDiscreteGrid(self, gridA, gridB):
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
noKey = self.__discreteKeys[0]
|
||||
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
mask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask, mask)
|
||||
|
||||
if gridB is None: #make an empty grid
|
||||
noKeys = []
|
||||
noGrid = numpy.empty_like(gridA[0])
|
||||
noGrid.fill(self.__findKey(noKey, noKeys))
|
||||
gridB = (noGrid, noKeys)
|
||||
|
||||
(commonkey, remapG, dbG) = \
|
||||
self.__commonizeKey(gridA, gridB)
|
||||
mergedGrid = numpy.where(mask, remapG, dbG)
|
||||
return (mergedGrid, commonkey)
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
blankGrid = numpy.empty_like(gridB[0])
|
||||
blankGrid.fill(self.__findKey(noKey, gridB[1]))
|
||||
key = gridB[1]
|
||||
grid = numpy.where(self.__areaMask, blankGrid, gridB[0])
|
||||
return (grid, key)
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# mergeGrid
|
||||
# Merges the grid
|
||||
# Scalar: (grid, history)
|
||||
# Vector: ((magGrid, dirGrid), history)
|
||||
# Weather: ((byteGrid, key), history)
|
||||
# Discrete: ((byteGrid, key), history)
|
||||
# gridA = input remapped grid, contains inFillV to denote invalid
|
||||
# gridB = grid to have gridA mosaic'd into
|
||||
# Note: gridA can be None, which indicates that the data
|
||||
# is to be blanked out, i.e., made invalid. gridB can also be
|
||||
# none, which indicates that there is no destination grid and one must
|
||||
# be created.
|
||||
#---------------------------------------------------------------------
|
||||
def mergeGrid(self, gridAIn, gridBIn):
|
||||
# merge the grids
|
||||
if gridAIn is not None:
|
||||
gridA = gridAIn[0]
|
||||
historyA = gridAIn[1]
|
||||
else:
|
||||
gridA = None
|
||||
historyA = None
|
||||
if gridBIn is not None:
|
||||
gridB = gridBIn[0]
|
||||
historyB = gridBIn[1]
|
||||
else:
|
||||
gridB = None
|
||||
historyB = None
|
||||
|
||||
if self.__gridType == 'SCALAR':
|
||||
mergedGrid = self.__mergeScalarGrid(gridA, gridB)
|
||||
|
||||
elif self.__gridType == 'VECTOR':
|
||||
mergedGrid = self.__mergeVectorGrid(gridA, gridB)
|
||||
|
||||
elif self.__gridType == 'WEATHER':
|
||||
mergedGrid = self.__mergeWeatherGrid(gridA, gridB)
|
||||
|
||||
elif self.__gridType == 'DISCRETE':
|
||||
mergedGrid = self.__mergeDiscreteGrid(gridA, gridB)
|
||||
|
||||
else:
|
||||
mergedGrid = None
|
||||
|
||||
# merge History
|
||||
history = self.__updateHistoryStrings(historyA, historyB)
|
||||
|
||||
return (mergedGrid, history)
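End to end, mergeGrid takes (grid, history) tuples and returns the mosaicked grid plus the merged history strings. A minimal usage sketch for the SCALAR case follows; the values are toy data and the import assumes this module is available on the path as MergeGrid.

# Minimal SCALAR usage sketch for MergeGrid (toy data, illustrative only).
import numpy
from MergeGrid import MergeGrid   # assumption: module importable under this name

area = numpy.ones((2, 2), dtype=bool)
merger = MergeGrid(creationTime=0, siteID="OAX", inFillValue=-99999.0,
                   outFillValue=-99999.0, areaMask=area, gridType='SCALAR')

grid_a = (numpy.full((2, 2), 5.0), ["20131216_0000:OAX_GRID"])
grid_b = (numpy.zeros((2, 2)), ["20131215_0000:XXX_GRID"])
merged, history = merger.mergeGrid(grid_a, grid_b)
# merged is gridA wherever it is valid inside the mask; history keeps the
# non-OAX entry from gridB and appends gridA's entries.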
|
||||
|
|
|
@ -60,6 +60,7 @@ import com.raytheon.uf.edex.decodertools.time.TimeTools;
* May 09, 2013 1869 bsteffen Modified D2D time series of point data to
* work without dataURI.
* Aug 30, 2013 2298 rjpeter Make getPluginName abstract
* Dec 16, 2013 DR 16920 D. Friemdan Fix type of tempFromTenths access.
*
* </pre>
*
@ -398,7 +399,7 @@ public class MetarPointDataTransform {

mr.setTemperature(pdv.getNumber(TEMPERATURE).intValue());
mr.setDewPoint(pdv.getNumber(DEWPOINT).intValue());
mr.setTempFromTenths(pdv.getNumber(TEMP_FROM_TENTHS).intValue());
mr.setTempFromTenths(pdv.getNumber(TEMP_FROM_TENTHS).floatValue());
mr.setDewPointFromTenths(pdv.getNumber(DP_FROM_TENTHS).floatValue());

mr.setMinTemp6Hour(pdv.getNumber(MIN_TEMP6_HOUR).floatValue());
|
@ -57,5 +57,10 @@
|
|||
</route>
|
||||
|
||||
</camelContext>
|
||||
|
||||
|
||||
<bean factory-bean="manualProc"
|
||||
factory-method="registerSecondaryPlugin">
|
||||
<constructor-arg value="text" />
|
||||
</bean>
|
||||
|
||||
</beans>
|
|
@ -14,11 +14,6 @@
|
|||
<constructor-arg value="jms-dist:queue:Ingest.Text"/>
|
||||
</bean>
|
||||
|
||||
<bean factory-bean="manualProc"
|
||||
factory-method="registerSecondaryPlugin">
|
||||
<constructor-arg value="text" />
|
||||
</bean>
|
||||
|
||||
<bean id="textHandleoupDistRegistry" factory-bean="handleoupDistributionSrv"
|
||||
factory-method="register">
|
||||
<constructor-arg value="text" />
|
||||
|
@ -52,6 +47,10 @@
|
|||
<constructor-arg ref="textArchiveNamer" />
|
||||
</bean>
|
||||
|
||||
<bean factory-bean="databaseArchiver" factory-method="registerPluginBatchSize" depends-on="databaseArchiver">
|
||||
<constructor-arg value="text" />
|
||||
<constructor-arg value="1000" type="java.lang.Integer"/>
|
||||
</bean>
|
||||
|
||||
<camelContext id="text-camel"
|
||||
xmlns="http://camel.apache.org/schema/spring"
|
||||
|
|
|
@ -0,0 +1,5 @@
# Every minute, the text database is version purged on the AFOS id's that were
# inserted in the last minute. When purge runs, if the hour is a multiple of
# the interval, it does a full version purge to catch any cases not handled by
# the purge every minute.
text.fullVersionPurge.intervalhours=3
@ -28,9 +28,10 @@ import com.raytheon.edex.db.dao.DefaultPluginDao;
|
|||
import com.raytheon.edex.textdb.dao.StdTextProductDao;
|
||||
import com.raytheon.edex.textdb.dbapi.impl.TextDB;
|
||||
import com.raytheon.uf.common.dataplugin.PluginException;
|
||||
import com.raytheon.uf.common.dataplugin.persist.PersistableDataObject;
|
||||
import com.raytheon.uf.common.dataquery.db.QueryParam.QueryOperand;
|
||||
import com.raytheon.uf.common.time.util.TimeUtil;
|
||||
import com.raytheon.uf.edex.database.DataAccessLayerException;
|
||||
import com.raytheon.uf.edex.database.processor.IDatabaseProcessor;
|
||||
import com.raytheon.uf.edex.database.purge.PurgeLogger;
|
||||
import com.raytheon.uf.edex.database.query.DatabaseQuery;
|
||||
|
||||
|
@ -45,12 +46,29 @@ import com.raytheon.uf.edex.database.query.DatabaseQuery;
|
|||
* ------------ ---------- ----------- --------------------------
|
||||
* Jul 10, 2009 2191 rjpeter Update retention time handling.
|
||||
* Aug 18, 2009 2191 rjpeter Changed to version purging.
|
||||
* Dec 13, 2013 2555 rjpeter Renamed getRecordsToArchive to processArchiveRecords.
|
||||
* </pre>
|
||||
*
|
||||
* @author
|
||||
* @version 1
|
||||
*/
|
||||
public class TextDao extends DefaultPluginDao {
|
||||
private static final int fullPurgeInterval;
|
||||
|
||||
static {
|
||||
String fullPurgeProperty = System.getProperty(
|
||||
"text.fullVersionPurge.intervalhours", "3");
|
||||
Integer val = null;
|
||||
try {
|
||||
val = Integer.parseInt(fullPurgeProperty);
|
||||
if ((val < 0) || (val > 23)) {
|
||||
|
||||
}
|
||||
} catch (Exception e) {
|
||||
val = new Integer(3);
|
||||
}
|
||||
fullPurgeInterval = val.intValue();
|
||||
}
|
||||
|
||||
public TextDao(String pluginName) throws PluginException {
|
||||
super(pluginName);
|
||||
|
@ -71,7 +89,7 @@ public class TextDao extends DefaultPluginDao {
|
|||
|
||||
// only do full purge every few hours since incremental purge runs every
|
||||
// minute
|
||||
if (Calendar.getInstance().get(Calendar.HOUR_OF_DAY) % 3 == 0) {
|
||||
if ((TimeUtil.newGmtCalendar().get(Calendar.HOUR_OF_DAY) % fullPurgeInterval) == 0) {
|
||||
TextDB.purgeStdTextProducts();
|
||||
}
|
||||
|
||||
|
@ -79,10 +97,9 @@ public class TextDao extends DefaultPluginDao {
|
|||
"text");
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public List<PersistableDataObject> getRecordsToArchive(
|
||||
Calendar insertStartTime, Calendar insertEndTime)
|
||||
public int processArchiveRecords(Calendar insertStartTime,
|
||||
Calendar insertEndTime, IDatabaseProcessor processor)
|
||||
throws DataAccessLayerException {
|
||||
StdTextProductDao dao = new StdTextProductDao(true);
|
||||
DatabaseQuery dbQuery = new DatabaseQuery(dao.getDaoClass());
|
||||
|
@ -91,8 +108,9 @@ public class TextDao extends DefaultPluginDao {
|
|||
dbQuery.addQueryParam("insertTime", insertEndTime,
|
||||
QueryOperand.LESSTHAN);
|
||||
dbQuery.addOrder("insertTime", true);
|
||||
dbQuery.addOrder("refTime", true);
|
||||
|
||||
return (List<PersistableDataObject>) dao.queryByCriteria(dbQuery);
|
||||
return this.processByCriteria(dbQuery, processor);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
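This hunk replaces getRecordsToArchive, which loaded every matching record into a single list, with processArchiveRecords, which hands batches of rows to an IDatabaseProcessor through processByCriteria. A minimal sketch of a processor, assuming the interface exposes at least the process(List<?>) and finish() methods used by DatabaseArchiveProcessor later in this change set (any additional members, such as a batch-size accessor, would also need implementing):

import java.util.List;

import com.raytheon.uf.edex.database.processor.IDatabaseProcessor;

public class CountingProcessor implements IDatabaseProcessor {
    private int count = 0;

    @Override
    public boolean process(List<?> objects) {
        // called once per batch fetched by processByCriteria
        if (objects != null) {
            count += objects.size();
        }
        return true; // returning false would stop the iteration early
    }

    @Override
    public void finish() {
        // called once after the final batch; nothing to clean up here
    }

    public int getCount() {
        return count;
    }
}

Streaming batches keeps memory bounded for large text archives instead of materializing the whole result set at once.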
@ -19,15 +19,7 @@
|
|||
**/
|
||||
package com.raytheon.edex.plugin.text.maintenance.archiver;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.persist.DefaultPathProvider;
|
||||
import com.raytheon.uf.common.dataplugin.persist.PersistableDataObject;
|
||||
|
@ -35,7 +27,6 @@ import com.raytheon.uf.common.dataplugin.text.db.StdTextProduct;
|
|||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.edex.archive.IPluginArchiveFileNameFormatter;
|
||||
import com.raytheon.uf.edex.database.DataAccessLayerException;
|
||||
import com.raytheon.uf.edex.database.plugin.PluginDao;
|
||||
|
||||
/**
|
||||
|
@ -47,8 +38,9 @@ import com.raytheon.uf.edex.database.plugin.PluginDao;
|
|||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Apr 20, 2012 dgilling Initial creation
|
||||
* Nov 05, 2013 2499 rjpeter Moved IPluginArchiveFileNameFormatter.
|
||||
* Apr 20, 2012 dgilling Initial creation
|
||||
* Nov 05, 2013 2499 rjpeter Moved IPluginArchiveFileNameFormatter.
|
||||
* Dec 13, 2013 2555 rjpeter Refactored.
|
||||
* </pre>
|
||||
*
|
||||
* @author dgilling
|
||||
|
@ -65,60 +57,26 @@ public class TextArchiveFileNameFormatter implements
|
|||
* (non-Javadoc)
|
||||
*
|
||||
* @see
|
||||
* com.raytheon.uf.edex.maintenance.archive.IPluginArchiveFileNameFormatter
|
||||
* #getPdosByFile(java.lang.String,
|
||||
* com.raytheon.uf.edex.database.plugin.PluginDao, java.util.Map,
|
||||
* java.util.Calendar, java.util.Calendar)
|
||||
* com.raytheon.uf.edex.archive.IPluginArchiveFileNameFormatter#getFilename
|
||||
* (java.lang.String, com.raytheon.uf.edex.database.plugin.PluginDao,
|
||||
* com.raytheon.uf.common.dataplugin.persist.PersistableDataObject)
|
||||
*/
|
||||
@SuppressWarnings("rawtypes")
|
||||
@Override
|
||||
public Map<String, List<PersistableDataObject>> getPdosByFile(
|
||||
String pluginName, PluginDao dao,
|
||||
Map<String, List<PersistableDataObject>> pdoMap,
|
||||
Calendar startTime, Calendar endTime)
|
||||
throws DataAccessLayerException {
|
||||
List<PersistableDataObject> pdos = dao.getRecordsToArchive(startTime,
|
||||
endTime);
|
||||
public String getFilename(String pluginName, PluginDao dao,
|
||||
PersistableDataObject<?> pdo) {
|
||||
String path = null;
|
||||
if (pdo instanceof StdTextProduct) {
|
||||
StdTextProduct casted = (StdTextProduct) pdo;
|
||||
|
||||
Set<String> newFileEntries = new HashSet<String>();
|
||||
if ((pdos != null) && !pdos.isEmpty()) {
|
||||
if (pdos.get(0) instanceof StdTextProduct) {
|
||||
for (PersistableDataObject pdo : pdos) {
|
||||
StdTextProduct casted = (StdTextProduct) pdo;
|
||||
|
||||
// no refTime to use, so we use creation time
|
||||
Date time = new Date(casted.getRefTime());
|
||||
String path = pluginName
|
||||
+ DefaultPathProvider.fileNameFormat.get().format(
|
||||
time);
|
||||
|
||||
newFileEntries.add(path);
|
||||
List<PersistableDataObject> list = pdoMap.get(path);
|
||||
if (list == null) {
|
||||
list = new ArrayList<PersistableDataObject>(pdos.size());
|
||||
pdoMap.put(path, list);
|
||||
}
|
||||
list.add(pdo);
|
||||
}
|
||||
} else {
|
||||
statusHandler.error("Invalid PersistableDataObject class "
|
||||
+ pdos.get(0).getClass()
|
||||
+ "sent to TextArchiveFileNameFormatter to archive");
|
||||
}
|
||||
// no refTime to use, so we use creation time
|
||||
Date time = new Date(casted.getRefTime());
|
||||
path = pluginName
|
||||
+ DefaultPathProvider.fileNameFormat.get().format(time);
|
||||
} else {
|
||||
statusHandler.error("Invalid PersistableDataObject class "
|
||||
+ pdo.getClass()
|
||||
+ "sent to TextArchiveFileNameFormatter to archive");
|
||||
}
|
||||
|
||||
Iterator<String> iter = pdoMap.keySet().iterator();
|
||||
Map<String, List<PersistableDataObject>> pdosToSave = new HashMap<String, List<PersistableDataObject>>(
|
||||
pdoMap.size() - newFileEntries.size());
|
||||
|
||||
while (iter.hasNext()) {
|
||||
String key = iter.next();
|
||||
if (!newFileEntries.contains(key)) {
|
||||
pdosToSave.put(key, pdoMap.get(key));
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
|
||||
return pdosToSave;
|
||||
return path;
|
||||
}
|
||||
}
@ -31,7 +31,6 @@ import java.util.Arrays;
|
|||
import java.util.Calendar;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -88,6 +87,8 @@ import com.raytheon.uf.common.util.FileUtil;
|
|||
* Jul 24, 2013 2221 rferrel Changes for select configuration.
|
||||
* Aug 06, 2013 2224 rferrel Changes to use DataSet.
|
||||
* Aug 28, 2013 2299 rferrel purgeExpiredFromArchive now returns the number of files purged.
|
||||
* Dec 04, 2013 2603 rferrel Changes to improve archive purging.
|
||||
* Dec 17, 2013 2603 rjpeter Fix directory purging.
|
||||
* </pre>
|
||||
*
|
||||
* @author rferrel
|
||||
|
@ -189,23 +190,31 @@ public class ArchiveConfigManager {
|
|||
String fileName = ArchiveConstants.selectFileName(Type.Retention, null);
|
||||
SelectConfig selections = loadSelection(fileName);
|
||||
if ((selections != null) && !selections.isEmpty()) {
|
||||
try {
|
||||
for (ArchiveSelect archiveSelect : selections.getArchiveList()) {
|
||||
ArchiveConfig archiveConfig = archiveMap.get(archiveSelect
|
||||
.getName());
|
||||
for (CategorySelect categorySelect : archiveSelect
|
||||
.getCategorySelectList()) {
|
||||
CategoryConfig categoryConfig = archiveConfig
|
||||
.getCategory(categorySelect.getName());
|
||||
categoryConfig.setSelectedDisplayNames(categorySelect
|
||||
.getSelectList());
|
||||
}
|
||||
for (ArchiveSelect archiveSelect : selections.getArchiveList()) {
|
||||
String archiveName = archiveSelect.getName();
|
||||
ArchiveConfig archiveConfig = archiveMap.get(archiveName);
|
||||
if (archiveConfig == null) {
|
||||
statusHandler.handle(Priority.WARN,
|
||||
"Archive Configuration [" + archiveName
|
||||
+ "] not found. Skipping selections.");
|
||||
continue;
|
||||
}
|
||||
|
||||
for (CategorySelect categorySelect : archiveSelect
|
||||
.getCategorySelectList()) {
|
||||
String categoryname = categorySelect.getName();
|
||||
CategoryConfig categoryConfig = archiveConfig
|
||||
.getCategory(categoryname);
|
||||
if (categoryConfig == null) {
|
||||
statusHandler.handle(Priority.WARN,
|
||||
"Archive Configuration [" + archiveName
|
||||
+ "] Category [" + categoryname
|
||||
+ "] not found. Skipping selections.");
|
||||
continue;
|
||||
}
|
||||
categoryConfig.setSelectedDisplayNames(categorySelect
|
||||
.getSelectSet());
|
||||
}
|
||||
} catch (NullPointerException ex) {
|
||||
statusHandler
|
||||
.handle(Priority.ERROR,
|
||||
"Retention selection and Archive configuration no longer in sync: ",
|
||||
ex);
|
||||
}
|
||||
}
|
||||
return archiveMap.values();
|
||||
|
@ -285,7 +294,8 @@ public class ArchiveConfigManager {
|
|||
|
||||
/**
|
||||
* Purge the Files that fall outside of the time frame constraints for the
|
||||
* Archive.
|
||||
* archive. This will always leave the archive's top level directories even
|
||||
* when they are empty.
|
||||
*
|
||||
* @param archive
|
||||
* @return purgeCount
|
||||
|
@ -293,107 +303,244 @@ public class ArchiveConfigManager {
|
|||
public int purgeExpiredFromArchive(ArchiveConfig archive) {
|
||||
String archiveRootDirPath = archive.getRootDir();
|
||||
File archiveRootDir = new File(archiveRootDirPath);
|
||||
|
||||
String[] topLevelDirs = archiveRootDir.list();
|
||||
|
||||
List<String> topLevelDirsNotPurged = new ArrayList<String>();
|
||||
int purgeCount = 0;
|
||||
|
||||
if (topLevelDirs != null) {
|
||||
topLevelDirsNotPurged.addAll(Arrays.asList(topLevelDirs));
|
||||
topLevelDirs = null;
|
||||
if (!archiveRootDir.isDirectory()) {
|
||||
statusHandler.error(archiveRootDir.getAbsolutePath()
|
||||
+ " not a directory.");
|
||||
return purgeCount;
|
||||
}
|
||||
|
||||
if (statusHandler.isPriorityEnabled(Priority.INFO)) {
|
||||
statusHandler.info("Purging directory: \""
|
||||
+ archiveRootDir.getAbsolutePath() + "\".");
|
||||
}
|
||||
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
String message = String.format(
|
||||
"Start setup of category date helpers for archive: %s.",
|
||||
archive.getName());
|
||||
statusHandler.debug(message);
|
||||
}
|
||||
|
||||
Map<CategoryConfig, CategoryFileDateHelper> helperMap = new HashMap<CategoryConfig, CategoryFileDateHelper>();
|
||||
for (CategoryConfig category : archive.getCategoryList()) {
|
||||
Calendar purgeTime = calculateExpiration(archive, category);
|
||||
CategoryFileDateHelper helper = new CategoryFileDateHelper(
|
||||
category, archive.getRootDir());
|
||||
IOFileFilter fileDateFilter = FileFilterUtils.and(FileFilterUtils
|
||||
.fileFileFilter(), new FileDateFilter(null, purgeTime,
|
||||
helper));
|
||||
|
||||
// Remove the directory associated with this category from the not
|
||||
// purged list since it is being purged.
|
||||
for (Iterator<String> iter = topLevelDirsNotPurged.iterator(); iter
|
||||
.hasNext();) {
|
||||
String dirName = iter.next();
|
||||
if (helper.isCategoryDirectory(dirName)) {
|
||||
iter.remove();
|
||||
break;
|
||||
}
|
||||
}
|
||||
for (DisplayData display : getDisplayData(archive.getName(),
|
||||
category.getName(), true)) {
|
||||
List<File> displayFiles = getDisplayFiles(display, null,
|
||||
purgeTime);
|
||||
for (File file : displayFiles) {
|
||||
purgeCount += purgeFile(file, fileDateFilter);
|
||||
}
|
||||
}
|
||||
CategoryFileDateHelper helper = new CategoryFileDateHelper(category);
|
||||
helperMap.put(category, helper);
|
||||
}
|
||||
|
||||
// check for other expired in top level directories not covered
|
||||
// by the categories in the archive.
|
||||
Calendar defaultPurgeTime = calculateExpiration(archive, null);
|
||||
IOFileFilter fileDateFilter = FileFilterUtils.and(FileFilterUtils
|
||||
.fileFileFilter(), new FileDateFilter(null, defaultPurgeTime));
|
||||
for (String topDirName : topLevelDirsNotPurged) {
|
||||
File topLevelDir = new File(archiveRootDir, topDirName);
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
String message = String.format(
|
||||
"End setup of category date helpers for archive: %s.",
|
||||
archive.getName());
|
||||
statusHandler.debug(message);
|
||||
}
|
||||
|
||||
// Keep both top level hidden files and hidden directories.
|
||||
if (!topLevelDir.isHidden()) {
|
||||
purgeCount += purgeFile(topLevelDir, fileDateFilter);
|
||||
final Calendar minPurgeTime = calculateExpiration(archive, null);
|
||||
|
||||
IOFileFilter defaultTimeFilter = new IOFileFilter() {
|
||||
|
||||
@Override
|
||||
public boolean accept(File dir, String name) {
|
||||
File file = new File(dir, name);
|
||||
return accept(file);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean accept(File file) {
|
||||
Calendar time = TimeUtil.newGmtCalendar();
|
||||
time.setTimeInMillis(file.lastModified());
|
||||
return time.compareTo(minPurgeTime) < 0;
|
||||
}
|
||||
};
|
||||
|
||||
File[] topLevelFiles = archiveRootDir.listFiles();
|
||||
for (File topFile : topLevelFiles) {
|
||||
// In top level directory ignore all hidden files and directories.
|
||||
if (!topFile.isHidden()) {
|
||||
if (topFile.isDirectory()) {
|
||||
boolean isInCategory = false;
|
||||
for (CategoryConfig category : archive.getCategoryList()) {
|
||||
CategoryFileDateHelper helper = helperMap.get(category);
|
||||
|
||||
if (helper.isCategoryDirectory(topFile.getName())) {
|
||||
isInCategory = true;
|
||||
if (statusHandler.isPriorityEnabled(Priority.INFO)) {
|
||||
String message = String
|
||||
.format("Start purge of category %s - %s, directory \"%s\".",
|
||||
archive.getName(),
|
||||
category.getName(),
|
||||
topFile.getAbsolutePath());
|
||||
statusHandler.info(message);
|
||||
}
|
||||
|
||||
final Calendar extPurgeTime = calculateExpiration(
|
||||
archive, category);
|
||||
int pc = purgeDir(topFile, defaultTimeFilter,
|
||||
minPurgeTime, extPurgeTime, helper,
|
||||
category);
|
||||
purgeCount += pc;
|
||||
if (statusHandler.isPriorityEnabled(Priority.INFO)) {
|
||||
String message = String
|
||||
.format("End purge of category %s - %s, directory \"%s\", deleted %d files and directories.",
|
||||
archive.getName(),
|
||||
category.getName(),
|
||||
topFile.getAbsolutePath(), pc);
|
||||
statusHandler.info(message);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (isInCategory == false) {
|
||||
if (statusHandler.isPriorityEnabled(Priority.INFO)) {
|
||||
String message = String.format(
|
||||
"Start purge of directory: \"%s\".",
|
||||
topFile.getAbsolutePath());
|
||||
statusHandler.info(message);
|
||||
}
|
||||
int pc = purgeDir(topFile, defaultTimeFilter);
|
||||
purgeCount += pc;
|
||||
if (statusHandler.isPriorityEnabled(Priority.INFO)) {
|
||||
String message = String
|
||||
.format("End purge of directory: \"%s\", deleted %d files and directories.",
|
||||
topFile.getAbsolutePath(), pc);
|
||||
statusHandler.info(message);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (defaultTimeFilter.accept(topFile)) {
|
||||
purgeCount += deleteFile(topFile);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return purgeCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursive method for purging files. Never pass in a directory you do not
|
||||
* want deleted when purging makes it an empty directory.
|
||||
* Purge the contents of a directory of expired data leaving a possibly
|
||||
* empty directory.
|
||||
*
|
||||
* @param fileToPurge
|
||||
* @param filter
|
||||
* @return purgeCount number of files and directories purged
|
||||
* @param dir
|
||||
* @param defaultTimeFilter
|
||||
* @param minPurgeTime
|
||||
* @param extPurgeTime
|
||||
* @param helper
|
||||
* @return purgeCount
|
||||
*/
|
||||
private int purgeFile(File fileToPurge, IOFileFilter filter) {
|
||||
private int purgeDir(File dir, IOFileFilter defaultTimeFilter,
|
||||
Calendar minPurgeTime, Calendar extPurgeTime,
|
||||
CategoryFileDateHelper helper, CategoryConfig category) {
|
||||
int purgeCount = 0;
|
||||
|
||||
if (fileToPurge.isFile() && filter.accept(fileToPurge)) {
|
||||
if (fileToPurge.delete()) {
|
||||
++purgeCount;
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler.debug("Purged file: \""
|
||||
+ fileToPurge.getAbsolutePath() + "\"");
|
||||
}
|
||||
} else {
|
||||
statusHandler.warn("Failed to purge file: "
|
||||
+ fileToPurge.getAbsolutePath());
|
||||
}
|
||||
} else if (fileToPurge.isDirectory() && !fileToPurge.isHidden()) {
|
||||
// Purge only visible directories.
|
||||
File[] expiredFilesInDir = fileToPurge.listFiles();
|
||||
|
||||
for (File dirFile : expiredFilesInDir) {
|
||||
purgeCount += purgeFile(dirFile, filter);
|
||||
}
|
||||
|
||||
// Attempt to delete empty directory.
|
||||
if ((purgeCount >= expiredFilesInDir.length)
|
||||
&& (fileToPurge.list().length == 0)) {
|
||||
if (!fileToPurge.delete()) {
|
||||
statusHandler.warn("Failed to purge directory: "
|
||||
+ fileToPurge.getAbsolutePath());
|
||||
} else {
|
||||
++purgeCount;
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler.debug("Purged directory: \""
|
||||
+ fileToPurge.getAbsolutePath()
|
||||
+ File.separator + "\"");
|
||||
for (File file : dir.listFiles()) {
|
||||
if (!file.isHidden()) {
|
||||
DataSetStatus status = helper.getFileDate(file);
|
||||
if (status.isInDataSet()) {
|
||||
Collection<String> labels = category
|
||||
.getSelectedDisplayNames();
|
||||
boolean isSelected = false;
|
||||
for (String label : status.getDisplayLabels()) {
|
||||
if (labels.contains(label)) {
|
||||
isSelected = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Calendar checkTime = (isSelected ? extPurgeTime
|
||||
: minPurgeTime);
|
||||
Calendar fileTime = status.getTime();
|
||||
boolean purge = fileTime.compareTo(checkTime) < 0;
|
||||
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
String message = String
|
||||
.format("%s [%s] category [%s] %s retention [%s] checkTime [%s] = %s.",
|
||||
(file.isDirectory() ? "Directory"
|
||||
: "File"), file
|
||||
.getAbsoluteFile(), category
|
||||
.getName(), (isSelected ? "ext"
|
||||
: "min"), TimeUtil
|
||||
.formatCalendar(checkTime),
|
||||
TimeUtil.formatCalendar(fileTime),
|
||||
(purge ? "purge" : "retain"));
|
||||
statusHandler.debug(message);
|
||||
}
|
||||
|
||||
if (purge) {
|
||||
if (file.isDirectory()) {
|
||||
purgeCount += purgeDir(file,
|
||||
FileFilterUtils.trueFileFilter());
|
||||
if (file.list().length == 0) {
|
||||
purgeCount += purgeDir(file,
|
||||
FileFilterUtils.trueFileFilter());
|
||||
}
|
||||
} else {
|
||||
purgeCount += deleteFile(file);
|
||||
}
|
||||
}
|
||||
} else if (file.isDirectory()) {
|
||||
purgeCount += purgeDir(file, defaultTimeFilter,
|
||||
minPurgeTime, extPurgeTime, helper, category);
|
||||
if (file.list().length == 0) {
|
||||
purgeCount += deleteFile(file);
|
||||
}
|
||||
} else if (defaultTimeFilter.accept(file)) {
|
||||
purgeCount += deleteFile(file);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return purgeCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursively purge the contents of a directory based on the filter. The
|
||||
* directory in the initial call is not deleted. This may result in an empty
|
||||
* directory which is the desired result for top level directories.
|
||||
*
|
||||
*
|
||||
* @param dir
|
||||
* @param fileDataFilter
|
||||
* @return purgeCount
|
||||
*/
|
||||
private int purgeDir(File dir, IOFileFilter fileDataFilter) {
|
||||
int purgeCount = 0;
|
||||
for (File file : dir.listFiles()) {
|
||||
if (!file.isHidden()) {
|
||||
if (file.isDirectory()) {
|
||||
purgeCount += purgeDir(file, fileDataFilter);
|
||||
if (file.list().length == 0) {
|
||||
purgeCount += deleteFile(file);
|
||||
}
|
||||
} else if (fileDataFilter.accept(file)) {
|
||||
purgeCount += deleteFile(file);
|
||||
}
|
||||
}
|
||||
}
|
||||
return purgeCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete a file or directory.
|
||||
*
|
||||
* @param file
|
||||
* @return purgeCount
|
||||
*/
|
||||
private int deleteFile(File file) {
|
||||
int purgeCount = 0;
|
||||
boolean isDir = file.isDirectory();
|
||||
if (file.delete()) {
|
||||
++purgeCount;
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler
|
||||
.debug(String.format("Purged %s: \"%s\"",
|
||||
(isDir ? "directory" : "file"),
|
||||
file.getAbsolutePath()));
|
||||
}
|
||||
} else {
|
||||
statusHandler.warn(String.format("Failed to purge %s: \"%s\"",
|
||||
(isDir ? "directory" : "file"), file.getAbsolutePath()));
|
||||
}
|
||||
return purgeCount;
|
||||
}
|
||||
|
||||
|
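The purgeDir logic above chooses between two cutoffs per file: data sets whose display label is among the category's selected names keep the category's extended retention, and everything else falls back to the archive minimum. A small sketch of that decision, assuming the accessors used above (DataSetStatus.getDisplayLabels() and getTime(), CategoryConfig.getSelectedDisplayNames()):

private boolean shouldPurge(DataSetStatus status, CategoryConfig category,
        Calendar minPurgeTime, Calendar extPurgeTime) {
    Collection<String> selected = category.getSelectedDisplayNames();
    boolean isSelected = false;
    for (String label : status.getDisplayLabels()) {
        if (selected.contains(label)) {
            isSelected = true;
            break;
        }
    }
    // selected data keeps the longer (extended) retention period
    Calendar cutoff = isSelected ? extPurgeTime : minPurgeTime;
    return status.getTime().compareTo(cutoff) < 0;
}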
@ -644,39 +791,60 @@ public class ArchiveConfigManager {
|
|||
* @param categoryConfig
|
||||
* @return dirs
|
||||
*/
|
||||
private List<File> getDirs(File rootFile, CategoryDataSet dataSet) {
|
||||
List<File> resultDirs = new ArrayList<File>();
|
||||
private Map<CategoryDataSet, List<File>> getDirs(File rootFile,
|
||||
CategoryConfig categoryConfig) {
|
||||
List<File> resultDirs = null;
|
||||
List<File> dirs = new ArrayList<File>();
|
||||
List<File> tmpDirs = new ArrayList<File>();
|
||||
List<File> swpDirs = null;
|
||||
List<CategoryDataSet> dataSets = categoryConfig.getDataSetList();
|
||||
Map<CategoryDataSet, List<File>> rval = new HashMap<CategoryDataSet, List<File>>(
|
||||
dataSets.size(), 1);
|
||||
|
||||
for (String dirPattern : dataSet.getDirPatterns()) {
|
||||
String[] subExpr = dirPattern.split(File.separator);
|
||||
dirs.clear();
|
||||
dirs.add(rootFile);
|
||||
tmpDirs.clear();
|
||||
// keep an in memory map since some of the categories cause the same
|
||||
// directories to be listed over and over
|
||||
Map<File, List<File>> polledDirs = new HashMap<File, List<File>>();
|
||||
|
||||
for (String regex : subExpr) {
|
||||
Pattern subPattern = Pattern.compile("^" + regex + "$");
|
||||
IOFileFilter filter = FileFilterUtils
|
||||
.makeDirectoryOnly(new RegexFileFilter(subPattern));
|
||||
for (CategoryDataSet dataSet : dataSets) {
|
||||
resultDirs = new LinkedList<File>();
|
||||
|
||||
for (File dir : dirs) {
|
||||
File[] list = dir.listFiles();
|
||||
if (list != null) {
|
||||
List<File> dirList = Arrays.asList(list);
|
||||
tmpDirs.addAll(Arrays.asList(FileFilterUtils.filter(
|
||||
filter, dirList)));
|
||||
}
|
||||
}
|
||||
swpDirs = dirs;
|
||||
dirs = tmpDirs;
|
||||
tmpDirs = swpDirs;
|
||||
for (String dirPattern : dataSet.getDirPatterns()) {
|
||||
String[] subExpr = dirPattern.split(File.separator);
|
||||
dirs.clear();
|
||||
dirs.add(rootFile);
|
||||
tmpDirs.clear();
|
||||
|
||||
for (String regex : subExpr) {
|
||||
Pattern subPattern = Pattern.compile("^" + regex + "$");
|
||||
IOFileFilter filter = FileFilterUtils
|
||||
.makeDirectoryOnly(new RegexFileFilter(subPattern));
|
||||
|
||||
for (File dir : dirs) {
|
||||
List<File> dirList = polledDirs.get(dir);
|
||||
if (dirList == null) {
|
||||
File[] list = dir.listFiles();
|
||||
dirList = Arrays.asList(list);
|
||||
polledDirs.put(dir, dirList);
|
||||
}
|
||||
|
||||
if (dirList != null) {
|
||||
tmpDirs.addAll(FileFilterUtils.filterList(filter,
|
||||
dirList));
|
||||
}
|
||||
}
|
||||
|
||||
swpDirs = dirs;
|
||||
dirs = tmpDirs;
|
||||
tmpDirs = swpDirs;
|
||||
tmpDirs.clear();
|
||||
}
|
||||
|
||||
resultDirs.addAll(dirs);
|
||||
}
|
||||
resultDirs.addAll(dirs);
|
||||
rval.put(dataSet, resultDirs);
|
||||
}
|
||||
return resultDirs;
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
/**
|
||||
|
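The reworked getDirs walks each dirPattern one path segment at a time, treating every segment as an anchored regex, and caches directory listings so data sets that share the same top levels do not re-list them. A simplified sketch of one such walk; the pattern "grib2/(\d{8})/(\d{2})" is only an illustrative example, not taken from the shipped configuration:

private List<File> walk(File root, String dirPattern, Map<File, List<File>> cache) {
    List<File> current = new ArrayList<File>();
    current.add(root);
    for (String regex : dirPattern.split(File.separator)) {
        Pattern p = Pattern.compile("^" + regex + "$");
        List<File> next = new ArrayList<File>();
        for (File dir : current) {
            List<File> children = cache.get(dir); // memoized listing
            if (children == null) {
                File[] listed = dir.listFiles();
                children = (listed == null) ? Collections.<File> emptyList()
                        : Arrays.asList(listed);
                cache.put(dir, children);
            }
            for (File child : children) {
                if (child.isDirectory() && p.matcher(child.getName()).matches()) {
                    next.add(child);
                }
            }
        }
        current = next;
    }
    return current; // directories matching the full pattern
}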
@ -701,10 +869,11 @@ public class ArchiveConfigManager {
|
|||
categoryName);
|
||||
File rootFile = new File(rootDirName);
|
||||
TreeMap<String, DisplayData> displays = new TreeMap<String, DisplayData>();
|
||||
Map<CategoryDataSet, List<File>> dirMap = getDirs(rootFile,
|
||||
categoryConfig);
|
||||
for (CategoryDataSet dataSet : categoryConfig.getDataSetList()) {
|
||||
List<String> dataSetDirPatterns = dataSet.getDirPatterns();
|
||||
|
||||
List<File> dirs = getDirs(rootFile, dataSet);
|
||||
List<File> dirs = dirMap.get(dataSet);
|
||||
|
||||
int beginIndex = rootFile.getAbsolutePath().length() + 1;
|
||||
List<Pattern> patterns = new ArrayList<Pattern>(
@ -43,7 +43,8 @@ import com.raytheon.uf.common.time.util.TimeUtil;
|
|||
* ------------ ---------- ----------- --------------------------
|
||||
* Aug 6, 2013 #2224 rferrel Initial creation
|
||||
* Oct 02, 2013 #2147 rferrel Allow Date to ignore hour in time stamp.
|
||||
*
|
||||
* Dec 10, 2013 #2624 rferrel Added Julian date.
|
||||
* Dec 17, 2013 2603 rjpeter Clear low order time fields on time generation.
|
||||
* </pre>
|
||||
*
|
||||
* @author rferrel
|
||||
|
@ -52,22 +53,26 @@ import com.raytheon.uf.common.time.util.TimeUtil;
|
|||
@XmlAccessorType(XmlAccessType.NONE)
|
||||
@XmlRootElement(name = "dataSet")
|
||||
public class CategoryDataSet {
|
||||
public static final int YEAR_INDEX = 0;
|
||||
private static final int YEAR_INDEX = 0;
|
||||
|
||||
public static final int MONTH_INDEX = 1;
|
||||
private static final int MONTH_INDEX = 1;
|
||||
|
||||
public static final int DAY_INDEX = 2;
|
||||
private static final int DAY_OF_YEAR_INDEX = 1;
|
||||
|
||||
public static final int HOUR_INDEX = 3;
|
||||
private static final int DAY_INDEX = 2;
|
||||
|
||||
public static final int TIMESTAMP_INDEX = 0;
|
||||
private static final int JULIAN_HOUR_INDEX = 2;
|
||||
|
||||
private static final int HOUR_INDEX = 3;
|
||||
|
||||
private static final int TIMESTAMP_INDEX = 0;
|
||||
|
||||
/**
|
||||
* Types of times and the number of indices for getting the time stamp from
|
||||
* patterns.
|
||||
*/
|
||||
public static enum TimeType {
|
||||
Date(4), EpochSec(1), EpochMS(1), File(0);
|
||||
Date(4), EpochSec(1), EpochMS(1), File(0), Julian(3);
|
||||
|
||||
private final int numIndices;
|
||||
|
||||
|
@ -199,7 +204,8 @@ public class CategoryDataSet {
|
|||
* @return true when only the dirPatterns should be used.
|
||||
*/
|
||||
public boolean isDirOnly() {
|
||||
return filePattern == null || filePattern.equals(".*");
|
||||
return (filePattern == null) || (filePattern.length() == 0)
|
||||
|| ".*".equals(filePattern);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -249,6 +255,7 @@ public class CategoryDataSet {
|
|||
}
|
||||
|
||||
fileCal.set(year, month, day, hour, 0, 0);
|
||||
fileCal.set(Calendar.MILLISECOND, 0);
|
||||
fileTime = fileCal.getTimeInMillis();
|
||||
break;
|
||||
case EpochMS:
|
||||
|
@ -263,6 +270,42 @@ public class CategoryDataSet {
|
|||
case File:
|
||||
fileTime = null;
|
||||
break;
|
||||
case Julian:
|
||||
Calendar julainCal = TimeUtil.newGmtCalendar();
|
||||
int jYear = Integer.parseInt(matcher
|
||||
.group(timeIndices[CategoryDataSet.YEAR_INDEX]));
|
||||
int jDay = Integer.parseInt(matcher
|
||||
.group(timeIndices[CategoryDataSet.DAY_OF_YEAR_INDEX]));
|
||||
|
||||
// When the year has only two digits, determine the century.
|
||||
if (jYear < 100) {
|
||||
int cYear = julainCal.get(Calendar.YEAR);
|
||||
jYear += (cYear - (cYear % 100));
|
||||
julainCal.add(Calendar.YEAR, 1);
|
||||
int nextYear = julainCal.get(Calendar.YEAR);
|
||||
|
||||
// If date too far into the future back up a century.
|
||||
if ((jYear > nextYear) || ((jYear == nextYear) && (jDay > 31))) {
|
||||
jYear -= 100;
|
||||
}
|
||||
}
|
||||
|
||||
julainCal.set(Calendar.YEAR, jYear);
|
||||
julainCal.set(Calendar.DAY_OF_YEAR, jDay);
|
||||
|
||||
// Default to last hour of the day.
|
||||
int jHour = 23;
|
||||
if (timeIndices[CategoryDataSet.JULIAN_HOUR_INDEX] >= 0) {
|
||||
jHour = Integer.parseInt(matcher
|
||||
.group(timeIndices[CategoryDataSet.JULIAN_HOUR_INDEX]));
|
||||
}
|
||||
julainCal.set(Calendar.HOUR_OF_DAY, jHour);
|
||||
julainCal.set(Calendar.MINUTE, 0);
|
||||
julainCal.set(Calendar.SECOND, 0);
|
||||
julainCal.set(Calendar.MILLISECOND, 0);
|
||||
fileTime = julainCal.getTimeInMillis();
|
||||
break;
|
||||
|
||||
default:
|
||||
fileTime = null;
|
||||
break;
|
||||
|
|
|
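The Julian case above has to guess a century for two-digit years. It first assumes the current century, then backs up one century if that would put the date more than roughly a year in the future. A sketch of just that rule, assuming a GMT "now" calendar:

private int resolveJulianYear(int twoDigitYear, int dayOfYear, Calendar now) {
    int year = twoDigitYear;
    if (year < 100) {
        int currentYear = now.get(Calendar.YEAR);
        year += currentYear - (currentYear % 100); // place in the current century
        int nextYear = currentYear + 1;
        // too far into the future: back up a century
        if ((year > nextYear) || ((year == nextYear) && (dayOfYear > 31))) {
            year -= 100;
        }
    }
    return year;
}

For example, run late in 2013 a year of "98" resolves to 1998, while "14" with a day-of-year of 10 resolves to 2014.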
@ -20,14 +20,14 @@
|
|||
package com.raytheon.uf.common.archive.config;
|
||||
|
||||
import java.io.File;
|
||||
import java.text.FieldPosition;
|
||||
import java.text.MessageFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.io.FilenameUtils;
|
||||
|
||||
import com.raytheon.uf.common.time.util.TimeUtil;
|
||||
|
||||
/**
|
||||
|
@ -42,7 +42,8 @@ import com.raytheon.uf.common.time.util.TimeUtil;
|
|||
* Jun 21, 2013 1965 bgonzale Initial creation
|
||||
* Aug 03, 2013 2224 rferrel Changes for new configuration files.
|
||||
* Aug 28, 2013 2299 rferrel Changes in IFileDateHelper.
|
||||
*
|
||||
* Dec 04, 2013 2603 rferrel Changes to improve archive purging.
|
||||
* Dec 17, 2013 2603 rjpeter Fix file data pattern matching.
|
||||
* </pre>
|
||||
*
|
||||
* @author bgonzale
|
||||
|
@ -54,16 +55,27 @@ public class CategoryFileDateHelper implements IFileDateHelper {
|
|||
* Date information derived from each of a Category's dirPatterns.
|
||||
*/
|
||||
private static class CategoryDateInfo {
|
||||
/** Always use the same field position. */
|
||||
private static final FieldPosition pos0 = new FieldPosition(0);
|
||||
|
||||
/** Pattern used to get the date. */
|
||||
private final Pattern datePattern;
|
||||
|
||||
/** Pattern for getting top level directories. */
|
||||
private final Pattern categoryTopLevelDirPattern;
|
||||
|
||||
/** The type of time stamp being used. */
|
||||
private final CategoryDataSet.TimeType timeType;
|
||||
|
||||
private final boolean isDirOnly;
|
||||
|
||||
/** Indices in the pattern group used to get the time stamp. */
|
||||
private final int[] timeIndices;
|
||||
|
||||
/** The format used to get the display label. */
|
||||
private final String displayLabelFormat;
|
||||
|
||||
/** Formatter used to get display label. */
|
||||
private final MessageFormat msgfmt;
|
||||
|
||||
/**
|
||||
* Initialization constructor.
|
||||
*
|
||||
|
@ -73,24 +85,46 @@ public class CategoryFileDateHelper implements IFileDateHelper {
|
|||
* @param monthIndex
|
||||
* @param dayIndex
|
||||
* @param hourIndex
|
||||
* @param displayLabelFormat
|
||||
*/
|
||||
public CategoryDateInfo(Pattern datePattern,
|
||||
Pattern categoryTopLevelDirPattern,
|
||||
CategoryDataSet.TimeType timeType, boolean isDirOnly,
|
||||
int[] timeIndices) {
|
||||
CategoryDataSet.TimeType timeType, int[] timeIndices,
|
||||
String displayLabelFormat) {
|
||||
this.datePattern = datePattern;
|
||||
this.categoryTopLevelDirPattern = categoryTopLevelDirPattern;
|
||||
this.timeType = timeType;
|
||||
this.isDirOnly = isDirOnly;
|
||||
this.timeIndices = timeIndices;
|
||||
this.displayLabelFormat = displayLabelFormat;
|
||||
if (displayLabelFormat != null) {
|
||||
this.msgfmt = new MessageFormat(this.displayLabelFormat);
|
||||
} else {
|
||||
this.msgfmt = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the display label from the matcher. This assumes the matcher is a
|
||||
* pattern match for the date pattern.
|
||||
*
|
||||
* @param matcher
|
||||
* @return label
|
||||
*/
|
||||
public String getDisplayLabel(Matcher matcher) {
|
||||
// Unable to use StringBuilder with MessageFormat.
|
||||
StringBuffer sb = new StringBuffer();
|
||||
String[] args = new String[matcher.groupCount() + 1];
|
||||
args[0] = matcher.group();
|
||||
for (int i = 1; i < args.length; ++i) {
|
||||
args[i] = matcher.group(i);
|
||||
}
|
||||
String label = msgfmt.format(args, sb, pos0).toString();
|
||||
return label;
|
||||
}
|
||||
}
|
||||
|
||||
private final List<CategoryDateInfo> dateInfoList;
|
||||
|
||||
private final String rootDir;
|
||||
|
||||
/**
|
||||
* Initialization constructor.
|
||||
*
|
||||
|
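getDisplayLabel feeds the regex capture groups into a MessageFormat, with {0} bound to the whole match and {1}, {2}, ... to the individual groups. A standalone illustration; the pattern, sample path, and label format here are hypothetical, not values from the shipped category configuration:

Pattern datePattern = Pattern.compile("(\\w+)/(\\d{4})(\\d{2})(\\d{2})");
Matcher matcher = datePattern.matcher("metar/20131217");
if (matcher.matches()) {
    String[] args = new String[matcher.groupCount() + 1];
    args[0] = matcher.group();          // {0} = whole match
    for (int i = 1; i < args.length; ++i) {
        args[i] = matcher.group(i);     // {1}..{n} = capture groups
    }
    // a displayLabel of "{1} - {2}-{3}-{4}" yields "metar - 2013-12-17"
    String label = new MessageFormat("{1} - {2}-{3}-{4}").format(args);
}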
@ -98,8 +132,7 @@ public class CategoryFileDateHelper implements IFileDateHelper {
|
|||
* @param rootDirPattern
|
||||
* categoryTopLevelDirPattern
|
||||
*/
|
||||
public CategoryFileDateHelper(CategoryConfig config, String rootDir) {
|
||||
this.rootDir = rootDir;
|
||||
public CategoryFileDateHelper(CategoryConfig config) {
|
||||
List<CategoryDataSet> categoryDataSetList = config.getDataSetList();
|
||||
int size = 0;
|
||||
for (CategoryDataSet dataSet : categoryDataSetList) {
|
||||
|
@ -109,26 +142,26 @@ public class CategoryFileDateHelper implements IFileDateHelper {
|
|||
this.dateInfoList = new ArrayList<CategoryFileDateHelper.CategoryDateInfo>(
|
||||
size);
|
||||
|
||||
boolean isDirOnly;
|
||||
CategoryDataSet.TimeType timeType;
|
||||
for (CategoryDataSet dataSet : categoryDataSetList) {
|
||||
isDirOnly = dataSet.isDirOnly();
|
||||
timeType = dataSet.getTimeType();
|
||||
|
||||
for (String patternString : dataSet.getDirPatterns()) {
|
||||
Pattern datePattern = dataSet.getPattern(patternString);
|
||||
int dirSeparatorIndex = patternString
|
||||
.indexOf(File.separatorChar);
|
||||
patternString = dirSeparatorIndex > patternString.length()
|
||||
|| dirSeparatorIndex < 0 ? patternString
|
||||
patternString = (dirSeparatorIndex > patternString.length())
|
||||
|| (dirSeparatorIndex < 0) ? patternString
|
||||
: patternString.substring(0, dirSeparatorIndex);
|
||||
Pattern categoryTopLevelDirPattern = Pattern
|
||||
.compile(patternString);
|
||||
int[] timeIndices = dataSet.getTimeIndices();
|
||||
|
||||
String displayLabelFormat = dataSet.getDisplayLabel();
|
||||
|
||||
dateInfoList.add(new CategoryDateInfo(datePattern,
|
||||
categoryTopLevelDirPattern, timeType, isDirOnly,
|
||||
timeIndices));
|
||||
categoryTopLevelDirPattern, timeType, timeIndices,
|
||||
displayLabelFormat));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -141,26 +174,19 @@ public class CategoryFileDateHelper implements IFileDateHelper {
|
|||
* .io.File)
|
||||
*/
|
||||
@Override
|
||||
public Calendar getFileDate(File file) {
|
||||
public DataSetStatus getFileDate(File file) {
|
||||
String filenamePath = file.getAbsolutePath();
|
||||
String pathForFilePatternCheck = filenamePath.substring(rootDir
|
||||
.length());
|
||||
String pathForDirPatternCheck = FilenameUtils
|
||||
.getFullPathNoEndSeparator(pathForFilePatternCheck);
|
||||
Calendar result = null;
|
||||
Long timestamp = null;
|
||||
DataSetStatus result = new DataSetStatus(file);
|
||||
|
||||
for (CategoryDateInfo dateInfo : dateInfoList) {
|
||||
Matcher matcher = null;
|
||||
if (dateInfo.isDirOnly) {
|
||||
matcher = dateInfo.datePattern.matcher(pathForDirPatternCheck);
|
||||
} else {
|
||||
matcher = dateInfo.datePattern.matcher(pathForFilePatternCheck);
|
||||
}
|
||||
Matcher matcher = dateInfo.datePattern.matcher(filenamePath);
|
||||
|
||||
if (matcher.matches()) {
|
||||
timestamp = CategoryDataSet.getMatchTimeInMilliseconds(
|
||||
dateInfo.timeType, dateInfo.timeIndices, matcher);
|
||||
result.setInDataSet(true);
|
||||
result.addDisplayLabel(dateInfo.getDisplayLabel(matcher));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -170,11 +196,9 @@ public class CategoryFileDateHelper implements IFileDateHelper {
|
|||
timestamp = file.lastModified();
|
||||
}
|
||||
|
||||
// TODO future speed improvement refactor IFileDateHelper to have a
|
||||
// method that returns a long instead of Calendar. That will prevent
|
||||
// converting Calendar to long then back to a Calendar.
|
||||
result = TimeUtil.newGmtCalendar();
|
||||
result.setTimeInMillis(timestamp);
|
||||
Calendar time = TimeUtil.newGmtCalendar();
|
||||
time.setTimeInMillis(timestamp);
|
||||
result.setTime(time);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,128 @@
|
|||
/**
|
||||
* This software was developed and / or modified by Raytheon Company,
|
||||
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
*
|
||||
* U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
* This software product contains export-restricted data whose
|
||||
* export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
* to non-U.S. persons whether in the United States or abroad requires
|
||||
* an export license or other authorization.
|
||||
*
|
||||
* Contractor Name: Raytheon Company
|
||||
* Contractor Address: 6825 Pine Street, Suite 340
|
||||
* Mail Stop B8
|
||||
* Omaha, NE 68106
|
||||
* 402.291.0100
|
||||
*
|
||||
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
* further licensing information.
|
||||
**/
|
||||
package com.raytheon.uf.common.archive.config;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* This class is used by IFileDateHelper to contain additional information about a
|
||||
* file.
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Dec 4, 2013 2603 rferrel Initial creation
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
* @author rferrel
|
||||
* @version 1.0
|
||||
*/
|
||||
|
||||
public class DataSetStatus {
|
||||
|
||||
/** The file the status is for. */
|
||||
private final File file;
|
||||
|
||||
/** Set to true when the file is contained in a data set. */
|
||||
private boolean inDataSet = false;
|
||||
|
||||
private final List<String> displayLabels = new ArrayList<String>(1);
|
||||
|
||||
/** The file's time based on IFileDateHelper. */
|
||||
private Calendar time = null;
|
||||
|
||||
/**
|
||||
* The constructor with default values set.
|
||||
*
|
||||
* @param file
|
||||
* should not be null.
|
||||
*/
|
||||
DataSetStatus(File file) {
|
||||
this.file = file;
|
||||
}
|
||||
|
||||
/**
|
||||
* The file the information is for.
|
||||
*
|
||||
* @return file
|
||||
*/
|
||||
public File getFile() {
|
||||
return file;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @return true when file is in a data set.
|
||||
*/
|
||||
public boolean isInDataSet() {
|
||||
return inDataSet;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set data set status.
|
||||
*
|
||||
* @param inDataSet
|
||||
*/
|
||||
public void setInDataSet(boolean inDataSet) {
|
||||
this.inDataSet = inDataSet;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @return display labels; non-empty only when the file is in a data set.
|
||||
*/
|
||||
public List<String> getDisplayLabels() {
|
||||
return displayLabels;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a display label for the file.
*
* @param displayLabel
|
||||
*/
|
||||
public void addDisplayLabel(String displayLabel) {
|
||||
this.displayLabels.add(displayLabel);
|
||||
}
|
||||
|
||||
/**
|
||||
* The file's time
|
||||
*
|
||||
* @return time
|
||||
*/
|
||||
public Calendar getTime() {
|
||||
return time;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the file's time.
|
||||
*
|
||||
* @param time
|
||||
*/
|
||||
public void setTime(Calendar time) {
|
||||
this.time = time;
|
||||
}
|
||||
}
|
|
@ -1,127 +0,0 @@
|
|||
/**
|
||||
* This software was developed and / or modified by Raytheon Company,
|
||||
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
*
|
||||
* U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
* This software product contains export-restricted data whose
|
||||
* export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
* to non-U.S. persons whether in the United States or abroad requires
|
||||
* an export license or other authorization.
|
||||
*
|
||||
* Contractor Name: Raytheon Company
|
||||
* Contractor Address: 6825 Pine Street, Suite 340
|
||||
* Mail Stop B8
|
||||
* Omaha, NE 68106
|
||||
* 402.291.0100
|
||||
*
|
||||
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
* further licensing information.
|
||||
**/
|
||||
package com.raytheon.uf.common.archive.config;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.Calendar;
|
||||
|
||||
import org.apache.commons.io.FilenameUtils;
|
||||
import org.apache.commons.io.filefilter.IOFileFilter;
|
||||
|
||||
import com.raytheon.uf.common.time.util.TimeUtil;
|
||||
|
||||
/**
|
||||
* Filter files based on a file date parsed using the given file date helper.
|
||||
* Accept returns true for files that fall between the Start and End times. If
|
||||
* start is null, then all after start checks will return true. If end is null,
|
||||
* then all before end checks will return true.
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Jun 18, 2013 1965 bgonzale Initial creation
|
||||
* Aug 28, 2013 2299 rferrel Reject hidden directories.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
* @author bgonzale
|
||||
* @version 1.0
|
||||
*/
|
||||
|
||||
public class FileDateFilter implements IOFileFilter {
|
||||
|
||||
private IFileDateHelper helper;
|
||||
|
||||
private final Calendar start;
|
||||
|
||||
private final Calendar end;
|
||||
|
||||
/**
|
||||
* Initialization constructor. This filter uses file last modified time as
|
||||
* the filter time.
|
||||
*
|
||||
* @param startDate
|
||||
* @param endDate
|
||||
*/
|
||||
public FileDateFilter(Calendar start, Calendar end) {
|
||||
this(start, end, DEFAULT_FILE_DATE_HELPER);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialization constructor.
|
||||
*
|
||||
* @param startDate
|
||||
* @param endDate
|
||||
* @param helper
|
||||
*/
|
||||
public FileDateFilter(Calendar start, Calendar end, IFileDateHelper helper) {
|
||||
this.helper = helper == null ? DEFAULT_FILE_DATE_HELPER : helper;
|
||||
this.start = start;
|
||||
this.end = end;
|
||||
}
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
* @see org.apache.commons.io.filefilter.IOFileFilter#accept(java.io.File)
|
||||
*/
|
||||
@Override
|
||||
public boolean accept(File file) {
|
||||
String filePath = file.getAbsolutePath();
|
||||
String dirName = FilenameUtils.getFullPath(filePath);
|
||||
String fileName = FilenameUtils.getName(filePath);
|
||||
return accept(new File(dirName), fileName);
|
||||
}
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
* @see org.apache.commons.io.filefilter.IOFileFilter#accept(java.io.File,
|
||||
* java.lang.String)
|
||||
*/
|
||||
@Override
|
||||
public boolean accept(File dir, String name) {
|
||||
File file = new File(dir, name);
|
||||
Calendar fileDate = helper.getFileDate(file);
|
||||
boolean isAfterEqualsStart = start == null || fileDate.after(start)
|
||||
|| fileDate.equals(start);
|
||||
boolean isBeforeEqualsEnd = end == null || fileDate.before(end)
|
||||
|| fileDate.equals(end);
|
||||
return isAfterEqualsStart && isBeforeEqualsEnd;
|
||||
}
|
||||
|
||||
/**
|
||||
* This File Date helper returns a file's last modified time.
|
||||
*/
|
||||
private static final IFileDateHelper DEFAULT_FILE_DATE_HELPER = new IFileDateHelper() {
|
||||
@Override
|
||||
public Calendar getFileDate(File file) {
|
||||
// use file last modified date
|
||||
long lastModifiedMillis = file.lastModified();
|
||||
Calendar result = TimeUtil.newGmtCalendar();
|
||||
result.setTimeInMillis(lastModifiedMillis);
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
}
|
|
@ -20,7 +20,6 @@
|
|||
package com.raytheon.uf.common.archive.config;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.Calendar;
|
||||
|
||||
/**
|
||||
* Helper to get a file last modification date.
|
||||
|
@ -33,7 +32,8 @@ import java.util.Calendar;
|
|||
* ------------ ---------- ----------- --------------------------
|
||||
* Jun 21, 2013 bgonzale Initial creation
|
||||
* Aug 28, 2013 2299 rferrel Change getFileDate argument.
|
||||
*
|
||||
* Dec 04, 2013 2603 rferrel Changes to improve archive purging.
|
||||
* Dec 17, 2013 2603 rjpeter Clean up imports.
|
||||
* </pre>
|
||||
*
|
||||
* @author bgonzale
|
||||
|
@ -48,6 +48,6 @@ public interface IFileDateHelper {
|
|||
* @param file
|
||||
* @return calendar
|
||||
*/
|
||||
public Calendar getFileDate(File file);
|
||||
public DataSetStatus getFileDate(File file);
|
||||
|
||||
}
@ -20,7 +20,9 @@
|
|||
package com.raytheon.uf.common.archive.config;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
|
@ -40,6 +42,7 @@ import com.raytheon.uf.common.archive.config.select.CategorySelect;
|
|||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Jul 19, 2013 2221 rferrel Initial creation
|
||||
* Dec 11, 2013 2603 rferrel Make selections a set.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -113,24 +116,24 @@ public class SelectConfig {
|
|||
}
|
||||
|
||||
/**
|
||||
* Get a list of selected display names for the archive and its category.
|
||||
* Get a set of selected display names for the archive and its category.
|
||||
*
|
||||
* @param archiveName
|
||||
* @param categoryName
|
||||
* @return displayLabelList may be an empty list.
|
||||
*/
|
||||
public List<String> getSelectedList(String archiveName, String categoryName) {
|
||||
public Set<String> getSelectedSet(String archiveName, String categoryName) {
|
||||
ArchiveSelect archiveSelect = getArchive(archiveName);
|
||||
if (archiveSelect == null || archiveSelect.isEmpty()) {
|
||||
return new ArrayList<String>(0);
|
||||
return new HashSet<String>(0);
|
||||
}
|
||||
CategorySelect categorySelect = getCategorySelect(categoryName,
|
||||
archiveSelect);
|
||||
if (categorySelect == null || categorySelect.isEmpty()) {
|
||||
return new ArrayList<String>(0);
|
||||
return new HashSet<String>(0);
|
||||
}
|
||||
|
||||
List<String> selected = categorySelect.getSelectList();
|
||||
Set<String> selected = categorySelect.getSelectSet();
|
||||
|
||||
return selected;
|
||||
}
@ -20,7 +20,9 @@
|
|||
package com.raytheon.uf.common.archive.config.select;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAccessType;
|
||||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
|
@ -38,6 +40,7 @@ import javax.xml.bind.annotation.XmlRootElement;
|
|||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Jul 19, 2013 2221 rferrel Initial creation
|
||||
* Dec 11, 2013 2603 rferrel Selected now a Set.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -57,7 +60,7 @@ public class CategorySelect {
|
|||
* List of selected labels.
|
||||
*/
|
||||
@XmlElement(name = "selectedDisplayName")
|
||||
private final List<String> selectList = new ArrayList<String>();
|
||||
private final Set<String> selectSet = new HashSet<String>();
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
|
@ -67,21 +70,21 @@ public class CategorySelect {
|
|||
this.name = name;
|
||||
}
|
||||
|
||||
public List<String> getSelectList() {
|
||||
return selectList;
|
||||
public Set<String> getSelectSet() {
|
||||
return selectSet;
|
||||
}
|
||||
|
||||
public void setSelectList(List<String> selectList) {
|
||||
this.selectList.clear();
|
||||
this.selectList.addAll(selectList);
|
||||
public void setSelectSet(Set<String> selectList) {
|
||||
this.selectSet.clear();
|
||||
this.selectSet.addAll(selectList);
|
||||
}
|
||||
|
||||
public void add(String displayName) {
|
||||
selectList.add(displayName);
|
||||
selectSet.add(displayName);
|
||||
}
|
||||
|
||||
public boolean isEmpty() {
|
||||
return selectList.isEmpty();
|
||||
return selectSet.isEmpty();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -89,7 +92,7 @@ public class CategorySelect {
|
|||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("CategorySelect [ name: ").append(getName());
|
||||
sb.append("[ ");
|
||||
for (String select : getSelectList()) {
|
||||
for (String select : getSelectSet()) {
|
||||
sb.append("\"").append(select).append("\", ");
|
||||
}
|
||||
sb.append("]");
@ -3,9 +3,7 @@
|
|||
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
|
||||
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">
|
||||
|
||||
<bean id="dataArchiver" class="com.raytheon.uf.edex.archive.DataArchiver">
|
||||
<constructor-arg value="/archive"/>
|
||||
</bean>
|
||||
<bean id="dataArchiver" class="com.raytheon.uf.edex.archive.DataArchiver"/>
|
||||
|
||||
<bean id="databaseArchiver" class="com.raytheon.uf.edex.archive.DatabaseArchiver"/>
|
||||
|
||||
|
|
|
@ -2,12 +2,15 @@
|
|||
archive.enable=true
|
||||
# runs database and hdf5 archive for archive server to pull data from
|
||||
archive.cron=0+40+*+*+*+?
|
||||
# path to store processed archive data
|
||||
archive.path=/archive
|
||||
|
||||
# enable archive purge
|
||||
archive.purge.enable=true
|
||||
# purge archives
|
||||
archive.purge.cron=0+5+0/3+*+*+?
|
||||
# when to purge archives
|
||||
archive.purge.cron=0+5+0/2+*+*+?
|
||||
# compress database records
|
||||
archive.compression.enable=true
|
||||
archive.compression.enable=false
|
||||
|
||||
# to disable a specific archive, use property archive.disable=pluginName,pluginName...
|
||||
#archive.disable=grid,text,acars
|
|
@ -45,6 +45,7 @@ import com.raytheon.uf.edex.core.dataplugin.PluginRegistry;
|
|||
* ------------ ---------- ----------- --------------------------
|
||||
* Dec 16, 2011 rjpeter Initial creation
|
||||
* Nov 05, 2013 2499 rjpeter Repackaged, updated to use System properties.
|
||||
* Dec 11, 2013 2555 rjpeter archivePath overridable via System properties.
|
||||
* </pre>
|
||||
*
|
||||
* @author rjpeter
|
||||
|
@ -60,6 +61,8 @@ public class DataArchiver {
|
|||
// allows for disabling of specific plugins if desired
|
||||
private final static String DISABLE_PROPERTY = "archive.disable";
|
||||
|
||||
private final static String PATH_PROPERTY = "archive.path";
|
||||
|
||||
private final boolean ARCHIVING_ENABLED;
|
||||
|
||||
private final Set<String> DISABLED_PLUGINS;
|
||||
|
@ -68,10 +71,9 @@ public class DataArchiver {
|
|||
|
||||
private final List<IDataArchiver> dataArchivers = new LinkedList<IDataArchiver>();
|
||||
|
||||
private String archivePath = null;
|
||||
private final String archivePath;
|
||||
|
||||
public DataArchiver(String archivePath) {
|
||||
this.archivePath = archivePath;
|
||||
public DataArchiver() {
|
||||
ARCHIVING_ENABLED = Boolean.getBoolean(ENABLE_PROPERTY);
|
||||
String disabledPluginList = System.getProperty(DISABLE_PROPERTY);
|
||||
if (disabledPluginList != null) {
|
||||
|
@ -83,6 +85,9 @@ public class DataArchiver {
|
|||
} else {
|
||||
DISABLED_PLUGINS = Collections.emptySet();
|
||||
}
|
||||
|
||||
// default to /archive
|
||||
archivePath = System.getProperty(PATH_PROPERTY, "/archive");
|
||||
}
|
||||
|
||||
public void archivePlugins() {
@ -0,0 +1,739 @@
|
|||
/**
|
||||
* This software was developed and / or modified by Raytheon Company,
|
||||
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
*
|
||||
* U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
* This software product contains export-restricted data whose
|
||||
* export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
* to non-U.S. persons whether in the United States or abroad requires
|
||||
* an export license or other authorization.
|
||||
*
|
||||
* Contractor Name: Raytheon Company
|
||||
* Contractor Address: 6825 Pine Street, Suite 340
|
||||
* Mail Stop B8
|
||||
* Omaha, NE 68106
|
||||
* 402.291.0100
|
||||
*
|
||||
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
* further licensing information.
|
||||
**/
|
||||
package com.raytheon.uf.edex.archive;
|
||||
|
||||
import java.io.BufferedInputStream;
|
||||
import java.io.BufferedOutputStream;
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.io.Writer;
|
||||
import java.text.DecimalFormat;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.SortedMap;
|
||||
import java.util.TreeMap;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.zip.GZIPInputStream;
|
||||
import java.util.zip.GZIPOutputStream;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.PluginDataObject;
|
||||
import com.raytheon.uf.common.dataplugin.PluginProperties;
|
||||
import com.raytheon.uf.common.dataplugin.persist.PersistableDataObject;
|
||||
import com.raytheon.uf.common.datastorage.DataStoreFactory;
|
||||
import com.raytheon.uf.common.datastorage.IDataStore;
|
||||
import com.raytheon.uf.common.datastorage.StorageException;
|
||||
import com.raytheon.uf.common.datastorage.StorageProperties.Compression;
|
||||
import com.raytheon.uf.common.serialization.SerializationException;
|
||||
import com.raytheon.uf.common.serialization.SerializationUtil;
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.uf.common.util.FileUtil;
|
||||
import com.raytheon.uf.edex.core.dataplugin.PluginRegistry;
|
||||
import com.raytheon.uf.edex.database.plugin.PluginDao;
|
||||
import com.raytheon.uf.edex.database.processor.IDatabaseProcessor;
|
||||
|
||||
/**
|
||||
* Receives records to be archived to disk. Records can be written over extended
|
||||
* periods of time and so when writing, the previous records must be dup elim'd
|
||||
* against the current set of data to handle the database being updated.
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Dec 10, 2013 2555 rjpeter Initial creation
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
* @author rjpeter
|
||||
* @version 1.0
|
||||
*/
|
||||
|
||||
public class DatabaseArchiveProcessor implements IDatabaseProcessor {
|
||||
private static final transient IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(DatabaseArchiveProcessor.class);
|
||||
|
||||
/** Chunk size for I/O Buffering and Compression */
|
||||
private static final int CHUNK_SIZE = 8192;
|
||||
|
||||
private static final String BIN_FILE_EXT = ".bin";
|
||||
|
||||
private static final String GZIP_FILE_EXT = ".gz";
|
||||
|
||||
private static final Pattern FILE_COUNT_PATTERN = Pattern
|
||||
.compile("^(.*\\.bin\\.)(\\d+)(?:\\.gz)?$");
|
||||
|
||||
protected final String archivePath;
|
||||
|
||||
protected final String pluginName;
|
||||
|
||||
protected final PluginDao dao;
|
||||
|
||||
protected final IPluginArchiveFileNameFormatter nameFormatter;
|
||||
|
||||
protected boolean debugArchiver = false;
|
||||
|
||||
protected boolean compressDatabaseFiles = false;
|
||||
|
||||
protected int fetchSize = 1000;
|
||||
|
||||
protected Set<String> datastoreFilesToArchive = new HashSet<String>();
|
||||
|
||||
protected Set<String> filesCreatedThisSession = new HashSet<String>();
|
||||
|
||||
protected Set<File> dirsToCheckNumbering = new HashSet<File>();
|
||||
|
||||
protected int recordsSaved = 0;
|
||||
|
||||
protected boolean failed = false;
|
||||
|
||||
public DatabaseArchiveProcessor(String archivePath, String pluginName,
|
||||
PluginDao dao, IPluginArchiveFileNameFormatter nameFormatter) {
|
||||
this.archivePath = archivePath;
|
||||
this.pluginName = pluginName;
|
||||
this.dao = dao;
|
||||
this.nameFormatter = nameFormatter;
|
||||
}
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
* @see
|
||||
* com.raytheon.uf.edex.database.processor.IDatabaseProcessor#process(java
|
||||
* .util.List)
|
||||
*/
|
||||
@Override
|
||||
public boolean process(List<?> objects) {
|
||||
if ((objects != null) && !objects.isEmpty()) {
|
||||
Set<String> datastoreFiles = new HashSet<String>();
|
||||
statusHandler.info(pluginName + ": Processing rows " + recordsSaved
|
||||
+ " to " + (recordsSaved + objects.size()));
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
List<PersistableDataObject<?>> pdos = (List<PersistableDataObject<?>>) objects;
|
||||
Map<String, List<PersistableDataObject<?>>> pdosByFile = new HashMap<String, List<PersistableDataObject<?>>>();
|
||||
for (PersistableDataObject<?> pdo : pdos) {
|
||||
String path = nameFormatter.getFilename(pluginName, dao, pdo);
|
||||
if (path.endsWith(".h5")) {
|
||||
datastoreFiles.add(path);
|
||||
path = path.substring(0, path.length() - 3);
|
||||
}
|
||||
|
||||
List<PersistableDataObject<?>> list = pdosByFile.get(path);
|
||||
if (list == null) {
|
||||
list = new LinkedList<PersistableDataObject<?>>();
|
||||
pdosByFile.put(path, list);
|
||||
}
|
||||
|
||||
list.add(pdo);
|
||||
}
|
||||
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler.debug(pluginName + ": Processed "
|
||||
+ objects.size() + " rows into " + pdosByFile.size()
|
||||
+ " files");
|
||||
}
|
||||
|
||||
try {
|
||||
savePdoMap(pdosByFile);
|
||||
datastoreFilesToArchive.addAll(datastoreFiles);
|
||||
recordsSaved += pdos.size();
|
||||
} catch (Exception e) {
|
||||
statusHandler.error(pluginName
|
||||
+ ": Error occurred saving data to archive", e);
|
||||
failed = true;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks file numbering on any directory that has been flagged. Also
|
||||
* archives any associated hdf5 files.
|
||||
*/
|
||||
@Override
|
||||
public void finish() {
|
||||
for (File dir : dirsToCheckNumbering) {
|
||||
checkFileNumbering(dir);
|
||||
}
|
||||
|
||||
if (!datastoreFilesToArchive.isEmpty()) {
|
||||
statusHandler.info(pluginName + ": archiving "
|
||||
+ datastoreFilesToArchive.size() + " hdf5 file(s)");
|
||||
Compression compRequired = Compression.LZF;
|
||||
PluginProperties props = PluginRegistry.getInstance()
|
||||
.getRegisteredObject(pluginName);
|
||||
|
||||
if ((props != null) && (props.getCompression() != null)) {
|
||||
if (compRequired.equals(Compression.valueOf(props
|
||||
.getCompression()))) {
|
||||
// if plugin is already compressed to the correct level,
|
||||
// no additional compression required
|
||||
compRequired = null;
|
||||
}
|
||||
}
|
||||
|
||||
for (String dataStoreFile : datastoreFilesToArchive) {
|
||||
IDataStore ds = DataStoreFactory.getDataStore(new File(FileUtil
|
||||
.join(pluginName, dataStoreFile)));
|
||||
// all dataStore files should end with .h5
|
||||
String destDir = (dataStoreFile.endsWith(".h5") ? dataStoreFile
|
||||
.substring(0, dataStoreFile.length() - 3)
|
||||
: dataStoreFile);
|
||||
|
||||
String outputDir = FileUtil.join(archivePath, pluginName,
|
||||
destDir) + File.separator;
|
||||
|
||||
try {
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler.debug(pluginName
|
||||
+ ": Archiving data store file "
|
||||
+ dataStoreFile + " to " + outputDir);
|
||||
}
|
||||
|
||||
// copy the changed hdf5 file, does repack if
|
||||
// compRequired, otherwise pure file copy
|
||||
ds.copy(outputDir, compRequired, null, 0, 0);
|
||||
} catch (StorageException e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
e.getLocalizedMessage());
|
||||
}
|
||||
}
|
||||
statusHandler.info(pluginName + ": hdf5 archiving complete");
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
* @see
|
||||
* com.raytheon.uf.edex.database.processor.IDatabaseProcessor#getBatchSize()
|
||||
*/
|
||||
@Override
|
||||
public int getBatchSize() {
|
||||
return fetchSize;
|
||||
}
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
* @see
|
||||
* com.raytheon.uf.edex.database.processor.IDatabaseProcessor#setBatchSize
|
||||
* (int)
|
||||
*/
|
||||
@Override
|
||||
public void setBatchSize(int fetchSize) {
|
||||
this.fetchSize = fetchSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* True if the processor had a failure during its execution.
|
||||
*
|
||||
* @return
|
||||
*/
|
||||
public boolean isFailed() {
|
||||
return failed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset any state fields so processor can be reused.
|
||||
*/
|
||||
public void reset() {
|
||||
datastoreFilesToArchive.clear();
|
||||
filesCreatedThisSession.clear();
|
||||
dirsToCheckNumbering.clear();
|
||||
recordsSaved = 0;
|
||||
failed = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the debugArchiver
|
||||
*/
|
||||
public boolean isDebugArchiver() {
|
||||
return debugArchiver;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param debugArchiver
|
||||
* the debugArchiver to set
|
||||
*/
|
||||
public void setDebugArchiver(boolean debugArchiver) {
|
||||
this.debugArchiver = debugArchiver;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the compressDatabaseFiles
|
||||
*/
|
||||
public boolean isCompressDatabaseFiles() {
|
||||
return compressDatabaseFiles;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param compressDatabaseFiles
|
||||
* the compressDatabaseFiles to set
|
||||
*/
|
||||
public void setCompressDatabaseFiles(boolean compressDatabaseFiles) {
|
||||
this.compressDatabaseFiles = compressDatabaseFiles;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the recordsSaved
|
||||
*/
|
||||
public int getRecordsSaved() {
|
||||
return recordsSaved;
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves data in the pdo map to disk. The data in the pdoMap is dup elim'd
|
||||
* against any previously written records.
|
||||
*
|
||||
* @param pdoMap
|
||||
* @throws SerializationException
|
||||
* @throws IOException
|
||||
*/
|
||||
protected void savePdoMap(Map<String, List<PersistableDataObject<?>>> pdoMap)
|
||||
throws SerializationException, IOException {
|
||||
StringBuilder baseDir = new StringBuilder(160);
|
||||
Set<Object> identifierSet = null;
|
||||
|
||||
for (Map.Entry<String, List<PersistableDataObject<?>>> entry : pdoMap
|
||||
.entrySet()) {
|
||||
baseDir.setLength(0);
|
||||
baseDir.append(archivePath).append(File.separator)
|
||||
.append(pluginName).append(File.separator)
|
||||
.append(entry.getKey()).append(File.separator);
|
||||
File dir = new File(baseDir.toString());
|
||||
|
||||
if (!dir.exists()) {
|
||||
if (!dir.mkdirs() && !dir.exists()) {
|
||||
throw new IOException("Cannot create directory "
|
||||
+ baseDir.toString());
|
||||
}
|
||||
}
|
||||
|
||||
List<PersistableDataObject<?>> pdos = entry.getValue();
|
||||
if (identifierSet == null) {
|
||||
identifierSet = new HashSet<Object>(pdos.size(), 1);
|
||||
} else {
|
||||
identifierSet.clear();
|
||||
}
|
||||
|
||||
for (PersistableDataObject<?> pdo : pdos) {
|
||||
identifierSet.add(pdo.getIdentifier());
|
||||
}
|
||||
|
||||
SortedMap<Integer, File> fileMap = getArchivedFiles(dir);
|
||||
pdos = dupElimPreviousFiles(fileMap, pdos, identifierSet);
|
||||
|
||||
// if any records left in pdos, write to disk
|
||||
if (pdos.size() > 0) {
|
||||
int fileCount = 1;
|
||||
if (!fileMap.isEmpty()) {
|
||||
fileCount += fileMap.lastKey();
|
||||
}
|
||||
File newFile = new File(dir, dir.getName() + BIN_FILE_EXT + "."
|
||||
+ fileCount);
|
||||
fileMap.put(fileCount, newFile);
|
||||
writeDataToDisk(newFile, pdos);
|
||||
filesCreatedThisSession.add(newFile.getAbsolutePath());
|
||||
|
||||
// check if we have added another digit and should add a 0 to
|
||||
// previous numbers
|
||||
String fileCountStr = Integer.toString(fileCount);
|
||||
if (fileCountStr.startsWith("1") && fileCountStr.endsWith("0")) {
|
||||
dirsToCheckNumbering.add(dir);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks the pdos against the previously written pdos. If a previous pdo
|
||||
* would be overwritten, its entry is deleted from the previous file and the
|
||||
* file rewritten. If the last file does not contain a full fetch set, then
|
||||
* pdos are appended up to the fetch size. If any pdos are remaining to be
|
||||
* written, they are returned; otherwise an empty list is returned.
|
||||
*
|
||||
* @param fileMap
|
||||
* @param pdos
|
||||
* @param identifierSet
|
||||
* @return
|
||||
* @throws IOException
|
||||
* @throws SerializationException
|
||||
*/
|
||||
protected List<PersistableDataObject<?>> dupElimPreviousFiles(
|
||||
SortedMap<Integer, File> fileMap,
|
||||
List<PersistableDataObject<?>> pdos, Set<Object> identifierSet)
|
||||
throws IOException, SerializationException {
|
||||
if (!fileMap.isEmpty()) {
|
||||
Iterator<File> fileIter = fileMap.values().iterator();
|
||||
while (fileIter.hasNext()) {
|
||||
File dataFile = fileIter.next();
|
||||
|
||||
if (filesCreatedThisSession
|
||||
.contains(dataFile.getAbsolutePath())) {
|
||||
statusHandler
|
||||
.debug(pluginName
|
||||
+ ": Skipping dup check on data file created this session: "
|
||||
+ dataFile.getName());
|
||||
continue;
|
||||
}
|
||||
|
||||
List<PersistableDataObject<?>> pdosFromDisk = readDataFromDisk(dataFile);
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler.debug(pluginName + ": Checking "
|
||||
+ pdosFromDisk.size() + " old records from file: "
|
||||
+ dataFile.getAbsolutePath());
|
||||
}
|
||||
Iterator<PersistableDataObject<?>> pdoIter = pdosFromDisk
|
||||
.iterator();
|
||||
boolean needsUpdate = false;
|
||||
int dupsRemoved = 0;
|
||||
while (pdoIter.hasNext()) {
|
||||
PersistableDataObject<?> pdo = pdoIter.next();
|
||||
if (identifierSet.contains(pdo.getIdentifier())) {
|
||||
pdoIter.remove();
|
||||
needsUpdate = true;
|
||||
dupsRemoved++;
|
||||
}
|
||||
}
|
||||
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)
|
||||
&& (dupsRemoved > 0)) {
|
||||
statusHandler.debug(pluginName + ": Removed " + dupsRemoved
|
||||
+ " old records from file: "
|
||||
+ dataFile.getAbsolutePath());
|
||||
}
|
||||
|
||||
if (!fileIter.hasNext() && (pdosFromDisk.size() < fetchSize)) {
|
||||
// last file, add more data to it
|
||||
needsUpdate = true;
|
||||
int numToAdd = fetchSize - pdosFromDisk.size();
|
||||
numToAdd = Math.min(numToAdd, pdos.size());
|
||||
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler.debug(pluginName + ": Adding " + numToAdd
|
||||
+ " records to file: "
|
||||
+ dataFile.getAbsolutePath());
|
||||
}
|
||||
|
||||
pdosFromDisk.addAll(pdos.subList(0, numToAdd));
|
||||
if (numToAdd < pdos.size()) {
|
||||
pdos = pdos.subList(numToAdd, pdos.size());
|
||||
} else {
|
||||
pdos = Collections.emptyList();
|
||||
}
|
||||
}
|
||||
|
||||
if (needsUpdate) {
|
||||
if (!pdosFromDisk.isEmpty()) {
|
||||
writeDataToDisk(dataFile, pdosFromDisk);
|
||||
} else {
|
||||
dirsToCheckNumbering.add(dataFile.getParentFile());
|
||||
dataFile.delete();
|
||||
fileIter.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return pdos;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads the serialized data from file. If there is a problem reading the
|
||||
* file, it is renamed to .bad.
|
||||
*
|
||||
* @param file
|
||||
* @return
|
||||
* @throws IOException
|
||||
* @throws SerializationException
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
protected List<PersistableDataObject<?>> readDataFromDisk(File file)
|
||||
throws IOException, SerializationException {
|
||||
if (file.exists()) {
|
||||
InputStream is = null;
|
||||
boolean successful = false;
|
||||
try {
|
||||
if (file.getName().endsWith(GZIP_FILE_EXT)) {
|
||||
is = new GZIPInputStream(new FileInputStream(file),
|
||||
CHUNK_SIZE);
|
||||
} else {
|
||||
is = new BufferedInputStream(new FileInputStream(file),
|
||||
CHUNK_SIZE);
|
||||
}
|
||||
|
||||
List<PersistableDataObject<?>> rval = SerializationUtil
|
||||
.transformFromThrift(List.class, is);
|
||||
successful = true;
|
||||
return rval;
|
||||
} finally {
|
||||
if (!successful) {
|
||||
// couldn't read in file, move it to bad
|
||||
file.renameTo(new File(file.getAbsoluteFile() + ".bad"));
|
||||
}
|
||||
if (is != null) {
|
||||
try {
|
||||
is.close();
|
||||
} catch (IOException e) {
|
||||
statusHandler.error(pluginName
|
||||
+ ": Error occurred closing input stream", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
/**
|
||||
* Dynamic serializes the pdos. The data will be written to file. If the
|
||||
* file has .gz extension and the database compression flag is not set, the
|
||||
* .gz file will be deleted in favor of the uncompressed file. Reverse also
|
||||
* holds true. This allows a file written under a different compression
|
||||
* scheme to automatically be converted if rewritten out.
|
||||
*
|
||||
* @param file
|
||||
* @param pdos
|
||||
* @throws IOException
|
||||
* @throws SerializationException
|
||||
*/
|
||||
protected void writeDataToDisk(File file,
|
||||
List<PersistableDataObject<?>> pdos) throws IOException,
|
||||
SerializationException {
|
||||
OutputStream os = null;
|
||||
|
||||
File gzipFile = null;
|
||||
File baseFile = null;
|
||||
String fileAbsPath = file.getAbsolutePath();
|
||||
|
||||
if (fileAbsPath.endsWith(GZIP_FILE_EXT)) {
|
||||
gzipFile = file;
|
||||
baseFile = new File(fileAbsPath.substring(0,
|
||||
fileAbsPath.length() - 3));
|
||||
} else {
|
||||
baseFile = file;
|
||||
gzipFile = new File(fileAbsPath + GZIP_FILE_EXT);
|
||||
}
|
||||
|
||||
try {
|
||||
if (!file.getParentFile().exists()) {
|
||||
file.getParentFile().mkdirs();
|
||||
}
|
||||
|
||||
if (compressDatabaseFiles) {
|
||||
if (baseFile.exists()) {
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler
|
||||
.debug(pluginName
|
||||
+ ": Database compression flag changed, deleting uncompressed file "
|
||||
+ baseFile.getAbsolutePath());
|
||||
}
|
||||
baseFile.delete();
|
||||
}
|
||||
|
||||
os = new GZIPOutputStream(new FileOutputStream(gzipFile),
|
||||
CHUNK_SIZE);
|
||||
} else {
|
||||
if (gzipFile.exists()) {
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler
|
||||
.debug(pluginName
|
||||
+ ": Database compression flag changed, deleting compressed file "
|
||||
+ gzipFile.getAbsolutePath());
|
||||
}
|
||||
gzipFile.delete();
|
||||
}
|
||||
|
||||
os = new BufferedOutputStream(new FileOutputStream(baseFile),
|
||||
CHUNK_SIZE);
|
||||
}
|
||||
|
||||
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
|
||||
statusHandler.debug(pluginName + ": Serializing " + pdos.size()
|
||||
+ " records to file " + file.getAbsolutePath());
|
||||
}
|
||||
|
||||
// Thrift serialize pdo list
|
||||
SerializationUtil.transformToThriftUsingStream(pdos, os);
|
||||
os.flush();
|
||||
} finally {
|
||||
if (os != null) {
|
||||
try {
|
||||
os.close();
|
||||
} catch (IOException e) {
|
||||
statusHandler.error(pluginName
|
||||
+ ": Error occurred closing output stream", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (debugArchiver) {
|
||||
String debugPath = baseFile.getAbsolutePath() + ".debug";
|
||||
dumpPdos(debugPath.toString(), pdos);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump the record information being archived to a file.
|
||||
*
|
||||
* @param basePath
|
||||
* @param pdos
|
||||
*/
|
||||
private void dumpPdos(String basePath, List<PersistableDataObject<?>> pdos) {
|
||||
Writer writer = null;
|
||||
File dumpFile = null;
|
||||
|
||||
try {
|
||||
int index = 0;
|
||||
do {
|
||||
index++;
|
||||
dumpFile = new File(basePath + "." + index);
|
||||
} while (dumpFile.exists());
|
||||
|
||||
Iterator<PersistableDataObject<?>> pdoIter = pdos.iterator();
|
||||
writer = new BufferedWriter(new FileWriter(dumpFile));
|
||||
statusHandler.info(String.format("%s: Dumping records to: %s",
|
||||
pluginName, dumpFile.getAbsolutePath()));
|
||||
|
||||
while (pdoIter.hasNext()) {
|
||||
PersistableDataObject<?> pdo = pdoIter.next();
|
||||
if (pdo instanceof PluginDataObject) {
|
||||
PluginDataObject pluginDataObject = (PluginDataObject) pdo;
|
||||
if (pluginDataObject.getId() != 0) {
|
||||
// otherwise was read from file and will be recorded in
|
||||
// a previous entry
|
||||
writer.write("" + pluginDataObject.getId() + ":");
|
||||
writer.write(pluginDataObject.getDataURI());
|
||||
writer.write("\n");
|
||||
}
|
||||
} else {
|
||||
writer.write(pdo.getIdentifier().toString());
|
||||
writer.write("\n");
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
statusHandler
|
||||
.handle(Priority.PROBLEM, pluginName
|
||||
+ ": Unable to dump pdo data to debug file: "
|
||||
+ (dumpFile != null ? dumpFile.getAbsolutePath()
|
||||
: null), e);
|
||||
} finally {
|
||||
if (writer != null) {
|
||||
try {
|
||||
writer.close();
|
||||
} catch (Exception e) {
|
||||
// Ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
 * Returns a map of the archived database files in the directory. The map is
 * ordered by file count in the file name.
|
||||
*
|
||||
* @param baseDir
|
||||
* @return
|
||||
*/
|
||||
protected SortedMap<Integer, File> getArchivedFiles(File baseDir) {
|
||||
File[] dirListing = baseDir.listFiles();
|
||||
SortedMap<Integer, File> fileMap = new TreeMap<Integer, File>();
|
||||
|
||||
if ((dirListing != null) && (dirListing.length > 0)) {
|
||||
for (File dataFile : dirListing) {
|
||||
if (dataFile.isFile()) {
|
||||
String name = dataFile.getName();
|
||||
Matcher matcher = FILE_COUNT_PATTERN.matcher(name);
|
||||
if (matcher.matches()) {
|
||||
String fileNumStr = matcher.group(2);
|
||||
int fileNum = Integer.parseInt(fileNumStr);
|
||||
fileMap.put(fileNum, dataFile);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fileMap;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks database bin files in the directory for consistency. If a file has
|
||||
* been deleted or if the number of digits has increased, files are renamed
|
||||
* to fill in holes as well as to have leading zeros as necessary.
|
||||
*
|
||||
* @param dir
|
||||
*/
|
||||
protected void checkFileNumbering(File dir) {
|
||||
SortedMap<Integer, File> fileMap = getArchivedFiles(dir);
|
||||
int nextFileCount = 1;
|
||||
int size = fileMap.size();
|
||||
StringBuilder formatString = new StringBuilder(4);
|
||||
do {
|
||||
formatString.append("0");
|
||||
size /= 10;
|
||||
} while (size > 0);
|
||||
|
||||
DecimalFormat format = new DecimalFormat(formatString.toString());
|
||||
|
||||
for (Map.Entry<Integer, File> entry : fileMap.entrySet()) {
|
||||
int fileNum = entry.getKey();
|
||||
File oldFile = entry.getValue();
|
||||
String name = oldFile.getName();
|
||||
Matcher m = FILE_COUNT_PATTERN.matcher(name);
|
||||
if (m.matches()) {
|
||||
String oldCountString = m.group(2);
|
||||
|
||||
if ((fileNum > nextFileCount)
|
||||
|| (oldCountString.length() != formatString.length())) {
|
||||
// rename file to file count
|
||||
String newFileName = m.group(1) + format.format(fileNum);
|
||||
if (name.endsWith(GZIP_FILE_EXT)) {
|
||||
newFileName += GZIP_FILE_EXT;
|
||||
}
|
||||
|
||||
File newFile = new File(oldFile.getParent(), newFileName);
|
||||
oldFile.renameTo(newFile);
|
||||
}
|
||||
|
||||
nextFileCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
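
For orientation, here is a minimal sketch of how this processor appears to be driven, pieced together from the DatabaseArchiver changes later in this commit. It is a fragment, not a complete program: archivePath, pluginName, dao, archiveFormatter, batchSize, startTime, endTime, runTime and determineEndTime all come from DatabaseArchiver, and the assumption that processArchiveRecords streams records into process() and then calls finish() is inferred rather than shown in this diff.

    // Hypothetical driver fragment (compare DatabaseArchiver.archivePluginData below);
    // cluster lock bookkeeping is omitted here.
    DatabaseArchiveProcessor processor = new DatabaseArchiveProcessor(
            archivePath, pluginName, dao, archiveFormatter);
    processor.setCompressDatabaseFiles(compressDatabaseFiles);
    processor.setDebugArchiver(debugArchiver);
    processor.setBatchSize(batchSize.intValue());

    int recordCount = 0;
    while ((startTime != null) && (endTime != null) && !processor.isFailed()) {
        processor.reset();
        // assumed: streams matching records into process(List) in batches of
        // batchSize, then invokes finish() to renumber files and archive hdf5
        dao.processArchiveRecords(startTime, endTime, processor);
        if (!processor.isFailed()) {
            recordCount += processor.getRecordsSaved();
            startTime = endTime;
            endTime = determineEndTime(startTime, runTime);
        }
    }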
|
|
@ -19,47 +19,20 @@
|
|||
**/
|
||||
package com.raytheon.uf.edex.archive;
|
||||
|
||||
import java.io.BufferedInputStream;
|
||||
import java.io.BufferedOutputStream;
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.io.Writer;
|
||||
import java.text.ParseException;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.TimeZone;
|
||||
import java.util.zip.GZIPInputStream;
|
||||
import java.util.zip.GZIPOutputStream;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.PluginDataObject;
|
||||
import com.raytheon.uf.common.dataplugin.PluginException;
|
||||
import com.raytheon.uf.common.dataplugin.PluginProperties;
|
||||
import com.raytheon.uf.common.dataplugin.persist.IPersistable;
|
||||
import com.raytheon.uf.common.dataplugin.persist.PersistableDataObject;
|
||||
import com.raytheon.uf.common.datastorage.DataStoreFactory;
|
||||
import com.raytheon.uf.common.datastorage.IDataStore;
|
||||
import com.raytheon.uf.common.datastorage.StorageException;
|
||||
import com.raytheon.uf.common.datastorage.StorageProperties.Compression;
|
||||
import com.raytheon.uf.common.serialization.SerializationException;
|
||||
import com.raytheon.uf.common.serialization.SerializationUtil;
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
import com.raytheon.uf.common.time.util.TimeUtil;
|
||||
import com.raytheon.uf.common.util.FileUtil;
|
||||
import com.raytheon.uf.edex.core.dataplugin.PluginRegistry;
|
||||
import com.raytheon.uf.edex.database.DataAccessLayerException;
|
||||
import com.raytheon.uf.edex.database.cluster.ClusterLockUtils;
|
||||
|
@ -70,7 +43,7 @@ import com.raytheon.uf.edex.database.plugin.PluginDao;
|
|||
import com.raytheon.uf.edex.database.plugin.PluginFactory;
|
||||
|
||||
/**
|
||||
* This class handles moving processed data to the archiver directory.
|
||||
* This class handles saving processed data to the archiver directory.
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
|
@ -84,6 +57,7 @@ import com.raytheon.uf.edex.database.plugin.PluginFactory;
|
|||
* Add debug information.
|
||||
* Nov 05, 2013 2499 rjpeter Repackaged, removed config files, always compresses hdf5.
|
||||
* Nov 11, 2013 2478 rjpeter Updated data store copy to always copy hdf5.
|
||||
* Dec 13, 2013 2555 rjpeter Refactored logic into DatabaseArchiveProcessor.
|
||||
* </pre>
|
||||
*
|
||||
* @author rjpeter
|
||||
|
@ -100,29 +74,34 @@ public class DatabaseArchiver implements IPluginArchiver {
|
|||
protected SimpleDateFormat initialValue() {
|
||||
SimpleDateFormat df = new SimpleDateFormat(
|
||||
"yyyy-MM-dd HH:mm:ss.SSS");
|
||||
df.setTimeZone(TimeZone.getTimeZone("GMT"));
|
||||
df.setTimeZone(TimeUtil.GMT_TIME_ZONE);
|
||||
return df;
|
||||
}
|
||||
};
|
||||
|
||||
/** Minimum time increment to archive, note based off of insertTime. */
|
||||
private static final int MIN_DURATION_MILLIS = 1000 * 60 * 30;
|
||||
private static final long MIN_DURATION_MILLIS = 30 * TimeUtil.MILLIS_PER_MINUTE;
|
||||
|
||||
/** Maximum time increment to archive, note based off of insertTime. */
|
||||
private static final int MAX_DURATION_MILLIS = 1000 * 60 * 60;
|
||||
private static final long MAX_DURATION_MILLIS = 120 * TimeUtil.MILLIS_PER_MINUTE;
|
||||
|
||||
/** Default batch size for database queries */
|
||||
private static final Integer defaultBatchSize = 10000;
|
||||
|
||||
/** Job's name. */
|
||||
private static final String TASK_NAME = "DB Archiver";
|
||||
|
||||
/** Cluster time out on lock. */
|
||||
private static final int CLUSTER_LOCK_TIMEOUT = 60000;
|
||||
|
||||
/** Chunk size for I/O Buffering and Compression */
|
||||
private static final int CHUNK_SIZE = 8192;
|
||||
private static final long CLUSTER_LOCK_TIMEOUT = 10 * TimeUtil.MILLIS_PER_MINUTE;
|
||||
|
||||
/** Mapping for plug-in formatters. */
|
||||
private final Map<String, IPluginArchiveFileNameFormatter> pluginArchiveFormatters;
|
||||
|
||||
/** Mapping for plug-in fetch size */
|
||||
private final Map<String, Integer> pluginBatchSize;
|
||||
|
||||
private final IPluginArchiveFileNameFormatter defaultFormatter = new DefaultPluginArchiveFileNameFormatter();
|
||||
|
||||
/** When true dump the pdos. */
|
||||
private final boolean debugArchiver;
|
||||
|
||||
|
@ -133,8 +112,7 @@ public class DatabaseArchiver implements IPluginArchiver {
|
|||
*/
|
||||
public DatabaseArchiver() {
|
||||
pluginArchiveFormatters = new HashMap<String, IPluginArchiveFileNameFormatter>();
|
||||
pluginArchiveFormatters.put("default",
|
||||
new DefaultPluginArchiveFileNameFormatter());
|
||||
pluginBatchSize = new HashMap<String, Integer>();
|
||||
debugArchiver = Boolean.getBoolean("archive.debug.enable");
|
||||
compressDatabaseFiles = Boolean
|
||||
.getBoolean("archive.compression.enable");
|
||||
|
@ -159,12 +137,10 @@ public class DatabaseArchiver implements IPluginArchiver {
|
|||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
public boolean archivePluginData(String pluginName, String archivePath) {
|
||||
SimpleDateFormat dateFormat = TL_DATE_FORMAT.get();
|
||||
// set archive time
|
||||
Calendar runTime = Calendar.getInstance();
|
||||
runTime.setTimeZone(TimeZone.getTimeZone("GMT"));
|
||||
Calendar runTime = TimeUtil.newGmtCalendar();
|
||||
runTime.add(Calendar.MINUTE, -30);
|
||||
|
||||
// cluster lock, grabbing time of last successful archive
|
||||
|
@ -195,99 +171,52 @@ public class DatabaseArchiver implements IPluginArchiver {
|
|||
return false;
|
||||
}
|
||||
|
||||
Set<String> datastoreFilesToArchive = new HashSet<String>();
|
||||
|
||||
startTime = determineStartTime(pluginName, ct.getExtraInfo(),
|
||||
runTime, dao);
|
||||
Calendar endTime = determineEndTime(startTime, runTime);
|
||||
Map<String, List<PersistableDataObject>> pdoMap = new HashMap<String, List<PersistableDataObject>>();
|
||||
|
||||
IPluginArchiveFileNameFormatter archiveFormatter = pluginArchiveFormatters
|
||||
.get(pluginName);
|
||||
if (archiveFormatter == null) {
|
||||
archiveFormatter = pluginArchiveFormatters.get("default");
|
||||
archiveFormatter = defaultFormatter;
|
||||
}
|
||||
|
||||
while ((startTime != null) && (endTime != null)) {
|
||||
Map<String, List<PersistableDataObject>> pdosToSave = archiveFormatter
|
||||
.getPdosByFile(pluginName, dao, pdoMap, startTime,
|
||||
endTime);
|
||||
Integer batchSize = pluginBatchSize.get(pluginName);
|
||||
|
||||
if ((pdosToSave != null) && !pdosToSave.isEmpty()) {
|
||||
recordCount += savePdoMap(pluginName, archivePath,
|
||||
pdosToSave);
|
||||
for (Map.Entry<String, List<PersistableDataObject>> entry : pdosToSave
|
||||
.entrySet()) {
|
||||
List<PersistableDataObject> pdoList = entry.getValue();
|
||||
if ((pdoList != null) && !pdoList.isEmpty()
|
||||
&& (pdoList.get(0) instanceof IPersistable)) {
|
||||
datastoreFilesToArchive.add(entry.getKey());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
startTime = endTime;
|
||||
endTime = determineEndTime(startTime, runTime);
|
||||
if (batchSize == null) {
|
||||
batchSize = defaultBatchSize;
|
||||
}
|
||||
|
||||
if ((pdoMap != null) && !pdoMap.isEmpty()) {
|
||||
recordCount += savePdoMap(pluginName, archivePath, pdoMap);
|
||||
// don't forget to archive the HDF5 for the records that weren't
|
||||
// saved off by the prior while block
|
||||
for (Map.Entry<String, List<PersistableDataObject>> entry : pdoMap
|
||||
.entrySet()) {
|
||||
List<PersistableDataObject> pdoList = entry.getValue();
|
||||
if ((pdoList != null) && !pdoList.isEmpty()
|
||||
&& (pdoList.get(0) instanceof IPersistable)) {
|
||||
datastoreFilesToArchive.add(entry.getKey());
|
||||
}
|
||||
DatabaseArchiveProcessor processor = new DatabaseArchiveProcessor(
|
||||
archivePath, pluginName, dao, archiveFormatter);
|
||||
processor.setCompressDatabaseFiles(compressDatabaseFiles);
|
||||
processor.setDebugArchiver(debugArchiver);
|
||||
processor.setBatchSize(batchSize.intValue());
|
||||
|
||||
while ((startTime != null) && (endTime != null)
|
||||
&& !processor.isFailed()) {
|
||||
statusHandler.info(pluginName + ": Checking for records from "
|
||||
+ TimeUtil.formatDate(startTime) + " to "
|
||||
+ TimeUtil.formatDate(endTime));
|
||||
|
||||
processor.reset();
|
||||
dao.processArchiveRecords(startTime, endTime, processor);
|
||||
if (!processor.isFailed()) {
|
||||
recordCount += processor.getRecordsSaved();
|
||||
startTime = endTime;
|
||||
endTime = determineEndTime(startTime, runTime);
|
||||
|
||||
// update the cluster lock with check point details
|
||||
String extraInfo = dateFormat.format(startTime.getTime());
|
||||
lockHandler.setExtraInfo(extraInfo);
|
||||
ClusterLockUtils.updateExtraInfoAndLockTime(TASK_NAME,
|
||||
pluginName, extraInfo, System.currentTimeMillis());
|
||||
}
|
||||
}
|
||||
|
||||
if (!datastoreFilesToArchive.isEmpty()) {
|
||||
Compression compRequired = Compression.LZF;
|
||||
|
||||
PluginProperties props = PluginRegistry.getInstance()
|
||||
.getRegisteredObject(pluginName);
|
||||
|
||||
if ((props != null) && (props.getCompression() != null)) {
|
||||
if (compRequired.equals(Compression.valueOf(props
|
||||
.getCompression()))) {
|
||||
// if plugin is already compressed to the correct level,
|
||||
// no additional compression required
|
||||
compRequired = null;
|
||||
}
|
||||
}
|
||||
|
||||
for (String dataStoreFile : datastoreFilesToArchive) {
|
||||
IDataStore ds = DataStoreFactory.getDataStore(new File(
|
||||
FileUtil.join(pluginName, dataStoreFile)));
|
||||
int pathSep = dataStoreFile.lastIndexOf(File.separatorChar);
|
||||
String outputDir = (pathSep > 0 ? FileUtil.join(
|
||||
archivePath, pluginName,
|
||||
dataStoreFile.substring(0, pathSep)) : FileUtil
|
||||
.join(archivePath, pluginName, dataStoreFile));
|
||||
|
||||
try {
|
||||
// copy the changed hdf5 file, does repack if
|
||||
// compRequired, otherwise pure file copy
|
||||
ds.copy(outputDir, compRequired, null, 0, 0);
|
||||
} catch (StorageException e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
e.getLocalizedMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// set last archive time to startTime
|
||||
if (startTime != null) {
|
||||
lockHandler
|
||||
.setExtraInfo(dateFormat.format(startTime.getTime()));
|
||||
}
|
||||
|
||||
if (recordCount > 0) {
|
||||
statusHandler.info(pluginName
|
||||
+ ": successfully archived "
|
||||
+ ": archived "
|
||||
+ recordCount
|
||||
+ " records in "
|
||||
+ TimeUtil.prettyDuration(System.currentTimeMillis()
|
||||
|
@ -315,180 +244,6 @@ public class DatabaseArchiver implements IPluginArchiver {
|
|||
return true;
|
||||
}
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
protected int savePdoMap(String pluginName, String archivePath,
|
||||
Map<String, List<PersistableDataObject>> pdoMap)
|
||||
throws SerializationException, IOException {
|
||||
int recordsSaved = 0;
|
||||
|
||||
StringBuilder path = new StringBuilder();
|
||||
for (Map.Entry<String, List<PersistableDataObject>> entry : pdoMap
|
||||
.entrySet()) {
|
||||
path.setLength(0);
|
||||
path.append(archivePath).append(File.separator).append(pluginName)
|
||||
.append(File.separator).append(entry.getKey());
|
||||
// remove .h5
|
||||
if (path.lastIndexOf(".h5") == (path.length() - 3)) {
|
||||
path.setLength(path.length() - 3);
|
||||
}
|
||||
int pathDebugLength = path.length();
|
||||
if (compressDatabaseFiles) {
|
||||
path.append(".bin.gz");
|
||||
} else {
|
||||
path.append(".bin");
|
||||
}
|
||||
|
||||
File file = new File(path.toString());
|
||||
List<PersistableDataObject> pdosToSerialize = entry.getValue();
|
||||
recordsSaved += pdosToSerialize.size();
|
||||
|
||||
if (file.exists()) {
|
||||
// read previous list in from disk (in gz format)
|
||||
InputStream is = null;
|
||||
|
||||
try {
|
||||
|
||||
// created gzip'd stream
|
||||
if (compressDatabaseFiles) {
|
||||
is = new GZIPInputStream(new FileInputStream(file),
|
||||
CHUNK_SIZE);
|
||||
} else {
|
||||
is = new BufferedInputStream(new FileInputStream(file),
|
||||
CHUNK_SIZE);
|
||||
}
|
||||
|
||||
// transform back for list append
|
||||
@SuppressWarnings("unchecked")
|
||||
List<PersistableDataObject<Object>> prev = SerializationUtil
|
||||
.transformFromThrift(List.class, is);
|
||||
|
||||
statusHandler.info(pluginName + ": Read in " + prev.size()
|
||||
+ " records from file " + file.getAbsolutePath());
|
||||
|
||||
List<PersistableDataObject> newList = new ArrayList<PersistableDataObject>(
|
||||
prev.size() + pdosToSerialize.size());
|
||||
|
||||
// get set of new identifiers
|
||||
Set<Object> identifierSet = new HashSet<Object>(
|
||||
pdosToSerialize.size(), 1);
|
||||
for (PersistableDataObject pdo : pdosToSerialize) {
|
||||
identifierSet.add(pdo.getIdentifier());
|
||||
}
|
||||
|
||||
// merge records by Identifier, to remove old duplicate
|
||||
for (PersistableDataObject pdo : prev) {
|
||||
if (!identifierSet.contains(pdo.getIdentifier())) {
|
||||
newList.add(pdo);
|
||||
}
|
||||
}
|
||||
|
||||
// release prev
|
||||
prev = null;
|
||||
|
||||
newList.addAll(pdosToSerialize);
|
||||
pdosToSerialize = newList;
|
||||
} finally {
|
||||
if (is != null) {
|
||||
try {
|
||||
is.close();
|
||||
} catch (IOException e) {
|
||||
statusHandler.error(pluginName
|
||||
+ ": Error occurred closing input stream",
|
||||
e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
statusHandler.info(pluginName + ": Serializing "
|
||||
+ pdosToSerialize.size() + " records to file "
|
||||
+ file.getAbsolutePath());
|
||||
|
||||
OutputStream os = null;
|
||||
|
||||
try {
|
||||
if (!file.getParentFile().exists()) {
|
||||
file.getParentFile().mkdirs();
|
||||
}
|
||||
|
||||
if (debugArchiver) {
|
||||
String debugRootName = path.substring(0, pathDebugLength);
|
||||
dumpPdos(pluginName, pdosToSerialize, debugRootName);
|
||||
}
|
||||
|
||||
// created gzip'd stream
|
||||
if (compressDatabaseFiles) {
|
||||
os = new GZIPOutputStream(new FileOutputStream(file), CHUNK_SIZE);
|
||||
} else {
|
||||
os = new BufferedOutputStream(new FileOutputStream(file),
|
||||
CHUNK_SIZE);
|
||||
}
|
||||
|
||||
// Thrift serialize pdo list
|
||||
SerializationUtil.transformToThriftUsingStream(pdosToSerialize,
|
||||
os);
|
||||
} finally {
|
||||
if (os != null) {
|
||||
try {
|
||||
os.close();
|
||||
} catch (IOException e) {
|
||||
statusHandler.error(pluginName
|
||||
+ ": Error occurred closing output stream", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return recordsSaved;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump the record information being archived to a file.
|
||||
*/
|
||||
@SuppressWarnings("rawtypes")
|
||||
private void dumpPdos(String pluginName,
|
||||
List<PersistableDataObject> pdosToSerialize, String debugRootName) {
|
||||
StringBuilder sb = new StringBuilder(debugRootName);
|
||||
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
|
||||
sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
|
||||
sb.append("_").append(sdf.format(Calendar.getInstance().getTime()))
|
||||
.append(".txt");
|
||||
File file = new File(sb.toString());
|
||||
Writer writer = null;
|
||||
try {
|
||||
PersistableDataObject<?>[] pdoArray = pdosToSerialize
|
||||
.toArray(new PersistableDataObject<?>[0]);
|
||||
writer = new BufferedWriter(new FileWriter(file));
|
||||
statusHandler.info(String.format("Dumping %s records to: %s",
|
||||
pdoArray.length, file.getAbsolutePath()));
|
||||
for (int i = 0; i < pdosToSerialize.size(); ++i) {
|
||||
if (pdoArray[i] instanceof PluginDataObject) {
|
||||
PluginDataObject pdo = (PluginDataObject) pdoArray[i];
|
||||
if (pdo.getId() != 0) {
|
||||
// otherwise was read from file
|
||||
writer.write("" + pdo.getId() + ":");
|
||||
writer.write(pdo.getDataURI());
|
||||
writer.write("\n");
|
||||
}
|
||||
} else {
|
||||
writer.write(pdoArray[i].toString());
|
||||
writer.write("\n");
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
statusHandler.handle(Priority.PROBLEM, e.getLocalizedMessage(), e);
|
||||
} finally {
|
||||
if (writer != null) {
|
||||
try {
|
||||
writer.close();
|
||||
} catch (Exception e) {
|
||||
// Ignore
|
||||
}
|
||||
writer = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the plug-in's start time for a query.
|
||||
*
|
||||
|
@ -591,4 +346,17 @@ public class DatabaseArchiver implements IPluginArchiver {
|
|||
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register batch size for a plug-in.
|
||||
*
|
||||
* @param pluginName
|
||||
* @param batchSize
|
||||
* Batch Size for the plugin. Default is 10000.
|
||||
* @return databaseArchiver
|
||||
*/
|
||||
public Object registerPluginBatchSize(String pluginName, Integer batchSize) {
|
||||
pluginBatchSize.put(pluginName, batchSize);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,22 +20,13 @@
|
|||
package com.raytheon.uf.edex.archive;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.PluginDataObject;
|
||||
import com.raytheon.uf.common.dataplugin.persist.DefaultPathProvider;
|
||||
import com.raytheon.uf.common.dataplugin.persist.IHDFFilePathProvider;
|
||||
import com.raytheon.uf.common.dataplugin.persist.IPersistable;
|
||||
import com.raytheon.uf.common.dataplugin.persist.PersistableDataObject;
|
||||
import com.raytheon.uf.edex.database.DataAccessLayerException;
|
||||
import com.raytheon.uf.edex.database.plugin.PluginDao;
|
||||
|
||||
/**
|
||||
|
@ -47,11 +38,12 @@ import com.raytheon.uf.edex.database.plugin.PluginDao;
|
|||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Apr 20, 2012 dgilling Initial creation
|
||||
* Mar 12, 2013 1783 rferrel Replace ArrayList with LinkedList to
|
||||
* remove excess capacity and reduce
|
||||
* time to resize a growing list.
|
||||
* Nov 05, 2013 2499 rjpeter Repackaged
|
||||
* Apr 20, 2012 dgilling Initial creation
|
||||
* Mar 12, 2013 1783 rferrel Replace ArrayList with LinkedList to
|
||||
* remove excess capacity and reduce
|
||||
* time to resize a growing list.
|
||||
* Nov 05, 2013 2499 rjpeter Repackaged
|
||||
* Dec 14, 2013 2555 rjpeter Refactored
|
||||
* </pre>
|
||||
*
|
||||
* @author dgilling
|
||||
|
@ -60,89 +52,40 @@ import com.raytheon.uf.edex.database.plugin.PluginDao;
|
|||
|
||||
public class DefaultPluginArchiveFileNameFormatter implements
|
||||
IPluginArchiveFileNameFormatter {
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
* @see
|
||||
* com.raytheon.uf.edex.maintenance.archive.IPluginArchiveFileNameFormatter
|
||||
* #getPdosByFile(java.lang.String,
|
||||
* com.raytheon.uf.edex.database.plugin.PluginDao, java.util.Map,
|
||||
* java.util.Calendar, java.util.Calendar)
|
||||
* com.raytheon.uf.edex.archive.IPluginArchiveFileNameFormatter#getFilename
|
||||
* (java.lang.String, com.raytheon.uf.edex.database.plugin.PluginDao,
|
||||
* com.raytheon.uf.common.dataplugin.persist.PersistableDataObject)
|
||||
*/
|
||||
@SuppressWarnings("rawtypes")
|
||||
@Override
|
||||
public Map<String, List<PersistableDataObject>> getPdosByFile(
|
||||
String pluginName, PluginDao dao,
|
||||
Map<String, List<PersistableDataObject>> pdoMap,
|
||||
Calendar startTime, Calendar endTime)
|
||||
throws DataAccessLayerException {
|
||||
List<PersistableDataObject> pdos = dao.getRecordsToArchive(startTime,
|
||||
endTime);
|
||||
|
||||
Set<String> newFileEntries = new HashSet<String>();
|
||||
if ((pdos != null) && !pdos.isEmpty()) {
|
||||
if (pdos.get(0) instanceof IPersistable) {
|
||||
IHDFFilePathProvider pathProvider = dao.pathProvider;
|
||||
|
||||
for (PersistableDataObject pdo : pdos) {
|
||||
IPersistable persistable = (IPersistable) pdo;
|
||||
String path = pathProvider.getHDFPath(pluginName,
|
||||
persistable)
|
||||
+ File.separator
|
||||
+ pathProvider.getHDFFileName(pluginName,
|
||||
persistable);
|
||||
newFileEntries.add(path);
|
||||
List<PersistableDataObject> list = pdoMap.get(path);
|
||||
if (list == null) {
|
||||
list = new LinkedList<PersistableDataObject>();
|
||||
pdoMap.put(path, list);
|
||||
}
|
||||
list.add(pdo);
|
||||
}
|
||||
public String getFilename(String pluginName, PluginDao dao,
|
||||
PersistableDataObject<?> pdo) {
|
||||
String path = null;
|
||||
if (pdo instanceof IPersistable) {
|
||||
IPersistable persistable = (IPersistable) pdo;
|
||||
IHDFFilePathProvider pathProvider = dao.pathProvider;
|
||||
path = pathProvider.getHDFPath(pluginName, persistable)
|
||||
+ File.separator
|
||||
+ pathProvider.getHDFFileName(pluginName, persistable);
|
||||
} else {
|
||||
String timeString = null;
|
||||
if (pdo instanceof PluginDataObject) {
    PluginDataObject pluginDataObj = (PluginDataObject) pdo;
|
||||
Date time = pluginDataObj.getDataTime().getRefTimeAsCalendar()
|
||||
.getTime();
|
||||
timeString = DefaultPathProvider.fileNameFormat.get().format(
|
||||
time);
|
||||
} else {
|
||||
// order files by refTime hours
|
||||
for (PersistableDataObject pdo : pdos) {
|
||||
String timeString = null;
|
||||
if (pdo instanceof PluginDataObject) {
|
||||
PluginDataObject pluginDataObj = (PluginDataObject) pdo;
|
||||
Date time = pluginDataObj.getDataTime()
|
||||
.getRefTimeAsCalendar().getTime();
|
||||
timeString = DefaultPathProvider.fileNameFormat.get()
|
||||
.format(time);
|
||||
} else {
|
||||
// no refTime to use bounded insert query bounds
|
||||
Date time = startTime.getTime();
|
||||
timeString = DefaultPathProvider.fileNameFormat.get()
|
||||
.format(time);
|
||||
}
|
||||
|
||||
String path = pluginName + timeString;
|
||||
newFileEntries.add(path);
|
||||
List<PersistableDataObject> list = pdoMap.get(path);
|
||||
if (list == null) {
|
||||
list = new LinkedList<PersistableDataObject>();
|
||||
pdoMap.put(path, list);
|
||||
}
|
||||
list.add(pdo);
|
||||
}
|
||||
|
||||
// no refTime, use current time as last resort
|
||||
timeString = DefaultPathProvider.fileNameFormat.get().format(
|
||||
new Date());
|
||||
}
|
||||
|
||||
path = pluginName + timeString;
|
||||
}
|
||||
|
||||
Iterator<String> iter = pdoMap.keySet().iterator();
|
||||
Map<String, List<PersistableDataObject>> pdosToSave = new HashMap<String, List<PersistableDataObject>>(
|
||||
pdoMap.size() - newFileEntries.size());
|
||||
|
||||
while (iter.hasNext()) {
|
||||
String key = iter.next();
|
||||
if (!newFileEntries.contains(key)) {
|
||||
pdosToSave.put(key, pdoMap.get(key));
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
|
||||
return pdosToSave;
|
||||
return path;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -19,12 +19,7 @@
|
|||
**/
|
||||
package com.raytheon.uf.edex.archive;
|
||||
|
||||
import java.util.Calendar;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.persist.PersistableDataObject;
|
||||
import com.raytheon.uf.edex.database.DataAccessLayerException;
|
||||
import com.raytheon.uf.edex.database.plugin.PluginDao;
|
||||
|
||||
/**
|
||||
|
@ -36,8 +31,9 @@ import com.raytheon.uf.edex.database.plugin.PluginDao;
|
|||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Apr 20, 2012 dgilling Initial creation
|
||||
* Apr 20, 2012 dgilling Initial creation
|
||||
* Nov 05, 2013 2499 rjpeter Repackaged
|
||||
* Dec 13, 2013 2555 rjpeter Refactored
|
||||
* </pre>
|
||||
*
|
||||
* @author dgilling
|
||||
|
@ -45,29 +41,18 @@ import com.raytheon.uf.edex.database.plugin.PluginDao;
|
|||
*/
|
||||
|
||||
public interface IPluginArchiveFileNameFormatter {
|
||||
|
||||
/**
|
||||
* Returns the base file name for the pdo. In the case of IPersistable objects,
|
||||
* it should match the h5 file.
|
||||
*
|
||||
* @param pluginName
|
||||
* The plugin name.
|
||||
* @param dao
|
||||
* @param pdoMap
|
||||
* The current pdos by file. This map will be merged with pdos,
|
||||
* if a key was not referenced by pdos it will be removed and
|
||||
* returned in the returned map for storage.
|
||||
* @param startTime
|
||||
* @param endTime
|
||||
* @return The pdos to save to disk. If sortPdosByFiles did not store any
|
||||
* entries from pdos into a file listed in currentPdoMap then that
|
||||
* entry will be returned in a new map and removed from
|
||||
* currentPdoMap.
|
||||
* @throws DataAccessLayerException
|
||||
* If the DAO is unable to retrieve the records from the
|
||||
* database.
|
||||
* The dao for the object.
|
||||
* @param pdo
|
||||
* The object to look up.
|
||||
* @return
|
||||
*/
|
||||
@SuppressWarnings("rawtypes")
|
||||
public abstract Map<String, List<PersistableDataObject>> getPdosByFile(
|
||||
String pluginName, PluginDao dao,
|
||||
Map<String, List<PersistableDataObject>> pdoMap,
|
||||
Calendar startTime, Calendar endTime)
|
||||
throws DataAccessLayerException;
|
||||
public String getFilename(String pluginName, PluginDao dao,
|
||||
PersistableDataObject<?> pdo);
|
||||
}
|
||||
|
|
|
@ -44,6 +44,7 @@ import com.raytheon.uf.common.time.util.TimeUtil;
|
|||
* number of files purged.
|
||||
* Sep 03, 2013 2224 rferrel Add check to enable/disable purger.
|
||||
* Nov 05, 2013 2499 rjpeter Repackaged
|
||||
* Dec 17, 2013 2603 rjpeter Reload configuration every run of purge.
|
||||
* </pre>
|
||||
*
|
||||
* @author bgonzale
|
||||
|
@ -67,6 +68,7 @@ public class ArchivePurger {
|
|||
timer.start();
|
||||
statusHandler.info("Archive Purge started.");
|
||||
ArchiveConfigManager manager = ArchiveConfigManager.getInstance();
|
||||
manager.reset();
|
||||
Collection<ArchiveConfig> archives = manager.getArchives();
|
||||
for (ArchiveConfig archive : archives) {
|
||||
ITimer archiveTimer = TimeUtil.getTimer();
|
||||
|
|
|
@ -28,6 +28,8 @@
|
|||
* Oct 01, 2013 2147 rferrel Date time stamp no longer requires an hour field.
|
||||
* Nov 05, 2013 2497 rferrel Change root directory.
|
||||
* Nov 13, 2013 2549 rferrel Changes to GFE and modelsounding.
|
||||
* Dec 12, 2013 2624 rferrel Document Julian time stamp.
|
||||
* Dec 13, 2013 2555 rjpeter Updated all to use dirPatterns.
|
||||
*
|
||||
* @author rferrel
|
||||
* @version 1.0
|
||||
|
@ -66,14 +68,19 @@
|
|||
single table entry.
  <timeType> - Optional tag to determine what type of time stamp is being used to get files/directories for retention
               and case creation. The value dictates how many groupings in the <dirPattern>s and/or <filePattern> are
               used to get the time stamp for a file. The four values are:
               Date - (default) the time stamp is made up of four groups in the patterns: year, month, day and hour.
               used to get the time stamp for a file. The five values are:
               Date - (default) the time stamp is made up of 3 or 4 groups in the patterns: year, month, day and optional hour.
               Julian - The time stamp is made up of 2 or 3 groups in the patterns: year, day_of_year and optional hour.
                        If the year is less than 100 it is adjusted to a year prior to, or no more than a month into
                        the future of, the current simulated year.
               EpochSec - The time stamp has one group in the patterns which is the epoch time in seconds.
               EpochMS - The time stamp has one group in the patterns which is the epoch time in milliseconds.
               File - No group is used to get the time stamp. Instead use the file's date of last modification.
  <dateGroupIndices> - Required tag when <timeType> has any value but File.
               Date - A comma separated list of 3 or 4 numbers which are, in order, the index for year, month, day and hour.
                      When only 3 numbers are given, the hour value is 23.
               Julian - A comma separated list of 2 or 3 numbers which are, in order, the index for year, day of year, and hour.
                        When only two numbers are given, the hour value is 23.
               EpochSec - A number which is the index for the epoch in seconds.
               EpochMS - A number which is the index for the epoch in milliseconds.
               File - Not needed since no group is used to get the time stamp.
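As an illustration of the tags described above, a hypothetical <dataSet> keyed on a Julian time stamp could look like the following; the plug-in name and pattern are invented for the example, only the tag layout follows this file's conventions (group 2 = year, group 3 = day of year, group 4 = hour):

        <dataSet>
            <dirPattern>(somePlugin)/.*-(\d{4})(\d{3})-(\d{2})\..*</dirPattern>
            <displayLabel>{1}</displayLabel>
            <timeType>Julian</timeType>
            <dateGroupIndices>2,3,4</dateGroupIndices>
        </dataSet>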
|
||||
|
@ -116,7 +123,7 @@
|
|||
<displayLabel>{1}</displayLabel>
|
||||
<timeType>Date</timeType>
|
||||
<dateGroupIndices>2,3,4,5</dateGroupIndices>
|
||||
<filePattern>[^/]*-(\d{4})-(\d{2})-(\d{2})-(\d{2})\..*</filePattern>
|
||||
<filePattern>.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})\..*</filePattern>
|
||||
</dataSet>
|
||||
</category>
|
||||
The first <dirPattern> looks for files matching the <filePattern> in the directories acars, airep, airmet or taf.
|
||||
|
@ -137,8 +144,7 @@
|
|||
<name>Decision Assistance</name>
|
||||
<extRetentionHours>168</extRetentionHours>
|
||||
<dataSet>
|
||||
<dirPattern>(cwat|fog|ffmp|fssobs|preciprate|qpf|scan|vil)</dirPattern>
|
||||
<filePattern>.*(\d{4})-(\d{2})-(\d{2})-(\d{2}).*</filePattern>
|
||||
<dirPattern>(cwat|fog|ffmp|fssobs|preciprate|qpf|scan|vil)/.*(\d{4})-(\d{2})-(\d{2})-(\d{2}).*</dirPattern>
|
||||
<displayLabel>{1}</displayLabel>
|
||||
<dateGroupIndices>2,3,4,5</dateGroupIndices>
|
||||
</dataSet>
|
||||
|
@ -147,13 +153,12 @@
|
|||
<name>GFE</name>
|
||||
<extRetentionHours>168</extRetentionHours>
|
||||
<dataSet>
|
||||
<dirPattern>gfe/(.*)/(.*)/(\d{4})_(\d{2})_(\d{2})_(\d{2})\d{2}</dirPattern>
|
||||
<dirPattern>gfe/(.*)/(.*)/(\d{4})_(\d{2})_(\d{2})_(\d{2})\d{2}.*</dirPattern>
|
||||
<displayLabel>{1} - {2}</displayLabel>
|
||||
<dateGroupIndices>3,4,5,6</dateGroupIndices>
|
||||
</dataSet>
|
||||
<dataSet>
|
||||
<dirPattern>gfe/(.*)/(.*)</dirPattern>
|
||||
<filePattern>.*_(\d{4})(\d{2})(\d{2})_.*</filePattern>
|
||||
<dirPattern>gfe/(.*)/(.*)/.*_(\d{4})(\d{2})(\d{2})_.*</dirPattern>
|
||||
<displayLabel>{1} - {2}</displayLabel>
|
||||
<dateGroupIndices>3,4,5</dateGroupIndices>
|
||||
</dataSet>
|
||||
|
@ -162,8 +167,7 @@
|
|||
<name>Local</name>
|
||||
<extRetentionHours>168</extRetentionHours>
|
||||
<dataSet>
|
||||
<dirPattern>(ldadhydro|ldadmesonet|ldadprofiler|ldad_manual|qc)</dirPattern>
|
||||
<filePattern>.*(\d{4})-(\d{2})-(\d{2})-(\d{2}).*</filePattern>
|
||||
<dirPattern>(ldadhydro|ldadmesonet|ldadprofiler|ldad_manual|qc)/.*(\d{4})-(\d{2})-(\d{2})-(\d{2}).*</dirPattern>
|
||||
<displayLabel>{1}</displayLabel>
|
||||
<dateGroupIndices>2,3,4,5</dateGroupIndices>
|
||||
</dataSet>
|
||||
|
@ -172,54 +176,48 @@
|
|||
<name>Model</name>
|
||||
<extRetentionHours>168</extRetentionHours>
|
||||
<dataSet>
|
||||
<dirPattern>(grid)/(.*)/(.*)</dirPattern>
|
||||
<dirPattern>(grid)/(.*)/(.*)/.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})-.*</dirPattern>
|
||||
<displayLabel>{2}</displayLabel>
|
||||
<dateGroupIndices>4,5,6,7</dateGroupIndices>
|
||||
<filePattern>.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})-.*</filePattern>
|
||||
</dataSet>
|
||||
<dataSet>
|
||||
<dirPattern>(modelsounding)/(.*)/.*</dirPattern>
|
||||
<dirPattern>(bufrmos)(.*)</dirPattern>
|
||||
<dirPattern>(modelsounding)/(.*)/.*/.*(\d{4})-(\d{2})-(\d{2})-(\d{2}).*</dirPattern>
|
||||
<dirPattern>(bufrmos)(.*)/.*(\d{4})-(\d{2})-(\d{2})-(\d{2})</dirPattern>
|
||||
<displayLabel>{1} - {2}</displayLabel>
|
||||
<dateGroupIndices>3,4,5,6</dateGroupIndices>
|
||||
<filePattern>.*(\d{4})-(\d{2})-(\d{2})-(\d{2}).*</filePattern>
|
||||
</dataSet>
|
||||
</category>
|
||||
<category>
|
||||
<name>Products</name>
|
||||
<extRetentionHours>168</extRetentionHours>
|
||||
<dataSet>
|
||||
<dirPattern>(airmet|atcf|aww|bufrncwf|ccfp|convsigmet|cwa|ffg|intlsigmet|nonconvsigmet|stormtrack|taf|tcg|tcm|tcs|text|vaa|warning|wcp)</dirPattern>
|
||||
<dirPattern>(bufrsigwx|redbook)/.*</dirPattern>
|
||||
<dirPattern>(airmet|atcf|aww|bufrncwf|ccfp|convsigmet|cwa|ffg|intlsigmet|nonconvsigmet|stormtrack|taf|tcg|tcm|tcs|text|vaa|warning|wcp)/.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})</dirPattern>
|
||||
<dirPattern>(bufrsigwx|redbook)/.*/.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})</dirPattern>
|
||||
<displayLabel>{1}</displayLabel>
|
||||
<dateGroupIndices>2,3,4,5</dateGroupIndices>
|
||||
<filePattern>[^/]*-(\d{4})-(\d{2})-(\d{2})-(\d{2})\..*</filePattern>
|
||||
</dataSet>
|
||||
</category>
|
||||
<category>
|
||||
<name>Observation</name>
|
||||
<extRetentionHours>168</extRetentionHours>
|
||||
<dataSet>
|
||||
<dirPattern>(acars|airep|binlightning|bufrascat|bufrhdw|bufrmthdw|bufrssmi|idft|lsr|obs|pirep|recco|svrwx)</dirPattern>
|
||||
<dirPattern>(sfcobs)/.*</dirPattern>
|
||||
<dirPattern>(acars|airep|binlightning|bufrascat|bufrhdw|bufrmthdw|bufrssmi|idft|lsr|obs|pirep|recco|svrwx)/.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})</dirPattern>
|
||||
<dirPattern>(sfcobs)/.*/.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})</dirPattern>
|
||||
<displayLabel>{1}</displayLabel>
|
||||
<dateGroupIndices>2,3,4,5</dateGroupIndices>
|
||||
<filePattern>.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})\..*</filePattern>
|
||||
</dataSet>
|
||||
</category>
|
||||
<category>
|
||||
<name>Satellite</name>
|
||||
<extRetentionHours>168</extRetentionHours>
|
||||
<dataSet>
|
||||
<dirPattern>satellite/(.*)/(.*)</dirPattern>
|
||||
<filePattern>.*-(\d{4})-(\d{2})-(\d{2})-(\d{2}).*</filePattern>
|
||||
<dirPattern>satellite/(.*)/(.*)/.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})</dirPattern>
|
||||
<dateGroupIndices>3,4,5,6</dateGroupIndices>
|
||||
<displayLabel>{1}</displayLabel>
|
||||
</dataSet>
|
||||
<dataSet>
|
||||
<!-- Guess for mcidas and viirs is based on old example. -->
|
||||
<dirPattern>(mcidas|viirs)/.*/.*/.*/.*</dirPattern>
|
||||
<filePattern>.*-(\d{4})-(\d{2})-(\d{2})-(\d{2}).*</filePattern>
|
||||
<dirPattern>(mcidas|viirs)/.*/.*/.*/.*/.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})</dirPattern>
|
||||
<dateGroupIndices>2,3,4,5</dateGroupIndices>
|
||||
<displayLabel>{1}</displayLabel>
|
||||
</dataSet>
|
||||
|
@ -228,10 +226,9 @@
|
|||
<name>Profiles</name>
|
||||
<extRetentionHours>168</extRetentionHours>
|
||||
<dataSet>
|
||||
<dirPattern>(acarssounding|bufrua|goessounding|poessounding|profiler)</dirPattern>
|
||||
<dirPattern>(acarssounding|bufrua|goessounding|poessounding|profiler)/.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})</dirPattern>
|
||||
<displayLabel>{1}</displayLabel>
|
||||
<dateGroupIndices>2,3,4,5</dateGroupIndices>
|
||||
<filePattern>.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})\..*</filePattern>
|
||||
</dataSet>
|
||||
</category>
|
||||
<category>
|
||||
|
@ -239,10 +236,9 @@
|
|||
<name>radar</name>
|
||||
<extRetentionHours>168</extRetentionHours>
|
||||
<dataSet>
|
||||
<dirPattern>radar/(.*)/(.*)</dirPattern>
|
||||
<dirPattern>radar/(.*)/(.*)/.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})</dirPattern>
|
||||
<displayLabel>{1}</displayLabel>
|
||||
<dateGroupIndices>3,4,5,6</dateGroupIndices>
|
||||
<filePattern>.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})\..*</filePattern>
|
||||
</dataSet>
|
||||
</category>
|
||||
</archive>
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
* Jun 20, 2013 1966 rferrel Initial creation
|
||||
* Aug 05, 2013 2224 rferrel Changes to add dataSet tags.
|
||||
* Oct 01, 2013 2147 rferrel Date time stamp no longer requires an hour field.
|
||||
* Dec 12, 2013 2624 rferrel Document Julian time stamp.
|
||||
*
|
||||
* @author rferrel
|
||||
* @version 1.0
|
||||
|
@ -64,14 +65,19 @@
|
|||
single table entry.
  <timeType> - Optional tag to determine what type of time stamp is being used to get files/directories for retention
               and case creation. The value dictates how many groupings in the <dirPattern>s and/or <filePattern> are
               used to get the time stamp for a file. The four values are:
               Date - (default) the time stamp is made up of four groups in the patterns: year, month, day and hour.
               used to get the time stamp for a file. The five values are:
               Date - (default) the time stamp is made up of 3 or 4 groups in the patterns: year, month, day and optional hour.
               Julian - The time stamp is made up of 2 or 3 groups in the patterns: year, day_of_year and optional hour.
                        If the year is less than 100 it is adjusted to a year prior to, or no more than a month into
                        the future of, the current simulated year.
               EpochSec - The time stamp has one group in the patterns which is the epoch time in seconds.
               EpochMS - The time stamp has one group in the patterns which is the epoch time in milliseconds.
               File - No group is used to get the time stamp. Instead use the file's date of last modification.
  <dateGroupIndices> - Required tag when <timeType> has any value but File.
               Date - A comma separated list of 3 or 4 numbers which are, in order, the index for year, month, day and hour.
                      When only 3 numbers are given, the hour value is 23.
               Julian - A comma separated list of 2 or 3 numbers which are, in order, the index for year, day of year, and hour.
                        When only two numbers are given, the hour value is 23.
               EpochSec - A number which is the index for the epoch in seconds.
               EpochMS - A number which is the index for the epoch in milliseconds.
               File - Not needed since no group is used to get the time stamp.
|
||||
|
@@ -114,7 +120,7 @@
<displayLabel>{1}</displayLabel>
<timeType>Date</timeType>
<dateGroupIndices>2,3,4,5</dateGroupIndices>
<filePattern>[^/]*-(\d{4})-(\d{2})-(\d{2})-(\d{2})\..*</filePattern>
<filePattern>.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})\..*</filePattern>
</dataSet>
</category>
The first <dirPattern> looks for files matching the <filePattern> in the directories acars, airep, airmet or taf.
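The Julian <timeType> documented above is new in this change. As a rough illustration only (this is not the archiver's actual implementation; the group extraction and the century handling below are assumptions), the documented rules could be interpreted in Java like this:

    import java.util.Calendar;
    import java.util.TimeZone;

    /**
     * Sketch of the documented Julian time stamp rules: the groups are year,
     * day_of_year and an optional hour (defaulting to 23); a two-digit year is
     * shifted to the century that keeps it at or before roughly one month past
     * the current (simulated) year. Illustrative only.
     */
    public class JulianStampSketch {

        public static Calendar toCalendar(String yearGroup, String dayOfYearGroup,
                String hourGroup, Calendar now) {
            int year = Integer.parseInt(yearGroup);
            int dayOfYear = Integer.parseInt(dayOfYearGroup);
            int hour = (hourGroup != null) ? Integer.parseInt(hourGroup) : 23;

            if (year < 100) {
                // Assumed century adjustment; the real archiver may differ.
                year += (now.get(Calendar.YEAR) / 100) * 100;
                Calendar limit = (Calendar) now.clone();
                limit.add(Calendar.MONTH, 1);
                if (year > limit.get(Calendar.YEAR)) {
                    year -= 100;
                }
            }

            Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
            cal.clear();
            cal.set(Calendar.YEAR, year);
            cal.set(Calendar.DAY_OF_YEAR, dayOfYear);
            cal.set(Calendar.HOUR_OF_DAY, hour);
            return cal;
        }
    }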
@@ -29,6 +29,7 @@ Export-Package: com.raytheon.uf.edex.database,
com.raytheon.uf.edex.database.handlers,
com.raytheon.uf.edex.database.init,
com.raytheon.uf.edex.database.plugin,
com.raytheon.uf.edex.database.processor,
com.raytheon.uf.edex.database.purge,
com.raytheon.uf.edex.database.query,
com.raytheon.uf.edex.database.status,
@@ -50,7 +50,7 @@ import com.raytheon.uf.edex.database.dao.DaoConfig;
* Apr 28, 2010 #5050 rjpeter Initial creation from SmartInitTransaction.
* Aug 26, 2013 #2272 bkowal Add a function to see if a cluster suffix has
* been specified via the environment.
*
* Dec 13, 2013 2555 rjpeter Added updateExtraInfoAndLockTime and javadoc.
* </pre>
*
* @author rjpeter
@@ -116,6 +116,13 @@ public class ClusterLockUtils {
}

/**
* Attempts to lock based on the taskName/details and the specified
* validTime for checkTime. If waitForRunningToFinish it will sleep and then
* attempt to lock again until it achieves a lock other than already
* running. The waitForRunningToFinish is not part of the main lock logic
* due to checkTime being keyed off something other than System clock. If
* the validTime is older than the current validTime for the lock, an OLD
* LockState will be returned.
*
* @param taskName
* @param details
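The javadoc above describes the cluster-wide locking contract added in this change. A hedged usage sketch follows; the exact lock/unlock overloads, the import paths, the ClusterTask return type and the LockState enum are assumed from these javadocs rather than confirmed against the full class, and the task/detail names are made up:

    import com.raytheon.uf.edex.database.cluster.ClusterLockUtils;
    import com.raytheon.uf.edex.database.cluster.ClusterLockUtils.LockState;
    import com.raytheon.uf.edex.database.cluster.ClusterTask;

    public class ClusterLockSketch {

        public void runExclusively() {
            // Assumed overload: lock(taskName, details, validTime, waitForRunningToFinish)
            ClusterTask task = ClusterLockUtils.lock("Archive Sweep", "grid",
                    System.currentTimeMillis(), true);
            if (LockState.SUCCESSFUL.equals(task.getLockState())) {
                try {
                    // ... work that must not run concurrently across the cluster ...
                } finally {
                    // Assumed overload: unlock(task, clearTime)
                    ClusterLockUtils.unlock(task, false);
                }
            }
        }
    }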
@@ -131,6 +138,11 @@ public class ClusterLockUtils {
}

/**
* Attempts to lock based on the taskName/details and the specified
* lockHandler. If waitForRunningToFinish it will sleep and then attempt to
* lock again until it achieves a lock other than already running. The
* waitForRunningToFinish is not part of the main lock logic due to
* checkTime being keyed off something other than System clock.
*
* @param taskName
* @param details
@@ -214,6 +226,9 @@ public class ClusterLockUtils {
}

/**
* Updates the lock time for the specified lock. IMPORTANT: No tracking is
* done to ensure caller has lock, so only use when you know you have a
* valid lock.
*
* @param taskName
* @param details
@@ -268,7 +283,9 @@ public class ClusterLockUtils {
}

/**
* Updates the extra info field for a cluster task
* Updates the extra info field for a cluster task. IMPORTANT: No tracking
* is done to ensure caller has lock, so only use when you know you have a
* valid lock.
*
* @param taskName
* The name of the task
@@ -327,6 +344,70 @@ public class ClusterLockUtils {
}

/**
* Updates the extra info and lock time fields for a cluster task.
* IMPORTANT: No tracking is done to ensure caller has lock, so only use
* when you know you have a valid lock.
*
* @param taskName
* The name of the task
* @param details
* The details associated with the task
* @param extraInfo
* The new extra info to set
* @param lockTime
* The lock time to set
* @return True if the update was successful, else false if the update
* failed
*/
public static boolean updateExtraInfoAndLockTime(String taskName,
String details, String extraInfo, long lockTime) {
CoreDao cd = new CoreDao(DaoConfig.DEFAULT);
Session s = null;
Transaction tx = null;
ClusterTask ct = null;
boolean rval = true;

try {
s = cd.getHibernateTemplate().getSessionFactory().openSession();
tx = s.beginTransaction();
ClusterTaskPK pk = new ClusterTaskPK();
pk.setName(taskName);
pk.setDetails(details);

ct = getLock(s, pk, true);
ct.setExtraInfo(extraInfo);
ct.setLastExecution(lockTime);
s.update(ct);
tx.commit();
} catch (Throwable t) {
handler.handle(Priority.ERROR,
"Error processing update lock time for cluster task ["
+ taskName + "/" + details + "]", t);
rval = false;

if (tx != null) {
try {
tx.rollback();
} catch (HibernateException e) {
handler.handle(Priority.ERROR,
"Error rolling back cluster task lock transaction",
e);
}
}
} finally {
if (s != null) {
try {
s.close();
} catch (HibernateException e) {
handler.handle(Priority.ERROR,
"Error closing cluster task lock session", e);
}
}
}
return rval;
}

/**
* Looks up the specified cluster lock.
*
* @param taskName
* @param details
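A short usage sketch of the new method, assuming the caller already holds the lock as the IMPORTANT note requires; the task name, details and extra-info payload below are hypothetical:

    // Hypothetical names; updateExtraInfoAndLockTime performs no ownership check,
    // so this must only run while the caller holds the corresponding lock.
    String taskName = "Archive Sweep";
    String details = "grid";
    String extraInfo = "lastInsertTime=2013-12-13 00:00";

    boolean updated = ClusterLockUtils.updateExtraInfoAndLockTime(taskName,
            details, extraInfo, System.currentTimeMillis());
    if (!updated) {
        // Update failed (e.g. database error); callers would typically log and retry.
    }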
@@ -388,6 +469,9 @@ public class ClusterLockUtils {
}

/**
* Unlocks the given cluster lock. If clear time is set, time field will be
* reset to the epoch time. This can be useful when wanting the next check
* to always succeed.
*
* @param taskName
* @param details
@@ -500,6 +584,7 @@ public class ClusterLockUtils {
}

/**
* Deletes the specified cluster lock.
*
* @param taskName
* @param details
@@ -554,11 +639,22 @@ public class ClusterLockUtils {
return rval;
}

/**
* Looks up and returns the specified cluster lock. If the lock does not
* exist and create flag is set, the lock will be created. This is done
* using a Master lock to ensure isolation among all transactions.
*
* @param s
* @param pk
* @param create
* @return
* @throws HibernateException
*/
private static ClusterTask getLock(Session s, ClusterTaskPK pk,
boolean create) throws HibernateException {
ClusterTask ct = (ClusterTask) s.get(ClusterTask.class, pk,
LockOptions.UPGRADE);
if (ct == null && create) {
if ((ct == null) && create) {
getMasterLock(s);

// now have master lock, verify new row hasn't already been
@@ -577,6 +673,13 @@ public class ClusterLockUtils {
return ct;
}

/**
* Returns the master lock.
*
* @param s
* @return
* @throws HibernateException
*/
private static ClusterTask getMasterLock(Session s)
throws HibernateException {
ClusterTaskPK masterNewRowLockId = new ClusterTaskPK();
@@ -597,6 +700,12 @@ public class ClusterLockUtils {
return masterLock;
}

/**
* Returns all cluster locks that match the specified name.
*
* @param name
* @return
*/
@SuppressWarnings("unchecked")
public static List<ClusterTask> getLocks(String name) {
StatelessSession sess = null;
@@ -611,15 +720,15 @@ public class ClusterLockUtils {
crit.add(nameCrit);
tasks = crit.list();
} catch (Throwable e) {
// TODO
e.printStackTrace();
handler.handle(Priority.ERROR,
"Error retrieving cluster locks for name: " + name, e);
} finally {
if (sess != null) {
try {
sess.close();
} catch (HibernateException e) {
// TODO
e.printStackTrace();
handler.handle(Priority.ERROR,
"Error closing cluster task getLocks session", e);
}
}
}
@@ -21,7 +21,6 @@
package com.raytheon.uf.edex.database.dao;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
@@ -68,7 +67,9 @@ import com.raytheon.uf.common.dataquery.db.QueryResult;
import com.raytheon.uf.common.dataquery.db.QueryResultRow;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.util.FileUtil;
import com.raytheon.uf.edex.database.DataAccessLayerException;
import com.raytheon.uf.edex.database.processor.IDatabaseProcessor;
import com.raytheon.uf.edex.database.query.DatabaseQuery;

/**
@@ -94,7 +95,9 @@ import com.raytheon.uf.edex.database.query.DatabaseQuery;
* 5/14/08 1076 brockwoo Fix for distinct with multiple properties
* Oct 10, 2012 1261 djohnson Incorporate changes to DaoConfig, add generic to {@link IPersistableDataObject}.
* Apr 15, 2013 1868 bsteffen Rewrite mergeAll in PluginDao.
*
* Nov 08, 2013 2361 njensen Changed method signature of saveOrUpdate to take Objects, not PersistableDataObjects
* Dec 13, 2013 2555 rjpeter Added processByCriteria and fixed Generics warnings.
*
* </pre>
*
@@ -243,13 +246,13 @@ public class CoreDao extends HibernateDaoSupport {
return loadAll(daoClass);
}

@SuppressWarnings("unchecked")
public List<Object> loadAll(final Class<?> entity) {
return (List<Object>) txTemplate.execute(new TransactionCallback() {
return txTemplate.execute(new TransactionCallback<List<Object>>() {
@Override
public Object doInTransaction(TransactionStatus status) {
@SuppressWarnings("unchecked")
public List<Object> doInTransaction(TransactionStatus status) {
HibernateTemplate ht = getHibernateTemplate();
return ht.loadAll(entity);
return (List<Object>) ht.loadAll(entity);
}
});
}
@@ -279,10 +282,10 @@ public class CoreDao extends HibernateDaoSupport {
* Null if not found
*/
public <T> PersistableDataObject<T> queryById(final Serializable id) {
@SuppressWarnings("unchecked")
PersistableDataObject<T> retVal = (PersistableDataObject<T>) txTemplate
.execute(new TransactionCallback() {
PersistableDataObject<T> retVal = txTemplate
.execute(new TransactionCallback<PersistableDataObject<T>>() {
@Override
@SuppressWarnings("unchecked")
public PersistableDataObject<T> doInTransaction(
TransactionStatus status) {
return (PersistableDataObject<T>) getHibernateTemplate()
@@ -300,10 +303,10 @@ public class CoreDao extends HibernateDaoSupport {
* @return The object
*/
public <T> PersistableDataObject<T> queryById(final PluginDataObject id) {
@SuppressWarnings("unchecked")
PersistableDataObject<T> retVal = (PersistableDataObject<T>) txTemplate
.execute(new TransactionCallback() {
PersistableDataObject<T> retVal = txTemplate
.execute(new TransactionCallback<PersistableDataObject<T>>() {
@Override
@SuppressWarnings("unchecked")
public PersistableDataObject<T> doInTransaction(
TransactionStatus status) {
DetachedCriteria criteria = DetachedCriteria.forClass(
@@ -334,12 +337,12 @@ public class CoreDao extends HibernateDaoSupport {
* Maximum number of results to return
* @return A list of similar objects
*/
@SuppressWarnings("unchecked")
public <T> List<PersistableDataObject<T>> queryByExample(
final PersistableDataObject<T> obj, final int maxResults) {
List<PersistableDataObject<T>> retVal = (List<PersistableDataObject<T>>) txTemplate
.execute(new TransactionCallback() {
List<PersistableDataObject<T>> retVal = txTemplate
.execute(new TransactionCallback<List<PersistableDataObject<T>>>() {
@Override
@SuppressWarnings("unchecked")
public List<PersistableDataObject<T>> doInTransaction(
TransactionStatus status) {
return getHibernateTemplate().findByExample(obj, 0,
@@ -378,8 +381,8 @@ public class CoreDao extends HibernateDaoSupport {
int rowsDeleted = 0;
try {
// Get a session and create a new criteria instance
rowsDeleted = (Integer) txTemplate
.execute(new TransactionCallback() {
rowsDeleted = txTemplate
.execute(new TransactionCallback<Integer>() {
@Override
public Integer doInTransaction(TransactionStatus status) {
String queryString = query.createHQLDelete();
@@ -415,8 +418,8 @@ public class CoreDao extends HibernateDaoSupport {
List<?> queryResult = null;
try {
// Get a session and create a new criteria instance
queryResult = (List<?>) txTemplate
.execute(new TransactionCallback() {
queryResult = txTemplate
.execute(new TransactionCallback<List<?>>() {
@Override
public List<?> doInTransaction(TransactionStatus status) {
String queryString = query.createHQLQuery();
@@ -445,6 +448,68 @@ public class CoreDao extends HibernateDaoSupport {
return queryResult;
}

/**
* Queries the database in batches using a DatabaseQuery object and sends
* each batch to the processor.
*
* @param query
* The query object
* @param processor
* The processor object
* @return The number of results processed
* @throws DataAccessLayerException
* If the query fails
*/
public int processByCriteria(final DatabaseQuery query,
final IDatabaseProcessor processor) throws DataAccessLayerException {
int rowsProcessed = 0;
try {
// Get a session and create a new criteria instance
rowsProcessed = txTemplate
.execute(new TransactionCallback<Integer>() {
@Override
public Integer doInTransaction(TransactionStatus status) {
String queryString = query.createHQLQuery();
Query hibQuery = getSession(false).createQuery(
queryString);
try {
query.populateHQLQuery(hibQuery,
getSessionFactory());
} catch (DataAccessLayerException e) {
throw new org.hibernate.TransactionException(
"Error populating query", e);
}

if (processor.getBatchSize() > 0) {
hibQuery.setMaxResults(processor.getBatchSize());
} else if (query.getMaxResults() != null) {
hibQuery.setMaxResults(query.getMaxResults());
}

List<?> results = null;
boolean continueProcessing = false;
int count = 0;

do {
hibQuery.setFirstResult(count);
results = hibQuery.list();
continueProcessing = processor.process(results);
count += results.size();
getSession().clear();
} while (continueProcessing && (results != null)
&& (results.size() > 0));
processor.finish();
return count;
}
});

} catch (TransactionException e) {
throw new DataAccessLayerException("Transaction failed", e);
}

return rowsProcessed;
}

public void deleteAll(final List<?> objs) {
txTemplate.execute(new TransactionCallbackWithoutResult() {
@Override
@@ -644,8 +709,8 @@ public class CoreDao extends HibernateDaoSupport {
*/
public QueryResult executeHQLQuery(final String hqlQuery) {

QueryResult result = (QueryResult) txTemplate
.execute(new TransactionCallback() {
QueryResult result = txTemplate
.execute(new TransactionCallback<QueryResult>() {
@Override
public QueryResult doInTransaction(TransactionStatus status) {
Query hibQuery = getSession(false)
@@ -698,8 +763,8 @@ public class CoreDao extends HibernateDaoSupport {
*/
public int executeHQLStatement(final String hqlStmt) {

int queryResult = (Integer) txTemplate
.execute(new TransactionCallback() {
int queryResult = txTemplate
.execute(new TransactionCallback<Integer>() {
@Override
public Integer doInTransaction(TransactionStatus status) {
Query hibQuery = getSession(false).createQuery(hqlStmt);
@@ -723,8 +788,8 @@ public class CoreDao extends HibernateDaoSupport {
public Object[] executeSQLQuery(final String sql) {

long start = System.currentTimeMillis();
List<?> queryResult = (List<?>) txTemplate
.execute(new TransactionCallback() {
List<?> queryResult = txTemplate
.execute(new TransactionCallback<List<?>>() {
@Override
public List<?> doInTransaction(TransactionStatus status) {
return getSession(false).createSQLQuery(sql).list();
@@ -738,8 +803,8 @@ public class CoreDao extends HibernateDaoSupport {
public List<?> executeCriteriaQuery(final List<Criterion> criterion) {

long start = System.currentTimeMillis();
List<?> queryResult = (List<?>) txTemplate
.execute(new TransactionCallback() {
List<?> queryResult = txTemplate
.execute(new TransactionCallback<List<?>>() {
@Override
public List<?> doInTransaction(TransactionStatus status) {

@@ -773,8 +838,8 @@ public class CoreDao extends HibernateDaoSupport {
public int executeSQLUpdate(final String sql) {

long start = System.currentTimeMillis();
int updateResult = (Integer) txTemplate
.execute(new TransactionCallback() {
int updateResult = txTemplate
.execute(new TransactionCallback<Integer>() {
@Override
public Integer doInTransaction(TransactionStatus status) {
return getSession(false).createSQLQuery(sql)
@@ -1007,27 +1072,16 @@ public class CoreDao extends HibernateDaoSupport {
* If reading the file fails
*/
public void runScript(File script) throws DataAccessLayerException {
FileInputStream fileIn;
byte[] bytes = null;
try {
fileIn = new FileInputStream(script);
bytes = FileUtil.file2bytes(script);
} catch (FileNotFoundException e) {
throw new DataAccessLayerException(
"Unable to open input stream to sql script: " + script);
}
byte[] bytes = null;
try {
bytes = new byte[fileIn.available()];
fileIn.read(bytes);
} catch (IOException e) {
throw new DataAccessLayerException(
"Unable to read script contents for script: " + script);
}
try {
fileIn.close();
} catch (IOException e) {
throw new DataAccessLayerException(
"Error closing file input stream to: " + script);
}
runScript(new StringBuffer().append(new String(bytes)));
}

@@ -21,7 +21,6 @@
package com.raytheon.uf.edex.database.plugin;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
@@ -52,7 +51,6 @@ import org.springframework.transaction.support.TransactionCallbackWithoutResult;
import com.raytheon.uf.common.dataplugin.PluginDataObject;
import com.raytheon.uf.common.dataplugin.PluginException;
import com.raytheon.uf.common.dataplugin.annotations.DataURIUtil;
import com.raytheon.uf.common.dataplugin.persist.DefaultPathProvider;
import com.raytheon.uf.common.dataplugin.persist.IHDFFilePathProvider;
import com.raytheon.uf.common.dataplugin.persist.IPersistable;
import com.raytheon.uf.common.dataplugin.persist.PersistableDataObject;
@@ -74,11 +72,11 @@ import com.raytheon.uf.common.serialization.SerializationException;
import com.raytheon.uf.common.serialization.SerializationUtil;
import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.common.time.util.TimeUtil;
import com.raytheon.uf.common.util.FileUtil;
import com.raytheon.uf.edex.core.EdexException;
import com.raytheon.uf.edex.database.DataAccessLayerException;
import com.raytheon.uf.edex.database.dao.CoreDao;
import com.raytheon.uf.edex.database.dao.DaoConfig;
import com.raytheon.uf.edex.database.processor.IDatabaseProcessor;
import com.raytheon.uf.edex.database.purge.PurgeLogger;
import com.raytheon.uf.edex.database.purge.PurgeRule;
import com.raytheon.uf.edex.database.purge.PurgeRuleSet;
@@ -118,6 +116,7 @@ import com.raytheon.uf.edex.database.query.DatabaseQuery;
* Aug 30, 2013 2298 rjpeter Make getPluginName abstract
* Sept 23, 2013 2399 dhladky Changed logging of duplicate records.
* Oct 07, 2013 2392 rjpeter Updated to pass null productKeys as actual null instead of string null.
* Dec 13, 2013 2555 rjpeter Refactored archiving logic into processArchiveRecords.
* </pre>
*
* @author bphillip
@@ -467,7 +466,7 @@ public abstract class PluginDao extends CoreDao {

for (IPersistable persistable : persistables) {
try {
if (((PersistableDataObject) persistable)
if (((PersistableDataObject<?>) persistable)
.isOverwriteAllowed()) {
if (replaceDataStore == null) {
replaceDataStore = DataStoreFactory
@@ -1763,105 +1762,18 @@ public abstract class PluginDao extends CoreDao {
return null;
}

public void archiveData(String archivePath, Calendar insertStartTime,
Calendar insertEndTime) throws DataAccessLayerException,
SerializationException, IOException {
List<PersistableDataObject> pdos = getRecordsToArchive(insertStartTime,
insertEndTime);
if ((pdos != null) && (pdos.size() > 0)) {
// map of file to list of pdo
Map<String, List<PersistableDataObject>> pdoMap = new HashMap<String, List<PersistableDataObject>>();
if (pdos.get(0) instanceof IPersistable) {
IHDFFilePathProvider pathProvider = this.pathProvider;

for (PersistableDataObject pdo : pdos) {
IPersistable persistable = (IPersistable) pdo;
String path = pathProvider.getHDFPath(pluginName,
persistable)
+ File.separator
+ pathProvider.getHDFFileName(pluginName,
persistable);
List<PersistableDataObject> list = pdoMap.get(path);
if (list == null) {
list = new ArrayList<PersistableDataObject>(pdos.size());
pdoMap.put(path, list);
}
list.add(pdo);
}
} else {
// order files by refTime hours
for (PersistableDataObject pdo : pdos) {
String timeString = null;
if (pdo instanceof PluginDataObject) {
PluginDataObject pluginDataObj = (PluginDataObject) pdo;
Date time = pluginDataObj.getDataTime()
.getRefTimeAsCalendar().getTime();
timeString = DefaultPathProvider.fileNameFormat.get()
.format(time);
} else {
// no refTime to use bounded insert query bounds
Date time = insertStartTime.getTime();
timeString = DefaultPathProvider.fileNameFormat.get()
.format(time);
}

String path = pluginName + timeString;
List<PersistableDataObject> list = pdoMap.get(path);
if (list == null) {
list = new ArrayList<PersistableDataObject>(pdos.size());
pdoMap.put(path, list);
}
list.add(pdo);
}

}

for (Map.Entry<String, List<PersistableDataObject>> entry : pdoMap
.entrySet()) {
String path = archivePath + File.separator + pluginName
+ File.separator + entry.getKey();

// remove .h5
int index = path.lastIndexOf('.');
if ((index > 0) && ((path.length() - index) < 5)) {
// ensure its end of string in case extension is
// dropped/changed
path = path.substring(0, index);
}

path += ".bin.gz";

File file = new File(path);

if (file.exists()) {
// pull the
}

// Thrift serialize pdo list
byte[] data = SerializationUtil.transformToThrift(entry
.getValue());

SerializationUtil.transformFromThrift(data);

// save list to disk (in gz format?)
FileUtil.bytes2File(data, file, true);
}
}

}

@SuppressWarnings("unchecked")
public List<PersistableDataObject> getRecordsToArchive(
Calendar insertStartTime, Calendar insertEndTime)
public int processArchiveRecords(Calendar insertStartTime,
Calendar insertEndTime, IDatabaseProcessor processor)
throws DataAccessLayerException {
DatabaseQuery dbQuery = new DatabaseQuery(this.getDaoClass());
dbQuery.addQueryParam("insertTime", insertStartTime,
QueryOperand.GREATERTHANEQUALS);
dbQuery.addQueryParam("insertTime", insertEndTime,
QueryOperand.LESSTHAN);
dbQuery.addOrder("insertTime", true);
dbQuery.addOrder("dataTime.refTime", true);

return (List<PersistableDataObject>) this.queryByCriteria(dbQuery);
return this.processByCriteria(dbQuery, processor);
}

protected static class DuplicateCheckStat {
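With getRecordsToArchive replaced by processArchiveRecords, callers hand in an IDatabaseProcessor (the new interface in the next file) instead of materializing every record at once. A minimal, hypothetical caller might look like the sketch below; the class name, the dao/processor variables and the surrounding error handling are illustrative only:

    import java.util.Calendar;

    import com.raytheon.uf.edex.database.DataAccessLayerException;
    import com.raytheon.uf.edex.database.plugin.PluginDao;
    import com.raytheon.uf.edex.database.processor.IDatabaseProcessor;

    public class ArchiveSweepSketch {

        /**
         * Streams one insert-time window of records through the supplied
         * processor in batches and returns how many rows were processed.
         */
        public int sweep(PluginDao dao, IDatabaseProcessor processor,
                Calendar insertStart, Calendar insertEnd)
                throws DataAccessLayerException {
            return dao.processArchiveRecords(insertStart, insertEnd, processor);
        }
    }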
@@ -0,0 +1,69 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.edex.database.processor;

import java.util.List;

/**
* Interface for working with a batched set of results inside a database
* session. Process can be called multiple times based on the batchSize of the
* processor.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Dec 9, 2013 2555 rjpeter Initial creation
* </pre>
*
* @author rjpeter
* @version 1.0
*/

public interface IDatabaseProcessor {
/**
* Perform any processing on this batch of objects.
*
* @param objects
* @return True if should continue processing, false otherwise.
*/
public boolean process(List<?> objects);

/**
* Perform any post processing if necessary.
*/
public void finish();

/**
* Get the batch size of the query.
*
* @return
*/
public int getBatchSize();

/**
* Set the batch size of the query.
*
* @param batchSize
*/
public void setBatchSize(int batchSize);
}
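A minimal sketch of an implementation, assuming nothing beyond the contract above: process(...) is invoked once per batch inside the session and should return true to request the next batch, while finish() runs after the last one. The record-counting behavior and the class name are purely illustrative.

    import java.util.List;

    import com.raytheon.uf.edex.database.processor.IDatabaseProcessor;

    /**
     * Illustrative IDatabaseProcessor that just counts rows; a real processor
     * (e.g. for archiving) would transform or persist each batch instead.
     */
    public class CountingProcessor implements IDatabaseProcessor {

        private int batchSize = 1000;

        private int total = 0;

        @Override
        public boolean process(List<?> objects) {
            // Called once per batch; return true to keep processing.
            total += objects.size();
            return true;
        }

        @Override
        public void finish() {
            // Post-processing hook; nothing to flush for a simple count.
        }

        @Override
        public int getBatchSize() {
            return batchSize;
        }

        @Override
        public void setBatchSize(int batchSize) {
            this.batchSize = batchSize;
        }

        public int getTotal() {
            return total;
        }
    }

Such an object is the kind of argument CoreDao.processByCriteria(...) and PluginDao.processArchiveRecords(...) shown earlier expect.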
@@ -35,6 +35,7 @@
# 08/17/12 DR 15304 D. Friedman Use unique output file names
# 10/12/12 DR 15418 D. Friedman Use unique attachment file names
# 11/20/13 DR 16777 D. Friedman Add a test mode.
# 12/05/16 DR 16842 D. Friedman Do not set product ID on MhsMessage
#
#

@@ -412,7 +413,6 @@ def sendWANMsg(productId, prodPathName, receivingSite, handling,
    if attachedFilename:
        mhsMsg.addEnclosure(attachedFilename)

    mhsMsg.setProductId(productId)
    #mhsMsg.setBodyFile(prodPathName)
    mhsMsg.addEnclosure(prodPathName)
    if priority == 0:
@@ -18,11 +18,6 @@
<constructor-arg value="jms-dist:queue:Ingest.dpa"/>
</bean>

<bean factory-bean="manualProc"
factory-method="registerSecondaryPlugin">
<constructor-arg value="dpa" />
</bean>

<camelContext id="dpa-camel"
xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler">
@@ -18,11 +18,6 @@
<constructor-arg value="jms-dist:queue:Ingest.dhr"/>
</bean>

<bean factory-bean="manualProc"
factory-method="registerSecondaryPlugin">
<constructor-arg value="dhr" />
</bean>

<camelContext id="nonClusteredDHRroutes" xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler">
<!-- Begin non-clustered dhr Routes -->
@@ -38,9 +38,18 @@
<bean id="damTxManager"
class="org.springframework.orm.hibernate3.HibernateTransactionManager">
<property name="sessionFactory" ref="damSessionFactory" />
</bean>

</bean>

<bean id="mpeFieldGenService" class="com.raytheon.uf.edex.ohd.pproc.MpeFieldGenSrv" />



<bean factory-bean="manualProc"
factory-method="registerSecondaryPlugin">
<constructor-arg value="dpa" />
</bean>

<bean factory-bean="manualProc"
factory-method="registerSecondaryPlugin">
<constructor-arg value="dhr" />
</bean>

</beans>
@@ -19,5 +19,6 @@ export CLASSPATH=$DB_DRIVER_PATH
CLASSPATH=$CLASSPATH:$WHFS_BIN_DIR/fcstservice.jar

#Execute Lhvm
unset GNOME_DESKTOP_SESSION_ID
xterm -T "fcstservice" -iconic \
-e $SYS_JAVA_DIR/bin/java ohd.hseb.fcstservice.LhvmApplicationWindow $JDBCURL &
@@ -20,4 +20,9 @@
<constructor-arg ref="awwPluginName" />
<constructor-arg ref="awwProperties" />
</bean>

<bean factory-bean="manualProc"
factory-method="registerSecondaryPlugin">
<constructor-arg ref="awwPluginName" />
</bean>
</beans>
@@ -18,6 +18,8 @@
<constructor-arg ref="nctextProperties"/>
</bean>


<bean factory-bean="manualProc"
factory-method="registerSecondaryPlugin">
<constructor-arg ref="nctextPluginName" />
</bean>
</beans>
@@ -16,11 +16,6 @@
<constructor-arg value="jms-dist:queue:ingest.nctext" />
</bean>

<bean factory-bean="manualProc"
factory-method="registerSecondaryPlugin">
<constructor-arg value="nctext" />
</bean>

<bean id="nctextCamelRegistered" factory-bean="contextManager"
factory-method="register" depends-on="persistCamelRegistered">
<constructor-arg ref="nctext-camel"/>
File diff suppressed because it is too large
@@ -412,13 +412,13 @@ if [ "${1}" = "-viz" ]; then
#buildRPM "awips2-common-base"
#buildRPM "awips2-python-dynamicserialize"
#buildRPM "awips2-python"
#buildRPM "awips2-adapt-native"
buildRPM "awips2-adapt-native"
#unpackHttpdPypies
#if [ $? -ne 0 ]; then
# exit 1
#fi
#buildRPM "awips2-httpd-pypies"
#buildRPM "awips2-hydroapps-shared"
buildRPM "awips2-hydroapps-shared"
#buildRPM "awips2-rcm"
#buildRPM "awips2-tools"
#buildRPM "awips2-cli"