Merge branch 'master_14.2.1' into master_14.2.2

Merge (14.2.1-21 into 14.2.2-5)


Former-commit-id: 9a017e543fc6be9fd0d7b9361bd2226bf2af6211
This commit is contained in:
Brian.Dyke 2014-04-29 12:07:21 -04:00
commit 35b637c3e2
42 changed files with 528 additions and 737 deletions

View file

@ -31,6 +31,7 @@ package com.raytheon.uf.viz.datadelivery.common.ui;
* Jun 06, 2012 lvenable Initial creation
* Apr 10, 2013 1891 djohnson Declare variable as List.
* Feb 07, 2014 2453 mpduff Added getSize().
* Apr 18, 2014 3012 dhladky Null check.
*
* </pre>
*
@ -134,7 +135,11 @@ public class TableDataManager<T extends ITableData<T>> implements ISortTable {
if (index >= 0 && index < tableData.size()) {
return tableData.get(index);
} else {
return tableData.get(0);
if (!tableData.isEmpty()) {
return tableData.get(0);
} else {
return null;
}
}
}

View file

@ -85,6 +85,7 @@ import com.raytheon.uf.viz.datadelivery.utils.NotificationHandler;
* Oct 15, 2013 2451 skorolev Get highlighted rows after message update.
* Nov 01, 2013 2431 skorolev Changed labels on the table.
* Feb 07, 2014 2453 mpduff Refactored.
* Apr 18, 2014 3012 dhladky Null check.
* </pre>
*
* @author lvenable
@ -941,6 +942,9 @@ public class NotificationTableComp extends TableComp implements ITableFind {
for (int index : indices) {
NotificationRowData rowData = filteredTableList
.getDataRow(index);
if (rowData == null) {
continue;
}
selectedRowIds.add(rowData.getId());
}
}

View file

@ -155,6 +155,7 @@ import com.raytheon.viz.ui.presenter.IDisplay;
* Feb 04, 2014 2722 mpduff Add auto-refresh task.
* Feb 14, 2014 2806 mpduff Disable activate/deactivate buttons when viewing other site's subscriptions
* Feb 11, 2014 2771 bgonzale Use Data Delivery ID instead of Site.
* Apr 18, 2014 3012 dhladky Null check.
*
* </pre>
*
@ -997,18 +998,20 @@ public class SubscriptionManagerDlg extends CaveSWTDialog implements
SubscriptionManagerRowData removedItem = tableComp
.getSubscriptionData().getDataRow(idx);
Subscription sub = removedItem.getSubscription();
if (sub instanceof SharedSubscription) {
sub.getOfficeIDs().remove(CURRENT_SITE);
if (sub.getOfficeIDs().size() > 0) {
subsToUpdate.add(sub);
if (sub != null) {
if (sub instanceof SharedSubscription) {
sub.getOfficeIDs().remove(CURRENT_SITE);
if (sub.getOfficeIDs().size() > 0) {
subsToUpdate.add(sub);
} else {
subsToDelete.add(sub);
}
} else {
subsToDelete.add(sub);
subsToDelete.add(removedItem.getSubscription());
}
} else {
subsToDelete.add(removedItem.getSubscription());
}
deleteList.add(removedItem);
deleteList.add(removedItem);
}
}
String message = getMessage(subsToDelete, subsToUpdate);
@ -1182,7 +1185,9 @@ public class SubscriptionManagerDlg extends CaveSWTDialog implements
int idx = selectionIndices[i];
SubscriptionManagerRowData rowData = tableComp
.getSubscriptionData().getDataRow(idx);
if (rowData == null) {
continue;
}
Subscription sub = rowData.getSubscription();
if (activate) {
sub.activate();

View file

@ -112,6 +112,7 @@ import com.raytheon.uf.viz.datadelivery.utils.DataDeliveryUtils.TABLE_TYPE;
* Jan 08, 2014 2642 mpduff Enable/disable menus based on site, allow user to add their site to a shared sub.
* Feb 04, 2014 2722 mpduff Add last update time.
* Feb 11, 2014 2771 bgonzale Use Data Delivery ID instead of Site.
* Apr 18, 2014 3012 dhladky Null check.
* @version 1.0
*/
@ -323,6 +324,9 @@ public class SubscriptionTableComp extends TableComp implements IGroupAction {
SubscriptionManagerRowData rowData = subManagerData
.getDataRow(selectionIndices[i]);
if (rowData == null) {
continue;
}
// get the subscription details to be displayed to the user
printDetails.append(DataDeliveryUtils.formatDetails(rowData
.getSubscription()));

View file

@ -74,6 +74,7 @@ import com.raytheon.uf.viz.datadelivery.utils.DataDeliveryUtils.TABLE_TYPE;
* denied pending messages.
* Apr 05, 2013 1841 djohnson Refresh entire table on receiving a notification of the correct type.
* Apr 10, 2013 1891 djohnson Move logic to get column display text to the column definition, fix sorting.
* Apr 18, 2014 3012 dhladky Null check.
* </pre>
*
* @author lvenable
@ -270,6 +271,9 @@ public class SubApprovalTableComp extends TableComp {
SubscriptionApprovalRowData rowData = pendingSubData.getDataRow(table
.getSelectionIndex());
if (rowData == null) {
return;
}
// Get the subscription object
InitialPendingSubscription pendingSub = rowData.getSubscription();
diffDetails.append("Subscription Name: ")

View file

@ -95,6 +95,7 @@ import com.raytheon.viz.ui.presenter.IDisplay;
* Jul 26, 2013 2232 mpduff Refactored Data Delivery permissions.
* Sep 03, 2013 2315 mpduff Add subscription name to denied approval message.
* Oct 23, 2013 2292 mpduff Move subscription overlap checks to edex.
* Apr 18, 2014 3012 dhladky Null check.
*
* </pre>
*
@ -367,13 +368,15 @@ public class SubscriptionApprovalDlg extends CaveSWTDialog implements
for (int idx : tableComp.getTable().getSelectionIndices()) {
SubscriptionApprovalRowData approvedItem = pendingSubData
.getDataRow(idx);
if (site) {
approveList.add(approvedItem);
} else {
if (approvedItem.isOwner(user)) {
if (approvedItem != null) {
if (site) {
approveList.add(approvedItem);
} else {
notApprovedSubList.add(approvedItem.getSubName());
if (approvedItem.isOwner(user)) {
approveList.add(approvedItem);
} else {
notApprovedSubList.add(approvedItem.getSubName());
}
}
}
}
@ -460,12 +463,13 @@ public class SubscriptionApprovalDlg extends CaveSWTDialog implements
for (int idx : tableComp.getTable().getSelectionIndices()) {
SubscriptionApprovalRowData removedItem = pendingSubData
.getDataRow(idx);
if (site) {
deleteList.add(removedItem);
} else {
if (removedItem.isOwner(user)) {
if (removedItem != null) {
if (site) {
deleteList.add(removedItem);
} else {
if (removedItem.isOwner(user)) {
deleteList.add(removedItem);
}
}
}
}

View file

@ -25,6 +25,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
@ -34,6 +35,7 @@ import java.util.Scanner;
import com.raytheon.edex.plugin.gfe.config.IFPServerConfig;
import com.raytheon.edex.plugin.gfe.config.IFPServerConfigManager;
import com.raytheon.edex.plugin.gfe.exception.GfeConfigurationException;
import com.raytheon.edex.plugin.gfe.server.IFPServer;
import com.raytheon.uf.common.dataplugin.gfe.GridDataHistory;
import com.raytheon.uf.common.dataplugin.gfe.GridDataHistory.OriginType;
import com.raytheon.uf.common.dataplugin.gfe.RemapGrid;
@ -46,6 +48,7 @@ import com.raytheon.uf.common.dataplugin.gfe.db.objects.GridParmInfo;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.TimeConstraints;
import com.raytheon.uf.common.dataplugin.gfe.discrete.DiscreteKey;
import com.raytheon.uf.common.dataplugin.gfe.exception.GfeException;
import com.raytheon.uf.common.dataplugin.gfe.grid.Grid2DByte;
import com.raytheon.uf.common.dataplugin.gfe.grid.Grid2DFloat;
import com.raytheon.uf.common.dataplugin.gfe.slice.DiscreteGridSlice;
@ -72,6 +75,7 @@ import com.vividsolutions.jts.geom.Coordinate;
* Apr 13, 2011 #8393 dgilling Initial creation
* 02/19/13 #1637 randerso Added exception handling for Discrete and Weather
* 10/31/2013 #2508 randerso Change to use DiscreteGridSlice.getKeys()
* 04/22/2014 #3050 randerso Allow exceptions to propagate to caller from readASCIIGridData
*
* </pre>
*
@ -353,12 +357,14 @@ public class ASCIIGrid {
}
public String readASCIIGridData(File aGridData)
throws FileNotFoundException {
throws FileNotFoundException, GfeException, ParseException {
List<IGridSlice> gridSlices = new ArrayList<IGridSlice>();
Scanner inputStream = new Scanner(aGridData, "US-ASCII");
while (true) {
try {
Scanner inputStream = null;
try {
inputStream = new Scanner(aGridData, "US-ASCII");
while (true) {
// read the ASCIIGRID keyword
// if we have an ASCIIGRID to read
if (!inputStream.next().equals("ASCIIGRID")) {
@ -421,8 +427,12 @@ public class ASCIIGrid {
float yExtent = inputStream.nextFloat();
// make the GridLocation
IFPServerConfig config = IFPServerConfigManager
.getServerConfig(dbSiteId);
IFPServer ifpServer = IFPServer.getActiveServer(dbSiteId);
if (ifpServer == null) {
throw new GfeException("No active IFPServer for site: "
+ dbSiteId);
}
IFPServerConfig config = ifpServer.getConfig();
GridLocation baseGLoc = config.dbDomain();
ProjectionData projData = config.getProjectionData(projId);
GridLocation gLocation = new GridLocation(dbSiteId, projData,
@ -600,14 +610,12 @@ public class ASCIIGrid {
if (!inputStream.hasNext()) {
break;
}
} catch (Exception e) {
statusHandler.handle(Priority.PROBLEM,
"Caught exception in readASCIIGridData()", e);
break;
}
} finally {
if (inputStream != null) {
inputStream.close();
}
}
inputStream.close();
this.gridSlices = gridSlices;
return "";

View file

@ -61,6 +61,9 @@ import com.raytheon.uf.common.status.UFStatus;
* Apr 21, 2011 dgilling Initial creation
* Apr 23, 2013 1949 rjpeter Removed extra lock table look up
* Jun 13, 2013 #2044 randerso Refactored to use IFPServer
* Apr 21, 2014 #3050 randerso Get the IFPServer instance based on the
* site in the ParmID
*
* </pre>
*
* @author dgilling
@ -83,9 +86,6 @@ public class SaveASCIIGridsHandler extends BaseGfeRequestHandler implements
@Override
public ServerResponse<String> handleRequest(SaveASCIIGridsRequest request)
throws Exception {
IFPServer ifpServer = getIfpServer(request);
GridParmManager gridParmMgr = ifpServer.getGridParmMgr();
LockManager lockMgr = ifpServer.getLockMgr();
ServerResponse<String> sr = new ServerResponse<String>();
@ -97,10 +97,23 @@ public class SaveASCIIGridsHandler extends BaseGfeRequestHandler implements
sr.addMessage(msg);
}
String prevSiteID = null;
int ngrids = agrid.getGridSlices().size();
for (int i = 0; i < ngrids; i++) {
ParmID pid = agrid.getGridSlices().get(i).getGridInfo().getParmID();
// get the server for this site
String siteID = pid.getDbId().getSiteId();
IFPServer ifpServer = IFPServer.getActiveServer(siteID);
if (ifpServer == null && !siteID.equals(prevSiteID)) {
sr.addMessage("No active IFPServer for site: " + siteID);
continue;
}
prevSiteID = siteID;
GridParmManager gridParmMgr = ifpServer.getGridParmMgr();
LockManager lockMgr = ifpServer.getLockMgr();
// get a list of available databases, see if the grid is part of an
// existing database.
ServerResponse<List<DatabaseID>> srDbInv = gridParmMgr

View file

@ -19,6 +19,7 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
* 12 Sept, 2012 1038 dhladky Initial creation
* 1 May 2013 1959 dhladky remove backup registry references
* 23 Oct, 2013 2361 njensen Remove ISerializableObject
* 15 Apr, 2014 3012 dhladky Added retention time for this provider in registry.
*
* </pre>
*
@ -37,6 +38,11 @@ public class HarvesterConfig {
@XmlElement(name = "agent")
@DynamicSerializeElement
private Agent agent;
/** default of 7 days **/
@XmlElement(name = "retention")
@DynamicSerializeElement
private String retention = "7";
public HarvesterConfig() {
@ -58,4 +64,12 @@ public class HarvesterConfig {
this.provider = provider;
}
/**
 * Gets the registry retention period for this provider's metadata, in days.
 * Stored as a String; defaults to "7". A negative value (e.g. "-1") appears
 * to mean the metadata is never purged — confirm against the harvester
 * purge logic.
 *
 * @return retention period in days, as a String
 */
public String getRetention() {
return retention;
}
/**
 * Sets the registry retention period for this provider's metadata.
 *
 * @param retention
 *            retention period in days, as a String
 */
public void setRetention(String retention) {
this.retention = retention;
}
}

View file

@ -19,6 +19,8 @@
<description>MADIS Test LatLon Coverage</description>
</projection>
</provider>
<!-- MADIS doesn't purge Data Set Meta Data -->
<retention>-1</retention>
<agent xsi:type="ogcAgent" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<dateFormat>HHddMMMyyyy</dateFormat>
<layer name="madis-conus" namespace="http://madis.edex.uf.raytheon.com">

View file

@ -28,6 +28,8 @@
<url>http://nomads.ncep.noaa.gov:9090/dods/</url>
</connection>
</provider>
<!-- default one week of DataSetMetaData retention -->
<retention>7</retention>
<agent xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="crawlAgent">
<crawlDir>/awips2/crawl</crawlDir>

View file

@ -15,6 +15,8 @@
<description>METAR Test LatLon Coverage</description>
</projection>
</provider>
<!-- retention time in days -->
<retention>1</retention>
<agent xsi:type="ogcAgent" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<dateFormat>HHddMMMyyyy</dateFormat>
<layer name="metar">

View file

@ -43,7 +43,8 @@ import com.raytheon.uf.common.time.util.ImmutableDate;
* Nov 19, 2012 1166 djohnson Clean up JAXB representation of registry objects.
* Sept, 30 2013 1797 dhladky Made generic based on Time
* Dec 20, 2013 2636 mpduff Add a dataset availability offset
* jan 23, 2013 2584 dhladky Versions.
* jan 23, 2013 2584 dhladky Versions.
* Apr 14, 2013 3012 dhladky Removed unused methods.
* </pre>
*
* @author dhladky
@ -229,13 +230,4 @@ public abstract class DataSetMetaData<T extends Time> {
return url;
}
/**
* Accepts a {@link IDataSetMetaDataVisitor} which can perform arbitrary
* processing on this {@link DataSetMetaData} instance. Should be defined by
* each concrete class instance.
*
* @param visitor
* the visitor
*/
public abstract void accept(IDataSetMetaDataVisitor visitor);
}

View file

@ -1,50 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.common.datadelivery.registry;
/**
* Defines a type that can visit {@link DataSetMetaData} instances and perform
* some activity.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Sep 4, 2012 1102 djohnson Initial creation
*
* </pre>
*
* @author djohnson
* @version 1.0
*/
public interface IDataSetMetaDataVisitor {
/**
* Visits an {@link OpenDapGriddedDataSetMetaData} instance.
*
* @param metaData
* the metaData
*/
void visit(OpenDapGriddedDataSetMetaData metaData);
}

View file

@ -36,7 +36,8 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Sep 4, 2012 1102 djohnson Initial creation
* jan 23, 2013 2584 dhladky Versions
* jan 23, 2014 2584 dhladky Versions
* Apr 14, 2014 3012 dhladky Unneeded method removed.
*
* </pre>
*
@ -49,12 +50,4 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
@DynamicSerialize
public class OpenDapGriddedDataSetMetaData extends GriddedDataSetMetaData {
/**
* {@inheritDoc}
*/
@Override
public void accept(IDataSetMetaDataVisitor visitor) {
visitor.visit(this);
}
}

View file

@ -34,6 +34,7 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
* ------------ ---------- ----------- --------------------------
* Aug 20, 2012 754 dhladky Initial creation
* Sept 30, 2013 1797 dhladky Generics
* Apr 14, 2014 3012 dhladky Unneeded method removed.
*
* </pre>
*
@ -50,10 +51,5 @@ public class PointDataSetMetaData extends DataSetMetaData<PointTime> {
public PointDataSetMetaData() {
}
@Override
public void accept(IDataSetMetaDataVisitor visitor) {
// TODO: not sure what this does?
}
}

View file

@ -20,7 +20,6 @@ package com.raytheon.uf.common.datadelivery.retrieval.xml;
* further licensing information.
**/
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
@ -28,6 +27,7 @@ import javax.xml.bind.annotation.XmlRootElement;
import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
import com.raytheon.uf.common.time.util.TimeUtil;
/**
* Data Set Info XML Object.
@ -39,6 +39,7 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Jan 14, 2014 dhladky Initial creation.
* Apr 09, 2014 #3012 dhladky Fixed incorrect default calc.
*
* </pre>
*
@ -54,25 +55,26 @@ public class DataSetInformation {
@XmlElement(name = "modelName", type = String.class)
@DynamicSerializeElement
protected String modelName;
@XmlElement(name = "multiplier", type = Double.class)
@DynamicSerializeElement
protected Double multiplier;
@XmlElement(name = "modelRunIncrement", type = Integer.class)
@DynamicSerializeElement
protected Integer modelRunIncrement;
@XmlElement(name = "defaultOffset", type = Integer.class)
@DynamicSerializeElement
protected Integer defaultOffset;
public DataSetInformation() {
}
public DataSetInformation(String modelName, Double multiplier, int modelRunIncrement, int defaultOffset) {
public DataSetInformation(String modelName, Double multiplier,
int modelRunIncrement, int defaultOffset) {
this.modelName = modelName;
this.multiplier = multiplier;
this.modelRunIncrement = modelRunIncrement;
@ -111,8 +113,12 @@ public class DataSetInformation {
this.defaultOffset = defaultOffset;
}
/**
* The range is in minutes, so we need multiplier * model run increment * minutes per hour
* @return
*/
public int getRange() {
    // NOTE(review): the rendered diff showed both the old calculation
    // (multiplier * increment, in hours) and the corrected one; only the
    // corrected form is kept. multiplier * modelRunIncrement yields hours,
    // so convert to minutes, the unit callers expect (see class changelog
    // "Fixed incorrect default calc").
    return (int) ((getMultiplier() * getModelRunIncrement()) * TimeUtil.MINUTES_PER_HOUR);
}
}

View file

@ -21,6 +21,7 @@
package com.raytheon.uf.edex.database.dao;
import java.io.Serializable;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
@ -41,6 +42,7 @@ import org.springframework.transaction.annotation.Transactional;
import com.raytheon.uf.common.dataplugin.persist.IPersistableDataObject;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.edex.database.DataAccessLayerException;
/**
@ -65,6 +67,7 @@ import com.raytheon.uf.edex.database.DataAccessLayerException;
* 12/9/2013 2613 bphillip Added flushAndClearSession method
* Jan 17, 2014 2459 mpduff Added null check to prevent NPE.
* 2/13/2014 2769 bphillip Added read-only flag to query methods and loadById method
* 4/18/2014 3012 dhladky Diagnostic addition.
*
* </pre>
*
@ -214,7 +217,26 @@ public abstract class SessionManagedDao<IDENTIFIER extends Serializable, ENTITY
*/
@Transactional(propagation = Propagation.REQUIRED, readOnly = true)
public List<ENTITY> query(String queryString, Object... params) {
return executeHQLQuery(queryString, 0, params);
List<ENTITY> stuff = executeHQLQuery(queryString, 0, params);
// Used for diagnostics
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
statusHandler.debug("Query: " + queryString);
for (Object o : params) {
if (o instanceof Calendar) {
statusHandler.debug("param: "
+ ((Calendar) o).getTime().toString());
} else {
statusHandler.debug("param: " + o.toString());
}
}
statusHandler.debug("return size: " + stuff.size() + "\n");
}
return stuff;
}
@Transactional(propagation = Propagation.REQUIRED, readOnly = true)

View file

@ -35,7 +35,8 @@ Require-Bundle: com.raytheon.uf.common.status;bundle-version="1.12.1174",
org.eclipse.jetty.io;bundle-version="8.1.3",
org.eclipse.jetty.server;bundle-version="8.1.3",
com.raytheon.uf.common.serialization.comm;bundle-version="1.12.1174",
org.quartz;bundle-version="1.8.6"
org.quartz;bundle-version="1.8.6",
com.raytheon.uf.edex.datadelivery.registry;bundle-version="1.0.0"
Export-Package: com.raytheon.uf.edex.datadelivery.bandwidth,
com.raytheon.uf.edex.datadelivery.bandwidth.dao,
com.raytheon.uf.edex.datadelivery.bandwidth.hibernate,

View file

@ -147,6 +147,7 @@ import com.raytheon.uf.edex.registry.ebxml.exception.EbxmlRegistryException;
* of already scheduled BandwidthAllocations.
* Feb 11, 2014 2771 bgonzale Added handler for GET_DATADELIVERY_ID request.
* Feb 10, 2014 2636 mpduff Changed how retrieval plan is updated over time.
* Apr 09, 2014 3012 dhladky Range the queries for metadata checks to subscriptions.
*
* </pre>
*
@ -172,6 +173,12 @@ public abstract class BandwidthManager<T extends Time, C extends Coverage>
protected final BandwidthDaoUtil<T, C> bandwidthDaoUtil;
private final IBandwidthDbInit dbInit;
/** used for min time range **/
public static final String MIN_RANGE_TIME = "min";
/** used for max time range **/
public static final String MAX_RANGE_TIME = "max";
// Instance variable and not static, because there are multiple child
// implementation classes which should each have a unique prefix
@ -370,7 +377,9 @@ public abstract class BandwidthManager<T extends Time, C extends Coverage>
.getBaseReferenceTime();
Calendar startTime = TimeUtil.newGmtCalendar(retrievalTime
.getTime());
startTime.add(Calendar.MINUTE,
retrieval.getDataSetAvailablityDelay());
int maxLatency = retrieval.getSubscriptionLatency();
retrieval.setStartTime(startTime);
@ -449,8 +458,14 @@ public abstract class BandwidthManager<T extends Time, C extends Coverage>
@Override
public List<BandwidthAllocation> schedule(Subscription<T, C> subscription) {
List<BandwidthAllocation> unscheduled = null;
List<BandwidthAllocation> unscheduled = Collections.emptyList();
if (subscription instanceof RecurringSubscription) {
if (!((RecurringSubscription<T, C>) subscription).shouldSchedule()) {
return unscheduled;
}
}
final DataType dataSetType = subscription.getDataSetType();
switch (dataSetType) {
case GRID:
@ -469,7 +484,7 @@ public abstract class BandwidthManager<T extends Time, C extends Coverage>
return unscheduled;
}
/**
* Update the retrieval plan for this subscription.
*
@ -727,7 +742,10 @@ public abstract class BandwidthManager<T extends Time, C extends Coverage>
Subscription<T, C> subscription) {
List<BandwidthAllocation> unscheduled = schedule(subscription,
((PointTime) subscription.getTime()).getInterval());
unscheduled.addAll(getMostRecent(subscription, false));
// add an adhoc if one exists and isn't in startup mode
if (EDEXUtil.isRunning()) {
unscheduled.addAll(getMostRecent(subscription, false));
}
return unscheduled;
}
@ -751,10 +769,20 @@ public abstract class BandwidthManager<T extends Time, C extends Coverage>
if (subscribedToCycles) {
unscheduled = schedule(subscription, Sets.newTreeSet(cycles));
}
// add an adhoc if one exists and isn't in startup mode
if (EDEXUtil.isRunning()) {
unscheduled.addAll(getMostRecent(subscription, true));
}
return unscheduled;
}
/**
* Schedule the most recent dataset update if one exists.
* @param subscription
* @param useMostRecentDataSetUpdate
* @return
*/
private List<BandwidthAllocation> getMostRecent(
Subscription<T, C> subscription, boolean useMostRecentDataSetUpdate) {
List<BandwidthAllocation> unscheduled = Collections.emptyList();
@ -1700,4 +1728,27 @@ public abstract class BandwidthManager<T extends Time, C extends Coverage>
return dataSetMetaDataTime;
}
/**
 * Builds a [min, max] Date range spanning the entire clock hour containing
 * the given base reference time. Used to range queries for subscriptions
 * whose data does not arrive exactly on the hour.
 *
 * @param baseReferenceTime
 *            the base reference time whose hour defines the range
 * @return map with keys {@link #MIN_RANGE_TIME} (top of the hour) and
 *         {@link #MAX_RANGE_TIME} (end of the hour)
 */
public static Map<String, Date> getBaseReferenceTimeDateRange(Calendar baseReferenceTime) {
    Map<String, Date> dates = new HashMap<String, Date>(2);
    // Min bound: HH:00:00.000 — also clear milliseconds, otherwise stray
    // millis from baseReferenceTime leak into the lower bound.
    Calendar min = TimeUtil.newGmtCalendar(baseReferenceTime.getTime());
    min.set(Calendar.MINUTE, 0);
    min.set(Calendar.SECOND, 0);
    min.set(Calendar.MILLISECOND, 0);
    // Max bound: HH:59:59.999 — set milliseconds so the final second of
    // the hour is fully covered by inclusive comparisons.
    Calendar max = TimeUtil.newGmtCalendar(baseReferenceTime.getTime());
    max.set(Calendar.MINUTE, 59);
    max.set(Calendar.SECOND, 59);
    max.set(Calendar.MILLISECOND, 999);
    dates.put(MIN_RANGE_TIME, min.getTime());
    dates.put(MAX_RANGE_TIME, max.getTime());
    return dates;
}
}

View file

@ -127,6 +127,7 @@ import com.raytheon.uf.edex.datadelivery.util.DataDeliveryIdUtil;
* Feb 11, 2014 2771 bgonzale Use Data Delivery ID instead of Site.
* Feb 10, 2014 2636 mpduff Pass Network map to be scheduled.
* Feb 21, 2014, 2636 dhladky Try catch to keep MaintTask from dying.
* Apr 09, 2014 3012 dhladky Range the queries for metadata checks, adhoc firing prevention.
* </pre>
*
* @author djohnson
@ -292,7 +293,6 @@ public abstract class EdexBandwidthManager<T extends Time, C extends Coverage>
* @param subscription
* The completed subscription.
*/
@SuppressWarnings("unchecked")
@Subscribe
public void subscriptionFulfilled(
SubscriptionRetrievalFulfilled subscriptionRetrievalFulfilled) {
@ -382,6 +382,7 @@ public abstract class EdexBandwidthManager<T extends Time, C extends Coverage>
Subscription<T, C> sub = getRegistryObjectById(subscriptionHandler,
re.getId());
sendSubscriptionNotificationEvent(re, sub);
}
}
@ -447,9 +448,9 @@ public abstract class EdexBandwidthManager<T extends Time, C extends Coverage>
+ dsmd.getDataSetName() + "] to [rap_f]");
dsmd.setDataSetName("rap_f");
}
// TODO: End of hack..
BandwidthEventBus.publish(dsmd);
} else {
statusHandler.error("No DataSetMetaData found for id [" + id + "]");
}
@ -489,6 +490,7 @@ public abstract class EdexBandwidthManager<T extends Time, C extends Coverage>
@Subscribe
public void updateGriddedDataSetMetaData(
GriddedDataSetMetaData dataSetMetaData) throws ParseException {
// Daily/Hourly/Monthly datasets
if (dataSetMetaData.getCycle() == GriddedDataSetMetaData.NO_CYCLE) {
updateDataSetMetaDataWithoutCycle((DataSetMetaData<T>) dataSetMetaData);
@ -497,6 +499,7 @@ public abstract class EdexBandwidthManager<T extends Time, C extends Coverage>
else {
updateDataSetMetaDataWithCycle((DataSetMetaData<T>) dataSetMetaData);
}
}
/**
@ -664,19 +667,20 @@ public abstract class EdexBandwidthManager<T extends Time, C extends Coverage>
DataSetMetaData<T> dataSetMetaData) throws ParseException {
BandwidthDataSetUpdate dataset = bandwidthDao
.newBandwidthDataSetUpdate(dataSetMetaData);
// Looking for active subscriptions to the dataset.
List<SubscriptionRetrieval> subscriptions = bandwidthDao
.getSubscriptionRetrievals(dataset.getProviderName(),
dataset.getDataSetName(), dataset.getDataSetBaseTime());
// Range the query for subscriptions within the baseReferenceTime hour.
// SOME models, RAP and RTMA, come not exactly on the hour. This causes these
// subscriptions to be missed because baseReferenceTimes are on the hour.
Map<String, Date> timeRange = getBaseReferenceTimeDateRange(dataset.getDataSetBaseTime());
final SortedSet<SubscriptionRetrieval> subscriptions = bandwidthDao
.getSubscriptionRetrievals(dataset.getProviderName(), dataset.getDataSetName(),
RetrievalStatus.SCHEDULED, timeRange.get(MIN_RANGE_TIME), timeRange.get(MAX_RANGE_TIME));
if (!subscriptions.isEmpty()) {
// Loop through the scheduled SubscriptionRetrievals and mark
// the scheduled retrievals as ready for retrieval
for (SubscriptionRetrieval retrieval : subscriptions) {
// TODO: Evaluate the state changes for receiving multiple
// dataset update messages. This seems to be happening
// quite a bit.
if (RetrievalStatus.SCHEDULED.equals(retrieval.getStatus())) {
// Need to update the Subscription Object in the
@ -720,14 +724,12 @@ public abstract class EdexBandwidthManager<T extends Time, C extends Coverage>
}
} else {
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
statusHandler
.debug("No Subscriptions scheduled for BandwidthDataSetUpdate ["
+ dataset.getIdentifier()
+ "] base time ["
+ BandwidthUtil.format(dataset
.getDataSetBaseTime()) + "]");
}
statusHandler
.debug("No Subscriptions scheduled for BandwidthDataSetUpdate ["
+ dataset.getIdentifier()
+ "] base time ["
+ BandwidthUtil.format(dataset.getDataSetBaseTime())
+ "]");
}
}
@ -864,7 +866,8 @@ public abstract class EdexBandwidthManager<T extends Time, C extends Coverage>
+ plan.getPlanEnd().getTime());
statusHandler.info("MaintenanceTask: Update schedule");
}
// Find DEFERRED Allocations and load them into the plan...
// Find DEFERRED Allocations and load them into the
// plan...
List<BandwidthAllocation> deferred = bandwidthDao
.getDeferred(plan.getNetwork(), plan.getPlanEnd());
if (!deferred.isEmpty()) {
@ -883,8 +886,11 @@ public abstract class EdexBandwidthManager<T extends Time, C extends Coverage>
+ " Subscriptions processed.");
} catch (Throwable t) {
statusHandler.error("MaintenanceTask: Subscription update scheduling has failed", t);
statusHandler
.error("MaintenanceTask: Subscription update scheduling has failed",
t);
}
}
}
}

View file

@ -39,6 +39,7 @@ import com.raytheon.uf.edex.datadelivery.util.DataDeliveryIdUtil;
* Feb 06, 2014 2636 bgonzale Use scheduling initialization method after registry init.
* Feb 11, 2014 2771 bgonzale Use Data Delivery ID instead of Site.
* Feb 14, 2014 2636 mpduff Clean up logging
* Apr 09, 2014 3012 dhladky Adhoc firing prevention.
* </pre>
*
* @author djohnson
@ -102,19 +103,22 @@ public class HibernateBandwidthInitializer implements BandwidthInitializer {
@Override
public void executeAfterRegistryInit() {
try {
Map<Network, List<Subscription>> subMap = findSubscriptionsStrategy
.findSubscriptionsToSchedule();
List<String> unscheduled = instance.initializeScheduling(subMap);
if (!unscheduled.isEmpty()) {
StringBuilder sb = new StringBuilder("The following subscriptions could not be scheduled at startup: ");
StringBuilder sb = new StringBuilder(
"The following subscriptions could not be scheduled at startup: ");
sb.append(StringUtil.NEWLINE);
for (String subscription : unscheduled) {
sb.append(subscription).append(" ");
}
statusHandler.handle(Priority.INFO, sb.toString());
}
} catch (Exception e) {
statusHandler.error(
"Failed to query for subscriptions to schedule", e);

View file

@ -33,7 +33,6 @@ import com.raytheon.uf.edex.datadelivery.bandwidth.dao.SubscriptionRetrieval;
import com.raytheon.uf.edex.datadelivery.bandwidth.interfaces.ISubscriptionAggregator;
import com.raytheon.uf.edex.datadelivery.bandwidth.retrieval.RetrievalAgent;
import com.raytheon.uf.edex.datadelivery.bandwidth.retrieval.RetrievalStatus;
import com.raytheon.uf.edex.datadelivery.bandwidth.retrieval.SubscriptionRetrievalAgent;
import com.raytheon.uf.edex.datadelivery.bandwidth.util.BandwidthUtil;
/**
@ -55,6 +54,7 @@ import com.raytheon.uf.edex.datadelivery.bandwidth.util.BandwidthUtil;
* Jul 11, 2013 2106 djohnson aggregate() signature changed.
* Jan 06, 2014 2636 mpduff Changed how data set offset is set.
* Jan 30, 2014 2686 dhladky refactor of retrieval.
* Apr 15, 2014 3012 dhladky help with confusing nomenclature.
* </pre>
*
* @author jspinks
@ -87,6 +87,8 @@ public class SimpleSubscriptionAggregator implements ISubscriptionAggregator {
// (i.e. has SubscriptionRetrievals associated with it) if
// not, create a SubscriptionRetrieval for the subscription
// Do NOT confuse this with an actual SubscriptionRetrieval.
// This SubscriptionRetrieval object is a BandwidthAllocation object
SubscriptionRetrieval subscriptionRetrieval = new SubscriptionRetrieval();
// Link this SubscriptionRetrieval with the subscription.
subscriptionRetrieval.setBandwidthSubscription(subDao);

View file

@ -84,6 +84,7 @@ import com.raytheon.uf.edex.datadelivery.bandwidth.retrieval.RetrievalStatus;
* active period.
* Jan 29, 2014 2636 mpduff Scheduling refactor.
* Feb 11, 2014 2636 mpduff Change how retrieval times are calculated.
* Apr 15, 2014 3012 dhladky Fixed improper offsets.
* </pre>
*
* @author djohnson
@ -179,6 +180,7 @@ public class BandwidthDaoUtil<T extends Time, C extends Coverage> {
// based on plan start and subscription start.
Calendar subscriptionCalculatedStart = subscription
.calculateStart(planStart);
// end time when subscription is last valid for scheduling based on
// plan end and subscription end.
Calendar subscriptionCalculatedEnd = subscription.calculateEnd(planEnd);
@ -200,40 +202,47 @@ public class BandwidthDaoUtil<T extends Time, C extends Coverage> {
Calendar start = TimeUtil.newGmtCalendar(subscriptionCalculatedStart
.getTime());
int availabilityOffset = 0;
try {
availabilityOffset = BandwidthUtil.getDataSetAvailablityOffset(
subscription, start);
} catch (RegistryHandlerException e) {
// Error occurred querying the registry. Log and continue on
statusHandler
.handle(Priority.PROBLEM,
"Unable to retrieve data availability offset, using 0 for the offset.",
e);
}
while (!start.after(subscriptionCalculatedEnd)) {
for (Integer cycle : hours) {
start.set(Calendar.HOUR_OF_DAY, cycle);
for (Integer minute : minutes) {
start.set(Calendar.MINUTE, minute);
Calendar retrievalTime = TimeUtil.newGmtCalendar();
retrievalTime.setTimeInMillis(start.getTimeInMillis());
retrievalTime.add(Calendar.MINUTE, availabilityOffset);
if (retrievalTime.after(planStart)
&& retrievalTime.before(planEnd)) {
// calculate the offset, every hour
int availabilityOffset = 0;
try {
availabilityOffset = BandwidthUtil
.getDataSetAvailablityOffset(subscription, start);
} catch (RegistryHandlerException e) {
// Error occurred querying the registry. Log and continue on
statusHandler
.handle(Priority.PROBLEM,
"Unable to retrieve data availability offset, using 0 for the offset.",
e);
}
for (Integer minute : minutes) {
start.set(Calendar.MINUTE, minute);
Calendar baseRefTime = TimeUtil.newGmtCalendar();
baseRefTime.setTimeInMillis(start.getTimeInMillis());
// add the offset and check if it falls within window
Calendar offsetBaseRefTime = TimeUtil
.newGmtCalendar(baseRefTime.getTime());
offsetBaseRefTime.add(Calendar.MINUTE, availabilityOffset);
if (offsetBaseRefTime.after(planStart)
&& offsetBaseRefTime.before(planEnd)) {
// Check for nonsense
/*
* Fine grain check by hour and minute, for
* subscription(start/end), activePeriod(start/end)
*/
if (!subscription.inActivePeriodWindow(retrievalTime)) {
if (!subscription.inActivePeriodWindow(baseRefTime)) {
// don't schedule this retrieval time,
// outside subscription window
continue;
}
subscriptionTimes.add(retrievalTime);
subscriptionTimes.add(baseRefTime);
}
}
}

View file

@ -33,6 +33,7 @@ import com.raytheon.uf.edex.datadelivery.bandwidth.dao.BandwidthSubscription;
* Oct 30, 2013 2448 dhladky Moved methods to TimeUtil.
* Dec 20, 2013 2636 mpduff Changed dataset delay to offset.
* Jan 08, 2014 2615 bgonzale Moved Calendar min and max methods to TimeUtil.
* Apr 09, 2014 3012 dhladky GMT Calendar use.
*
* </pre>
*
@ -191,7 +192,7 @@ public class BandwidthUtil {
dao.setDataSetName(dataSetMetaData.getDataSetName());
dao.setProviderName(dataSetMetaData.getProviderName());
dao.setUpdateTime(BandwidthUtil.now());
dao.setDataSetBaseTime(TimeUtil.newCalendar(dataSetMetaData.getDate()));
dao.setDataSetBaseTime(TimeUtil.newGmtCalendar(dataSetMetaData.getDate()));
dao.setUrl(dataSetMetaData.getUrl());
return dao;

View file

@ -38,23 +38,24 @@
</camelContext>
<!-- Start of DataSetMetaData purge configuration -->
<bean id="DataSetMetaDataPurgeLauncher" class="com.raytheon.uf.edex.datadelivery.harvester.purge.DataSetMetaDataPurgeLauncher"
factory-method="getInstance" depends-on="DbInit">
<bean id="DataSetMetaDataPurgeLauncher" class="com.raytheon.uf.edex.datadelivery.harvester.purge.DataSetMetaDataPurgeLauncher" depends-on="registryInit">
<constructor-arg ref="registryObjectDao" />
</bean>
<camelContext id="DataSetMetaDataPurge-context"
xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler">
<endpoint id="datasetMetaDataPurgeCron" uri="quartz://datadelivery/metaDataPurge/?cron=${metadata-purge.cron}"/>
<endpoint id="dataSetMetaDataPurgeCron" uri="quartz://datadelivery/metaDataPurge/?cron=${metadata-purge.cron}"/>
<endpoint id="dataSetMetaDataWorkEndpoint" uri="jms-generic:queue:dataSetMetaDataPurgeWork?concurrentConsumers=1&amp;threadName=dataSetMetaDataPurge"/>
<route id="metaDataPurge">
<from uri="datasetMetaDataPurgeCron" />
<to uri="jms-generic:queue:metaDataPurgeWork" />
<from uri="dataSetMetaDataPurgeCron" />
<to uri="jms-generic:queue:dataSetMetaDataPurgeWork" />
</route>
<route id="metaDataPurgeWork">
<from uri="jms-generic:queue:metaDataPurgeWork" />
<route id="datSetMetaDataPurgeWork">
<from uri="dataSetMetaDataWorkEndpoint" />
<doTry>
<pipeline>
<bean ref="DataSetMetaDataPurgeLauncher" method="runPurge" />

View file

@ -2,6 +2,6 @@
metadata-process.cron=0+*+*+*+*+?
# The cron pattern for how often the DataSetMetaDataPurgeLauncher will run, e.g.
# how often to check for purgeable dataset metadata instances
metadata-purge.cron=0+0+3+*+*+?
metadata-purge.cron=0+0+8+*+*+?
# how many meta data process threads to use to process metadata
metadata-process.threads=2

View file

@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.edex.registry.ebxml.dao.RegistryObjectDao;
/**
* Container class to hold the {@link IDataSetMetaDataPurgeTask} instance. It
@ -36,28 +37,32 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Sep 4, 2012 1102 djohnson Initial creation
* Apr 12,2014 3012 dhladky Purge never worked, fixed to make work.
*
* </pre>
*
* @author djohnson
* @version 1.0
*/
public final class DataSetMetaDataPurgeLauncher {
public class DataSetMetaDataPurgeLauncher {
private static final IUFStatusHandler statusHandler = UFStatus
.getHandler(DataSetMetaDataPurgeLauncher.class);
private static final DataSetMetaDataPurgeLauncher INSTANCE = new DataSetMetaDataPurgeLauncher();
private static final IDataSetMetaDataPurgeTask PURGE_TASK = new DataSetMetaDataPurgeTaskImpl();
private IDataSetMetaDataPurgeTask PURGE_TASK = null;
private static final AtomicBoolean running = new AtomicBoolean();
/**
* Disabled constructor.
* Public constructor.
*/
private DataSetMetaDataPurgeLauncher() {
public DataSetMetaDataPurgeLauncher(RegistryObjectDao rdo) {
PURGE_TASK = new DataSetMetaDataPurgeTaskImpl(rdo);
}
/**
* Try purging datasets.
*/
public void runPurge() {
try {
if (running.compareAndSet(false, true)) {
@ -70,11 +75,4 @@ public final class DataSetMetaDataPurgeLauncher {
running.set(false);
}
}
/**
* @return the instance
*/
public static DataSetMetaDataPurgeLauncher getInstance() {
return INSTANCE;
}
}

View file

@ -22,33 +22,29 @@ package com.raytheon.uf.edex.datadelivery.harvester.purge;
import static com.google.common.base.Preconditions.checkNotNull;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Calendar;
import java.util.Collections;
import java.util.Iterator;
import java.util.HashMap;
import java.util.List;
import java.util.SortedSet;
import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Multimap;
import com.google.common.collect.Ordering;
import com.google.common.collect.TreeMultimap;
import com.raytheon.uf.common.datadelivery.harvester.Agent;
import com.raytheon.uf.common.datadelivery.harvester.CrawlAgent;
import com.raytheon.uf.common.datadelivery.harvester.HarvesterConfig;
import com.raytheon.uf.common.datadelivery.harvester.HarvesterConfigurationManager;
import com.raytheon.uf.common.datadelivery.registry.DataDeliveryRegistryObjectTypes;
import com.raytheon.uf.common.datadelivery.registry.DataSetMetaData;
import com.raytheon.uf.common.datadelivery.registry.IDataSetMetaDataVisitor;
import com.raytheon.uf.common.datadelivery.registry.OpenDapGriddedDataSetMetaData;
import com.raytheon.uf.common.datadelivery.registry.handlers.DataDeliveryHandlers;
import com.raytheon.uf.common.localization.LocalizationFile;
import com.raytheon.uf.common.registry.ebxml.RegistryUtil;
import com.raytheon.uf.common.registry.handler.RegistryHandlerException;
import com.raytheon.uf.common.serialization.SerializationUtil;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.common.time.util.ITimer;
import com.raytheon.uf.common.time.util.TimeUtil;
import com.raytheon.uf.edex.datadelivery.harvester.crawler.CrawlLauncher;
import com.raytheon.uf.edex.registry.ebxml.dao.RegistryObjectDao;
/**
* Purges {@link DataSetMetaData} instances that are no longer accessible on
@ -66,50 +62,22 @@ import com.raytheon.uf.edex.datadelivery.harvester.crawler.CrawlLauncher;
* Oct 05, 2012 1241 djohnson Replace RegistryManager calls with registry handler calls.
* Dec 12, 2012 1410 dhladky multi provider configurations.
* Sept 30, 2013 1797 dhladky Generics
* Apr 12,2014 3012 dhladky Purge never worked, fixed to make work.
*
* </pre>
*
* @author djohnson
* @version 1.0
*/
class DataSetMetaDataPurgeTaskImpl implements IDataSetMetaDataPurgeTask,
IDataSetMetaDataVisitor {
/**
* Maintains state for an instance of the purge task.
*/
private static class State {
/**
* This boolean flag is used to mark whether or not the DataSetMetaData
* group should be continued, it will be set to false when the purge has
* found a DataSetMetaData instance that should NOT be purged
*/
private boolean continueWithDataSet = true;
/**
* The harvester configurations instance at the time the purge started.
*/
private List<HarvesterConfig> harvesterConfigs;
}
public class DataSetMetaDataPurgeTaskImpl implements IDataSetMetaDataPurgeTask {
private static final IUFStatusHandler statusHandler = UFStatus
.getHandler(DataSetMetaDataPurgeTaskImpl.class);
private static final IOpenDapGriddedPurge GRIDDED_OPENDAP = new OpenDapGriddedPurgeImpl();
/**
* This is the unique identifying key for this metadata's dataset in the
* map.
*
* @param metaData
* the metaDat
* @return the key
*/
@VisibleForTesting
static String getDatasetMetaDataMapKey(DataSetMetaData<?> metaData) {
return metaData.getDataSetName() + metaData.getProviderName();
}
/** Data access object for registry objects */
private RegistryObjectDao rdo;
/**
* Purges a {@link DataSetMetaData} instance.
*
@ -133,83 +101,40 @@ class DataSetMetaDataPurgeTaskImpl implements IDataSetMetaDataPurgeTask,
}
}
private final IOpenDapGriddedPurge openDapGriddedPurge;
// Used to maintain state on a per-thread basis, in case two purges somehow
// overrun each other
private final ThreadLocal<State> threadState = new ThreadLocal<State>();
/**
* Default Constructor.
*/
DataSetMetaDataPurgeTaskImpl() {
this(GRIDDED_OPENDAP);
public DataSetMetaDataPurgeTaskImpl(RegistryObjectDao rdo) {
this.rdo = rdo;
}
/**
* Constructor accepting specific purge strategies.
*
* @param openDapGriddedPurge
* openDapGriddedPurge
*
*/
@VisibleForTesting
DataSetMetaDataPurgeTaskImpl(IOpenDapGriddedPurge openDapGriddedPurge) {
this.openDapGriddedPurge = openDapGriddedPurge;
}
/**
* Clears the state for a running instance.
*/
private void clearState() {
threadState.set(null);
}
/**
* Returns all {@link DataSetMetaData} instances that are to be checked for
* validity.
*
* @return the {@link DataSetMetaData} instances
*/
@SuppressWarnings("rawtypes")
@VisibleForTesting
List<DataSetMetaData> getDataSetMetaDatas() {
try {
return DataDeliveryHandlers.getDataSetMetaDataHandler().getAll();
} catch (RegistryHandlerException e) {
statusHandler.handle(Priority.PROBLEM,
"Unable to retrieve DataSetMetaData instances!", e);
return Collections.emptyList();
}
}
/**
* Creates a map from the DataSetMetaData key as defined by
* {@link #getDatasetMetaDataMapKey(DataSetMetaData)} to the
* {@link SortedSet} of instances.
* Gets the entire list of DSMD ids from the registry.
*
* @return the map
*/
@VisibleForTesting
Multimap<String, DataSetMetaData<?>> getDataSetNameKeyedInstanceMap() {
Multimap<String, DataSetMetaData<?>> map = TreeMultimap.create(
Ordering.<String> natural(), DataSetMetaData.DATE_COMPARATOR);
for (DataSetMetaData<?> metaData : getDataSetMetaDatas()) {
String key = getDatasetMetaDataMapKey(metaData);
map.put(key, metaData);
List<String> getDataSetMetaDataIds() {
ArrayList<String> ids = null;
try {
// Gets the list of all available lids for current DataSetMetaData objects
ids = (ArrayList<String>) rdo.getRegistryObjectIdsOfType(DataDeliveryRegistryObjectTypes.DATASETMETADATA);
} catch (Exception e) {
statusHandler.handle(Priority.PROBLEM,
"Unable to retrieve DataSetMetaData ids!", e);
return Collections.emptyList();
}
return map;
return ids;
}
/**
* Returns the HarvesterConfig Array from localization.
* Returns the Retention times by Provider name.
*
* @return the {@link HarvesterConfig}
*/
@VisibleForTesting
List<HarvesterConfig> getHarvesterConfigs() {
static Map<String, String> getHarvesterConfigs() {
// first get the Localization directory and find all harvester
// configs
@ -220,8 +145,7 @@ class DataSetMetaDataPurgeTaskImpl implements IDataSetMetaDataPurgeTask,
HarvesterConfig hc = null;
try {
hc = SerializationUtil.jaxbUnmarshalFromXmlFile(
HarvesterConfig.class, lf.getFile());
hc = HarvesterConfigurationManager.getHarvesterFile(lf.getFile());
} catch (Exception se) {
statusHandler.handle(Priority.PROBLEM,
se.getLocalizedMessage(), se);
@ -238,102 +162,78 @@ class DataSetMetaDataPurgeTaskImpl implements IDataSetMetaDataPurgeTask,
}
}
}
Map<String, String> configMap = null;
return configs;
}
/**
* This method consolidates the logic of applying a purge strategy for a
* specific data type and service (e.g. OpenDAP for Gridded data) on a
* specific {@link DataSetMetaData} of that type. The generics ensure strict
* adherence to the data type mappings.
*
* @param <T>
* the type that extends DataSetMetaData
* @param metaData
* the metadata instance
* @param purge
* the purge strategy
*/
private <T extends DataSetMetaData<?>> void handleVisit(T metaData,
IServiceDataSetMetaDataPurge<T> purge) {
State state = threadState.get();
List<HarvesterConfig> harvesterConfigs = state.harvesterConfigs;
for (HarvesterConfig config : harvesterConfigs) {
if (purge.isTimeToPurge(metaData, config)) {
purgeMetaData(metaData);
} else {
// Found a non-purgeable metadata instance
state.continueWithDataSet = false;
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
final String id = RegistryUtil
.getRegistryObjectKey(metaData);
statusHandler
.debug(String
.format("Provider: "
+ config.getProvider().getName()
+ " : DataSetMetaData with id [%s] does not require purging.",
id));
}
if (!configs.isEmpty()) {
configMap = new HashMap<String, String>(
configs.size());
for (HarvesterConfig config : configs) {
configMap.put(config.getProvider().getName(), config.getRetention());
}
} else {
return Collections.emptyMap();
}
return configMap;
}
/**
* Initializes the state for a running instance.
*
* @return the State instance
*/
@VisibleForTesting
State initializeState() {
State state = new State();
state.harvesterConfigs = getHarvesterConfigs();
threadState.set(state);
return state;
}
/**
* {@inheritDoc}
*/
@Override
public void run() {
ITimer timer = TimeUtil.getTimer();
timer.start();
Multimap<String, DataSetMetaData<?>> dataSetKeyedMap = getDataSetNameKeyedInstanceMap();
List<String> idList = getDataSetMetaDataIds();
Map<String, String> configMap = getHarvesterConfigs();
int deletes = 0;
for (String id : idList) {
try {
State state = initializeState();
try {
for (String key : dataSetKeyedMap.keySet()) {
Collection<DataSetMetaData<?>> metaDatas = dataSetKeyedMap
.get(key);
Iterator<DataSetMetaData<?>> iter = metaDatas.iterator();
DataSetMetaData<?> metaData = DataDeliveryHandlers
.getDataSetMetaDataHandler().getById(id);
Integer retention = Integer.valueOf(configMap.get(metaData.getProviderName()));
state.continueWithDataSet = true;
while (iter.hasNext() && state.continueWithDataSet) {
DataSetMetaData<?> metaData = iter.next();
metaData.accept(this);
if (retention != null) {
if (retention == -1) {
// no purging for this DSMD type
continue;
} else {
// retention is in days
retention = retention * (-1);
// we are subtracting from current
Calendar thresholdTime = TimeUtil.newGmtCalendar();
thresholdTime.add(Calendar.DAY_OF_YEAR, retention);
if (thresholdTime.getTimeInMillis() >= metaData
.getDate().getTime()) {
purgeMetaData(metaData);
deletes++;
}
}
} else {
statusHandler
.warn("No retention time set for this DataSetMetaData provider! "
+ id
+ "Provider: "
+ metaData.getProviderName());
}
} catch (Exception e) {
statusHandler.error("DataSetMetaData purge failed! " + id, e);
}
} finally {
clearState();
}
timer.stop();
statusHandler.info(String.format(
"DataSetMetaData purge completed in %s ms.",
timer.getElapsedTime()));
}
/**
* {@inheritDoc}
*/
@Override
public void visit(OpenDapGriddedDataSetMetaData metaData) {
handleVisit(metaData, openDapGriddedPurge);
timer.getElapsedTime()+" deleted: "+deletes));
}
}

View file

@ -16,7 +16,9 @@
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
*/
package com.raytheon.uf.edex.datadelivery.harvester.purge;
import com.raytheon.uf.common.datadelivery.registry.DataSetMetaData;

View file

@ -1,44 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.edex.datadelivery.harvester.purge;
import com.raytheon.uf.common.datadelivery.registry.OpenDapGriddedDataSetMetaData;
/**
* Marker interface for the gridded OpenDAP purge strategy. Intentionally
* package-level as it's an internal implementation detail that should only
* reside and be accessible within this package.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Sep 4, 2012 1102 djohnson Initial creation
*
* </pre>
*
* @author djohnson
* @version 1.0
*/
interface IOpenDapGriddedPurge extends
IServiceDataSetMetaDataPurge<OpenDapGriddedDataSetMetaData> {
}

View file

@ -1,55 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.edex.datadelivery.harvester.purge;
import com.raytheon.uf.common.datadelivery.harvester.HarvesterConfig;
import com.raytheon.uf.common.datadelivery.registry.DataSetMetaData;
/**
* Defines a purge for a {@link DataSetMetaData} type. Intentionally
* package-level as it's an internal implementation detail that should only
* reside and be accessible within this package.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Sep 4, 2012 1102 djohnson Initial creation
*
* </pre>
*
* @author djohnson
* @version 1.0
*/
interface IServiceDataSetMetaDataPurge<T extends DataSetMetaData> {
/**
* Perform the purge of a {@link DataSetMetaData} instance if required.
*
* @param metaData
* the metaData
* @param harvesterConfig TODO
* @return
*/
boolean isTimeToPurge(T metaData, HarvesterConfig harvesterConfig);
}

View file

@ -1,185 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.edex.datadelivery.harvester.purge;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.conn.params.ConnRoutePNames;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import com.google.common.annotations.VisibleForTesting;
import com.raytheon.uf.common.comm.ProxyConfiguration;
import com.raytheon.uf.common.datadelivery.harvester.HarvesterConfig;
import com.raytheon.uf.common.datadelivery.registry.DataSetMetaData;
import com.raytheon.uf.common.datadelivery.registry.OpenDapGriddedDataSetMetaData;
import com.raytheon.uf.common.datadelivery.registry.Provider;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.edex.datadelivery.retrieval.util.ConnectionUtil;
/**
* Purges {@link OpenDapGriddedDataSetMetaData} instances that are no longer
* retrievable. Intentionally package-private as it should not be part of the
* public API.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Sep 4, 2012 1102 djohnson Initial creation
*
* </pre>
*
* @author djohnson
* @version 1.0
*/
class OpenDapGriddedPurgeImpl implements IOpenDapGriddedPurge {
private static final IUFStatusHandler statusHandler = UFStatus
.getHandler(OpenDapGriddedPurgeImpl.class);
/**
* Retrieves the error response pattern from the provider for the specified
* metadata instance.
*
* @param metaData
* the metadata
* @param harvesterConfig
* the harvester configuration
* @return the pattern or null if the applicable provider cannot be found
*/
private static Pattern getProviderErrorResponsePattern(
DataSetMetaData metaData, HarvesterConfig harvesterConfig) {
Provider provider = harvesterConfig.getProvider();
final String providerName = provider.getName();
final String metadataProviderName = metaData.getProviderName();
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
statusHandler.debug("Checking provider [" + providerName
+ "] to match metadata provider name ["
+ metadataProviderName + "].");
}
if (metadataProviderName.equals(providerName)) {
return Pattern.compile(provider.getErrorResponsePattern());
}
return null;
}
/**
* Retrieves the HttpClient implementation to use.
*
* @return the {@link HttpClient} implementation
*/
@VisibleForTesting
HttpClient getHttpClient() {
HttpClient httpClient = new DefaultHttpClient();
ProxyConfiguration proxyParameters = ConnectionUtil
.getProxyParameters();
if (proxyParameters != null) {
HttpHost proxy = new HttpHost(proxyParameters.getHost(),
proxyParameters.getPort());
httpClient.getParams().setParameter(ConnRoutePNames.DEFAULT_PROXY,
proxy);
}
return httpClient;
}
/**
* {@inheritDoc}
*/
@Override
public boolean isTimeToPurge(OpenDapGriddedDataSetMetaData metaData,
HarvesterConfig harvesterConfig) {
HttpGet request = new HttpGet();
HttpResponse response = null;
try {
final String url = metaData.getUrl();
request.setURI(new URI(url));
HttpClient httpClient = getHttpClient();
response = httpClient.execute(request);
int code = response.getStatusLine().getStatusCode();
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
statusHandler.debug(String.format(
"Received status code [%s] from url [%s]", code, url));
}
if (code == HttpStatus.SC_OK) {
String entityContent = EntityUtils.toString(response
.getEntity());
Pattern providerErrorResponsePattern = getProviderErrorResponsePattern(
metaData, harvesterConfig);
if (providerErrorResponsePattern != null) {
Matcher matcher = providerErrorResponsePattern
.matcher(entityContent);
if (matcher.find()) {
return true;
}
} else {
statusHandler
.warn(String
.format("Unable to find a configured provider by name [%s], removing obsolete DataSetMetaData.",
metaData.getProviderName()));
}
}
return false;
} catch (URISyntaxException e) {
statusHandler
.handle(Priority.PROBLEM,
"Unable to parse URL into a URI, purging metadata since it would otherwise remain unusable!",
e);
return true;
} catch (IOException e) {
statusHandler
.handle(Priority.PROBLEM,
"Unable to contact the host, not purging metadata since it may still be valid!",
e);
request.abort();
return false;
} finally {
if (response != null) {
try {
EntityUtils.consume(response.getEntity());
} catch (IOException e) {
statusHandler.handle(Priority.PROBLEM,
e.getLocalizedMessage(), e);
}
}
}
}
}

View file

@ -95,6 +95,7 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.common.time.util.TimeUtil;
import com.raytheon.uf.common.util.CollectionUtil;
import com.raytheon.uf.common.util.StringUtil;
import com.raytheon.uf.edex.core.EDEXUtil;
import com.raytheon.uf.edex.datadelivery.registry.availability.FederatedRegistryMonitor;
import com.raytheon.uf.edex.datadelivery.registry.dao.ReplicationEventDao;
import com.raytheon.uf.edex.datadelivery.registry.replication.NotificationServers;
@ -104,6 +105,7 @@ import com.raytheon.uf.edex.registry.ebxml.dao.DbInit;
import com.raytheon.uf.edex.registry.ebxml.dao.RegistryDao;
import com.raytheon.uf.edex.registry.ebxml.dao.RegistryObjectDao;
import com.raytheon.uf.edex.registry.ebxml.exception.EbxmlRegistryException;
import com.raytheon.uf.edex.registry.ebxml.exception.NoReplicationServersAvailableException;
import com.raytheon.uf.edex.registry.ebxml.init.RegistryInitializedListener;
import com.raytheon.uf.edex.registry.ebxml.services.query.QueryConstants;
import com.raytheon.uf.edex.registry.ebxml.services.query.RegistryQueryUtil;
@ -153,6 +155,7 @@ import com.raytheon.uf.edex.registry.events.CreateAuditTrailEvent;
* Feb 11, 2014 2771 bgonzale Use Data Delivery ID instead of Site.
* 2/13/2014 2769 bphillip Refactored registry sync. Created quartz tasks to monitor registry uptime as well as subscription integrity
* 4/11/2014 3011 bphillip Removed automatic registry sync check on startup
* 4/15/2014 3012 dhladky Merge fixes.
* </pre>
*
* @author bphillip
@ -172,7 +175,7 @@ public class RegistryFederationManager implements IRegistryFederationManager,
/** Query used for synchronizing registries */
private static final String SYNC_QUERY = "FROM RegistryObjectType obj where obj.id in (%s) order by obj.id asc";
/** Batch size for registry synchronization queries */
private static final int SYNC_BATCH_SIZE = Integer.parseInt(System
.getProperty("ebxml-notification-batch-size"));
@ -201,14 +204,7 @@ public class RegistryFederationManager implements IRegistryFederationManager,
*/
private static final long MAX_DOWN_TIME_DURATION = TimeUtil.MILLIS_PER_HOUR * 48;
private static final String SYNC_WARNING_MSG = "Registry is out of sync with federation. Registry Synchronization required. Go to: ["
+ RegistryUtil.LOCAL_REGISTRY_ADDRESS
+ "/registry/federation/status.html] to synchronize.";
private static volatile boolean SYNC_NECESSARY = false;
public static AtomicBoolean SYNC_IN_PROGRESS = new AtomicBoolean(
false);
public static AtomicBoolean SYNC_IN_PROGRESS = new AtomicBoolean(false);
/** Cutoff parameter for the query to get the expired events */
private static final String GET_EXPIRED_EVENTS_QUERY_CUTOFF_PARAMETER = "cutoff";
@ -326,9 +322,7 @@ public class RegistryFederationManager implements IRegistryFederationManager,
throw new EbxmlRegistryException(
"Error joining federation!!");
}
if (!centralRegistry) {
checkDownTime();
}
} catch (Exception e1) {
throw new EbxmlRegistryException(
"Error initializing RegistryReplicationManager", e1);
@ -350,29 +344,6 @@ public class RegistryFederationManager implements IRegistryFederationManager,
initialized.set(true);
}
/**
* Checks how long a registry has been down. If the registry has been down
* longer than the MAX_DOWN_TIME_DURATION, then a sync is necessary
*
* @see RegistryFederationManager.MAX_DOWN_TIME_DURATION
* @throws Exception
*/
private void checkDownTime() throws Exception {
long currentTime = TimeUtil.currentTimeMillis();
long lastKnownUp = federatedRegistryMonitor.getLastKnownUptime();
long downTime = currentTime - lastKnownUp;
statusHandler.info("Registry has been down since: "
+ new Date(currentTime - downTime));
/*
* The registry has been down for ~2 days, this requires a
* synchronization of the data from the federation
*/
if (currentTime - lastKnownUp > MAX_DOWN_TIME_DURATION) {
SYNC_NECESSARY = true;
sendSyncMessage();
}
}
public boolean joinFederation() {
try {
final List<RegistryObjectType> objects = new ArrayList<RegistryObjectType>(
@ -509,6 +480,62 @@ public class RegistryFederationManager implements IRegistryFederationManager,
return true;
}
/**
* Checks how long a registry has been down. If the registry has been down
* for over 2 days, the registry is synchronized with one of the federation
* members
*
* @throws Exception
*/
private void synchronize() throws EbxmlRegistryException {
monitorHandler.warn("Synchronizing registry with federation...");
RegistryType registryToSyncFrom = null;
for (String remoteRegistryId : servers.getRegistryReplicationServers()) {
statusHandler.info("Checking availability of [" + remoteRegistryId
+ "]...");
RegistryType remoteRegistry = null;
try {
remoteRegistry = dataDeliveryRestClient.getRegistryObject(
ncfAddress, remoteRegistryId
+ FederationProperties.REGISTRY_SUFFIX);
} catch (Exception e) {
throw new EbxmlRegistryException(
"Error getting remote registry object!", e);
}
if (remoteRegistry == null) {
statusHandler
.warn("Registry at ["
+ remoteRegistryId
+ "] not found in federation. Unable to use as synchronization source.");
} else if (dataDeliveryRestClient
.isRegistryAvailable(remoteRegistry.getBaseURL())) {
registryToSyncFrom = remoteRegistry;
break;
} else {
statusHandler
.info("Registry at ["
+ remoteRegistryId
+ "] is not available. Unable to use as synchronization source.");
}
}
// No available registry was found!
if (registryToSyncFrom == null) {
throw new NoReplicationServersAvailableException(
"No available registries found! Registry data will not be synchronized with the federation!");
} else {
try {
synchronizeWithRegistry(registryToSyncFrom.getId());
} catch (Exception e) {
throw new EbxmlRegistryException(
"Error synchronizing with registry ["
+ registryToSyncFrom.getId() + "]", e);
}
}
}
/**
* Synchronizes this registry's data with the registry at the specified URL
*
@ -523,6 +550,7 @@ public class RegistryFederationManager implements IRegistryFederationManager,
@Path("synchronizeWithRegistry/{registryId}")
public void synchronizeWithRegistry(@PathParam("registryId")
String registryId) throws Exception {
if (SYNC_IN_PROGRESS.compareAndSet(false, true)) {
try {
monitorHandler.handle(Priority.WARN,
@ -551,7 +579,6 @@ public class RegistryFederationManager implements IRegistryFederationManager,
for (final String objectType : replicatedObjectTypes) {
syncObjectType(objectType, remoteRegistryUrl);
}
SYNC_NECESSARY = false;
federatedRegistryMonitor.updateTime();
StringBuilder syncMsg = new StringBuilder();
@ -565,6 +592,10 @@ public class RegistryFederationManager implements IRegistryFederationManager,
} finally {
SYNC_IN_PROGRESS.set(false);
}
} else {
statusHandler.info("Registry sync already in progress.");
}
}
@ -611,8 +642,9 @@ public class RegistryFederationManager implements IRegistryFederationManager,
int remainder = remoteIds.size() % SYNC_BATCH_SIZE;
for (int currentBatch = 0; currentBatch < batches; currentBatch++) {
statusHandler.info("Processing batch " + (currentBatch + 1)
+ "/" + batches);
statusHandler.info("Processing batch "+(currentBatch+1)+"/"+batches);
persistBatch(objectType, remoteRegistryUrl, remoteIds.subList(
currentBatch * SYNC_BATCH_SIZE, (currentBatch + 1)
* SYNC_BATCH_SIZE));
@ -669,12 +701,6 @@ public class RegistryFederationManager implements IRegistryFederationManager,
}
}
private void sendSyncMessage() {
if (!SYNC_IN_PROGRESS.get()) {
statusHandler.warn(SYNC_WARNING_MSG);
monitorHandler.handle(Priority.WARN, SYNC_WARNING_MSG);
}
}
@GET
@Path("isFederated")
@ -1094,15 +1120,35 @@ public class RegistryFederationManager implements IRegistryFederationManager,
* Updates the record in the registry that keeps track of if this registry
* has been up. This method is called every minute via a quartz cron
* configured in Camel
*
* @throws EbxmlRegistryException
*/
@Transactional
public void updateUpTime() {
if (initialized.get()) {
if (SYNC_NECESSARY) {
if (!SYNC_IN_PROGRESS.get()
&& TimeUtil.newGmtCalendar().get(Calendar.MINUTE) % 15 == 0) {
sendSyncMessage();
public void updateUpTime() throws EbxmlRegistryException {
if (initialized.get() && EDEXUtil.isRunning()) {
long currentTime = TimeUtil.currentTimeMillis();
long lastKnownUp = federatedRegistryMonitor.getLastKnownUptime();
long downTime = currentTime - lastKnownUp;
if (currentTime - lastKnownUp > MAX_DOWN_TIME_DURATION
&& !centralRegistry) {
if (!SYNC_IN_PROGRESS.get()) {
statusHandler.info("Registry has been down since: "
+ new Date(currentTime - downTime));
statusHandler
.warn("Registry is out of sync with federation. Attempting to automatically synchronize");
try {
synchronize();
monitorHandler
.info("Registry successfully synchronized!");
} catch (EbxmlRegistryException e) {
monitorHandler
.error("Error synchronizing registry!", e);
throw e;
}
}
} else {
federatedRegistryMonitor.updateTime();
}

View file

@ -78,6 +78,7 @@ import com.raytheon.uf.edex.registry.ebxml.services.query.RegistryQueryUtil;
* 10/2/2013 2385 bphillip Fixed subscription backup queries
* 10/8/2013 1682 bphillip Added query queries
* 11/7/2013 1678 bphillip Added getCustomQueries method
* Apr 12,2014 3012 dhladky Purge never worked, fixed to make work.
* </pre>
*
* @author bphillip
@ -340,9 +341,9 @@ public class RegistryDataAccessService implements IRegistryDataAccessService {
try {
Object subObj = subscriptionJaxbManager
.unmarshalFromXml(subscriptionXML);
EDEXUtil.getMessageProducer().sendSync("scheduleSubscription",
subObj);
lcm.submitObjects(submitRequest);
EDEXUtil.getMessageProducer().sendSync("scheduleSubscription",
new Object[] {subObj, false});
subscriptionFile.delete();
response.append(
"Subscription successfully restored from file [")

View file

@ -54,6 +54,7 @@ import com.raytheon.uf.edex.datadelivery.retrieval.handlers.SubscriptionRetrieva
* Dec 10, 2012 1259 bsteffen Switch Data Delivery from LatLon to referenced envelopes.
* Dec 11, 2013 2625 mpduff Remove creation of DataURI.
* Jan 30, 2014 2686 dhladky refactor of retrieval.
* Apr 09, 2014 3012 dhladky Added error message.
*
* </pre>
*
@ -128,7 +129,7 @@ public class RetrievalGeneratorUtilities {
}
} catch (Exception e) {
statusHandler.handle(Priority.PROBLEM,
e.getLocalizedMessage(), e);
"Can't determine if URI is a duplicate", e);
}
}
}

View file

@ -61,6 +61,7 @@ import com.raytheon.uf.edex.ogc.common.db.SimpleLayer;
* Jul 23, 2013 bclement Initial creation
* Aug 08, 2013 dhladky Made operational
* Jan 13, 2014 #2679 dhladky multiple layers
* Apr 14, 2014 3012 dhladky Cleaned up.
*
* </pre>
*
@ -118,7 +119,7 @@ public abstract class RegistryCollectorAddon<D extends SimpleDimension, L extend
final String description = metaData.getDataSetDescription();
statusHandler.info("Attempting store of DataSetMetaData[" + description
+ "]");
+ "] " + "Date: "+metaData.getDate());
try {
handler.update(metaData);

View file

@ -76,6 +76,7 @@ import com.vividsolutions.jts.geom.Envelope;
* Oct 10, 2013 1797 bgonzale Refactored registry Time objects.
* Nov 6, 2013 2525 dhladky Stop appending "/wfs"
* Jan 13, 2014 2679 dhladky Multiple ingest layer windows for a single request window.
* Apr 13, 2014 3012 dhladky Cleaned up.
*
* </pre>
*
@ -454,13 +455,15 @@ public abstract class WfsRegistryCollectorAddon<D extends SimpleDimension, L ext
times.put(layer.getName(), new PointTime());
// harvests the times from the layer
setTime(layer);
// make sure you populate the metadata
setDataSetMetaData(layer);
getDataSetMetaData(layer.getName()).setTime(getTime(layer.getName()));
ImmutableDate date = null;
date = new ImmutableDate(getTime(layer.getName()).getEnd());
getDataSetMetaData(layer.getName()).setDate(date);
storeMetaData(getDataSetMetaData(layer.getName()));
PointDataSetMetaData data = getDataSetMetaData(layer.getName());
PointTime time = getTime(layer.getName());
data.setTime(time);
ImmutableDate date = new ImmutableDate(time.getEnd());
data.setDate(date);
storeMetaData(data);
}
}

View file

@ -19,13 +19,16 @@
**/
package com.raytheon.uf.edex.registry.ebxml.dao;
import java.util.Collection;
import java.util.List;
import oasis.names.tc.ebxml.regrep.xsd.rim.v4.SlotType;
import org.hibernate.SQLQuery;
import org.hibernate.criterion.Property;
import com.raytheon.uf.edex.database.dao.SessionManagedDao;
import com.raytheon.uf.edex.registry.ebxml.services.query.QueryConstants;
/**
*
@ -84,4 +87,10 @@ public class SlotTypeDao extends SessionManagedDao<String, SlotType> {
this.template.delete(slot);
}
}
public void deleteBySlotId(Collection<String> ids){
template.deleteAll(createCriteria().add(
Property.forName(QueryConstants.ID).in(ids))
.list());
}
}

View file

@ -55,6 +55,7 @@ import com.raytheon.uf.edex.registry.events.DeleteSlotEvent;
* 2/4/2014 2769 bphillip Removed flush and clear call
* 2/13/2014 2769 bphillip Refactored to no longer use executor threads
* 4/11/2014 3011 bphillip Added slot purging via event bus notifications
* 4/17/2014 3011 bphillip Delete slot events now contain strings
* </pre>
*
* @author bphillip
@ -139,15 +140,17 @@ public class RegistryGarbageCollector {
@Subscribe
public void deleteOrphanedSlot(DeleteSlotEvent slotEvent) {
if (!CollectionUtil.isNullOrEmpty(slotEvent.getSlotsToDelete())) {
long start = TimeUtil.currentTimeMillis();
statusHandler.info("Deleting "
+ slotEvent.getSlotsToDelete().size() + " slots...");
slotDao.deleteAll(slotEvent.getSlotsToDelete());
slotDao.deleteBySlotId(slotEvent.getSlotsToDelete());
statusHandler.info("Deleted " + slotEvent.getSlotsToDelete().size()
+ " slots in " + (TimeUtil.currentTimeMillis() - start)
+ " ms");
}
}
}

View file

@ -111,6 +111,7 @@ import com.raytheon.uf.edex.registry.events.DeleteSlotEvent;
* 01/21/2014 2613 bphillip Removed verbose log message from removeObjects
* 2/19/2014 2769 bphillip Added current time to audit trail events
* 4/11/2014 3011 bphillip Modified merge behavior
* 4/17/2014 3011 bphillip Delete slot events now contain strings
*
*
* </pre>
@ -300,6 +301,8 @@ public class LifecycleManagerImpl implements LifecycleManager {
event.setObjectType(objectType);
EventBus.publish(event);
}
DeleteSlotEvent deleteEvent = new DeleteSlotEvent(obj.getSlot());
EventBus.publish(deleteEvent);
EventBus.publish(new RegistryStatisticsEvent(obj.getObjectType(),
obj.getStatus(), obj.getOwner(), avTimePerRecord));
}
@ -756,9 +759,8 @@ public class LifecycleManagerImpl implements LifecycleManager {
private void mergeObjects(RegistryObjectType newObject,
RegistryObjectType existingObject) {
DeleteSlotEvent deleteSlotEvent = new DeleteSlotEvent(existingObject.getSlot());
registryObjectDao.merge(newObject, existingObject);
DeleteSlotEvent deleteSlotEvent = new DeleteSlotEvent(
existingObject.getSlot());
EventBus.publish(deleteSlotEvent);
}

View file

@ -19,11 +19,13 @@
**/
package com.raytheon.uf.edex.registry.events;
import java.util.ArrayList;
import java.util.List;
import oasis.names.tc.ebxml.regrep.xsd.rim.v4.SlotType;
import com.raytheon.uf.common.event.Event;
import com.raytheon.uf.common.util.CollectionUtil;
/**
* Event containing slots to be deleted by the registry garbage collector
@ -35,6 +37,7 @@ import com.raytheon.uf.common.event.Event;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* 4/11/2014 3011 bphillip Initial Coding
* 4/17/2014 3011 bphillip Delete slot events now contain strings
* </pre>
*
* @author bphillip
@ -42,26 +45,31 @@ import com.raytheon.uf.common.event.Event;
*/
public class DeleteSlotEvent extends Event {
private static final long serialVersionUID = -2818002679753482984L;
private List<SlotType> slotsToDelete;
public DeleteSlotEvent(){
super();
}
public DeleteSlotEvent(List<SlotType> slotsToDelete){
this.slotsToDelete = slotsToDelete;
}
private static final long serialVersionUID = -2818002679753482984L;
public List<SlotType> getSlotsToDelete() {
return slotsToDelete;
}
private List<String> slotsToDelete;;
public void setSlotsToDelete(List<SlotType> slotsToDelete) {
this.slotsToDelete = slotsToDelete;
}
public DeleteSlotEvent() {
super();
}
public DeleteSlotEvent(List<SlotType> slotsToDelete) {
if (CollectionUtil.isNullOrEmpty(slotsToDelete)) {
slotsToDelete = new ArrayList<SlotType>();
} else {
this.slotsToDelete = new ArrayList<String>(slotsToDelete.size());
for (SlotType slot : slotsToDelete) {
this.slotsToDelete.add(slot.getId());
}
}
}
public List<String> getSlotsToDelete() {
return slotsToDelete;
}
public void setSlotsToDelete(List<String> slotsToDelete) {
this.slotsToDelete = slotsToDelete;
}
}