diff --git a/RadarServer/com.raytheon.rcm.lib/src/com/raytheon/rcm/config/awips1/Awips1RpsListUtil.java b/RadarServer/com.raytheon.rcm.lib/src/com/raytheon/rcm/config/awips1/Awips1RpsListUtil.java old mode 100755 new mode 100644 diff --git a/RadarServer/com.raytheon.rcm.server/src/com/raytheon/rcm/config/awips1/Awips1ConfigProvider.java b/RadarServer/com.raytheon.rcm.server/src/com/raytheon/rcm/config/awips1/Awips1ConfigProvider.java old mode 100755 new mode 100644 diff --git a/TextDao.java b/TextDao.java new file mode 100644 index 0000000000..069873d95d --- /dev/null +++ b/TextDao.java @@ -0,0 +1,72 @@ +/** + * This software was developed and / or modified by Raytheon Company, + * pursuant to Contract DG133W-05-CQ-1067 with the US Government. + * + * U.S. EXPORT CONTROLLED TECHNICAL DATA + * This software product contains export-restricted data whose + * export/transfer/disclosure is restricted by U.S. law. Dissemination + * to non-U.S. persons whether in the United States or abroad requires + * an export license or other authorization. + * + * Contractor Name: Raytheon Company + * Contractor Address: 6825 Pine Street, Suite 340 + * Mail Stop B8 + * Omaha, NE 68106 + * 402.291.0100 + * + * See the AWIPS II Master Rights File ("Master Rights File.pdf") for + * further licensing information. + **/ +package com.raytheon.edex.plugin.text.dao; + +import java.util.Calendar; + +import com.raytheon.edex.db.dao.DefaultPluginDao; +import com.raytheon.edex.textdb.dbapi.impl.TextDB; +import com.raytheon.uf.common.dataplugin.PluginException; +import com.raytheon.uf.edex.database.purge.PurgeLogger; + +/** + * DAO for text products + * + *
+ * + * SOFTWARE HISTORY + * + * Date Ticket# Engineer Description + * ------------ ---------- ----------- -------------------------- + * Jul 10, 2009 2191 rjpeter Update retention time handling. + * Aug 18, 2009 2191 rjpeter Changed to version purging. + *+ * + * @author + * @version 1 + */ +public class TextDao extends DefaultPluginDao { + + public TextDao(String pluginName) throws PluginException { + super(pluginName); + } + + @Override + public void purgeAllData() { + logger.warn("purgeAllPluginData not implemented for text. No data will be purged."); + } + + protected void loadScripts() throws PluginException { + // no op + } + + public void purgeExpiredData() throws PluginException { + int deletedRecords = 0; + + // only do full purge every few hours since incremental purge runs every + // minute + if (Calendar.getInstance().get(Calendar.HOUR_OF_DAY) % 3 == 0) { + TextDB.purgeStdTextProducts(); + } + + PurgeLogger.logInfo("Purged " + deletedRecords + " items total.", + "text"); + } +} diff --git a/cave/build/static/linux/cave/caveEnvironment/lib/libgempak.so b/cave/build/static/linux/cave/caveEnvironment/lib/libgempak.so old mode 100755 new mode 100644 diff --git a/cave/com.raytheon.uf.viz.alertviz/src/com/raytheon/uf/viz/alertviz/AlertVizClient.java b/cave/com.raytheon.uf.viz.alertviz/src/com/raytheon/uf/viz/alertviz/AlertVizClient.java index 125ad50b33..4da25ea3ae 100644 --- a/cave/com.raytheon.uf.viz.alertviz/src/com/raytheon/uf/viz/alertviz/AlertVizClient.java +++ b/cave/com.raytheon.uf.viz.alertviz/src/com/raytheon/uf/viz/alertviz/AlertVizClient.java @@ -21,6 +21,7 @@ package com.raytheon.uf.viz.alertviz; import java.io.PrintStream; import java.io.StringWriter; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.CopyOnWriteArrayList; import javax.jms.ExceptionListener; @@ -88,7 +89,11 @@ public class AlertVizClient implements MessageListener { private CopyOnWriteArrayList
- * - * SOFTWARE HISTORY - * - * Date Ticket# Engineer Description - * ------------ ---------- ----------- -------------------------- - * 4/7/09 1994 bphillip Initial Creation - * - *- * - * @author bphillip - * @version 1 - */ -public class GribSpatialCache { - - /** The logger */ - protected transient Log logger = LogFactory.getLog(getClass()); - - /** The singleton instance */ - private static GribSpatialCache instance = new GribSpatialCache(); - - /** - * Map containing the GridCoverages
+ * + * SOFTWARE HISTORY + * + * Date Ticket# Engineer Description + * ------------ ---------- ----------- -------------------------- + * 4/7/09 1994 bphillip Initial Creation + * + *+ * + * @author bphillip + * @version 1 + */ +public class GribSpatialCache { + + /** The logger */ + protected transient Log logger = LogFactory.getLog(getClass()); + + /** The singleton instance */ + private static GribSpatialCache instance = new GribSpatialCache(); + + /** + * Map containing the GridCoverages
- * - * SOFTWARE HISTORY - * - * Date Ticket# Engineer Description - * ------------ ---------- ----------- -------------------------- - * Jul 10, 2009 2191 rjpeter Update retention time handling. - * Aug 18, 2009 2191 rjpeter Changed to version purging. - *- * - * @author - * @version 1 - */ -public class TextDao extends DefaultPluginDao { - - public TextDao(String pluginName) throws PluginException { - super(pluginName); - } - - @Override - public void purgeAllData() { - logger.warn("purgeAllPluginData not implemented for text. No data will be purged."); - } - - protected void loadScripts() throws PluginException { - // no op - } - - public void purgeExpiredData() throws PluginException { - int deletedRecords = TextDB.purgeStdTextProducts(); - PurgeLogger.logInfo("Purged " + deletedRecords + " items total.", - "text"); - } -} +/** + * This software was developed and / or modified by Raytheon Company, + * pursuant to Contract DG133W-05-CQ-1067 with the US Government. + * + * U.S. EXPORT CONTROLLED TECHNICAL DATA + * This software product contains export-restricted data whose + * export/transfer/disclosure is restricted by U.S. law. Dissemination + * to non-U.S. persons whether in the United States or abroad requires + * an export license or other authorization. + * + * Contractor Name: Raytheon Company + * Contractor Address: 6825 Pine Street, Suite 340 + * Mail Stop B8 + * Omaha, NE 68106 + * 402.291.0100 + * + * See the AWIPS II Master Rights File ("Master Rights File.pdf") for + * further licensing information. + **/ +package com.raytheon.edex.plugin.text.dao; + +import java.util.Calendar; + +import com.raytheon.edex.db.dao.DefaultPluginDao; +import com.raytheon.edex.textdb.dbapi.impl.TextDB; +import com.raytheon.uf.common.dataplugin.PluginException; +import com.raytheon.uf.edex.database.purge.PurgeLogger; + +/** + * DAO for text products + * + *
+ * + * SOFTWARE HISTORY + * + * Date Ticket# Engineer Description + * ------------ ---------- ----------- -------------------------- + * Jul 10, 2009 2191 rjpeter Update retention time handling. + * Aug 18, 2009 2191 rjpeter Changed to version purging. + *+ * + * @author + * @version 1 + */ +public class TextDao extends DefaultPluginDao { + + public TextDao(String pluginName) throws PluginException { + super(pluginName); + } + + @Override + public void purgeAllData() { + logger.warn("purgeAllPluginData not implemented for text. No data will be purged."); + } + + protected void loadScripts() throws PluginException { + // no op + } + + public void purgeExpiredData() throws PluginException { + int deletedRecords = 0; + + // only do full purge every few hours since incremental purge runs every + // minute + if (Calendar.getInstance().get(Calendar.HOUR_OF_DAY) % 3 == 0) { + TextDB.purgeStdTextProducts(); + } + + PurgeLogger.logInfo("Purged " + deletedRecords + " items total.", + "text"); + } +} diff --git a/edexOsgi/com.raytheon.uf.edex.purgesrv/src/com/raytheon/uf/edex/purgesrv/PurgeDao.java b/edexOsgi/com.raytheon.uf.edex.purgesrv/src/com/raytheon/uf/edex/purgesrv/PurgeDao.java index 7d2a7518bf..492c27e9c7 100644 --- a/edexOsgi/com.raytheon.uf.edex.purgesrv/src/com/raytheon/uf/edex/purgesrv/PurgeDao.java +++ b/edexOsgi/com.raytheon.uf.edex.purgesrv/src/com/raytheon/uf/edex/purgesrv/PurgeDao.java @@ -1,284 +1,282 @@ -/** - * This software was developed and / or modified by Raytheon Company, - * pursuant to Contract DG133W-05-CQ-1067 with the US Government. - * - * U.S. EXPORT CONTROLLED TECHNICAL DATA - * This software product contains export-restricted data whose - * export/transfer/disclosure is restricted by U.S. law. Dissemination - * to non-U.S. persons whether in the United States or abroad requires - * an export license or other authorization. 
- * - * Contractor Name: Raytheon Company - * Contractor Address: 6825 Pine Street, Suite 340 - * Mail Stop B8 - * Omaha, NE 68106 - * 402.291.0100 - * - * See the AWIPS II Master Rights File ("Master Rights File.pdf") for - * further licensing information. - **/ -package com.raytheon.uf.edex.purgesrv; - -import java.sql.Timestamp; -import java.util.Calendar; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TimeZone; - -import org.hibernate.Query; -import org.hibernate.Session; -import org.springframework.transaction.TransactionStatus; -import org.springframework.transaction.support.TransactionCallback; -import org.springframework.transaction.support.TransactionCallbackWithoutResult; - -import com.raytheon.uf.edex.database.dao.CoreDao; -import com.raytheon.uf.edex.database.dao.DaoConfig; - -/** - * - * Data access object for accessing purge job status objects - * - *
- * - * SOFTWARE HISTORY - * - * Date Ticket# Engineer Description - * ------------ ---------- ----------- -------------------------- - * May 1, 2012 #470 bphillip Initial creation - * - *- * - * @author bphillip - * @version 1.0 - */ -public class PurgeDao extends CoreDao { - - /** - * Constructs a new purge data access object - */ - public PurgeDao() { - super(DaoConfig.forClass(PurgeJobStatus.class)); - } - - /** - * Gets the number of purge jobs currently running on the cluster. A job is - * considered running if the 'running' flag is set to true and the job has - * been started since validStartTime and has not met or exceeded the failed - * count. - * - * @param validStartTime - * @param failedCount - * @return The number of purge jobs currently running on the cluster - */ - public int getRunningClusterJobs(final Date validStartTime, - final int failedCount) { - final String query = "from " - + daoClass.getName() - + " obj where obj.running = true and obj.startTime > :startTime and obj.failedCount <= :failedCount"; - return (Integer) txTemplate.execute(new TransactionCallback() { - @Override - public Object doInTransaction(TransactionStatus status) { - Query hibQuery = getSession(false).createQuery(query); - hibQuery.setTimestamp("startTime", validStartTime); - hibQuery.setInteger("failedCount", failedCount); - List> queryResult = hibQuery.list(); - if (queryResult == null) { - return 0; - } else { - return queryResult.size(); - } - } - }); - } - - /** - * Returns the jobs that have met or exceed the failed count. - * - * @param failedCount - * @return - */ - @SuppressWarnings("unchecked") - public List
+ * + * SOFTWARE HISTORY + * + * Date Ticket# Engineer Description + * ------------ ---------- ----------- -------------------------- + * May 1, 2012 #470 bphillip Initial creation + * + *+ * + * @author bphillip + * @version 1.0 + */ +public class PurgeDao extends CoreDao { + + /** + * Constructs a new purge data access object + */ + public PurgeDao() { + super(DaoConfig.forClass(PurgeJobStatus.class)); + } + + /** + * Gets the number of purge jobs currently running on the cluster. A job is + * considered running if the 'running' flag is set to true and the job has + * been started since validStartTime and has not met or exceeded the failed + * count. + * + * @param validStartTime + * @param failedCount + * @return The number of purge jobs currently running on the cluster + */ + public int getRunningClusterJobs(final Date validStartTime, + final int failedCount) { + final String query = "from " + + daoClass.getName() + + " obj where obj.running = true and obj.startTime > :startTime and obj.failedCount <= :failedCount"; + return (Integer) txTemplate.execute(new TransactionCallback() { + @Override + public Object doInTransaction(TransactionStatus status) { + Query hibQuery = getSession(false).createQuery(query); + hibQuery.setTimestamp("startTime", validStartTime); + hibQuery.setInteger("failedCount", failedCount); + List> queryResult = hibQuery.list(); + if (queryResult == null) { + return 0; + } else { + return queryResult.size(); + } + } + }); + } + + /** + * Returns the jobs that have met or exceed the failed count. + * + * @param failedCount + * @return + */ + @SuppressWarnings("unchecked") + public List
- * - * SOFTWARE HISTORY - * - * Date Ticket# Engineer Description - * ------------ ---------- ----------- -------------------------- - * Apr 19, 2012 #470 bphillip Initial creation - * - *- * - * @author bphillip - * @version 1.0 - */ -public class PurgeJob extends Thread { - - /** The type of purge */ - public enum PURGE_JOB_TYPE { - PURGE_ALL, PURGE_EXPIRED - } - - private long startTime; - - /** The cluster task name to use for purge jobs */ - public static final String TASK_NAME = "Purge Plugin Data"; - - /** The plugin associated with this purge job */ - private String pluginName; - - /** The type of purge job being executed */ - private PURGE_JOB_TYPE purgeType; - - /** Last time job has printed a timed out message */ - private long lastTimeOutMessage = 0; - - /** - * Creates a new Purge job for the specified plugin. - * - * @param pluginName - * The plugin to be purged - * @param purgeType - * The type of purge to be executed - */ - public PurgeJob(String pluginName, PURGE_JOB_TYPE purgeType) { - // Give the thread a name - this.setName("Purge-" + pluginName.toUpperCase() + "-Thread"); - this.pluginName = pluginName; - this.purgeType = purgeType; - } - - public void run() { - - // Flag used to track if this job has failed - boolean failed = false; - startTime = System.currentTimeMillis(); - PurgeLogger.logInfo("Purging expired data...", pluginName); - PluginDao dao = null; - - try { - dao = PluginFactory.getInstance().getPluginDao(pluginName); - if (dao.getDaoClass() != null) { - dao.purgeExpiredData(); - PurgeLogger.logInfo("Data successfully Purged!", pluginName); - } else { - Method m = dao.getClass().getMethod("purgeExpiredData", - new Class[] {}); - if (m != null) { - if (m.getDeclaringClass().equals(PluginDao.class)) { - PurgeLogger - .logWarn( - "Unable to purge data. 
This plugin does not specify a record class and does not implement a custom purger.", - pluginName); - } else { - if (this.purgeType.equals(PURGE_JOB_TYPE.PURGE_EXPIRED)) { - dao.purgeExpiredData(); - } else { - dao.purgeAllData(); - } - PurgeLogger.logInfo("Data successfully Purged!", - pluginName); - } - } - } - } catch (Exception e) { - failed = true; - // keep getting next exceptions with sql exceptions to ensure - // we can see the underlying error - PurgeLogger - .logError("Error purging expired data!\n", pluginName, e); - Throwable t = e.getCause(); - while (t != null) { - if (t instanceof SQLException) { - SQLException se = ((SQLException) t).getNextException(); - PurgeLogger.logError("Next exception:", pluginName, se); - } - t = t.getCause(); - } - } finally { - ClusterTask purgeLock = PurgeManager.getInstance().getPurgeLock(); - try { - /* - * Update the status accordingly if the purge failed or - * succeeded - */ - PurgeDao purgeDao = new PurgeDao(); - PurgeJobStatus status = purgeDao - .getJobForPlugin(this.pluginName); - if (status == null) { - PurgeLogger.logError( - "Purge job completed but no status object found!", - this.pluginName); - } else { - if (failed) { - status.incrementFailedCount(); - if (status.getFailedCount() >= PurgeManager - .getInstance().getFatalFailureCount()) { - PurgeLogger - .logFatal( - "Purger for this plugin has reached or exceeded consecutive failure limit of " - + PurgeManager - .getInstance() - .getFatalFailureCount() - + ". Data will no longer being purged for this plugin.", - pluginName); - } else { - PurgeLogger.logError("Purge job has failed " - + status.getFailedCount() - + " consecutive times.", this.pluginName); - // Reset the start time so we can try again as soon - // as possible - status.setStartTime(new Date(0)); - } - } else { - status.setFailedCount(0); - } - - /* - * This purger thread has exceeded the time out duration but - * finally finished. 
Output a message and update the status - */ - int deadPurgeJobAge = PurgeManager.getInstance() - .getDeadPurgeJobAge(); - Calendar purgeTimeOutLimit = Calendar.getInstance(); - purgeTimeOutLimit.setTimeZone(TimeZone.getTimeZone("GMT")); - purgeTimeOutLimit.add(Calendar.MINUTE, -deadPurgeJobAge); - if (startTime < purgeTimeOutLimit.getTimeInMillis()) { - PurgeLogger - .logInfo( - "Purge job has recovered from timed out state!!", - pluginName); - } - status.setRunning(false); - purgeDao.update(status); - /* - * Log execution times - */ - long executionTime = getAge(); - long execTimeInMinutes = executionTime / 60000; - if (execTimeInMinutes > 0) { - PurgeLogger.logInfo("Purge run time: " + executionTime - + " ms (" + execTimeInMinutes + " minutes)", - this.pluginName); - } else { - PurgeLogger.logInfo("Purge run time: " + executionTime - + " ms", this.pluginName); - } - } - } catch (Throwable e) { - PurgeLogger - .logError( - "An unexpected error occurred upon completion of the purge job", - this.pluginName, e); - } finally { - ClusterLockUtils.unlock(purgeLock, false); - } - } - } - - public void printTimedOutMessage(int deadPurgeJobAge) { - // only print message every 5 minutes - if (System.currentTimeMillis() - lastTimeOutMessage > 300000) { - PurgeLogger.logFatal( - "Purger running time has exceeded timeout duration of " - + deadPurgeJobAge - + " minutes. Current running time: " - + (getAge() / 60000) + " minutes", pluginName); - printStackTrace(); - } - } - - /** - * Prints the stack trace for this job thread. 
- */ - public void printStackTrace() { - StringBuffer buffer = new StringBuffer(); - buffer.append("Stack trace for Purge Job Thread:\n"); - buffer.append(getStackTrace(this)); - // If this thread is blocked, output the stack traces for the other - // blocked threads to assist in determining the source of the - // deadlocked - // threads - if (this.getState().equals(State.BLOCKED)) { - buffer.append("\tDUMPING OTHER BLOCKED THREADS\n"); - buffer.append(getBlockedStackTraces()); - - } - PurgeLogger.logError(buffer.toString(), this.pluginName); - - } - - /** - * Gets the stack traces for all other threads in the BLOCKED state in the - * JVM - * - * @return The stack traces for all other threads in the BLOCKED state in - * the JVM - */ - private String getBlockedStackTraces() { - StringBuffer buffer = new StringBuffer(); - Map
+ * + * SOFTWARE HISTORY + * + * Date Ticket# Engineer Description + * ------------ ---------- ----------- -------------------------- + * Apr 19, 2012 #470 bphillip Initial creation + * + *+ * + * @author bphillip + * @version 1.0 + */ +public class PurgeJob extends Thread { + + /** The type of purge */ + public enum PURGE_JOB_TYPE { + PURGE_ALL, PURGE_EXPIRED + } + + private long startTime; + + /** The cluster task name to use for purge jobs */ + public static final String TASK_NAME = "Purge Plugin Data"; + + /** The plugin associated with this purge job */ + private String pluginName; + + /** The type of purge job being executed */ + private PURGE_JOB_TYPE purgeType; + + /** Last time job has printed a timed out message */ + private long lastTimeOutMessage = 0; + + /** + * Creates a new Purge job for the specified plugin. + * + * @param pluginName + * The plugin to be purged + * @param purgeType + * The type of purge to be executed + */ + public PurgeJob(String pluginName, PURGE_JOB_TYPE purgeType) { + // Give the thread a name + this.setName("Purge-" + pluginName.toUpperCase() + "-Thread"); + this.pluginName = pluginName; + this.purgeType = purgeType; + } + + public void run() { + + // Flag used to track if this job has failed + boolean failed = false; + startTime = System.currentTimeMillis(); + PurgeLogger.logInfo("Purging expired data...", pluginName); + PluginDao dao = null; + + try { + dao = PluginFactory.getInstance().getPluginDao(pluginName); + if (dao.getDaoClass() != null) { + dao.purgeExpiredData(); + PurgeLogger.logInfo("Data successfully Purged!", pluginName); + } else { + Method m = dao.getClass().getMethod("purgeExpiredData", + new Class[] {}); + if (m != null) { + if (m.getDeclaringClass().equals(PluginDao.class)) { + PurgeLogger + .logWarn( + "Unable to purge data. 
This plugin does not specify a record class and does not implement a custom purger.", + pluginName); + } else { + if (this.purgeType.equals(PURGE_JOB_TYPE.PURGE_EXPIRED)) { + dao.purgeExpiredData(); + } else { + dao.purgeAllData(); + } + PurgeLogger.logInfo("Data successfully Purged!", + pluginName); + } + } + } + } catch (Exception e) { + failed = true; + // keep getting next exceptions with sql exceptions to ensure + // we can see the underlying error + PurgeLogger + .logError("Error purging expired data!\n", pluginName, e); + Throwable t = e.getCause(); + while (t != null) { + if (t instanceof SQLException) { + SQLException se = ((SQLException) t).getNextException(); + PurgeLogger.logError("Next exception:", pluginName, se); + } + t = t.getCause(); + } + } finally { + ClusterTask purgeLock = PurgeManager.getInstance().getPurgeLock(); + try { + /* + * Update the status accordingly if the purge failed or + * succeeded + */ + PurgeDao purgeDao = new PurgeDao(); + PurgeJobStatus status = purgeDao + .getJobForPlugin(this.pluginName); + if (status == null) { + PurgeLogger.logError( + "Purge job completed but no status object found!", + this.pluginName); + } else { + if (failed) { + status.incrementFailedCount(); + if (status.getFailedCount() >= PurgeManager + .getInstance().getFatalFailureCount()) { + PurgeLogger + .logFatal( + "Purger for this plugin has reached or exceeded consecutive failure limit of " + + PurgeManager + .getInstance() + .getFatalFailureCount() + + ". 
Data will no longer being purged for this plugin.", + pluginName); + } else { + PurgeLogger.logError("Purge job has failed " + + status.getFailedCount() + + " consecutive times.", this.pluginName); + // Back the start time off by half an hour to try to + // purgin soon, don't want to start immediately so + // it doesn't ping pong between servers in a time + // out scenario + Date startTime = status.getStartTime(); + startTime.setTime(startTime.getTime() - (1800000)); + } + } else { + status.setFailedCount(0); + } + + /* + * This purger thread has exceeded the time out duration but + * finally finished. Output a message and update the status + */ + int deadPurgeJobAge = PurgeManager.getInstance() + .getDeadPurgeJobAge(); + Calendar purgeTimeOutLimit = Calendar.getInstance(); + purgeTimeOutLimit.setTimeZone(TimeZone.getTimeZone("GMT")); + purgeTimeOutLimit.add(Calendar.MINUTE, -deadPurgeJobAge); + if (startTime < purgeTimeOutLimit.getTimeInMillis()) { + PurgeLogger + .logInfo( + "Purge job has recovered from timed out state!!", + pluginName); + } + status.setRunning(false); + purgeDao.update(status); + /* + * Log execution times + */ + long executionTime = getAge(); + long execTimeInMinutes = executionTime / 60000; + if (execTimeInMinutes > 0) { + PurgeLogger.logInfo("Purge run time: " + executionTime + + " ms (" + execTimeInMinutes + " minutes)", + this.pluginName); + } else { + PurgeLogger.logInfo("Purge run time: " + executionTime + + " ms", this.pluginName); + } + } + } catch (Throwable e) { + PurgeLogger + .logError( + "An unexpected error occurred upon completion of the purge job", + this.pluginName, e); + } finally { + ClusterLockUtils.unlock(purgeLock, false); + } + } + } + + public void printTimedOutMessage(int deadPurgeJobAge) { + // only print message every 5 minutes + if (System.currentTimeMillis() - lastTimeOutMessage > 300000) { + PurgeLogger.logFatal( + "Purger running time has exceeded timeout duration of " + + deadPurgeJobAge + + " minutes. 
Current running time: " + + (getAge() / 60000) + " minutes", pluginName); + printStackTrace(); + } + } + + /** + * Prints the stack trace for this job thread. + */ + public void printStackTrace() { + StringBuffer buffer = new StringBuffer(); + buffer.append("Stack trace for Purge Job Thread:\n"); + buffer.append(getStackTrace(this)); + // If this thread is blocked, output the stack traces for the other + // blocked threads to assist in determining the source of the + // deadlocked + // threads + if (this.getState().equals(State.BLOCKED)) { + buffer.append("\tDUMPING OTHER BLOCKED THREADS\n"); + buffer.append(getBlockedStackTraces()); + + } + PurgeLogger.logError(buffer.toString(), this.pluginName); + + } + + /** + * Gets the stack traces for all other threads in the BLOCKED state in the + * JVM + * + * @return The stack traces for all other threads in the BLOCKED state in + * the JVM + */ + private String getBlockedStackTraces() { + StringBuffer buffer = new StringBuffer(); + Map
- * The purge manager is designed to adhere to the following rules: - *
- * · The cluster may have no more than 6 purge jobs running simultaneously by
- * default. This property is configurable in the project.properties file
- * · Any given server may have no more than 2 purge jobs running simultaneously
- * by default. This property is configurable in the project.properties file
- * · A purge job for a plugin is considered 'hung' if it has been running for
- * more than 20 minutes by default. This property is configurable in the
- * project.properties file
- * · If a purge job that was previously determined to be hung actually finishes
- * it's execution, the cluster lock is updated appropriately and the purge job
- * is able to resume normal operation. This is in place so if a hung purge
- * process goes unnoticed for a period of time, the server will still try to
- * recover autonomously if it can.
- * · If a purge job is determined to be hung, the stack trace for the thread
- * executing the job is output to the log. Furthermore, if the job is in the
- * BLOCKED state, the stack traces for all other BLOCKED threads is output to
- * the purge log as part of a rudimentary deadlock detection strategy to be used
- * by personnel attempting to remedy the situation.
- * · By default, a fatal condition occurs if a given plugin's purge job fails 3
- * consecutive times.
- * · If a purge job hangs on one server in the cluster, it will try and run on
- * another cluster member at the next purge interval.
- * · If the purge manager attempts to purge a plugin that has been running for
- * longer than the 20 minute threshold, it is considered a failure, and the
- * failure count is updated.
- *
- * - * - *
- * - * SOFTWARE HISTORY - * - * Date Ticket# Engineer Description - * ------------ ---------- ----------- -------------------------- - * Apr 18, 2012 #470 bphillip Initial creation - * - *- * - * @author bphillip - * @version 1.0 - */ -public class PurgeManager { - - /** Purge Manager task name */ - private static final String PURGE_TASK_NAME = "Purge Manager"; - - /** Purge Manager task details */ - private static final String PURGE_TASK_DETAILS = "Purge Manager Job"; - - /** Purge Manager task override timeout. Currently 2 minutes */ - private static final long PURGE_MANAGER_TIMEOUT = 120000; - - /** - * The cluster limit property to be set via Spring with the value defined in - * project.properties - */ - private int clusterLimit = 6; - - /** - * The server limit property to be set via Spring with the value defined in - * project.properties - */ - private int serverLimit = 2; - - /** - * The time in minutes at which a purge job is considered 'dead' or 'hung' - * set via Spring with the value defined in project.properties - */ - private int deadPurgeJobAge = 20; - - /** - * The frequency, in minutes, that a plugin may be purged set via Spring - * with the value defined in project.properties - */ - private int purgeFrequency = 60; - - /** - * How many times a purger is allowed to fail before it is considered fatal. - * Set via Spring with the value defined in project.properties - */ - private int fatalFailureCount = 3; - - /** - * The master switch defined in project.properties that enables and disables - * data purging - */ - private boolean purgeEnabled = true; - - /** Map of purge jobs */ - private Map
+ * The purge manager is designed to adhere to the following rules: + *
+ * · The cluster may have no more than 6 purge jobs running simultaneously by
+ * default. This property is configurable in the project.properties file
+ * · Any given server may have no more than 2 purge jobs running simultaneously
+ * by default. This property is configurable in the project.properties file
+ * · A purge job for a plugin is considered 'hung' if it has been running for
+ * more than 20 minutes by default. This property is configurable in the
+ * project.properties file
+ * · If a purge job that was previously determined to be hung actually finishes
+ * its execution, the cluster lock is updated appropriately and the purge job
+ * is able to resume normal operation. This is in place so if a hung purge
+ * process goes unnoticed for a period of time, the server will still try to
+ * recover autonomously if it can.
+ * · If a purge job is determined to be hung, the stack trace for the thread
+ * executing the job is output to the log. Furthermore, if the job is in the
+ * BLOCKED state, the stack traces for all other BLOCKED threads is output to
+ * the purge log as part of a rudimentary deadlock detection strategy to be used
+ * by personnel attempting to remedy the situation.
+ * · By default, a fatal condition occurs if a given plugin's purge job fails 3
+ * consecutive times.
+ * · If a purge job hangs on one server in the cluster, it will try and run on
+ * another cluster member at the next purge interval.
+ * · If the purge manager attempts to purge a plugin that has been running for
+ * longer than the 20 minute threshold, it is considered a failure, and the
+ * failure count is updated.
+ *
+ * + * + *
+ * + * SOFTWARE HISTORY + * + * Date Ticket# Engineer Description + * ------------ ---------- ----------- -------------------------- + * Apr 18, 2012 #470 bphillip Initial creation + * + *+ * + * @author bphillip + * @version 1.0 + */ +public class PurgeManager { + + /** Purge Manager task name */ + private static final String PURGE_TASK_NAME = "Purge Manager"; + + /** Purge Manager task details */ + private static final String PURGE_TASK_DETAILS = "Purge Manager Job"; + + /** Purge Manager task override timeout. Currently 2 minutes */ + private static final long PURGE_MANAGER_TIMEOUT = 120000; + + /** + * The cluster limit property to be set via Spring with the value defined in + * project.properties + */ + private int clusterLimit = 6; + + /** + * The server limit property to be set via Spring with the value defined in + * project.properties + */ + private int serverLimit = 2; + + /** + * The time in minutes at which a purge job is considered 'dead' or 'hung' + * set via Spring with the value defined in project.properties + */ + private int deadPurgeJobAge = 20; + + /** + * The frequency, in minutes, that a plugin may be purged set via Spring + * with the value defined in project.properties + */ + private int purgeFrequency = 60; + + /** + * How many times a purger is allowed to fail before it is considered fatal. + * Set via Spring with the value defined in project.properties + */ + private int fatalFailureCount = 3; + + /** + * The master switch defined in project.properties that enables and disables + * data purging + */ + private boolean purgeEnabled = true; + + /** Map of purge jobs */ + private Map