cleanup unused and redundant directories on working branch

Former-commit-id: ca996a2a86
mjames-upc 2015-12-30 14:02:46 -07:00
parent dc4b67b586
commit 87d626a78b
623 changed files with 0 additions and 78608 deletions

View file

@@ -1,17 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>deltaScripts</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>

View file

@@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
</pydev_project>

View file

@@ -1,79 +0,0 @@
#!/usr/bin/env python
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Cleanup GFE HDF5 storage: remove *_GridParm.h5 files and delete any
# directories left empty
#
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 11/18/10 njensen Initial Creation.
# 06/13/13 #2044 randerso Fixed to use correct python
# 05/08/14 #3142 dgilling Add better error-handling, logging.
#
#
#
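# This script takes no arguments; it operates directly on the contents of
# /awips2/edex/data/hdf5/gfe (see hdf5loc below).
#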
import os
import logging
hdf5loc = "/awips2/edex/data/hdf5/gfe"
logging.basicConfig(format="%(asctime)s %(name)s:%(lineno)d %(levelname)s: %(message)s",
                    datefmt="%H:%M:%S",
                    level=logging.INFO)
log = logging.getLogger("CleanupGfeHDF5Storage")
def processDir(hdf5Dir):
    # Walk the directory tree removing *_GridParm.h5 files; any directories
    # that become empty after removing those files will also be deleted.
    for root, dirs, files in os.walk(hdf5Dir, topdown=False):
        for file in files:
            if str(file).endswith("_GridParm.h5"):
                fullPath = os.path.join(root, file)
                log.info("Removing " + str(fullPath))
                try:
                    os.remove(fullPath)
                except OSError:
                    log.exception("Could not delete file " + str(fullPath))
        for dir in dirs:
            fullPath = os.path.join(root, dir)
            try:
                if not os.listdir(fullPath):
                    log.info("Removing " + str(fullPath))
                    try:
                        os.rmdir(fullPath)
                    except OSError:
                        log.exception("Could not delete path " + str(fullPath))
            except OSError:
                log.warning("Skipping directory " + str(fullPath), exc_info=True)

def main():
    processDir(hdf5loc)

if __name__ == '__main__':
    main()

View file

@@ -1,11 +0,0 @@
#!/bin/bash
# run the update
/awips2/psql/bin/psql -U awips -d metadata -f CreateNewGfeTables.sql
if [ $? -ne 0 ]; then
echo "FATAL: the update has failed!"
exit 1
fi
echo "INFO: the update has completed successfully!"
exit 0

View file

@@ -1,106 +0,0 @@
DROP TABLE IF EXISTS gfe_spatial;
-- Sequence: gfe_gridlocation_seq
-- DROP SEQUENCE gfe_gridlocation_seq;
CREATE SEQUENCE gfe_gridlocation_seq
INCREMENT 1
MINVALUE 1
MAXVALUE 9223372036854775807
START 1
CACHE 1;
ALTER TABLE gfe_gridlocation_seq
OWNER TO awips;
-- Table: gfe_gridlocation
-- DROP TABLE gfe_gridlocation;
CREATE TABLE gfe_gridlocation
(
id integer NOT NULL,
extent bytea NOT NULL,
nx integer NOT NULL,
ny integer NOT NULL,
origin bytea NOT NULL,
gridpointll bytea NOT NULL,
gridpointur bytea NOT NULL,
latintersect double precision NOT NULL,
latlonll bytea NOT NULL,
latlonorigin bytea NOT NULL,
latlonur bytea NOT NULL,
loncenter double precision NOT NULL,
lonorigin double precision NOT NULL,
projectionid character varying(32) NOT NULL,
projectiontype character varying(20) NOT NULL,
stdparallelone double precision NOT NULL,
stdparalleltwo double precision NOT NULL,
siteid character varying(8) NOT NULL,
timezone character varying(32) NOT NULL,
dbid_id integer NOT NULL,
CONSTRAINT gfe_gridlocation_pkey PRIMARY KEY (id),
CONSTRAINT fk22b8153412156549 FOREIGN KEY (dbid_id)
REFERENCES gfe_dbid (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE CASCADE,
CONSTRAINT gfe_gridlocation_dbid_id_key UNIQUE (dbid_id)
)
WITH (
OIDS=FALSE
);
ALTER TABLE gfe_gridlocation
OWNER TO awips;
-- Sequence: gfe_parminfo_seq
-- DROP SEQUENCE gfe_parminfo_seq;
CREATE SEQUENCE gfe_parminfo_seq
INCREMENT 1
MINVALUE 1
MAXVALUE 9223372036854775807
START 1
CACHE 1;
ALTER TABLE gfe_parminfo_seq
OWNER TO awips;
-- Table: gfe_parminfo
-- DROP TABLE gfe_parminfo;
CREATE TABLE gfe_parminfo
(
id integer NOT NULL,
datamultiplier real NOT NULL,
dataoffset real NOT NULL,
datatype character varying(8) NOT NULL,
descriptivename character varying(64) NOT NULL,
gridtype character varying(8) NOT NULL,
maxvalue real NOT NULL,
minvalue real NOT NULL,
"precision" integer NOT NULL,
rateparm boolean NOT NULL,
duration integer NOT NULL,
repeatinterval integer NOT NULL,
starttime integer NOT NULL,
valid boolean NOT NULL,
timeindependentparm boolean NOT NULL,
unitstring character varying(64) NOT NULL,
storagetype character varying(8) NOT NULL,
gridloc_id integer NOT NULL,
parmid_id integer NOT NULL,
CONSTRAINT gfe_parminfo_pkey PRIMARY KEY (id),
CONSTRAINT fk1871875338803a4d FOREIGN KEY (gridloc_id)
REFERENCES gfe_gridlocation (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE CASCADE,
CONSTRAINT fk187187537bab05cc FOREIGN KEY (parmid_id)
REFERENCES gfe_parmid (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE CASCADE,
CONSTRAINT gfe_parminfo_parmid_id_key UNIQUE (parmid_id)
)
WITH (
OIDS=FALSE
);
ALTER TABLE gfe_parminfo
OWNER TO awips;

View file

@@ -1,11 +0,0 @@
#!/bin/bash
# run the update
/awips2/psql/bin/psql -U awips -d metadata -c "ALTER TABLE gfe_dbid ADD COLUMN removeddate timestamp without time zone;"
if [ $? -ne 0 ]; then
echo "FATAL: the update has failed!"
exit 1
fi
echo "INFO: the update has completed successfully!"
exit 0

View file

@@ -1,135 +0,0 @@
#!/usr/bin/env python
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Update GFE HDF5 Group format to include minutes
#
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 11/18/10 njensen Initial Creation.
# 06/13/13 #2044 randerso Fixed to use correct python
#
#
#
import h5py
import os
import re
import subprocess
hdf5loc = "/awips2/edex/data/hdf5/gfe"
# T::SFC::2013_07_04_06--2013_07_04_07
oldGroupFormat = re.compile("(.+::.+::)(\d{4})_(\d\d)_(\d\d)_(\d\d)--(\d{4})_(\d\d)_(\d\d)_(\d\d)")
# T::SFC::20130704_0600--20130704_0700
newGroupFormat = re.compile("(.+::.+::)(\d{4})(\d\d)(\d\d)_(\d\d)(\d\d)--(\d{4})(\d\d)(\d\d)_(\d\d)(\d\d)")
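# e.g. the old group "T::SFC::2013_07_04_06--2013_07_04_07" is renamed to
# "T::SFC::20130704_0600--20130704_0700"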
def updateFile(filePath):
    print "Updating", filePath
    h5File = h5py.File(filePath)
    origGroups = h5File.keys()
    for groupName in origGroups:
        newMatch = newGroupFormat.match(groupName)
        oldMatch = oldGroupFormat.match(groupName)
        if newMatch:
            #print "Found new format:", groupName
            pass
        elif oldMatch:
            #print "Found old format:", groupName
            (nameAndLevel, startYear, startMonth, startDay, startHour, endYear, endMonth, endDay, endHour) = oldMatch.groups()
            newGroupName = nameAndLevel + startYear + startMonth + startDay + "_" + startHour + "00--" + endYear + endMonth + endDay + "_" + endHour + "00"
            #print "  New format:", newGroupName
            # if new group already exists (partial conversion)
            if newGroupName in origGroups:
                newGroup = h5File[newGroupName]
            # else create new group
            else:
                newGroup = h5File.create_group(newGroupName)
            # move datasets from old group to new group
            oldGroup = h5File[groupName]
            dataSets = oldGroup.keys()
            for dataSet in dataSets:
                #print "  Moving dataSet:", dataSet
                newGroup[dataSet] = oldGroup[dataSet]
                del oldGroup[dataSet]
            # remove old group
            del h5File[groupName]
        else:
            print "Unrecognized group found:", groupName
    h5File.close()

def repack(dir):
    files = os.listdir(dir)
    for file in files:
        filePath = os.path.join(dir, file)
        if os.path.isfile(filePath) and \
                str(filePath).endswith(".h5") and \
                not str(filePath).endswith("_GridParm.h5"):
            repackFilePath = filePath + ".repack"
            try:
                subprocess.check_call(("/awips2/tools/bin/h5repack", filePath, repackFilePath))
            except Exception:
                print "h5repack failed:", filePath
                continue
            try:
                os.remove(filePath)
                os.rename(repackFilePath, filePath)
            except Exception:
                print "error renaming repacked file:", repackFilePath
                continue

def processDir(dir):
    singleton = False
    for file in os.listdir(dir):
        filePath = os.path.join(dir, file)
        if os.path.isfile(filePath) and str(filePath).endswith(".h5"):
            if str(filePath).endswith("_GridParm.h5"):
                if str(filePath).endswith("_00000000_0000_GridParm.h5"):
                    singleton = True
            else:
                updateFile(filePath)
        elif os.path.isdir(filePath):
            # skip the Topo and climo directories (climo is obsolete and
            # should be removed)
            if str(file) != 'Topo' and str(file) != 'climo':
                processDir(filePath)
    if singleton:
        print "repacking singleton database:", dir
        repack(dir)

def main():
    processDir(hdf5loc)

if __name__ == '__main__':
    main()

View file

@@ -1,11 +0,0 @@
#!/bin/bash
# DR #1051 remove invalid bufrmos locations
PSQL="/awips2/psql/bin/psql"
${PSQL} -U awips -d metadata -c "DELETE FROM bufrmosmrf WHERE location_id IN (SELECT DISTINCT id FROM bufrmos_location WHERE latitude > 90 or latitude < -90);"
${PSQL} -U awips -d metadata -c "DELETE FROM bufrmoshpc WHERE location_id IN (SELECT DISTINCT id FROM bufrmos_location WHERE latitude > 90 or latitude < -90);"
${PSQL} -U awips -d metadata -c "DELETE FROM bufrmos_location WHERE latitude > 90 or latitude < -90;"
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE bufrmosmrf;"
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE bufrmoshpc;"
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE bufrmos_location;"

View file

@@ -1,7 +0,0 @@
#!/bin/bash
# DR #1992 - this update script will drop the refHour and validTime columns
# from the bufrua table; refTime holds the exact same value.
PSQL="/awips2/psql/bin/psql"
${PSQL} -U awips -d metadata -c "ALTER TABLE bufrua DROP COLUMN IF EXISTS validtime, DROP COLUMN IF EXISTS refhour;"

View file

@@ -1,23 +0,0 @@
#!/bin/bash
SQL_SCRIPT="createDDSystemStatusTable.sql"
# ensure that the sql script is present
if [ ! -f ${SQL_SCRIPT} ]; then
echo "ERROR: the required sql script - ${SQL_SCRIPT} was not found."
echo "FATAL: the update has failed!"
exit 1
fi
echo "INFO: update started - creating ddsystemstatus table"
# run the update
/awips2/psql/bin/psql -U awips -d metadata -f ${SQL_SCRIPT}
if [ $? -ne 0 ]; then
echo "FATAL: the update has failed!"
exit 1
fi
echo "INFO: the update has completed successfully!"
exit 0

View file

@@ -1,7 +0,0 @@
CREATE TABLE IF NOT EXISTS ddsystemstatus
(
name character varying(255) NOT NULL,
systemtype character varying(255) NOT NULL,
status character varying(255),
CONSTRAINT ddsystemstatus_pkey PRIMARY KEY (name, systemtype)
);

View file

@@ -1,25 +0,0 @@
#!/bin/bash
# This script will move any non-base colormaps from cave_static to common_static.
#
# This update is required with 13.6.1.
#
# This update is only for edex servers which host the cave localization files
#
echo "INFO: Moving all colormaps to common_static."
IFS=$'\n'
commonFiles=`find /awips2/edex/data/utility/cave_static/*/*/colormaps/ -iname '*.cmap'`
for f in $commonFiles; do
newf=${f//cave_static/common_static}
if [ -e "$newf" ]; then
echo Cannot upgrade $f because $newf already exists
else
mkdir -p `dirname $newf`
mv "$f" "$newf"
fi
done
echo "INFO: The update finished successfully."
exit 0

View file

@@ -1,174 +0,0 @@
#!/usr/bin/env python
# This script will update any saved displays that use the older skewT displays
# to use Nsharp instead.
#
# This update only needs to be run if there are saved displays stored outside
# of localization; for procedures saved in localization,
# updateSkewtProcedures.sh calls this automatically.
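#
# Usage: python updateSkewtDisplays.py <bundleFile> [<bundleFile> ...]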
import sys
import xml.etree.ElementTree as ET
xsitype = '{http://www.w3.org/2001/XMLSchema-instance}type'
def upgradeBundle(bundleFile):
    tree = ET.parse(bundleFile)
    root = tree.getroot()
    iterpath = 'bundles/bundle/displayList/displays'
    if root.tag == 'bundle':
        iterpath = 'displayList/displays'
    for display in root.iterfind(iterpath):
        if display.get(xsitype) == "skewtDisplay":
            plugins = getPlugins(display)
            nsharp = False
            varheight = False
            for plugin in plugins:
                nsharp |= isNsharp(plugin)
                varheight |= isHodoVarHeight(plugin)
            if varheight and nsharp:
                # This will cause the bundle to continue loading the old
                # sounding; this is not a big problem until that is deleted.
                print 'Cannot convert bundle with both var height hodo and nsharp'
            elif varheight:
                convertDisplayToHodoVarHeight(display)
            elif nsharp:
                convertDisplayToNsharp(display)
        elif display.get(xsitype) == "d2DNSharpDisplay":
            display.set(xsitype, 'nsharpSkewTPaneDisplay')
            descriptor = display.find('descriptor')
            descriptor.set(xsitype, 'nsharpSkewTPaneDescriptor')
    tree.write(bundleFile)

def getPlugins(xmlDisplay):
    plugins = set()
    for resourceData in xmlDisplay.iterfind('descriptor/resource/resourceData'):
        plugin = getPluginName(resourceData)
        if plugin is not None:
            plugins.add(plugin)
    return plugins

def getPluginName(resourceData):
    if resourceData.get(xsitype) == 'gribSoundingSkewTResourceData':
        return 'grib'
    elif resourceData.get(xsitype) == 'skewTResourceData':
        return getConstraintValue(resourceData, 'pluginName')
    return None

def getConstraintValue(resourceData, key):
    for mapping in resourceData.iterfind('metadataMap/mapping'):
        if mapping.get('key') == key:
            return mapping.find('constraint').get('constraintValue')
    return None

def isNsharp(plugin):
    return plugin in ('grib', 'bufrua', 'goessounding', 'poessounding',
                      'acarssounding', 'modelsounding')

def isHodoVarHeight(plugin):
    return plugin in ('radar', 'profiler')

def convertDisplayToNsharp(xmlDisplay):
    xmlDisplay.set(xsitype, 'nsharpSkewTPaneDisplay')
    descriptor = xmlDisplay.find('descriptor')
    descriptor.set(xsitype, 'nsharpSkewTPaneDescriptor')
    toRemove = []
    for resource in descriptor.iterfind('resource'):
        resourceData = resource.find('resourceData')
        type = resourceData.get(xsitype)
        if type == 'skewTBkgResourceData':
            toRemove.append(resource)
        elif type == 'gribSoundingSkewTResourceData':
            convertResourceToNsharp(resource)
        elif type == 'skewTResourceData':
            convertResourceToNsharp(resource)
        else:
            print "Removing unrecognized resource of type: " + type
            toRemove.append(resource)
    for resource in toRemove:
        descriptor.remove(resource)

def convertResourceToNsharp(xmlResource):
    resourceData = xmlResource.find('resourceData')
    plugin = getPluginName(resourceData)
    if plugin == 'grib':
        resourceData.set(xsitype, 'gribNSharpResourceData')
        resourceData.set('soundingType', getConstraintValue(resourceData, 'info.datasetId'))
        resourceData.set('pointName', 'Point' + resourceData.get('point'))
        resourceData.attrib.pop('point')
    elif plugin == 'bufrua':
        resourceData.set(xsitype, 'bufruaNSharpResourceData')
        resourceData.set('soundingType', 'BUFRUA')
    elif plugin == 'modelsounding':
        reportType = getConstraintValue(resourceData, 'reportType')
        if reportType == 'ETA':
            reportType = 'NAM'
        resourceData.set('soundingType', reportType + 'SND')
        resourceData.set(xsitype, 'mdlSndNSharpResourceData')
    elif plugin == 'goessounding':
        resourceData.set('soundingType', 'GOES')
        resourceData.set(xsitype, 'goesSndNSharpResourceData')
    elif plugin == 'poessounding':
        resourceData.set('soundingType', 'POES')
        resourceData.set(xsitype, 'poesSndNSharpResourceData')
    elif plugin == 'acarssounding':
        resourceData.set('soundingType', 'MDCRS')
        resourceData.set(xsitype, 'acarsSndNSharpResourceData')
    loadProperties = xmlResource.find('loadProperties')
    if loadProperties is not None:
        # since nsharp doesn't use any capabilities, just drop them all
        capabilities = loadProperties.find('capabilities')
        if capabilities is not None:
            loadProperties.remove(capabilities)

def convertDisplayToHodoVarHeight(xmlDisplay):
    xmlDisplay.set(xsitype, 'varHeightRenderableDisplay')
    xmlDisplay.set('tabTitle', 'Var vs height : Log 1050-150')
    descriptor = xmlDisplay.find('descriptor')
    descriptor.set(xsitype, 'varHeightHodoDescriptor')
    toRemove = []
    for resource in descriptor.iterfind('resource'):
        resourceData = resource.find('resourceData')
        pluginName = getPluginName(resourceData)
        type = resourceData.get(xsitype)
        if type == 'skewTBkgResourceData':
            toRemove.append(resource)
        elif type == 'skewTResourceData':
            resourceData.set(xsitype, 'varHeightResourceData')
            resourceData.set('parameter', 'Wind')
            resourceData.set('parameterName', 'Wind')
            if pluginName == 'radar':
                ET.SubElement(resourceData, 'source').text = 'VWP'
            else:
                ET.SubElement(resourceData, 'source').text = pluginName
        else:
            print "Removing unrecognized resource of type: " + type
            toRemove.append(resource)
    for resource in toRemove:
        descriptor.remove(resource)
    heightScale = ET.SubElement(descriptor, 'heightScale')
    heightScale.set('unit', 'MILLIBARS')
    heightScale.set('name', 'Log 1050-150')
    heightScale.set('minVal', '1050.0')
    heightScale.set('maxVal', '150.0')
    heightScale.set('parameter', 'P')
    heightScale.set('parameterUnit', 'hPa')
    heightScale.set('scale', 'LOG')
    heightScale.set('heightType', 'PRESSURE')
    ET.SubElement(heightScale, 'labels').text = '1000,850,700,500,400,300,250,200,150'
    gridGeometry = descriptor.find('gridGeometry')
    gridGeometry.set('rangeX', '0 999')
    gridGeometry.set('rangeY', '0 999')
    gridGeometry.set('envelopeMinX', '0.0')
    gridGeometry.set('envelopeMaxX', '1000.0')
    gridGeometry.set('envelopeMinY', '0.0')
    gridGeometry.set('envelopeMaxY', '1000.0')

if __name__ == '__main__':
    for arg in sys.argv[1:]:
        upgradeBundle(arg)

View file

@@ -1,24 +0,0 @@
#!/bin/bash
# This script will update any D2D procedures files
# which use older skewT displays to use Nsharp.
IFS=$'\n'
files=`ls /awips2/edex/data/utility/cave_static/*/*/procedures/*.xml`
if [ $? -ne 0 ]; then
echo "No procedures found"
exit 1
fi
MY_DIR=`dirname $0`
for f in $files; do
grep 'skewtDisplay\|d2DNSharpDisplay' $f > /dev/null
if [ $? -eq 0 ]; then
echo Updating $f
python $MY_DIR/updateSkewtDisplays.py $f
fi
done
echo "INFO: the update has completed successfully!"
exit 0

View file

@@ -1,158 +0,0 @@
-- create nwx.adminmessages table
DROP TABLE IF EXISTS nwx.adminmessages CASCADE;
CREATE TABLE nwx.adminmessages(
id SERIAL PRIMARY KEY,
productname varchar(60) NOT NULL,
producttablename varchar(30) NOT NULL,
producttype varchar(20) NOT NULL
);
-- create nwx.cmcam table
DROP TABLE IF EXISTS nwx.cmcam CASCADE;
CREATE TABLE nwx.cmcam(
id SERIAL PRIMARY KEY,
productid varchar(6) NOT NULL,
stnid varchar(8) NOT NULL,
stnname varchar(32) NOT NULL,
state varchar(2) NOT NULL,
country varchar(2) NOT NULL,
latitude double precision NOT NULL,
longitude double precision NOT NULL,
elevation int NOT NULL
);
-- create nwx.ncfam table
DROP TABLE IF EXISTS nwx.ncfam CASCADE;
CREATE TABLE nwx.ncfam(
id SERIAL PRIMARY KEY,
productid varchar(6) NOT NULL,
stnid varchar(8) NOT NULL,
stnname varchar(32) NOT NULL,
state varchar(2) NOT NULL,
country varchar(2) NOT NULL,
latitude double precision NOT NULL,
longitude double precision NOT NULL,
elevation int NOT NULL
);
-- create nwx.nesdisam table
DROP TABLE IF EXISTS nwx.nesdisam CASCADE;
CREATE TABLE nwx.nesdisam(
id SERIAL PRIMARY KEY,
productid varchar(6) NOT NULL,
stnid varchar(8) NOT NULL,
stnname varchar(32) NOT NULL,
state varchar(2) NOT NULL,
country varchar(2) NOT NULL,
latitude double precision NOT NULL,
longitude double precision NOT NULL,
elevation int NOT NULL
);
-- create nwx.nesdispm table
DROP TABLE IF EXISTS nwx.nesdispm CASCADE;
CREATE TABLE nwx.nesdispm(
id SERIAL PRIMARY KEY,
productid varchar(6) NOT NULL,
stnid varchar(8) NOT NULL,
stnname varchar(32) NOT NULL,
state varchar(2) NOT NULL,
country varchar(2) NOT NULL,
latitude double precision NOT NULL,
longitude double precision NOT NULL,
elevation int NOT NULL
);
-- create nwx.nwstgam table
DROP TABLE IF EXISTS nwx.nwstgam CASCADE;
CREATE TABLE nwx.nwstgam(
id SERIAL PRIMARY KEY,
productid varchar(6) NOT NULL,
stnid varchar(8) NOT NULL,
stnname varchar(32) NOT NULL,
state varchar(2) NOT NULL,
country varchar(2) NOT NULL,
latitude double precision NOT NULL,
longitude double precision NOT NULL,
elevation int NOT NULL
);
-- Drop nwx.sdm table
DROP TABLE IF EXISTS nwx.sdm CASCADE;
-- create nwx.sdmam table
DROP TABLE IF EXISTS nwx.sdmam CASCADE;
CREATE TABLE nwx.sdmam(
id SERIAL PRIMARY KEY,
productid varchar(6) NOT NULL,
stnid varchar(8) NOT NULL,
stnname varchar(32) NOT NULL,
state varchar(2) NOT NULL,
country varchar(2) NOT NULL,
latitude double precision NOT NULL,
longitude double precision NOT NULL,
elevation int NOT NULL
);
-- create nwx.sdmim table
DROP TABLE IF EXISTS nwx.sdmim CASCADE;
CREATE TABLE nwx.sdmim(
id SERIAL PRIMARY KEY,
productid varchar(6) NOT NULL,
stnid varchar(8) NOT NULL,
stnname varchar(32) NOT NULL,
state varchar(2) NOT NULL,
country varchar(2) NOT NULL,
latitude double precision NOT NULL,
longitude double precision NOT NULL,
elevation int NOT NULL
);
-- create nwx.sdmdhm table
DROP TABLE IF EXISTS nwx.sdmdhm CASCADE;
CREATE TABLE nwx.sdmdhm(
id SERIAL PRIMARY KEY,
productid varchar(6) NOT NULL,
stnid varchar(8) NOT NULL,
stnname varchar(32) NOT NULL,
state varchar(2) NOT NULL,
country varchar(2) NOT NULL,
latitude double precision NOT NULL,
longitude double precision NOT NULL,
elevation int NOT NULL
);
INSERT INTO nwx.datatypegrouplist (datatypegroupname,datatypegrouptablename) values ('Admin Messages','nwx.adminmessages');
INSERT INTO nwx.adminmessages(productname,producttablename,producttype) values ('SDM Administrative Messages','nwx.sdmam','sdmam');
INSERT INTO nwx.sdmam (productid,stnid,stnname,state,country,latitude,longitude,elevation) values ('NOUS42','KWNO','NMC','MD','US',38.82,-76.87,86);
INSERT INTO nwx.adminmessages(productname,producttablename,producttype) values ('SDM International Messages','nwx.sdmim','sdmim');
INSERT INTO nwx.sdmim (productid,stnid,stnname,state,country,latitude,longitude,elevation) values ('NPXX10','KWNO','NMC','MD','US',38.82,-76.87,86);
INSERT INTO nwx.adminmessages(productname,producttablename,producttype) values ('SDM DHS Hazards Messages','nwx.sdmdhm','sdmdhm');
INSERT INTO nwx.sdmdhm (productid,stnid,stnname,state,country,latitude,longitude,elevation) values ('NOUS71','KWNO','NMC','MD','US',38.82,-76.87,86);
INSERT INTO nwx.adminmessages(productname,producttablename,producttype) values ('CMC Administrative Messages','nwx.cmcam','cmcam');
INSERT INTO nwx.cmcam (productid,stnid,stnname,state,country,latitude,longitude,elevation) values ('NOCN05', 'CWAO','MONTREAL_VAAC','CN','CN',45.47,-73.75,-9999);
INSERT INTO nwx.cmcam (productid,stnid,stnname,state,country,latitude,longitude,elevation) values ('AACN01', 'CWAO','MONTREAL_VAAC','CN','CN',45.47,-73.75,-9999);
INSERT INTO nwx.adminmessages(productname,producttablename,producttype) values ('NWSTG Administrative Messages','nwx.nwstgam','nwstgam');
INSERT INTO nwx.nwstgam (productid,stnid,stnname,state,country,latitude,longitude,elevation) values ('NOXX01', 'KWBC','NMC','MD','US',38.82,-76.87,86);
INSERT INTO nwx.adminmessages(productname,producttablename,producttype) values ('NCF Administrative Messages','nwx.ncfam','ncfam');
INSERT INTO nwx.ncfam (productid,stnid,stnname,state,country,latitude,longitude,elevation) values ('NOUS72', 'KNCF','NMC','MD','US',38.82,-76.87,86);
INSERT INTO nwx.adminmessages(productname,producttablename,producttype) values ('NESDIS Product Anomaly Messages','nwx.nesdispm','nesdispm');
INSERT INTO nwx.nesdispm (productid,stnid,stnname,state,country,latitude,longitude,elevation) values ('NOUS71', 'KNES','NESDIS','MD','US',38.82,-76.87,86);
INSERT INTO nwx.adminmessages(productname,producttablename,producttype) values ('NESDIS Administrative Messages','nwx.nesdisam','nesdisam');
INSERT INTO nwx.nesdisam (productid,stnid,stnname,state,country,latitude,longitude,elevation) values ('NOUS72', 'KNES','NESDIS','MD','US',38.82,-76.87,86);
DELETE from nwx.hpcproducts where productname='SDM Messages';
DELETE from nwx.hpcproducts where productname='International Messages';
DROP TABLE IF EXISTS nwx.sdm;
DROP TABLE IF EXISTS nwx.intl;
\connect metadata awips
INSERT INTO awips.nctext_inputfile_type VALUES (181,'sdmdhm','W');
INSERT INTO awips.nctext_inputfile_type VALUES (182,'cmcam','W');
INSERT INTO awips.nctext_inputfile_type VALUES (183,'nwstgam','W');
INSERT INTO awips.nctext_inputfile_type VALUES (184,'ncfam','W');
INSERT INTO awips.nctext_inputfile_type VALUES (185,'nesdispm','W');
INSERT INTO awips.nctext_inputfile_type VALUES (186,'nesdisam','W');
INSERT INTO awips.nctext_inputfile_type VALUES (187,'sdmam','B');
INSERT INTO awips.nctext_inputfile_type VALUES (188,'sdmim','W');

View file

@@ -1,5 +0,0 @@
#!/bin/bash
PSQL=/awips2/psql/bin/psql
DIR=`dirname $0`
${PSQL} -U awips -d ncep -f ${DIR}/addNcepNwxAdminMessageGpTable.sql

View file

@@ -1,22 +0,0 @@
#!/bin/bash
# This script will delete the Ingest.Grib queue from qpid.
# qpid must be running when this script is executed.
#
# This script will also remove the large grib file lock
#
# This update is required with 14.2.1.
#
PSQL="/awips2/psql/bin/psql"
echo "INFO: Deleting Ingest.Grib queue."
curl -X DELETE http://cp1f:8180/rest/queue/edex/Ingest.Grib > /dev/null
echo "INFO: Deleting GribIngestLargeFile cluster locks."
${PSQL} -U awips -d metadata -c "delete from cluster_task where name = 'GribIngestLargeFile';"
echo "INFO: The update was applied successfully."
exit 0

View file

@@ -1,36 +0,0 @@
#!/bin/bash
# DR #2316,2317 replace airep and pirep with ncairep and ncpirep
PSQL="/awips2/psql/bin/psql"
SQL_COMMAND="
delete from plugin_info where name in ('ncpirep','ncairep');
drop table pirep, pirep_anc_data, ncpirep_anc_data, airep;
alter table ncpirep rename to pirep;
alter table ncairep rename to airep;
update pirep set datauri = replace(datauri, 'ncpirep', 'pirep');
update airep set datauri = replace(datauri, 'ncairep', 'airep');
"
${PSQL} -U awips -d metadata -c "${SQL_COMMAND}"
if [ -d "/awips2/edex/data/hdf5/ncpirep" ]
then
mv /awips2/edex/data/hdf5/ncpirep /awips2/edex/data/hdf5/pirep
files=`ls /awips2/edex/data/hdf5/pirep/ncpirep*.h5`
for file in $files; do
newfile=${file/ncpirep/pirep}
mv $file $newfile
done
fi
if [ -d "/awips2/edex/data/hdf5/ncairep" ]
then
mv /awips2/edex/data/hdf5/ncairep /awips2/edex/data/hdf5/airep
files=`ls /awips2/edex/data/hdf5/airep/ncairep*.h5`
for file in $files; do
newfile=${file/ncairep/airep}
mv $file $newfile
done
fi

View file

@@ -1,5 +0,0 @@
#!/bin/bash
PSQL=/awips2/psql/bin/psql
DIR=`dirname $0`
${PSQL} -U awips -d metadata -f ${DIR}/registryBandwidth.sql

View file

@@ -1,154 +0,0 @@
#!/usr/bin/env python
from subprocess import Popen, PIPE
import sys
import h5py
import re
import os
postgresCmd = "psql -U awips -d metadata -t -q -A "
hdf5loc = os.sep + "awips2" + os.sep + "edex" + os.sep + "data" + os.sep + "hdf5" + os.sep
postgres_dataURISeparator = '/'
hdf5_dataURISeparator = '::'
ID_ID = 'id'
DATAURI_ID = 'datauri'
REFTIME_ID = 'to_char(reftime, \'YYYY-MM-DD-HH24\') as reftime'
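# Example (mirroring update_saved_display.sh, which drives this script):
# replace '_' with ' ' at dataURI index 4 of the bufrua table:
#   uri_update.py bufrua 4 '_' ' '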
def printUsage():
    print "usage: <plugin> <index1> <findAt_Index1> <replaceWith_Index1> (1-n times) <optional hdf5 file path relative to plugin name>"
    sys.exit()

def executePostgresSQL(sql):
    result = Popen(postgresCmd + "-c \"" + sql + "\"", stdout=PIPE, shell=True)
    retVal = []
    for line in result.stdout:
        retVal.append(line.strip().split("|"))
    return retVal

def executePostgresSQLFile(file):
    result = Popen(postgresCmd + "-f \"" + file + "\"", stdout=PIPE, shell=True)
    retVal = []
    for line in result.stdout:
        retVal.append(line.strip().split("|"))
    return retVal

def processReplacements(plugin, replacements, hdf5Path):
    columns = [ID_ID, DATAURI_ID]
    hdf5Columns = []
    if hdf5Path is not None:
        columns.append(REFTIME_ID)
        regex = re.compile(r"\[([\w]+)\]")
        for column in regex.findall(hdf5Path):
            hdf5Columns.append(column)
            if column not in columns:
                columns.append(column)
    sql = "SELECT " + columns[0] + ", " + columns[1]
    for i in range(2, len(columns)):
        sql = sql + ", " + columns[i]
    sql = sql + " FROM " + plugin
    results = executePostgresSQL(sql)
    toUpdate = []
    id_idx = columns.index(ID_ID)
    uri_idx = columns.index(DATAURI_ID)
    # REFTIME is only selected when an hdf5 path layout was given
    reftime_idx = columns.index(REFTIME_ID) if hdf5Path is not None else None
    for result in results:
        uri = result[uri_idx]
        parts = uri.split(postgres_dataURISeparator)
        update = False
        for replacement in replacements:
            idx = replacement[0] + 1
            find = replacement[1]
            replace = replacement[2]
            if parts[idx].find(find) != -1:
                parts[idx] = parts[idx].replace(find, replace)
                update = True
        if update:
            uri = ""
            for i in range(1, len(parts)):
                uri = uri + postgres_dataURISeparator + parts[i]
            result.append(uri)  # Append new uri to results
            toUpdate.append(result)
    if len(toUpdate) > 0:
        hdf5_file_mapping = {}
        pathIndexes = []
        for hdf5PathColumn in hdf5Columns:
            pathIndexes.append(columns.index(hdf5PathColumn))
        updateFileName = os.sep + "tmp" + os.sep + plugin + ".uri_update_sql"
        update_file = open(updateFileName, "w")
        for update in toUpdate:
            # Write UPDATE statement to sql file
            id = update[id_idx]
            new_uri = update[len(update) - 1]  # Last entry is updated uri
            update_file.write("UPDATE " + plugin + " SET " + DATAURI_ID
                              + "='" + new_uri + "' WHERE " + ID_ID + "="
                              + id + ";\n")
            if hdf5Path is not None:
                path = plugin + os.sep
                for pathIndex in pathIndexes:
                    path = path + update[pathIndex] + os.sep
                path = path + plugin + "-" + update[reftime_idx] + ".h5"
                file_updates = hdf5_file_mapping.get(path, None)
                if file_updates is None:
                    file_updates = []
                    hdf5_file_mapping[path] = file_updates
                file_updates.append(update)
        update_file.close()
        # Execute and delete temporary file
        executePostgresSQLFile(updateFileName)
        os.remove(updateFileName)
        # Create hdf5 links from new uri to old uri
        for hdf5File in hdf5_file_mapping.keys():
            absolutePath = hdf5loc + hdf5File
            if os.path.exists(absolutePath):
                h5pyFile = h5py.File(absolutePath)
                for entry in hdf5_file_mapping[hdf5File]:
                    old_uri = entry[uri_idx].replace(postgres_dataURISeparator, hdf5_dataURISeparator)[2:]
                    new_uri = entry[len(entry) - 1].replace(postgres_dataURISeparator, hdf5_dataURISeparator)[2:]
                    hasOldUri = old_uri in h5pyFile
                    hasNewUri = new_uri in h5pyFile
                    if hasOldUri and not hasNewUri:
                        h5pyFile[new_uri] = h5pyFile[old_uri]
                    else:
                        print "Skipping linking", old_uri, "to", new_uri + ".", hasOldUri, hasNewUri
            else:
                print "Skipping non-existing file:", absolutePath

if __name__ == '__main__':
    numArgs = len(sys.argv)
    if numArgs < 5:
        printUsage()
    pluginName = sys.argv[1]
    replacements = []
    inc = 3
    for i in range(2, numArgs, inc):
        if (i + inc) <= numArgs:
            replacements.append((int(sys.argv[i]), sys.argv[i + 1], sys.argv[i + 2]))
    if len(replacements) == 0:
        printUsage()
    replacementArgs = len(replacements) * inc
    hdf5Path = None
    if (2 + replacementArgs) < numArgs:
        hdf5Path = sys.argv[numArgs - 1]
    processReplacements(pluginName, replacements, hdf5Path)

View file

@@ -1,33 +0,0 @@
#!/bin/bash
# This script updates all tables in A2 whose dataURIs should contain a space or '/' but instead have an underscore.
# uri_update.py takes the name of the plugin to update, then 1-n sequences of: uri index to check,
# character to look for at that index, and character to replace it with. An optional last argument is
# an hdf5 path layout for modifying the datauris in hdf5; if hdf5 does not need to be modified, it can
# be omitted. This was done in support of Redmine DR 2333.
DIR=`dirname $0`
# acars will replace _ with empty string to remove extra chars
python $DIR/uri_update.py acars 2 '_' ''
# acars is special as it also needs to update the column that has extra spaces in it
PSQL="/awips2/psql/bin/psql"
${PSQL} -U awips -d metadata -c "UPDATE acars SET tailnumber = replace(tailnumber, ' ', '')"
# bufrua needs to replace _ with space
python $DIR/uri_update.py bufrua 4 '_' ' '
# intlsigmet needs to replace _ with space
python $DIR/uri_update.py intlsigmet 3 '_' ' '
# satellite needs to replace _ with space at index 4 and 5
python $DIR/uri_update.py satellite 4 '_' ' ' 5 '_' ' ' '[sectorid]/[physicalelement]/'
# svrwx needs to replace _ with encoded %2F as the field actually contains a '/' in it
python $DIR/uri_update.py svrwx 3 '_' '%2F'
# vaa needs to replace _ with space at index 2 and _ with encoded '/' at index 6
python $DIR/uri_update.py vaa 2 '_' ' ' 6 '_' '%2F'

View file

@@ -1,28 +0,0 @@
#!/bin/bash
# DR #2473 drops all deprecated grib data from the database
PSQL="/awips2/psql/bin/psql"
SQL_COMMAND="
delete from plugin_info where name = 'grib';
delete from plugin_info where name = 'ncgrib';
drop table if exists grib, grib_models;
drop table if exists ncgrib, ncgrib1_parameter_map, ncgrib_genprocess, ncgrib_models, ncgrib_parameters, ncgrib_surfaces, ncgridcoverage;
drop sequence if exists gribseq;
drop sequence if exists ncgribseq;
"
echo "INFO: Dropping grib tables."
${PSQL} -U awips -d metadata -c "${SQL_COMMAND}"
echo "INFO: Removing grib site localization files"
find /awips2/edex/data/utility/edex_static/site/ -iname 'gribPathkeys.xml' -exec rm -v {} \;
find /awips2/edex/data/utility/edex_static/site -iname 'gribPurgeRules.xml' -exec rm -v {} \;
rm -rv /awips2/edex/data/utility/common_static/site/*/grib/
echo "INFO: The update finished successfully."
exit 0

View file

@@ -1,42 +0,0 @@
#!/bin/bash
# DR #2581 - this update script will drop the dataURI column from the lsr table
PSQL="/awips2/psql/bin/psql"
# takes one arg: a table name
# drops the datauri constraint and column if they exist
function dropDatauri {
echo "INFO: Dropping DataURI column from $1"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS ${1}_datauri_key;"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP COLUMN IF EXISTS datauri;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to drop dataURI column for $table"
echo "FATAL: The update has failed."
exit 1
fi
}
# takes three args: table, constraint name, unique columns
# first drops the constraint if it exists and then adds it back; this is
# fairly inefficient if the constraint already exists, but operationally it
# won't, and for testing this allows the script to be rerun easily as a noop
function dropDatauriAndAddConstraint {
dropDatauri $1
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS $2;"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ADD CONSTRAINT $2 UNIQUE $3;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to add new unique constraint for $table"
echo "FATAL: The update has failed."
exit 1
fi
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE $1"
}
echo "INFO: Dropping dataURI columns."
dropDatauriAndAddConstraint lsr lsr_latitude_longitude_officeId_reftime_forecasttime_eventtype_key "(latitude, longitude, officeId, reftime, forecasttime, eventtype)"
echo "INFO: LSR dataURI column dropped successfully"

View file

@@ -1,11 +0,0 @@
#!/bin/bash
# DR #2493 remove mesowest from the database
PSQL="/awips2/psql/bin/psql"
SQL_COMMAND="
delete from plugin_info where name = 'mesowest';
drop table if exists mesowest;
drop sequence if exists mesowestseq;
"
${PSQL} -U awips -d metadata -c "${SQL_COMMAND}"

View file

@@ -1,11 +0,0 @@
#!/bin/bash
# DR #2485 remove ncccfp from the database
PSQL="/awips2/psql/bin/psql"
SQL_COMMAND="
delete from plugin_info where name = 'ncccfp';
drop table if exists ncccfp;
drop sequence if exists ncccfpseq;
"
${PSQL} -U awips -d metadata -c "${SQL_COMMAND}"

View file

@@ -1,11 +0,0 @@
#!/bin/bash
# DR #2505 Disable deployment of recco plugin
PSQL="/awips2/psql/bin/psql"
SQL_COMMAND="
delete from plugin_info where name = 'recco';
drop table if exists recco;
drop sequence if exists reccoseq;
"
${PSQL} -U awips -d metadata -c "${SQL_COMMAND}"

View file

@@ -1,53 +0,0 @@
#!/bin/bash
# DR #2582 - this update script will drop the dataURI column from the vaa table
PSQL="/awips2/psql/bin/psql"
# takes one arg: a table name
# drops the datauri constraint and column if they exist
function dropDatauri {
echo "INFO: Dropping DataURI column from $1"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS ${1}_datauri_key;"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP COLUMN IF EXISTS datauri;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to drop dataURI column for $table"
echo "FATAL: The update has failed."
exit 1
fi
}
function dropRecordType {
echo "INFO: Dropping recordType column from $1"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP COLUMN IF EXISTS recordtype;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to drop recordType column for $table"
echo "FATAL: The update has failed."
exit 1
fi
}
# takes three args: table, constraint name, unique columns
# first drops the constraint if it exists and then adds it back; this is
# fairly inefficient if the constraint already exists, but operationally it
# won't, and for testing this allows the script to be rerun easily as a noop
function dropDatauriAndAddConstraint {
dropDatauri $1
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS $2;"
dropRecordType $1
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ADD CONSTRAINT $2 UNIQUE $3;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to add new unique constraint for $table"
echo "FATAL: The update has failed."
exit 1
fi
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE $1"
}
echo "INFO: Dropping dataURI columns."
dropDatauriAndAddConstraint vaa vaa_latitude_longitude_stationId_reftime_forecasttime_advisoryNumber_key "(latitude, longitude, stationId, reftime, forecasttime, advisoryNumber)"
echo "INFO: VAA dataURI column dropped successfully"

View file

@@ -1,7 +0,0 @@
#!/bin/bash
# DR #2537 - this update script will drop the fcstseconds and timeobs columns
# from the modelsounding table; refTime and forecasttime hold the exact same values.
PSQL="/awips2/psql/bin/psql"
${PSQL} -U awips -d metadata -c "ALTER TABLE modelsounding DROP COLUMN IF EXISTS fcstseconds, DROP COLUMN IF EXISTS timeobs;"

View file

@@ -1,25 +0,0 @@
#!/bin/bash
# This script will move any non-base style rules from cave_static to common_static.
#
# This update is required with 14.2.1.
#
# This update is only for edex servers which host the cave localization files
#
echo "INFO: Moving all style rules to common_static."
IFS=$'\n'
commonFiles=`find /awips2/edex/data/utility/cave_static/*/*/styleRules/ -iname '*.xml'`
for f in $commonFiles; do
newf=${f//cave_static/common_static}
if [ -e "$newf" ]; then
echo Cannot upgrade $f because $newf already exists
else
mkdir -p `dirname $newf`
mv "$f" "$newf"
fi
done
echo "INFO: The update finished successfully."
exit 0

View file

@@ -1,11 +0,0 @@
CREATE TABLE datadeliveryregistrybandwidth
(
timeperiod integer NOT NULL,
bytes integer NOT NULL,
CONSTRAINT datadeliveryregistrybandwidth_pkey PRIMARY KEY (timeperiod)
)
WITH (
OIDS=FALSE
);
ALTER TABLE datadeliveryregistrybandwidth
OWNER TO awips;

View file

@@ -1,5 +0,0 @@
#!/bin/bash
PSQL=/awips2/psql/bin/psql
DIR=`dirname $0`
${PSQL} -U awips -d metadata -f ${DIR}/updateQueriesSql.sql

View file

@@ -1,66 +0,0 @@
CREATE FUNCTION taxonomyelementtype_classificationnode_update() RETURNS void AS $$
DECLARE
t bool;
BEGIN
SELECT EXISTS(
SELECT * FROM information_schema.tables
WHERE
table_schema = 'ebxml' AND
table_name = 'taxonomyelementtype_classificationnode'
) into t;
IF
t ='t'
THEN
delete from ebxml.taxonomyelementtype_classificationnode where classificationnode_id='urn:oasis:names:tc:ebxml-regrep:QueryLanguage:SPARQL';
delete from ebxml.taxonomyelementtype_classificationnode where classificationnode_id='urn:oasis:names:tc:ebxml-regrep:QueryLanguage:SQL-92';
delete from ebxml.taxonomyelementtype_classificationnode where classificationnode_id='urn:oasis:names:tc:ebxml-regrep:QueryLanguage:XQuery';
delete from ebxml.taxonomyelementtype_classificationnode where classificationnode_id='urn:oasis:names:tc:ebxml-regrep:QueryLanguage:EJBQL';
delete from ebxml.taxonomyelementtype_classificationnode where classificationnode_id='urn:oasis:names:tc:ebxml-regrep:query:ExportObject';
delete from ebxml.taxonomyelementtype_classificationnode where classificationnode_id='urn:oasis:names:tc:ebxml-regrep:query:FindAllMyObjects';
delete from ebxml.taxonomyelementtype_classificationnode where classificationnode_id='urn:oasis:names:tc:ebxml-regrep:query:ExtrinsicObjectQuery';
INSERT INTO ebxml.taxonomyelementtype_classificationnode(taxonomyelementtype_id,classificationnode_id)
VALUES('urn:oasis:names:tc:ebxml-regrep:classificationScheme:QueryLanguage','urn:oasis:names:tc:ebxml-regrep:QueryLanguage:HQL');
RAISE NOTICE 'updated ebxml.taxonomyelementtype_classificationnode table, success!';
ELSE
RAISE NOTICE 'Table ebxml.taxonomyelementtype_classificationnode does not exist, skipping!';
END IF;
END;
$$ LANGUAGE plpgsql;
CREATE FUNCTION classificationnode_update() RETURNS void AS $$
DECLARE
t bool;
BEGIN
SELECT EXISTS(
SELECT * FROM information_schema.tables
WHERE
table_schema = 'ebxml' AND
table_name = 'classificationnode'
) into t;
IF
t ='t'
THEN
delete from ebxml.classificationnode where id= 'urn:oasis:names:tc:ebxml-regrep:QueryLanguage:SPARQL';
delete from ebxml.classificationnode where id= 'urn:oasis:names:tc:ebxml-regrep:QueryLanguage:SQL-92';
delete from ebxml.classificationnode where id= 'urn:oasis:names:tc:ebxml-regrep:QueryLanguage:XQuery';
delete from ebxml.classificationnode where id= 'urn:oasis:names:tc:ebxml-regrep:QueryLanguage:EJBQL';
delete from ebxml.classificationnode where id= 'urn:oasis:names:tc:ebxml-regrep:query:ExportObject';
delete from ebxml.classificationnode where id= 'urn:oasis:names:tc:ebxml-regrep:query:FindAllMyObjects';
delete from ebxml.classificationnode where id= 'urn:oasis:names:tc:ebxml-regrep:query:ExtrinsicObjectQuery';
INSERT INTO ebxml.classificationnode (id,lid,objecttype,owner,versionname,code,parent,path)
VALUES ('urn:oasis:names:tc:ebxml-regrep:QueryLanguage:HQL','urn:oasis:names:tc:ebxml-regrep:QueryLanguage:HQL',
'urn:oasis:names:tc:ebxml-regrep:ObjectType:RegistryObject:ClassificationNode','NCF','1','HQL',
'urn:oasis:names:tc:ebxml-regrep:classificationScheme:QueryLanguage','/urn:oasis:names:tc:ebxml-regrep:classificationScheme:QueryLanguage/HQL');
RAISE NOTICE 'updated ebxml.classificationnode table, success!';
ELSE
RAISE NOTICE 'Table ebxml.classificationnode does not exist, skipping!';
END IF;
END;
$$ LANGUAGE plpgsql;
select taxonomyelementtype_classificationnode_update();
select classificationnode_update();
DROP FUNCTION taxonomyelementtype_classificationnode_update();
DROP FUNCTION classificationnode_update();

View file

@@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
<classpathentry kind="src" path="src"/>
<classpathentry kind="output" path="bin"/>
</classpath>

View file

@@ -1,28 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>com.raytheon.uf.edex.upgrade.satellitespatial</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.pde.ManifestBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.pde.SchemaBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.pde.PluginNature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>

View file

@@ -1,7 +0,0 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
org.eclipse.jdt.core.compiler.compliance=1.6
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
org.eclipse.jdt.core.compiler.source=1.6

View file

@@ -1,10 +0,0 @@
Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: Satellite Spatial Upgrade
Bundle-SymbolicName: com.raytheon.uf.edex.upgrade.satellitespatial
Bundle-Version: 1.0.0.qualifier
Bundle-Vendor: RAYTHEON
Bundle-RequiredExecutionEnvironment: JavaSE-1.6
Require-Bundle: org.postgres;bundle-version="9.2.0",
com.raytheon.uf.common.geospatial;bundle-version="1.12.1174",
com.raytheon.uf.common.serialization;bundle-version="1.12.1174"

View file

@@ -1,4 +0,0 @@
source.. = src/
output.. = bin/
bin.includes = META-INF/,\
.

View file

@@ -1,281 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.edex.upgrade.satellitespatial;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.geotools.coverage.grid.GridEnvelope2D;
import org.geotools.coverage.grid.GridGeometry2D;
import org.geotools.geometry.Envelope2D;
import org.geotools.referencing.CRS;
import org.geotools.referencing.crs.DefaultGeographicCRS;
import org.opengis.referencing.crs.CoordinateReferenceSystem;
import com.raytheon.uf.common.geospatial.ISpatialObject;
import com.raytheon.uf.common.geospatial.MapUtil;
import com.raytheon.uf.common.geospatial.util.EnvelopeIntersection;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.io.WKBReader;
/**
* Java application to update the satellite spatial table. Converts old spatial
* format into new format using crs space
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Sep 30, 2013 2333 mschenke Initial creation
*
* </pre>
*
* @author mschenke
* @version 1.0
*/
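// Example invocation (each connection flag is optional and falls back to the
// default defined below):
//
//   java -jar update_satellite_spatial.jar -host localhost -port 5432 \
//       -user awips -password awips -database metadata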
public class UpdateSatSpatial {
private static final String SATELLITE_SPATIAL_TABLE = "satellite_spatial";
private static final String SATELLITE_SPATIAL_GID = "gid";
private static final String SATELLITE_SPATIAL_CRSWKT = "crswkt";
private static final String SATELLITE_SPATIAL_GEOM = "the_geom";
private static final String SATELLITE_SPATIAL_NX = "nx";
private static final String SATELLITE_SPATIAL_NY = "ny";
private static final String SATELLITE_SPATIAL_DX = "dx";
private static final String SATELLITE_SPATIAL_DY = "dy";
private static final String SATELLITE_SPATIAL_MINX = "minx";
private static final String SATELLITE_SPATIAL_MINY = "miny";
private static final String SATELLITE_SPATIAL_MINIMUMS = "minimums";
private static final String HOST_ARGUMENT = "-host";
private static final String DEFAULT_HOST = "localhost";
private static final String PORT_ARGUMENT = "-port";
private static final String DEFAULT_PORT = "5432";
private static final String USER_ARGUMENT = "-user";
private static final String DEFAULT_USER = "awips";
private static final String PASSWORD_ARGUMENT = "-password";
private static final String DEFAULT_PASSWORD = "awips";
private static final String DATABASE_ARGUMENT = "-database";
private static final String DEFAULT_DATABASE = "metadata";
private static final String JDBC_CONNECTION_FORMAT_STRING = "jdbc:postgresql://%s:%s/%s";
private static final String USER_PROPERTY = "user";
private static final String PASSWORD_PROPERTY = "password";
private static Map<String, Object> argumentMap = new HashMap<String, Object>();
private static class SpatialObject implements ISpatialObject {
private static final long serialVersionUID = 1L;
private final int nx;
private final int ny;
private final Geometry geometry;
private final CoordinateReferenceSystem crs;
public SpatialObject(int nx, int ny, Geometry geometry,
CoordinateReferenceSystem crs) {
this.nx = nx;
this.ny = ny;
this.geometry = geometry;
this.crs = crs;
}
@Override
public Geometry getGeometry() {
return geometry;
}
@Override
public CoordinateReferenceSystem getCrs() {
return crs;
}
@Override
public Integer getNx() {
return nx;
}
@Override
public Integer getNy() {
return ny;
}
}
public static void main(String[] args) throws Exception {
// Parse arguments
parseArguments(args);
Connection conn = openConnection();
Statement query = conn.createStatement();
ResultSet results = query.executeQuery("SELECT ("
+ SATELLITE_SPATIAL_MINX + " || '_' || "
+ SATELLITE_SPATIAL_MINY + ") as " + SATELLITE_SPATIAL_MINIMUMS
+ ", " + SATELLITE_SPATIAL_GID + ", "
+ SATELLITE_SPATIAL_CRSWKT + ", " + SATELLITE_SPATIAL_NX + ", "
+ SATELLITE_SPATIAL_NY + ", " + SATELLITE_SPATIAL_DX + ", "
+ SATELLITE_SPATIAL_DY + ", AsBinary(" + SATELLITE_SPATIAL_GEOM
+ ") as " + SATELLITE_SPATIAL_GEOM + " FROM "
+ SATELLITE_SPATIAL_TABLE);
String updateStatement = "UPDATE " + SATELLITE_SPATIAL_TABLE + " SET ("
+ SATELLITE_SPATIAL_MINX + ", " + SATELLITE_SPATIAL_MINY + ", "
+ SATELLITE_SPATIAL_DX + ", " + SATELLITE_SPATIAL_DY + ", "
+ SATELLITE_SPATIAL_GEOM
+ ") = (?, ?, ?, ?, GeomFromText(? , -1)) WHERE "
+ SATELLITE_SPATIAL_GID + " = ?";
while (results.next()) {
int gid = results.getInt(SATELLITE_SPATIAL_GID);
String mins = results.getString(SATELLITE_SPATIAL_MINIMUMS);
if (mins == null || mins.isEmpty()) {
System.out
.println("Upgrading satellite_spatial record: " + gid);
// No minimum values set, continue with upgrade
Geometry geometry = new WKBReader().read(results
.getBytes(SATELLITE_SPATIAL_GEOM));
CoordinateReferenceSystem crs = CRS.parseWKT(results
.getString(SATELLITE_SPATIAL_CRSWKT));
int nx = results.getInt(SATELLITE_SPATIAL_NX);
int ny = results.getInt(SATELLITE_SPATIAL_NY);
double dx = results.getDouble(SATELLITE_SPATIAL_DX);
double dy = results.getDouble(SATELLITE_SPATIAL_DY);
ISpatialObject object = new SpatialObject(nx, ny, geometry, crs);
GridGeometry2D resultGeom = MapUtil.getGridGeometry(object);
Envelope2D env = resultGeom.getEnvelope2D();
GridEnvelope2D grid = resultGeom.getGridRange2D();
double minX = env.getMinX();
double minY = env.getMinY();
if (dx == 0.0) {
dx = env.getWidth() / grid.width;
}
if (dy == 0.0) {
dy = env.getHeight() / grid.height;
}
Geometry newGeom = EnvelopeIntersection
.createEnvelopeIntersection(
resultGeom.getEnvelope(),
new Envelope2D(DefaultGeographicCRS.WGS84,
-180, -90, 360, 180), 1.0, 10, 10)
.getEnvelope();
PreparedStatement update = conn
.prepareStatement(updateStatement);
int index = 1;
update.setDouble(index++, minX);
update.setDouble(index++, minY);
update.setDouble(index++, dx);
update.setDouble(index++, dy);
update.setString(index++, newGeom.toText());
update.setInt(index++, gid);
update.execute();
} else {
System.err
.println("Skipping update of satellite_spatial record: "
+ gid);
}
}
conn.close();
}
private static Connection openConnection() throws SQLException {
String host = getString(HOST_ARGUMENT, DEFAULT_HOST);
String port = getString(PORT_ARGUMENT, DEFAULT_PORT);
String database = getString(DATABASE_ARGUMENT, DEFAULT_DATABASE);
String user = getString(USER_ARGUMENT, DEFAULT_USER);
String password = getString(PASSWORD_ARGUMENT, DEFAULT_PASSWORD);
DriverManager.registerDriver(new org.postgresql.Driver());
String connectionURL = String.format(JDBC_CONNECTION_FORMAT_STRING,
host, port, database);
Properties props = new Properties();
props.setProperty(USER_PROPERTY, user);
props.setProperty(PASSWORD_PROPERTY, password);
return DriverManager.getConnection(connectionURL, props);
}
private static void parseArguments(String[] args) {
for (int i = 0; i < args.length; ++i) {
String arg = args[i];
if (arg.startsWith("-")) {
// we have a key
if (args.length > (i + 1)
&& args[i + 1].startsWith("-") == false) {
argumentMap.put(arg, args[i + 1]);
++i;
} else {
argumentMap.put(arg, true);
}
}
}
}
private static String getString(String key, String defaultValue) {
Object val = argumentMap.get(key);
if (val != null) {
return val.toString();
}
return defaultValue;
}
}

View file

@@ -1,16 +0,0 @@
#!/bin/bash
# This script is needed for DR 2333 for GOES-R readiness. The satellite spatial table needed to be refactored to store
# the data to construct a GridGeometry2D in CRS space instead of lat/lon, because the geostationary projection's
# corner points are not valid in lat/lon space.
DIR=`dirname $0`
PSQL="/awips2/psql/bin/psql"
JAVA="/awips2/java/bin/java"
# Update columns on tables for satellite
${PSQL} -U awips -d metadata -c "ALTER TABLE satellite DROP COLUMN upperrightlat, DROP COLUMN upperrightlon, DROP COLUMN sizerecords, DROP COLUMN numrecords;"
${PSQL} -U awips -d metadata -c "ALTER TABLE satellite_spatial DROP COLUMN la1, DROP COLUMN la2, DROP COLUMN latin, DROP COLUMN lo1, DROP COLUMN lo2, DROP COLUMN lov;"
${PSQL} -U awips -d metadata -c "ALTER TABLE satellite_spatial ADD COLUMN minx double precision, ADD COLUMN miny double precision;"
# Run application to convert sat spatial entries to use crs space
${JAVA} -jar update_satellite_spatial.jar

View file

@@ -1,29 +0,0 @@
#!/bin/bash
# DR #2581 - This update script will add the dataURI column to the lsr table
# The column needs to be there for 14.2.1.
# Only run this if you have already run dropLSRdataURI.sh; otherwise, run
# neither this script nor that one.
PSQL="/awips2/psql/bin/psql"
# adds the datauri constraint and column
function addDataUriLSR {
echo "INFO: Adding DataURI column to lsr"
${PSQL} -U awips -d metadata -c "ALTER TABLE lsr DROP CONSTRAINT IF EXISTS lsr_latitude_longitude_stationId_reftime_forecasttime_eventtype;"
${PSQL} -U awips -d metadata -c "ALTER TABLE lsr ADD COLUMN datauri varchar(255);"
${PSQL} -U awips -d metadata -c "ALTER TABLE lsr ADD CONSTRAINT lsr_datauri_key UNIQUE (datauri);"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to repair dataURI for lsr"
echo "FATAL: The update has failed."
exit 1
fi
}
echo "INFO: Adding LSR dataURI column back in."
addDataUriLSR
echo "INFO: LSR dataURI column added successfully"

View file

@@ -1,3 +0,0 @@
alter table if exists madis drop constraint madis_location_reftime_provider_subprovider_restriction_key;
alter table if exists madis add CONSTRAINT madis_latitude_longitude_stationid_reftime_provider_subprovider UNIQUE (latitude, longitude, stationid, reftime, provider, subprovider, restriction);

View file

@@ -1,5 +0,0 @@
#!/bin/bash
PSQL=/awips2/psql/bin/psql
DIR=`dirname $0`
${PSQL} -U awips -d metadata -f ${DIR}/updateMadisTableConstraint.sql

View file

@@ -1,6 +0,0 @@
#!/bin/bash
# DR #1538 Expand the name column in the slot table
PSQL="/awips2/psql/bin/psql"
${PSQL} -U awips -d metadata -c "ALTER TABLE ebxml.slot ALTER COLUMN name TYPE character varying(1024)"

View file

@@ -1,31 +0,0 @@
#!/usr/bin/env python
# This script will update any saved displays or procedures that reference the
# old Topo file name.
#
# This update only needs to be run if there are saved displays stored outside
# of localization; for procedures saved in localization, updateTopoFile.sh
# calls this automatically.
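#
# Usage: python updateTopoFile.py <bundleFile> [<bundleFile> ...]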
import sys
import xml.etree.ElementTree as ET
xsitype = '{http://www.w3.org/2001/XMLSchema-instance}type'
def upgradeBundle(bundleFile):
    tree = ET.parse(bundleFile)
    root = tree.getroot()
    iterpath = 'bundles/bundle/displayList/displays'
    if root.tag == 'bundle':
        iterpath = 'displayList/displays'
    for display in root.iterfind(iterpath):
        if display.get(xsitype) == "d2DMapRenderableDisplay":
            for resourceData in display.iterfind('descriptor/resource/resourceData'):
                if resourceData.get(xsitype) == 'topoResourceData':
                    for topoFile in resourceData.iterfind('topoFile'):
                        if topoFile.text == 'srtm30.hdf':
                            topoFile.text = 'defaultTopo.h5'
    tree.write(bundleFile)

if __name__ == '__main__':
    for arg in sys.argv[1:]:
        upgradeBundle(arg)

View file

@@ -1,24 +0,0 @@
#!/bin/bash
# This script will update any D2D procedures files
# which use the old Topo file name
IFS=$'\n'
files=`ls /awips2/edex/data/utility/cave_static/*/*/procedures/*.xml`
if [ $? -ne 0 ]; then
echo "No procedures found"
exit 1
fi
MY_DIR=`dirname $0`
for f in $files; do
grep 'srtm30.hdf' $f > /dev/null
if [ $? -eq 0 ]; then
echo Updating $f
python $MY_DIR/updateTopoFile.py $f
fi
done
echo "INFO: the update has completed successfully!"
exit 0

View file

@ -1,218 +0,0 @@
-- Script to create new tables in the IHFS db for dual-pol work
create table DAARadar
(
radid varchar(3) not null,
obstime TIMESTAMP not null,
minoff smallint,
maxvalh FLOAT4,
maxvald FLOAT4,
s1_bias_value FLOAT4,
producttime TIMESTAMP,
null_product_flag smallint,
coverage_dur integer,
grid_filename varchar(20)
)
;
ALTER TABLE DAARadar
ADD CONSTRAINT daaradar_pk
PRIMARY KEY (radid, obstime);
ALTER TABLE DAARadar
ADD CONSTRAINT daaradar_radloc_fk
FOREIGN KEY (radid)
REFERENCES RadarLoc (radid)
MATCH FULL;
revoke all on DAARadar from public;
grant select, insert, update, delete on DAARadar to public;
-- Add Table
create table DAABiasDyn
(
radid varchar(3) not null,
office_id varchar(5) not null,
obstime TIMESTAMP not null,
memspan_ind smallint not null,
numpairs FLOAT8,
sumgag FLOAT4,
sumrad FLOAT4,
bias FLOAT4
);
ALTER TABLE DAABiasDyn
ADD CONSTRAINT daabiasdynparams_pk
PRIMARY KEY (radid, office_id, obstime, memspan_ind);
revoke all on DAABiasDyn from public;
grant select, insert, update, delete on DAABiasDyn to public;
--
create table DAARadarResult
(
radid varchar(3) not null,
obstime TIMESTAMP not null,
num_gages smallint,
rad_avail varchar(1),
rw_bias_val_used FLOAT8,
mem_span_used FLOAT8,
edit_bias varchar(1),
ignore_radar varchar(1)
);
ALTER TABLE DAARadarResult
ADD CONSTRAINT daaradarresult_pk
PRIMARY KEY (radid, obstime);
revoke all on DAARadarResult from public;
grant select, insert, update, delete on DAARadarResult to public;
-- Add Table HPERadarResult
create table HPERadarResult
(
hpe_productname varchar(30) not null,
producttime TIMESTAMP not null,
num_radar_avail smallint,
bias_source varchar(20),
radar_data_source varchar(1)
)
;
ALTER TABLE HPERadarResult
ADD CONSTRAINT hperadarresult_pk
PRIMARY KEY (hpe_productname, producttime);
revoke all on HPERadarResult from public;
grant select, insert, update, delete on HPERadarResult to public;
-- Add DSARadar, DPRRadar tables, etc
create table DSARadar
(
radid varchar(3) not null,
obstime TIMESTAMP not null,
volcovpat smallint,
opermode smallint,
maxval FLOAT4,
scale FLOAT4,
setoff FLOAT4,
begin_time TIMESTAMP,
end_time TIMESTAMP,
j_beg_date smallint,
j_beg_time smallint,
j_end_date smallint,
j_end_time smallint,
mean_field_bias smallint,
nullproductflag smallint,
grid_filename varchar(20) -- file name of location of grid data
)
;
ALTER TABLE DSARadar
ADD CONSTRAINT DSAradar_pk
PRIMARY KEY (radid, obstime);
ALTER TABLE DSARadar
ADD CONSTRAINT DSAradar_radloc_fk
FOREIGN KEY (radid)
REFERENCES RadarLoc (radid)
MATCH FULL;
revoke all on DSARadar from public;
grant select, insert, update, delete on DSARadar to public;
-- Add Table DSAAdapt
create table DSAAdapt
(
radid varchar(3) not null,
obstime timestamp not null,
num_of_adap smallint,
default_ml_depth FLOAT4,
ml_overide_flag varchar(8),
kdp_mult FLOAT4,
kdp_power FLOAT4,
z_r_mult FLOAT4,
z_r_power FLOAT4,
zdr_z_mult FLOAT4,
zdr_z_power FLOAT4,
zdr_zdr_power FLOAT4,
min_corr_precip FLOAT4,
min_corr_kdp FLOAT4,
refl_max FLOAT4,
kdp_max_beam_blk FLOAT4,
max_usability_blk FLOAT4,
kdp_min_usage_rate FLOAT4,
ws_mult FLOAT4,
gr_mult FLOAT4,
rh_mult FLOAT4,
ds_mult FLOAT4,
ic_mult FLOAT4,
grid_is_full FLOAT4,
paif_rate FLOAT4,
paif_area FLOAT4,
rain_time_thresh FLOAT4,
num_zones FLOAT4,
max_precip_rate FLOAT4,
restart_time FLOAT4,
max_interp_time FLOAT4,
max_hourly_acc FLOAT4,
time_bias FLOAT4,
num_grpairs FLOAT4,
reset_bias FLOAT4,
longst_lag FLOAT4
)
;
ALTER TABLE DSAAdapt
ADD CONSTRAINT dsaadapt_pk
PRIMARY KEY (radid, obstime);
ALTER TABLE DSAAdapt
ADD CONSTRAINT dsaadapt_rad_fk
FOREIGN KEY (radid)
REFERENCES RadarLoc (radid)
MATCH FULL;
revoke all on DSAAdapt from public;
grant select, insert, update, delete on DSAAdapt to public;
-- Add Table DPRRadar
create table DPRRadar
(
radid varchar(3) not null,
obstime TIMESTAMP not null,
volcovpat smallint,
opermode smallint,
maxval FLOAT4,
scale FLOAT4,
setoff FLOAT4,
j_end_date integer,
j_end_time integer,
mean_field_bias smallint,
precipdetectedflag smallint,
grid_filename varchar(20) -- file name of location of grid data
)
;
ALTER TABLE DPRRadar
ADD CONSTRAINT DPRradar_pk
PRIMARY KEY (radid, obstime);
ALTER TABLE DPRRadar
ADD CONSTRAINT DPRradar_radloc_fk
FOREIGN KEY (radid)
REFERENCES RadarLoc (radid)
MATCH FULL;
revoke all on DPRRadar from public;
grant select, insert, update, delete on DPRRadar to public;

View file

@ -1,135 +0,0 @@
#!/bin/bash
#
# Clean up platform and users dictionaries.
# 09/10/2014 lshi
#
#platform dictionary(lx, px): /awips2/cave/etc/spelldict
#user EDEX dictionary(dx): /awips2/edex/data/utility/cave_static/user/USER/spelldict
#user CAVE dictionary(lx/px/dx): /home/USER/caveData/etc/user/USER/spelldict
#dx (one of dx):
#remove all users' CAVE dictionary
#cleanup all users' EDEX dictionary
#
#all others:
#clean up platform dictionary
#
cmddir=$(dirname $_)
FNAME="spelldict"
user=$(whoami)
host=$(hostname)
stddict=$cmddir/$FNAME.std
[ ! -f $stddict ] && {
echo Error: the standard dictionary \"$stddict\" not found.
exit 1
}
edex_user_dir=/awips2/edex/data/utility/cave_static/user/
cave_etc=/awips2/cave/etc
run_type=0
clean () {
lines=`cat $1 |wc -l`
size=`cat $1 |wc -c`
MSG="$1, size=$size, #line=$lines:"
LASTWD=$(grep 'zymurgy' $1)
if [ $run_type == 1 ]
then
cp -p $1 "$1.bak"
cp $stddict $1
if [ "$LASTWD" ]
then
sed "1, /^$LASTWD/d" "$1.bak" >> $1
else
cat $1.bak >> $1
fi
echo "$MSG modified, #line=$(cat $1 |wc -l)"
else
echo $MSG "modified"
fi
}
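# Editor's note (illustrative): 'zymurgy' is assumed to be the last word of the
# standard dictionary. The sed above deletes line 1 through that marker in the
# backup copy, so only words the user appended after the standard list are
# carried over onto the fresh standard dictionary.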
remove () {
if [ $run_type == 1 ] && [ -f $1 ]
then
mv $1 "$1.bak"
[[ $1 == ${cave_etc}* ]] && cat /dev/null > $1 || rm -f $1
fi
echo "$1, removed"
}
create () {
[ $run_type == 1 ] && (
cp $stddict $1
chown awips $1
chgrp fxalpha $1
chmod 644 $1
)
echo "$1, created the standard dictionary"
}
usage () {
echo "Option: -dryrun: dry run; -run: do it"
exit 0
}
[ $# = 0 ] && usage
[ $1 == -run ] && run_type=1
[ $1 == -dryrun ] && run_type=2
[ $run_type == 0 ] && usage
echo "run_type=$run_type"
wstype=xxx
[ $# == 2 ] && wstype=$2
if [ -d $edex_user_dir ] && [ $wstype != -lx ]
then
echo "Clean up users' dictionaries ..."
if [ $user != root ]
then
echo "You must run this script as the user 'root'."
exit 1
fi
for d in $(ls -d /home/*);
do
f=$d/caveData/etc/user/$(basename $d)/$FNAME
[ -f $f ] && remove $f
done
for f in `find $edex_user_dir -maxdepth 2 -name $FNAME`;
do
clean $f
done
fi
if [ -d $cave_etc ] && [ $wstype != -dx ]
then
f=$cave_etc/$FNAME
echo "Clean up the platform dictionary ${f} ..."
if [ $user != awips ] && [ $user != root ]
then
echo "You must run this script as the user 'awips' or 'root'."
exit 1
fi
if [ -f $f ]
then
clean $f
else
create $f
fi
fi
if [ ! -d $edex_user_dir ] && [ ! -d $cave_etc ]
then
echo "Please run this script on a 'dx', 'lx', px or 'xt' workstation. "
exit 1
fi
exit 0

View file

@ -1,66 +0,0 @@
#!/bin/bash
# DR #2060 - this update script will drop the dataURI column from the grid table
PSQL="/awips2/psql/bin/psql"
# drops the datauri constraint and column if they exist
function dropDatauri {
echo "INFO: Dropping DataURI column from grid"
${PSQL} -U awips -d metadata -c "ALTER TABLE grid DROP CONSTRAINT IF EXISTS grid_datauri_key;"
${PSQL} -U awips -d metadata -c "ALTER TABLE grid DROP COLUMN IF EXISTS datauri;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to drop dataURI column for grid"
echo "FATAL: The update has failed."
exit 1
fi
}
# takes one arg: name of the index
function dropIndex {
${PSQL} -U awips -d metadata -c "DROP INDEX IF EXISTS \"$1\";"
}
# takes three args: table, index name, columns
# will first drop the index if it exists and then add it back. This is
# fairly inefficient when the index does exist, but operationally it won't,
# and for testing it allows the script to be rerun easily as a no-op.
function dropAndAddIndex {
${PSQL} -U awips -d metadata -c "DROP INDEX IF EXISTS \"$2\";"
${PSQL} -U awips -d metadata -c "CREATE INDEX $2 ON $1 USING btree $3;"
}
# takes three args: table, constraint name, unique columns
# will first drop the constraint if it exists and then add it back. This is
# fairly inefficient when the constraint does exist, but operationally it
# won't, and for testing it allows the script to be rerun easily as a no-op.
function dropAndAddConstraint {
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS $2;"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ADD CONSTRAINT $2 UNIQUE $3;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to add new unique constraint for $1"
echo "FATAL: The update has failed."
exit 1
fi
}
# takes one arg: name of the table
function vacuumTable {
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE $1"
}
echo "INFO: Dropping dataURI columns."
dropAndAddConstraint grid grid_reftime_forecasttime_info_id_rangestart_rangeend_key "(refTime, forecastTime, info_id, rangestart, rangeend)"
dropAndAddConstraint grid_info grid_info_datasetid_parameter_abbreviation_level_id_seconda_key "(datasetid, parameter_abbreviation, level_id, secondaryid, ensembleid, location_id)"
dropIndex gridDatasetReftime_idx
dropIndex grid_reftimeindex
dropIndex gridinfoNameParamLevel_idx
dropAndAddIndex grid grid_info_id_index "(info_id)"
dropDatauri
vacuumTable grid
vacuumTable grid_info
echo "INFO: grid dataURI column dropped successfully"

View file

@ -1,9 +0,0 @@
#!/bin/bash
# This script will rename the lightning source column in any D2D bundle files
# This update is only for edex servers which host the cave localization files
MY_DIR=`dirname $0`
bash $MY_DIR/utility/updateLightningNameInXML.sh -b
exit 0

View file

@ -1,9 +0,0 @@
#!/bin/bash
# DR #2667 Add binlightning support to Data Access Framework
PSQL="/awips2/psql/bin/psql"
SQL_COMMAND="
ALTER TABLE binlightning RENAME COLUMN lightsource TO source;
"
${PSQL} -U awips -d metadata -c "${SQL_COMMAND}"

View file

@ -1,9 +0,0 @@
#!/bin/bash
# This script will rename the lightning source column in any D2D procedure files
# This update is only for edex servers which host the cave localization files
MY_DIR=`dirname $0`
bash $MY_DIR/utility/updateLightningNameInXML.sh -p
exit 0

View file

@ -1,32 +0,0 @@
#!/usr/bin/env python
# This script will replace attribute values for XML elements that match an xpath
import sys
import xml.etree.ElementTree as ET
if len(sys.argv) != 6:
print "Usage: %s [xml inputfile file] [output file name] [xpath] [attribute name] [replacement value]" % (sys.argv[0])
sys.exit(1)
print "Parsing XML file at " + sys.argv[1]
tree = ET.parse(sys.argv[1])
root = tree.getroot()
matches = root.findall(sys.argv[3])
if len(matches) < 1:
print "No matches found, exiting"
sys.exit(0)
attribute = sys.argv[4]
replValue = sys.argv[5]
for match in matches:
if attribute in match.attrib:
print "Replacing attribute '%s': old value '%s', new value '%s'" % \
(attribute, match.attrib[attribute], replValue)
match.attrib[attribute] = replValue
print "Writing results to file at " + sys.argv[2]
tree.write(sys.argv[2])
print "Done"

View file

@ -1,63 +0,0 @@
#!/bin/bash
# This script will rename the lightning source column in any D2D bundle/procedure files
# This update is only for edex servers which host the cave localization files
function usage()
{
echo "Usage: $0 [-p|-b]"
echo "Use '$0 -p to update procedure files"
echo "Use '$0 -b to update bundle files"
}
if [[ $# < 1 ]]
then
usage
exit 1
fi
IFS=$'\n'
if [[ $1 == '-b' ]]
then
files=`find /awips2/edex/data/utility/cave_static/*/*/bundles/ -iname '*.xml'`
elif [[ $1 == '-p' ]]
then
files=`ls /awips2/edex/data/utility/cave_static/*/*/procedures/*.xml`
else
usage
exit 1
fi
if [ $? -ne 0 ]; then
echo "No files found."
exit 1
fi
echo ""
echo "Press Enter to perform the updates Ctrl-C to quit."
read done
xpath=".//resourceData[@{http://www.w3.org/2001/XMLSchema-instance}type='lightningResourceData']//mapping[@key='lightSource']"
attributeName="key"
replValue="source"
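# Editor's note (illustrative, structure assumed): the xpath above matches
# elements of the form
#   <resourceData xsi:type="lightningResourceData">
#     <mapping key="lightSource">...</mapping>
#   </resourceData>
# and the key attribute is renamed from "lightSource" to "source".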
MY_DIR=`dirname $0`
for f in $files; do
python $MY_DIR/replaceAttributeInXML.py $f $f.tmp $xpath $attributeName $replValue
if [[ $? == 0 ]]
then
# if output file doesn't exist, xpath wasn't found
if [[ -e $f.tmp ]]
then
mv $f.tmp $f
fi
else
echo "ERROR: Problem updating file $f"
fi
done
echo "INFO: The update finished successfully."
exit 0

View file

@ -1,9 +0,0 @@
#!/bin/bash
echo "INFO: update started - removing collaboration local groups localization files"
find /awips2/edex/data/utility -type f -regex '.*collaboration/localGroups.xml$' -exec rm -vf {} \;
echo "INFO: the update has completed successfully!"
exit 0

View file

@ -1,38 +0,0 @@
#!/bin/bash
# This script will move any non-base derived parameter functions and definitions
# from cave_static to common_static.
#
# This update is required with 14.3.1.
#
echo "INFO: Moving all derived parameter definitions and functions to common_static."
IFS=$'\n'
# LEVEL NAME
definitionFiles=`find /awips2/edex/data/utility/cave_static/*/*/derivedParameters/definitions/ -maxdepth 1 -iname '*.xml'`
functionFiles=`find /awips2/edex/data/utility/cave_static/*/*/derivedParameters/functions/ -maxdepth 1 -iname '*.py'`
for f in $definitionFiles; do
newf=${f//cave_static/common_static}
if [ -e "$newf" ]; then
echo cannot upgrade $f because $newf already exists
else
mkdir -p `dirname $newf`
#echo "moving $f"
mv "$f" "$newf"
fi
done
for f in $functionFiles; do
newf=${f//cave_static/common_static}
if [ -e "$newf" ]; then
echo cannot upgrade $f because $newf already exists
else
mkdir -p `dirname $newf`
#echo "moving $f"
mv "$f" "$newf"
fi
done
echo "INFO: The update finished successfully."
exit 0

View file

@ -1,11 +0,0 @@
#!/bin/bash
echo "INFO: delete FSSObs config files - removing safeseas, snow and fog area configuration files."
echo "Replace site AKQ with current one."
cd /awips2/edex/data/utility/common_static/site/AKQ
rm -rf fog safeseas snow
echo "INFO: the update has completed successfully!"
exit 0

View file

@ -1,43 +0,0 @@
#!/bin/bash
# DR 2864 - update the location of saved subset files
echo "Updating saved subset file locations"
startDir=/awips2/edex/data/utility/cave_static/user
cd $startDir
users=$(ls -1)
for i in $users
do
cd $i
if [ -e dataDelivery/subset ]
then
cd dataDelivery/subset
if [ ! -e GRID ]
then
mkdir GRID
fi
if [ ! -e POINT ]
then
mkdir POINT
fi
gridFiles=$(grep providerName *.xml | grep NOMADS | cut -d: -f1)
pointFiles=$(grep providerName *.xml | grep MADIS | cut -d: -f1)
for j in $gridFiles
do
mv $j* GRID
done
for j in $pointFiles
do
mv $j* POINT
done
fi
cd $startDir
done
echo "Update complete"

View file

@ -1,16 +0,0 @@
#!/bin/bash
# DR 3026 - Add metadata column to FFMP database
PSQL="/awips2/psql/bin/psql"
echo "INFO: Adding column metadata to table ffmp"
${PSQL} -U awips -d metadata -c "ALTER TABLE ffmp ADD COLUMN metadata varchar(255);"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to add column metadata to table ffmp"
echo "FATAL: The update has failed."
exit 1
fi
echo "INFO: column metadata added successfully"

View file

@ -1,10 +0,0 @@
-- Insert records into PurgedynData table
INSERT INTO PurgeDynData VALUES('DAARadar', 'obstime', 240, 240);
INSERT INTO PurgeDynData VALUES('DAABiasDyn', 'obstime', 240, 240);
INSERT INTO PurgeDynData VALUES('DAARadarResult', 'obstime', 240, 240);
INSERT INTO PurgeDynData VALUES('DSARadar', 'obstime', 24, 24);
INSERT INTO PurgeDynData VALUES('DSAAdapt', 'obstime', 24, 24);
INSERT INTO PurgeDynData VALUES('DPRRadar', 'obstime', 24, 24);
INSERT INTO PurgeDynData VALUES('hperadarresult', 'producttime', 24, 24);

View file

@ -1,12 +0,0 @@
# README, DO NOT RUN THIS SCRIPT UNLESS YOU READ THIS!!!!!!!!!!!!!!!
# This update needs to be performed with build 14.3.1.
# This update is only for edex servers which have run DD and have subscriptions loaded.
1.) Back up your subscriptions from the Registry Web Interface,
http://$REGISTRY_HOST:8082/registry/RegistryInterface.html;
hit the <Backup All Subscriptions> button.
2.) Then run this update script, updateSubscriptionOwners.sh.
3.) After the script runs, hit the <Restore Subscriptions> button.
That will update the subscriptions to reflect the correct ownership at the registry level.
4.) If you are satisfied the subs are correct, either clean up the backup files by running
deleteBackupSubs.sh or move them to a different directory for storage.

View file

@ -1,9 +0,0 @@
#!/bin/bash
#
# Deletes the backed-up subscription *.bak files
echo ""
echo "Press Enter to perform the updates Ctrl-C to quit."
read done
rm -rf /awips2/edex/data/registrySubscriptionBackup/*.bak

View file

@ -1,24 +0,0 @@
#!/usr/bin/perl
open (IN, $ARGV[0]);
print $ARGV[0];
@lines = <IN>;
close IN;
$site = "";
foreach $line (@lines) {
if ($line =~ /originatingSite=&quot;([\w]+)&quot;/) {
$site = $1;
}
}
foreach $line (@lines) {
if ($line =~ s/owner="([\w]+)"/owner="$site"/g) {
print $line;
}
}
$OUTFILE = "TEMP";
open (OUT, ">>TEMP");
foreach $line (@lines) {
print OUT $line;
}
rename $OUTFILE, $ARGV[0]
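# Editor's note (illustrative, values assumed): given a backup file containing
#   originatingSite=&quot;OAX&quot; ... owner="someuser"
# the script captures OAX as the site and rewrites every owner="..." attribute
# to owner="OAX", then replaces the original file via the TEMP copy.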

View file

@ -1,28 +0,0 @@
#!/bin/bash
#
# DO NOT RUN THIS SCRIPT UNLESS YOU READ THIS!!!!!!!!!!!!!!!
# This update needs to be performed with build 14.3.1.
# This update is only for edex servers which have run DD and have subscriptions loaded.
# FIRST! Backup your subscriptions from the Registry Web Interface,
# http://$REGISTRY_HOST:8082/registry/RegistryInterface.html. <Backup All Subscriptions>
# Then run this update script; after it runs, hit <Restore Subscriptions>.
# That will update the subscriptions to reflect the correct ownership at the registry level.
echo ""
echo "Press Enter to perform the updates Ctrl-C to quit."
read done
files=`find /awips2/edex/data/registrySubscriptionBackup -iname \*-RECURRING`
if [[ -z "$files" ]]; then
echo "FATAL: Update Subscriptions has Failed, No subscription backup files found!"
exit 1
fi
for f in $files; do
echo Updating $f
bf=$f.bak.`date +%m%d%y`
cp $f $bf
perl updateSubscriptionOwners.pl $f;
done

View file

@ -1,96 +0,0 @@
#!/usr/bin/env python
import sys, os, glob, shutil, pwd
def main():
REMOVE = """#==============================================================================
#
# The following empty code is here to fool the ifpServer into
# thinking it's a tool. This is so that the configuration will
# appear right next to the primary tool.
#
# DO NOT CHANGE THE LINES BELOW
#
ToolType = "numeric"
WeatherElementEdited = "None"
from numpy import *
HideTool = 1
import SmartScript
class Tool (SmartScript.SmartScript):
def __init__(self, dbss):
SmartScript.SmartScript.__init__(self, dbss)
def execute(self):
return
"""
dryrun = 0
if len(sys.argv) > 1 and sys.argv[1] == "-dry":
dryrun = 1
print "running %s with dryrun = %d\n\n" % (sys.argv[0], dryrun)
pws = pwd.getpwnam("awips")
cavestatic = '/awips2/edex/data/utility/cave_static'
tool_subpaths = 'gfe/userPython/smartTools'
util_subpaths = 'gfe/userPython/utilities'
tool_list = glob.glob(cavestatic + "/*/*/" + tool_subpaths + "/SerpConfig*.py")
util_list = glob.glob(cavestatic + "/*/*/" + util_subpaths + "/SerpConfig*.py")
print "current tool files:"
print tool_list
print "\ncurrent utilities:"
print util_list
for f in tool_list:
print "\nworking from %s" % f
dirn, filen = os.path.split(f)
utildir = dirn.replace("smartTools", "utilities")
newfile = os.path.join(utildir, "SerpConfig.py")
if os.path.exists(newfile):
print "%s already exists. No need to create." % newfile
else:
content = open(f).read()
replaced = content.replace(REMOVE, "")
if not dryrun:
if not os.path.exists(utildir):
os.makedirs(utildir)
open(newfile, 'w+').write(replaced)
print "create new file %s" % newfile
if not dryrun:
if not os.path.exists(newfile):
print "Error: file %s is not created." % newfile
else:
os.chown(newfile, pws.pw_uid, pws.pw_gid)
os.chmod(newfile, 0644)  # octal mode; plain decimal 644 would set the wrong permission bits
if filen == "SerpConfig.py":
print "removing override %s" % f
if not dryrun:
os.remove(f)
print ""
for f in util_list:
dirn, filen = os.path.split(f)
utildir = dirn
newfile = os.path.join(utildir, "SerpConfig.py")
if not os.path.exists(newfile):
if not dryrun:
shutil.copy(f, newfile)
print "create new file %s from %s" % (newfile, filen)
if not dryrun:
if not os.path.exists(newfile):
print "Error: file %s is not created." % newfile
else:
os.chown(newfile, pws.pw_uid, pws.pw_gid)
pass
if __name__ == "__main__":
main()

View file

@ -1,40 +0,0 @@
#!/bin/bash
# DR #2061 - this update script will drop the dataURI column from acars
PSQL="/awips2/psql/bin/psql"
# takes one arg: a table name
# drops the datauri constraint and column if they exist
function dropDatauri {
echo "INFO: Dropping DataURI column from $1"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS ${1}_datauri_key;"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP COLUMN IF EXISTS datauri;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to drop dataURI column for $table"
echo "FATAL: The update has failed."
exit 1
fi
}
# takes three args: table, constraint name, unique columns
# will first drop the constraint if it exists and then add it back. This is
# fairly inefficient when the constraint does exist, but operationally it
# won't, and for testing it allows the script to be rerun easily as a no-op.
function dropDatauriAndAddConstraint {
dropDatauri $1
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS $2;"
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ADD CONSTRAINT $2 UNIQUE $3;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to add new unique constraint for $table"
echo "FATAL: The update has failed."
exit 1
fi
}
echo "INFO: Dropping acars dataURI columns."
dropDatauriAndAddConstraint acars acars_reftime_tailnumber_flightlevel_latitude_longitude_key "(reftime, tailnumber, flightlevel, latitude, longitude)"
${PSQL} -U awips -d metadata -c "update acars set stationid = to_char(longitude, 'FM999.999') || ':' || to_char(latitude, 'FM999.999')"
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE acars"
echo "INFO: acars dataURI columns dropped successfully"

View file

@ -1,75 +0,0 @@
#!/bin/bash
# moves subscriptions tables from metadata db to fxatext db
function rowcount {
psql -U awips -d $1 -c "select count(*) from $2"
}
function getSeqStart {
CURR_ID=$(psql -U awips metadata -Aqzt0 -c "select max(id) from $1")
if [[ -z $CURR_ID ]]
then
echo 1
else
echo $(( $CURR_ID + 1 ))
fi
}
BACKUPFILE=sub_dump.bak
MODIFIED_BACKUP=sub_modified.bak
echo "Moving subscriptions tables from metadata to fxatext"
OLD_SUB_COUNT=$(rowcount metadata subscription.subscriptions)
OLD_REP_COUNT=$(rowcount metadata subscription.replacements)
SUB_SEQ_START=$(getSeqStart subscription.subscriptions)
REQ_SEQ_START=$(getSeqStart subscription.replacements)
pg_dump -U awips -t subscription.subscriptions -t subscription.replacements metadata > $BACKUPFILE
if [[ $? != 0 ]]
then
echo "subscription tables backup failed, aborting"
exit 1
fi
sed 's/\(\(TABLE\s\+\)\|\(Schema:\s\+\)\|=\s\+\)subscription\([^s]\)/\1public\4/' $BACKUPFILE > $MODIFIED_BACKUP
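# Editor's note (illustrative): the sed above rewrites the schema qualifier
# after 'TABLE', 'Schema:', or '=' from subscription to public, e.g.
#   CREATE TABLE subscription.subscriptions  ->  CREATE TABLE public.subscriptions
# while the trailing [^s] guard keeps the word 'subscriptions' itself intact.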
if [[ $? != 0 ]]
then
echo "subscription tables backup editing failed, aborting"
exit 1
fi
psql -U awips fxatext < $MODIFIED_BACKUP
if [[ $? != 0 ]]
then
echo "Subscription tables restore failed, backup located at $BACKUPFILE"
exit 1
fi
NEW_SUB_COUNT=$(rowcount fxatext public.subscriptions)
NEW_REP_COUNT=$(rowcount fxatext public.replacements)
if [[ $OLD_SUB_COUNT != $NEW_SUB_COUNT || $OLD_REP_COUNT != $NEW_REP_COUNT ]]
then
echo "Row counts do not match before and after table move"
echo "Subscriptions before: \n$OLD_SUB_COUNT"
echo "Subscriptions after: \n$NEW_SUB_COUNT"
echo "Replacements before: \n$OLD_REP_COUNT"
echo "Replacements after: \n$NEW_REP_COUNT"
echo "skipping old table cleanup, backup exists at $BACKUPFILE"
exit 1
fi
echo "Creating sequences"
psql -U awips -d fxatext -c "CREATE SEQUENCE subscriptionseq START WITH $SUB_SEQ_START"
psql -U awips -d fxatext -c "CREATE SEQUENCE replacementseq START WITH $REQ_SEQ_START"
echo "Cleaning up old tables"
psql -U awips -d metadata -c 'DROP SCHEMA subscription CASCADE'
psql -U awips -d metadata -c "DELETE from awips.plugin_info WHERE name = 'com.raytheon.edex.autobldsrv'"
rm $MODIFIED_BACKUP
rm $BACKUPFILE
echo "Done moving subscription tables"

View file

@ -1,18 +0,0 @@
#!/bin/bash
PSQL="/awips2/psql/bin/psql"
# the vaa plugin was on rare occasions inserting records without a time, which are then never purged
${PSQL} -U awips -d metadata -c "delete from vaa_location where parentid in (select recordid from vaa_subpart where parentid in (select id from vaa where reftime is NULL))"
${PSQL} -U awips -d metadata -c "delete from vaa_subpart where parentid in (select id from vaa where reftime is NULL)"
${PSQL} -U awips -d metadata -c "delete from vaa where reftime is NULL"
tables=$(psql -U awips -d metadata -tc "select table_name from information_schema.columns where column_name = 'reftime'")
echo "Updating record tables to disallow null times"
for table in $tables
do
echo "Updating $table"
psql -U awips -d metadata -c "ALTER TABLE $table ALTER COLUMN reftime SET NOT NULL"
done
echo "Done"

View file

@ -1,95 +0,0 @@
#!/bin/bash
# eventType was stored as the numeric ID of an enum;
# this does not cover all event types, and storing the raw string is preferred
DBUSER="awips"
DBNAME="metadata"
TABLE_NAME='lsr'
COLUMN_NAME='eventtype'
BACKUP_NAME='eventtype_old'
DATAURI_COLUMN='datauri'
STATION_COLUMN='stationid'
LAT_COLUMN='latitude'
LON_COLUMN='longitude'
CONSTRAINT_NAME='latitude_longitude_stationId_refTime_forecastTime_eventType'
CONSTRAINT_COLUMNS='latitude, longitude, stationId, refTime, forecastTime, eventType'
PSQL="/awips2/psql/bin/psql"
SQL_STATEMENT="
BEGIN;
ALTER TABLE ${TABLE_NAME} RENAME COLUMN ${COLUMN_NAME} to ${BACKUP_NAME};
ALTER TABLE ${TABLE_NAME} ADD COLUMN ${COLUMN_NAME} character varying(255);
UPDATE ${TABLE_NAME}
SET ${COLUMN_NAME} =
CASE ${BACKUP_NAME}
WHEN 0 then 'AVALANCHE'
WHEN 1 then 'BLIZZARD'
WHEN 2 then 'DENSE FOG'
WHEN 3 then 'DOWNBURST'
WHEN 4 then 'DROUGHT'
WHEN 5 then 'DUST STORM'
WHEN 6 then 'EXCESSIVE HEAT'
WHEN 7 then 'EXTREME COLD'
WHEN 8 then 'EXTR WIND CHILL'
WHEN 9 then 'FLASH FLOOD'
WHEN 10 then 'FLOOD'
WHEN 11 then 'FREEZE'
WHEN 12 then 'FREEZING RAIN'
WHEN 13 then 'FUNNEL CLOUD'
WHEN 14 then 'HAIL'
WHEN 15 then 'HEAVY RAIN'
WHEN 16 then 'HEAVY SLEET'
WHEN 17 then 'HEAVY SNOW'
WHEN 18 then 'HIGH ASTR TIDES'
WHEN 19 then 'HIGH SURF'
WHEN 20 then 'HIGH SUST WINDS'
WHEN 21 then 'HURRICANE'
WHEN 22 then 'ICE STORM'
WHEN 23 then 'LIGHTNING'
WHEN 24 then 'LOW ASTR TIDES'
WHEN 25 then 'MARINE HAIL'
WHEN 26 then 'MARINE TSTM WIND'
WHEN 27 then 'NON-TSTM WND DMG'
WHEN 28 then 'NON-TSTM WND GST'
WHEN 29 then 'RIP CURRENTS'
WHEN 30 then 'SEICHE'
WHEN 31 then 'SLEET'
WHEN 32 then 'SNOW'
WHEN 33 then 'STORM SURGE'
WHEN 34 then 'TORNADO'
WHEN 35 then 'TROPICAL STORM'
WHEN 36 then 'TSTM WND DMG'
WHEN 37 then 'TSTM WND GST'
WHEN 38 then 'WATER SPOUT'
WHEN 39 then 'WILDFIRE'
WHEN 40 then 'FREEZING DRIZZLE'
WHEN 41 then 'COASTAL FLOOD'
WHEN 42 then 'DEBRIS FLOW'
WHEN 43 then 'BLOWING SNOW'
WHEN 44 then 'RAIN'
ELSE ''
END;
UPDATE ${TABLE_NAME} set ${STATION_COLUMN} = concat(${LON_COLUMN}, ':', ${LAT_COLUMN});
ALTER TABLE ${TABLE_NAME} DROP COLUMN ${BACKUP_NAME};
ALTER TABLE ${TABLE_NAME} DROP COLUMN ${DATAURI_COLUMN};
ALTER TABLE ${TABLE_NAME} ADD CONSTRAINT ${CONSTRAINT_NAME} UNIQUE (${CONSTRAINT_COLUMNS});
COMMIT;"
COLUMN_TYPE=$(${PSQL} -U ${DBUSER} -d ${DBNAME} -tc "select data_type from INFORMATION_SCHEMA.COLUMNS where table_name = '${TABLE_NAME}' and column_name = '${COLUMN_NAME}'")
if [[ $COLUMN_TYPE =~ integer ]]
then
if ${PSQL} -U ${DBUSER} -d ${DBNAME} -c "${SQL_STATEMENT}"
then
echo "${TABLE_NAME} updated successfully, vacuuming table"
${PSQL} -U ${DBUSER} -d ${DBNAME} -c "VACUUM FULL ANALYZE ${TABLE_NAME}"
else
echo "Update failed on ${TABLE_NAME}"
fi
else
echo "$TABLE_NAME already updated, no changes made"
fi

View file

@ -1,11 +0,0 @@
#!/bin/bash
# Omaha #2714 find all satellite distribution files in localization and rename to match plugin
for x in $(find /awips2/edex/data/utility/edex_static -regex '^.*distribution/satellite.xml$')
do
target=${x/satellite.xml/satellite-gini.xml}
echo Renaming $x to $target
mv "$x" "$target"
done
echo Done

View file

@ -1,22 +0,0 @@
#!/bin/bash
# Omaha #2714 static tables have been replaced by XML backed in-memory lookup tables
DBUSER="awips"
DBNAME="metadata"
PSQL="/awips2/psql/bin/psql"
for table in satellite_creating_entities satellite_geostationary_positions satellite_physical_elements satellite_sector_ids satellite_sources satellite_units
do
echo Dropping table: $table
command="DROP TABLE IF EXISTS $table"
if ${PSQL} -U ${DBUSER} -d ${DBNAME} -c "$command"
then
echo $table dropped successfully
else
echo problem dropping table: $table
fi
done
echo Done

View file

@ -1,40 +0,0 @@
#!/bin/bash
# Move offline stat files from localize location to /awips2/edex/data/stats
. /awips2/edex/bin/setup.env
oldStatDir=/awips2/edex/data/utility/common_static/configured/${AW_SITE_IDENTIFIER}/stats
newStatParent=/awips2/edex/data
if [ ! -d ${oldStatDir} ] ; then
echo "ERROR: ${oldStatDir} dirctory does not exist"
echo "FATAL: The update has failed."
exit 1
fi
if [ ! -d ${newStatParent} ] ; then
rm -rf ${newStatParent}
mkdir -p ${newStatParent}
if [ $? -ne 0 ] ; then
echo "ERROR: Unable to create ${newStatParent}"
echo "FATAL: The update has failed"
exit 1
fi
fi
cp -R -p ${oldStatDir} ${newStatParent}
if [ $? -ne 0 ] ; then
echo "ERROR: copying ${oldStatDir} to ${newStatParent} failed."
echo "FATAL: The update has failed."
exit 1
fi
rm -rf ${oldStatDir}
if [ $? -ne 0 ] ; then
echo "WARNING: Deleting ${oldStatDir} failed"
fi
echo "INFO: ${newStatParent}/stats updated from ${oldStatDir}"

View file

@ -1,220 +0,0 @@
#!/bin/bash
source settings.sh
if [ -z "${AWIPS2_DATA_DIRECTORY}" ]; then
echo "ERROR: AWIPS2_DATA_DIRECTORY must be set in settings.sh!"
exit 1
fi
SQL_SHARE_DIR=${DATABASE_INSTALL}/sqlScripts/share/sql
SQL_MAPS_SHARE_DIR=${SQL_SHARE_DIR}/maps
# This Is The Log File That We Will Use To Log All SQL Interactions.
SQL_LOG=${SQL_SHARE_DIR}/sql_upgrade.log
# Add The PostgreSQL Libraries And The PSQL Libraries To LD_LIBRARY_PATH.
export LD_LIBRARY_PATH=${POSTGRESQL_INSTALL}/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${PSQL_INSTALL}/lib:$LD_LIBRARY_PATH
function create_sql_element()
{
# $1 == element
mkdir -p ${1}
update_owner ${1}
}
function update_owner()
{
# $1 == element
chown ${POSTGRESQL_USER} ${1}
chgrp ${AWIPS_DEFAULT_GROUP} ${1}
}
function init_db()
{
su ${POSTGRESQL_USER} -c \
"${POSTGRESQL_INSTALL}/bin/initdb --auth=trust --locale=en_US.UTF-8 --pgdata=${AWIPS2_DATA_DIRECTORY} --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8"
if [ $? -ne 0 ]; then
echo "init_db has failed!"
exit 1
fi
}
function control_pg_ctl()
{
# $1 == pg_ctl command
su ${POSTGRESQL_USER} -c \
"${POSTGRESQL_INSTALL}/bin/pg_ctl ${1} -D ${AWIPS2_DATA_DIRECTORY} -o \"-p ${POSTGRESQL_PORT}\" -w"
}
function execute_initial_sql_script()
{
# Make The Necessary Replacements In The Script.
perl -p -i -e "s/%{databaseUsername}/${POSTGRESQL_USER}/g" \
${1}
echo ${AWIPS2_DATA_DIRECTORY} | sed 's/\//\\\//g' > .awips2_escape.tmp
AWIPS2_DATA_DIRECTORY_ESCAPED=`cat .awips2_escape.tmp`
rm -f .awips2_escape.tmp
perl -p -i -e "s/%{database_files_home}/${AWIPS2_DATA_DIRECTORY_ESCAPED}/g" \
${1}
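# Editor's note (illustrative): the sed above escapes the data directory for
# use inside a perl s/// expression, e.g. /awips2/data becomes \/awips2\/data,
# so the %{database_files_home} placeholder in the target script is replaced
# with the real path.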
# $1 == script to execute
su ${POSTGRESQL_USER} -c \
"${PSQL_INSTALL}/bin/psql -d postgres -U ${POSTGRESQL_USER} -q -p ${POSTGRESQL_PORT} -f ${1}" \
> ${SQL_LOG} 2>&1
if [ $? -ne 0 ]; then
echo "Initial Database Setup has failed!"
exit 1
fi
}
function update_createDamcat()
{
echo ${AWIPS2_DATA_DIRECTORY} | sed 's/\//\\\//g' > .awips2_escape.tmp
AWIPS2_DATA_DIRECTORY_ESCAPED=`cat .awips2_escape.tmp`
rm -f .awips2_escape.tmp
perl -p -i -e "s/%{database_files_home}%/${AWIPS2_DATA_DIRECTORY_ESCAPED}/g" \
${SQL_SHARE_DIR}/createDamcat.sql
}
function createDamcatTablespace()
{
local sql_script="damcat.sql"
local current_location=`pwd`
touch ${sql_script}
echo "DROP TABLESPACE IF EXISTS damcat;" > ${sql_script}
echo "CREATE TABLESPACE damcat OWNER awips LOCATION '/awips2/data/damcat';" >> ${sql_script}
echo "COMMENT ON TABLESPACE damcat IS 'DAMCAT Database tablespace';" >> ${sql_script}
execute_psql_sql_script ${current_location}/${sql_script} postgres
rm -f ${current_location}/${sql_script}
}
function update_createEbxml()
{
echo ${AWIPS2_DATA_DIRECTORY} | sed 's/\//\\\//g' > .awips2_escape.tmp
AWIPS2_DATA_DIRECTORY_ESCAPED=`cat .awips2_escape.tmp`
rm -f .awips2_escape.tmp
perl -p -i -e "s/%{database_files_home}%/${AWIPS2_DATA_DIRECTORY_ESCAPED}/g" \
${SQL_SHARE_DIR}/createEbxml.sql
}
function update_createHMDB()
{
echo ${AWIPS2_DATA_DIRECTORY} | sed 's/\//\\\//g' > .awips2_escape.tmp
AWIPS2_DATA_DIRECTORY_ESCAPED=`cat .awips2_escape.tmp`
rm -f .awips2_escape.tmp
perl -p -i -e "s/%{database_files_home}%/${AWIPS2_DATA_DIRECTORY_ESCAPED}/g" \
${SQL_SHARE_DIR}/createHMDB.sql
}
function update_createMaps()
{
echo ${AWIPS2_DATA_DIRECTORY} | sed 's/\//\\\//g' > .awips2_escape.tmp
AWIPS2_DATA_DIRECTORY_ESCAPED=`cat .awips2_escape.tmp`
rm -f .awips2_escape.tmp
# Update the sql script that creates the maps database / tables.
perl -p -i -e "s/%{database_files_home}%/${AWIPS2_DATA_DIRECTORY_ESCAPED}/g" \
${SQL_MAPS_SHARE_DIR}/createMapsDb.sql
}
function execute_psql_sql_script()
{
# $1 == script to execute
# $2 == database
su ${POSTGRESQL_USER} -c \
"${PSQL_INSTALL}/bin/psql -d ${2} -U ${POSTGRESQL_USER} -q -p ${POSTGRESQL_PORT} -f \"${1}\"" \
>> ${SQL_LOG} 2>&1
if [ $? -ne 0 ]; then
echo "Failed to execute SQL script: ${1}!"
exit 1
fi
}
#temporarily relocate the PostgreSQL configuration
_BACKUP_CONF=/awips2/postgresql-configuration-bak
if [ -d ${_BACKUP_CONF} ]; then
rm -rf ${_BACKUP_CONF}
if [ $? -ne 0 ]; then
exit 1
fi
fi
mkdir -p ${_BACKUP_CONF}
if [ $? -ne 0 ]; then
exit 1
fi
POSTGRESQL_CONFIGURATION=( 'pg_hba.conf' 'pg_ident.conf' 'postgresql.conf' )
for configuration in ${POSTGRESQL_CONFIGURATION[*]};
do
mv ${AWIPS2_DATA_DIRECTORY}/${configuration} \
${_BACKUP_CONF}/${configuration}
if [ $? -ne 0 ]; then
exit 0
fi
done
# purge the existing data directory
rm -rf ${AWIPS2_DATA_DIRECTORY}/*
if [ $? -ne 0 ]; then
exit 1
fi
init_db
control_pg_ctl "start"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to start PostgreSQL."
exit 1
fi
sleep 20
create_sql_element ${METADATA}
create_sql_element ${IFHS}
create_sql_element ${DAMCAT}
create_sql_element ${HMDB}
create_sql_element ${EBXML}
create_sql_element ${MAPS}
execute_initial_sql_script ${SQL_SHARE_DIR}/initial_setup_server.sql
execute_psql_sql_script ${SQL_SHARE_DIR}/fxatext.sql metadata
createDamcatTablespace
update_createHMDB
execute_psql_sql_script ${SQL_SHARE_DIR}/createHMDB.sql postgres
update_createEbxml
execute_psql_sql_script ${SQL_SHARE_DIR}/createEbxml.sql postgres
control_pg_ctl "stop"
if [ $? -ne 0 ]; then
echo "WARNING: Failed to stop PostgreSQL!"
else
sleep 20
fi
# restore the PostgreSQL configuration
for configuration in ${POSTGRESQL_CONFIGURATION[*]};
do
mv ${_BACKUP_CONF}/${configuration} \
${AWIPS2_DATA_DIRECTORY}/${configuration}
if [ $? -ne 0 ]; then
exit 1
fi
done
rm -rf ${_BACKUP_CONF}
if [ $? -ne 0 ]; then
echo "WARNING: Failed to Remove - ${_BACKUP_CONF}!"
fi
exit 0

View file

@ -1,99 +0,0 @@
#!/bin/bash
source settings.sh
# make a temporary update to the postgresql.conf file for the db vacuums
pushd . > /dev/null 2>&1
cd ${AWIPS2_DATA_DIRECTORY}
# backup the existing postgresql.conf
cp postgresql.conf postgresql.conf.orig
if [ $? -ne 0 ]; then
echo "Failed to create a temporary backup of postgresql.conf!"
exit 1
fi
# create an updated postgresql.conf
sed '/vacuum_freeze_table_age/d' postgresql.conf > postgresql.conf.update
if [ $? -ne 0 ]; then
echo "Failed to update postgresql.conf!"
exit 1
fi
echo "vacuum_freeze_table_age = 0" >> postgresql.conf.update
if [ $? -ne 0 ]; then
echo "Failed to update postgresql.conf!"
exit 1
fi
mv postgresql.conf.update postgresql.conf
if [ $? -ne 0 ]; then
echo "Failed to update postgresql.conf!"
exit 1
fi
popd > /dev/null 2>&1
# start PostgreSQL
/sbin/service edex_postgres start
if [ $? -ne 0 ]; then
echo "ERROR: Failed to start PostgreSQL."
exit 1
fi
sleep 20
# look at the standard PostgreSQL database exports
for database_export in `ls -1 *.db`;
do
echo "Restoring database '${database_export}' ..."
${PG_RESTORE} -C -d postgres -U ${POSTGRESQL_USER} ${database_export} >> errors.txt 2>&1
database=`basename ${database_export} .db`
# vacuum the database
${VACUUMDB} -d ${database} -U ${POSTGRESQL_USER} -p ${POSTGRESQL_PORT} -vz >> errors.txt 2>&1
done
echo ""
# look at the postgis enabled PostgreSQL database exports
for database_export in `ls -1 *.db.postgis`;
do
echo "Performing a postgis restoration of database '${database_export}' ..."
# restore the database
${PG_RESTORE} -C -d postgres -U ${POSTGRESQL_USER} ${database_export} >> errors.txt 2>&1
database=`basename ${database_export} .db.postgis`
# vacuum the database
${VACUUMDB} -d ${database} -U ${POSTGRESQL_USER} -p ${POSTGRESQL_PORT} -vz >> errors.txt 2>&1
/bin/bash run_postgis_upgrade.sh ${database}
done
# stop PostgreSQL
/sbin/service edex_postgres stop
if [ $? -ne 0 ]; then
echo "WARNING: Failed to stop PostgreSQL."
else
sleep 20
fi
# restore the original postgresql.conf
pushd . > /dev/null 2>&1
cd ${AWIPS2_DATA_DIRECTORY}
cp -f postgresql.conf.orig postgresql.conf
if [ $? -ne 0 ]; then
echo "Failed to restore postgresql.conf. Original version is: ${AWIPS2_DATA_DIRECTORY}/postgresql.conf.orig"
exit 0
fi
rm -f postgresql.conf.orig
if [ $? -ne 0 ]; then
echo "Failed to remove backup postgresql.conf. Backup is: ${AWIPS2_DATA_DIRECTORY}/postgresql.conf.orig"
fi
popd > /dev/null 2>&1
exit 0

View file

@ -1,64 +0,0 @@
#!/bin/bash
source settings.sh
# start PostgreSQL
/sbin/service edex_postgres start
if [ $? -ne 0 ]; then
echo "ERROR: Failed to start PostgreSQL."
exit 1
fi
sleep 20
# get a list of the databases
# consider using the SQL query? ...
dbList=( $( psql --tuples-only -U awips -l | cut -f1 -d'|' | sed -e "s/^[[:space:]]*//g" -e "s/[[:space:]]*$//g" | grep -vE "template[01]|postgres|awips" ) )
# export the databases
for database in ${dbList[*]};
do
echo "exporting database '${database}' ..."
# determine if the database is postgis-enabled?
# postgis-enabled PostgreSQL databases include one or both of the following tables:
# { spatial_ref_sys, geometry_columns }
_SQL_="SELECT COUNT(*) FROM pg_catalog.pg_tables WHERE tablename IN ( 'spatial_ref_sys', 'geometry_columns' );"
COUNT=`${PSQL} -U ${POSTGRESQL_USER} -p ${POSTGRESQL_PORT} -d ${database} -t -c "${_SQL_}"`
# determine the suffix of the database export
_export_suffix=".db"
if [ ${COUNT} -ne 0 ]; then
_export_suffix="${_export_suffix}.postgis"
fi
${PG_DUMP} -Fc ${database} -U ${POSTGRESQL_USER} -p ${POSTGRESQL_PORT} > ${database}${_export_suffix}
if [ $? -ne 0 ]; then
exit 1
fi
done
echo ""
# at this point, we know that the database export was successful; so, we will now be dropping
# every database that was previously identified.
for database in ${dbList[*]};
do
echo "dropping database '${database}' ..."
${DROPDB} ${database} -U ${POSTGRESQL_USER} -p ${POSTGRESQL_PORT}
if [ $? -ne 0 ]; then
exit 1
fi
done
# stop PostgreSQL
/sbin/service edex_postgres stop
if [ $? -ne 0 ]; then
echo "WARNING: Failed to stop PostgreSQL."
exit 0
else
sleep 20
fi
exit 0

View file

@ -1,18 +0,0 @@
#!/bin/bash
source settings.sh
POSTGIS_UPGRADE=${POSTGIS_CONTRIB}/postgis_upgrade_20_minor.sql
RTPOSTGIS_UPGRADE=${POSTGIS_CONTRIB}/rtpostgis_upgrade_20_minor.sql
TOPOLOGY_UPGRADE=${POSTGIS_CONTRIB}/topology_upgrade_20_minor.sql
echo "=== postgis upgrade ===" >> errors.txt
${PSQL} -U ${POSTGRESQL_USER} -f ${POSTGIS_UPGRADE} -d ${1} >> errors.txt 2>&1
echo "=== rtpostgis upgrade ===" >> errors.txt
${PSQL} -U ${POSTGRESQL_USER} -f ${RTPOSTGIS_UPGRADE} -d ${1} >> errors.txt 2>&1
echo "=== topology upgrade ===" >> errors.txt
${PSQL} -U ${POSTGRESQL_USER} -f ${TOPOLOGY_UPGRADE} -d ${1} >> errors.txt 2>&1
exit 0

View file

@ -1,30 +0,0 @@
# database ownership
POSTGRESQL_PORT=5432
POSTGRESQL_USER=awips
AWIPS_DEFAULT_GROUP="fxalpha"
# AWIPS II installations
POSTGRESQL_INSTALL="/awips2/postgresql"
PSQL_INSTALL="/awips2/psql"
AWIPS2_DATA_DIRECTORY="/awips2/data"
DATABASE_INSTALL="/awips2/database"
# SQL Data Directories
METADATA=${AWIPS2_DATA_DIRECTORY}/metadata
IFHS=${AWIPS2_DATA_DIRECTORY}/pgdata_ihfs
MAPS=${AWIPS2_DATA_DIRECTORY}/maps
DAMCAT=${AWIPS2_DATA_DIRECTORY}/damcat
HMDB=${AWIPS2_DATA_DIRECTORY}/hmdb
EBXML=${AWIPS2_DATA_DIRECTORY}/ebxml
# paths
POSTGIS_CONTRIB=${POSTGRESQL_INSTALL}/share/contrib/postgis-2.0
# executables
PERL=/usr/bin/perl
PG_RESTORE=${POSTGRESQL_INSTALL}/bin/pg_restore
PG_DUMP=${POSTGRESQL_INSTALL}/bin/pg_dump
DROPDB=${POSTGRESQL_INSTALL}/bin/dropdb
VACUUMDB=${POSTGRESQL_INSTALL}/bin/vacuumdb
POSTGIS_RESTORE=${POSTGIS_CONTRIB}/postgis_restore.pl
PSQL=${PSQL_INSTALL}/bin/psql

View file

@ -1,18 +0,0 @@
#!/bin/bash
# DR #3091 - This changes the awips.taf_queue forecasterid column from type integer
# to a varchar, converting any existing values in the table.
PSQL="/awips2/psql/bin/psql"
echo "INFO: Altering awips.taf_queue's forecaster column."
${PSQL} -U awips -d metadata -c "alter table awips.taf_queue alter column forecasterid set data type varchar(255)
using to_char(forecasterid, '000');"
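# Editor's note: to_char(n, '000') renders e.g. 42 as ' 042' (three digits
# plus PostgreSQL's leading sign position), so existing integer ids survive
# as zero-padded strings.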
if [ $? -ne 0 ]; then
echo "ERROR: Failed to alter tabelawips.taf_queue's column forecasterid."
echo "FATAL: The update has failed."
exit 1
fi
echo "INFO: awips.taf_queue's forecaster column altered successfully."

View file

@ -1,27 +0,0 @@
#!/usr/bin/env python
import h5py
import os
import sys
# multiplicity was incorrectly interpreted as 'strike count' when
# it was the number of strokes (AKA pulses) in the strike (AKA flash)
LIGHTNING_H5_PATH = '/awips2/edex/data/hdf5/binlightning'
OLD_NAME = 'strikeCount'
NEW_NAME = 'pulseCount'
for file in os.listdir(LIGHTNING_H5_PATH):
if file.endswith('h5'):
h5file = None
try:
fileName = os.path.join(LIGHTNING_H5_PATH, file)
h5file = h5py.File(fileName, 'r+')
for g in h5file.values():
if NEW_NAME not in g and OLD_NAME in g:
g[NEW_NAME] = g[OLD_NAME]
except Exception, e:
print "Error renaming strikeCount in file", fileName, ":", e
finally:
if h5file:
h5file.close()
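# Editor's note: assigning an existing h5py Dataset to a new key creates an
# HDF5 hard link, so 'pulseCount' and 'strikeCount' reference the same data;
# the old name is left in place rather than deleted.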

View file

@ -1,16 +0,0 @@
#!/bin/bash
# DR 3220 - Remove metadata columns from FSSObs database
PSQL="/awips2/psql/bin/psql"
echo "INFO: Removing columns metadata cwa and monitoruse from table ffsobs"
${PSQL} -U awips -d metadata -c "ALTER TABLE fssobs DROP COLUMN cwa, DROP COLUMN monitoruse;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to remove columns metadata from table fssobs"
echo "FATAL: The update has failed."
exit 1
fi
echo "INFO: column metadata removed successfully"

View file

@ -1,19 +0,0 @@
#!/bin/bash
# DR #3318 - this updates awips.metadata.grid_info and needs to run on dx1.
echo 'INFO: Update gridinfo'
/awips2/psql/bin/psql -U awips -d metadata -c "update grid_info set
datasetid='GEFS' where datasetid='gefs'"
if [ $? -ne 0 ] ; then
echo 'ERROR unable to update database'
exit 1
fi
echo 'INFO: Update gridinfo successful.'
echo 'INFO: Has renameGefs.py been run on dx2?'

View file

@ -1,74 +0,0 @@
#!/usr/bin/env python
# DR #3318 - Converts the gefs directories to the new GEFS directories; must be run on dx2.
import h5py
import os
import sys
GRID_DIR= os.sep + 'awips2' + os.sep + 'edex' + os.sep + 'data' + os.sep + 'hdf5' + os.sep + 'grid'
OLD_DIR= os.path.join(GRID_DIR, 'gefs')
NEW_DIR= os.path.join(GRID_DIR, 'GEFS')
OLD_VALUE=':gefs:'
NEW_VALUE=':GEFS:'
def convertH5(dir):
for file in os.listdir(dir):
oldFilename = os.path.join(dir, file)
if os.path.isdir(oldFilename):
print 'INFO Converting %s' % (oldFilename)
convertH5(oldFilename)
elif file.startswith('gefs') and file.endswith('h5'):
newFile = file.replace('gefs', 'GEFS', 1)
filename = os.path.join(os.path.split(oldFilename)[0], newFile)
try:
os.rename(oldFilename, filename)
except Exception, e:
print 'WARNING: unable to rename %s to %s %s: ' % (oldFilename, filename, e)
continue
h5file = None
try:
h5file = h5py.File(filename, 'r+')
for g in h5file.keys():
if str.find(g, OLD_VALUE) > 0 :
new = str.replace(g, OLD_VALUE, NEW_VALUE, 1)
h5file[new] = h5file[g]
except Exception, e:
print "WARNING: in file %s: %s" % (filename, e)
finally:
if h5file:
h5file.close()
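# Editor's note (illustrative, file name assumed): a file such as
# gefs-2014-07-30-12.h5 is renamed to GEFS-2014-07-30-12.h5, and any dataset
# path containing ':gefs:' is linked to the same path with ':GEFS:' instead.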
def moveDir(old, new):
if not os.path.isdir(old) :
print 'INFO: No %s directory to move.' % (old)
return
if os.path.exists(new):
print 'ERROR: Unable to create directory %s' % (new)
print 'Fatal: %s already exists.' % (new)
exit(1)
try:
os.rename(old, new)
except Exception, e:
print 'ERROR: Unable to create directory %s.' % (new)
print 'Fatal: %s' % (e)
exit(1)
print 'INFO: Updates for GEFS.'
print 'INFO: updating directory'
moveDir(OLD_DIR, NEW_DIR)
if os.path.isdir(NEW_DIR) :
print 'INFO: Converting h5 files'
convertH5(NEW_DIR)
else:
print "WARNING: %s directory not found" % (NEW_DIR)
print 'INFO: Updated GEFS successfully.'
print 'INFO: Has dbupdate.sh been run on dx1?'

View file

@ -1,30 +0,0 @@
#!/bin/bash
# DR #3392 - this update script will change columns from Double to Real
TABLES=(acars acarssoundinglayer)
COLUMNS=(dwpt humidity mixingratio pressure temp windspeed)
PSQL="/awips2/psql/bin/psql"
# takes two args: a table name and a column name
# alters the column in the table to real
function changeToReal {
echo "INFO: Changing table $1 column $2 to real."
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ALTER COLUMN $2 TYPE real;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to change the column $2 for table $1"
echo "FATAL: The update has failed."
exit 1
fi
}
for table in ${TABLES[*]}
do
echo "INFO: Altering table $table."
for column in ${COLUMNS[*]}
do
changeToReal $table $column
done
done
echo "INFO: All columns changed successfully"

View file

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7"/>
<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
<classpathentry kind="src" path="src"/>
<classpathentry kind="output" path="bin"/>
</classpath>

View file

@ -1,28 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>com.raytheon.uf.edex.upgrade.obslocation</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.pde.ManifestBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.pde.SchemaBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.pde.PluginNature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>

View file

@ -1,8 +0,0 @@
Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: Obslocation
Bundle-SymbolicName: com.raytheon.uf.edex.upgrade.obslocation
Bundle-Version: 1.14.0.qualifier
Bundle-Vendor: RAYTHEON
Bundle-RequiredExecutionEnvironment: JavaSE-1.7
Require-Bundle: org.postgres;bundle-version="9.2.4"

View file

@ -1,4 +0,0 @@
source.. = src/
output.. = bin/
bin.includes = META-INF/,\
.

View file

@ -1,304 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.edex.upgrade.obslocation;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
* Reformats the dataURI to match the new precision scheme for obs locations.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Jul 30, 2014 3410 bclement Initial creation
*
* </pre>
*
* @author bclement
* @version 1.0
*/
public class DataUriDoubleToFloat {
private static final String HOST_ARGUMENT = "-host";
private static final String DEFAULT_HOST = "localhost";
private static final String PORT_ARGUMENT = "-port";
private static final String DEFAULT_PORT = "5432";
private static final String USER_ARGUMENT = "-user";
private static final String DEFAULT_USER = "awips";
private static final String PASSWORD_ARGUMENT = "-password";
private static final String DEFAULT_PASSWORD = "awips";
private static final String DATABASE_ARGUMENT = "-database";
private static final String DEFAULT_DATABASE = "metadata";
private static final String JDBC_CONNECTION_FORMAT_STRING = "jdbc:postgresql://%s:%s/%s";
private static final String USER_PROPERTY = "user";
private static final String PASSWORD_PROPERTY = "password";
private static Map<String, Object> argumentMap = new HashMap<String, Object>();
/* map of table names to latitude 0-based index in data uri */
private static final Map<String, Integer> latitudeIndexMap = new HashMap<String, Integer>();
static {
latitudeIndexMap.put("tcg", 5);
latitudeIndexMap.put("acars", 4);
latitudeIndexMap.put("acarssounding", 3);
latitudeIndexMap.put("ldad_manual", 5);
latitudeIndexMap.put("obs", 5);
latitudeIndexMap.put("airep", 5);
latitudeIndexMap.put("bufrncwf", 3);
latitudeIndexMap.put("svrwx", 4);
latitudeIndexMap.put("ldadprofiler", 4);
latitudeIndexMap.put("bufrquikscat", 4);
latitudeIndexMap.put("sfcobs", 5);
latitudeIndexMap.put("bufrua", 6);
latitudeIndexMap.put("modelsounding", 4);
latitudeIndexMap.put("fssobs", 5);
latitudeIndexMap.put("lsr", 4);
latitudeIndexMap.put("ldadhydro", 5);
latitudeIndexMap.put("pirep", 5);
latitudeIndexMap.put("profiler", 4);
latitudeIndexMap.put("tcs", 4);
latitudeIndexMap.put("ncpafm", 5);
latitudeIndexMap.put("ncscd", 4);
latitudeIndexMap.put("ncuair", 4);
latitudeIndexMap.put("nctaf", 4);
}
private static final String LOC_DEF_COL = "locationdefined";
private static final String DATAURI_COL = "datauri";
private static final String LAT_COL = "latitude";
private static final String LON_COL = "longitude";
/**
* @return a newly created connection to the database
* @throws SQLException
*/
private static Connection openConnection() throws SQLException {
String host = getString(HOST_ARGUMENT, DEFAULT_HOST);
String port = getString(PORT_ARGUMENT, DEFAULT_PORT);
String database = getString(DATABASE_ARGUMENT, DEFAULT_DATABASE);
String user = getString(USER_ARGUMENT, DEFAULT_USER);
String password = getString(PASSWORD_ARGUMENT, DEFAULT_PASSWORD);
DriverManager.registerDriver(new org.postgresql.Driver());
String connectionURL = String.format(JDBC_CONNECTION_FORMAT_STRING,
host, port, database);
Properties props = new Properties();
props.setProperty(USER_PROPERTY, user);
props.setProperty(PASSWORD_PROPERTY, password);
return DriverManager.getConnection(connectionURL, props);
}
/**
* Parse command line arguments into the argumentMap
*
* @param args
*/
private static void parseArguments(String[] args) {
for (int i = 0; i < args.length; ++i) {
String arg = args[i];
if (arg.startsWith("-")) {
// we have a key
if (args.length > (i + 1)
&& args[i + 1].startsWith("-") == false) {
argumentMap.put(arg, args[i + 1]);
++i;
} else {
argumentMap.put(arg, true);
}
}
}
}
/**
* Get command line argument value
*
* @param key
* @param defaultValue
* @return
*/
private static String getString(String key, String defaultValue) {
Object val = argumentMap.get(key);
if (val != null) {
return val.toString();
}
return defaultValue;
}
/**
* Get all tables in the schema with the provided column name
*
* @param conn
* @param column
* @return
* @throws Exception
*/
private static Set<String> getTablesWithColumn(Connection conn,
String column) throws Exception {
Statement query = conn.createStatement();
ResultSet result = query
.executeQuery("select table_name from information_schema.columns where column_name = '" + column +"'");
Set<String> rval = new HashSet<>();
while (result.next()) {
rval.add(result.getString("table_name"));
}
return rval;
}
/**
* Create an updatable result set with id, latitude, longitude and datauri
* columns
*
* @param conn
* @param table
* @return
* @throws Exception
*/
private static ResultSet getLocationAndDataUri(Connection conn, String table)
throws Exception {
Statement query = conn.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE,
ResultSet.CONCUR_UPDATABLE);
String sql = String.format("select id, %s, %s, %s from %s", LAT_COL,
LON_COL, DATAURI_COL, table);
return query.executeQuery(sql);
}
/**
* Reformat each dataURI in table
*
* @param conn
* @param table
* @throws Exception
*/
private static void updateTable(Connection conn, String table)
throws Exception {
Integer latIndex = latitudeIndexMap.get(table);
if (latIndex == null) {
throw new Exception(
"Unable to determine index of latitude/longitude in dataURI");
}
/* plus 1 here to account for how String.split() handles leading slash */
latIndex += 1;
int lonIndex = latIndex + 1;
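/*
 * Editor's note (illustrative): "/a/b/c".split("/") yields
 * ["", "a", "b", "c"], so the leading empty token shifts every real URI
 * component up by one; that is why 1 is added to the mapped latitude index.
 */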
ResultSet res = getLocationAndDataUri(conn, table);
while (res.next()) {
String uri = res.getString(DATAURI_COL);
Float lat = res.getFloat(LAT_COL);
Float lon = res.getFloat(LON_COL);
if (uri == null) {
int id = res.getInt("id");
throw new Exception("Empty dataURI on row with id " + id);
}
String[] parts = uri.split("/");
if (parts.length < lonIndex + 1) {
throw new Exception("Expected dataURI with at least "
+ (lonIndex + 1) + " parts, got " + uri);
}
String latStr = parts[latIndex];
String lonStr = parts[lonIndex];
String newLatStr = String.valueOf(lat);
String newLonStr = String.valueOf(lon);
if (!latStr.equals(newLatStr) || !lonStr.equals(newLonStr)) {
parts[latIndex] = newLatStr;
parts[lonIndex] = newLonStr;
StringBuilder sb = new StringBuilder();
/*
* skip first element due to String.split() with leading slash
*/
for (int i = 1; i < parts.length; ++i) {
sb.append("/").append(parts[i]);
}
res.updateString(DATAURI_COL, sb.toString());
try {
res.updateRow();
} catch (SQLException e) {
if (e.getMessage().contains("duplicate key")) {
/*
* this can happen if data has been ingested twice with
* both the float locations and the double locations.
*/
res.deleteRow();
System.out.println("Encountered duplicate row after"
+ " reformatting, deleted row with dataURI "
+ uri + " to resolve conflict.");
} else {
throw e;
}
}
}
}
System.out.println("Updated table: " + table);
}
/**
* @param args
* @throws Exception
*/
public static void main(String[] args) throws Exception {
parseArguments(args);
Connection conn = openConnection();
Set<String> locationTables = getTablesWithColumn(conn, LOC_DEF_COL);
Set<String> dataUriTables = getTablesWithColumn(conn, DATAURI_COL);
/* only look at tables that both use obs location and have data uris */
locationTables.retainAll(dataUriTables);
for (String table : locationTables) {
try {
updateTable(conn, table);
} catch (Exception e) {
String msg = e.getLocalizedMessage();
System.err.println("ERROR: Unable to update table " + table
+ ": " + e.getLocalizedMessage());
if (msg == null || msg.isEmpty()) {
e.printStackTrace();
}
}
}
}
}

View file

@ -1,40 +0,0 @@
#!/bin/bash
# DR #3410 - this update script will change columns from Double to Real
# operate on tables that were built with classes that embed SurfaceObsLocation or AircraftObsLocation
# only these tables contain the column 'locationdefined'
TABLES=$(psql -U awips -d metadata -tc "select table_name from information_schema.columns where column_name = 'locationdefined'")
COLUMNS=(latitude longitude)
PSQL="/awips2/psql/bin/psql"
# takes two args: a table name and a column name
# alters the column in the table to real
function changeToReal {
echo "INFO: Changing table $1 column $2 to real."
${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ALTER COLUMN $2 TYPE real;"
if [ $? -ne 0 ]; then
echo "ERROR: Failed to change the column $2 for table $1"
return 1
fi
}
msg="INFO: All columns changed successfully"
${PSQL} -U awips -d metadata -c "drop table if exists bufrmos"
for table in ${TABLES[*]}
do
echo "INFO: Altering table $table."
for column in ${COLUMNS[*]}
do
changeToReal $table $column || msg="INFO: Operation completed, some columns could not be changed"
done
done
echo $msg
echo "Reformatting dataURIs that used double precision locations..."
JAVA="/awips2/java/bin/java"
${JAVA} -jar reformat_obslocation_datauri.jar

View file

@ -1,35 +0,0 @@
#!/bin/bash
# This script creates sequences for the tables in the events schema

STATS_MAX_VAL=$(psql -U awips -d metadata -tA -c "select max(id)+1 from events.stats;")

# Since events.notification will only exist at sites running datadelivery, check for table first to avoid ERRORs
if [ `psql -U awips -d metadata -tAc "select exists (select 1 from information_schema.tables where table_schema='events' and table_name='notification');"` = 't' ]; then
    NOTIFICATION_MAX_VAL=$(psql -U awips -d metadata -tA -c "select max(id)+1 from events.notification;")
else
    NOTIFICATION_MAX_VAL=0
fi

AGGREGATE_MAX_VAL=$(psql -U awips -d metadata -tA -c "select max(id)+1 from events.aggregate;")

if [ -z "$STATS_MAX_VAL" ]
then
    STATS_MAX_VAL=1
fi
if [ -z "$NOTIFICATION_MAX_VAL" ]
then
    NOTIFICATION_MAX_VAL=1
fi
if [ -z "$AGGREGATE_MAX_VAL" ]
then
    AGGREGATE_MAX_VAL=1
fi

psql -U awips -d metadata -c \
"CREATE SEQUENCE stats_seq START WITH $STATS_MAX_VAL; \
CREATE SEQUENCE aggregate_seq START WITH $AGGREGATE_MAX_VAL;"

if [ "$NOTIFICATION_MAX_VAL" != 0 ]; then
    psql -U awips -d metadata -c "CREATE SEQUENCE notification_seq START WITH $NOTIFICATION_MAX_VAL;"
fi
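
Each sequence is created to start just past the table's current max(id), so ids handed out afterwards cannot collide with existing rows. A minimal sketch of checking that, assuming events.stats already holds data:

#!/bin/bash
# Compare max(id) against the next sequence value; nextval() should come
# back strictly greater. Note that nextval() consumes one id.
PSQL="/awips2/psql/bin/psql"
${PSQL} -U awips -d metadata -tAc "select max(id) from events.stats;"
${PSQL} -U awips -d metadata -tAc "select nextval('stats_seq');"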

View file

@ -1,43 +0,0 @@
#!/bin/bash
# DR #3644 - this update script will drop the dataURI column from ccfp

PSQL="/awips2/psql/bin/psql"

# remove duplicate ccfp rows (keeping the newest, i.e. highest id) so the
# unique constraint added below can be applied
${PSQL} -U awips -d metadata -c "delete from ccfp where id in (select a.id from ccfp a, ccfp b where a.id < b.id and a.reftime = b.reftime and a.producttype = b.producttype and a.boxlat = b.boxlat and a.boxlong = b.boxlong)"

# takes one arg: a table name
# drops the datauri constraint and column if they exist
function dropDatauri {
    echo "INFO: Dropping DataURI column from $1"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS ${1}_datauri_key;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP COLUMN IF EXISTS datauri;"
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to drop dataURI column for $1"
        echo "FATAL: The update has failed."
        exit 1
    fi
}

# takes three args: table, constraint name, unique columns
# will first drop the constraint if it exists and then add it back; this is
# fairly inefficient if it does exist, but operationally it won't exist, and
# for testing this allows the script to be run easily as a no-op.
function dropDatauriAndAddConstraint {
    dropDatauri $1
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS $2;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ADD CONSTRAINT $2 UNIQUE $3;"
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to add new unique constraint for $1"
        echo "FATAL: The update has failed."
        exit 1
    fi
}

echo "INFO: Dropping ccfp dataURI columns."
dropDatauriAndAddConstraint ccfp ccfp_reftime_producttype_boxlat_boxlong_key "(reftime, producttype, boxlat, boxlong)"
${PSQL} -U awips -d metadata -c "DROP INDEX IF EXISTS ccfp_reftimeindex;"
${PSQL} -U awips -d metadata -c "CREATE INDEX ccfp_reftimeindex ON ccfp USING btree (reftime);"
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE ccfp"
echo "INFO: ccfp dataURI columns dropped successfully"

View file

@ -1,97 +0,0 @@
#!/usr/bin/env python
import imp, sys

DICT_KEYS = ['fullStateName',
             'partOfState',
             'stateAbbr',
             'ugcCityString',
             'ugcCode',
             'ugcName',
             'ugcTimeZone',
             ]

def diffCities(cityStringA, cityStringB):
    cityListA = []
    cityListB = []
    cityListAMixed = []
    cityListBMixed = []
    if cityStringA is not None:
        cityListAMixed = cityStringA.strip('.').split('...')
        cityListA = cityStringA.strip('.').upper().split('...')

    if cityStringB is not None:
        cityListBMixed = cityStringB.strip('.').split('...')
        cityListB = cityStringB.strip('.').upper().split('...')

    added = []
    for city in set(cityListB).difference(set(cityListA)):
        added.append(cityListBMixed[cityListB.index(city)])

    removed = []
    for city in set(cityListA).difference(set(cityListB)):
        removed.append(cityListAMixed[cityListA.index(city)])

    if len(added) > 0:
        print "  added cities:", list(added)

    if len(removed) > 0:
        print "  removed cities:", list(removed)

def printEntry(entry):
    for key in DICT_KEYS:
        if key in entry:
            print "  ", key + ':', entry[key]

def main():
    if len(sys.argv) < 3:
        print "Usage:"
        print sys.argv[0], "fileA fileB"
        print "  fileA: path to old AreaDictionary.py file"
        print "  fileB: path to new AreaDictionary.py file"
        print "Example:"
        print sys.argv[0], "/awips2/edex/data/utility/cave_static/site/OAX/gfe/userPython/textUtilities/regular/AreaDictionary.py /awips2/edex/data/utility/cave_static/configured/OAX/gfe/userPython/textUtilities/regular/AreaDictionary.py"
        sys.exit(1)

    modA = imp.load_source('modA', sys.argv[1])
    modB = imp.load_source('modB', sys.argv[2])

    dictA = modA.AreaDictionary
    dictB = modB.AreaDictionary

    keys = set()
    keys.update(dictA.keys())
    keys.update(dictB.keys())
    keys = list(keys)
    keys.sort()

    for key in keys:
        if key not in dictA:
            print '\n' + key + ": added"
            printEntry(dictB[key])
        elif key not in dictB:
            print '\n' + key + ": removed"
            printEntry(dictA[key])
        else:
            differs = False
            dataA = dictA[key]
            dataB = dictB[key]
            for key1 in DICT_KEYS:
                valueA = valueB = None
                if key1 in dataA:
                    valueA = dataA[key1]
                if key1 in dataB:
                    valueB = dataB[key1]

                if str(valueA).upper() != str(valueB).upper():
                    if not differs:
                        differs = True
                        print '\n' + key + ": differs"

                    if key1 == 'ugcCityString':
                        diffCities(valueA, valueB)
                    else:
                        print "  ", key1, "old:", valueA
                        print "  ", " " * len(key1), "new:", valueB

if __name__ == "__main__":
    main()

View file

@ -1,42 +0,0 @@
#!/usr/bin/env python
# This script will preserve the site's current configured AreaDictionary.py file as
# a site level file if one does not already exist.

AREA_DICTIONARY_PATH = "/awips2/edex/data/utility/cave_static/configured/*/gfe/userPython/textUtilities/regular/AreaDictionary.py"

import glob
import os
import shutil
import traceback

def main():
    # for each configured AreaDictionary.py file
    for configFile in glob.glob(AREA_DICTIONARY_PATH):
        siteFile = configFile.replace("cave_static/configured", "cave_static/site")

        # if site file does not exist
        if not os.path.exists(siteFile):
            # create site directory if necessary
            try:
                os.makedirs(os.path.dirname(siteFile))
            except OSError as e:
                import errno
                if e.errno != errno.EEXIST:
                    print "Error copying", configFile, "\n to", siteFile, \
                          "\nPlease manually copy this file before starting EDEX"
                    traceback.print_exc()
                    continue

            # copy configured file to site.
            print "Preserving", siteFile
            try:
                shutil.copy(configFile, siteFile)
            except:
                print "Error copying", configFile, "\n to", siteFile, \
                      "\nPlease manually copy this file before starting EDEX"
                traceback.print_exc()
        else:
            print "Skipping ", configFile, "\n ", siteFile, "exists"

if __name__ == "__main__":
    main()
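
The only path manipulation involved is swapping the localization level. A one-line sketch of the mapping with a hypothetical site identifier (simplified to replace the first occurrence of 'configured'):

#!/bin/bash
# Hypothetical site XYZ: the configured-level path maps to the site-level
# path by replacing one directory component, as the replace() call above does.
configFile="/awips2/edex/data/utility/cave_static/configured/XYZ/gfe/userPython/textUtilities/regular/AreaDictionary.py"
echo "${configFile/configured/site}"
# -> /awips2/edex/data/utility/cave_static/site/XYZ/gfe/userPython/textUtilities/regular/AreaDictionary.py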

View file

@ -1,30 +0,0 @@
#!/bin/bash
# DR #3702 Copy text plugin_info to the fxatext database.

PSQL="/awips2/psql/bin/psql"

echo "INFO: Copying text plugin_info to fxatext database."
initialized=`$PSQL -t -U awips -d metadata -c "SELECT initialized FROM plugin_info WHERE name = 'text';"`
if [ -n "$initialized" ]; then
    $PSQL -U awips -d fxatext -c "CREATE TABLE IF NOT EXISTS plugin_info(name character varying(255) NOT NULL, initialized boolean,tablename character varying(255),CONSTRAINT plugin_info_pkey PRIMARY KEY (name)); ALTER TABLE plugin_info OWNER TO awips;"
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to create plugin_info table in fxatext database"
        echo "FATAL: The update has failed."
        exit 1
    fi
    $PSQL -U awips -d fxatext -c "INSERT INTO plugin_info (name,initialized) VALUES ('text','$initialized');"
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to register text plugin in fxatext database."
        echo "FATAL: The update has failed."
        exit 1
    fi
    $PSQL -t -U awips -d metadata -c "DELETE FROM plugin_info WHERE name = 'text';"
else
    echo "INFO: Nothing to do."
fi

${PSQL} -U awips -d metadata -c "ALTER TABLE plugin_info DROP COLUMN IF EXISTS database;"
echo "INFO: Done copying text plugin_info to fxatext database."
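
After a successful run the 'text' row should exist only in fxatext. A sketch of verifying both sides:

#!/bin/bash
# Spot check after the migration: the row should now be in fxatext and gone
# from metadata.
PSQL="/awips2/psql/bin/psql"
${PSQL} -U awips -d fxatext -tAc "select name, initialized from plugin_info where name = 'text';"
${PSQL} -U awips -d metadata -tAc "select count(*) from plugin_info where name = 'text';"  # expect 0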

View file

@ -1,30 +0,0 @@
#!/bin/bash
# DR3708 updates collaboration config to support blacklists

IFS=$'\n'
files=`find /awips2/edex/data/utility/cave_static/*/*/collaboration/ -name 'config.xml'`
if [ $? -ne 0 ]; then
    echo "No files found."
    exit 1
fi

echo ""
echo "Press Enter to perform the updates, Ctrl-C to quit."
read done

MY_DIR=`dirname $0`
for f in $files; do
    echo "Updating $f"
    xml=$(python $MY_DIR/util/updateCollaborationConfig.py $f)
    if [[ $? != 0 ]]
    then
        echo "ERROR: Problem updating file $f"
    elif [[ -n $xml ]]
    then
        echo $xml | xmllint --format - > $f
        echo "Successfully updated"
    else
        echo "No update needed for $f"
    fi
done

View file

@ -1,37 +0,0 @@
#!/usr/bin/env python
# DR3708 utility for updating collaboration config to support blacklists
# this script is not intended to be run standalone
# see shell script in parent directory

import sys
import xml.etree.ElementTree as ET

CONFIG_TAG = "config"
LIST_TYPE_ATTRIBUTE = "listType"
WHITELIST_ATTRIB_VALUE = "WHITELIST"
SUB_SITE_TAG = "subscribedSites"
LIST_ENTRY_TAG = "listEntry"

if len(sys.argv) != 2:
    sys.stderr.write("Usage: %s [xml input file]" % (sys.argv[0]))
    sys.exit(1)

tree = ET.parse(sys.argv[1])
root = tree.getroot()
matches = root.findall(CONFIG_TAG)
if len(matches) < 1:
    sys.stderr.write("No matches found, exiting\n")
    sys.exit(0)

for match in matches:
    if LIST_TYPE_ATTRIBUTE not in match.attrib:
        match.attrib[LIST_TYPE_ATTRIBUTE] = WHITELIST_ATTRIB_VALUE
    subSites = match.findall(SUB_SITE_TAG)
    for subSite in subSites:
        lt = ET.SubElement(match, LIST_ENTRY_TAG)
        lt.text = subSite.text
        match.remove(subSite)

tree.write(sys.stdout)
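
A sketch of what the transform does to a config file. The wrapper element and site ids below are hypothetical, chosen only so that <config> is a direct child of the document root, which is what findall() requires:

#!/bin/bash
# Hypothetical input: subscribedSites elements become listEntry elements and
# config gains listType="WHITELIST". Run from the util directory.
cat > /tmp/config.xml <<'XML'
<collaboration>
    <config>
        <subscribedSites>OAX</subscribedSites>
        <subscribedSites>LBF</subscribedSites>
    </config>
</collaboration>
XML
python updateCollaborationConfig.py /tmp/config.xml | xmllint --format -
# expected output, sketch (xmllint also prints an XML declaration line):
# <collaboration>
#   <config listType="WHITELIST">
#     <listEntry>OAX</listEntry>
#     <listEntry>LBF</listEntry>
#   </config>
# </collaboration>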

View file

@ -1,39 +0,0 @@
#!/bin/bash
# DR #3720 - this update script will drop the dataURI column from redbook

PSQL="/awips2/psql/bin/psql"

# takes one arg: a table name
# drops the datauri constraint and column if they exist
function dropDatauri {
    echo "INFO: Dropping DataURI column from $1"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS ${1}_datauri_key;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP COLUMN IF EXISTS datauri;"
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to drop dataURI column for $1"
        echo "FATAL: The update has failed."
        exit 1
    fi
}

# takes three args: table, constraint name, unique columns
# will first drop the constraint if it exists and then add it back; this is
# fairly inefficient if it does exist, but operationally it won't exist, and
# for testing this allows the script to be run easily as a no-op.
function dropDatauriAndAddConstraint {
    dropDatauri $1
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS $2;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ADD CONSTRAINT $2 UNIQUE $3;"
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to add new unique constraint for $1"
        echo "FATAL: The update has failed."
        exit 1
    fi
}

echo "INFO: Dropping redbook dataURI columns."
dropDatauriAndAddConstraint redbook redbook_reftime_forecasttime_wmottaaii_corindicator_fcsthours_productid_fileid_originatorid_key "(reftime, forecasttime, wmottaaii, corindicator, fcsthours, productid, fileid, originatorid)"
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE redbook"
echo "INFO: redbook dataURI columns dropped successfully"
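
This script follows the same pattern as the ccfp update above, so the main thing worth checking afterwards is the replacement unique constraint. One way to list it:

#!/bin/bash
# Lists unique constraints on redbook; after the script runs, the long
# ..._key constraint added above should be the one reported.
/awips2/psql/bin/psql -U awips -d metadata -tAc \
    "select conname from pg_constraint
     where conrelid = 'redbook'::regclass and contype = 'u';"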

View file

@ -1,180 +0,0 @@
#!/bin/bash
# DR #3722 - this update script will drop the dataURI column from taf

PSQL="/awips2/psql/bin/psql"
SQL_FILE="/tmp/DR3722.sql"

# Creates new columns to replace corindicator and amdindicator
function addNewTafColumns {
    if [[ "$ftype" != "boolean" ]]; then
        echo "INFO: Converting taf corindicator and amdindicator to temporary boolean fields"
        echo "ALTER TABLE taf ADD COLUMN corindicator_temp boolean NOT NULL DEFAULT false;" >> $SQL_FILE
        echo "ALTER TABLE taf ADD COLUMN amdindicator_temp boolean NOT NULL DEFAULT false;" >> $SQL_FILE
        echo "UPDATE taf set corindicator_temp = true where corindicator = 'COR';" >> $SQL_FILE
        echo "UPDATE taf set corindicator_temp = true where corindicator = 'true';" >> $SQL_FILE
        echo "UPDATE taf set amdindicator_temp = true where amdindicator = 'AMD';" >> $SQL_FILE
        echo "UPDATE taf set amdindicator_temp = true where amdindicator = 'true';" >> $SQL_FILE

        ${PSQL} -U awips -d metadata -f $SQL_FILE
        if [ $? -ne 0 ]; then
            echo "ERROR: Failed to generate new corindicator and amdindicator columns for table taf. Commands that failed at $SQL_FILE"
            echo "FATAL: The update has failed."
            exit 1
        fi
        rm $SQL_FILE
    fi
}

# Drops duplicates utilizing the new indicator columns and also deletes any
# rows with null values in dataURI fields that are no longer allowed to be null
function deleteTafData {
    echo "INFO: Dropping any taf duplicates keeping the earliest insert, also dropping any rows containing invalid NULL fields"
    temp=""
    if [[ "$ftype" != "boolean" ]]; then
        temp="_temp"
    fi

    query="SELECT distinct b.id FROM taf a, taf b WHERE (a.reftime = b.reftime AND a.stationid = b.stationid AND a.corindicator$temp = b.corindicator$temp AND a.amdindicator$temp = b.amdindicator$temp AND a.issue_timestring = b.issue_timestring AND ((a.inserttime < b.inserttime) or (a.inserttime = b.inserttime and a.id < b.id))) or (b.reftime isnull) or (b.stationid isnull) or (b.issue_timestring isnull)"

    echo " INFO: Finding taf entries to delete"
    result=(`${PSQL} -U awips -d metadata -t -A -c "$query"`)
    numEntries="${#result[@]}"
    if [[ "${numEntries}" -gt 0 ]]; then
        echo " INFO: Found $numEntries to delete"
        taf_ids="${result[0]}"
        if [[ "${numEntries}" -gt 1 ]]; then
            for id in "${result[@]:1}"
            do
                taf_ids+=", $id"
            done
        fi

        # handle cascade tables
        echo "SELECT distinct id from taf_change_groups where parentid in ($taf_ids)" > $SQL_FILE
        echo " INFO: Finding cascaded taf_change_group entries"
        result=(`${PSQL} -U awips -d metadata -t -A -f $SQL_FILE`)
        numEntries="${#result[@]}"
        echo "" > $SQL_FILE

        if [[ "${numEntries}" -gt 0 ]]; then
            echo " INFO: Found $numEntries to delete"
            taf_change_ids="${result[0]}"
            if [[ "${numEntries}" -gt 1 ]]; then
                for id in "${result[@]:1}"
                do
                    taf_change_ids+=", $id"
                done
            fi
            echo "DELETE FROM taf_icing_layers where parentid in ($taf_change_ids);" >> $SQL_FILE
            echo "DELETE FROM taf_sky_cover where parentid in ($taf_change_ids);" >> $SQL_FILE
            echo "DELETE FROM taf_temperature_forecasts where parentid in ($taf_change_ids);" >> $SQL_FILE
            echo "DELETE FROM taf_turbulence_layers where parentid in ($taf_change_ids);" >> $SQL_FILE
            echo "DELETE FROM taf_weather_conditions where parentid in ($taf_change_ids);" >> $SQL_FILE
            echo "DELETE FROM taf_change_groups where id in ($taf_change_ids);" >> $SQL_FILE
        fi

        echo "DELETE FROM taf where id in ($taf_ids)" >> $SQL_FILE
        echo " INFO: Deleting data"
        ${PSQL} -U awips -d metadata -f $SQL_FILE
        if [ $? -ne 0 ]; then
            echo "ERROR: Failed to delete duplicate and invalid data for taf tables. Commands that failed at $SQL_FILE"
            echo "FATAL: The update has failed."
            exit 1
        fi
        rm $SQL_FILE
    else
        echo " INFO: Found no entries to delete"
    fi
}

# takes two args: table, old constraint name
# Drops the prior script's unique constraint, the current unique constraint,
# the old amdindicator and corindicator columns, and renames the temp columns
function dropConstraintsAndRenameColumns {
    echo "INFO: Dropping $1 unique constraints if they exist. Replacing original corindicator and amdindicator with boolean fields."
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS taf_reftime_stationid_corindicator_amdindicator_issuetimestring_key;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS uk_fs43xfrjmc8lk31lxp3516eh3;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS $2;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ALTER COLUMN stationid SET NOT NULL;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ALTER COLUMN issue_timestring SET NOT NULL;"
    if [[ "$ftype" != "boolean" ]]; then
        ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP COLUMN corindicator;"
        ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP COLUMN amdindicator;"
        ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 RENAME COLUMN corindicator_temp to corindicator;"
        ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ALTER COLUMN corindicator DROP DEFAULT;"
        ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 RENAME COLUMN amdindicator_temp to amdindicator;"
        ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ALTER COLUMN amdindicator DROP DEFAULT;"
    fi
}

# takes one arg: a table name
# drops the datauri constraint and column if they exist
function dropDatauri {
    echo "INFO: Dropping DataURI column from $1"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS ${1}_datauri_key;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP COLUMN IF EXISTS datauri;"
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to drop dataURI column for $1"
        echo "FATAL: The update has failed."
        exit 1
    fi
}

# takes three args: table, constraint name, unique columns
# will first drop the constraint if it exists and then add it back; this is
# fairly inefficient if it does exist, but operationally it won't exist, and
# for testing this allows the script to be run easily as a no-op.
function dropDatauriAndAddConstraint {
    dropDatauri $1
    echo "INFO: Adding unique constraint"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 DROP CONSTRAINT IF EXISTS $2;"
    ${PSQL} -U awips -d metadata -c "ALTER TABLE $1 ADD CONSTRAINT $2 UNIQUE $3;"
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to add new unique constraint for $1"
        echo "FATAL: The update has failed."
        exit 1
    fi
}

ftype=`${PSQL} -d metadata -U awips -t -A -c "select data_type from information_schema.columns where table_name='taf' and table_schema='awips' and column_name='corindicator';"`

# delete previous file
if [[ -f $SQL_FILE ]]; then
    rm $SQL_FILE
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to delete prior sql file $SQL_FILE"
        echo "FATAL: The update has failed."
        exit 1
    fi
fi

# make sure we can write to the sql file
echo "" > $SQL_FILE
if [ $? -ne 0 ]; then
    echo "ERROR: Failed to write to sql file $SQL_FILE"
    echo "FATAL: The update has failed."
    exit 1
fi

addNewTafColumns
deleteTafData
dropConstraintsAndRenameColumns taf uk_taf_datauri_fields
dropDatauriAndAddConstraint taf uk_taf_datauri_fields "(reftime, stationid, corindicator, amdindicator, issue_timestring)"
${PSQL} -U awips -d metadata -c "DROP INDEX IF EXISTS taf_reftimeindex;"
${PSQL} -U awips -d metadata -c "CREATE INDEX taf_reftimeindex ON taf USING btree (reftime);"
${PSQL} -U awips -d metadata -c "VACUUM FULL ANALYZE taf"
echo "INFO: taf dataURI columns dropped successfully"
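
The indicator conversion is the subtle part of this script: legacy rows may hold 'COR'/'AMD' while newer rows hold 'true'/'false', and both spellings must collapse to a boolean. The same UPDATE pattern on a throwaway temp table (table and values hypothetical):

#!/bin/bash
# Demonstrates the text-to-boolean conversion used above; 'COR' and 'true'
# both map to true, NULL and 'false' fall through to the false default.
/awips2/psql/bin/psql -U awips -d metadata <<'SQL'
create temp table taf_demo (id int, corindicator varchar);
insert into taf_demo values (1, 'COR'), (2, 'true'), (3, NULL), (4, 'false');
alter table taf_demo add column corindicator_temp boolean not null default false;
update taf_demo set corindicator_temp = true where corindicator = 'COR';
update taf_demo set corindicator_temp = true where corindicator = 'true';
select id, corindicator, corindicator_temp from taf_demo;
-- expected: ids 1 and 2 true, ids 3 and 4 false
SQL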

View file

@ -1,13 +0,0 @@
#!/bin/bash
# DR #3788 - this update script will adjust the satellite_spatial to support sequenced gids.
PSQL="/awips2/psql/bin/psql"
echo "INFO: Updating satellite spatial table"
${PSQL} -U awips -d metadata -q -c "delete from satellite_spatial where gid not in (select distinct coverage_gid from satellite)"
${PSQL} -U awips -d metadata -q -c "ALTER TABLE satellite_spatial DROP CONSTRAINT IF EXISTS uk_fdpq7gpkgi3r3k76j83x7axb1"
${PSQL} -U awips -d metadata -c "ALTER TABLE satellite_spatial ADD CONSTRAINT uk_fdpq7gpkgi3r3k76j83x7axb1 UNIQUE (minx, miny, dx, dy, nx, ny, crswkt)"
${PSQL} -U awips -d metadata -c "CREATE SEQUENCE satspatial_seq INCREMENT 1 MINVALUE 1 MAXVALUE 9223372036854775807 START 1 CACHE 1;"
echo "INFO: Satellite spatial table successfully updated."
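
To confirm the new gid sequence exists without consuming a value, its bookkeeping row can be read directly (pre-PostgreSQL-10 syntax, matching the era of these scripts):

#!/bin/bash
# Reads the sequence state; is_called = f means the first nextval() will
# return 1, the declared START value.
/awips2/psql/bin/psql -U awips -d metadata -c "select last_value, is_called from satspatial_seq;"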

Some files were not shown because too many files have changed in this diff.