Merge branch 'master_13.5.3' into omaha_13.5.3

Former-commit-id: 18609de83d [formerly 3c862170db [formerly dbbf326f4fbfb3e07c97f6a7c380fa7e37544b72]]
Former-commit-id: 3c862170db
Former-commit-id: 569ecb35d5
This commit is contained in:
Steve Harris 2013-11-01 13:23:28 -05:00
commit 0dd6732b0e
9 changed files with 642 additions and 243 deletions

View file

@ -1028,7 +1028,10 @@ in|.0394|.0| 4 | |f5.2|@.|8000F0FF| |13|\
<styleRule>
<paramLevelMatches>
<parameter>TP24hr</parameter>
<parameter>TP36hr</parameter>
<parameter>TP6hr_std</parameter>
<parameter>TP6hr_avg</parameter>
<parameter>TP24hr_avg</parameter>
</paramLevelMatches>
<contourStyle>
<displayUnits>in</displayUnits>

View file

@ -1227,6 +1227,9 @@
<parameter>TP48hr</parameter>
<parameter>TPrun</parameter>
<parameter>TP120hr</parameter>
<parameter>TP6hr_std</parameter>
<parameter>TP6hr_avg</parameter>
<parameter>TP24hr_avg</parameter>
</paramLevelMatches>
<imageStyle>
<!-- filterLow="true" -->

View file

@ -67,6 +67,7 @@ from com.raytheon.uf.common.localization import LocalizationContext_Localization
# 05/23/13 1759 dgilling Remove unnecessary imports.
# 07/25/13 2233 randerso Improved memory utilization and performance
# 09/20/13 2405 dgilling Clip grids before inserting into cache.
# 10/22/13 2405 rjpeter Remove WECache and store directly to cube.
#
#
@ -97,65 +98,98 @@ def logDebug(*msg):
logVerbose(iscUtil.tupleToString(*msg))
class WECache(object):
def __init__(self, we, inv, clipArea):
self._we = we
self._clipArea = clipArea
self._inv = OrderedDict()
lst = list(inv)
while len(lst):
i = lst[:BATCH_WRITE_COUNT]
javaTRs = ArrayList()
for tr in i:
javaTRs.add(iscUtil.toJavaTimeRange(tr))
gridsAndHist = self._we.get(javaTRs, True)
for idx, tr in enumerate(i):
pair = gridsAndHist.get(idx)
g = self.__encodeGridSlice(pair.getFirst(), clipArea)
h = self.__encodeGridHistory(pair.getSecond())
self._inv[tr] = (g, h)
lst = lst[BATCH_WRITE_COUNT:]
time.sleep(BATCH_DELAY)
def retrieveData(we, inv, clipArea):
lst = list(inv)
trs=[]
histDict = OrderedDict()
cube = None
keyList = None
gridType = str(we.getGpi().getGridType())
def keys(self):
return tuple(self._inv.keys())
def __getitem__(self, key):
try:
return self._inv[key]
except KeyError:
logEvent("Cache miss for key:", str(key))
grid = self._we.getItem(iscUtil.toJavaTimeRange(key))
pyGrid = self.__encodeGridSlice(grid, self._clipArea)
history = grid.getGridDataHistory()
pyHist = self.__encodeGridHistory(history)
return (pyGrid, pyHist)
# clipped size
clipSize = (clipArea[1] - clipArea[0] + 1, clipArea[3] - clipArea[2] + 1)
gridCount = len(inv)
def __encodeGridSlice(self, grid, clipArea):
gridType = grid.getGridInfo().getGridType().toString()
if gridType == "SCALAR":
cube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]), dtype=numpy.float32)
elif gridType == "VECTOR":
magCube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]),dtype=numpy.float32)
dirCube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]),dtype=numpy.float32)
cube = (magCube, dirCube)
elif gridType == "WEATHER" or gridType == "DISCRETE":
cube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]), dtype=numpy.int8)
keyList = []
cubeIdx = 0
while len(lst):
i = lst[:BATCH_WRITE_COUNT]
javaTRs = ArrayList()
for tr in i:
javaTRs.add(iscUtil.toJavaTimeRange(tr))
gridsAndHist = we.get(javaTRs, True)
size = gridsAndHist.size()
for idx in xrange(size):
pair = gridsAndHist.get(idx)
grid = pair.getFirst()
tr = iscUtil.transformTime(grid.getValidTime())
encodeGridSlice(grid, gridType, clipArea, cube, cubeIdx, keyList)
cubeIdx += 1
histDict[tr] = encodeGridHistory(pair.getSecond())
lst = lst[BATCH_WRITE_COUNT:]
time.sleep(BATCH_DELAY)
if len(histDict) != gridCount:
# retrieved less grids than originally expected, purge ran?
gridCount = len(histDict)
if gridType == "SCALAR":
return clipToExtrema(grid.__numpy__[0], clipArea)
oldCube = cube
cube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]), dtype=numpy.float32)
for idx in xrange(gridCount):
cube[idx] = oldCube[idx]
elif gridType == "VECTOR":
vecGrids = grid.__numpy__
return (clipToExtrema(vecGrids[0], clipArea), clipToExtrema(vecGrids[1], clipArea))
elif gridType == "WEATHER":
keys = grid.getKeys()
keyList = []
for theKey in keys:
keyList.append(theKey.toString())
return (clipToExtrema(grid.__numpy__[0], clipArea), keyList)
elif gridType =="DISCRETE":
keys = grid.getKey()
keyList = []
for theKey in keys:
keyList.append(theKey.toString())
return (clipToExtrema(grid.__numpy__[0], clipArea), keyList)
def __encodeGridHistory(self, histories):
retVal = []
for i in xrange(histories.size()):
retVal.append(histories.get(i).getCodedString())
return tuple(retVal)
oldMagCube = magCube
magCube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]),dtype=numpy.float32)
oldDirCube = dirCube
dirCube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]),dtype=numpy.float32)
cube = (magCube, dirCube)
for idx in xrange(gridCount):
magCube[idx] = oldMagCube[idx]
dirCube[idx] = oldDirCube[idx]
elif gridType == "WEATHER" or gridType == "DISCRETE":
oldCube = cube
cube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]), dtype=numpy.int8)
for idx in xrange(gridCount):
cube[idx] = oldCube[idx]
return (cube, histDict, keyList)
###-------------------------------------------------------------------------###
### cube and keyList are out parameters to be filled by this method, idx is the index into cube to use
def encodeGridSlice(grid, gridType, clipArea, cube, idx, keyList):
    """Clip one grid slice and store it into cube at position idx.

    cube and keyList are out parameters filled in by this method; idx is
    the index into cube to use.  For VECTOR data, cube is a
    (magnitude, direction) pair of arrays.  For WEATHER/DISCRETE data the
    slice's key strings are appended to keyList as one list per grid.
    """
    if gridType == "SCALAR":
        cube[idx] = clipToExtrema(grid.__numpy__[0], clipArea)
    elif gridType == "VECTOR":
        components = grid.__numpy__
        cube[0][idx] = clipToExtrema(components[0], clipArea)
        cube[1][idx] = clipToExtrema(components[1], clipArea)
    elif gridType == "WEATHER" or gridType == "DISCRETE":
        # DISCRETE slices expose getKey(); WEATHER slices expose getKeys().
        if gridType == "DISCRETE":
            javaKeys = grid.getKey()
        else:
            javaKeys = grid.getKeys()
        gridKeys = [theKey.toString() for theKey in javaKeys]
        keyList.append(gridKeys)
        cube[idx] = clipToExtrema(grid.__numpy__[0], clipArea)
def encodeGridHistory(histories):
    """Return the coded history strings of a grid as a tuple.

    histories is a Java List of GridDataHistory objects; each entry's
    getCodedString() result is collected in order.
    """
    codedStrings = []
    for i in xrange(histories.size()):
        entry = histories.get(i)
        codedStrings.append(entry.getCodedString())
    return tuple(codedStrings)
###-------------------------------------------------------------------------###
@ -529,19 +563,18 @@ def storeTopoGrid(client, file, databaseID, invMask, clipArea):
###-------------------------------------------------------------------------###
###
def storeGridDataHistory(file, we, wec, trList):
def storeGridDataHistory(file, we, histDict):
"Stores the Grid Data history string for each grid in we."
# get the maximum size of the history string
maxHistSize = 0
histList = []
for tr in trList:
his = wec[tr][1]
for (tr, his) in histDict.items():
hisString = ''
for i,h in enumerate(his):
hisString = hisString + str(h)
if i != len(his) - 1:
hisString = hisString + " ^"
hisString = hisString + " ^"
histList.append(hisString)
maxHistSize = max(maxHistSize,len(hisString))
@ -727,21 +760,17 @@ def storeScalarWE(we, trList, file, timeRange, databaseID,
# get the data and store it in a Numeric array.
timeList, overlappingTimes = findOverlappingTimes(trList, timeRange)
# clipped size
clipSize = (clipArea[1] - clipArea[0] + 1, clipArea[3] - clipArea[2] + 1)
gridCount = len(overlappingTimes)
(cube, histDict, keyList) = retrieveData(we, overlappingTimes, clipArea)
gridCount = len(cube)
for i in xrange(len(overlappingTimes) -1, -1, -1):
ot = overlappingTimes[i]
if not ot in histDict:
del overlappingTime[i]
del timeList[i]
elif we.getGpi().isRateParm():
durRatio = (float(timeList[i][1]-timeList[i][0]))/float((ot[1]-ot[0]))
cube[i] *= durRatio
cube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]), dtype=numpy.float32)
wec = WECache(we, overlappingTimes, clipArea)
for i,t in enumerate(overlappingTimes):
grid = wec[t][0]
#adjust for time changes
if we.getGpi().isRateParm():
durRatio = (float(timeList[i][1]-timeList[i][0]))/float((t[1]-t[0]))
grid *= durRatio
cube[i]= grid
### Make sure we found some grids
# make the variable name
varName = we.getParmid().getParmName() + "_" + we.getParmid().getParmLevel()
@ -795,8 +824,8 @@ def storeScalarWE(we, trList, file, timeRange, databaseID,
setattr(var, "fillValue", fillValue)
## Extract the GridDataHistory info and save it
storeGridDataHistory(file, we, wec, overlappingTimes)
storeGridDataHistory(file, we, histDict)
logEvent("Saved", gridCount, varName, " grids")
return gridCount
@ -811,23 +840,16 @@ def storeVectorWE(we, trList, file, timeRange,
# get the data and store it in a Numeric array.
timeList, overlappingTimes = findOverlappingTimes(trList, timeRange)
# clipped size
clipSize = (clipArea[1] - clipArea[0] + 1, clipArea[3] - clipArea[2] + 1)
gridCount = len(overlappingTimes)
magCube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]),dtype=numpy.float32)
dirCube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]),dtype=numpy.float32)
wec = WECache(we, overlappingTimes, clipArea)
for i,t in enumerate(overlappingTimes):
vecData = wec[t][0]
mag = vecData[0]
dir = vecData[1]
if we.getGpi().isRateParm():
durRatio = (float(timeList[i][1]-timeList[i][0]))/float((t[1]-t[0]))
mag *= durRatio
magCube[i] = mag
dirCube[i] = dir
((magCube, dirCube), histDict, keyList) = retrieveData(we, overlappingTimes, clipArea)
gridCount = len(magCube)
for i in xrange(len(overlappingTimes) -1, -1, -1):
ot = overlappingTimes[i]
if not ot in histDict:
del overlappingTime[i]
del timeList[i]
elif we.getGpi().isRateParm():
durRatio = (float(timeList[i][1]-timeList[i][0]))/float((ot[1]-ot[0]))
magCube[i] *= durRatio
varName = we.getParmid().getParmName() + "_" + we.getParmid().getParmLevel()
@ -924,8 +946,8 @@ def storeVectorWE(we, trList, file, timeRange,
setattr(dirVar, "fillValue", dfillValue)
## Extract the GridDataHistory info and save it
storeGridDataHistory(file, we, wec, overlappingTimes)
storeGridDataHistory(file, we, histDict)
logEvent("Saved", gridCount, varName, "grids")
return gridCount * 2 #vector has two grids
@ -970,19 +992,14 @@ def storeWeatherWE(we, trList, file, timeRange, databaseID, invMask, clipArea):
# get the data and store it in a Numeric array.
timeList, overlappingTimes = findOverlappingTimes(trList, timeRange)
# clipped size
clipSize = (clipArea[1] - clipArea[0] + 1, clipArea[3] - clipArea[2] + 1)
gridCount = len(overlappingTimes)
byteCube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]), dtype=numpy.int8)
keyList = []
wec = WECache(we, overlappingTimes, clipArea)
for i,t in enumerate(overlappingTimes):
wx = wec[t][0]
byteCube[i] = wx[0]
keyList.append(wx[1])
(byteCube, histDict, keyList) = retrieveData(we, overlappingTimes, clipArea)
gridCount = len(histDict)
for i in xrange(len(overlappingTimes) -1, -1, -1):
ot = overlappingTimes[i]
if not ot in histDict:
del overlappingTime[i]
del timeList[i]
# make the variable name
varName = we.getParmid().getParmName() + "_" + we.getParmid().getParmLevel()
@ -1046,7 +1063,7 @@ def storeWeatherWE(we, trList, file, timeRange, databaseID, invMask, clipArea):
setattr(var, "fillValue", fillValue)
## Extract the GridDataHistory info and save it
storeGridDataHistory(file, we, wec, overlappingTimes)
storeGridDataHistory(file, we, histDict)
logEvent("Saved", gridCount, varName, "grids")
@ -1061,19 +1078,13 @@ def storeDiscreteWE(we, trList, file, timeRange, databaseID, invMask, clipArea):
# get the data and store it in a Numeric array.
timeList, overlappingTimes = findOverlappingTimes(trList, timeRange)
# clipped size
clipSize = (clipArea[1] - clipArea[0] + 1, clipArea[3] - clipArea[2] + 1)
gridCount = len(overlappingTimes)
byteCube = numpy.empty(shape=(gridCount, clipSize[1], clipSize[0]), dtype=numpy.int8)
keyList = []
wec = WECache(we, overlappingTimes, clipArea)
for i,t in enumerate(overlappingTimes):
dis = wec[t][0]
byteCube[i] = dis[0]
keyList.append(dis[1])
(byteCube, histDict, keyList) = retrieveData(we, overlappingTimes, clipArea)
gridCount = len(histDict)
for i in xrange(len(overlappingTimes) -1, -1, -1):
ot = overlappingTimes[i]
if not ot in histDict:
del overlappingTime[i]
del timeList[i]
# make the variable name
varName = we.getParmid().getParmName() + "_" + we.getParmid().getParmLevel()
@ -1135,7 +1146,7 @@ def storeDiscreteWE(we, trList, file, timeRange, databaseID, invMask, clipArea):
setattr(var, "fillValue", fillValue)
## Extract the GridDataHistory info and save it
storeGridDataHistory(file, we, wec, overlappingTimes)
storeGridDataHistory(file, we, histDict)
logEvent("Saved", gridCount, varName, "grids")
@ -1325,7 +1336,7 @@ def main(outputFilename, parmList, databaseID, startTime,
clipArea = extremaOfSetBits(maskGrid)
maskGrid = clipToExtrema(maskGrid, clipArea)
clippedGridSize = maskGrid.shape
validPointCount = numpy.add.reduce(numpy.add.reduce(maskGrid))
validPointCount = float(numpy.add.reduce(numpy.add.reduce(maskGrid)))
#invert the mask grid
invMask = numpy.logical_not(maskGrid)

View file

@ -1 +1 @@
759799451b20c427bdaa8cb8185b9602cc66c6c6
bd6cb2ea1de310abb0f576998cd03a437683289f

View file

@ -1 +1 @@
759799451b20c427bdaa8cb8185b9602cc66c6c6
bd6cb2ea1de310abb0f576998cd03a437683289f

View file

@ -34,7 +34,7 @@
* a text file requires no code change as long as the parameters don't change.
* That logic could perhaps change as well.
*
* The routine first uses standard C calls to read the netcdf file. The structure
* The routine first uses standard C calls to read the NetCDF file. The structure
* of that file can be reviewed by reading the GFE help reference section on the
* ifpnetCDF command.
*
@ -61,12 +61,16 @@
*
* Version 4 allows users to combine all GRIB messages into one file. This becomes useful
* when dealing with a lot of files for a parameter such as 1 hour QPF or temperature that
* goes out to 240 hours.
* goes out to num_hours hours.
*
* This is still a work in progress and code can always be improved to increase efficiency.
*
* Oct 2011 - PTilles - added read of new token for defining number of days of data to process
*
* Mar 2012 - PTilles - added functionality to allow for more than 10 days (more than 240
* hours) of data in one file to be processed. This looks for a value of '10'
* in the 5th parameter of gfe2grib.txt.
*
* Sep 2012 -Dan Stein - The original nc2grib program assumed the first variable in the
* NetCDF file (variable[0]) would be the data variable to be converted to grib format. The
* nc2grib tool was hard-coded to only look at variable[0]. In AWIPS-II, GFE began putting
@ -93,9 +97,14 @@
#include "packgrib.h"
#include "getopt.h"
#include "cmapf.h"
/*#include "version_info.h"*/
#define VERSION_NAME "AWIPS II"
#define VERSION_NUMBER "13.5.2"
#define VERSION_DATE "(Oct 30, 2013)"
#define SECINHR 3600.
#define PATH_LEN 500
#define FILE_LEN 300
@ -200,23 +209,24 @@ int nc2grib_main (int argc, char *argv[])
char adayhrmin[7]={'\0'}; /* day, hour, minute info attached to WMO header */
int numgfeparms=0;
char cnum[3] = {'\0'};
int num_hours = 0; /* (num_days * 24) */
/* number of days of data to process - read from token - previously hard coded as 10 */
/* default value = 10 - if token not found then default value used */
int num_days = 0;
int numgfiles=0; /* number of grib files for combining files into one if desired */
char *gfiles[240]; /* array of char pointers for holding grib filenames if combining files */
/* for reading the NetCDF file */
int NetCDF_ID; /* Netcdf id */
int ndims; /* number of dimensions */
int NetCDF_ID; /* NetCDF id */
int numDims; /* number of dimensions */
int numVars; /* number of variables */
int ngatts; /* number of attributes */
int recdim;
int numGlobalAttributes; /* number of attributes */
int unlimitedDimensionID;
long start[] = {0, 0, 0}; /* start at first value */
long start1r[] = {0, 0}; /* accounts for netcdf with only 1 record and 2 dimensions of y,x */
@ -261,9 +271,9 @@ int nc2grib_main (int argc, char *argv[])
double *latlonLL, *latlonUR, lonOrigin,*domainOrigin, *domainExtent, *latLonOrigin;
int *gridPointLL, *gridPointUR;
double x1, y1, x2, y2, lat1, lon1, lat2, lon2;
nc_type vt_type, dn_type, ll_type, d_type, g_type;
nc_type dataType, dn_type, ll_type, d_type, g_type;
nc_type varDataType;
int vt_len, ll_len, d_len, g_len;
int attributeLength, ll_len, d_len, g_len;
int variableID, *gridSize;
int numberOfVariableDimensions;
int dimensionIDVector[MAX_VAR_DIMS];
@ -274,7 +284,7 @@ int nc2grib_main (int argc, char *argv[])
char cdfunits[MAX_NC_NAME]={'\0'};
char projection[MAX_NC_NAME]={'\0'};
long dim_size;
float *cdfvargrid=NULL; /* this is the main array holding the actual data values */
float *cdfDataArray=NULL; /* this is the main array holding the actual data values */
float arraysize;
long *validTimes;
@ -361,7 +371,7 @@ int nc2grib_main (int argc, char *argv[])
output_buffer = (size_t *) malloc (sizeof(size_t)*odim); /* output buffer used when writing GRIB message */
int variableFound = FALSE; /* Is the variable present in the NetCDF file? Stein Sep 2012 */
int variableFound = FALSE; /* Is the variable present in the NetCDF file? */
/* output_buffer = (int *) malloc (sizeof(int)*odim); /* output buffer used when writing GRIB message */
@ -378,7 +388,7 @@ int nc2grib_main (int argc, char *argv[])
/* parse command line arguments */
while ((c = getopt(argc, argv, ":n:i:t:o::b:p:g:Nfrqhv1")) != -1) {
while ((c = getopt(argc, argv, ":n:i:t:o::b:p:g:Nfrqhv1V")) != -1) {
switch (c) {
@ -710,6 +720,10 @@ int nc2grib_main (int argc, char *argv[])
case '1': /* process only one record of NetCDF, useful for debugging */
time1flag++;
break;
case 'V':
printf("version number = %s-%s\n",VERSION_NAME,VERSION_NUMBER);
exit(0);
break;
case ':': /* for options that need an operand */
if(optopt != 'o')
{
@ -738,7 +752,8 @@ int nc2grib_main (int argc, char *argv[])
printf("Unrecognized program command line option: -%c\n", optopt);
errflag++;
}
}
} /* while c = getopt */
if (errflag || helpflag || argc==1 || ( iflag==0 || pflag==0) )
@ -753,6 +768,24 @@ int nc2grib_main (int argc, char *argv[])
return USAGE;
}
/* Print CHPS build number */
printf("version number = %s-%s\n",VERSION_NAME,VERSION_NUMBER);
if(getAppsDefaults("nc2g_num_days",cnum) == -1)
{
num_days = 10;
}
else
{
num_days = atoi(cnum);
}
num_hours = num_days * 24;
char *gfiles[num_hours]; /* array of char pointers for holding grib filenames if combining files */
printf("\n number of days to process = %d \n", num_days);
if(nc_getAppsDefaults("nc2g_app_dir",appsdir) == -1)
{
@ -805,7 +838,7 @@ int nc2grib_main (int argc, char *argv[])
/**************************************************************************/
/* debugflag > 0; debug option is on */
if(debugflag>0)
if(debugflag)
printf("\n Debug option on...reading from GFE to GRIB configuation file:\n" \
" %s\n\n",file_path);
@ -817,9 +850,11 @@ int nc2grib_main (int argc, char *argv[])
if(fileline[0] != '#') /* check for comments */
{
sscanf(fileline,"%s%s%d%d%d%d%d",gfe2grib.GFEParameterName, gfe2grib.gfename, &gfe2grib.processid,
&gfe2grib.gribnum,&gfe2grib.decscale, &gfe2grib.timerange, &gfe2grib.timeunit);
if(debugflag>0)
sscanf(fileline,"%s%s%d%d%d%d%d",gfe2grib.GFEParameterName,
gfe2grib.gfename, &gfe2grib.processid,
&gfe2grib.gribnum,&gfe2grib.decscale, &gfe2grib.timerange,
&gfe2grib.timeunit);
if(debugflag)
printf(" DEBUG: Read in from gfe2grib.txt %s %s %d %d %d %d %d \n",gfe2grib.GFEParameterName, gfe2grib.gfename, gfe2grib.processid,
gfe2grib.gribnum,gfe2grib.decscale, gfe2grib.timerange, gfe2grib.timeunit);
@ -828,12 +863,12 @@ int nc2grib_main (int argc, char *argv[])
if (!(strcmp(gfe2grib.GFEParameterName, process)))
{
found = 1;
break;
}
}
}
} /* If not a comment */
} /* While we haven't reach the end of the gfe2grib.txt file */
@ -851,13 +886,12 @@ int nc2grib_main (int argc, char *argv[])
fclose(fp);
/* open the Netcdf file*/
/* open the NetCDF file*/
if(inpath==NULL)
{
inpath=(char *) malloc(sizeof(char)*(FILE_LEN+1));
if(inpath==NULL)
{
printf(" ERROR: Something went wrong with memory allocation for the NetCDF input directory....exiting\n");
@ -871,12 +905,13 @@ int nc2grib_main (int argc, char *argv[])
printf(" ERROR: Invalid token value for token \"netcdf_dir\".\n\t Program exit.");
return APSDEFERR;
}
else if (debugflag>0)
else if (debugflag)
{
printf(" Default path for the input NetCDF file not specified...Will use the following:\n" \
" %s\n",inpath);
}
}
} /* if inpath is NULL */
/***************************************************************************/
else if(debugflag)
printf(" Will attempt to read NetCDF file from this path:\n" \
@ -895,32 +930,21 @@ int nc2grib_main (int argc, char *argv[])
if (NetCDF_ID==-1)
{
printf("\n ERROR: Could not open the netcdf file: %s\n", fn);
printf("\n ERROR: Could not open the NetCDF file: %s\n", fn);
return CDFERR;
}
else
{
printf ("\n Netcdf file %s was opened successfully.\n\n",fn);
printf ("\n NetCDF file %s was opened successfully.\n\n",fn);
}
/* Inquire about the Netcdf file: No.of dimensions, No.of variables,
No. of global attributes etc.*/
/* Inquire about the NetCDF file: No.of dimensions, No.of variables, No.of
* global attributes etc.
*/
ncinquire (NetCDF_ID, &ndims, &numVars, &ngatts, &recdim);
/*************************************************************************/
/* debug */
ncinquire (NetCDF_ID, &numDims, &numVars, &numGlobalAttributes, &unlimitedDimensionID);
if (debugflag >0)
{
printf("\n Debug option on. Debug info from reading the netcdf file follows:\n\n");
printf (" Number of dimensions for this netcdf file is: %d\n",ndims);
printf (" Number of variables for this netcdf file is: %d\n",numVars);
printf (" Number of global attributes for this netcdf file is: %d\n",ngatts);
}
/*************************************************************************/
/**************************************************************************
* Sep 2012 - Stein The utility that takes GFE data and converts it to
/* Sep 2012 - Stein The utility that takes GFE data and converts it to
* NetCDF format is ifpNetCDF. To the best of my knowledge, this utility
* always puts exactly one variable and exactly one history variable into
* each NetCDF file. The section of code below originally assumed that the
@ -930,7 +954,7 @@ if (debugflag >0)
* For whatever reason, this order was changed in AWIPS-II so that the
* history variable showed up first and the program wouldn't work. I was
* tasked with correcting this program to make it order independent. My
* solution was to loop through all the variables to see whether the
* solution is to loop through all the variables to see whether the
* variable we're looking for is in the NetCDF file. If it is, variableID
* is set to it's value. If not found, the program will exit as it did
* before.
@ -989,11 +1013,6 @@ if (debugflag >0)
* end of the section of code that I changed.
*/
if(numberOfVariableDimensions==3) /* in some cases, this may not be true if file is produced from MPE/DQC */
{
for (i=0; i<numberOfVariableDimensions; i++)
@ -1014,18 +1033,19 @@ if (debugflag >0)
return CDFERR;
}
/*************************************************************************/
if (debugflag >0)
if (debugflag)
{
printf(" DEBUG: cdfvar dimension %d: name=%s size=%ld\n",i+1,dimname,dim_size);
}
/*************************************************************************/
}
}
} /* for i */
} /* if (numberOfVariableDimensions == 3) */
else if (numberOfVariableDimensions==2)
{
for (i=0; i<numberOfVariableDimensions; i++)
{
@ -1036,14 +1056,16 @@ if (debugflag >0)
else if (i==1)
x=dim_size;
/*************************************************************************/
if (debugflag >0)
if (debugflag)
{
printf(" DEBUG: cdfvar dimension %d: name=%s size=%ld\n",i+1,dimname,dim_size);
}
/*************************************************************************/
}
}
} /* for i */
} /* else if (numberOfVariableDimensions == 2) */
else
{
printf("\n nc2grib is not coded to handle %d number of dimensions for variable %s.\n" \
@ -1055,17 +1077,29 @@ if (debugflag >0)
/* get variable attributes */
/* get the values of NetCDF attributes given the variable ID and name */
arraysize = x * y;
cdfvargrid = (float *) malloc (sizeof(float)*arraysize);
cdfDataArray = (float *) malloc (sizeof(float) * arraysize);
long count[]={1,y,x};
long count1r[]={y,x};
ncattinq(NetCDF_ID,variableID,"validTimes",&vt_type,&vt_len);
if (debugflag)
{
printf ("DEBUG: ncattinq call Before\n");
}
validTimes = (long *) malloc(vt_len * nctypelen(vt_type));
/* Get Information about an Attribute (att inquiry) */
ncattinq(NetCDF_ID, variableID, "validTimes", &dataType, &attributeLength);
if (debugflag)
{
printf ("DEBUG: ncattinq call After\n");
}
validTimes = (long *) malloc (attributeLength * nctypelen(dataType));
ncattget(NetCDF_ID, variableID, "validTimes", validTimes);
@ -1077,6 +1111,8 @@ if (debugflag >0)
ncattget(NetCDF_ID, variableID, "projectionType", projection);
/* Get Information about an Attribute (att inquiry) */
ncattinq(NetCDF_ID,variableID,"latLonLL",&ll_type,&ll_len);
latlonLL = (double *) malloc(ll_len * nctypelen(ll_type));
@ -1087,30 +1123,40 @@ if (debugflag >0)
ncattget(NetCDF_ID, variableID, "latLonUR", (void *) latlonUR);
/* Get Information about an Attribute (att inquiry) */
ncattinq(NetCDF_ID,variableID,"domainOrigin",&d_type,&d_len);
domainOrigin = (double *) malloc(d_len * nctypelen(d_type));
ncattget(NetCDF_ID, variableID, "domainOrigin", (void *) domainOrigin);
/* Get Information about an Attribute (att inquiry) */
ncattinq(NetCDF_ID,variableID,"domainExtent",&d_type,&d_len);
domainExtent = (double *) malloc(d_len * nctypelen(d_type));
ncattget(NetCDF_ID, variableID, "domainExtent", (void *) domainExtent);
/* Get Information about an Attribute (att inquiry) */
ncattinq(NetCDF_ID,variableID,"gridSize",&g_type,&g_len);
gridSize = (int *) malloc(g_len * nctypelen(g_type));
ncattget(NetCDF_ID, variableID, "gridSize", (void *) gridSize);
/* Get Information about an Attribute (att inquiry) */
ncattinq(NetCDF_ID,variableID,"gridPointLL",&g_type,&g_len);
gridPointLL = (int *) malloc(g_len * nctypelen(g_type));
ncattget(NetCDF_ID, variableID, "gridPointLL", (void *) gridPointLL);
/* Get Information about an Attribute (att inquiry) */
ncattinq(NetCDF_ID,variableID,"gridPointUR",&g_type,&g_len);
gridPointUR = (int *) malloc(g_len * nctypelen(g_type));
@ -1119,8 +1165,8 @@ if (debugflag >0)
/* initialize the array to missing value */
for (i=0;i<arraysize;i++)
(*(cdfvargrid+i)) = xmissing;
for (i = 0; i < arraysize; ++i)
cdfDataArray[i] = xmissing;
/*************************************************************************/
@ -1128,7 +1174,7 @@ if (debugflag >0)
{
printf(" DEBUG: siteID = %s\n",siteID);
printf(" DEBUG: number of valid times = %d type = %d\n",vt_len, vt_type);
printf(" DEBUG: number of valid times = %d type = %d\n",attributeLength, dataType);
printf(" DEBUG: descriptName = %s\n",descriptName);
printf(" DEBUG: projection = %s\n",projection);
@ -1344,7 +1390,7 @@ if (debugflag >0)
}
else
{
printf(" Unknown projection read from netcdf...Exiting");
printf(" Unknown projection read from NetCDF...Exiting");
return CDFERR;
/* might account for this as this is a lat,lon grid */
@ -1602,16 +1648,15 @@ if (debugflag>0)
*/
if (time1flag>0) /* for testing only to do just the first valid time from the netcdf file */
vt_len=2;
if (time1flag>0) /* for testing only to do just the first valid time from the NetCDF file */
attributeLength=2;
/****************************************************************************/
if (debugflag>0)
printf("\n ***Entering main loop to process NetCDF records(s) into GRIB files*** \n\n");
/****************************************************************************/
for (m=0; m<vt_len; m+=2)
for (m = 0; m < attributeLength; m += 2)
{
status = timet_to_yearsec_ansi((time_t) *(validTimes+m+1), validtime);
@ -1699,7 +1744,7 @@ if (debugflag>0)
fcsth=0;
/* In the case of multiple accumulation periods in the same netcdf file, will need to attach this to the
/* In the case of multiple accumulation periods in the same NetCDF file, will need to attach this to the
filename in both cases. Can't reuse fcsth as it might be needed to determine the WMO header for any
future NPVU estimate/observed grids.
*/
@ -1714,14 +1759,14 @@ if (debugflag>0)
if (esth > 240 || esth < 0)
if (esth > num_hours || esth < 0)
{
printf(" The estimated/observed time period is either less than 0 or greater than 10 days (240 hours).\n" \
printf(" The estimated/observed time period is either less than 0 or greater than %d hours.\n" \
" Therefore, valid times within the input NetCDF filename may not have been generated \n" \
" correctly. Or this is actually a forecast grid and the -b option should be used so it \n" \
" will be processed correctly. Check your options and ensure this is an estimate or observed grid\n" \
" You could also try to generate the file again.\n" \
" For debug esth = %d\n",esth);
" For debug esth = %d\n",num_hours, esth);
return FILEERR;
}
@ -1784,13 +1829,13 @@ if (debugflag>0)
printf(" DEBUG: fcsth = %d timediff=%f valid time = %ld basis time_t = %ld\n",fcsth, timediff,(*(validTimes+m+1)), basetime_t);
/*************************************************************/
if (fcsth > 240 || fcsth < 0)
if (fcsth > num_hours || fcsth < 0)
{
printf(" The forecast time is either less than 0 or greater than 10 days (240 hours).\n" \
printf(" The forecast time is either less than 0 or greater than %d hours.\n" \
" Therefore, the basis time may not be specified correctly or may need to be specified \n" \
" on the command line according to guidance. Please check your command options or \n" \
" or the NetCDF file creation and try again.\n" \
" for debug fcsth = %d\n",fcsth);
" for debug fcsth = %d\n",num_hours, fcsth);
return FILEERR;
}
@ -1816,10 +1861,12 @@ if (debugflag >0)
grib_lbl[16]=fcsth-(int)(timediff/SECINHR); /* P1 */
grib_lbl[17]=fcsth; /* P2 */
}
else if (gfe2grib.timerange==0)
else if (gfe2grib.timerange==0 || gfe2grib.timerange == 10)
{
/* this is for a forecast product valid at reference time + P1 and
at present using this for PETF
OR
case of forecast hour > 255
*/
grib_lbl[16]=fcsth; /* P1 */
@ -1842,13 +1889,13 @@ if (debugflag >0)
start[0]=(long) (m/2);
status = ncvarget(NetCDF_ID,variableID,start,count,cdfvargrid);
status = ncvarget(NetCDF_ID,variableID,start,count,cdfDataArray);
}
else if (numberOfVariableDimensions==2)
{
start1r[0]=(long) (m/2);
status = ncvarget(NetCDF_ID,variableID,start1r,count1r,cdfvargrid);
status = ncvarget(NetCDF_ID,variableID,start1r,count1r,cdfDataArray);
}
if (status != NC_NOERR)
@ -1862,7 +1909,7 @@ if (debugflag >0)
for (i=0;i<arraysize;i++)
{
if((*(cdfvargrid+i))> xmissing)
if((*(cdfDataArray+i))> xmissing)
{
mischek=1;
break;
@ -1880,7 +1927,7 @@ if (debugflag >0)
for (i=0;i<arraysize;i++)
{
if((*(cdfvargrid+i))!= 0.)
if((*(cdfDataArray+i))!= 0.)
{
zerochek=1;
break;
@ -1904,9 +1951,9 @@ if (debugflag >0)
for (i=0;i<arraysize;i++)
{
if((*(cdfvargrid+i))> xmissing)
if((*(cdfDataArray+i))> xmissing)
*(cdfvargrid+i) *= 25.4; /* convert inches to mm */
*(cdfDataArray+i) *= 25.4; /* convert inches to mm */
}
}
@ -1920,9 +1967,9 @@ if (debugflag >0)
for (i=0;i<arraysize;i++)
{
if((*(cdfvargrid+i))> xmissing)
if((*(cdfDataArray+i))> xmissing)
*(cdfvargrid+i) = ((*(cdfvargrid+i)-32) * 5/9) + 273.16; /* convert F to K */
*(cdfDataArray+i) = ((*(cdfDataArray+i)-32) * 5/9) + 273.16; /* convert F to K */
}
@ -1931,9 +1978,9 @@ if (debugflag >0)
{
for (i=0;i<arraysize;i++)
{
if((*(cdfvargrid+i))> xmissing)
*(cdfvargrid+i) += 273.16; /* convert C to K */
if((*(cdfDataArray+i))> xmissing)
*(cdfDataArray+i) += 273.16; /* convert C to K */
}
}
@ -1953,9 +2000,9 @@ if (debugflag >0)
for (i=0;i<arraysize;i++)
{
if((*(cdfvargrid+i))> xmissing)
if((*(cdfDataArray+i))> xmissing)
*(cdfvargrid+i) *= 0.3048; /* convert feet to meters */
*(cdfDataArray+i) *= 0.3048; /* convert feet to meters */
}
}
@ -1983,9 +2030,8 @@ if (debugflag >0)
}
/*************************************************************************/
status = packgrib(grib_lbl,pds_ext,&iplen,cdfvargrid,&idim,&xmissing,
output_buffer,&odim,&length);
status = packgrib(grib_lbl, pds_ext, &iplen, cdfDataArray, &idim,
&xmissing, output_buffer,&odim,&length);
if (status !=0)
{
@ -2206,7 +2252,7 @@ if(debugflag)
sprintf(ofn,ofn,fcsth); /* standard forecast product using forecast hours past basis time */
}
} /* if (bflag) */
else /* without a basis time, this has to be an estimated/observed product using the valid time in
the output file. Note that if "%%" is NULL and bflag == 0, specifying esth here is
ignored in the output filename.
@ -2340,7 +2386,7 @@ if(debugflag>0)
if(bflag && qflag==0) /* old - strstr(process,"QPE")==NULL && strstr(process,"qpe")==NULL) */
if(bflag && qflag==0) /* old - strstr(GFEParameterName,"QPE")==NULL && strstr(process,"qpe")==NULL) */
{
if(debugflag>0)
@ -2357,6 +2403,7 @@ if(debugflag>0)
/* first write out the main GRIB file using the copygb command without the header determined above
to a temporary holding file. This file will now contain the QPF forecast on GRID218 at 10km
resolution */
copygb_main_(command);
/* status = system(command); */
}
@ -2768,8 +2815,8 @@ if (debugflag >0)
if(output_buffer!=NULL)
free(output_buffer);
if(cdfvargrid!=NULL)
free(cdfvargrid);
if(cdfDataArray!=NULL)
free(cdfDataArray);
if(gribdir!=NULL)
free(gribdir);
@ -2868,15 +2915,15 @@ int timet_to_userformat_ansi(time_t timet, char *ansi, char* userformat)
int display_usage(void)
{
printf("\n\n nc2grib GFE NetCDF to GRIB1 translator, usage:\n\n" \
"./nc2grib.LX -n (input netcdf path) -i (netcdf file) -t (output grib path) -o (output grib file) \n" \
"./nc2grib.LX -n (input NetCDF path) -i (NetCDF file) -t (output grib path) -o (output grib file) \n" \
" -b (basis time) -p (process ID) -g (one GRIB filename) -f -N -v -h\n" \
"where:\n" \
"-n (input netcdf path) Refers to the path containing the NetCDF file\n" \
" Optional, requires argument generated by the GFE routine ifpnetCDF.\n" \
"-n (input NetCDF path) Refers to the path containing the NetCDF file\n" \
" Optional, requires argument generated by the GFE routine ifpNetCDF.\n" \
" If not used, the token netcdf_dir will be used \n" \
" to retrieve this information\n\n" \
"-i (input netcdf file) Refers to the NetCDF file generated in the format\n" \
" Required, requires argument used by the GFE routine ifpnetCDF.\n\n" \
"-i (input NetCDF file) Refers to the NetCDF file generated in the format\n" \
" Required, requires argument used by the GFE routine ifpNetCDF.\n\n" \
" NOTE that this command line option and its argument\n" \
" must be specified in the call to nc2grib.\n\n" \
"-t (output grib path) Refers to the path of the GRIB file(s) generated by nc2grib.\n" \
@ -2893,7 +2940,7 @@ int display_usage(void)
" Required for forecast Example: -b 2009051412 \n" \
" grids and QPE grids going to \n" \
" NPVU,requires argument \n\n" \
"-p (process ID) Refers to the parameter process ID relating to a GFE parameter\n" \
"-p (GFEParameterName ID) Refers to the parameter process ID relating to a GFE parameter\n" \
" Required, requires argument such as QPF. Needs to match against a process in the gfe2grib.txt\n" \
" configuration file.\n" \
" NOTE that this command line option and its argument \n" \
@ -2935,10 +2982,6 @@ int display_usage(void)
return 0;
/* ============== Statements containing RCS keywords: */
{static char rcs_id1[] = "$Source: /fs/hseb/ob9d/ohd/pproc/src/nc2grib/RCS/main_nc2grib.c,v $";
static char rcs_id2[] = "$Id: main_nc2grib.c,v 1.2 2010/06/14 15:04:32 millerd Exp $";}
/* =================================================== */
}

View file

@ -405,11 +405,11 @@ if [ "${1}" = "-custom" ]; then
#fi
#buildRPM "awips2-httpd-pypies"
#buildRPM "awips2-ant"
#buildRPM "awips2-adapt-native"
buildRPM "awips2-adapt-native"
#buildRPM "awips2-common-base"
#buildRPM "awips2-hydroapps-shared"
buildRPM "awips2-hydroapps-shared"
#buildRPM "awips2-java"
buildRPM "awips2-python-dynamicserialize"
#buildRPM "awips2-python-dynamicserialize"
#buildRPM "awips2-rcm"
#buildRPM "awips2-tools"

View file

@ -281,15 +281,16 @@ fi
# Use the custom flag for selecting specific rpms to build
if [ "${1}" = "-custom" ]; then
unpackHttpdPypies
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-httpd-pypies"
#unpackHttpdPypies
#if [ $? -ne 0 ]; then
# exit 1
#fi
#buildRPM "awips2-httpd-pypies"
buildRPM "awips2-adapt-native"
buildRPM "awips2-hydroapps-shared"
#buildRPM "awips2-ant"
buildRPM "awips2-hydroapps-shared"
#buildRPM "awips2-java"
#buildRPM "awips2-python-dynamicserialize"
#buildRPM "awips2-tools"
exit 0

View file

@ -0,0 +1,338 @@
#!/bin/bash
# buildRPM - build a single AWIPS II RPM with rpmbuild.
#
# Arguments:
#   ${1} == the name of the rpm.
#
# Relies on lookupRPM (sourced from the common functions) to validate the
# rpm name and to set RPM_SPECIFICATION to the directory that contains the
# component.spec file. Exits the whole script with status 1 on any failure.
function buildRPM()
{
   local rpm_name="${1}"

   # Validate the rpm name; lookupRPM also prepares RPM_SPECIFICATION
   # for the rpmbuild invocation below.
   if ! lookupRPM "${rpm_name}"; then
      echo "ERROR: '${rpm_name}' is not a recognized AWIPS II RPM."
      exit 1
   fi

   # NOTE(review): the single-quoted %(echo ...) defines are expanded by
   # rpmbuild itself, not by this shell — presumably buildEnvironment.sh
   # exports these variables; verify before changing them.
   /usr/bin/rpmbuild -ba \
      --define '_topdir %(echo ${AWIPSII_TOP_DIR})' \
      --define '_baseline_workspace %(echo ${WORKSPACE})' \
      --define '_uframe_eclipse %(echo ${UFRAME_ECLIPSE})' \
      --define '_awipscm_share %(echo ${AWIPSCM_SHARE})' \
      --define '_build_root %(echo ${AWIPSII_BUILD_ROOT})' \
      --define '_component_version %(echo ${AWIPSII_VERSION})' \
      --define '_component_release %(echo ${AWIPSII_RELEASE})' \
      --define '_component_build_date %(echo ${COMPONENT_BUILD_DATE})' \
      --define '_component_build_time %(echo ${COMPONENT_BUILD_TIME})' \
      --define '_component_build_system %(echo ${COMPONENT_BUILD_SYSTEM})' \
      --buildroot "${AWIPSII_BUILD_ROOT}" \
      "${RPM_SPECIFICATION}/component.spec"
   if [ $? -ne 0 ]; then
      echo "ERROR: Failed to build RPM ${rpm_name}."
      exit 1
   fi

   return 0
}
# This script will build all of the 64-bit rpms.
# Ensure that we are on a machine with the correct architecture.
architecture=`uname -i`
if [ ! "${architecture}" = "x86_64" ]; then
echo "ERROR: This build can only be performed on a 64-bit Operating System."
exit 1
fi
# Determine which directory we are running from.
path_to_script=`readlink -f $0`
dir=$(dirname $path_to_script)
# The shared helper scripts live in ../common relative to this script.
common_dir=`cd ${dir}/../common; pwd;`
if [ $? -ne 0 ]; then
echo "ERROR: Unable to find the common functions directory."
exit 1
fi
# source the common functions.
# lookupRPM.sh provides lookupRPM (rpm-name validation / spec lookup).
source ${common_dir}/lookupRPM.sh
if [ $? -ne 0 ]; then
echo "ERROR: Unable to source the common functions."
exit 1
fi
# usage.sh provides the usage function invoked when no option matches.
source ${common_dir}/usage.sh
if [ $? -ne 0 ]; then
echo "ERROR: Unable to source the common functions."
exit 1
fi
# rpms.sh provides the build* helpers (buildCAVE, buildEDEX, buildJava, ...).
source ${common_dir}/rpms.sh
if [ $? -ne 0 ]; then
echo "ERROR: Unable to source the common functions."
exit 1
fi
source ${common_dir}/systemInfo.sh
if [ $? -ne 0 ]; then
echo "ERROR: Unable to retrieve the system information."
exit 1
fi
# prepare the build environment.
source ${dir}/buildEnvironment.sh
if [ $? -ne 0 ]; then
echo "ERROR: Unable to prepare the build environment."
exit 1
fi
if [ "${1}" = "-64bit" ]; then
buildRPM "awips2-common-base"
buildCAVE
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-alertviz"
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-python"
buildRPM "awips2-python-cherrypy"
buildRPM "awips2-python-dynamicserialize"
buildRPM "awips2-python-h5py"
buildRPM "awips2-python-jimporter"
buildRPM "awips2-python-matplotlib"
buildRPM "awips2-python-nose"
buildRPM "awips2-python-numpy"
buildRPM "awips2-python-pil"
buildRPM "awips2-python-pmw"
buildRPM "awips2-python-pupynere"
buildRPM "awips2-python-qpid"
buildRPM "awips2-python-scientific"
buildRPM "awips2-python-scipy"
buildRPM "awips2-python-tables"
buildRPM "awips2-python-thrift"
buildRPM "awips2-python-tpg"
buildRPM "awips2-python-ufpy"
buildRPM "awips2-python-werkzeug"
buildRPM "awips2-python-pygtk"
buildRPM "awips2-python-pycairo"
buildJava
buildRPM "awips2-python-shapely"
buildRPM "awips2-notification"
exit 0
fi
# -postgres: build the PostgreSQL server and database rpms.
if [ "${1}" = "-postgres" ]; then
   # buildRPM exits the script on any failure, so a plain loop preserves
   # the original stop-on-first-error behavior.
   for pg_rpm in \
      "awips2-postgres" \
      "awips2-database-server-configuration" \
      "awips2-database-standalone-configuration" \
      "awips2-database" \
      "awips2-maps-database" \
      "awips2-ncep-database" \
      "awips2-pgadmin3"
   do
      buildRPM "${pg_rpm}"
   done
   exit 0
fi
# -delta: incremental build — CAVE, alertviz, EDEX, and the rpms that
# commonly change between releases (GFE suite, localization, etc.).
if [ "${1}" = "-delta" ]; then
buildRPM "awips2-common-base"
buildCAVE
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-alertviz"
buildEDEX
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-python-dynamicserialize"
buildRPM "awips2-python-ufpy"
buildRPM "awips2-cli"
buildRPM "awips2-data.hdf5-gfe.climo"
buildRPM "awips2-gfesuite-client"
buildRPM "awips2-gfesuite-server"
buildRPM "awips2-localapps-environment"
buildRPM "awips2-data.hdf5-topo"
buildRPM "awips2-data.gfe"
buildLocalizationRPMs
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-edex-environment"
buildRPM "awips2-notification"
exit 0
fi
# -full: build every AWIPS II rpm — CAVE, alertviz, EDEX, the python
# stack, GFE/data rpms, httpd-pypies, java, localization, and databases.
if [ "${1}" = "-full" ]; then
buildRPM "awips2-common-base"
buildCAVE
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-alertviz"
buildEDEX
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-python"
buildRPM "awips2-python-cherrypy"
buildRPM "awips2-python-dynamicserialize"
buildRPM "awips2-python-h5py"
buildRPM "awips2-python-jimporter"
buildRPM "awips2-python-matplotlib"
buildRPM "awips2-python-nose"
buildRPM "awips2-python-numpy"
buildRPM "awips2-python-pil"
buildRPM "awips2-python-pmw"
buildRPM "awips2-python-pupynere"
buildRPM "awips2-python-qpid"
buildRPM "awips2-python-scientific"
buildRPM "awips2-python-scipy"
buildRPM "awips2-python-tables"
buildRPM "awips2-python-thrift"
buildRPM "awips2-python-tpg"
buildRPM "awips2-python-ufpy"
buildRPM "awips2-python-werkzeug"
buildRPM "awips2-python-pygtk"
buildRPM "awips2-python-pycairo"
buildRPM "awips2-cli"
buildRPM "awips2-data.hdf5-gfe.climo"
buildRPM "awips2-gfesuite-client"
buildRPM "awips2-gfesuite-server"
buildRPM "awips2-localapps-environment"
buildRPM "awips2-data.hdf5-topo"
buildRPM "awips2-data.gfe"
# The httpd-pypies source must be unpacked before its rpm can be built.
unpackHttpdPypies
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-httpd-pypies"
buildJava
buildRPM "awips2-groovy"
buildLocalizationRPMs
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-edex-environment"
buildRPM "awips2-notification"
buildRPM "awips2-python-shapely"
buildRPM "awips2-postgres"
buildRPM "awips2-database"
buildRPM "awips2-maps-database"
buildRPM "awips2-ncep-database"
buildRPM "awips2-pgadmin3"
buildRPM "awips2-ldm"
exit 0
fi
# -ade: intentionally disabled — the 64-bit ADE is not supported, so the
# eclipse build below is unreachable (kept for when support is added).
if [ "${1}" = "-ade" ]; then
echo "INFO: AWIPS II currently does not support a 64-bit version of the ADE."
exit 0
buildRPM "awips2-eclipse"
exit 0
fi
# -viz: build only the visualization components (CAVE and alertviz).
if [ "${1}" = "-viz" ]; then
buildRPM "awips2-common-base"
buildCAVE
if [ $? -ne 0 ]; then
exit 1
fi
buildRPM "awips2-alertviz"
exit 0
fi
# -edex: build only the EDEX server components.
if [ "${1}" = "-edex" ]; then
buildRPM "awips2-common-base"
buildRPM "awips2-edex-environment"
buildEDEX
if [ $? -ne 0 ]; then
exit 1
fi
exit 0
fi
# -qpid: build the QPID message-broker rpms.
if [ "${1}" = "-qpid" ]; then
buildQPID
if [ $? -ne 0 ]; then
exit 1
fi
exit 0
fi
# -ldm: build the LDM data-manager rpm.
if [ "${1}" = "-ldm" ]; then
buildRPM "awips2-ldm"
exit 0
fi
# -awips2: build the top-level awips2 meta rpm.
if [ "${1}" = "-awips2" ]; then
buildRPM "awips2"
exit 0
fi
# Use the custom flag for selecting specific rpms to build
# NOTE: the commented-out entries below are toggled ad hoc for each
# one-off build; only the uncommented rpms are built by -custom.
if [ "${1}" = "-custom" ]; then
#unpackHttpdPypies
#if [ $? -ne 0 ]; then
# exit 1
#fi
#buildRPM "awips2-httpd-pypies"
#buildRPM "awips2-adapt-native"
#buildRPM "awips2-hydroapps-shared"
#buildRPM "awips2-ant"
buildRPM "awips2-python-dynamicserialize"
#buildRPM "awips2-java"
#buildRPM "awips2-tools"
exit 0
fi
# -package: collect every previously-built rpm into a versioned yum
# repository directory under ${WORKSPACE} and tar it for distribution.
# Fix: path variables are now quoted so paths containing whitespace do
# not word-split (the RPMS/* glob is deliberately left unquoted so it
# still expands).
if [ "${1}" = "-package" ]; then
   repository_directory="awips2-repository-${AWIPSII_VERSION}-${AWIPSII_RELEASE}"
   # Start from a clean staging directory.
   if [ -d "${WORKSPACE}/${repository_directory}" ]; then
      rm -rf "${WORKSPACE}/${repository_directory}"
      if [ $? -ne 0 ]; then
         exit 1
      fi
   fi
   mkdir -p "${WORKSPACE}/${repository_directory}/${AWIPSII_VERSION}-${AWIPSII_RELEASE}"
   if [ $? -ne 0 ]; then
      exit 1
   fi
   # Copy all built rpms (per-arch subdirectories) into the staging area.
   cp -r ${AWIPSII_TOP_DIR}/RPMS/* \
      "${WORKSPACE}/${repository_directory}/${AWIPSII_VERSION}-${AWIPSII_RELEASE}"
   if [ $? -ne 0 ]; then
      exit 1
   fi
   # Include the yum group definitions alongside the rpms.
   rpms_directory="${WORKSPACE}/rpms"
   comps_xml="${rpms_directory}/common/yum/arch.x86_64/comps.xml"
   cp -v "${comps_xml}" "${WORKSPACE}/${repository_directory}"
   if [ $? -ne 0 ]; then
      exit 1
   fi
   # Tar the repository from within ${WORKSPACE}, then restore the
   # caller's working directory regardless of the tar result.
   pushd . > /dev/null
   cd "${WORKSPACE}"
   tar -cvf "${repository_directory}.tar" "${repository_directory}"
   RC=$?
   popd > /dev/null
   if [ ${RC} -ne 0 ]; then
      exit 1
   fi
   exit 0
fi

# No recognized option was supplied: print usage and exit successfully.
usage
exit 0