diff --git a/bin/nexrad-archive b/bin/nexrad-archive
index 67f5810..fcdc0e9 100755
--- a/bin/nexrad-archive
+++ b/bin/nexrad-archive
@@ -13,8 +13,9 @@ parser = argparse.ArgumentParser(
 parser.add_argument('--quiet', action='store_true', help='Suppress output')
 parser.add_argument('--dry-run', action='store_true', help='Do not actually archive data')
+parser.add_argument('--exclude', action='append', type=str, default=[], help='Exclude types of reports from ingest')
 parser.add_argument('db', help='SQLite3 NEXRAD radar site database')
-parser.add_argument('csv-report-details', nargs='+', help='Compressed storm report details CSV file')
+parser.add_argument('csv-report-details', nargs='+', help='Compressed storm report details CSV file')
 parser.add_argument('archive-dir', help='Target archive directory')
 
 args = parser.parse_args()
 
@@ -22,9 +23,16 @@ args = parser.parse_args()
 db = Database.connect(args.db)
 bucket = S3Bucket()
 archive = Archive(getattr(args, 'archive-dir'), bucket)
+exclude = set()
+
+for event_type in args.exclude:
+    exclude.add(event_type)
 
 for path in getattr(args, 'csv-report-details'):
     for report in StormReport.each_from_csv_file(path):
+        if report.event_type in exclude:
+            continue
+
         if report.coord_start is None or report.coord_end is None:
             continue
 
@@ -36,13 +44,13 @@ for path in getattr(args, 'csv-report-details'):
         for key in bucket.each_matching_key(radars, report.timestamp_start, report.timestamp_end):
             if archive.is_downloaded(key):
                 if not args.quiet:
-                    print(f"event {report.id} key {key} already archived")
+                    print(f"event {report.id} key {key} type {report.event_type} already archived")
             else:
                 if not args.quiet:
                     if args.dry_run:
-                        print(f"event {report.id} key {key} would archive")
+                        print(f"event {report.id} key {key} type {report.event_type} would archive")
                     else:
-                        print(f"event {report.id} key {key} archiving")
+                        print(f"event {report.id} key {key} type {report.event_type} archiving")
 
                 if not args.dry_run:
                     archive.download(key)
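
For reference, a minimal sketch of how the new --exclude option is expected to behave once this patch is applied; the event type strings and file paths below are illustrative examples only, not values taken from the storm report data:

    import argparse

    # action='append' collects each repeated --exclude occurrence into a list;
    # default=[] keeps the later iteration safe when the flag is never passed.
    parser = argparse.ArgumentParser()
    parser.add_argument('--exclude', action='append', type=str, default=[])

    args = parser.parse_args(['--exclude', 'Hail', '--exclude', 'Thunderstorm Wind'])
    exclude = set(args.exclude)

    # reports whose event_type appears in the set are skipped during ingest
    print('Hail' in exclude)     # True
    print('Tornado' in exclude)  # False

A typical invocation might then look like:

    nexrad-archive --exclude Hail --exclude 'Thunderstorm Wind' sites.db details.csv.gz archive/

where sites.db, details.csv.gz, and archive/ stand in for the real radar site database, compressed report CSV, and archive directory paths.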