Changes:

* Implement config file loader
* Add config-based database connector
* Remove database path argument from all tools in bin/
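For context, the script below now calls Config.load() and Database.from_config() instead of taking a database path argument. Those xmet internals are not part of this listing, so the following is only a minimal sketch of what the loader and connector might look like; the config path, file format, key names, and SQLite backing store are assumptions, not the actual xmet code:

    import json
    import os
    import sqlite3


    class Config:
        """Hypothetical config file loader; the real xmet.config may differ."""

        def __init__(self, data: dict):
            self.data = data

        @classmethod
        def load(cls):
            # Assumed default location and format (JSON); illustration only.
            path = os.path.expanduser('~/.config/xmet/config.json')

            with open(path) as fh:
                return cls(json.load(fh))


    class Database:
        """Hypothetical config-based connector; the real xmet.db may differ."""

        def __init__(self, conn: sqlite3.Connection):
            self.conn = conn

        @classmethod
        def from_config(cls, config: Config):
            # Assumes a SQLite backing store and a 'database' key holding the
            # path that tools in bin/ previously took as an argument.
            return cls(sqlite3.connect(config.data['database']))

With something along these lines in place, every tool in bin/ resolves the database path once from the config file rather than taking it on the command line.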
#! /usr/bin/env python3

import argparse

from xmet.config  import Config
from xmet.db      import Database
from xmet.s3      import S3Bucket
from xmet.storm   import StormEvent
from xmet.archive import Archive

parser = argparse.ArgumentParser(
    description = 'Archive NEXRAD Level II data from Amazon S3'
)

parser.add_argument('--quiet',   action='store_true', help='Suppress output')
parser.add_argument('--dry-run', action='store_true', help='Do not actually archive data')

group = parser.add_mutually_exclusive_group()
group.add_argument('--exclude', action='append', type=str, help='Exclude types of events from ingest')
group.add_argument('--type',    action='append', type=str, help='Specify only given types of events to ingest')

parser.add_argument('csv-event-details',  nargs='+', help='Compressed storm event details CSV file')
parser.add_argument('archive-dir',                   help='Target archive directory')

args = parser.parse_args()

config  = Config.load()
db      = Database.from_config(config)
bucket  = S3Bucket()
archive = Archive(getattr(args, 'archive-dir'), bucket)
exclude = None
types   = None

# Build lookup tables for the optional event type filters.
if args.exclude is not None:
    exclude = {s: True for s in args.exclude}

if args.type is not None:
    types = {s: True for s in args.type}

for path in getattr(args, 'csv-event-details'):
    for event in StormEvent.each_from_csv_file(path):
        # Apply the event type filters.
        if args.exclude is not None and event.event_type in exclude:
            continue

        if args.type is not None and event.event_type not in types:
            continue

        # Skip events lacking a complete coordinate pair or radar-significant
        # weather.
        if event.coord_start is None or event.coord_end is None:
            continue

        if not event.is_radar_significant():
            continue

        radars = event.nearby_radars(db)

        # Archive every bucket key matching a nearby radar within the event's
        # time window, unless it is already on disk.
        for key in bucket.each_matching_key(radars, event.timestamp_start, event.timestamp_end):
            if archive.is_downloaded(key):
                if not args.quiet:
                    print(f"event {event.id} key {key} type {event.event_type} already archived")
            else:
                if not args.quiet:
                    if args.dry_run:
                        print(f"event {event.id} key {key} type {event.event_type} would archive")
                    else:
                        print(f"event {event.id} key {key} type {event.event_type} archiving")

                if not args.dry_run:
                    archive.download(key)
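With the database path argument gone, a run of this tool needs only the storm event CSVs and a target directory. The script's actual name under bin/ isn't shown here, so the name and file paths below are placeholders:

    bin/archive-storm-events --type Tornado --dry-run StormEvents_2024.csv.gz /data/nexrad

Since --exclude and --type sit in a mutually exclusive group, the parser rejects invocations that pass both, and --dry-run reports what would be archived without ever calling archive.download().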