Commit e750d1a6 authored by Klaus Zimmermann

Improve help (closes #103)

parent c5004113
--- a/climix/__init__.py
+++ b/climix/__init__.py
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
 
-from .main import main
-
 from ._version import get_versions
 __version__ = get_versions()['version']
 del get_versions
@@ -9,5 +7,4 @@ del get_versions
 
 __all__ = [
     '__version__',
-    'main',
 ]
--- /dev/null
+++ b/climix/datahandling.py
+# -*- coding: utf-8 -*-
+
+import logging
+import threading
+import time
+
+import iris
+from iris.experimental.equalise_cubes import equalise_attributes
+import netCDF4
+import numpy as np
+
+MISSVAL = 1.0e20
+
+
+def ignore_cb(cube, field, filename):
+    cube.attributes.pop('creation_date', None)
+    cube.attributes.pop('tracking_id', None)
+
+
+def prepare_input_data(datafiles):
+    datacube = iris.load_raw(datafiles, callback=ignore_cb)
+    iris.util.unify_time_units(datacube)
+    equalise_attributes(datacube)
+    cube = datacube.concatenate_cube()
+    return cube
+
+
+def save(result, output_filename, sliced_mode=False):
+    if sliced_mode:
+        logging.info('Performing aggregation in sliced mode')
+        data = result.core_data()
+        logging.info('creating empty data')
+        result.data = np.empty(data.shape, data.dtype)
+        result.data
+        logging.info('saving empty cube')
+        iris.save(result, output_filename, fill_value=MISSVAL,
+                  local_keys=['proposed_standard_name'])
+        logging.info('opening')
+        result.data = data
+        with netCDF4.Dataset(output_filename, 'a') as ds:
+            var = ds[result.var_name]
+            time_dim = result.coord_dims('time')[0]
+            no_slices = result.shape[time_dim]
+
+            def store(i, data):
+                var[i, ...] = data
+            thread = threading.Thread()
+            thread.start()
+            start = time.time()
+            for i, result_cube in enumerate(result.slices_over(time_dim)):
+                logging.info(f'Starting with {result_cube.coord("time")}')
+                result_cube.data
+                logging.info('Waiting for previous save to finish')
+                thread.join()
+                thread = threading.Thread(target=store,
+                                          args=(i, result_cube.data))
+                thread.start()
+                end = time.time()
+                partial = end - start
+                total_estimate = partial/(i+1)*no_slices
+                logging.info(f'Finished {i+1}/{no_slices} in {partial:4f}s. '
+                             f'Estimated total time is {total_estimate:4f}s.')
+            thread.join()
+    else:
+        logging.info('Performing aggregation in normal mode')
+        iris.save(result, output_filename, fill_value=MISSVAL,
+                  local_keys=['proposed_standard_name'])
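
The new climix.datahandling module collects the I/O helpers that previously lived in main.py (see the removals below). A minimal usage sketch follows; the input and output file names are hypothetical, but prepare_input_data and save are the functions added above:

from climix.datahandling import prepare_input_data, save

# prepare_input_data() loads every file as a raw cube, strips the per-file
# attributes (creation_date, tracking_id) via the ignore_cb callback so they
# cannot block concatenation, unifies the time units, and concatenates the
# pieces into a single cube.
cube = prepare_input_data(['tas_day_20000101-20041231.nc',
                           'tas_day_20050101-20091231.nc'])

# In sliced mode, save() first writes an empty cube to lay out the netCDF
# file, then reopens it and streams one time slice at a time; a background
# thread writes slice i while slice i+1 is being computed. The bare
# `result.data` statements above force evaluation of the lazy data at the
# intended points.
save(cube, 'tas_concatenated.nc', sliced_mode=True)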
--- a/climix/main.py
+++ b/climix/main.py
@@ -2,30 +2,24 @@
 # -*- coding: utf-8 -*-
 
 import argparse
-import logging
 import os
-import threading
 import time
 
-import iris
-from iris.experimental.equalise_cubes import equalise_attributes
-import netCDF4
-import numpy as np
 import sentry_sdk
 
-import climix
+from . import __version__
 from .dask_setup import SCHEDULERS, setup_scheduler
+from .datahandling import prepare_input_data, save
 from .metadata import load_metadata
 
+import logging
 logging.basicConfig(level=logging.INFO)
 
-MISSVAL = 1.0e20
-
 
 def parse_args():
     parser = argparse.ArgumentParser(
-        description=(f'A climate index thing, version {climix.__version__}.'))
+        description=(f'A climate index thing, version {__version__}.'))
     parser.add_argument('-e', '--deactivate-error-tracking',
                         action='store_true',
                         help='deactivate sentry based error tracking')
@@ -41,8 +35,10 @@ def parse_args():
                         help='output filename')
     parser.add_argument('-x', '--index', action='append',
                         required=True, metavar='INDEX', dest='indices',
-                        help='the index to calculcate')
-    parser.add_argument('datafiles', nargs='+', metavar="DATAFILE",
+                        help='the index to calculcate '
+                             '(use "-x list" to get a list '
+                             'of all available indices)')
+    parser.add_argument('datafiles', nargs='*', metavar="DATAFILE",
                         help='the input data files')
     return parser.parse_args()
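
The -x help now advertises a "list" pseudo-index, and datafiles moves from nargs='+' to nargs='*' so the listing can be requested without any input files. A self-contained sketch of the new argument behaviour (parser options copied from the hunk above; the parse_args input is hypothetical):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-x', '--index', action='append', required=True,
                    metavar='INDEX', dest='indices')
parser.add_argument('datafiles', nargs='*', metavar='DATAFILE')

# "-x list" with no DATAFILE arguments now parses cleanly; under the old
# nargs='+' argparse would exit with an error about the missing DATAFILE.
args = parser.parse_args(['-x', 'list'])
assert args.indices == ['list'] and args.datafiles == []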
@@ -53,19 +49,6 @@ def init_sentry():
         "https://d3ac73a62877407b848dfc3f318bed85@sentry.io/1458386")
 
 
-def ignore_cb(cube, field, filename):
-    cube.attributes.pop('creation_date', None)
-    cube.attributes.pop('tracking_id', None)
-
-
-def prepare_input_data(datafiles):
-    datacube = iris.load_raw(datafiles, callback=ignore_cb)
-    iris.util.unify_time_units(datacube)
-    equalise_attributes(datacube)
-    cube = datacube.concatenate_cube()
-    return cube
-
-
 def guess_output_template(datafiles):
     output_template = '{var_name}_{frequency}.nc'
@@ -106,51 +89,8 @@ def build_output_filename(index, datafiles, output_template):
                                   **index.output_metadata.drs)
 
 
-def save(result, output_filename, sliced_mode=False):
-    if sliced_mode:
-        logging.info('Performing aggregation in sliced mode')
-        data = result.core_data()
-        logging.info('creating empty data')
-        result.data = np.empty(data.shape, data.dtype)
-        result.data
-        logging.info('saving empty cube')
-        iris.save(result, output_filename, fill_value=MISSVAL,
-                  local_keys=['proposed_standard_name'])
-        logging.info('opening')
-        result.data = data
-        with netCDF4.Dataset(output_filename, 'a') as ds:
-            var = ds[result.var_name]
-            time_dim = result.coord_dims('time')[0]
-            no_slices = result.shape[time_dim]
-
-            def store(i, data):
-                var[i, ...] = data
-            thread = threading.Thread()
-            thread.start()
-            start = time.time()
-            for i, result_cube in enumerate(result.slices_over(time_dim)):
-                logging.info(f'Starting with {result_cube.coord("time")}')
-                result_cube.data
-                logging.info('Waiting for previous save to finish')
-                thread.join()
-                thread = threading.Thread(target=store,
-                                          args=(i, result_cube.data))
-                thread.start()
-                end = time.time()
-                partial = end - start
-                total_estimate = partial/(i+1)*no_slices
-                logging.info(f'Finished {i+1}/{no_slices} in {partial:4f}s. '
-                             f'Estimated total time is {total_estimate:4f}s.')
-            thread.join()
-    else:
-        logging.info('Performing aggregation in normal mode')
-        iris.save(result, output_filename, fill_value=MISSVAL,
-                  local_keys=['proposed_standard_name'])
-
-
-def do_main(requested_indices, datafiles, output_template, sliced_mode):
-    logging.info('Loading metadata')
-    index_catalog = load_metadata()
+def do_main(index_catalog, requested_indices, datafiles,
+            output_template, sliced_mode):
     logging.info('Preparing indices')
     indices = index_catalog.prepare_indices(requested_indices)
     for index in indices:
@@ -171,11 +111,18 @@ def main():
     args = parse_args()
     if not args.deactivate_error_tracking:
         init_sentry()
+    logging.info('Loading metadata')
+    index_catalog = load_metadata()
+    if 'list' in args.indices:
+        print(list(index_catalog.get_list()))
+        return
     with setup_scheduler(args):
         logging.info('Scheduler ready; starting main program.')
         start = time.time()
         try:
-            do_main(args.indices, args.datafiles,
+            do_main(index_catalog, args.indices, args.datafiles,
                     args.output_template, args.sliced_mode)
         finally:
             end = time.time()
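
Loading the metadata catalog moves out of do_main() into main(), ahead of scheduler setup, so "-x list" can be answered immediately without building a scheduler at all. A condensed sketch of the reworked main() body (names as in the hunks above; get_list() is assumed to yield the names of all available indices):

args = parse_args()
index_catalog = load_metadata()           # previously inside do_main()
if 'list' in args.indices:
    # "-x list" short-circuits here, before setup_scheduler() is entered.
    print(list(index_catalog.get_list()))
    return
with setup_scheduler(args):
    do_main(index_catalog, args.indices, args.datafiles,
            args.output_template, args.sliced_mode)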
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@ setuptools.setup(
     },
     entry_points={
         'console_scripts': [
-            'climix=climix:main',
+            'climix=climix.main:main',
             'climix-editor=climix.editor:main [editor]',
         ],
         'climix.index_functions': [
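
The console-script target changes from climix:main to climix.main:main, matching the removal of the main re-export from climix/__init__.py in the first hunk. A presumable side effect is that importing the bare package stays lightweight:

# Assumed consequence of the entry-point change: importing the package no
# longer pulls in climix.main and its dependencies.
import climix                   # no longer runs "from .main import main"
print(climix.__version__)       # version metadata remains available

from climix.main import main    # what the console script now resolves to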