From 23bf35655f9d6bd0215dba024cadc09187c2ac40 Mon Sep 17 00:00:00 2001 From: Karl Kosack Date: Wed, 20 May 2020 14:32:29 +0200 Subject: [PATCH] reformat with black (#1345) * add pre-commit hook for black * reformat alll with black --- .pre-commit-config.yaml | 5 + ctapipe/__init__.py | 1 + ctapipe/analysis/camera/charge_resolution.py | 52 ++- .../camera/tests/test_charge_resolution.py | 41 +- ctapipe/calib/camera/__init__.py | 2 +- ctapipe/calib/camera/calibrator.py | 5 +- ctapipe/calib/camera/flatfield.py | 120 +++--- ctapipe/calib/camera/gainselection.py | 23 +- ctapipe/calib/camera/pedestals.py | 101 ++--- ctapipe/calib/camera/tests/test_calibrator.py | 14 +- ctapipe/calib/camera/tests/test_flatfield.py | 34 +- .../calib/camera/tests/test_gainselection.py | 8 +- ctapipe/conftest.py | 10 +- ctapipe/containers.py | 46 ++- ctapipe/coordinates/__init__.py | 30 +- ctapipe/coordinates/camera_frame.py | 48 +-- ctapipe/coordinates/ground_frames.py | 20 +- ctapipe/coordinates/nominal_frame.py | 25 +- ctapipe/coordinates/representation.py | 28 +- ctapipe/coordinates/telescope_frame.py | 23 +- ctapipe/coordinates/tests/test_coordinates.py | 50 ++- .../tests/test_engineering_frame.py | 8 +- .../coordinates/tests/test_nominal_frame.py | 30 +- ctapipe/coordinates/tests/test_roundtrip.py | 3 +- .../coordinates/tests/test_telescope_frame.py | 2 +- ctapipe/core/__init__.py | 2 +- ctapipe/core/logging.py | 14 +- ctapipe/core/plugins.py | 5 +- ctapipe/core/provenance.py | 104 +++--- ctapipe/core/support.py | 1 + ctapipe/core/tests/__init__.py | 2 +- ctapipe/core/tests/test_container.py | 4 +- ctapipe/core/tests/test_provenance.py | 17 +- ctapipe/core/tests/test_tool.py | 12 +- ctapipe/core/tests/test_traits.py | 30 +- ctapipe/core/tool.py | 4 +- ctapipe/core/traits.py | 36 +- ctapipe/image/__init__.py | 4 +- ctapipe/image/cleaning.py | 18 +- ctapipe/image/concentration.py | 8 +- ctapipe/image/extractor.py | 29 +- ctapipe/image/geometry_converter.py | 6 +- ctapipe/image/geometry_converter_astri.py | 92 ++--- ctapipe/image/hillas.py | 22 +- ctapipe/image/morphology.py | 4 +- ctapipe/image/muon/__init__.py | 6 +- ctapipe/image/muon/features.py | 24 +- ctapipe/image/muon/intensity_fitter.py | 148 ++++---- ctapipe/image/muon/ring_fitter.py | 2 +- .../image/muon/tests/test_intensity_fit.py | 33 +- .../image/muon/tests/test_muon_features.py | 20 +- ctapipe/image/pixel_likelihood.py | 70 ++-- ctapipe/image/reducer.py | 19 +- ctapipe/image/statistics.py | 4 +- ctapipe/image/tests/test_concentration.py | 6 +- ctapipe/image/tests/test_extractor.py | 15 +- ctapipe/image/tests/test_hillas.py | 75 ++-- ctapipe/image/tests/test_leakage.py | 2 +- ctapipe/image/tests/test_morphology.py | 1 + ctapipe/image/tests/test_pixel_likelihood.py | 20 +- ctapipe/image/tests/test_reducer.py | 27 +- ctapipe/image/tests/test_timing_parameters.py | 11 +- ctapipe/image/tests/test_toy.py | 8 +- ctapipe/image/timing.py | 14 +- ctapipe/image/toymodel.py | 5 +- ctapipe/instrument/__init__.py | 16 +- ctapipe/instrument/atmosphere.py | 21 +- ctapipe/instrument/camera/__init__.py | 2 +- ctapipe/instrument/camera/description.py | 6 +- ctapipe/instrument/camera/geometry.py | 260 +++++++------ ctapipe/instrument/camera/readout.py | 70 ++-- .../camera/tests/test_description.py | 4 +- .../instrument/camera/tests/test_geometry.py | 125 +++---- .../instrument/camera/tests/test_readout.py | 22 +- ctapipe/instrument/subarray.py | 4 +- ctapipe/instrument/telescope.py | 5 +- ctapipe/instrument/tests/__init__.py | 1 - 
ctapipe/instrument/tests/test_guessing.py | 12 +- ctapipe/instrument/tests/test_subarray.py | 8 +- ctapipe/instrument/tests/test_telescope.py | 9 +- ctapipe/io/__init__.py | 20 +- ctapipe/io/array.py | 8 +- ctapipe/io/datalevels.py | 3 +- ctapipe/io/eventseeker.py | 30 +- ctapipe/io/eventsource.py | 8 +- ctapipe/io/files.py | 6 +- ctapipe/io/hdf5tableio.py | 18 +- ctapipe/io/simteleventsource.py | 38 +- ctapipe/io/tableio.py | 2 +- ctapipe/io/tests/test_event_source.py | 20 +- ctapipe/io/tests/test_eventseeker.py | 10 +- ctapipe/io/tests/test_files.py | 16 +- ctapipe/io/tests/test_hdf5.py | 96 ++--- ctapipe/io/tests/test_simteleventsource.py | 4 +- ctapipe/io/tests/test_toysource.py | 12 +- ctapipe/io/toymodel.py | 38 +- ctapipe/plotting/bokeh_event_viewer.py | 40 +- ctapipe/plotting/charge_resolution.py | 100 +++-- .../plotting/tests/test_bokeh_event_viewer.py | 8 +- .../plotting/tests/test_charge_resolution.py | 23 +- ctapipe/reco/HillasReconstructor.py | 62 ++-- ctapipe/reco/ImPACT.py | 351 +++++++++++------- ctapipe/reco/__init__.py | 9 +- ctapipe/reco/energy_regressor.py | 23 +- ctapipe/reco/hillas_intersection.py | 153 ++++---- ctapipe/reco/reco_algorithms.py | 2 +- ctapipe/reco/regressor_classifier_base.py | 46 ++- ctapipe/reco/shower_max.py | 6 +- .../reco/tests/test_HillasReconstructor.py | 35 +- ctapipe/reco/tests/test_ImPACT.py | 153 ++++---- ctapipe/reco/tests/test_energy_regressor.py | 26 +- .../reco/tests/test_hillas_intersection.py | 108 +++--- .../reco/tests/test_reconstruction_methods.py | 17 +- .../tests/test_regressor_classifier_base.py | 61 ++- ctapipe/reco/tests/test_showermaxestimator.py | 10 +- ctapipe/tests/setup_package.py | 3 +- ctapipe/tools/__init__.py | 1 - ctapipe/tools/bokeh/file_viewer.py | 153 ++++---- ctapipe/tools/camdemo.py | 141 ++++--- ctapipe/tools/display_dl1.py | 22 +- ctapipe/tools/display_events_single_tel.py | 95 +++-- ctapipe/tools/display_integrator.py | 19 +- ctapipe/tools/display_summed_images.py | 52 ++- ctapipe/tools/dump_instrument.py | 71 ++-- ctapipe/tools/dump_triggers.py | 86 +++-- ctapipe/tools/extract_charge_resolution.py | 26 +- ctapipe/tools/info.py | 134 ++++--- ctapipe/tools/muon_reconstruction.py | 180 ++++----- ctapipe/tools/plot_charge_resolution.py | 24 +- ctapipe/tools/stage1.py | 25 +- ctapipe/tools/tests/test_tools.py | 144 +++---- ctapipe/tools/utils.py | 32 +- ctapipe/utils/__init__.py | 26 +- ctapipe/utils/astro.py | 30 +- ctapipe/utils/datasets.py | 51 +-- ctapipe/utils/fitshistogram.py | 76 ++-- ctapipe/utils/linalg.py | 8 +- ctapipe/utils/rgbtohex.py | 13 +- ctapipe/utils/table_interpolator.py | 18 +- .../utils/template_network_interpolator.py | 11 +- ctapipe/utils/tests/test_CutFlow.py | 37 +- ctapipe/utils/tests/test_astro.py | 7 +- ctapipe/utils/tests/test_datasets.py | 37 +- ctapipe/utils/tests/test_fitshistogram.py | 27 +- ctapipe/utils/tests/test_linalg.py | 8 +- ctapipe/utils/tests/test_rgbtohex.py | 2 +- .../tests/test_unstructured_interpolator.py | 93 ++--- ctapipe/utils/unstructured_interpolator.py | 45 ++- ctapipe/version.py | 29 +- ctapipe/visualization/__init__.py | 2 +- ctapipe/visualization/bokeh.py | 127 ++++--- ctapipe/visualization/mpl_array.py | 10 +- ctapipe/visualization/mpl_camera.py | 115 +++--- ctapipe/visualization/tests/test_bokeh.py | 25 +- environment.yml | 1 + examples/calc_pedestals.py | 6 +- examples/camera_animation.py | 10 +- examples/camera_display_multi.py | 24 +- examples/camera_norms.py | 22 +- examples/camera_rotation.py | 4 +- examples/coordinate_transformations.py | 15 +- 
examples/highlight_pixel.py | 18 +- examples/instrument_info.py | 4 +- examples/load_one_event.py | 2 +- examples/plot_all_cameras.py | 4 +- examples/plot_array_hillas.py | 22 +- examples/plot_camera_frames.py | 16 +- examples/plot_hillas_parameters.py | 20 +- examples/plot_showers_in_nominal.py | 20 +- examples/plot_theta_square.py | 26 +- examples/simple_event_writer.py | 57 +-- examples/stereo_reconstruction.py | 35 +- 172 files changed, 3152 insertions(+), 2939 deletions(-) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..efc691ef47b --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,5 @@ +repos: + - repo: https://github.com/psf/black + rev: 19.3b0 + hooks: + - id: black \ No newline at end of file diff --git a/ctapipe/__init__.py b/ctapipe/__init__.py index f5f88d12d46..1346ad4e041 100644 --- a/ctapipe/__init__.py +++ b/ctapipe/__init__.py @@ -4,4 +4,5 @@ """ from . import version + __version__ = version.get_version(pep440=False) diff --git a/ctapipe/analysis/camera/charge_resolution.py b/ctapipe/analysis/camera/charge_resolution.py index 2bf7cee1e9a..36abf609de3 100644 --- a/ctapipe/analysis/camera/charge_resolution.py +++ b/ctapipe/analysis/camera/charge_resolution.py @@ -1,7 +1,7 @@ import numpy as np import pandas as pd -__all__ = ['ChargeResolutionCalculator'] +__all__ = ["ChargeResolutionCalculator"] class ChargeResolutionCalculator: @@ -41,7 +41,7 @@ def __init__(self, mc_true=True): self._df_list = [] self._df = pd.DataFrame() self._n_bytes = 0 - self._max_bytes = 1E9 + self._max_bytes = 1e9 @staticmethod def rmse_abs(sum_, n): @@ -57,8 +57,7 @@ def charge_res_abs(true, sum_, n): @staticmethod def charge_res(true, sum_, n): - return (ChargeResolutionCalculator.charge_res_abs(true, sum_, n) - / np.abs(true)) + return ChargeResolutionCalculator.charge_res_abs(true, sum_, n) / np.abs(true) def add(self, pixel, true, measured): """ @@ -73,13 +72,8 @@ def add(self, pixel, true, measured): measured : ndarray 1D array containing the measured charge for each entry """ - diff2 = (measured - true)**2 - df = pd.DataFrame(dict( - pixel=pixel, - true=true, - sum=diff2, - n=np.uint32(1) - )) + diff2 = (measured - true) ** 2 + df = pd.DataFrame(dict(pixel=pixel, true=true, sum=diff2, n=np.uint32(1))) self._df_list.append(df) self._n_bytes += df.memory_usage(index=True, deep=True).sum() if self._n_bytes > self._max_bytes: @@ -91,7 +85,7 @@ def _amalgamate(self): values per pixel and true charge in order to reduce memory use. 
""" self._df = pd.concat([self._df, *self._df_list], ignore_index=True) - self._df = self._df.groupby(['pixel', 'true']).sum().reset_index() + self._df = self._df.groupby(["pixel", "true"]).sum().reset_index() self._n_bytes = 0 self._df_list = [] @@ -109,28 +103,28 @@ def finish(self): """ self._amalgamate() - self._df = self._df.loc[self._df['true'] != 0] + self._df = self._df.loc[self._df["true"] != 0] df_p = self._df.copy() - true = df_p['true'].values - sum_ = df_p['sum'].values - n = df_p['n'].values + true = df_p["true"].values + sum_ = df_p["sum"].values + n = df_p["n"].values if self._mc_true: - df_p['charge_resolution'] = self.charge_res(true, sum_, n) - df_p['charge_resolution_abs'] = self.charge_res_abs(true, sum_, n) + df_p["charge_resolution"] = self.charge_res(true, sum_, n) + df_p["charge_resolution_abs"] = self.charge_res_abs(true, sum_, n) else: - df_p['charge_resolution'] = self.rmse(true, sum_, n) - df_p['charge_resolution_abs'] = self.rmse_abs(sum_, n) - df_c = self._df.copy().groupby('true').sum().reset_index() - df_c = df_c.drop(columns='pixel') - true = df_c['true'].values - sum_ = df_c['sum'].values - n = df_c['n'].values + df_p["charge_resolution"] = self.rmse(true, sum_, n) + df_p["charge_resolution_abs"] = self.rmse_abs(sum_, n) + df_c = self._df.copy().groupby("true").sum().reset_index() + df_c = df_c.drop(columns="pixel") + true = df_c["true"].values + sum_ = df_c["sum"].values + n = df_c["n"].values if self._mc_true: - df_c['charge_resolution'] = self.charge_res(true, sum_, n) - df_c['charge_resolution_abs'] = self.charge_res_abs(true, sum_, n) + df_c["charge_resolution"] = self.charge_res(true, sum_, n) + df_c["charge_resolution_abs"] = self.charge_res_abs(true, sum_, n) else: - df_c['charge_resolution'] = self.rmse(true, sum_, n) - df_c['charge_resolution_abs'] = self.rmse_abs(sum_, n) + df_c["charge_resolution"] = self.rmse(true, sum_, n) + df_c["charge_resolution_abs"] = self.rmse_abs(sum_, n) return df_p, df_c diff --git a/ctapipe/analysis/camera/tests/test_charge_resolution.py b/ctapipe/analysis/camera/tests/test_charge_resolution.py index cdb2240507d..e8eb3b56940 100644 --- a/ctapipe/analysis/camera/tests/test_charge_resolution.py +++ b/ctapipe/analysis/camera/tests/test_charge_resolution.py @@ -1,5 +1,4 @@ -from ctapipe.analysis.camera.charge_resolution import \ - ChargeResolutionCalculator +from ctapipe.analysis.camera.charge_resolution import ChargeResolutionCalculator import numpy as np from numpy.testing import assert_almost_equal @@ -44,12 +43,14 @@ def test_finish(): chargeres.add(0, true_charge, measured_charge) chargeres.add(0, true_charge, measured_charge) df_p, df_c = chargeres.finish() - assert np.array_equal(df_p['charge_resolution'].values, - df_c['charge_resolution'].values) + assert np.array_equal( + df_p["charge_resolution"].values, df_c["charge_resolution"].values + ) chargeres.add(1, true_charge, measured_charge) df_p, df_c = chargeres.finish() - assert not np.array_equal(df_p['charge_resolution'].values, - df_c['charge_resolution'].values) + assert not np.array_equal( + df_p["charge_resolution"].values, df_c["charge_resolution"].values + ) def test_calculation(): @@ -58,7 +59,7 @@ def test_calculation(): true = 3 n = measured.size - sum_ = np.sum((measured - true)**2) + sum_ = np.sum((measured - true) ** 2) assert_almost_equal(sum_, 0.34, 3) assert_almost_equal(chargeres.rmse_abs(sum_, n), 0.412, 3) assert_almost_equal(chargeres.rmse(true, sum_, n), 0.137, 3) @@ -66,8 +67,10 @@ def test_calculation(): 
assert_almost_equal(chargeres.charge_res(true, sum_, n), 0.593, 3) assert chargeres.rmse_abs(sum_, n) == chargeres.rmse(true, sum_, n) * true - assert (chargeres.charge_res_abs(true, sum_, n) == - chargeres.charge_res(true, sum_, n) * true) + assert ( + chargeres.charge_res_abs(true, sum_, n) + == chargeres.charge_res(true, sum_, n) * true + ) def test_result(): @@ -75,14 +78,14 @@ def test_result(): measured = np.array([3.5, 2.7]) true = 3 n = measured.size - sum_ = np.sum((measured - true)**2) + sum_ = np.sum((measured - true) ** 2) charge_res.add(0, true, measured) df_p, df_c = charge_res.finish() - resolution = df_p['charge_resolution'].values[0] - assert (resolution == charge_res.rmse(true, sum_, n)) - resolution_abs = df_p['charge_resolution_abs'].values[0] - assert (resolution_abs == charge_res.rmse_abs(sum_, n)) + resolution = df_p["charge_resolution"].values[0] + assert resolution == charge_res.rmse(true, sum_, n) + resolution_abs = df_p["charge_resolution_abs"].values[0] + assert resolution_abs == charge_res.rmse_abs(sum_, n) def test_result_mc_true(): @@ -90,11 +93,11 @@ def test_result_mc_true(): measured = np.array([3.5, 2.7]) true = 3 n = measured.size - sum_ = np.sum((measured - true)**2) + sum_ = np.sum((measured - true) ** 2) chargeres.add(0, true, measured) df_p, df_c = chargeres.finish() - assert (df_p['charge_resolution'].values[0] == - chargeres.charge_res(true, sum_, n)) - assert (df_p['charge_resolution_abs'].values[0] == - chargeres.charge_res_abs(true, sum_, n)) + assert df_p["charge_resolution"].values[0] == chargeres.charge_res(true, sum_, n) + assert df_p["charge_resolution_abs"].values[0] == chargeres.charge_res_abs( + true, sum_, n + ) diff --git a/ctapipe/calib/camera/__init__.py b/ctapipe/calib/camera/__init__.py index 97baae6fa32..861d256f299 100644 --- a/ctapipe/calib/camera/__init__.py +++ b/ctapipe/calib/camera/__init__.py @@ -6,4 +6,4 @@ from .calibrator import CameraCalibrator from .gainselection import GainSelector -__all__ = ['CameraCalibrator', 'GainSelector'] +__all__ = ["CameraCalibrator", "GainSelector"] diff --git a/ctapipe/calib/camera/calibrator.py b/ctapipe/calib/camera/calibrator.py index 63e9e5535d7..39805c1079a 100644 --- a/ctapipe/calib/camera/calibrator.py +++ b/ctapipe/calib/camera/calibrator.py @@ -25,7 +25,7 @@ def __init__( parent=None, data_volume_reducer=None, image_extractor=None, - **kwargs + **kwargs, ): """ Parameters @@ -100,7 +100,8 @@ def _calibrate_dl0(self, event, telid): return reduced_waveforms_mask = self.data_volume_reducer( - waveforms, telid=telid, selected_gain_channel=selected_gain_channel) + waveforms, telid=telid, selected_gain_channel=selected_gain_channel + ) waveforms_copy = waveforms.copy() waveforms_copy[~reduced_waveforms_mask] = 0 diff --git a/ctapipe/calib/camera/flatfield.py b/ctapipe/calib/camera/flatfield.py index acd3ae38940..921c0abc113 100644 --- a/ctapipe/calib/camera/flatfield.py +++ b/ctapipe/calib/camera/flatfield.py @@ -10,10 +10,7 @@ from ctapipe.core.traits import Int, Unicode, List -__all__ = [ - 'FlatFieldCalculator', - 'FlasherFlatFieldCalculator' -] +__all__ = ["FlatFieldCalculator", "FlasherFlatFieldCalculator"] class FlatFieldCalculator(Component): @@ -48,24 +45,13 @@ class FlatFieldCalculator(Component): """ tel_id = Int( - 0, - help='id of the telescope to calculate the flat-field coefficients' - ).tag(config=True) - sample_duration = Int( - 60, - help='sample duration in seconds' - ).tag(config=True) - sample_size = Int( - 10000, - help='sample size' - ).tag(config=True) - n_channels 
= Int( - 2, - help='number of channels to be treated' + 0, help="id of the telescope to calculate the flat-field coefficients" ).tag(config=True) + sample_duration = Int(60, help="sample duration in seconds").tag(config=True) + sample_size = Int(10000, help="sample size").tag(config=True) + n_channels = Int(2, help="number of channels to be treated").tag(config=True) charge_product = Unicode( - 'LocalPeakWindowSum', - help='Name of the charge extractor to be used' + "LocalPeakWindowSum", help="Name of the charge extractor to be used" ).tag(config=True) def __init__(self, subarray, **kwargs): @@ -105,9 +91,7 @@ def __init__(self, subarray, **kwargs): super().__init__(**kwargs) # load the waveform charge extractor self.extractor = ImageExtractor.from_name( - self.charge_product, - config=self.config, - subarray=subarray, + self.charge_product, config=self.config, subarray=subarray, ) self.log.info(f"extractor {self.extractor}") @@ -147,11 +131,10 @@ class FlasherFlatFieldCalculator(FlatFieldCalculator): charge_cut_outliers = List( [-0.3, 0.3], - help='Interval of accepted charge values (fraction with respect to camera median value)' + help="Interval of accepted charge values (fraction with respect to camera median value)", ).tag(config=True) time_cut_outliers = List( - [0, 60], - help='Interval (in waveform samples) of accepted time values' + [0, 60], help="Interval (in waveform samples) of accepted time values" ).tag(config=True) def __init__(self, **kwargs): @@ -225,14 +208,16 @@ def calculate_relative_gain(self, event): self.num_events_seen = 0 # real data - if event.meta['origin'] != 'hessio': + if event.meta["origin"] != "hessio": trigger_time = event.r1.tel[self.tel_id].trigger_time hardware_or_pedestal_mask = np.logical_or( event.mon.tel[self.tel_id].pixel_status.hardware_failing_pixels, - event.mon.tel[self.tel_id].pixel_status.pedestal_failing_pixels) + event.mon.tel[self.tel_id].pixel_status.pedestal_failing_pixels, + ) pixel_mask = np.logical_or( hardware_or_pedestal_mask, - event.mon.tel[self.tel_id].pixel_status.flatfield_failing_pixels) + event.mon.tel[self.tel_id].pixel_status.flatfield_failing_pixels, + ) else: # patches for MC data if event.trig.tels_with_trigger: @@ -260,19 +245,17 @@ def calculate_relative_gain(self, event): or self.num_events_seen == self.sample_size ): relative_gain_results = self.calculate_relative_gain_results( - self.charge_medians, - self.charges, - self.sample_masked_pixels + self.charge_medians, self.charges, self.sample_masked_pixels ) time_results = self.calculate_time_results( self.arrival_times, self.sample_masked_pixels, self.time_start, - trigger_time + trigger_time, ) result = { - 'n_events': self.num_events_seen, + "n_events": self.num_events_seen, **relative_gain_results, **time_results, } @@ -313,17 +296,10 @@ def collect_sample(self, charge, pixel_mask, arrival_time): self.num_events_seen += 1 def calculate_time_results( - self, - trace_time, - masked_pixels_of_sample, - time_start, - trigger_time, + self, trace_time, masked_pixels_of_sample, time_start, trigger_time, ): """Calculate and return the time results """ - masked_trace_time = np.ma.array( - trace_time, - mask=masked_pixels_of_sample - ) + masked_trace_time = np.ma.array(trace_time, mask=masked_pixels_of_sample) # median over the sample per pixel pixel_median = np.ma.median(masked_trace_time, axis=0) @@ -339,31 +315,29 @@ def calculate_time_results( # time outliers from median relative_median = pixel_median - median_of_pixel_median[:, np.newaxis] - time_median_outliers = 
np.logical_or(pixel_median < self.time_cut_outliers[0], - pixel_median > self.time_cut_outliers[1]) + time_median_outliers = np.logical_or( + pixel_median < self.time_cut_outliers[0], + pixel_median > self.time_cut_outliers[1], + ) return { # FIXME Why divided by two here? - 'sample_time': u.Quantity((trigger_time - time_start) / 2, u.s), - 'sample_time_min': u.Quantity(time_start, u.s), - 'sample_time_max': u.Quantity(trigger_time, u.s), - 'time_mean': np.ma.getdata(pixel_mean), - 'time_median': np.ma.getdata(pixel_median), - 'time_std': np.ma.getdata(pixel_std), - 'relative_time_median': np.ma.getdata(relative_median), - 'time_median_outliers': np.ma.getdata(time_median_outliers), + "sample_time": u.Quantity((trigger_time - time_start) / 2, u.s), + "sample_time_min": u.Quantity(time_start, u.s), + "sample_time_max": u.Quantity(trigger_time, u.s), + "time_mean": np.ma.getdata(pixel_mean), + "time_median": np.ma.getdata(pixel_median), + "time_std": np.ma.getdata(pixel_std), + "relative_time_median": np.ma.getdata(relative_median), + "time_median_outliers": np.ma.getdata(time_median_outliers), } def calculate_relative_gain_results( - self, - event_median, - trace_integral, - masked_pixels_of_sample, + self, event_median, trace_integral, masked_pixels_of_sample, ): """Calculate and return the sample statistics""" masked_trace_integral = np.ma.array( - trace_integral, - mask=masked_pixels_of_sample + trace_integral, mask=masked_pixels_of_sample ) # median over the sample per pixel @@ -384,17 +358,23 @@ def calculate_relative_gain_results( # outliers from median charge_deviation = pixel_median - median_of_pixel_median[:, np.newaxis] - charge_median_outliers = ( - np.logical_or(charge_deviation < self.charge_cut_outliers[0] * median_of_pixel_median[:,np.newaxis], - charge_deviation > self.charge_cut_outliers[1] * median_of_pixel_median[:,np.newaxis])) + charge_median_outliers = np.logical_or( + charge_deviation + < self.charge_cut_outliers[0] * median_of_pixel_median[:, np.newaxis], + charge_deviation + > self.charge_cut_outliers[1] * median_of_pixel_median[:, np.newaxis], + ) return { - 'relative_gain_median': np.ma.getdata(np.ma.median(relative_gain_event, axis=0)), - 'relative_gain_mean': np.ma.getdata(np.ma.mean(relative_gain_event, axis=0)), - 'relative_gain_std': np.ma.getdata(np.ma.std(relative_gain_event, axis=0)), - 'charge_median': np.ma.getdata(pixel_median), - 'charge_mean': np.ma.getdata(pixel_mean), - 'charge_std': np.ma.getdata(pixel_std), - 'charge_median_outliers': np.ma.getdata(charge_median_outliers), + "relative_gain_median": np.ma.getdata( + np.ma.median(relative_gain_event, axis=0) + ), + "relative_gain_mean": np.ma.getdata( + np.ma.mean(relative_gain_event, axis=0) + ), + "relative_gain_std": np.ma.getdata(np.ma.std(relative_gain_event, axis=0)), + "charge_median": np.ma.getdata(pixel_median), + "charge_mean": np.ma.getdata(pixel_mean), + "charge_std": np.ma.getdata(pixel_std), + "charge_median_outliers": np.ma.getdata(charge_median_outliers), } - diff --git a/ctapipe/calib/camera/gainselection.py b/ctapipe/calib/camera/gainselection.py index c7ab6d94e07..b855bb8cd83 100644 --- a/ctapipe/calib/camera/gainselection.py +++ b/ctapipe/calib/camera/gainselection.py @@ -7,10 +7,10 @@ from ctapipe.core import Component, traits __all__ = [ - 'GainChannel', - 'GainSelector', - 'ManualGainSelector', - 'ThresholdGainSelector', + "GainChannel", + "GainSelector", + "ManualGainSelector", + "ThresholdGainSelector", ] @@ -18,6 +18,7 @@ class GainChannel(IntEnum): """ Possible gain 
channels """ + HIGH = 0 LOW = 1 @@ -53,9 +54,7 @@ def __call__(self, waveforms): else: return self.select_channel(waveforms) else: - raise ValueError( - f"Cannot handle waveform array of shape: {waveforms.ndim}" - ) + raise ValueError(f"Cannot handle waveform array of shape: {waveforms.ndim}") @abstractmethod def select_channel(self, waveforms): @@ -84,10 +83,9 @@ class ManualGainSelector(GainSelector): """ Manually choose a gain channel. """ + channel = traits.CaselessStrEnum( - ["HIGH", "LOW"], - default_value="HIGH", - help="Which gain channel to retain" + ["HIGH", "LOW"], default_value="HIGH", help="Which gain channel to retain" ).tag(config=True) def select_channel(self, waveforms): @@ -99,11 +97,12 @@ class ThresholdGainSelector(GainSelector): """ Select gain channel according to a maximum threshold value. """ + threshold = traits.Float( default_value=4000, help="Threshold value in waveform sample units. If a waveform " - "contains a sample above this threshold, use the low gain " - "channel for that pixel." + "contains a sample above this threshold, use the low gain " + "channel for that pixel.", ).tag(config=True) def select_channel(self, waveforms): diff --git a/ctapipe/calib/camera/pedestals.py b/ctapipe/calib/camera/pedestals.py index 1c9ad3e510d..252f8544d17 100644 --- a/ctapipe/calib/camera/pedestals.py +++ b/ctapipe/calib/camera/pedestals.py @@ -11,11 +11,7 @@ from ctapipe.image.extractor import ImageExtractor from ctapipe.core.traits import Int, Unicode, List -__all__ = [ - 'calc_pedestals_from_traces', - 'PedestalCalculator', - 'PedestalIntegrator' -] +__all__ = ["calc_pedestals_from_traces", "PedestalCalculator", "PedestalIntegrator"] def calc_pedestals_from_traces(traces, start_sample, end_sample): @@ -78,25 +74,14 @@ class PedestalCalculator(Component): """ - tel_id = Int( - 0, - help='id of the telescope to calculate the pedestal values' - ).tag(config=True) - sample_duration = Int( - 60, - help='sample duration in seconds' - ).tag(config=True) - sample_size = Int( - 10000, - help='sample size' - ).tag(config=True) - n_channels = Int( - 2, - help='number of channels to be treated' - ).tag(config=True) + tel_id = Int(0, help="id of the telescope to calculate the pedestal values").tag( + config=True + ) + sample_duration = Int(60, help="sample duration in seconds").tag(config=True) + sample_size = Int(10000, help="sample size").tag(config=True) + n_channels = Int(2, help="number of channels to be treated").tag(config=True) charge_product = Unicode( - 'FixedWindowSum', - help='Name of the charge extractor to be used' + "FixedWindowSum", help="Name of the charge extractor to be used" ).tag(config=True) def __init__(self, subarray, **kwargs): @@ -135,9 +120,7 @@ def __init__(self, subarray, **kwargs): # load the waveform charge extractor self.extractor = ImageExtractor.from_name( - self.charge_product, - config=self.config, - subarray=subarray, + self.charge_product, config=self.config, subarray=subarray, ) self.log.info(f"extractor {self.extractor}") @@ -174,13 +157,14 @@ class PedestalIntegrator(PedestalCalculator): Interval (number of std) of accepted charge standard deviation around camera median value """ + charge_median_cut_outliers = List( [-3, 3], - help='Interval (number of std) of accepted charge values around camera median value' + help="Interval (number of std) of accepted charge values around camera median value", ).tag(config=True) charge_std_cut_outliers = List( [-3, 3], - help='Interval (number of std) of accepted charge standard deviation around camera 
median value' + help="Interval (number of std) of accepted charge standard deviation around camera median value", ).tag(config=True) def __init__(self, **kwargs): @@ -255,12 +239,12 @@ def calculate_pedestals(self, event): self.num_events_seen = 0 # real data - if event.meta['origin'] != 'hessio': + if event.meta["origin"] != "hessio": trigger_time = event.r1.tel[self.tel_id].trigger_time pixel_mask = event.mon.tel[self.tel_id].pixel_status.hardware_failing_pixels - else: # patches for MC data + else: # patches for MC data if event.trig.tels_with_trigger: trigger_time = event.trig.gps_time.unix @@ -287,17 +271,12 @@ def calculate_pedestals(self, event): or self.num_events_seen == self.sample_size ): pedestal_results = calculate_pedestal_results( - self, - self.charges, - self.sample_masked_pixels, - ) - time_results = calculate_time_results( - self.time_start, - trigger_time, + self, self.charges, self.sample_masked_pixels, ) + time_results = calculate_time_results(self.time_start, trigger_time,) result = { - 'n_events': self.num_events_seen, + "n_events": self.num_events_seen, **pedestal_results, **time_results, } @@ -334,28 +313,22 @@ def collect_sample(self, charge, pixel_mask): def calculate_time_results( - time_start, - trigger_time, + time_start, trigger_time, ): """Calculate and return the sample time""" return { # FIXME Why divided by two here? - 'sample_time': u.Quantity((trigger_time - time_start) / 2, u.s), - 'sample_time_min': u.Quantity(time_start, u.s), - 'sample_time_max': u.Quantity(trigger_time, u.s), + "sample_time": u.Quantity((trigger_time - time_start) / 2, u.s), + "sample_time_min": u.Quantity(time_start, u.s), + "sample_time_max": u.Quantity(trigger_time, u.s), } def calculate_pedestal_results( - self, - trace_integral, - masked_pixels_of_sample, + self, trace_integral, masked_pixels_of_sample, ): """Calculate and return the sample statistics""" - masked_trace_integral = np.ma.array( - trace_integral, - mask=masked_pixels_of_sample - ) + masked_trace_integral = np.ma.array(trace_integral, mask=masked_pixels_of_sample) # median over the sample per pixel pixel_median = np.ma.median(masked_trace_integral, axis=0) @@ -379,22 +352,24 @@ def calculate_pedestal_results( # outliers from standard deviation deviation = pixel_std - median_of_pixel_std[:, np.newaxis] - charge_std_outliers = ( - np.logical_or(deviation < self.charge_std_cut_outliers[0] * std_of_pixel_std[:,np.newaxis], - deviation > self.charge_std_cut_outliers[1] * std_of_pixel_std[:,np.newaxis])) + charge_std_outliers = np.logical_or( + deviation < self.charge_std_cut_outliers[0] * std_of_pixel_std[:, np.newaxis], + deviation > self.charge_std_cut_outliers[1] * std_of_pixel_std[:, np.newaxis], + ) # outliers from median deviation = pixel_median - median_of_pixel_median[:, np.newaxis] - charge_median_outliers = ( - np.logical_or(deviation < self.charge_median_cut_outliers[0] * std_of_pixel_median[:,np.newaxis], - deviation > self.charge_median_cut_outliers[1] * std_of_pixel_median[:,np.newaxis])) + charge_median_outliers = np.logical_or( + deviation + < self.charge_median_cut_outliers[0] * std_of_pixel_median[:, np.newaxis], + deviation + > self.charge_median_cut_outliers[1] * std_of_pixel_median[:, np.newaxis], + ) return { - 'charge_median': np.ma.getdata(pixel_median), - 'charge_mean': np.ma.getdata(pixel_mean), - 'charge_std': np.ma.getdata(pixel_std), - 'charge_std_outliers': np.ma.getdata(charge_std_outliers), - 'charge_median_outliers': np.ma.getdata(charge_median_outliers) + "charge_median": 
np.ma.getdata(pixel_median), + "charge_mean": np.ma.getdata(pixel_mean), + "charge_std": np.ma.getdata(pixel_std), + "charge_std_outliers": np.ma.getdata(charge_std_outliers), + "charge_median_outliers": np.ma.getdata(charge_median_outliers), } - - diff --git a/ctapipe/calib/camera/tests/test_calibrator.py b/ctapipe/calib/camera/tests/test_calibrator.py index af1d1ab8000..2867e33f184 100644 --- a/ctapipe/calib/camera/tests/test_calibrator.py +++ b/ctapipe/calib/camera/tests/test_calibrator.py @@ -28,7 +28,7 @@ def test_camera_calibrator(example_event, example_subarray): def test_manual_extractor(example_subarray): calibrator = CameraCalibrator( subarray=example_subarray, - image_extractor=LocalPeakWindowSum(subarray=example_subarray) + image_extractor=LocalPeakWindowSum(subarray=example_subarray), ) assert isinstance(calibrator.image_extractor, LocalPeakWindowSum) @@ -47,7 +47,7 @@ def test_config(example_subarray): calibrator = CameraCalibrator( subarray=example_subarray, image_extractor=LocalPeakWindowSum(subarray=example_subarray, config=config), - config=config + config=config, ) assert calibrator.image_extractor.window_shift.tel[None] == window_shift assert calibrator.image_extractor.window_width.tel[None] == window_width @@ -67,7 +67,7 @@ def test_check_r1_empty(example_event, example_subarray): calibrator = CameraCalibrator( subarray=example_subarray, - image_extractor=FullWaveformSum(subarray=example_subarray) + image_extractor=FullWaveformSum(subarray=example_subarray), ) event = DataContainer() event.dl0.tel[telid].waveform = np.full((2048, 128), 2) @@ -109,10 +109,10 @@ def test_dl1_charge_calib(example_subarray): # Randomize times and create pulses time_offset = random.uniform(mid - 10, mid + 10, n_pixels)[:, np.newaxis] - y = norm.pdf(x, time_offset, pulse_sigma).astype('float32') + y = norm.pdf(x, time_offset, pulse_sigma).astype("float32") # Define absolute calibration coefficients - absolute = random.uniform(100, 1000, n_pixels).astype('float32') + absolute = random.uniform(100, 1000, n_pixels).astype("float32") y *= absolute[:, np.newaxis] # Define relative coefficients @@ -130,7 +130,7 @@ def test_dl1_charge_calib(example_subarray): # Test default calibrator = CameraCalibrator( subarray=example_subarray, - image_extractor=FullWaveformSum(subarray=example_subarray) + image_extractor=FullWaveformSum(subarray=example_subarray), ) calibrator(event) np.testing.assert_allclose(event.dl1.tel[telid].image, y.sum(1), rtol=1e-4) @@ -143,7 +143,7 @@ def test_dl1_charge_calib(example_subarray): # Test without need for timing corrections calibrator = CameraCalibrator( subarray=example_subarray, - image_extractor=FullWaveformSum(subarray=example_subarray) + image_extractor=FullWaveformSum(subarray=example_subarray), ) calibrator(event) np.testing.assert_allclose(event.dl1.tel[telid].image, 1, rtol=1e-5) diff --git a/ctapipe/calib/camera/tests/test_flatfield.py b/ctapipe/calib/camera/tests/test_flatfield.py index b775ee48778..78829cfd6aa 100644 --- a/ctapipe/calib/camera/tests/test_flatfield.py +++ b/ctapipe/calib/camera/tests/test_flatfield.py @@ -21,35 +21,36 @@ def test_flasherflatfieldcalculator(): 0: TelescopeDescription.from_name( optics_name="SST-ASTRI", camera_name="CHEC" ), - } + }, ) subarray.tel[0].camera.readout.reference_pulse_shape = np.ones((1, 2)) subarray.tel[0].camera.readout.reference_pulse_sample_width = u.Quantity(1, u.ns) - config = Config({ - "FixedWindowSum": { - "window_start": 15, - "window_width": 10 - } - }) + config = Config({"FixedWindowSum": 
{"window_start": 15, "window_width": 10}}) ff_calculator = FlasherFlatFieldCalculator( subarray=subarray, charge_product="FixedWindowSum", sample_size=n_events, tel_id=tel_id, - config=config + config=config, ) # create one event data = EventAndMonDataContainer() - data.meta['origin'] = 'test' + data.meta["origin"] = "test" # initialize mon and r1 data - data.mon.tel[tel_id].pixel_status.hardware_failing_pixels = np.zeros((n_gain, n_pixels), dtype=bool) - data.mon.tel[tel_id].pixel_status.pedestal_failing_pixels = np.zeros((n_gain, n_pixels), dtype=bool) - data.mon.tel[tel_id].pixel_status.flatfield_failing_pixels = np.zeros((n_gain, n_pixels), dtype=bool) + data.mon.tel[tel_id].pixel_status.hardware_failing_pixels = np.zeros( + (n_gain, n_pixels), dtype=bool + ) + data.mon.tel[tel_id].pixel_status.pedestal_failing_pixels = np.zeros( + (n_gain, n_pixels), dtype=bool + ) + data.mon.tel[tel_id].pixel_status.flatfield_failing_pixels = np.zeros( + (n_gain, n_pixels), dtype=bool + ) data.r1.tel[tel_id].waveform = np.zeros((n_gain, n_pixels, 40)) data.r1.tel[tel_id].trigger_time = 1000 - + # flat-field signal put == delta function of height ff_level at sample 20 data.r1.tel[tel_id].waveform[:, :, 20] = ff_level print(data.r1.tel[tel_id].waveform[0, 0, 20]) @@ -67,14 +68,15 @@ def test_flasherflatfieldcalculator(): # Second test: introduce some failing pixels failing_pixels_id = np.array([10, 20, 30, 40]) data.r1.tel[tel_id].waveform[:, failing_pixels_id, :] = 0 - data.mon.tel[tel_id].pixel_status.pedestal_failing_pixels[:,failing_pixels_id] = True + data.mon.tel[tel_id].pixel_status.pedestal_failing_pixels[ + :, failing_pixels_id + ] = True while ff_calculator.num_events_seen < n_events: if ff_calculator.calculate_relative_gain(data): # working pixel have good gain - assert (data.mon.tel[tel_id].flatfield.relative_gain_median[0, 0] == 1) + assert data.mon.tel[tel_id].flatfield.relative_gain_median[0, 0] == 1 # bad pixels do non influence the gain assert np.mean(data.mon.tel[tel_id].flatfield.relative_gain_std) == 0 - diff --git a/ctapipe/calib/camera/tests/test_gainselection.py b/ctapipe/calib/camera/tests/test_gainselection.py index c05bc0ad039..e471d8433af 100644 --- a/ctapipe/calib/camera/tests/test_gainselection.py +++ b/ctapipe/calib/camera/tests/test_gainselection.py @@ -1,6 +1,10 @@ import numpy as np -from ctapipe.calib.camera.gainselection import ManualGainSelector, \ - ThresholdGainSelector, GainChannel, GainSelector +from ctapipe.calib.camera.gainselection import ( + ManualGainSelector, + ThresholdGainSelector, + GainChannel, + GainSelector, +) class DummyGainSelector(GainSelector): diff --git a/ctapipe/conftest.py b/ctapipe/conftest.py index 0806ba1e6fb..40fecb8ace7 100644 --- a/ctapipe/conftest.py +++ b/ctapipe/conftest.py @@ -10,13 +10,13 @@ from ctapipe.utils import get_dataset_path -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def _global_example_event(): """ helper to get a single event from a MC file. 
Don't use this fixture directly, rather use `test_event` """ - filename = get_dataset_path('gamma_test_large.simtel.gz') + filename = get_dataset_path("gamma_test_large.simtel.gz") print("******************** LOAD TEST EVENT ***********************") @@ -26,12 +26,12 @@ def _global_example_event(): return event -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def example_subarray(): """ Subarray corresponding to the example event """ - filename = get_dataset_path('gamma_test_large.simtel.gz') + filename = get_dataset_path("gamma_test_large.simtel.gz") print("******************** LOAD TEST EVENT ***********************") @@ -39,7 +39,7 @@ def example_subarray(): return reader.subarray -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def example_event(_global_example_event): """ Use this fixture anywhere you need a test event read from a MC file. For diff --git a/ctapipe/containers.py b/ctapipe/containers.py index 20ff4a184ea..c1c22d4af99 100644 --- a/ctapipe/containers.py +++ b/ctapipe/containers.py @@ -83,6 +83,7 @@ class EventType(enum.Enum): class EventIndexContainer(Container): """ index columns to include in event lists, common to all data levels""" + container_prefix = "" # don't want to prefix these obs_id = Field(0, "observation identifier") event_id = Field(0, "event identifier") @@ -93,6 +94,7 @@ class TelEventIndexContainer(Container): index columns to include in telescope-wise event lists, common to all data levels that have telescope-wise information """ + container_prefix = "" # don't want to prefix these obs_id = Field(0, "observation identifier") event_id = Field(0, "event identifier") @@ -437,7 +439,7 @@ class MCEventContainer(Container): core_x = Field(nan * u.m, "MC core position", unit=u.m) core_y = Field(nan * u.m, "MC core position", unit=u.m) h_first_int = Field(nan * u.m, "Height of first interaction", unit=u.m) - x_max = Field(nan * u.g / (u.cm**2), "MC Xmax value", unit=u.g / (u.cm ** 2)) + x_max = Field(nan * u.g / (u.cm ** 2), "MC Xmax value", unit=u.g / (u.cm ** 2)) shower_primary_id = Field( -1, "MC shower primary ID 0 (gamma), 1(e-)," @@ -502,8 +504,12 @@ class MCHeaderContainer(Container): corsika_low_E_model = Field(nan, "Detector MC information") corsika_high_E_model = Field(nan, "Detector MC information") corsika_bunchsize = Field(nan, "Number of photons per bunch") - corsika_wlen_min = Field(nan * u.m, "Minimum wavelength of cherenkov light", unit=u.nm) - corsika_wlen_max = Field(nan * u.m, "Maximum wavelength of cherenkov light", unit=u.nm) + corsika_wlen_min = Field( + nan * u.m, "Minimum wavelength of cherenkov light", unit=u.nm + ) + corsika_wlen_max = Field( + nan * u.m, "Maximum wavelength of cherenkov light", unit=u.nm + ) corsika_low_E_detail = Field(nan, "Detector MC information") corsika_high_E_detail = Field(nan, "Detector MC information") @@ -528,9 +534,15 @@ class ReconstructedShowerContainer(Container): alt_uncert = Field(nan * u.deg, "reconstructed altitude uncertainty", unit=u.deg) az = Field(nan * u.deg, "reconstructed azimuth", unit=u.deg) az_uncert = Field(nan * u.deg, "reconstructed azimuth uncertainty", unit=u.deg) - core_x = Field(nan * u.m, "reconstructed x coordinate of the core position", unit=u.m) - core_y = Field(nan * u.m, "reconstructed y coordinate of the core position", unit=u.m) - core_uncert = Field(nan * u.m, "uncertainty of the reconstructed core position", unit=u.m) + core_x = Field( + nan * u.m, "reconstructed x coordinate of the core position", unit=u.m + ) + core_y = Field( + 
nan * u.m, "reconstructed y coordinate of the core position", unit=u.m + ) + core_uncert = Field( + nan * u.m, "uncertainty of the reconstructed core position", unit=u.m + ) h_max = Field(nan * u.m, "reconstructed height of the shower maximum", unit=u.m) h_max_uncert = Field(nan * u.m, "uncertainty of h_max", unit=u.m) is_valid = Field( @@ -733,7 +745,9 @@ class FlatFieldContainer(Container): [n_events] flat-field events """ - sample_time = Field(0 * u.s, "Time associated to the flat-field event set ", unit=u.s) + sample_time = Field( + 0 * u.s, "Time associated to the flat-field event set ", unit=u.s + ) sample_time_min = Field( nan * u.s, "Minimum time of the flat-field events", unit=u.s ) @@ -789,13 +803,11 @@ class PedestalContainer(Container): """ n_events = Field(-1, "Number of events used for statistics") - sample_time = Field(nan * u.s, "Time associated to the pedestal event set", unit=u.s) - sample_time_min = Field( - nan * u.s, "Time of first pedestal event", unit=u.s - ) - sample_time_max = Field( - nan * u.s, "Time of last pedestal event", unit=u.s + sample_time = Field( + nan * u.s, "Time associated to the pedestal event set", unit=u.s ) + sample_time_min = Field(nan * u.s, "Time of first pedestal event", unit=u.s) + sample_time_max = Field(nan * u.s, "Time of last pedestal event", unit=u.s) charge_mean = Field(None, "np array of pedestal average (n_chan, n_pix)") charge_median = Field(None, "np array of the pedestal median (n_chan, n_pix)") charge_std = Field( @@ -842,14 +854,10 @@ class WaveformCalibrationContainer(Container): time = Field(nan * u.s, "Time associated to the calibration event", unit=u.s) time_min = Field( - nan * u.s, - "Earliest time of validity for the calibration event", - unit=u.s, + nan * u.s, "Earliest time of validity for the calibration event", unit=u.s, ) time_max = Field( - nan * u.s, - "Latest time of validity for the calibration event", - unit=u.s, + nan * u.s, "Latest time of validity for the calibration event", unit=u.s, ) dc_to_pe = Field( diff --git a/ctapipe/coordinates/__init__.py b/ctapipe/coordinates/__init__.py index 8d7bf9f744a..5bcc2667e8d 100644 --- a/ctapipe/coordinates/__init__.py +++ b/ctapipe/coordinates/__init__.py @@ -1,6 +1,6 @@ -''' +""" Coordinates. 
-''' +""" from astropy.coordinates import ( AltAz, FunctionTransformWithFiniteDifference, @@ -15,14 +15,14 @@ __all__ = [ - 'TelescopeFrame', - 'CameraFrame', - 'EngineeringCameraFrame', - 'NominalFrame', - 'GroundFrame', - 'TiltedGroundFrame', - 'project_to_ground', - 'MissingFrameAttributeWarning', + "TelescopeFrame", + "CameraFrame", + "EngineeringCameraFrame", + "NominalFrame", + "GroundFrame", + "TiltedGroundFrame", + "project_to_ground", + "MissingFrameAttributeWarning", ] @@ -36,17 +36,13 @@ class MissingFrameAttributeWarning(Warning): # This forbids it to use AltAz without setting location and obstime # here, the astropy behaviour is defined so that it is assumed, # that if no information about location or obstime is known, both are the same -@frame_transform_graph.transform( - FunctionTransformWithFiniteDifference, - AltAz, - AltAz -) +@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, AltAz) def altaz_to_altaz(from_coo, to_frame): # check if coordinates have obstimes defined obstime = from_coo.obstime if from_coo.obstime is None: warnings.warn( - 'AltAz coordinate has no obstime, assuming same frame', + "AltAz coordinate has no obstime, assuming same frame", MissingFrameAttributeWarning, ) obstime = to_frame.obstime @@ -54,7 +50,7 @@ def altaz_to_altaz(from_coo, to_frame): location = from_coo.location if from_coo.obstime is None: warnings.warn( - 'Horizontal coordinate has no location, assuming same frame', + "Horizontal coordinate has no location, assuming same frame", MissingFrameAttributeWarning, ) location = to_frame.location diff --git a/ctapipe/coordinates/camera_frame.py b/ctapipe/coordinates/camera_frame.py index a9a5e58f73d..535d830ca4b 100644 --- a/ctapipe/coordinates/camera_frame.py +++ b/ctapipe/coordinates/camera_frame.py @@ -20,34 +20,26 @@ class MirrorAttribute(Attribute): - '''A frame Attribute that can only store the integers 1 and 2''' + """A frame Attribute that can only store the integers 1 and 2""" def convert_input(self, value): - '''make sure input is 1 or 2''' + """make sure input is 1 or 2""" if value in (1, 2): return value, False - raise ValueError('Only 1 or 2 mirrors supported') + raise ValueError("Only 1 or 2 mirrors supported") # Go from SimTel / HESS to MAGIC/FACT/Engineering frame and back -CAMERA_TO_ENGINEERING_1M_MATRIX = np.array([ - [0, -1, 0], - [-1, 0, 0], - [0, 0, 1] -]) +CAMERA_TO_ENGINEERING_1M_MATRIX = np.array([[0, -1, 0], [-1, 0, 0], [0, 0, 1]]) ENGINEERING_1M_TO_CAMERA_MATRIX = CAMERA_TO_ENGINEERING_1M_MATRIX -CAMERA_TO_ENGINEERING_2M_MATRIX = np.array([ - [0, 1, 0], - [-1, 0, 0], - [0, 0, 1] -]) +CAMERA_TO_ENGINEERING_2M_MATRIX = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]) ENGINEERING_2M_TO_CAMERA_MATRIX = CAMERA_TO_ENGINEERING_2M_MATRIX.T ZERO_OFFSET = CartesianRepresentation(0, 0, 0, unit=u.m) class CameraFrame(BaseCoordinateFrame): - ''' + """ Camera coordinate frame. The camera frame is a 2d cartesian frame, @@ -76,7 +68,8 @@ class CameraFrame(BaseCoordinateFrame): Observation time location : EarthLocation location of the telescope - ''' + """ + default_representation = PlanarRepresentation focal_length = QuantityAttribute(default=0, unit=u.m) @@ -88,7 +81,7 @@ class CameraFrame(BaseCoordinateFrame): class EngineeringCameraFrame(CameraFrame): - ''' + """ Engineering camera coordinate frame. 
The camera frame is a 2d cartesian frame, @@ -112,16 +105,17 @@ class EngineeringCameraFrame(CameraFrame): Observation time location : EarthLocation location of the telescope - ''' + """ + n_mirrors = MirrorAttribute(default=1) @frame_transform_graph.transform(FunctionTransform, CameraFrame, TelescopeFrame) def camera_to_telescope(camera_coord, telescope_frame): - ''' + """ Transformation between CameraFrame and TelescopeFrame. Is called when a SkyCoord is transformed from CameraFrame into TelescopeFrame - ''' + """ x_pos = camera_coord.cartesian.x y_pos = camera_coord.cartesian.y @@ -145,11 +139,13 @@ def camera_to_telescope(camera_coord, telescope_frame): # to the camera center in the focal plane fov_lat = u.Quantity( (x_rotated / focal_length).to_value(u.dimensionless_unscaled), - u.rad, copy=False, + u.rad, + copy=False, ) fov_lon = u.Quantity( (y_rotated / focal_length).to_value(u.dimensionless_unscaled), - u.rad, copy=False, + u.rad, + copy=False, ) representation = UnitSphericalRepresentation(lat=fov_lat, lon=fov_lon) @@ -159,11 +155,11 @@ def camera_to_telescope(camera_coord, telescope_frame): @frame_transform_graph.transform(FunctionTransform, TelescopeFrame, CameraFrame) def telescope_to_camera(telescope_coord, camera_frame): - ''' + """ Transformation between TelescopeFrame and CameraFrame Is called when a SkyCoord is transformed from TelescopeFrame into CameraFrame - ''' + """ x_pos = telescope_coord.fov_lat y_pos = telescope_coord.fov_lon # reverse the rotation applied to get to this system @@ -189,11 +185,7 @@ def telescope_to_camera(telescope_coord, camera_frame): x_rotated = x_rotated.to_value(u.rad) * focal_length y_rotated = y_rotated.to_value(u.rad) * focal_length - representation = CartesianRepresentation( - x_rotated, - y_rotated, - 0 * u.m - ) + representation = CartesianRepresentation(x_rotated, y_rotated, 0 * u.m) return camera_frame.realize_frame(representation) diff --git a/ctapipe/coordinates/ground_frames.py b/ctapipe/coordinates/ground_frames.py index 4ecd37a744e..93bb94d676e 100644 --- a/ctapipe/coordinates/ground_frames.py +++ b/ctapipe/coordinates/ground_frames.py @@ -28,9 +28,9 @@ from .representation import PlanarRepresentation __all__ = [ - 'GroundFrame', - 'TiltedGroundFrame', - 'project_to_ground', + "GroundFrame", + "TiltedGroundFrame", + "project_to_ground", ] @@ -62,6 +62,7 @@ class TiltedGroundFrame(BaseCoordinateFrame): Alt,Az direction of the tilted reference plane """ + default_representation = PlanarRepresentation # Pointing direction of the tilted system (alt,az), # could be the telescope pointing direction or the reconstructed shower @@ -102,14 +103,13 @@ def get_shower_trans_matrix(azimuth, altitude): trans[2][1] = -sin_z * sin_az trans[0][2] = -sin_z - trans[1][2] = 0. 
+ trans[1][2] = 0.0 trans[2][2] = cos_z return trans -@frame_transform_graph.transform(FunctionTransform, GroundFrame, - TiltedGroundFrame) +@frame_transform_graph.transform(FunctionTransform, GroundFrame, TiltedGroundFrame) def ground_to_tilted(ground_coord, tilted_frame): """ Transformation from ground system to tilted ground system @@ -141,8 +141,7 @@ def ground_to_tilted(ground_coord, tilted_frame): return tilted_frame.realize_frame(representation) -@frame_transform_graph.transform(FunctionTransform, TiltedGroundFrame, - GroundFrame) +@frame_transform_graph.transform(FunctionTransform, TiltedGroundFrame, GroundFrame) def tilted_to_ground(tilted_coord, ground_frame): """ Transformation from tilted ground system to ground system @@ -200,8 +199,9 @@ def project_to_ground(tilt_system): y_initial = ground_system.y.value z_initial = ground_system.z.value - trans = get_shower_trans_matrix(tilt_system.pointing_direction.az, - tilt_system.pointing_direction.alt) + trans = get_shower_trans_matrix( + tilt_system.pointing_direction.az, tilt_system.pointing_direction.alt + ) x_projected = x_initial - trans[2][0] * z_initial / trans[2][2] y_projected = y_initial - trans[2][1] * z_initial / trans[2][2] diff --git a/ctapipe/coordinates/nominal_frame.py b/ctapipe/coordinates/nominal_frame.py index 96313d26520..c345b812ec7 100644 --- a/ctapipe/coordinates/nominal_frame.py +++ b/ctapipe/coordinates/nominal_frame.py @@ -1,10 +1,10 @@ -''' +""" The code in this module is basically a copy of http://docs.astropy.org/en/stable/_modules/astropy/coordinates/builtin_frames/skyoffset.html We are just not creating a metaclass and a factory but directly building the corresponding class. -''' +""" import astropy.units as u from astropy.coordinates.matrix_utilities import ( rotation_matrix, @@ -27,7 +27,7 @@ class NominalFrame(BaseCoordinateFrame): - ''' + """ Nominal coordinate frame. A Frame using a UnitSphericalRepresentation. @@ -47,11 +47,12 @@ class NominalFrame(BaseCoordinateFrame): Observation time location: EarthLocation Location of the telescope - ''' + """ + frame_specific_representation_info = { UnitSphericalRepresentation: [ - RepresentationMapping('lon', 'fov_lon'), - RepresentationMapping('lat', 'fov_lat'), + RepresentationMapping("lon", "fov_lon"), + RepresentationMapping("lat", "fov_lat"), ] } default_representation = UnitSphericalRepresentation @@ -73,12 +74,8 @@ def __init__(self, *args, **kwargs): def skyoffset_to_skyoffset(from_telescope_coord, to_telescope_frame): """Transform between two skyoffset frames.""" - intermediate_from = from_telescope_coord.transform_to( - from_telescope_coord.origin - ) - intermediate_to = intermediate_from.transform_to( - to_telescope_frame.origin - ) + intermediate_from = from_telescope_coord.transform_to(from_telescope_coord.origin) + intermediate_to = intermediate_from.transform_to(to_telescope_frame.origin) return intermediate_to.transform_to(to_telescope_frame) @@ -89,8 +86,8 @@ def reference_to_skyoffset(reference_frame, telescope_frame): # Define rotation matrices along the position angle vector, and # relative to the origin. 
origin = telescope_frame.origin.spherical - mat1 = rotation_matrix(-origin.lat, 'y') - mat2 = rotation_matrix(origin.lon, 'z') + mat1 = rotation_matrix(-origin.lat, "y") + mat2 = rotation_matrix(origin.lon, "z") return matrix_product(mat1, mat2) diff --git a/ctapipe/coordinates/representation.py b/ctapipe/coordinates/representation.py index bd2fed58f29..1093f316104 100644 --- a/ctapipe/coordinates/representation.py +++ b/ctapipe/coordinates/representation.py @@ -27,32 +27,32 @@ class PlanarRepresentation(BaseRepresentation): If True arrays will be copied rather than referenced. """ - attr_classes = OrderedDict([('x', u.Quantity), - ('y', u.Quantity)]) + + attr_classes = OrderedDict([("x", u.Quantity), ("y", u.Quantity)]) def __init__(self, x, y, copy=True, **kwargs): if x is None or y is None: raise ValueError( - 'x and y are required to instantiate CartesianRepresentation' + "x and y are required to instantiate CartesianRepresentation" ) - if not isinstance(x, self.attr_classes['x']): - raise TypeError('x should be a {}'.format(self.attr_classes['x'].__name__)) + if not isinstance(x, self.attr_classes["x"]): + raise TypeError("x should be a {}".format(self.attr_classes["x"].__name__)) - if not isinstance(y, self.attr_classes['y']): - raise TypeError('y should be a {}'.format(self.attr_classes['y'].__name__)) + if not isinstance(y, self.attr_classes["y"]): + raise TypeError("y should be a {}".format(self.attr_classes["y"].__name__)) - x = self.attr_classes['x'](x, copy=copy) - y = self.attr_classes['y'](y, copy=copy) + x = self.attr_classes["x"](x, copy=copy) + y = self.attr_classes["y"](y, copy=copy) if not (x.unit.physical_type == y.unit.physical_type): - raise u.UnitsError('x and y should have matching physical types') + raise u.UnitsError("x and y should have matching physical types") try: x, y = broadcast_arrays(x, y, subok=True) except ValueError: - raise ValueError('Input parameters x and y cannot be broadcast') + raise ValueError("Input parameters x and y cannot be broadcast") self._x = x self._y = y @@ -82,10 +82,8 @@ def from_cartesian(cls, cartesian): return cls(x=cartesian.x, y=cartesian.y) def to_cartesian(self): - return CartesianRepresentation( - x=self._x, y=self._y, z=0 * self._x.unit - ) + return CartesianRepresentation(x=self._x, y=self._y, z=0 * self._x.unit) @property def components(self): - return 'x', 'y' + return "x", "y" diff --git a/ctapipe/coordinates/telescope_frame.py b/ctapipe/coordinates/telescope_frame.py index 5f4b0eb391e..c80a5574560 100644 --- a/ctapipe/coordinates/telescope_frame.py +++ b/ctapipe/coordinates/telescope_frame.py @@ -1,10 +1,10 @@ -''' +""" The code in this module is basically a copy of http://docs.astropy.org/en/stable/_modules/astropy/coordinates/builtin_frames/skyoffset.html We are just not creating a metaclass and a factory but directly building the corresponding class. -''' +""" import astropy.units as u from astropy.coordinates.matrix_utilities import ( rotation_matrix, @@ -27,7 +27,7 @@ class TelescopeFrame(BaseCoordinateFrame): - ''' + """ Telescope coordinate frame. A Frame using a UnitSphericalRepresentation. 
@@ -51,11 +51,12 @@ class TelescopeFrame(BaseCoordinateFrame): Observation time location: EarthLocation Location of the telescope - ''' + """ + frame_specific_representation_info = { UnitSphericalRepresentation: [ - RepresentationMapping('lon', 'fov_lon'), - RepresentationMapping('lat', 'fov_lat'), + RepresentationMapping("lon", "fov_lon"), + RepresentationMapping("lat", "fov_lat"), ] } default_representation = UnitSphericalRepresentation @@ -75,7 +76,7 @@ def __init__(self, *args, **kwargs): @frame_transform_graph.transform(FunctionTransform, TelescopeFrame, TelescopeFrame) def skyoffset_to_skyoffset(from_telescope_coord, to_telescope_frame): - '''Transform between two skyoffset frames.''' + """Transform between two skyoffset frames.""" intermediate_from = from_telescope_coord.transform_to( from_telescope_coord.telescope_pointing @@ -88,19 +89,19 @@ def skyoffset_to_skyoffset(from_telescope_coord, to_telescope_frame): @frame_transform_graph.transform(DynamicMatrixTransform, AltAz, TelescopeFrame) def reference_to_skyoffset(reference_frame, telescope_frame): - '''Convert a reference coordinate to an sky offset frame.''' + """Convert a reference coordinate to an sky offset frame.""" # Define rotation matrices along the position angle vector, and # relative to the telescope_pointing. telescope_pointing = telescope_frame.telescope_pointing.spherical - mat1 = rotation_matrix(-telescope_pointing.lat, 'y') - mat2 = rotation_matrix(telescope_pointing.lon, 'z') + mat1 = rotation_matrix(-telescope_pointing.lat, "y") + mat2 = rotation_matrix(telescope_pointing.lon, "z") return matrix_product(mat1, mat2) @frame_transform_graph.transform(DynamicMatrixTransform, TelescopeFrame, AltAz) def skyoffset_to_reference(skyoffset_coord, reference_frame): - '''Convert an sky offset frame coordinate to the reference frame''' + """Convert an sky offset frame coordinate to the reference frame""" # use the forward transform, but just invert it mat = reference_to_skyoffset(reference_frame, skyoffset_coord) diff --git a/ctapipe/coordinates/tests/test_coordinates.py b/ctapipe/coordinates/tests/test_coordinates.py index 43da2abddff..e702550a2bf 100644 --- a/ctapipe/coordinates/tests/test_coordinates.py +++ b/ctapipe/coordinates/tests/test_coordinates.py @@ -4,7 +4,7 @@ from astropy.time import Time from pytest import approx -location = EarthLocation.of_site('Roque de los Muchachos') +location = EarthLocation.of_site("Roque de los Muchachos") def test_cam_to_nominal(): @@ -13,7 +13,9 @@ def test_cam_to_nominal(): telescope_pointing = SkyCoord(alt=70 * u.deg, az=0 * u.deg, frame=AltAz()) array_pointing = SkyCoord(alt=72 * u.deg, az=0 * u.deg, frame=AltAz()) - cam_frame = CameraFrame(focal_length=28 * u.m, telescope_pointing=telescope_pointing) + cam_frame = CameraFrame( + focal_length=28 * u.m, telescope_pointing=telescope_pointing + ) cam = SkyCoord(x=0.5 * u.m, y=0.1 * u.m, frame=cam_frame) nom_frame = NominalFrame(origin=array_pointing) @@ -23,21 +25,22 @@ def test_cam_to_nominal(): def test_icrs_to_camera(): from ctapipe.coordinates import CameraFrame - obstime = Time('2013-11-01T03:00') - location = EarthLocation.of_site('Roque de los Muchachos') + obstime = Time("2013-11-01T03:00") + location = EarthLocation.of_site("Roque de los Muchachos") horizon_frame = AltAz(location=location, obstime=obstime) # simulate crab "on" observations - crab = SkyCoord(ra='05h34m31.94s', dec='22d00m52.2s') + crab = SkyCoord(ra="05h34m31.94s", dec="22d00m52.2s") telescope_pointing = crab.transform_to(horizon_frame) camera_frame = 
CameraFrame( focal_length=28 * u.m, telescope_pointing=telescope_pointing, - location=location, obstime=obstime, + location=location, + obstime=obstime, ) - ceta_tauri = SkyCoord(ra='5h37m38.6854231s', dec='21d08m33.158804s') + ceta_tauri = SkyCoord(ra="5h37m38.6854231s", dec="21d08m33.158804s") ceta_tauri_camera = ceta_tauri.transform_to(camera_frame) camera_center = SkyCoord(0 * u.m, 0 * u.m, frame=camera_frame) @@ -53,19 +56,11 @@ def test_icrs_to_camera(): def test_telescope_separation(): from ctapipe.coordinates import TelescopeFrame - telescope_pointing = SkyCoord( - alt=70 * u.deg, - az=0 * u.deg, - frame=AltAz() - ) + telescope_pointing = SkyCoord(alt=70 * u.deg, az=0 * u.deg, frame=AltAz()) telescope_frame = TelescopeFrame(telescope_pointing=telescope_pointing) - tel1 = SkyCoord( - fov_lon=0 * u.deg, fov_lat=0 * u.deg, frame=telescope_frame - ) - tel2 = SkyCoord( - fov_lon=0 * u.deg, fov_lat=1 * u.deg, frame=telescope_frame - ) + tel1 = SkyCoord(fov_lon=0 * u.deg, fov_lat=0 * u.deg, frame=telescope_frame) + tel2 = SkyCoord(fov_lon=0 * u.deg, fov_lat=1 * u.deg, frame=telescope_frame) assert tel1.separation(tel2) == u.Quantity(1, u.deg) @@ -73,20 +68,18 @@ def test_telescope_separation(): def test_separation_is_the_same(): from ctapipe.coordinates import TelescopeFrame - obstime = Time('2013-11-01T03:00') - location = EarthLocation.of_site('Roque de los Muchachos') + obstime = Time("2013-11-01T03:00") + location = EarthLocation.of_site("Roque de los Muchachos") horizon_frame = AltAz(location=location, obstime=obstime) - crab = SkyCoord(ra='05h34m31.94s', dec='22d00m52.2s') - ceta_tauri = SkyCoord(ra='5h37m38.6854231s', dec='21d08m33.158804s') + crab = SkyCoord(ra="05h34m31.94s", dec="22d00m52.2s") + ceta_tauri = SkyCoord(ra="5h37m38.6854231s", dec="21d08m33.158804s") # simulate crab "on" observations telescope_pointing = crab.transform_to(horizon_frame) telescope_frame = TelescopeFrame( - telescope_pointing=telescope_pointing, - location=location, - obstime=obstime, + telescope_pointing=telescope_pointing, location=location, obstime=obstime, ) ceta_tauri_telescope = ceta_tauri.transform_to(telescope_frame) @@ -130,6 +123,7 @@ def test_cam_to_tel(): def test_cam_to_hor(): from ctapipe.coordinates import CameraFrame + # Coordinates in any frame can be given as a numpy array of the xyz positions # e.g. in this case the position on pixels in the camera pix_x = [1] * u.m @@ -138,7 +132,7 @@ def test_cam_to_hor(): focal_length = 15000 * u.mm # first define the camera frame - pointing = SkyCoord(alt=70*u.deg, az=0*u.deg,frame=AltAz()) + pointing = SkyCoord(alt=70 * u.deg, az=0 * u.deg, frame=AltAz()) camera_frame = CameraFrame(focal_length=focal_length, telescope_pointing=pointing) # transform @@ -148,7 +142,7 @@ def test_cam_to_hor(): # transform back altaz_coord2 = SkyCoord(az=altaz_coord.az, alt=altaz_coord.alt, frame=AltAz()) camera_coord2 = altaz_coord2.transform_to(camera_frame) - + # check transform assert np.isclose(camera_coord.x.to_value(u.m), camera_coord2.y.to_value(u.m)) @@ -171,7 +165,7 @@ def test_ground_to_tilt(): tilt_coord = grd_coord.transform_to( TiltedGroundFrame(pointing_direction=pointing_direction) ) - assert np.abs(tilt_coord.y + 2. 
* u.m) < 1e-5 * u.m + assert np.abs(tilt_coord.y + 2.0 * u.m) < 1e-5 * u.m # Check that if we look at horizon the x coordinate is 0 pointing_direction = SkyCoord(alt=0 * u.deg, az=0 * u.deg, frame=AltAz()) diff --git a/ctapipe/coordinates/tests/test_engineering_frame.py b/ctapipe/coordinates/tests/test_engineering_frame.py index 4d454d74822..3d9600fddf4 100644 --- a/ctapipe/coordinates/tests/test_engineering_frame.py +++ b/ctapipe/coordinates/tests/test_engineering_frame.py @@ -1,14 +1,14 @@ -''' +""" Tests for the conversion between camera coordinate frames -''' +""" from astropy.coordinates import SkyCoord import astropy.units as u def test_conversion(): - ''' + """ Test conversion between CameraFrame and EngineeringCameraFrame - ''' + """ from ctapipe.coordinates import CameraFrame, EngineeringCameraFrame coords = SkyCoord(x=[3, 1] * u.m, y=[2, 4] * u.m, frame=CameraFrame()) diff --git a/ctapipe/coordinates/tests/test_nominal_frame.py b/ctapipe/coordinates/tests/test_nominal_frame.py index 8493425eab5..17d2b9bf59b 100644 --- a/ctapipe/coordinates/tests/test_nominal_frame.py +++ b/ctapipe/coordinates/tests/test_nominal_frame.py @@ -11,18 +11,14 @@ def test_nominal_to_horizontal_alt0_az0(): nominal_frame = NominalFrame(origin=pointing) - nominal_coord = SkyCoord( - fov_lon=1 * u.deg, fov_lat=0 * u.deg, frame=nominal_frame - ) + nominal_coord = SkyCoord(fov_lon=1 * u.deg, fov_lat=0 * u.deg, frame=nominal_frame) horizon_coord = nominal_coord.transform_to(horizon_frame) assert horizon_coord.az.deg == 1.0 assert horizon_coord.alt.deg == 0.0 - nominal_coord = SkyCoord( - fov_lon=-1 * u.deg, fov_lat=0 * u.deg, frame=nominal_frame - ) + nominal_coord = SkyCoord(fov_lon=-1 * u.deg, fov_lat=0 * u.deg, frame=nominal_frame) horizon_coord = nominal_coord.transform_to(horizon_frame) - assert horizon_coord.az.wrap_at('180d').deg == -1.0 + assert horizon_coord.az.wrap_at("180d").deg == -1.0 assert horizon_coord.alt.deg == 0.0 nominal_coord = SkyCoord(fov_lon=0 * u.deg, fov_lat=1 * u.deg, frame=nominal_frame) @@ -30,9 +26,7 @@ def test_nominal_to_horizontal_alt0_az0(): assert horizon_coord.az.deg == 0.0 assert horizon_coord.alt.deg == 1.0 - nominal_coord = SkyCoord( - fov_lon=0 * u.deg, fov_lat=-1 * u.deg, frame=nominal_frame - ) + nominal_coord = SkyCoord(fov_lon=0 * u.deg, fov_lat=-1 * u.deg, frame=nominal_frame) horizon_coord = nominal_coord.transform_to(horizon_frame) assert horizon_coord.az.deg == 0.0 assert horizon_coord.alt.deg == -1.0 @@ -46,30 +40,22 @@ def test_nominal_to_horizontal_alt0_az180(): nominal_frame = NominalFrame(origin=pointing) - nominal_coord = SkyCoord( - fov_lon=1 * u.deg, fov_lat=0 * u.deg, frame=nominal_frame - ) + nominal_coord = SkyCoord(fov_lon=1 * u.deg, fov_lat=0 * u.deg, frame=nominal_frame) horizon_coord = nominal_coord.transform_to(horizon_frame) assert horizon_coord.az.deg == approx(181.0) assert horizon_coord.alt.deg == 0.0 - nominal_coord = SkyCoord( - fov_lon=-1 * u.deg, fov_lat=0 * u.deg, frame=nominal_frame - ) + nominal_coord = SkyCoord(fov_lon=-1 * u.deg, fov_lat=0 * u.deg, frame=nominal_frame) horizon_coord = nominal_coord.transform_to(horizon_frame) assert horizon_coord.az.deg == approx(179.0) assert horizon_coord.alt.deg == 0.0 - nominal_coord = SkyCoord( - fov_lon=0 * u.deg, fov_lat=1 * u.deg, frame=nominal_frame - ) + nominal_coord = SkyCoord(fov_lon=0 * u.deg, fov_lat=1 * u.deg, frame=nominal_frame) horizon_coord = nominal_coord.transform_to(horizon_frame) assert horizon_coord.az.deg == 180.0 assert horizon_coord.alt.deg == 1.0 - nominal_coord = 
SkyCoord( - fov_lon=0 * u.deg, fov_lat=-1 * u.deg, frame=nominal_frame - ) + nominal_coord = SkyCoord(fov_lon=0 * u.deg, fov_lat=-1 * u.deg, frame=nominal_frame) horizon_coord = nominal_coord.transform_to(horizon_frame) assert horizon_coord.az.deg == 180.0 assert horizon_coord.alt.deg == -1.0 diff --git a/ctapipe/coordinates/tests/test_roundtrip.py b/ctapipe/coordinates/tests/test_roundtrip.py index c493e398de0..4401cbcf7b9 100644 --- a/ctapipe/coordinates/tests/test_roundtrip.py +++ b/ctapipe/coordinates/tests/test_roundtrip.py @@ -8,8 +8,7 @@ def test_roundtrip_camera_horizon(): telescope_pointing = SkyCoord(alt=70 * u.deg, az=0 * u.deg, frame=AltAz()) camera_frame = CameraFrame( - focal_length=28 * u.m, - telescope_pointing=telescope_pointing + focal_length=28 * u.m, telescope_pointing=telescope_pointing ) cam_coord = SkyCoord(x=0.5 * u.m, y=0.1 * u.m, frame=camera_frame) diff --git a/ctapipe/coordinates/tests/test_telescope_frame.py b/ctapipe/coordinates/tests/test_telescope_frame.py index 41c770a4443..600a5b71bc8 100644 --- a/ctapipe/coordinates/tests/test_telescope_frame.py +++ b/ctapipe/coordinates/tests/test_telescope_frame.py @@ -22,7 +22,7 @@ def test_telescope_to_horizontal_alt0_az0(): fov_lon=-1 * u.deg, fov_lat=0 * u.deg, frame=telescope_frame ) horizon_coord = telescope_coord.transform_to(horizon_frame) - assert horizon_coord.az.wrap_at('180d').deg == -1.0 + assert horizon_coord.az.wrap_at("180d").deg == -1.0 assert horizon_coord.alt.deg == 0.0 telescope_coord = SkyCoord( diff --git a/ctapipe/core/__init__.py b/ctapipe/core/__init__.py index 6609f5542f2..d9fdc6919d0 100644 --- a/ctapipe/core/__init__.py +++ b/ctapipe/core/__init__.py @@ -24,5 +24,5 @@ "run_tool", "QualityQuery", "QualityCriteriaError", - "FieldValidationError" + "FieldValidationError", ] diff --git a/ctapipe/core/logging.py b/ctapipe/core/logging.py index 2dd0e36f765..5c4df95499e 100644 --- a/ctapipe/core/logging.py +++ b/ctapipe/core/logging.py @@ -8,6 +8,7 @@ class ColoredFormatter(logging.Formatter): Custom logging.Formatter that adds colors in addition to the original Application logger functionality from LevelFormatter (in application.py) """ + highlevel_limit = logging.WARN highlevel_format = " %(levelname)s |" @@ -16,18 +17,17 @@ def format(self, record): reset_seq = "\033[0m" color_seq = "\033[1;%dm" colors = { - 'INFO': green, - 'DEBUG': blue, - 'WARNING': yellow, - 'CRITICAL': magenta, - 'ERROR': red + "INFO": green, + "DEBUG": blue, + "WARNING": yellow, + "CRITICAL": magenta, + "ERROR": red, } levelname = record.levelname if levelname in colors: levelname_color = ( - color_seq % (30 + colors[levelname]) - + levelname + reset_seq + color_seq % (30 + colors[levelname]) + levelname + reset_seq ) record.levelname = levelname_color diff --git a/ctapipe/core/plugins.py b/ctapipe/core/plugins.py index 4c34f6f9004..71725e0b3df 100644 --- a/ctapipe/core/plugins.py +++ b/ctapipe/core/plugins.py @@ -8,11 +8,10 @@ def detect_and_import_plugins(prefix): """ detect and import plugin modules with given prefix, """ return { name: importlib.import_module(name) - for finder, name, ispkg - in pkgutil.iter_modules() + for finder, name, ispkg in pkgutil.iter_modules() if name.startswith(prefix) } def detect_and_import_io_plugins(): - return detect_and_import_plugins(prefix='ctapipe_io_') + return detect_and_import_plugins(prefix="ctapipe_io_") diff --git a/ctapipe/core/provenance.py b/ctapipe/core/provenance.py index c18a60484f9..22d2754fd51 100644 --- a/ctapipe/core/provenance.py +++ b/ctapipe/core/provenance.py @@ 
-25,21 +25,21 @@ log = logging.getLogger(__name__) -__all__ = ['Provenance'] +__all__ = ["Provenance"] _interesting_env_vars = [ - 'CONDA_DEFAULT_ENV', - 'CONDA_PREFIX', - 'CONDA_PYTHON_EXE', - 'CONDA_EXE', - 'CONDA_PROMPT_MODIFIER', - 'CONDA_SHLVL', - 'PATH', - 'LD_LIBRARY_PATH', - 'DYLD_LIBRARY_PATH', - 'USER', - 'HOME', - 'SHELL', + "CONDA_DEFAULT_ENV", + "CONDA_PREFIX", + "CONDA_PYTHON_EXE", + "CONDA_EXE", + "CONDA_PROMPT_MODIFIER", + "CONDA_SHLVL", + "PATH", + "LD_LIBRARY_PATH", + "DYLD_LIBRARY_PATH", + "USER", + "HOME", + "SHELL", ] @@ -51,9 +51,9 @@ def get_module_version(name): try: return get_distribution(name).version except: - return 'unknown' + return "unknown" except ImportError: - return 'not installed' + return "not installed" class Provenance(metaclass=Singleton): @@ -91,8 +91,11 @@ def add_input_file(self, filename, role=None): role this input file satisfies (optional) """ self.current_activity.register_input(abspath(filename), role=role) - log.debug("added input entity '{}' to activity: '{}'".format( - filename, self.current_activity.name)) + log.debug( + "added input entity '{}' to activity: '{}'".format( + filename, self.current_activity.name + ) + ) def add_output_file(self, filename, role=None): """ @@ -107,8 +110,11 @@ def add_output_file(self, filename, role=None): """ self.current_activity.register_output(abspath(filename), role=role) - log.debug("added output entity '{}' to activity: '{}'".format( - filename, self.current_activity.name)) + log.debug( + "added output entity '{}' to activity: '{}'".format( + filename, self.current_activity.name + ) + ) def add_config(self, config): """ @@ -121,12 +127,14 @@ def add_config(self, config): """ self.current_activity.register_config(config) - def finish_activity(self, status='completed', activity_name=None): + def finish_activity(self, status="completed", activity_name=None): """ end the current activity """ activity = self._activities.pop() if activity_name is not None and activity_name != activity.name: - raise ValueError("Tried to end activity '{}', but '{}' is current " - "activity".format(activity_name, activity.name)) + raise ValueError( + "Tried to end activity '{}', but '{}' is current " + "activity".format(activity_name, activity.name) + ) activity.finish(status) self._finished_activities.append(activity) @@ -191,13 +199,13 @@ class _ActivityProvenance: def __init__(self, activity_name=sys.executable): self._prov = { - 'activity_name': activity_name, - 'activity_uuid': str(uuid.uuid4()), - 'start': {}, - 'stop': {}, - 'system': {}, - 'input': [], - 'output': [] + "activity_name": activity_name, + "activity_uuid": str(uuid.uuid4()), + "start": {}, + "stop": {}, + "system": {}, + "input": [], + "output": [], } self.name = activity_name @@ -205,8 +213,8 @@ def start(self): """ begin recording provenance for this activity. Set's up the system and startup provenance data. 
Generally should be called at start of a program.""" - self._prov['start'].update(_sample_cpu_and_memory()) - self._prov['system'].update(_get_system_provenance()) + self._prov["start"].update(_sample_cpu_and_memory()) + self._prov["system"].update(_get_system_provenance()) def register_input(self, url, role=None): """ @@ -220,7 +228,7 @@ def register_input(self, url, role=None): role: str role name that this input satisfies """ - self._prov['input'].append(dict(url=url, role=role)) + self._prov["input"].append(dict(url=url, role=role)) def register_output(self, url, role=None): """ @@ -234,37 +242,37 @@ def register_output(self, url, role=None): role: str role name that this output satisfies """ - self._prov['output'].append(dict(url=url, role=role)) + self._prov["output"].append(dict(url=url, role=role)) def register_config(self, config): """ add a dictionary of configuration parameters to this activity""" - self._prov['config'] = config + self._prov["config"] = config - def finish(self, status='completed'): + def finish(self, status="completed"): """ record final provenance information, normally called at shutdown.""" - self._prov['stop'].update(_sample_cpu_and_memory()) + self._prov["stop"].update(_sample_cpu_and_memory()) # record the duration (wall-clock) for this activity - t_start = Time(self._prov['start']['time_utc'], format='isot') - t_stop = Time(self._prov['stop']['time_utc'], format='isot') - self._prov['status'] = status - self._prov['duration_min'] = (t_stop - t_start).to('min').value + t_start = Time(self._prov["start"]["time_utc"], format="isot") + t_stop = Time(self._prov["stop"]["time_utc"], format="isot") + self._prov["status"] = status + self._prov["duration_min"] = (t_stop - t_start).to("min").value @property def output(self): - return self._prov.get('output', None) + return self._prov.get("output", None) @property def input(self): - return self._prov.get('input', None) + return self._prov.get("input", None) def sample_cpu_and_memory(self): """ Record a snapshot of current CPU and memory information. 
""" - if 'samples' not in self._prov: - self._prov['samples'] = [] - self._prov['samples'].append(_sample_cpu_and_memory()) + if "samples" not in self._prov: + self._prov["samples"] = [] + self._prov["samples"].append(_sample_cpu_and_memory()) @property def provenance(self): @@ -279,8 +287,8 @@ def _get_system_provenance(): return dict( ctapipe_version=ctapipe.__version__, - ctapipe_resources_version=get_module_version('ctapipe_resources'), - eventio_version=get_module_version('eventio'), + ctapipe_resources_version=get_module_version("ctapipe_resources"), + eventio_version=get_module_version("eventio"), ctapipe_svc_path=os.getenv("CTAPIPE_SVC_PATH"), executable=sys.executable, platform=dict( @@ -294,7 +302,7 @@ def _get_system_provenance(): release=platform.release(), libcver=platform.libc_ver(), num_cpus=psutil.cpu_count(), - boot_time=Time(psutil.boot_time(), format='unix').isot, + boot_time=Time(psutil.boot_time(), format="unix").isot, ), python=dict( version_string=sys.version, diff --git a/ctapipe/core/support.py b/ctapipe/core/support.py index eefb38f1015..332831b8e89 100644 --- a/ctapipe/core/support.py +++ b/ctapipe/core/support.py @@ -1,5 +1,6 @@ class Singleton(type): """" metaclass for singleton pattern """ + instance = None def __call__(cls, *args, **kw): diff --git a/ctapipe/core/tests/__init__.py b/ctapipe/core/tests/__init__.py index 3de94947755..1102ea5ddcb 100644 --- a/ctapipe/core/tests/__init__.py +++ b/ctapipe/core/tests/__init__.py @@ -1,3 +1,3 @@ """ tests of core functionality of ctapipe -""" \ No newline at end of file +""" diff --git a/ctapipe/core/tests/test_container.py b/ctapipe/core/tests/test_container.py index 0bbe69a76ea..be40e9c992c 100644 --- a/ctapipe/core/tests/test_container.py +++ b/ctapipe/core/tests/test_container.py @@ -231,8 +231,8 @@ def test_field_validation(): field_s2.validate(3.3) # test scalars with units and dtypes: - field_s3 = Field(1.0, "scalar with dtype and unit", dtype='float32', unit='m') - field_s3.validate(np.float32(6)*u.m) + field_s3 = Field(1.0, "scalar with dtype and unit", dtype="float32", unit="m") + field_s3.validate(np.float32(6) * u.m) # test with no restrictions: field_all = Field(None, "stuff") diff --git a/ctapipe/core/tests/test_provenance.py b/ctapipe/core/tests/test_provenance.py index 6bb47cde572..1d96e67d4b8 100644 --- a/ctapipe/core/tests/test_provenance.py +++ b/ctapipe/core/tests/test_provenance.py @@ -15,7 +15,7 @@ def test_Provenance(): prov.finish_activity("test2") prov.finish_activity("test1") - assert set(prov.finished_activity_names) == {'test2', 'test1'} + assert set(prov.finished_activity_names) == {"test2", "test1"} return prov @@ -23,9 +23,9 @@ def test_Provenance(): def test_ActivityProvenance(): prov = _ActivityProvenance() prov.start() - prov.register_input('test.txt') - prov.register_input('test2.txt') - prov.register_output('out.txt') + prov.register_input("test.txt") + prov.register_input("test2.txt") + prov.register_output("out.txt") prov.sample_cpu_and_memory() prov.finish() @@ -35,15 +35,16 @@ def test_provenence_contextmanager(): prov = Provenance() with prov.activity("myactivity"): - assert 'myactivity' in prov.active_activity_names + assert "myactivity" in prov.active_activity_names - assert 'myactivity' in prov.finished_activity_names - assert 'myactivity' not in prov.active_activity_names + assert "myactivity" in prov.finished_activity_names + assert "myactivity" not in prov.active_activity_names -if __name__ == '__main__': +if __name__ == "__main__": import logging + 
logging.basicConfig(level=logging.DEBUG) prov = test_Provenance() diff --git a/ctapipe/core/tests/test_tool.py b/ctapipe/core/tests/test_tool.py index 2dd4be701e9..0890498db0a 100644 --- a/ctapipe/core/tests/test_tool.py +++ b/ctapipe/core/tests/test_tool.py @@ -83,8 +83,8 @@ class MyTool(Tool): tool.userparam = -1.0 conf2 = tool.get_current_config() - assert conf1['MyTool']['userparam'] == 5.0 - assert conf2['MyTool']['userparam'] == -1.0 + assert conf1["MyTool"]["userparam"] == 5.0 + assert conf2["MyTool"]["userparam"] == -1.0 def test_tool_exit_code(): @@ -99,17 +99,17 @@ class MyTool(Tool): tool = MyTool() with pytest.raises(SystemExit) as exc: - tool.run(['--non-existent-option']) + tool.run(["--non-existent-option"]) assert exc.value.code == 1 with pytest.raises(SystemExit) as exc: - tool.run(['--MyTool.userparam=foo']) + tool.run(["--MyTool.userparam=foo"]) assert exc.value.code == 1 - assert run_tool(tool, ['--help']) == 0 - assert run_tool(tool, ['--non-existent-option']) == 1 + assert run_tool(tool, ["--help"]) == 0 + assert run_tool(tool, ["--non-existent-option"]) == 1 def test_tool_command_line_precedence(): diff --git a/ctapipe/core/tests/test_traits.py b/ctapipe/core/tests/test_traits.py index 6683e53f0b9..32b8afad3cb 100644 --- a/ctapipe/core/tests/test_traits.py +++ b/ctapipe/core/tests/test_traits.py @@ -15,7 +15,7 @@ TelescopeParameter, FloatTelescopeParameter, IntTelescopeParameter, - AstroTime + AstroTime, ) from ctapipe.image import ImageExtractor @@ -69,7 +69,7 @@ class C1(Component): c1.p = 5 with pytest.raises(TraitError): - c1.p = '' + c1.p = "" def test_bytes(): @@ -77,8 +77,8 @@ class C1(Component): p = Path(exists=False) c1 = C1() - c1.p = b'/home/foo' - assert c1.p == pathlib.Path('/home/foo') + c1.p = b"/home/foo" + assert c1.p == pathlib.Path("/home/foo") def test_path_none(): @@ -86,7 +86,7 @@ class C1(Component): thepath = Path(exists=False) c1 = C1() - c1.thepath = 'foo' + c1.thepath = "foo" c1.thepath = None @@ -143,16 +143,16 @@ class C(Component): c = C() # test relative - c.thepath = 'file://foo.hdf5' - assert c.thepath == (pathlib.Path() / 'foo.hdf5').absolute() + c.thepath = "file://foo.hdf5" + assert c.thepath == (pathlib.Path() / "foo.hdf5").absolute() # test absolute - c.thepath = 'file:///foo.hdf5' - assert c.thepath == pathlib.Path('/foo.hdf5') + c.thepath = "file:///foo.hdf5" + assert c.thepath == pathlib.Path("/foo.hdf5") # test not other shemes raise trailet errors with pytest.raises(TraitError): - c.thepath = 'https://example.org/test.hdf5' + c.thepath = "https://example.org/test.hdf5" def test_enum_trait_default_is_right(): @@ -160,16 +160,16 @@ def test_enum_trait_default_is_right(): from ctapipe.core.traits import create_class_enum_trait with pytest.raises(ValueError): - create_class_enum_trait( - ImageExtractor, default_value="name_of_default_choice" - ) + create_class_enum_trait(ImageExtractor, default_value="name_of_default_choice") def test_enum_trait(): """ check that enum traits are constructable from a complex class """ from ctapipe.core.traits import create_class_enum_trait - trait = create_class_enum_trait(ImageExtractor, default_value="NeighborPeakWindowSum") + trait = create_class_enum_trait( + ImageExtractor, default_value="NeighborPeakWindowSum" + ) assert isinstance(trait, CaselessStrEnum) @@ -419,7 +419,7 @@ class AllowNone(Component): c.time = None class NoNone(Component): - time = AstroTime(default_value='2012-01-01T20:00', allow_none=False) + time = AstroTime(default_value="2012-01-01T20:00", allow_none=False) c = 
NoNone() with pytest.raises(TraitError): diff --git a/ctapipe/core/tool.py b/ctapipe/core/tool.py index b2d8ffdb1e6..794555fb4ed 100644 --- a/ctapipe/core/tool.py +++ b/ctapipe/core/tool.py @@ -367,14 +367,14 @@ def commented(text, indent_level=2, width=70): def run_tool(tool: Tool, argv=None): - ''' + """ Utility run a certain tool in a python session without exitinig Returns ------- exit_code: int The return code of the tool, 0 indicates success, everything else an error - ''' + """ try: tool.run(argv or []) return 0 diff --git a/ctapipe/core/traits.py b/ctapipe/core/traits.py index 1799917d781..3bdb9bdd6a7 100644 --- a/ctapipe/core/traits.py +++ b/ctapipe/core/traits.py @@ -51,7 +51,7 @@ "TelescopeParameter", "FloatTelescopeParameter", "IntTelescopeParameter", - "AstroTime" + "AstroTime", ] import logging @@ -61,19 +61,20 @@ class AstroTime(TraitType): """ A trait representing a point in Time, as understood by `astropy.time`""" + def validate(self, obj, value): """ try to parse and return an ISO time string """ try: the_time = Time(value) - the_time.format = 'iso' + the_time.format = "iso" return the_time except ValueError: return self.error(obj, value) def info(self): - info = 'an ISO8601 datestring or Time instance' + info = "an ISO8601 datestring or Time instance" if self.allow_none: - info += 'or None' + info += "or None" return info @@ -93,7 +94,7 @@ class Path(TraitType): """ def __init__(self, *args, exists=None, directory_ok=True, file_ok=True, **kwargs): - default_value = kwargs.pop('default_value', None) + default_value = kwargs.pop("default_value", None) super().__init__(*args, default_value=default_value, allow_none=True, **kwargs) self.exists = exists @@ -101,23 +102,23 @@ def __init__(self, *args, exists=None, directory_ok=True, file_ok=True, **kwargs self.file_ok = file_ok def info(self): - info = 'a pathlib.Path or non-empty str for ' + info = "a pathlib.Path or non-empty str for " if self.exists is True: - info += 'an existing' + info += "an existing" elif self.exists is False: - info += 'a not existing' + info += "a not existing" else: - info += 'a' + info += "a" if self.directory_ok and self.file_ok: - info += ' directory or file' + info += " directory or file" else: if self.file_ok: - info += ' file' + info += " file" if self.directory_ok: - info += 'directory' + info += "directory" if self.allow_none: - info += ' or None' + info += " or None" return info @@ -129,7 +130,7 @@ def validate(self, obj, value): return self.error(obj, value) if isinstance(value, str): - if value == '': + if value == "": return self.error(obj, value) try: @@ -137,7 +138,7 @@ def validate(self, obj, value): except ValueError: return self.error(obj, value) - if url.scheme not in ('', 'file'): + if url.scheme not in ("", "file"): return self.error(obj, value) value = pathlib.Path(url.netloc, url.path) @@ -178,10 +179,7 @@ def create_class_enum_trait(base_class, default_value, help=None): raise ValueError(f"{default_value} is not in choices: {choices}") return CaselessStrEnum( - choices, - default_value=default_value, - allow_none=False, - help=help, + choices, default_value=default_value, allow_none=False, help=help, ).tag(config=True) diff --git a/ctapipe/image/__init__.py b/ctapipe/image/__init__.py index e2d730fa822..91458bf81a8 100644 --- a/ctapipe/image/__init__.py +++ b/ctapipe/image/__init__.py @@ -1,5 +1,7 @@ from .hillas import ( - hillas_parameters, HillasParameterizationError, camera_to_shower_coordinates, + hillas_parameters, + HillasParameterizationError, + 
camera_to_shower_coordinates, ) from .timing import timing_parameters from .leakage import leakage diff --git a/ctapipe/image/cleaning.py b/ctapipe/image/cleaning.py index 20b26df51fa..efc8002375d 100644 --- a/ctapipe/image/cleaning.py +++ b/ctapipe/image/cleaning.py @@ -18,7 +18,9 @@ from ..core.component import TelescopeComponent from ..core.traits import ( - FloatTelescopeParameter, IntTelescopeParameter, BoolTelescopeParameter, + FloatTelescopeParameter, + IntTelescopeParameter, + BoolTelescopeParameter, ) @@ -367,24 +369,21 @@ class TailcutsImageCleaner(ImageCleaner): """ picture_threshold_pe = FloatTelescopeParameter( - default_value=10.0, - help="top-level threshold in photoelectrons" + default_value=10.0, help="top-level threshold in photoelectrons" ).tag(config=True) boundary_threshold_pe = FloatTelescopeParameter( - default_value=5.0, - help="second-level threshold in photoelectrons" + default_value=5.0, help="second-level threshold in photoelectrons" ).tag(config=True) min_picture_neighbors = IntTelescopeParameter( - default_value=2, - help="Minimum number of neighbors above threshold to consider" + default_value=2, help="Minimum number of neighbors above threshold to consider" ).tag(config=True) keep_isolated_pixels = BoolTelescopeParameter( default_value=False, help="If False, pixels with less neighbors than ``min_picture_neighbors`` are" - "removed." + "removed.", ).tag(config=True) def __call__( @@ -433,8 +432,7 @@ class FACTImageCleaner(TailcutsImageCleaner): """ time_limit_ns = FloatTelescopeParameter( - default_value=5.0, - help="arrival time limit for neighboring " "pixels, in ns" + default_value=5.0, help="arrival time limit for neighboring " "pixels, in ns" ).tag(config=True) def __call__( diff --git a/ctapipe/image/concentration.py b/ctapipe/image/concentration.py index 87c0bfc5246..ea35c62bf95 100644 --- a/ctapipe/image/concentration.py +++ b/ctapipe/image/concentration.py @@ -24,7 +24,9 @@ def concentration(geom, image, hillas_parameters): if hillas_parameters.width != 0: # get all pixels inside the hillas ellipse - longi, trans = camera_to_shower_coordinates(geom.pix_x, geom.pix_y, h.x, h.y, h.psi) + longi, trans = camera_to_shower_coordinates( + geom.pix_x, geom.pix_y, h.x, h.y, h.psi + ) mask_core = (longi ** 2 / h.length ** 2) + (trans ** 2 / h.width ** 2) <= 1.0 conc_core = image[mask_core].sum() / h.intensity else: @@ -33,7 +35,5 @@ def concentration(geom, image, hillas_parameters): concentration_pixel = image.max() / h.intensity return ConcentrationContainer( - cog=conc_cog, - core=conc_core, - pixel=concentration_pixel, + cog=conc_cog, core=conc_core, pixel=concentration_pixel, ) diff --git a/ctapipe/image/extractor.py b/ctapipe/image/extractor.py index 2acfb242b15..8eb476f79de 100644 --- a/ctapipe/image/extractor.py +++ b/ctapipe/image/extractor.py @@ -384,8 +384,11 @@ def _calculate_correction(self, telid): def __call__(self, waveforms, telid, selected_gain_channel): charge, peak_time = extract_around_peak( - waveforms, self.window_start.tel[telid], self.window_width.tel[telid], 0, - self.sampling_rate[telid] + waveforms, + self.window_start.tel[telid], + self.window_width.tel[telid], + 0, + self.sampling_rate[telid], ) charge *= self._calculate_correction(telid=telid)[selected_gain_channel] return charge, peak_time @@ -880,9 +883,7 @@ def _apply_second_pass( # image cleaning # WARNING: in case of outliers, the fit can perform better if # it is a robust algorithm. 
- timing = timing_parameters( - camera_geometry, image_2, pulse_time_1stpass, hillas - ) + timing = timing_parameters(camera_geometry, image_2, pulse_time_1stpass, hillas) # get projected distances along main image axis long, _ = camera_to_shower_coordinates( @@ -981,9 +982,7 @@ def _apply_second_pass( # Same approach for the pulse times pulse_time_2npass = pulse_time_1stpass # core + non-core pixels - pulse_time_2npass[ - nonCore_pixels_mask - ] = pulse_times_noCore # non-core pixels + pulse_time_2npass[nonCore_pixels_mask] = pulse_times_noCore # non-core pixels return charge_2ndpass, pulse_time_2npass @@ -1017,15 +1016,13 @@ def __call__(self, waveforms, telid, selected_gain_channel): # FIXME: properly make sure that output is 32Bit instead of downcasting here if self.disable_second_pass: - return (charge1 * correction1).astype('float32'), pulse_time1.astype('float32') + return ( + (charge1 * correction1).astype("float32"), + pulse_time1.astype("float32"), + ) charge2, pulse_time2 = self._apply_second_pass( - waveforms, - telid, - selected_gain_channel, - charge1, - pulse_time1, - correction1, + waveforms, telid, selected_gain_channel, charge1, pulse_time1, correction1, ) # FIXME: properly make sure that output is 32Bit instead of downcasting here - return charge2.astype('float32'), pulse_time2.astype('float32') + return charge2.astype("float32"), pulse_time2.astype("float32") diff --git a/ctapipe/image/geometry_converter.py b/ctapipe/image/geometry_converter.py index e9b8b4adae4..bd2202a5e8b 100644 --- a/ctapipe/image/geometry_converter.py +++ b/ctapipe/image/geometry_converter.py @@ -2,8 +2,10 @@ common module""" -from .geometry_converter_hex import (convert_geometry_hex1d_to_rect2d, - convert_geometry_rect2d_back_to_hexe1d) +from .geometry_converter_hex import ( + convert_geometry_hex1d_to_rect2d, + convert_geometry_rect2d_back_to_hexe1d, +) from .geometry_converter_astri import astri_to_2d_array, array_2d_to_astri from .geometry_converter_chec import chec_to_2d_array, array_2d_to_chec diff --git a/ctapipe/image/geometry_converter_astri.py b/ctapipe/image/geometry_converter_astri.py index 43cff43812a..3df84d4d0db 100644 --- a/ctapipe/image/geometry_converter_astri.py +++ b/ctapipe/image/geometry_converter_astri.py @@ -108,50 +108,52 @@ def array_2d_to_astri(img_2d): the 1D ASTRI image """ - img_1d = np.concatenate([ - img_2d[48:56, 16:24][::-1, :].ravel(), - img_2d[48:56, 24:32][::-1, :].ravel(), - img_2d[48:56, 32:40][::-1, :].ravel(), - # - img_2d[40:48, 8:16][::-1, :].ravel(), - img_2d[40:48, 16:24][::-1, :].ravel(), - img_2d[40:48, 24:32][::-1, :].ravel(), - img_2d[40:48, 32:40][::-1, :].ravel(), - img_2d[40:48, 40:48][::-1, :].ravel(), - # - img_2d[32:40, 0:8][::-1, :].ravel(), - img_2d[32:40, 8:16][::-1, :].ravel(), - img_2d[32:40, 16:24][::-1, :].ravel(), - img_2d[32:40, 24:32][::-1, :].ravel(), - img_2d[32:40, 32:40][::-1, :].ravel(), - img_2d[32:40, 40:48][::-1, :].ravel(), - img_2d[32:40, 48:56][::-1, :].ravel(), - # - img_2d[24:32, 0:8][::-1, :].ravel(), - img_2d[24:32, 8:16][::-1, :].ravel(), - img_2d[24:32, 16:24][::-1, :].ravel(), - img_2d[24:32, 24:32][::-1, :].ravel(), - img_2d[24:32, 32:40][::-1, :].ravel(), - img_2d[24:32, 40:48][::-1, :].ravel(), - img_2d[24:32, 48:56][::-1, :].ravel(), - # - img_2d[16:24, 0:8][::-1, :].ravel(), - img_2d[16:24, 8:16][::-1, :].ravel(), - img_2d[16:24, 16:24][::-1, :].ravel(), - img_2d[16:24, 24:32][::-1, :].ravel(), - img_2d[16:24, 32:40][::-1, :].ravel(), - img_2d[16:24, 40:48][::-1, :].ravel(), - img_2d[16:24, 48:56][::-1, 
:].ravel(), - # - img_2d[8:16, 8:16][::-1, :].ravel(), - img_2d[8:16, 16:24][::-1, :].ravel(), - img_2d[8:16, 24:32][::-1, :].ravel(), - img_2d[8:16, 32:40][::-1, :].ravel(), - img_2d[8:16, 40:48][::-1, :].ravel(), - # - img_2d[0:8, 16:24][::-1, :].ravel(), - img_2d[0:8, 24:32][::-1, :].ravel(), - img_2d[0:8, 32:40][::-1, :].ravel() - ]) + img_1d = np.concatenate( + [ + img_2d[48:56, 16:24][::-1, :].ravel(), + img_2d[48:56, 24:32][::-1, :].ravel(), + img_2d[48:56, 32:40][::-1, :].ravel(), + # + img_2d[40:48, 8:16][::-1, :].ravel(), + img_2d[40:48, 16:24][::-1, :].ravel(), + img_2d[40:48, 24:32][::-1, :].ravel(), + img_2d[40:48, 32:40][::-1, :].ravel(), + img_2d[40:48, 40:48][::-1, :].ravel(), + # + img_2d[32:40, 0:8][::-1, :].ravel(), + img_2d[32:40, 8:16][::-1, :].ravel(), + img_2d[32:40, 16:24][::-1, :].ravel(), + img_2d[32:40, 24:32][::-1, :].ravel(), + img_2d[32:40, 32:40][::-1, :].ravel(), + img_2d[32:40, 40:48][::-1, :].ravel(), + img_2d[32:40, 48:56][::-1, :].ravel(), + # + img_2d[24:32, 0:8][::-1, :].ravel(), + img_2d[24:32, 8:16][::-1, :].ravel(), + img_2d[24:32, 16:24][::-1, :].ravel(), + img_2d[24:32, 24:32][::-1, :].ravel(), + img_2d[24:32, 32:40][::-1, :].ravel(), + img_2d[24:32, 40:48][::-1, :].ravel(), + img_2d[24:32, 48:56][::-1, :].ravel(), + # + img_2d[16:24, 0:8][::-1, :].ravel(), + img_2d[16:24, 8:16][::-1, :].ravel(), + img_2d[16:24, 16:24][::-1, :].ravel(), + img_2d[16:24, 24:32][::-1, :].ravel(), + img_2d[16:24, 32:40][::-1, :].ravel(), + img_2d[16:24, 40:48][::-1, :].ravel(), + img_2d[16:24, 48:56][::-1, :].ravel(), + # + img_2d[8:16, 8:16][::-1, :].ravel(), + img_2d[8:16, 16:24][::-1, :].ravel(), + img_2d[8:16, 24:32][::-1, :].ravel(), + img_2d[8:16, 32:40][::-1, :].ravel(), + img_2d[8:16, 40:48][::-1, :].ravel(), + # + img_2d[0:8, 16:24][::-1, :].ravel(), + img_2d[0:8, 24:32][::-1, :].ravel(), + img_2d[0:8, 32:40][::-1, :].ravel(), + ] + ) return img_1d diff --git a/ctapipe/image/hillas.py b/ctapipe/image/hillas.py index 04ba06f8fc5..a2cf64d9ff7 100644 --- a/ctapipe/image/hillas.py +++ b/ctapipe/image/hillas.py @@ -15,13 +15,13 @@ __all__ = [ - 'hillas_parameters', - 'HillasParameterizationError', + "hillas_parameters", + "HillasParameterizationError", ] def camera_to_shower_coordinates(x, y, cog_x, cog_y, psi): - ''' + """ Return longitudinal and transverse coordinates for x and y for a given set of hillas parameters @@ -44,7 +44,7 @@ def camera_to_shower_coordinates(x, y, cog_x, cog_y, psi): longitudinal coordinates (along the shower axis) transverse: astropy.units.Quantity transverse coordinates (perpendicular to the shower axis) - ''' + """ cos_psi = np.cos(psi) sin_psi = np.sin(psi) @@ -112,15 +112,13 @@ def hillas_parameters(geom, image): pix_y = Quantity(np.asanyarray(geom.pix_y, dtype=np.float64)).value image = np.asanyarray(image, dtype=np.float64) image = np.ma.filled(image, 0) - msg = 'Image and pixel shape do not match' + msg = "Image and pixel shape do not match" assert pix_x.shape == pix_y.shape == image.shape, msg size = np.sum(image) if size == 0.0: - raise HillasParameterizationError( - 'size=0, cannot calculate HillasParameters' - ) + raise HillasParameterizationError("size=0, cannot calculate HillasParameters") # calculate the cog as the mean of the coordinates weighted with the image cog_x = np.average(pix_x, weights=image) @@ -162,11 +160,11 @@ def hillas_parameters(geom, image): # calculate higher order moments along shower axes longitudinal = delta_x * np.cos(psi) + delta_y * np.sin(psi) - m3_long = np.average(longitudinal**3, weights=image) 
- skewness_long = m3_long / length**3 + m3_long = np.average(longitudinal ** 3, weights=image) + skewness_long = m3_long / length ** 3 - m4_long = np.average(longitudinal**4, weights=image) - kurtosis_long = m4_long / length**4 + m4_long = np.average(longitudinal ** 4, weights=image) + kurtosis_long = m4_long / length ** 4 return HillasParametersContainer( x=u.Quantity(cog_x, unit), diff --git a/ctapipe/image/morphology.py b/ctapipe/image/morphology.py index f30a2688f6c..78c8f179e48 100644 --- a/ctapipe/image/morphology.py +++ b/ctapipe/image/morphology.py @@ -41,7 +41,7 @@ def number_of_islands(geom, mask): def number_of_island_sizes(island_labels): - ''' + """ Return number of small, medium and large islands Parameters @@ -57,7 +57,7 @@ def number_of_island_sizes(island_labels): number of islands with 3 <= n_pixels <= 50 n_large: int number of islands with more than 50 pixels - ''' + """ # count number of pixels in each island, remove 0 = no island island_sizes = np.bincount(island_labels)[1:] diff --git a/ctapipe/image/muon/__init__.py b/ctapipe/image/muon/__init__.py index 525124f3a14..23b4916e5cf 100644 --- a/ctapipe/image/muon/__init__.py +++ b/ctapipe/image/muon/__init__.py @@ -5,7 +5,7 @@ __all__ = [ - 'MuonIntensityFitter', - 'MuonRingFitter', - 'kundu_chaudhuri_circle_fit', + "MuonIntensityFitter", + "MuonRingFitter", + "kundu_chaudhuri_circle_fit", ] diff --git a/ctapipe/image/muon/features.py b/ctapipe/image/muon/features.py index 62e8945ab90..30c67999329 100644 --- a/ctapipe/image/muon/features.py +++ b/ctapipe/image/muon/features.py @@ -21,8 +21,8 @@ def mean_squared_error(pixel_x, pixel_y, weights, radius, center_x, center_y): center_y: float y coordinate of the ring center """ - r = np.sqrt((center_x - pixel_x)**2 + (center_y - pixel_y)**2) - return np.average((r - radius)**2, weights=weights) + r = np.sqrt((center_x - pixel_x) ** 2 + (center_y - pixel_y) ** 2) + return np.average((r - radius) ** 2, weights=weights) def intensity_ratio_inside_ring( @@ -52,10 +52,9 @@ def intensity_ratio_inside_ring( width of the ring """ - pixel_r = np.sqrt((center_x - pixel_x)**2 + (center_y - pixel_y)**2) + pixel_r = np.sqrt((center_x - pixel_x) ** 2 + (center_y - pixel_y) ** 2) mask = np.logical_and( - pixel_r >= radius - 0.5 * width, - pixel_r <= radius + 0.5 * width + pixel_r >= radius - 0.5 * width, pixel_r <= radius + 0.5 * width ) inside = weights[mask].sum() @@ -65,14 +64,7 @@ def intensity_ratio_inside_ring( def ring_completeness( - pixel_x, - pixel_y, - weights, - radius, - center_x, - center_y, - threshold=30, - bins=30, + pixel_x, pixel_y, weights, radius, center_x, center_y, threshold=30, bins=30, ): """ Estimate how complete a ring is. 
@@ -140,11 +132,11 @@ def ring_containment(radius, center_x, center_y, camera_radius): ringcontainment: float the ratio of ring inside the camera """ - if hasattr(radius, 'unit'): + if hasattr(radius, "unit"): radius, center_x, center_y, camera_radius = all_to_value( radius, center_x, center_y, camera_radius, unit=radius.unit ) - d = np.sqrt(center_x**2 + center_y**2) + d = np.sqrt(center_x ** 2 + center_y ** 2) # one circle fully contained in the other if d <= np.abs(camera_radius - radius): @@ -154,5 +146,5 @@ def ring_containment(radius, center_x, center_y, camera_radius): if d > (radius + camera_radius): return 0.0 - a = (radius**2 - camera_radius**2 + d**2) / (2 * d) + a = (radius ** 2 - camera_radius ** 2 + d ** 2) / (2 * d) return np.arccos(a / radius) / np.pi diff --git a/ctapipe/image/muon/intensity_fitter.py b/ctapipe/image/muon/intensity_fitter.py index c670f201b34..dc0d3cd183b 100644 --- a/ctapipe/image/muon/intensity_fitter.py +++ b/ctapipe/image/muon/intensity_fitter.py @@ -46,7 +46,7 @@ def chord_length(radius, rho, phi): scalar = np.isscalar(phi) phi = np.array(phi, ndmin=1, copy=False) - chord = 1 - (rho**2 * np.sin(phi)**2) + chord = 1 - (rho ** 2 * np.sin(phi) ** 2) valid = chord >= 0 if rho <= 1.0: @@ -77,11 +77,7 @@ def intersect_circle(mirror_radius, r, angle, hole_radius=0): float: length from impact point to mirror edge """ - mirror_length = chord_length( - mirror_radius, - (r / mirror_radius), - angle - ) + mirror_length = chord_length(mirror_radius, (r / mirror_radius), angle) if hole_radius == 0: return mirror_length @@ -91,9 +87,9 @@ def intersect_circle(mirror_radius, r, angle, hole_radius=0): def pixels_on_ring(radius, pixel_diameter): - '''Calculate number of pixels of diameter ``pixel_diameter`` on the circumference + """Calculate number of pixels of diameter ``pixel_diameter`` on the circumference of a circle with radius ``radius`` - ''' + """ circumference = 2 * np.pi * radius n_pixels = u.Quantity(circumference / pixel_diameter) return int(n_pixels.to_value(u.dimensionless_unscaled)) @@ -104,7 +100,15 @@ def linspace_two_pi(n_points): return np.linspace(-np.pi, np.pi, n_points) -def create_profile(mirror_radius, hole_radius, impact_parameter, radius, phi, pixel_diameter, oversampling=3): +def create_profile( + mirror_radius, + hole_radius, + impact_parameter, + radius, + phi, + pixel_diameter, + oversampling=3, +): """ Perform intersection over all angles and return length @@ -128,7 +132,7 @@ def create_profile(mirror_radius, hole_radius, impact_parameter, radius, phi, pi ang = phi + linspace_two_pi(pixels_on_circle * oversampling) length = intersect_circle(mirror_radius, impact_parameter, ang, hole_radius) - length = correlate1d(length, np.ones(oversampling), mode='wrap', axis=0) + length = correlate1d(length, np.ones(oversampling), mode="wrap", axis=0) length /= oversampling return ang, length @@ -150,7 +154,7 @@ def image_prediction( min_lambda=300 * u.nm, max_lambda=600 * u.nm, ): - ''' + """ Parameters ---------- impact_parameter: quantity[length] @@ -172,7 +176,7 @@ def image_prediction( ------- ndarray: Predicted signal - ''' + """ return image_prediction_no_units( mirror_radius.to_value(u.m), hole_radius.to_value(u.m), @@ -205,7 +209,7 @@ def image_prediction_no_units( pixel_diameter_rad, oversampling=3, min_lambda_m=300e-9, - max_lambda_m=600e-9 + max_lambda_m=600e-9, ): """Function for producing the expected image for a given set of trial muon parameters without using astropy units but expecting the input to @@ -221,16 +225,23 @@ def 
image_prediction_no_units( # Produce smoothed muon profile ang_prof, profile = create_profile( - mirror_radius_m, hole_radius_m, impact_parameter_m, - radius_rad, phi_rad, pixel_diameter_rad, oversampling=oversampling, + mirror_radius_m, + hole_radius_m, + impact_parameter_m, + radius_rad, + phi_rad, + pixel_diameter_rad, + oversampling=oversampling, ) # Produce gaussian weight for each pixel given ring width - radial_dist = np.sqrt(dx**2 + dy**2) + radial_dist = np.sqrt(dx ** 2 + dy ** 2) # The weight is the integral of the ring's radial gaussian profile inside the # ring's width delta = pixel_diameter_rad / 2 - cdfs = norm.cdf([radial_dist + delta, radial_dist - delta], radius_rad, ring_width_rad) + cdfs = norm.cdf( + [radial_dist + delta, radial_dist - delta], radius_rad, ring_width_rad + ) gauss = cdfs[0] - cdfs[1] # interpolate profile to find prediction for each pixel @@ -240,7 +251,7 @@ def image_prediction_no_units( # get total number of photons per pixel # ^ would be per radian, but no need to put it here, would anyway cancel out below - pred *= alpha * (min_lambda_m**-1 - max_lambda_m**-1) + pred *= alpha * (min_lambda_m ** -1 - max_lambda_m ** -1) pred *= pixel_diameter_rad / radius_rad # multiply by angle (in radians) subtended by pixel width as seen from ring center @@ -285,9 +296,9 @@ def calc_likelihood(image, pred, spe_width, ped): """ - sq = 1 / np.sqrt(2 * np.pi * (ped**2 + pred * (1 + spe_width**2))) - diff = (image - pred)**2 - denom = 2 * (ped**2 + pred * (1 + spe_width**2)) + sq = 1 / np.sqrt(2 * np.pi * (ped ** 2 + pred * (1 + spe_width ** 2))) + diff = (image - pred) ** 2 + denom = 2 * (ped ** 2 + pred * (1 + spe_width ** 2)) expo = np.exp(-diff / denom) + 1e-16 # add small epsilon to avoid nans log_value = sq * expo @@ -307,14 +318,14 @@ def build_negative_log_likelihood( pedestal, hole_radius=0 * u.m, ): - '''Create an efficient negative log_likelihood function that does + """Create an efficient negative log_likelihood function that does not rely on astropy units internally by defining needed values as closures in this function - ''' + """ # get all the neeed values and transform them into appropriate units optics = telescope_description.optics - mirror_area = optics.mirror_area.to_value(u.m**2) + mirror_area = optics.mirror_area.to_value(u.m ** 2) mirror_radius = np.sqrt(mirror_area / np.pi) focal_length = optics.equivalent_focal_length @@ -342,7 +353,7 @@ def negative_log_likelihood( center_y, radius, ring_width, - optical_efficiency_muon + optical_efficiency_muon, ): """ Likelihood function to be called by minimizer @@ -394,9 +405,11 @@ def negative_log_likelihood( # scale prediction by optical efficiency of array prediction *= optical_efficiency_muon - sq = 1 / np.sqrt(2 * np.pi * (pedestal**2 + prediction * (1 + spe_width**2))) - diff = (image - prediction)**2 - denom = 2 * (pedestal**2 + prediction * (1 + spe_width**2)) + sq = 1 / np.sqrt( + 2 * np.pi * (pedestal ** 2 + prediction * (1 + spe_width ** 2)) + ) + diff = (image - prediction) ** 2 + denom = 2 * (pedestal ** 2 + prediction * (1 + spe_width ** 2)) expo = np.exp(-diff / denom) + 1e-16 # add small epsilon to avoid nans value = sq * expo @@ -410,19 +423,19 @@ def create_initial_guess(center_x, center_y, radius, telescope_description): optics = telescope_description.optics focal_length = optics.equivalent_focal_length.to_value(u.m) - pixel_area = geometry.pix_area[0].to_value(u.m**2) + pixel_area = geometry.pix_area[0].to_value(u.m ** 2) pixel_radius = np.sqrt(pixel_area / np.pi) / focal_length - 
mirror_radius = np.sqrt(optics.mirror_area.to_value(u.m**2) / np.pi) + mirror_radius = np.sqrt(optics.mirror_area.to_value(u.m ** 2) / np.pi) initial_guess = {} - initial_guess['impact_parameter'] = mirror_radius / 2 - initial_guess['phi'] = 0 - initial_guess['radius'] = radius.to_value(u.rad) - initial_guess['center_x'] = center_x.to_value(u.rad) - initial_guess['center_y'] = center_y.to_value(u.rad) - initial_guess['ring_width'] = 3 * pixel_radius - initial_guess['optical_efficiency_muon'] = 0.1 + initial_guess["impact_parameter"] = mirror_radius / 2 + initial_guess["phi"] = 0 + initial_guess["radius"] = radius.to_value(u.rad) + initial_guess["center_x"] = center_x.to_value(u.rad) + initial_guess["center_y"] = center_y.to_value(u.rad) + initial_guess["ring_width"] = 3 * pixel_radius + initial_guess["optical_efficiency_muon"] = 0.1 return initial_guess @@ -443,24 +456,18 @@ class MuonIntensityFitter(TelescopeComponent): hole_radius_m = FloatTelescopeParameter( help="Hole radius of the reflector in m", default_value=[ - ('type', 'LST_*', 0.308), - ('type', 'MST_*', 0.244), - ('type', 'SST_1M_*', 0.130), - ] + ("type", "LST_*", 0.308), + ("type", "MST_*", 0.244), + ("type", "SST_1M_*", 0.130), + ], ).tag(config=True) oversampling = IntTelescopeParameter( - help='Oversampling for the line integration', default_value=3 + help="Oversampling for the line integration", default_value=3 ).tag(config=True) def __call__( - self, - tel_id, - center_x, - center_y, - radius, - image, - pedestal, + self, tel_id, center_x, center_y, radius, image, pedestal, ): """ @@ -486,12 +493,13 @@ def __call__( telescope = self.subarray.tel[tel_id] if telescope.optics.num_mirrors != 1: raise NotImplementedError( - 'Currently only single mirror telescopes' - f' are supported in {self.__class__.__name__}' + "Currently only single mirror telescopes" + f" are supported in {self.__class__.__name__}" ) negative_log_likelihood = build_negative_log_likelihood( - image, telescope, + image, + telescope, oversampling=self.oversampling.tel[tel_id], min_lambda=self.min_lambda_m.tel[tel_id] * u.m, max_lambda=self.max_lambda_m.tel[tel_id] * u.m, @@ -500,24 +508,22 @@ def __call__( hole_radius=self.hole_radius_m.tel[tel_id] * u.m, ) - initial_guess = create_initial_guess( - center_x, center_y, radius, telescope, - ) + initial_guess = create_initial_guess(center_x, center_y, radius, telescope,) step_sizes = {} - step_sizes['error_impact_parameter'] = 0.5 - step_sizes['error_phi'] = np.deg2rad(0.5) - step_sizes['error_ring_width'] = 0.001 * radius.to_value(u.rad) - step_sizes['error_optical_efficiency_muon'] = 0.05 + step_sizes["error_impact_parameter"] = 0.5 + step_sizes["error_phi"] = np.deg2rad(0.5) + step_sizes["error_ring_width"] = 0.001 * radius.to_value(u.rad) + step_sizes["error_optical_efficiency_muon"] = 0.05 constraints = {} - constraints['limit_impact_parameter'] = (0, None) - constraints['limit_phi'] = (-np.pi, np.pi) - constraints['fix_radius'] = True - constraints['fix_center_x'] = True - constraints['fix_center_y'] = True - constraints['limit_ring_width'] = (0., None) - constraints['limit_optical_efficiency_muon'] = (0., None) + constraints["limit_impact_parameter"] = (0, None) + constraints["limit_phi"] = (-np.pi, np.pi) + constraints["fix_radius"] = True + constraints["fix_center_x"] = True + constraints["fix_center_y"] = True + constraints["limit_ring_width"] = (0.0, None) + constraints["limit_optical_efficiency_muon"] = (0.0, None) # Create Minuit object with first guesses at parameters # strip away the units as 
Minuit doesnt like them @@ -540,9 +546,9 @@ def __call__( result = minuit.values return MuonEfficiencyContainer( - impact=result['impact_parameter'] * u.m, - impact_x=result['impact_parameter'] * np.cos(result['phi']) * u.m, - impact_y=result['impact_parameter'] * np.sin(result['phi']) * u.m, - width=u.Quantity(np.rad2deg(result['ring_width']), u.deg), - optical_efficiency=result['optical_efficiency_muon'], + impact=result["impact_parameter"] * u.m, + impact_x=result["impact_parameter"] * np.cos(result["phi"]) * u.m, + impact_y=result["impact_parameter"] * np.sin(result["phi"]) * u.m, + width=u.Quantity(np.rad2deg(result["ring_width"]), u.deg), + optical_efficiency=result["optical_efficiency_muon"], ) diff --git a/ctapipe/image/muon/ring_fitter.py b/ctapipe/image/muon/ring_fitter.py index a8d9c6c0710..5483a8653b4 100644 --- a/ctapipe/image/muon/ring_fitter.py +++ b/ctapipe/image/muon/ring_fitter.py @@ -46,5 +46,5 @@ def __call__(self, x, y, img, mask): center_y=center_y, radius=radius, center_phi=np.arctan2(center_y, center_x), - center_distance=np.sqrt(center_x**2 + center_y**2), + center_distance=np.sqrt(center_x ** 2 + center_y ** 2), ) diff --git a/ctapipe/image/muon/tests/test_intensity_fit.py b/ctapipe/image/muon/tests/test_intensity_fit.py index e86812203b3..95b7fb01b82 100644 --- a/ctapipe/image/muon/tests/test_intensity_fit.py +++ b/ctapipe/image/muon/tests/test_intensity_fit.py @@ -22,13 +22,14 @@ def test_chord_length(): def test_muon_efficiency_fit(): from ctapipe.instrument import TelescopeDescription, SubarrayDescription from ctapipe.coordinates import TelescopeFrame, CameraFrame - from ctapipe.image.muon.intensity_fitter import image_prediction, MuonIntensityFitter - - telescope = TelescopeDescription.from_name('LST', 'LSTCam') - subarray = SubarrayDescription( - 'LSTMono', {0: [0, 0, 0] * u.m}, {0: telescope}, + from ctapipe.image.muon.intensity_fitter import ( + image_prediction, + MuonIntensityFitter, ) + telescope = TelescopeDescription.from_name("LST", "LSTCam") + subarray = SubarrayDescription("LSTMono", {0: [0, 0, 0] * u.m}, {0: telescope},) + center_x = 0.8 * u.deg center_y = 0.4 * u.deg radius = 1.2 * u.deg @@ -40,7 +41,13 @@ def test_muon_efficiency_fit(): focal_length = telescope.optics.equivalent_focal_length geom = telescope.camera.geometry mirror_radius = np.sqrt(telescope.optics.mirror_area / np.pi) - pixel_diameter = 2 * u.rad * (np.sqrt(geom.pix_area / np.pi) / focal_length).to_value(u.dimensionless_unscaled) + pixel_diameter = ( + 2 + * u.rad + * (np.sqrt(geom.pix_area / np.pi) / focal_length).to_value( + u.dimensionless_unscaled + ) + ) tel = CameraFrame( x=geom.pix_x, @@ -62,7 +69,7 @@ def test_muon_efficiency_fit(): ring_width=ring_width, pixel_x=x, pixel_y=y, - pixel_diameter=pixel_diameter[0] + pixel_diameter=pixel_diameter[0], ) fitter = MuonIntensityFitter(subarray=subarray) @@ -72,7 +79,7 @@ def test_muon_efficiency_fit(): center_y=center_y, radius=radius, image=image * efficiency, - pedestal=np.full_like(image, 1.1) + pedestal=np.full_like(image, 1.1), ) assert u.isclose(result.impact, impact_parameter, rtol=0.05) @@ -84,10 +91,8 @@ def test_scts(): from ctapipe.instrument import TelescopeDescription, SubarrayDescription from ctapipe.image.muon.intensity_fitter import MuonIntensityFitter - telescope = TelescopeDescription.from_name('SST-ASTRI', 'CHEC') - subarray = SubarrayDescription( - 'ssts', {0: [0, 0, 0] * u.m}, {0: telescope}, - ) + telescope = TelescopeDescription.from_name("SST-ASTRI", "CHEC") + subarray = SubarrayDescription("ssts", {0: 
[0, 0, 0] * u.m}, {0: telescope},) fitter = MuonIntensityFitter(subarray=subarray) with pytest.raises(NotImplementedError): @@ -97,10 +102,10 @@ def test_scts(): center_y=2 * u.deg, radius=1.3 * u.deg, image=np.zeros(telescope.camera.geometry.n_pixels), - pedestal=np.zeros(telescope.camera.geometry.n_pixels) + pedestal=np.zeros(telescope.camera.geometry.n_pixels), ) -if __name__ == '__main__': +if __name__ == "__main__": # test_chord_length() test_muon_efficiency_fit() diff --git a/ctapipe/image/muon/tests/test_muon_features.py b/ctapipe/image/muon/tests/test_muon_features.py index c4cc4ecb346..f8557b24bde 100644 --- a/ctapipe/image/muon/tests/test_muon_features.py +++ b/ctapipe/image/muon/tests/test_muon_features.py @@ -13,24 +13,21 @@ def test_ring_containment(): ring_center_x = 0 * u.deg ring_center_y = 0 * u.deg containment = ring_containment( - ring_radius, ring_center_x, ring_center_y, - cam_radius, + ring_radius, ring_center_x, ring_center_y, cam_radius, ) assert containment == 1.0 ring_center_x = 0 * u.deg ring_center_y = cam_radius containment = ring_containment( - ring_radius, ring_center_x, ring_center_y, - cam_radius, + ring_radius, ring_center_x, ring_center_y, cam_radius, ) assert 0.4 <= containment <= 0.5 ring_center_x = 0 * u.deg ring_center_y = cam_radius + 1.1 * ring_radius containment = ring_containment( - ring_radius, ring_center_x, ring_center_y, - cam_radius, + ring_radius, ring_center_x, ring_center_y, cam_radius, ) assert containment == 0.0 @@ -41,15 +38,14 @@ def test_ring_completeness(): x = np.cos(angle_ring) * u.m y = np.sin(angle_ring) * u.m pe = np.random.uniform(0, 100, len(x)) - ring_radius = 1. * u.m + ring_radius = 1.0 * u.m - ring_center_x = 0. * u.m - ring_center_y = 0. * u.m + ring_center_x = 0.0 * u.m + ring_center_y = 0.0 * u.m ring_comp = ring_completeness( - x, y, pe, ring_radius, - ring_center_x, ring_center_y, - 30, 30) + x, y, pe, ring_radius, ring_center_x, ring_center_y, 30, 30 + ) assert ring_comp <= 1 assert ring_comp >= 0 diff --git a/ctapipe/image/pixel_likelihood.py b/ctapipe/image/pixel_likelihood.py index ed392f99898..3a1cd76b8dc 100644 --- a/ctapipe/image/pixel_likelihood.py +++ b/ctapipe/image/pixel_likelihood.py @@ -37,9 +37,13 @@ from scipy.special import factorial __all__ = [ - 'poisson_likelihood_gaussian', 'poisson_likelihood_full', - 'poisson_likelihood', 'mean_poisson_likelihood_gaussian', - 'mean_poisson_likelihood_full', 'PixelLikelihoodError', 'chi_squared' + "poisson_likelihood_gaussian", + "poisson_likelihood_full", + "poisson_likelihood", + "mean_poisson_likelihood_gaussian", + "mean_poisson_likelihood_full", + "PixelLikelihoodError", + "chi_squared", ] @@ -72,13 +76,10 @@ def poisson_likelihood_gaussian(image, prediction, spe_width, ped): spe_width = np.asarray(spe_width) ped = np.asarray(ped) - sq = 1. 
/ np.sqrt( - 2 * np.pi - * (ped**2 + prediction * (1 + spe_width**2)) - ) + sq = 1.0 / np.sqrt(2 * np.pi * (ped ** 2 + prediction * (1 + spe_width ** 2))) - diff = (image - prediction)**2 - denom = 2 * (ped**2 + prediction * (1 + spe_width**2)) + diff = (image - prediction) ** 2 + denom = 2 * (ped ** 2 + prediction * (1 + spe_width ** 2)) expo = np.asarray(np.exp(-1 * diff / denom)) # If we are outside of the range of datatype, fix to lower bound @@ -88,8 +89,9 @@ def poisson_likelihood_gaussian(image, prediction, spe_width, ped): return -2 * np.log(sq * expo) -def poisson_likelihood_full(image, prediction, spe_width, ped, - width_fac=3, dtype=np.float32): +def poisson_likelihood_full( + image, prediction, spe_width, ped, width_fac=3, dtype=np.float32 +): """ Calculate likelihood of prediction given the measured signal, full numerical integration from de Naurois et al 2009. @@ -126,9 +128,11 @@ def poisson_likelihood_full(image, prediction, spe_width, ped, if image.shape != prediction.shape: raise PixelLikelihoodError( - ("Image and prediction arrays" - " have different dimensions"), "Image shape: ", image.shape[0], - "Prediction shape: ", prediction.shape[0] + ("Image and prediction arrays" " have different dimensions"), + "Image shape: ", + image.shape[0], + "Prediction shape: ", + prediction.shape[0], ) max_val = np.max(image) width = ped * ped + max_val * spe_width * spe_width @@ -141,15 +145,9 @@ def poisson_likelihood_full(image, prediction, spe_width, ped, pe_summed = np.arange(max_sum) # Need to decide how range is determined pe_factorial = factorial(pe_summed) - first_term = ( - prediction**pe_summed[:, np.newaxis] - * np.exp(-1 * prediction) - ) - first_term /= ( - pe_factorial[:, np.newaxis] * np.sqrt( - math.pi * 2 - * (ped * ped + pe_summed[:, np.newaxis] * spe_width * spe_width) - ) + first_term = prediction ** pe_summed[:, np.newaxis] * np.exp(-1 * prediction) + first_term /= pe_factorial[:, np.newaxis] * np.sqrt( + math.pi * 2 * (ped * ped + pe_summed[:, np.newaxis] * spe_width * spe_width) ) # Throw error if we get NaN in likelihood @@ -164,9 +162,8 @@ def poisson_likelihood_full(image, prediction, spe_width, ped, ) # Should not have any porblems here with NaN that have not bee seens - second_term = ( - (image - pe_summed[:, np.newaxis]) * - (image - pe_summed[:, np.newaxis]) + second_term = (image - pe_summed[:, np.newaxis]) * ( + image - pe_summed[:, np.newaxis] ) second_term_denom = 2 * ( ped * ped + spe_width * spe_width * pe_summed[:, np.newaxis] @@ -191,7 +188,7 @@ def poisson_likelihood( ped, pedestal_safety=1.5, width_fac=3, - dtype=np.float32 + dtype=np.float32, ): """ Safe implementation of the poissonian likelihood implementation, @@ -244,8 +241,12 @@ def poisson_likelihood( if np.any(poisson_pix): like[poisson_pix] = poisson_likelihood_full( - image[poisson_pix], prediction[poisson_pix], spe_width, ped, - width_fac, dtype + image[poisson_pix], + prediction[poisson_pix], + spe_width, + ped, + width_fac, + dtype, ) if np.any(gaus_pix): like[gaus_pix] = poisson_likelihood_gaussian( @@ -327,15 +328,13 @@ def mean_poisson_likelihood_full(prediction, spe_width, ped): width = np.sqrt(width) for p in range(len(prediction)): - int_range = ( - prediction[p] - 10 * width[p], prediction[p] + 10 * width[p] - ) + int_range = (prediction[p] - 10 * width[p], prediction[p] + 10 * width[p]) mean_like[p] = quad( _integral_poisson_likelihood_full, int_range[0], int_range[1], args=(prediction[p], spe_width[p], ped[p]), - epsrel=0.05 + epsrel=0.05, )[0] return mean_like @@ -367,12 
+366,11 @@ def chi_squared(image, prediction, ped, error_factor=2.9): if image.shape is not prediction.shape: PixelLikelihoodError( "Image and prediction arrays have different dimensions Image " - "shape: {} Prediction shape: {}" - .format(image.shape, prediction.shape) + "shape: {} Prediction shape: {}".format(image.shape, prediction.shape) ) chi_square = (image - prediction) * (image - prediction) chi_square /= ped + 0.5 * (image - prediction) - chi_square *= 1. / error_factor + chi_square *= 1.0 / error_factor return chi_square diff --git a/ctapipe/image/reducer.py b/ctapipe/image/reducer.py index c0ec9051524..5a8c187cb9f 100644 --- a/ctapipe/image/reducer.py +++ b/ctapipe/image/reducer.py @@ -10,9 +10,9 @@ from ctapipe.image.cleaning import dilate __all__ = [ - 'DataVolumeReducer', - 'NullDataVolumeReducer', - 'TailCutsDataVolumeReducer', + "DataVolumeReducer", + "NullDataVolumeReducer", + "TailCutsDataVolumeReducer", ] @@ -22,12 +22,7 @@ class DataVolumeReducer(TelescopeComponent): """ def __init__( - self, - config=None, - parent=None, - subarray=None, - image_extractor=None, - **kwargs + self, config=None, parent=None, subarray=None, image_extractor=None, **kwargs ): """ Parameters @@ -126,14 +121,14 @@ class TailCutsDataVolumeReducer(DataVolumeReducer): with ctapipe module dilate until no new pixels were added. 3) Adding new pixels with dilate to get more conservative. """ + n_end_dilates = IntTelescopeParameter( - default_value=1, - help="Number of how many times to dilate at the end." + default_value=1, help="Number of how many times to dilate at the end." ).tag(config=True) do_boundary_dilation = BoolTelescopeParameter( default_value=True, help="If set to 'False', the iteration steps in 2) are skipped and" - "normal TailcutCleaning is used." 
+ "normal TailcutCleaning is used.", ).tag(config=True) def select_pixels(self, waveforms, telid=None, selected_gain_channel=None): diff --git a/ctapipe/image/statistics.py b/ctapipe/image/statistics.py index 59a080eec50..e2fd7b94398 100644 --- a/ctapipe/image/statistics.py +++ b/ctapipe/image/statistics.py @@ -3,7 +3,9 @@ from ..containers import StatisticsContainer -def descriptive_statistics(values, container_class=StatisticsContainer) -> StatisticsContainer: +def descriptive_statistics( + values, container_class=StatisticsContainer +) -> StatisticsContainer: """ compute intensity statistics of an image """ return container_class( max=values.max(), diff --git a/ctapipe/image/tests/test_concentration.py b/ctapipe/image/tests/test_concentration.py index 2dcc18f45bd..d0f0dc398aa 100644 --- a/ctapipe/image/tests/test_concentration.py +++ b/ctapipe/image/tests/test_concentration.py @@ -6,7 +6,7 @@ def test_concentration(): - geom, image, clean_mask = create_sample_image('30d') + geom, image, clean_mask = create_sample_image("30d") hillas = hillas_parameters(geom[clean_mask], image[clean_mask]) @@ -19,7 +19,7 @@ def test_concentration(): @pytest.mark.filterwarnings("error") def test_width_0(): - geom, image, clean_mask = create_sample_image('30d') + geom, image, clean_mask = create_sample_image("30d") hillas = hillas_parameters(geom[clean_mask], image[clean_mask]) hillas.width = 0 * u.m @@ -28,5 +28,5 @@ def test_width_0(): assert conc.core == 0 -if __name__ == '__main__': +if __name__ == "__main__": test_concentration() diff --git a/ctapipe/image/tests/test_extractor.py b/ctapipe/image/tests/test_extractor.py index 1ad611c5f80..932ddfc98ce 100644 --- a/ctapipe/image/tests/test_extractor.py +++ b/ctapipe/image/tests/test_extractor.py @@ -275,9 +275,14 @@ def test_two_pass_window_sum(subarray): max_charges = [10, 100, 1000] for minCharge, maxCharge in zip(min_charges, max_charges): toymodel = get_test_toymodel(subarray, minCharge, maxCharge) - waveforms, subarray, telid, selected_gain_channel, true_charge, true_time = ( - toymodel - ) + ( + waveforms, + subarray, + telid, + selected_gain_channel, + true_charge, + true_time, + ) = toymodel charge, pulse_time = extractor(waveforms, telid, selected_gain_channel) assert_allclose(charge, true_charge, rtol=0.07) assert_allclose(pulse_time, true_time, rtol=0.07) @@ -334,14 +339,14 @@ def test_extractor_tel_param(toymodel): assert extractor.window_width.tel[2] == n_samples // 2 -@pytest.mark.parametrize('Extractor', non_abstract_children(ImageExtractor)) +@pytest.mark.parametrize("Extractor", non_abstract_children(ImageExtractor)) def test_dtype(Extractor, subarray): tel_id = 1 n_pixels = subarray.tel[tel_id].camera.geometry.n_pixels selected_gain_channel = np.zeros(n_pixels, dtype=int) - waveforms = np.ones((n_pixels, 50), dtype='float64') + waveforms = np.ones((n_pixels, 50), dtype="float64") extractor = Extractor(subarray=subarray) charge, peak_time = extractor(waveforms, tel_id, selected_gain_channel) assert charge.dtype == np.float32 diff --git a/ctapipe/image/tests/test_hillas.py b/ctapipe/image/tests/test_hillas.py index 018ad7b4428..e8597f0b75f 100644 --- a/ctapipe/image/tests/test_hillas.py +++ b/ctapipe/image/tests/test_hillas.py @@ -13,26 +13,22 @@ def create_sample_image( - psi='-30d', - x=0.2 * u.m, - y=0.3 * u.m, - width=0.05 * u.m, - length=0.15 * u.m, - intensity=1500 + psi="-30d", + x=0.2 * u.m, + y=0.3 * u.m, + width=0.05 * u.m, + length=0.15 * u.m, + intensity=1500, ): seed(10) - geom = CameraGeometry.from_name('LSTCam') + geom = 
CameraGeometry.from_name("LSTCam") # make a toymodel shower model model = toymodel.Gaussian(x=x, y=y, width=width, length=length, psi=psi) # generate toymodel image in camera for this shower model. - image, _, _ = model.generate_image( - geom, - intensity=1500, - nsb_level_pe=3, - ) + image, _, _ = model.generate_image(geom, intensity=1500, nsb_level_pe=3,) # calculate pixels likely containing signal clean_mask = tailcuts_clean(geom, image, 10, 5) @@ -40,7 +36,7 @@ def create_sample_image( return geom, image, clean_mask -def create_sample_image_zeros(psi='-30d'): +def create_sample_image_zeros(psi="-30d"): geom, image, clean_mask = create_sample_image(psi) @@ -50,7 +46,7 @@ def create_sample_image_zeros(psi='-30d'): return geom, image -def create_sample_image_selected_pixel(psi='-30d'): +def create_sample_image_selected_pixel(psi="-30d"): geom, image, clean_mask = create_sample_image(psi) return geom[clean_mask], image[clean_mask] @@ -85,7 +81,7 @@ def test_hillas_selected(): def test_hillas_failure(): - geom, image = create_sample_image_zeros(psi='0d') + geom, image = create_sample_image_zeros(psi="0d") blank_image = zeros_like(image) with pytest.raises(HillasParameterizationError): @@ -93,7 +89,7 @@ def test_hillas_failure(): def test_hillas_masked_array(): - geom, image, clean_mask = create_sample_image(psi='0d') + geom, image, clean_mask = create_sample_image(psi="0d") image_zeros = image.copy() image_zeros[~clean_mask] = 0 @@ -106,7 +102,7 @@ def test_hillas_masked_array(): def test_hillas_container(): - geom, image = create_sample_image_zeros(psi='0d') + geom, image = create_sample_image_zeros(psi="0d") params = hillas_parameters(geom, image) assert isinstance(params, HillasParametersContainer) @@ -115,7 +111,7 @@ def test_hillas_container(): def test_with_toy(): np.random.seed(42) - geom = CameraGeometry.from_name('LSTCam') + geom = CameraGeometry.from_name("LSTCam") width = 0.03 * u.m length = 0.15 * u.m @@ -123,17 +119,13 @@ def test_with_toy(): xs = u.Quantity([0.5, 0.5, -0.5, -0.5], u.m) ys = u.Quantity([0.5, -0.5, 0.5, -0.5], u.m) - psis = Angle([-90, -45, 0, 45, 90], unit='deg') + psis = Angle([-90, -45, 0, 45, 90], unit="deg") for x, y in zip(xs, ys): for psi in psis: # make a toymodel shower model - model = toymodel.Gaussian( - x=x, y=y, - width=width, length=length, - psi=psi, - ) + model = toymodel.Gaussian(x=x, y=y, width=width, length=length, psi=psi,) image, signal, noise = model.generate_image( geom, intensity=intensity, nsb_level_pe=5, @@ -146,10 +138,9 @@ def test_with_toy(): assert u.isclose(result.width, width, rtol=0.1) assert u.isclose(result.length, length, rtol=0.1) - assert ( - (result.psi.to_value(u.deg) == approx(psi.deg, abs=2)) - or abs(result.psi.to_value(u.deg) - psi.deg) == approx(180.0, abs=2) - ) + assert (result.psi.to_value(u.deg) == approx(psi.deg, abs=2)) or abs( + result.psi.to_value(u.deg) - psi.deg + ) == approx(180.0, abs=2) assert signal.sum() == result.intensity @@ -157,7 +148,7 @@ def test_with_toy(): def test_skewness(): np.random.seed(42) - geom = CameraGeometry.from_name('LSTCam') + geom = CameraGeometry.from_name("LSTCam") width = 0.03 * u.m length = 0.15 * u.m @@ -165,22 +156,16 @@ def test_skewness(): xs = u.Quantity([0.5, 0.5, -0.5, -0.5], u.m) ys = u.Quantity([0.5, -0.5, 0.5, -0.5], u.m) - psis = Angle([-90, -45, 0, 45, 90], unit='deg') + psis = Angle([-90, -45, 0, 45, 90], unit="deg") skews = [0, 0.3, 0.6] for x, y, psi, skew in itertools.product(xs, ys, psis, skews): # make a toymodel shower model model = toymodel.SkewedGaussian( 
- x=x, y=y, - width=width, - length=length, - psi=psi, - skewness=skew, + x=x, y=y, width=width, length=length, psi=psi, skewness=skew, ) - _, signal, _ = model.generate_image( - geom, intensity=intensity, nsb_level_pe=5, - ) + _, signal, _ = model.generate_image(geom, intensity=intensity, nsb_level_pe=5,) result = hillas_parameters(geom, signal) @@ -206,7 +191,7 @@ def test_skewness(): @pytest.mark.filterwarnings("error") def test_straight_line_width_0(): - ''' Test that hillas_parameters.width is 0 for a straight line of pixels ''' + """ Test that hillas_parameters.width is 0 for a straight line of pixels """ # three pixels in a straight line long = np.array([0, 1, 2]) * 0.01 trans = np.zeros(len(long)) @@ -221,12 +206,12 @@ def test_straight_line_width_0(): y = dy - np.sin(psi) * long + np.cos(psi) * trans geom = CameraGeometry( - camera_name='testcam', + camera_name="testcam", pix_id=pix_id, pix_x=x * u.m, pix_y=y * u.m, - pix_type='hexagonal', - pix_area=1 * u.m**2, + pix_type="hexagonal", + pix_area=1 * u.m ** 2, ) img = np.random.poisson(5, size=len(long)) @@ -240,12 +225,12 @@ def test_single_pixel(): x, y = np.meshgrid(x, y) geom = CameraGeometry( - camera_name='testcam', + camera_name="testcam", pix_id=np.arange(9), pix_x=x.ravel() * u.cm, pix_y=y.ravel() * u.cm, - pix_type='rectangular', - pix_area=1 * u.cm**2, + pix_type="rectangular", + pix_area=1 * u.cm ** 2, ) image = np.zeros((3, 3)) diff --git a/ctapipe/image/tests/test_leakage.py b/ctapipe/image/tests/test_leakage.py index a9efa6f85a3..f388b5403d1 100644 --- a/ctapipe/image/tests/test_leakage.py +++ b/ctapipe/image/tests/test_leakage.py @@ -5,7 +5,7 @@ def test_leakage(): from ctapipe.image.leakage import leakage - geom = CameraGeometry.from_name('LSTCam') + geom = CameraGeometry.from_name("LSTCam") img = np.ones(geom.n_pixels) mask = np.ones(len(geom), dtype=bool) diff --git a/ctapipe/image/tests/test_morphology.py b/ctapipe/image/tests/test_morphology.py index 870755d3cad..b7d68e6d946 100644 --- a/ctapipe/image/tests/test_morphology.py +++ b/ctapipe/image/tests/test_morphology.py @@ -5,6 +5,7 @@ def test_number_of_islands(): from ctapipe.image import number_of_islands + # test with LST geometry (1855 pixels) geom = CameraGeometry.from_name("LSTCam") diff --git a/ctapipe/image/tests/test_pixel_likelihood.py b/ctapipe/image/tests/test_pixel_likelihood.py index 85c39b10b50..3ae6aa401ee 100644 --- a/ctapipe/image/tests/test_pixel_likelihood.py +++ b/ctapipe/image/tests/test_pixel_likelihood.py @@ -14,8 +14,9 @@ def test_full_likelihood(): image_small = [0, 1, 2] expectation_small = [1, 1, 1] - full_like_small = poisson_likelihood_full(image_small, expectation_small, - spe, pedestal) + full_like_small = poisson_likelihood_full( + image_small, expectation_small, spe, pedestal + ) exp_diff = full_like_small - np.asarray([2.75630505, 2.62168656, 3.39248449]) exp_diff = np.sum(np.abs(exp_diff)) # Check against known values @@ -24,20 +25,19 @@ def test_full_likelihood(): image_large = [40, 50, 60] expectation_large = [50, 50, 50] - full_like_large = poisson_likelihood_full(image_large, expectation_large, - spe, pedestal) + full_like_large = poisson_likelihood_full( + image_large, expectation_large, spe, pedestal + ) # Check against known values exp_diff = full_like_large - np.asarray([7.45489137, 5.99305388, 7.66226007]) exp_diff = np.sum(np.abs(exp_diff)) assert exp_diff / np.sum(full_like_large) < 1e-4 - gaus_like_large = poisson_likelihood_gaussian(image_large, - expectation_large, - spe, - pedestal) + gaus_like_large = 
poisson_likelihood_gaussian( + image_large, expectation_large, spe, pedestal + ) # Check thats in large signal case the full expectation is equal to the # gaussian approximation (to 5%) - assert np.all(np.abs((full_like_large - gaus_like_large) / full_like_large) - < 0.05) + assert np.all(np.abs((full_like_large - gaus_like_large) / full_like_large) < 0.05) diff --git a/ctapipe/image/tests/test_reducer.py b/ctapipe/image/tests/test_reducer.py index 134b88dab0e..015ed649851 100644 --- a/ctapipe/image/tests/test_reducer.py +++ b/ctapipe/image/tests/test_reducer.py @@ -1,29 +1,21 @@ - import pytest import numpy as np from numpy.testing import assert_array_equal import astropy.units as u from traitlets.config import Config from ctapipe.instrument import SubarrayDescription, TelescopeDescription -from ctapipe.image.reducer import ( - NullDataVolumeReducer, - TailCutsDataVolumeReducer -) +from ctapipe.image.reducer import NullDataVolumeReducer, TailCutsDataVolumeReducer -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def subarray_lst(): telid = 1 subarray = SubarrayDescription( "test array lst", tel_positions={1: np.zeros(3) * u.m, 2: np.ones(3) * u.m}, tel_descriptions={ - 1: TelescopeDescription.from_name( - optics_name="LST", camera_name="LSTCam" - ), - 2: TelescopeDescription.from_name( - optics_name="LST", camera_name="LSTCam" - ), + 1: TelescopeDescription.from_name(optics_name="LST", camera_name="LSTCam"), + 2: TelescopeDescription.from_name(optics_name="LST", camera_name="LSTCam"), }, ) @@ -58,8 +50,7 @@ def test_tailcuts_data_volume_reducer(subarray_lst): waveforms_signal[[10, 8, 6, 5]] = 50 # pixels from dilate at the end in Step 3) - waveforms_signal[[0, 1, 4, 7, 11, 13, 121, 122, - 136, 137, 257, 258, 267, 272]] = 25 + waveforms_signal[[0, 1, 4, 7, 11, 13, 121, 122, 136, 137, 257, 258, 267, 272]] = 25 expected_waveforms = waveforms_signal.copy() @@ -74,16 +65,14 @@ def test_tailcuts_data_volume_reducer(subarray_lst): "picture_threshold_pe": 700.0, "boundary_threshold_pe": 350.0, "min_picture_neighbors": 0, - "keep_isolated_pixels": True + "keep_isolated_pixels": True, }, "n_end_dilates": 1, - "do_boundary_dilation": True + "do_boundary_dilation": True, } } ) - reducer = TailCutsDataVolumeReducer( - config=reduction_param, subarray=subarray - ) + reducer = TailCutsDataVolumeReducer(config=reduction_param, subarray=subarray) reduced_waveforms = waveforms_signal.copy() reduced_waveforms_mask = reducer( waveforms_signal, telid=telid, selected_gain_channel=selected_gain_channel diff --git a/ctapipe/image/tests/test_timing_parameters.py b/ctapipe/image/tests/test_timing_parameters.py index d4d22b4eb52..268a80666f7 100644 --- a/ctapipe/image/tests/test_timing_parameters.py +++ b/ctapipe/image/tests/test_timing_parameters.py @@ -7,6 +7,7 @@ def test_psi_0(): from ctapipe.image import timing_parameters + """ Simple test that gradient fitting gives expected answers for perfect gradient @@ -27,7 +28,7 @@ def test_psi_0(): image=np.ones(geom.n_pixels), peak_time=peak_time, hillas_parameters=hillas, - cleaning_mask=np.ones(geom.n_pixels, dtype=bool) + cleaning_mask=np.ones(geom.n_pixels, dtype=bool), ) # Test we get the values we put in back out again @@ -49,8 +50,9 @@ def test_psi_20(): hillas = HillasParametersContainer(x=0 * u.m, y=0 * u.m, psi=psi) random = np.random.RandomState(1) - peak_time = intercept + grad * (np.cos(psi) * geom.pix_x.value - + np.sin(psi) * geom.pix_y.value) + peak_time = intercept + grad * ( + np.cos(psi) * geom.pix_x.value + np.sin(psi) * 
geom.pix_y.value + ) peak_time += random.normal(0, deviation, geom.n_pixels) timing = timing_parameters( @@ -58,7 +60,7 @@ def test_psi_20(): image=np.ones(geom.n_pixels), peak_time=peak_time, hillas_parameters=hillas, - cleaning_mask=np.ones(geom.n_pixels, dtype=bool) + cleaning_mask=np.ones(geom.n_pixels, dtype=bool), ) # Test we get the values we put in back out again @@ -69,6 +71,7 @@ def test_psi_20(): def test_ignore_negative(): from ctapipe.image import timing_parameters + grad = 2.0 intercept = 1.0 deviation = 0.1 diff --git a/ctapipe/image/tests/test_toy.py b/ctapipe/image/tests/test_toy.py index eae83f604c2..4d2171507ff 100644 --- a/ctapipe/image/tests/test_toy.py +++ b/ctapipe/image/tests/test_toy.py @@ -21,9 +21,7 @@ def test_intensity(): # make a toymodel shower model model = Gaussian(x=x, y=y, width=width, length=length, psi=psi) - _, signal, _ = model.generate_image( - geom, intensity=intensity, nsb_level_pe=5, - ) + _, signal, _ = model.generate_image(geom, intensity=intensity, nsb_level_pe=5,) # test if signal reproduces given cog values assert np.average(geom.pix_x.to_value(u.m), weights=signal) == approx(0.2, rel=0.15) @@ -58,7 +56,9 @@ def test_skewed(): model = SkewedGaussian( x=x, y=y, width=width, length=length, psi=psi, skewness=skewness ) - model.generate_image(geom, intensity=intensity, nsb_level_pe=5,) + model.generate_image( + geom, intensity=intensity, nsb_level_pe=5, + ) a, loc, scale = model._moments_to_parameters() mean, var, skew = skewnorm(a=a, loc=loc, scale=scale).stats(moments="mvs") diff --git a/ctapipe/image/timing.py b/ctapipe/image/timing.py index 9596696be51..6489b8dc0c1 100644 --- a/ctapipe/image/timing.py +++ b/ctapipe/image/timing.py @@ -8,9 +8,7 @@ from .hillas import camera_to_shower_coordinates -__all__ = [ - 'timing_parameters' -] +__all__ = ["timing_parameters"] def timing_parameters(geom, image, peak_time, hillas_parameters, cleaning_mask=None): @@ -50,20 +48,14 @@ def timing_parameters(geom, image, peak_time, hillas_parameters, cleaning_mask=N pix_y = geom.pix_y longi, trans = camera_to_shower_coordinates( - pix_x, - pix_y, - hillas_parameters.x, - hillas_parameters.y, - hillas_parameters.psi + pix_x, pix_y, hillas_parameters.x, hillas_parameters.y, hillas_parameters.psi ) (slope, intercept), cov = np.polyfit( longi.value, peak_time, deg=1, w=np.sqrt(image), cov=True, ) slope_err, intercept_err = np.sqrt(np.diag(cov)) predicted_time = polyval(longi.value, (intercept, slope)) - deviation = np.sqrt( - np.sum((peak_time - predicted_time)**2) / peak_time.size - ) + deviation = np.sqrt(np.sum((peak_time - predicted_time) ** 2) / peak_time.size) return TimingParametersContainer( slope=slope / unit, diff --git a/ctapipe/image/toymodel.py b/ctapipe/image/toymodel.py index 8fa9c05561d..9c391523cba 100644 --- a/ctapipe/image/toymodel.py +++ b/ctapipe/image/toymodel.py @@ -29,7 +29,7 @@ "Gaussian", "SkewedGaussian", "ImageModel", - "obtain_time_image" + "obtain_time_image", ] @@ -148,7 +148,8 @@ def get_waveform(self, charge, time, n_samples): sampled = ( convolved.reshape( (n_pixels, convolved.shape[-1] // self.upsampling, self.upsampling) - ).sum(-1) * self.ref_width_ns # Waveform units: p.e. + ).sum(-1) + * self.ref_width_ns # Waveform units: p.e. 
) return sampled diff --git a/ctapipe/instrument/__init__.py b/ctapipe/instrument/__init__.py index d31f6eae8ca..a8a11a9202c 100644 --- a/ctapipe/instrument/__init__.py +++ b/ctapipe/instrument/__init__.py @@ -7,12 +7,12 @@ __all__ = [ - 'CameraDescription', - 'CameraGeometry', - 'CameraReadout', - 'get_atmosphere_profile_functions', - 'TelescopeDescription', - 'OpticsDescription', - 'SubarrayDescription', - 'guess_telescope', + "CameraDescription", + "CameraGeometry", + "CameraReadout", + "get_atmosphere_profile_functions", + "TelescopeDescription", + "OpticsDescription", + "SubarrayDescription", + "guess_telescope", ] diff --git a/ctapipe/instrument/atmosphere.py b/ctapipe/instrument/atmosphere.py index d1c893e8f41..e0f85c6002e 100644 --- a/ctapipe/instrument/atmosphere.py +++ b/ctapipe/instrument/atmosphere.py @@ -7,10 +7,10 @@ from ctapipe.utils import get_table_dataset -__all__ = ['get_atmosphere_profile_table', 'get_atmosphere_profile_functions'] +__all__ = ["get_atmosphere_profile_table", "get_atmosphere_profile_functions"] -def get_atmosphere_profile_table(atmosphere_name='paranal'): +def get_atmosphere_profile_table(atmosphere_name="paranal"): """ Get an atmosphere profile table @@ -25,14 +25,12 @@ def get_atmosphere_profile_table(atmosphere_name='paranal'): 'altitude' (m), and 'thickness' (g cm-2) as well as others. """ - table_name = f'{atmosphere_name}.atmprof' - table = get_table_dataset(table_name=table_name, - role='dl0.arr.svc.atmosphere') + table_name = f"{atmosphere_name}.atmprof" + table = get_table_dataset(table_name=table_name, role="dl0.arr.svc.atmosphere") return table -def get_atmosphere_profile_functions(atmosphere_name="paranal", - with_units=True): +def get_atmosphere_profile_functions(atmosphere_name="paranal", with_units=True): """ Gives atmospheric profile as a continuous function thickness( altitude), and it's inverse altitude(thickness) in m and g/cm^2 @@ -51,18 +49,19 @@ def get_atmosphere_profile_functions(atmosphere_name="paranal", functions: thickness(alt), alt(thickness) """ tab = get_atmosphere_profile_table(atmosphere_name) - alt = tab['altitude'].to('m') - thick = (tab['thickness']).to("g cm-2") + alt = tab["altitude"].to("m") + thick = (tab["thickness"]).to("g cm-2") alt_to_thickness = interp1d(x=np.array(alt), y=np.array(thick)) thickness_to_alt = interp1d(x=np.array(thick), y=np.array(alt)) if with_units: + def thickness(a): - return Quantity(alt_to_thickness(a.to('m')), 'g cm-2') + return Quantity(alt_to_thickness(a.to("m")), "g cm-2") def altitude(a): - return Quantity(thickness_to_alt(a.to('g cm-2')), 'm') + return Quantity(thickness_to_alt(a.to("g cm-2")), "m") return thickness, altitude diff --git a/ctapipe/instrument/camera/__init__.py b/ctapipe/instrument/camera/__init__.py index e708a2ffc40..6001716dab6 100644 --- a/ctapipe/instrument/camera/__init__.py +++ b/ctapipe/instrument/camera/__init__.py @@ -6,5 +6,5 @@ "CameraDescription", "CameraGeometry", "UnknownPixelShapeWarning", - "CameraReadout" + "CameraReadout", ] diff --git a/ctapipe/instrument/camera/description.py b/ctapipe/instrument/camera/description.py index a8c7f55864e..1ecf6f747c5 100644 --- a/ctapipe/instrument/camera/description.py +++ b/ctapipe/instrument/camera/description.py @@ -6,9 +6,7 @@ from .readout import CameraReadout from ctapipe.utils import find_all_matching_datasets -__all__ = [ - "CameraDescription" -] +__all__ = ["CameraDescription"] class CameraDescription: @@ -52,7 +50,7 @@ def get_known_camera_names(cls): list(str) """ - pattern = 
r'(.*)\.camgeom\.fits(\.gz)?' + pattern = r"(.*)\.camgeom\.fits(\.gz)?" return find_all_matching_datasets(pattern, regexp_group=1) @classmethod diff --git a/ctapipe/instrument/camera/geometry.py b/ctapipe/instrument/camera/geometry.py index d94d9c8df5a..98b8b759ab5 100644 --- a/ctapipe/instrument/camera/geometry.py +++ b/ctapipe/instrument/camera/geometry.py @@ -18,10 +18,7 @@ from ctapipe.coordinates import CameraFrame -__all__ = [ - "CameraGeometry", - "UnknownPixelShapeWarning" -] +__all__ = ["CameraGeometry", "UnknownPixelShapeWarning"] logger = logging.getLogger(__name__) @@ -70,14 +67,27 @@ class CameraGeometry: _geometry_cache = {} # dictionary CameraGeometry instances for speed - def __init__(self, camera_name, pix_id, pix_x, pix_y, pix_area, pix_type, - pix_rotation="0d", cam_rotation="0d", - neighbors=None, apply_derotation=True, frame=None): + def __init__( + self, + camera_name, + pix_id, + pix_x, + pix_y, + pix_area, + pix_type, + pix_rotation="0d", + cam_rotation="0d", + neighbors=None, + apply_derotation=True, + frame=None, + ): if pix_x.ndim != 1 or pix_y.ndim != 1: - raise ValueError(f'Pixel coordinates must be 1 dimensional, got {pix_x.ndim}') + raise ValueError( + f"Pixel coordinates must be 1 dimensional, got {pix_x.ndim}" + ) - assert len(pix_x) == len(pix_y), 'pix_x and pix_y must have same length' + assert len(pix_x) == len(pix_y), "pix_x and pix_y must have same length" self.n_pixels = len(pix_x) self.camera_name = camera_name self.pix_id = pix_id @@ -100,9 +110,7 @@ def __init__(self, camera_name, pix_id, pix_x, pix_y, pix_area, pix_type, self._neighbors = csr_matrix(neighbors) if self.pix_area is None: - self.pix_area = self.guess_pixel_area( - pix_x, pix_y, pix_type - ) + self.pix_area = self.guess_pixel_area(pix_x, pix_y, pix_type) if apply_derotation: # todo: this should probably not be done, but need to fix @@ -125,27 +133,25 @@ def __eq__(self, other): if self.pix_rotation != other.pix_rotation: return False - return all([ - (self.pix_x == other.pix_x).all(), - (self.pix_y == other.pix_y).all(), - ]) + return all( + [(self.pix_x == other.pix_x).all(), (self.pix_y == other.pix_y).all(),] + ) def guess_radius(self): - ''' + """ Guess the camera radius as mean distance of the border pixels from the center pixel - ''' + """ border = self.get_border_pixel_mask() cx = self.pix_x.mean() cy = self.pix_y.mean() return np.sqrt( - (self.pix_x[border] - cx)**2 - + (self.pix_y[border] - cy)**2 + (self.pix_x[border] - cx) ** 2 + (self.pix_y[border] - cy) ** 2 ).mean() def transform_to(self, frame): - ''' + """ Transform the pixel coordinates stored in this geometry and the pixel and camera rotations to another camera coordinate frame. @@ -153,7 +159,7 @@ def transform_to(self, frame): ---------- frame: ctapipe.coordinates.CameraFrame The coordinate frame to transform to. 
- ''' + """ if self.frame is None: self.frame = CameraFrame() @@ -184,13 +190,15 @@ def transform_to(self, frame): ) def __hash__(self): - return hash(( - self.camera_name, - self.pix_x[0].to_value(u.m), - self.pix_y[0].to_value(u.m), - self.pix_type, - self.pix_rotation.deg, - )) + return hash( + ( + self.camera_name, + self.pix_x[0].to_value(u.m), + self.pix_y[0].to_value(u.m), + self.pix_type, + self.pix_rotation.deg, + ) + ) def __len__(self): return self.n_pixels @@ -221,9 +229,9 @@ def guess_pixel_area(cls, pix_x, pix_y, pix_type): dist = cls.guess_pixel_width(pix_x, pix_y) - if pix_type.startswith('hex'): - area = 2 * np.sqrt(3) * (dist / 2)**2 - elif pix_type.startswith('rect'): + if pix_type.startswith("hex"): + area = 2 * np.sqrt(3) * (dist / 2) ** 2 + elif pix_type.startswith("rect"): area = dist ** 2 else: raise KeyError("unsupported pixel type") @@ -232,15 +240,15 @@ def guess_pixel_area(cls, pix_x, pix_y, pix_type): @lazyproperty def pixel_width(self): - ''' + """ in-circle diameter for hexagons, edge width for square pixels This is calculated from the pixel area. - ''' + """ - if self.pix_type.startswith('hex'): + if self.pix_type.startswith("hex"): width = 2 * np.sqrt(self.pix_area / (2 * np.sqrt(3))) - elif self.pix_type.startswith('rect'): + elif self.pix_type.startswith("rect"): width = np.sqrt(self.pix_area) else: raise KeyError("unsupported pixel type") @@ -258,7 +266,9 @@ def guess_pixel_width(pix_x, pix_y): ------- in-circle diameter for hexagons, edge width for square pixels """ - return np.min(np.sqrt((pix_x[1:] - pix_x[0])**2 + (pix_y[1:] - pix_y[0])**2)) + return np.min( + np.sqrt((pix_x[1:] - pix_x[0]) ** 2 + (pix_y[1:] - pix_y[0]) ** 2) + ) @lazyproperty def _pixel_circumferences(self): @@ -266,9 +276,9 @@ def _pixel_circumferences(self): """ - if self.pix_type.startswith('hex'): + if self.pix_type.startswith("hex"): circum_rad = np.sqrt(2.0 * self.pix_area / 3.0 / np.sqrt(3)) - elif self.pix_type.startswith('rect'): + elif self.pix_type.startswith("rect"): circum_rad = np.sqrt(self.pix_area / 2.0) else: raise KeyError("unsupported pixel type") @@ -286,8 +296,9 @@ def _kdtree(self): """ - pixel_centers = np.column_stack([self.pix_x.to_value(u.m), - self.pix_y.to_value(u.m)]) + pixel_centers = np.column_stack( + [self.pix_x.to_value(u.m), self.pix_y.to_value(u.m)] + ) return KDTree(pixel_centers) @lazyproperty @@ -303,7 +314,7 @@ def _all_pixel_areas_equal(self): return ~np.any(~np.isclose(self.pix_area.value, self.pix_area[0].value), axis=0) @classmethod - def from_name(cls, camera_name='NectarCam', version=None): + def from_name(cls, camera_name="NectarCam", version=None): """ Construct a CameraGeometry using the name of the camera and array. 
@@ -324,29 +335,32 @@ def from_name(cls, camera_name='NectarCam', version=None): """ if version is None: - verstr = '' + verstr = "" else: verstr = f"-{version:03d}" tabname = "{camera_name}{verstr}.camgeom".format( camera_name=camera_name, verstr=verstr ) - table = get_table_dataset(tabname, role='dl0.tel.svc.camera') + table = get_table_dataset(tabname, role="dl0.tel.svc.camera") return CameraGeometry.from_table(table) def to_table(self): """ convert this to an `astropy.table.Table` """ # currently the neighbor list is not supported, since # var-length arrays are not supported by astropy.table.Table - return Table([self.pix_id, self.pix_x, self.pix_y, self.pix_area], - names=['pix_id', 'pix_x', 'pix_y', 'pix_area'], - meta=dict(PIX_TYPE=self.pix_type, - TAB_TYPE='ctapipe.instrument.CameraGeometry', - TAB_VER='1.1', - CAM_ID=self.camera_name, - PIX_ROT=self.pix_rotation.deg, - CAM_ROT=self.cam_rotation.deg, - )) + return Table( + [self.pix_id, self.pix_x, self.pix_y, self.pix_area], + names=["pix_id", "pix_x", "pix_y", "pix_area"], + meta=dict( + PIX_TYPE=self.pix_type, + TAB_TYPE="ctapipe.instrument.CameraGeometry", + TAB_VER="1.1", + CAM_ID=self.camera_name, + PIX_ROT=self.pix_rotation.deg, + CAM_ROT=self.cam_rotation.deg, + ), + ) @classmethod def from_table(cls, url_or_table, **kwargs): @@ -370,14 +384,14 @@ def from_table(cls, url_or_table, **kwargs): tab = Table.read(url_or_table, **kwargs) return cls( - camera_name=tab.meta.get('CAM_ID', 'Unknown'), - pix_id=tab['pix_id'], - pix_x=tab['pix_x'].quantity, - pix_y=tab['pix_y'].quantity, - pix_area=tab['pix_area'].quantity, - pix_type=tab.meta['PIX_TYPE'], - pix_rotation=Angle(tab.meta['PIX_ROT'] * u.deg), - cam_rotation=Angle(tab.meta['CAM_ROT'] * u.deg), + camera_name=tab.meta.get("CAM_ID", "Unknown"), + pix_id=tab["pix_id"], + pix_x=tab["pix_x"].quantity, + pix_y=tab["pix_y"].quantity, + pix_area=tab["pix_area"].quantity, + pix_type=tab.meta["PIX_TYPE"], + pix_rotation=Angle(tab.meta["PIX_ROT"] * u.deg), + cam_rotation=Angle(tab.meta["CAM_ROT"] * u.deg), ) def __repr__(self): @@ -389,7 +403,7 @@ def __repr__(self): pix_type=self.pix_type, npix=len(self.pix_id), pixrot=self.pix_rotation, - camrot=self.cam_rotation + camrot=self.cam_rotation, ) def __str__(self): @@ -397,7 +411,7 @@ def __str__(self): @lazyproperty def neighbors(self): - '''A list of the neighbors pixel_ids for each pixel''' + """A list of the neighbors pixel_ids for each pixel""" return [np.where(r)[0].tolist() for r in self.neighbor_matrix] @lazyproperty @@ -412,7 +426,7 @@ def neighbor_matrix_sparse(self): return self.calc_pixel_neighbors(diagonal=False) def calc_pixel_neighbors(self, diagonal=False): - ''' + """ Calculate the neighbors of pixels using a kdtree for nearest neighbor lookup. 
@@ -420,10 +434,10 @@ def calc_pixel_neighbors(self, diagonal=False): ---------- diagonal: bool If rectangular geometry, also add diagonal neighbors - ''' + """ neighbors = lil_matrix((self.n_pixels, self.n_pixels), dtype=bool) - if self.pix_type.startswith('hex'): + if self.pix_type.startswith("hex"): max_neighbors = 6 # on a hexgrid, the closest pixel in the second circle is # the diameter of the hexagon plus the inradius away @@ -465,10 +479,10 @@ def calc_pixel_neighbors(self, diagonal=False): # scipy still uses np.matrix in scipy.sparse, but we do not # explicitly use any feature of np.matrix, so we can ignore this here with warnings.catch_warnings(): - warnings.filterwarnings('ignore', category=PendingDeprecationWarning) + warnings.filterwarnings("ignore", category=PendingDeprecationWarning) if (neighbors.T != neighbors).sum() > 0: warnings.warn( - 'Neighbor matrix is not symmetric. Is camera geometry irregular?' + "Neighbor matrix is not symmetric. Is camera geometry irregular?" ) return neighbors.tocsr() @@ -514,11 +528,24 @@ def pixel_moment_matrix(self): x = self.pix_x.value y = self.pix_y.value - return np.row_stack([x, y, - x ** 2, x * y, y ** 2, - x ** 3, x ** 2 * y, x * y ** 2, y ** 3, - x ** 4, x ** 3 * y, x ** 2 * y ** 2, x * y ** 3, - y ** 4]) + return np.row_stack( + [ + x, + y, + x ** 2, + x * y, + y ** 2, + x ** 3, + x ** 2 * y, + x * y ** 2, + y ** 3, + x ** 4, + x ** 3 * y, + x ** 2 * y ** 2, + x * y ** 3, + y ** 4, + ] + ) def rotate(self, angle): """rotate the camera coordinates about the center of the camera by @@ -552,15 +579,16 @@ def rotate(self, angle): def info(self, printer=print): """ print detailed info about this camera """ printer(f'CameraGeometry: "{self}"') - printer(' - num-pixels: {}'.format(len(self.pix_id))) - printer(f' - pixel-type: {self.pix_type}') - printer(' - sensitive-area: {}'.format(self.pix_area.sum())) - printer(f' - pix-rotation: {self.pix_rotation}') - printer(f' - cam-rotation: {self.cam_rotation}') + printer(" - num-pixels: {}".format(len(self.pix_id))) + printer(f" - pixel-type: {self.pix_type}") + printer(" - sensitive-area: {}".format(self.pix_area.sum())) + printer(f" - pix-rotation: {self.pix_rotation}") + printer(f" - cam-rotation: {self.cam_rotation}") @classmethod - def make_rectangular(cls, npix_x=40, npix_y=40, range_x=(-0.5, 0.5), - range_y=(-0.5, 0.5)): + def make_rectangular( + cls, npix_x=40, npix_y=40, range_x=(-0.5, 0.5), range_y=(-0.5, 0.5) + ): """Generate a simple camera with 2D rectangular geometry. Used for testing. 
@@ -590,17 +618,18 @@ def make_rectangular(cls, npix_x=40, npix_y=40, range_x=(-0.5, 0.5), ids = np.arange(npix_x * npix_y) rr = np.ones_like(xx).value * (xx[1] - xx[0]) / 2.0 - return cls(camera_name=-1, - pix_id=ids, - pix_x=xx, - pix_y=yy, - pix_area=(2 * rr) ** 2, - neighbors=None, - pix_type='rectangular', - ) + return cls( + camera_name=-1, + pix_id=ids, + pix_x=xx, + pix_y=yy, + pix_area=(2 * rr) ** 2, + neighbors=None, + pix_type="rectangular", + ) def get_border_pixel_mask(self, width=1): - ''' + """ Get a mask for pixels at the border of the camera of arbitrary width Parameters @@ -612,7 +641,7 @@ def get_border_pixel_mask(self, width=1): ------- mask: array A boolean mask, True if pixel is in the border of the specified width - ''' + """ if width in self.border_cache: return self.border_cache[width] @@ -620,7 +649,7 @@ def get_border_pixel_mask(self, width=1): # scipy still uses np.matrix in scipy.sparse, but we do not # explicitly use any feature of np.matrix, so we can ignore this here with warnings.catch_warnings(): - warnings.filterwarnings('ignore', category=PendingDeprecationWarning) + warnings.filterwarnings("ignore", category=PendingDeprecationWarning) if width == 1: n_neighbors = self.neighbor_matrix_sparse.sum(axis=1).A1 @@ -634,7 +663,7 @@ def get_border_pixel_mask(self, width=1): return mask def position_to_pix_index(self, x, y): - ''' + """ Return the index of a camera pixel which contains a given position (x,y) in the camera frame. The (x,y) coordinates can be arrays (of equal length), for which the methods returns an array of pixel ids. A warning is raised if the @@ -649,15 +678,19 @@ def position_to_pix_index(self, x, y): ------- pix_indices: Pixel index or array of pixel indices. Returns -1 if position falls outside camera - ''' + """ if not self._all_pixel_areas_equal: - logger.warning(" Method not implemented for cameras with varying pixel sizes") + logger.warning( + " Method not implemented for cameras with varying pixel sizes" + ) points_searched = np.dstack([x.to_value(u.m), y.to_value(u.m)]) circum_rad = self._pixel_circumferences[0].to_value(u.m) kdtree = self._kdtree - dist, pix_indices = kdtree.query(points_searched, distance_upper_bound=circum_rad) + dist, pix_indices = kdtree.query( + points_searched, distance_upper_bound=circum_rad + ) del dist pix_indices = pix_indices.flatten() @@ -683,38 +716,45 @@ def position_to_pix_index(self, x, y): for borderpix_index in borderpix_indices_in_list: index = np.where(pix_indices == borderpix_index)[0][0] # compare with inside pixel: - xprime = (points_searched[0][index, 0] - - self.pix_x[borderpix_index].to_value(u.m) - + self.pix_x[insidepix_index].to_value(u.m)) - yprime = (points_searched[0][index, 1] - - self.pix_y[borderpix_index].to_value(u.m) - + self.pix_y[insidepix_index].to_value(u.m)) - dist_check, index_check = kdtree.query([xprime, yprime], - distance_upper_bound=circum_rad) + xprime = ( + points_searched[0][index, 0] + - self.pix_x[borderpix_index].to_value(u.m) + + self.pix_x[insidepix_index].to_value(u.m) + ) + yprime = ( + points_searched[0][index, 1] + - self.pix_y[borderpix_index].to_value(u.m) + + self.pix_y[insidepix_index].to_value(u.m) + ) + dist_check, index_check = kdtree.query( + [xprime, yprime], distance_upper_bound=circum_rad + ) del dist_check if index_check != insidepix_index: pix_indices[index] = -1 # print warning: for index in np.where(pix_indices == -1)[0]: - logger.warning(" Coordinate ({} m, {} m) lies outside camera" - .format(points_searched[0][index, 0], - 
points_searched[0][index, 1])) + logger.warning( + " Coordinate ({} m, {} m) lies outside camera".format( + points_searched[0][index, 0], points_searched[0][index, 1] + ) + ) return pix_indices if len(pix_indices) > 1 else pix_indices[0] @staticmethod def simtel_shape_to_type(pixel_shape): if pixel_shape == 1: - return 'hexagonal', Angle(0, u.deg) + return "hexagonal", Angle(0, u.deg) if pixel_shape == 2: - return 'rectangular', Angle(0, u.deg) + return "rectangular", Angle(0, u.deg) if pixel_shape == 3: - return 'hexagonal', Angle(30, u.deg) + return "hexagonal", Angle(30, u.deg) - raise ValueError(f'Unknown pixel_shape {pixel_shape}') + raise ValueError(f"Unknown pixel_shape {pixel_shape}") class UnknownPixelShapeWarning(UserWarning): diff --git a/ctapipe/instrument/camera/readout.py b/ctapipe/instrument/camera/readout.py index 2a7f39ba7c9..5b9a7e361c7 100644 --- a/ctapipe/instrument/camera/readout.py +++ b/ctapipe/instrument/camera/readout.py @@ -11,15 +11,19 @@ from ctapipe.utils import get_table_dataset -__all__ = ['CameraReadout'] +__all__ = ["CameraReadout"] logger = logging.getLogger(__name__) class CameraReadout: - - def __init__(self, camera_name, sampling_rate, reference_pulse_shape, - reference_pulse_sample_width): + def __init__( + self, + camera_name, + sampling_rate, + reference_pulse_shape, + reference_pulse_sample_width, + ): """Stores properties related to the readout of a Cherenkov Camera. Parameters @@ -57,12 +61,14 @@ def __eq__(self, other): return True def __hash__(self): - return hash(( - self.camera_name, - self.sampling_rate.to_value(u.GHz), - self.reference_pulse_shape.size, - self.reference_pulse_sample_width.to_value(u.ns), - )) + return hash( + ( + self.camera_name, + self.sampling_rate.to_value(u.GHz), + self.reference_pulse_shape.size, + self.reference_pulse_sample_width.to_value(u.ns), + ) + ) def __len__(self): return self.reference_pulse_shape.size @@ -79,7 +85,7 @@ def reference_pulse_sample_time(self): return u.Quantity(sample_time, u.ns) @classmethod - def from_name(cls, camera_name='NectarCam', version=None): + def from_name(cls, camera_name="NectarCam", version=None): """Construct a CameraReadout using the name of the camera and array. 
This expects that there is a resource in the `ctapipe_resources` module @@ -100,7 +106,7 @@ def from_name(cls, camera_name='NectarCam', version=None): """ if version is None: - verstr = '' + verstr = "" else: verstr = f"-{version:03d}" @@ -108,12 +114,14 @@ def from_name(cls, camera_name='NectarCam', version=None): tabname = "{camera_name}{verstr}.camreadout".format( camera_name=camera_name, verstr=verstr ) - table = get_table_dataset(tabname, role='dl0.tel.svc.camera') + table = get_table_dataset(tabname, role="dl0.tel.svc.camera") return CameraReadout.from_table(table) except FileNotFoundError: # TODO: remove case when files have been generated - logger.warning(f"Resorting to default CameraReadout," - f" File does not exist: ({tabname})") + logger.warning( + f"Resorting to default CameraReadout," + f" File does not exist: ({tabname})" + ) reference_pulse_shape = np.array([norm.pdf(np.arange(96), 48, 6)]) return cls( camera_name=camera_name, @@ -127,21 +135,25 @@ def to_table(self): n_channels = len(self.reference_pulse_shape) tables = [ *[self.reference_pulse_shape[i] for i in range(n_channels)], - self.reference_pulse_sample_time + self.reference_pulse_sample_time, ] names = [ *[f"reference_pulse_shape_channel{i}" for i in range(n_channels)], - "reference_pulse_sample_time" + "reference_pulse_sample_time", ] - return Table(tables, names=names, meta=dict( - TAB_TYPE='ctapipe.instrument.CameraReadout', - TAB_VER='1.0', - CAM_ID=self.camera_name, - NCHAN=n_channels, - SAMPFREQ=self.sampling_rate.to_value(u.GHz), - REF_WIDTH=self.reference_pulse_sample_width.to_value(u.ns), - )) + return Table( + tables, + names=names, + meta=dict( + TAB_TYPE="ctapipe.instrument.CameraReadout", + TAB_VER="1.0", + CAM_ID=self.camera_name, + NCHAN=n_channels, + SAMPFREQ=self.sampling_rate.to_value(u.GHz), + REF_WIDTH=self.reference_pulse_sample_width.to_value(u.ns), + ), + ) @classmethod def from_table(cls, url_or_table, **kwargs): @@ -161,12 +173,12 @@ def from_table(cls, url_or_table, **kwargs): if not isinstance(url_or_table, Table): tab = Table.read(url_or_table, **kwargs) - camera_name = tab.meta.get('CAM_ID', 'Unknown') - n_channels = tab.meta['NCHAN'] + camera_name = tab.meta.get("CAM_ID", "Unknown") + n_channels = tab.meta["NCHAN"] sampling_rate = u.Quantity(tab.meta["SAMPFREQ"], u.GHz) - reference_pulse_sample_width = u.Quantity(tab.meta['REF_WIDTH'], u.ns) + reference_pulse_sample_width = u.Quantity(tab.meta["REF_WIDTH"], u.ns) reference_pulse_shape = np.array( - [tab[f'reference_pulse_shape_channel{i}'] for i in range(n_channels)] + [tab[f"reference_pulse_shape_channel{i}"] for i in range(n_channels)] ) return cls( diff --git a/ctapipe/instrument/camera/tests/test_description.py b/ctapipe/instrument/camera/tests/test_description.py index 8b27d6edf99..2fd7b03f4b8 100644 --- a/ctapipe/instrument/camera/tests/test_description.py +++ b/ctapipe/instrument/camera/tests/test_description.py @@ -5,8 +5,8 @@ def test_known_camera_names(): """ Check that we can get a list of known camera names """ cams = CameraDescription.get_known_camera_names() assert len(cams) > 4 - assert 'FlashCam' in cams - assert 'NectarCam' in cams + assert "FlashCam" in cams + assert "NectarCam" in cams # TODO: Requires camreadout files to be generated # for cam in cams: diff --git a/ctapipe/instrument/camera/tests/test_geometry.py b/ctapipe/instrument/camera/tests/test_geometry.py index b652f2bcb12..337b437bcb4 100644 --- a/ctapipe/instrument/camera/tests/test_geometry.py +++ b/ctapipe/instrument/camera/tests/test_geometry.py @@ 
-11,12 +11,16 @@ def test_construct(): """ Check we can make a CameraGeometry from scratch """ x = np.linspace(-10, 10, 100) y = np.linspace(-10, 10, 100) - geom = CameraGeometry(camera_name="Unknown", pix_id=np.arange(100), - pix_x=x * u.m, pix_y=y * u.m, - pix_area=x * u.m**2, - pix_type='rectangular', - pix_rotation="10d", - cam_rotation="12d") + geom = CameraGeometry( + camera_name="Unknown", + pix_id=np.arange(100), + pix_x=x * u.m, + pix_y=y * u.m, + pix_area=x * u.m ** 2, + pix_type="rectangular", + pix_rotation="10d", + cam_rotation="12d", + ) assert geom.camera_name == "Unknown" assert geom.pix_area is not None @@ -40,7 +44,10 @@ def test_load_lst_camera(): def test_position_to_pix_index(): """ test that we can lookup a pixel from a coordinate""" geom = CameraGeometry.from_name("LSTCam") - x, y = 0.80 * u.m, 0.79 * u.m, + x, y = ( + 0.80 * u.m, + 0.79 * u.m, + ) pix_id = geom.position_to_pix_index(x, y) assert pix_id == 1790 @@ -48,18 +55,17 @@ def test_position_to_pix_index(): def test_find_neighbor_pixels(): """ test basic neighbor functionality """ n_pixels = 5 - x, y = u.Quantity(np.meshgrid( - np.linspace(-5, 5, n_pixels), - np.linspace(-5, 5, n_pixels) - ), u.cm) + x, y = u.Quantity( + np.meshgrid(np.linspace(-5, 5, n_pixels), np.linspace(-5, 5, n_pixels)), u.cm + ) geom = CameraGeometry( - 'test', + "test", pix_id=np.arange(n_pixels), - pix_area=u.Quantity(4, u.cm**2), + pix_area=u.Quantity(4, u.cm ** 2), pix_x=x.ravel(), pix_y=y.ravel(), - pix_type='rectangular', + pix_type="rectangular", ) neigh = geom.neighbors @@ -78,19 +84,18 @@ def test_neighbor_pixels(camera_name): n_pix = len(geom.pix_id) n_neighbors = [len(x) for x in geom.neighbors] - - if geom.pix_type.startswith('hex'): + if geom.pix_type.startswith("hex"): assert n_neighbors.count(6) > 0.5 * n_pix assert n_neighbors.count(6) > n_neighbors.count(4) - if geom.pix_type.startswith('rect'): + if geom.pix_type.startswith("rect"): assert n_neighbors.count(4) > 0.5 * n_pix assert n_neighbors.count(5) == 0 assert n_neighbors.count(6) == 0 # whipple has inhomogenious pixels that mess with pixel neighborhood # calculation - if camera_name != 'Whipple490': + if camera_name != "Whipple490": assert np.all(geom.neighbor_matrix == geom.neighbor_matrix.T) assert n_neighbors.count(1) == 0 # no pixel should have a single neighbor @@ -100,12 +105,12 @@ def test_calc_pixel_neighbors_square(): x, y = np.meshgrid(np.arange(20), np.arange(20)) cam = CameraGeometry( - camera_name='test', + camera_name="test", pix_id=np.arange(400), - pix_type='rectangular', + pix_type="rectangular", pix_x=u.Quantity(x.ravel(), u.cm), pix_y=u.Quantity(y.ravel(), u.cm), - pix_area=u.Quantity(np.ones(400), u.cm**2), + pix_area=u.Quantity(np.ones(400), u.cm ** 2), ) assert set(cam.neighbors[0]) == {1, 20} @@ -120,12 +125,12 @@ def test_calc_pixel_neighbors_square_diagonal(): x, y = np.meshgrid(np.arange(20), np.arange(20)) cam = CameraGeometry( - camera_name='test', + camera_name="test", pix_id=np.arange(400), - pix_type='rectangular', + pix_type="rectangular", pix_x=u.Quantity(x.ravel(), u.cm), pix_y=u.Quantity(y.ravel(), u.cm), - pix_area=u.Quantity(np.ones(400), u.cm**2), + pix_area=u.Quantity(np.ones(400), u.cm ** 2), ) cam._neighbors = cam.calc_pixel_neighbors(diagonal=True) @@ -148,7 +153,7 @@ def test_to_and_from_table(): def test_write_read(tmpdir): """ Check that serialization to disk doesn't lose info """ - filename = str(tmpdir.join('testcamera.fits.gz')) + filename = str(tmpdir.join("testcamera.fits.gz")) geom = 
CameraGeometry.from_name("LSTCam") @@ -167,17 +172,17 @@ def test_precal_neighbors(): test that pre-calculated neighbor lists don't get overwritten by automatic ones """ - geom = CameraGeometry(camera_name="TestCam", - pix_id=np.arange(3), - pix_x=np.arange(3) * u.deg, - pix_y=np.arange(3) * u.deg, - pix_area=np.ones(3) * u.deg**2, - neighbors=[ - [1, ], [0, 2], [1, ] - ], - pix_type='rectangular', - pix_rotation="0deg", - cam_rotation="0deg") + geom = CameraGeometry( + camera_name="TestCam", + pix_id=np.arange(3), + pix_x=np.arange(3) * u.deg, + pix_y=np.arange(3) * u.deg, + pix_area=np.ones(3) * u.deg ** 2, + neighbors=[[1,], [0, 2], [1,]], + pix_type="rectangular", + pix_rotation="0deg", + cam_rotation="0deg", + ) neigh = geom.neighbors assert len(neigh) == len(geom.pix_x) @@ -207,7 +212,7 @@ def test_slicing(): def test_slicing_rotation(camera_name): """ Check that we can rotate and slice """ cam = CameraGeometry.from_name(camera_name) - cam.rotate('25d') + cam.rotate("25d") sliced1 = cam[5:10] @@ -216,23 +221,15 @@ def test_slicing_rotation(camera_name): def test_rectangle_patch_neighbors(): """" test that a simple rectangular camera has the expected neighbors """ - pix_x = np.array([ - -1.1, 0.1, 0.9, - -1, 0, 1, - -0.9, -0.1, 1.1 - ]) * u.m - pix_y = np.array([ - 1.1, 1, 0.9, - -0.1, 0, 0.1, - -0.9, -1, -1.1 - ]) * u.m + pix_x = np.array([-1.1, 0.1, 0.9, -1, 0, 1, -0.9, -0.1, 1.1]) * u.m + pix_y = np.array([1.1, 1, 0.9, -0.1, 0, 0.1, -0.9, -1, -1.1]) * u.m cam = CameraGeometry( - camera_name='testcam', + camera_name="testcam", pix_id=np.arange(pix_x.size), pix_x=pix_x, pix_y=pix_y, pix_area=None, - pix_type='rectangular', + pix_type="rectangular", ) assert np.all(cam.neighbor_matrix.T == cam.neighbor_matrix) @@ -287,7 +284,7 @@ def test_camera_from_name(camera_name): @pytest.mark.parametrize("camera_name", camera_names) def test_camera_coordinate_transform(camera_name): - '''test conversion of the coordinates stored in a camera frame''' + """test conversion of the coordinates stored in a camera frame""" from ctapipe.coordinates import EngineeringCameraFrame geom = CameraGeometry.from_name(camera_name) @@ -304,25 +301,25 @@ def test_guess_area(): n_pixels = len(x) geom = CameraGeometry( - 'test', + "test", pix_id=np.arange(n_pixels), pix_area=None, pix_x=x, pix_y=y, - pix_type='rect', + pix_type="rect", ) - assert np.all(geom.pix_area == 1 * u.cm**2) + assert np.all(geom.pix_area == 1 * u.cm ** 2) geom = CameraGeometry( - 'test', + "test", pix_id=np.arange(n_pixels), pix_area=None, pix_x=x, pix_y=y, - pix_type='hexagonal', + pix_type="hexagonal", ) - assert u.allclose(geom.pix_area, 2 * np.sqrt(3) * (0.5 * u.cm)**2) + assert u.allclose(geom.pix_area, 2 * np.sqrt(3) * (0.5 * u.cm) ** 2) def test_guess_width(): @@ -334,31 +331,31 @@ def test_guess_width(): def test_pixel_width(): geom = CameraGeometry( - 'test', + "test", pix_id=[1], - pix_area=[2] * u.cm**2, + pix_area=[2] * u.cm ** 2, pix_x=[0] * u.m, pix_y=[0] * u.m, - pix_type='hex', + pix_type="hex", ) assert np.isclose(geom.pixel_width.to_value(u.cm), [2 * np.sqrt(1 / np.sqrt(3))]) geom = CameraGeometry( - 'test', + "test", pix_id=[1], - pix_area=[2] * u.cm**2, + pix_area=[2] * u.cm ** 2, pix_x=[0] * u.m, pix_y=[0] * u.m, - pix_type='rect', + pix_type="rect", ) assert np.isclose(geom.pixel_width.to_value(u.cm), [np.sqrt(2)]) def test_guess_radius(): - lst_cam = CameraGeometry.from_name('LSTCam') + lst_cam = CameraGeometry.from_name("LSTCam") assert u.isclose(lst_cam.guess_radius(), 1.1 * u.m, rtol=0.05) - lst_cam = 
CameraGeometry.from_name('CHEC') + lst_cam = CameraGeometry.from_name("CHEC") assert u.isclose(lst_cam.guess_radius(), 0.16 * u.m, rtol=0.05) diff --git a/ctapipe/instrument/camera/tests/test_readout.py b/ctapipe/instrument/camera/tests/test_readout.py index 8e5c80ef726..14afa3d85bd 100644 --- a/ctapipe/instrument/camera/tests/test_readout.py +++ b/ctapipe/instrument/camera/tests/test_readout.py @@ -17,7 +17,7 @@ def test_construct(): camera_name=camera_name, sampling_rate=sampling_rate, reference_pulse_shape=reference_pulse_shape, - reference_pulse_sample_width=reference_pulse_sample_width + reference_pulse_sample_width=reference_pulse_sample_width, ) assert readout.camera_name == camera_name @@ -26,7 +26,7 @@ def test_construct(): assert readout.reference_pulse_sample_width == reference_pulse_sample_width -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def readout(): camera_name = "Unknown" sampling_rate = u.Quantity(2, u.GHz) @@ -36,7 +36,7 @@ def readout(): camera_name=camera_name, sampling_rate=sampling_rate, reference_pulse_shape=reference_pulse_shape, - reference_pulse_sample_width=reference_pulse_sample_width + reference_pulse_sample_width=reference_pulse_sample_width, ) @@ -58,7 +58,7 @@ def test_to_and_from_table(readout): def test_write_read(tmpdir, readout): """ Check that serialization to disk doesn't lose info """ - filename = str(tmpdir.join('testcamera.fits.gz')) + filename = str(tmpdir.join("testcamera.fits.gz")) readout.to_table().write(filename, overwrite=True) readout2 = readout.from_table(filename) @@ -79,28 +79,28 @@ def test_equals(): camera_name=camera_name, sampling_rate=sampling_rate, reference_pulse_shape=reference_pulse_shape, - reference_pulse_sample_width=reference_pulse_sample_width + reference_pulse_sample_width=reference_pulse_sample_width, ) readout2 = CameraReadout( camera_name=camera_name, sampling_rate=sampling_rate, reference_pulse_shape=reference_pulse_shape, - reference_pulse_sample_width=reference_pulse_sample_width + reference_pulse_sample_width=reference_pulse_sample_width, ) readout3 = CameraReadout( camera_name=4, sampling_rate=sampling_rate, reference_pulse_shape=reference_pulse_shape, - reference_pulse_sample_width=reference_pulse_sample_width + reference_pulse_sample_width=reference_pulse_sample_width, ) readout4 = CameraReadout( camera_name=camera_name, sampling_rate=sampling_rate, reference_pulse_shape=reference_pulse_shape, - reference_pulse_sample_width=u.Quantity(1, u.ns) + reference_pulse_sample_width=u.Quantity(1, u.ns), ) assert readout1 is not readout2 @@ -122,21 +122,21 @@ def test_hashing(): camera_name=camera_name, sampling_rate=sampling_rate, reference_pulse_shape=reference_pulse_shape, - reference_pulse_sample_width=reference_pulse_sample_width + reference_pulse_sample_width=reference_pulse_sample_width, ) readout2 = CameraReadout( camera_name=camera_name, sampling_rate=sampling_rate, reference_pulse_shape=reference_pulse_shape, - reference_pulse_sample_width=reference_pulse_sample_width + reference_pulse_sample_width=reference_pulse_sample_width, ) readout3 = CameraReadout( camera_name=4, sampling_rate=sampling_rate, reference_pulse_shape=reference_pulse_shape, - reference_pulse_sample_width=reference_pulse_sample_width + reference_pulse_sample_width=reference_pulse_sample_width, ) assert len({readout1, readout2, readout3}) == 2 diff --git a/ctapipe/instrument/subarray.py b/ctapipe/instrument/subarray.py index 808a7455879..bb86eaac815 100644 --- a/ctapipe/instrument/subarray.py +++ 
b/ctapipe/instrument/subarray.py @@ -152,7 +152,7 @@ def tel_ids_to_indices(self, tel_ids): return self.tel_index_array[tel_ids] def tel_ids_to_mask(self, tel_ids): - '''Convert a list of telescope ids to a boolean mask + """Convert a list of telescope ids to a boolean mask of length ``num_tels`` where the **index** of the telescope is set to ``True`` for each tel_id in tel_ids @@ -166,7 +166,7 @@ def tel_ids_to_mask(self, tel_ids): np.array[dtype=bool]: Boolean array of length ``num_tels`` with indices of the telescopes in ``tel_ids`` set to True. - ''' + """ mask = np.zeros(self.num_tels, dtype=bool) indices = self.tel_ids_to_indices(tel_ids) mask[indices] = True diff --git a/ctapipe/instrument/telescope.py b/ctapipe/instrument/telescope.py index 6081ace1a6e..8de5891d985 100644 --- a/ctapipe/instrument/telescope.py +++ b/ctapipe/instrument/telescope.py @@ -41,8 +41,9 @@ class TelescopeDescription: the camera associated with this telescope """ - def __init__(self, name, tel_type, optics: OpticsDescription, - camera: CameraDescription): + def __init__( + self, name, tel_type, optics: OpticsDescription, camera: CameraDescription + ): self.name = name self.type = tel_type diff --git a/ctapipe/instrument/tests/__init__.py b/ctapipe/instrument/tests/__init__.py index 7f1a673306a..94607dd7af6 100644 --- a/ctapipe/instrument/tests/__init__.py +++ b/ctapipe/instrument/tests/__init__.py @@ -4,4 +4,3 @@ @author: zornju """ - diff --git a/ctapipe/instrument/tests/test_guessing.py b/ctapipe/instrument/tests/test_guessing.py index 231983cf89c..b296ed1ae86 100644 --- a/ctapipe/instrument/tests/test_guessing.py +++ b/ctapipe/instrument/tests/test_guessing.py @@ -6,12 +6,12 @@ def test_guessing(): from ctapipe.instrument import guess_telescope guess = guess_telescope(2048, 2.28) - assert guess.type == 'SST' - assert guess.name == 'GCT' + assert guess.type == "SST" + assert guess.name == "GCT" guess = guess_telescope(2048, 2.28 * u.m) - assert guess.type == 'SST' - assert guess.name == 'GCT' + assert guess.type == "SST" + assert guess.name == "GCT" with raises(ValueError): guess = guess_telescope(100, 2.28 * u.m) @@ -20,5 +20,5 @@ def test_guessing(): n_pixels = 1764 guess = guess_telescope(n_pixels, foclen) - assert guess.camera_name == 'FlashCam' - assert guess.type == 'MST' + assert guess.camera_name == "FlashCam" + assert guess.type == "MST" diff --git a/ctapipe/instrument/tests/test_subarray.py b/ctapipe/instrument/tests/test_subarray.py index 06e3b16d3b1..251b6e4b2ee 100644 --- a/ctapipe/instrument/tests/test_subarray.py +++ b/ctapipe/instrument/tests/test_subarray.py @@ -80,15 +80,15 @@ def test_tel_indexing(example_subarray): def test_tel_ids_to_mask(example_subarray): - lst = TelescopeDescription.from_name('LST', 'LSTCam') + lst = TelescopeDescription.from_name("LST", "LSTCam") subarray = SubarrayDescription( - 'someone_counted_in_binary', + "someone_counted_in_binary", tel_positions={1: [0, 0, 0] * u.m, 10: [50, 0, 0] * u.m}, - tel_descriptions={1: lst, 10: lst} + tel_descriptions={1: lst, 10: lst}, ) assert np.all(subarray.tel_ids_to_mask([]) == [False, False]) - assert np.all(subarray.tel_ids_to_mask([1, ]) == [True, False]) + assert np.all(subarray.tel_ids_to_mask([1,]) == [True, False]) assert np.all(subarray.tel_ids_to_mask([10]) == [False, True]) assert np.all(subarray.tel_ids_to_mask([1, 10]) == [True, True]) diff --git a/ctapipe/instrument/tests/test_telescope.py b/ctapipe/instrument/tests/test_telescope.py index d6b9235577d..7faa74755ee 100644 --- 
a/ctapipe/instrument/tests/test_telescope.py +++ b/ctapipe/instrument/tests/test_telescope.py @@ -17,9 +17,12 @@ def test_hash(): for i in range(3): telescopes.append( - TelescopeDescription(name=name, tel_type=type, - optics=OpticsDescription.from_name(name), - camera=CameraDescription.from_name(camera)) + TelescopeDescription( + name=name, + tel_type=type, + optics=OpticsDescription.from_name(name), + camera=CameraDescription.from_name(camera), + ) ) assert len(telescopes) == 9 diff --git a/ctapipe/io/__init__.py b/ctapipe/io/__init__.py index 38b6b669f5d..c12001d2875 100644 --- a/ctapipe/io/__init__.py +++ b/ctapipe/io/__init__.py @@ -9,14 +9,14 @@ from .simteleventsource import SimTelEventSource __all__ = [ - 'get_array_layout', - 'HDF5TableWriter', - 'HDF5TableReader', - 'TableWriter', - 'TableReader', - 'EventSeeker', - 'EventSource', - 'event_source', - 'SimTelEventSource', - 'DataLevel', + "get_array_layout", + "HDF5TableWriter", + "HDF5TableReader", + "TableWriter", + "TableReader", + "EventSeeker", + "EventSource", + "event_source", + "SimTelEventSource", + "DataLevel", ] diff --git a/ctapipe/io/array.py b/ctapipe/io/array.py index de4683dd592..af05e5645e5 100644 --- a/ctapipe/io/array.py +++ b/ctapipe/io/array.py @@ -5,7 +5,7 @@ from astropy.table import Table from ctapipe.utils import get_dataset_path -_telclass_map = {0: 'SST', 1: 'MST', 2: 'LST'} +_telclass_map = {0: "SST", 1: "MST", 2: "LST"} def get_array_layout(instrument_name): @@ -15,9 +15,9 @@ def get_array_layout(instrument_name): """ name = instrument_name.lower() try: - layoutfile = get_dataset_path(f'{name}_arraylayout.fits') + layoutfile = get_dataset_path(f"{name}_arraylayout.fits") except KeyError: - layoutfile = get_dataset_path(f'{name}_arraylayout.fits.gz') + layoutfile = get_dataset_path(f"{name}_arraylayout.fits.gz") return load_array_layout_from_file(layoutfile) @@ -25,4 +25,4 @@ def load_array_layout_from_file(array_layout_filename): """ Read an array layout from a FITS file with a ``TELARRAY`` extension """ - return Table.read(array_layout_filename, hdu='TELARRAY') + return Table.read(array_layout_filename, hdu="TELARRAY") diff --git a/ctapipe/io/datalevels.py b/ctapipe/io/datalevels.py index 6daf4ae7c79..5fee518f5c8 100644 --- a/ctapipe/io/datalevels.py +++ b/ctapipe/io/datalevels.py @@ -2,7 +2,8 @@ class DataLevel(Enum): - '''Enum of the different Data Levels''' + """Enum of the different Data Levels""" + R0 = auto() R1 = auto() R2 = auto() diff --git a/ctapipe/io/eventseeker.py b/ctapipe/io/eventseeker.py index 7804a2749d7..f7dffede91f 100644 --- a/ctapipe/io/eventseeker.py +++ b/ctapipe/io/eventseeker.py @@ -5,7 +5,9 @@ from copy import deepcopy from ctapipe.core import Component -__all__ = ['EventSeeker', ] +__all__ = [ + "EventSeeker", +] class EventSeeker(Component): @@ -92,8 +94,10 @@ def __init__(self, reader, config=None, parent=None, **kwargs): super().__init__(config=config, parent=parent, **kwargs) if reader.is_stream: - raise IOError("Reader is not compatible as input to the " - "event_source is a stream (seeking not possible)") + raise IOError( + "Reader is not compatible as input to the " + "event_source is a stream (seeking not possible)" + ) self._reader = reader @@ -144,8 +148,7 @@ def __getitem__(self, item): if item < 0: item = len(self) + item if item < 0 or item >= len(self): - msg = ("Event index {} out of range [0, {}]" - .format(item, len(self))) + msg = "Event index {} out of range [0, {}]".format(item, len(self)) raise IndexError(msg) elif isinstance(item, str): item = 
int(item) @@ -173,8 +176,9 @@ def __getitem__(self, item): # Check we are within max_events range max_events = self._reader.max_events if not use_event_id and max_events and item >= max_events: - msg = ("Event index {} outside of specified max_events {}" - .format(item, max_events)) + msg = "Event index {} outside of specified max_events {}".format( + item, max_events + ) raise IndexError(msg) try: @@ -184,8 +188,10 @@ def __getitem__(self, item): event = self._reader._get_event_by_id(item) except AttributeError: if self._getevent_warn: - self.log.warning("Seeking to event by looping through " - "events... (potentially long process)") + self.log.warning( + "Seeking to event by looping through " + "events... (potentially long process)" + ) self._getevent_warn = False if not use_event_id: event = self._get_event_by_index(item) @@ -264,8 +270,10 @@ def __len__(self): try: count = len(self._reader) except TypeError: - self.log.warning("Obtaining length of file by looping through " - "all events... (potentially long process)") + self.log.warning( + "Obtaining length of file by looping through " + "all events... (potentially long process)" + ) count = 0 for _ in self: count += 1 diff --git a/ctapipe/io/eventsource.py b/ctapipe/io/eventsource.py index 2a4f3b6115b..4512df23057 100644 --- a/ctapipe/io/eventsource.py +++ b/ctapipe/io/eventsource.py @@ -102,7 +102,7 @@ class EventSource(Component): input_url = Path( directory_ok=False, exists=True, - help="Path to the input file containing events." + help="Path to the input file containing events.", ).tag(config=True) max_events = Int( @@ -118,7 +118,7 @@ class EventSource(Component): "list of allowed tel_ids, others will be ignored. " "If None, all telescopes in the input stream " "will be included" - ) + ), ).tag(config=True) def __init__(self, input_url=None, config=None, parent=None, **kwargs): @@ -323,10 +323,10 @@ def from_config(cls, config=None, parent=None, **kwargs): Instance of a compatible EventSource subclass """ if config is None and parent is None: - raise ValueError('One of config or parent must be provided') + raise ValueError("One of config or parent must be provided") if config is not None and parent is not None: - raise ValueError('Only one of config or parent must be provided') + raise ValueError("Only one of config or parent must be provided") if config is None: config = parent.config diff --git a/ctapipe/io/files.py b/ctapipe/io/files.py index bd284d1d6a9..45057a7645a 100644 --- a/ctapipe/io/files.py +++ b/ctapipe/io/files.py @@ -16,13 +16,13 @@ def get_file_type(filename): """ root, ext = os.path.splitext(filename) - if ext in ['.gz', '.bz2']: + if ext in [".gz", ".bz2"]: ext = os.path.splitext(root)[1] ext = ext[1:] # strip off leading '.' 
# special cases: - if ext in ['fit', 'FITS', 'FIT']: - ext = 'fits' + if ext in ["fit", "FITS", "FIT"]: + ext = "fits" return ext diff --git a/ctapipe/io/hdf5tableio.py b/ctapipe/io/hdf5tableio.py index 65139a2cade..289beaceff7 100644 --- a/ctapipe/io/hdf5tableio.py +++ b/ctapipe/io/hdf5tableio.py @@ -33,9 +33,9 @@ DEFAULT_FILTERS = tables.Filters( - complevel=5, # compression medium, tradeoff between speed and compression - complib='blosc:zstd', # use modern zstd algorithm - fletcher32=True, # add checksums to data chunks + complevel=5, # compression medium, tradeoff between speed and compression + complib="blosc:zstd", # use modern zstd algorithm + fletcher32=True, # add checksums to data chunks ) @@ -161,7 +161,7 @@ class Schema(tables.IsDescription): continue if col_name in Schema.columns: - self.log.warning(f'Found duplicated column {col_name}, skipping') + self.log.warning(f"Found duplicated column {col_name}, skipping") continue # apply any user-defined transforms first @@ -185,7 +185,7 @@ def transform(enum_value): unit = container.fields[key].unit or value.unit tr = partial(tr_convert_and_strip_unit, unit=unit) - meta[f"{col_name}_UNIT"] = unit.to_string('vounit') + meta[f"{col_name}_UNIT"] = unit.to_string("vounit") value = tr(value) self.add_column_transform(table_name, col_name, tr) @@ -208,10 +208,10 @@ def transform(enum_value): else: self.log.warning( - f'Column {col_name} of' - f' container {container.__class__.__name__}' - f' in table {table_name}' - ' not writable, skipping' + f"Column {col_name} of" + f" container {container.__class__.__name__}" + f" in table {table_name}" + " not writable, skipping" ) continue diff --git a/ctapipe/io/simteleventsource.py b/ctapipe/io/simteleventsource.py index 7054f14f216..43710ab0983 100644 --- a/ctapipe/io/simteleventsource.py +++ b/ctapipe/io/simteleventsource.py @@ -48,7 +48,8 @@ def parse_simtel_time(simtel_time): return Time( u.Quantity(simtel_time[0], u.s), u.Quantity(simtel_time[1], u.ns), - format="unix", scale="utc", + format="unix", + scale="utc", ) @@ -159,7 +160,9 @@ class SimTelEventSource(EventSource): ), ).tag(config=True) - def __init__(self, input_url, config=None, parent=None, gain_selector=None, **kwargs): + def __init__( + self, input_url, config=None, parent=None, gain_selector=None, **kwargs + ): """ EventSource for simtelarray files using the pyeventio library. 
@@ -225,7 +228,7 @@ def datalevels(self): @property def obs_id(self): - return self.file_.header['run'] + return self.file_.header["run"] @property def mc_header(self): @@ -342,7 +345,7 @@ def _generate_events(self): for counter, array_event in enumerate(self.file_): - event_id = array_event.get('event_id', -1) + event_id = array_event.get("event_id", -1) obs_id = self.file_.header["run"] tels_with_data = set(array_event["telescope_events"].keys()) data.count = counter @@ -385,9 +388,7 @@ def _generate_events(self): ) self._fill_event_pointing( - data.pointing.tel[tel_id], - mc, - tracking_positions[tel_id], + data.pointing.tel[tel_id], mc, tracking_positions[tel_id], ) r0 = data.r0.tel[tel_id] @@ -433,27 +434,28 @@ def _fill_trigger_info(data, array_event): elif array_event["type"] == "calibration": # if using eventio >= 1.1.1, we can use the calibration_type data.trigger.event_type = SIMTEL_TO_CTA_EVENT_TYPE.get( - array_event.get("calibration_type", -1), - EventType.OTHER_CALIBRATION + array_event.get("calibration_type", -1), EventType.OTHER_CALIBRATION ) else: data.trigger.event_type = EventType.UNKNOWN data.trigger.tels_with_trigger = trigger["triggered_telescopes"] - data.trigger.time = parse_simtel_time(trigger['gps_time']) + data.trigger.time = parse_simtel_time(trigger["gps_time"]) - for tel_id, time in zip(trigger['triggered_telescopes'], trigger['trigger_times']): + for tel_id, time in zip( + trigger["triggered_telescopes"], trigger["trigger_times"] + ): # time is relative to central trigger in nano seconds data.trigger.tel[tel_id].time = data.trigger.time + u.Quantity(time, u.ns) def _fill_array_pointing(self, data): - if self.file_.header['tracking_mode'] == 0: - az, alt = self.file_.header['direction'] + if self.file_.header["tracking_mode"] == 0: + az, alt = self.file_.header["direction"] data.pointing.array_altitude = u.Quantity(alt, u.rad) data.pointing.array_azimuth = u.Quantity(az, u.rad) else: - ra, dec = self.file_.header['direction'] + ra, dec = self.file_.header["direction"] data.pointing.array_ra = u.Quantity(ra, u.rad) data.pointing.array_dec = u.Quantity(dec, u.rad) @@ -466,12 +468,8 @@ def _parse_mc_header(self): energy_range_min=mc_run_head["E_range"][0] * u.TeV, energy_range_max=mc_run_head["E_range"][1] * u.TeV, prod_site_B_total=mc_run_head["B_total"] * u.uT, - prod_site_B_declination=Angle( - mc_run_head["B_declination"], u.rad, - ), - prod_site_B_inclination=Angle( - mc_run_head["B_inclination"], u.rad, - ), + prod_site_B_declination=Angle(mc_run_head["B_declination"], u.rad,), + prod_site_B_inclination=Angle(mc_run_head["B_inclination"], u.rad,), prod_site_alt=mc_run_head["obsheight"] * u.m, spectral_index=mc_run_head["spectral_index"], shower_prog_start=mc_run_head["shower_prog_start"], diff --git a/ctapipe/io/tableio.py b/ctapipe/io/tableio.py index f6b55913694..72844427966 100644 --- a/ctapipe/io/tableio.py +++ b/ctapipe/io/tableio.py @@ -42,7 +42,7 @@ def exclude(self, table_name, pattern): pattern: str regular expression string to match column name """ - table_name = table_name.lstrip('/') + table_name = table_name.lstrip("/") self._exclusions[table_name].append(re.compile(pattern)) def _is_column_excluded(self, table_name, col_name): diff --git a/ctapipe/io/tests/test_event_source.py b/ctapipe/io/tests/test_event_source.py index f4748d546e8..fa51b49d9e1 100644 --- a/ctapipe/io/tests/test_event_source.py +++ b/ctapipe/io/tests/test_event_source.py @@ -38,7 +38,7 @@ def obs_id(self): @property def datalevels(self): - return (DataLevel.R0, ) + return 
(DataLevel.R0,) def test_can_be_implemented(): @@ -75,7 +75,7 @@ def test_function_nonexistant_file(): def test_from_config(): dataset = get_dataset_path("gamma_test_large.simtel.gz") - config = Config({'EventSource': {'input_url': dataset}}) + config = Config({"EventSource": {"input_url": dataset}}) reader = EventSource.from_config(config=config, parent=None) assert isinstance(reader, SimTelEventSource) assert str(reader.input_url) == dataset @@ -95,7 +95,7 @@ def test_from_config_default(): def test_from_config_invalid_type(): dataset = get_dataset_path("gamma_test_large.simtel.gz") EventSource.input_url.default_value = dataset - config = Config({'EventSource': {'input_url': 124}}) + config = Config({"EventSource": {"input_url": 124}}) with pytest.raises(TraitError): EventSource.from_config(config=config, parent=None) @@ -103,7 +103,7 @@ def test_from_config_invalid_type(): def test_event_source_config(): dataset1 = get_dataset_path("gamma_test_large.simtel.gz") dataset2 = get_dataset_path("gamma_test_large.simtel.gz") - config = Config({'EventSource': {'input_url': dataset1}}) + config = Config({"EventSource": {"input_url": dataset1}}) reader = event_source(dataset2, config=config) assert isinstance(reader, SimTelEventSource) assert str(reader.input_url) == dataset2 @@ -112,7 +112,7 @@ def test_event_source_config(): def test_event_source_input_url_config_override(): dataset1 = get_dataset_path("gamma_test_large.simtel.gz") dataset2 = get_dataset_path("gamma_test_large.simtel.gz") - config = Config({'EventSource': {'input_url': dataset1}}) + config = Config({"EventSource": {"input_url": dataset1}}) reader = event_source(input_url=dataset2, config=config) assert isinstance(reader, SimTelEventSource) assert str(reader.input_url) == dataset2 @@ -128,10 +128,7 @@ def test_max_events(): def test_max_events_from_config(): dataset = get_dataset_path("gamma_test_large.simtel.gz") max_events = 10 - config = Config({'EventSource': { - 'input_url': dataset, - 'max_events': max_events, - }}) + config = Config({"EventSource": {"input_url": dataset, "max_events": max_events,}}) reader = EventSource.from_config(config=config) assert reader.max_events == max_events @@ -146,9 +143,6 @@ def test_allowed_tels(): def test_allowed_tels_from_config(): dataset = get_dataset_path("gamma_test_large.simtel.gz") - config = Config({'EventSource': { - 'input_url': dataset, - 'allowed_tels': {1, 3} - }}) + config = Config({"EventSource": {"input_url": dataset, "allowed_tels": {1, 3}}}) reader = EventSource.from_config(config=config, parent=None) assert len(reader.allowed_tels) == 2 diff --git a/ctapipe/io/tests/test_eventseeker.py b/ctapipe/io/tests/test_eventseeker.py index ef1dcedce88..757cafa0c46 100644 --- a/ctapipe/io/tests/test_eventseeker.py +++ b/ctapipe/io/tests/test_eventseeker.py @@ -17,7 +17,7 @@ def test_eventseeker(): event = seeker[0] assert event.count == 0 - event = seeker['31007'] + event = seeker["31007"] assert event.index.event_id == 31007 events = seeker[0:2] @@ -29,7 +29,7 @@ def test_eventseeker(): for i, event in enumerate(events): assert event.count == i - ids = ['23703', '31007'] + ids = ["23703", "31007"] events = seeker[ids] for i, event in zip(ids, events): @@ -39,12 +39,14 @@ def test_eventseeker(): event = seeker[200] with pytest.raises(ValueError): - event = seeker['t'] + event = seeker["t"] with pytest.raises(TypeError): event = seeker[dict()] - with SimTelEventSource(input_url=dataset, max_events=5, back_seekable=True) as reader: + with SimTelEventSource( + input_url=dataset, 
max_events=5, back_seekable=True + ) as reader: seeker = EventSeeker(reader=reader) with pytest.raises(IndexError): event = seeker[5] diff --git a/ctapipe/io/tests/test_files.py b/ctapipe/io/tests/test_files.py index 933e4633d7f..07aeb050d91 100644 --- a/ctapipe/io/tests/test_files.py +++ b/ctapipe/io/tests/test_files.py @@ -3,13 +3,15 @@ def test_get_file_type(): - test_filenames = {'test.fits.gz': 'fits', - 'test.fits': 'fits', - 'test.fits.bz2': 'fits', - 'test.fit': 'fits', - 'test_file.eventio.gz': 'eventio', - 'test_file.eventio': 'eventio', - 'more.complex.fileame.txt.gz': 'txt'} + test_filenames = { + "test.fits.gz": "fits", + "test.fits": "fits", + "test.fits.bz2": "fits", + "test.fit": "fits", + "test_file.eventio.gz": "eventio", + "test_file.eventio": "eventio", + "more.complex.fileame.txt.gz": "txt", + } for filename, filetype in test_filenames.items(): assert get_file_type(filename) == filetype diff --git a/ctapipe/io/tests/test_hdf5.py b/ctapipe/io/tests/test_hdf5.py index 06de62a81bf..4ee570fbc22 100644 --- a/ctapipe/io/tests/test_hdf5.py +++ b/ctapipe/io/tests/test_hdf5.py @@ -73,21 +73,21 @@ def test_prefix(tmp_path): def test_units(): class WithUnits(Container): - inverse_length = Field(5 / u.m, 'foo') - time = Field(1 * u.s, 'bar', unit=u.s) - grammage = Field(2 * u.g / u.cm**2, 'baz', unit=u.g / u.cm**2) + inverse_length = Field(5 / u.m, "foo") + time = Field(1 * u.s, "bar", unit=u.s) + grammage = Field(2 * u.g / u.cm ** 2, "baz", unit=u.g / u.cm ** 2) c = WithUnits() with tempfile.NamedTemporaryFile() as f: with HDF5TableWriter(f.name, "data") as writer: - writer.write('units', c) + writer.write("units", c) - with tables.open_file(f.name, 'r') as f: + with tables.open_file(f.name, "r") as f: - assert f.root.data.units.attrs['inverse_length_UNIT'] == 'm-1' - assert f.root.data.units.attrs['time_UNIT'] == 's' - assert f.root.data.units.attrs['grammage_UNIT'] == 'cm-2 g' + assert f.root.data.units.attrs["inverse_length_UNIT"] == "m-1" + assert f.root.data.units.attrs["time_UNIT"] == "s" + assert f.root.data.units.attrs["grammage_UNIT"] == "cm-2 g" def test_write_containers(temp_h5_file): @@ -112,7 +112,7 @@ class C2(Container): def test_write_bool(): class C(Container): - boolean = Field(True, 'Boolean value') + boolean = Field(True, "Boolean value") with tempfile.NamedTemporaryFile() as f: with HDF5TableWriter(f.name, "test") as writer: @@ -122,7 +122,7 @@ class C(Container): c = C() with HDF5TableReader(f.name) as reader: - c_reader = reader.read('/test/c', c) + c_reader = reader.read("/test/c", c) for i in range(2): cur = next(c_reader) expected = (i % 2) == 0 @@ -132,21 +132,21 @@ class C(Container): def test_write_large_integer(): class C(Container): - value = Field(True, 'Integer value') + value = Field(True, "Integer value") exps = [15, 31, 63] with tempfile.NamedTemporaryFile() as f: with HDF5TableWriter(f.name, "test") as writer: for exp in exps: - c = C(value=2**exp - 1) + c = C(value=2 ** exp - 1) writer.write("c", c) c = C() with HDF5TableReader(f.name) as reader: - c_reader = reader.read('/test/c', c) + c_reader = reader.read("/test/c", c) for exp in exps: cur = next(c_reader) - assert cur.value == 2**exp - 1 + assert cur.value == 2 ** exp - 1 def test_read_container(temp_h5_file): @@ -435,62 +435,64 @@ def test_filters(): from tables import Filters, open_file class TestContainer(Container): - value = Field(-1, 'test') + value = Field(-1, "test") no_comp = Filters(complevel=0) - zstd = Filters(complevel=5, complib='blosc:zstd') + zstd = Filters(complevel=5, 
complib="blosc:zstd") - with tempfile.NamedTemporaryFile(suffix='.hdf5') as f: - with HDF5TableWriter(f.name, group_name='data', mode='w', filters=no_comp) as writer: + with tempfile.NamedTemporaryFile(suffix=".hdf5") as f: + with HDF5TableWriter( + f.name, group_name="data", mode="w", filters=no_comp + ) as writer: assert writer._h5file.filters.complevel == 0 c = TestContainer(value=5) - writer.write('default', c) + writer.write("default", c) writer.filters = zstd - writer.write('zstd', c) + writer.write("zstd", c) writer.filters = no_comp - writer.write('nocomp', c) + writer.write("nocomp", c) with open_file(f.name) as h5file: assert h5file.root.data.default.filters.complevel == 0 assert h5file.root.data.zstd.filters.complevel == 5 - assert h5file.root.data.zstd.filters.complib == 'blosc:zstd' + assert h5file.root.data.zstd.filters.complib == "blosc:zstd" assert h5file.root.data.nocomp.filters.complevel == 0 def test_column_order(): - ''' Test that columns are written in the order the containers define them''' + """ Test that columns are written in the order the containers define them""" class Container1(Container): - b = Field(1, 'b') - a = Field(2, 'a') + b = Field(1, "b") + a = Field(2, "a") class Container2(Container): - d = Field(3, 'd') - c = Field(4, 'c') + d = Field(3, "d") + c = Field(4, "c") # test with single container - with tempfile.NamedTemporaryFile(suffix='.hdf5') as f: - with HDF5TableWriter(f.name, mode='w') as writer: + with tempfile.NamedTemporaryFile(suffix=".hdf5") as f: + with HDF5TableWriter(f.name, mode="w") as writer: c = Container1() - writer.write('foo', c) + writer.write("foo", c) - with tables.open_file(f.name, 'r') as f: - assert f.root.foo[:].dtype.names == ('b', 'a') + with tables.open_file(f.name, "r") as f: + assert f.root.foo[:].dtype.names == ("b", "a") # test with two containers - with tempfile.NamedTemporaryFile(suffix='.hdf5') as f: - with HDF5TableWriter(f.name, mode='w') as writer: + with tempfile.NamedTemporaryFile(suffix=".hdf5") as f: + with HDF5TableWriter(f.name, mode="w") as writer: c1 = Container1() c2 = Container2() - writer.write('foo', [c2, c1]) - writer.write('bar', [c1, c2]) + writer.write("foo", [c2, c1]) + writer.write("bar", [c1, c2]) - with tables.open_file(f.name, 'r') as f: - assert f.root.foo[:].dtype.names == ('d', 'c', 'b', 'a') - assert f.root.bar[:].dtype.names == ('b', 'a', 'd', 'c') + with tables.open_file(f.name, "r") as f: + assert f.root.foo[:].dtype.names == ("d", "c", "b", "a") + assert f.root.bar[:].dtype.names == ("b", "a", "d", "c") def test_writing_nan_defaults(): @@ -498,9 +500,9 @@ def test_writing_nan_defaults(): params = ImageParametersContainer() - with tempfile.NamedTemporaryFile(suffix='.hdf5') as f: - with HDF5TableWriter(f.name, mode='w') as writer: - writer.write('params', params.values()) + with tempfile.NamedTemporaryFile(suffix=".hdf5") as f: + with HDF5TableWriter(f.name, mode="w") as writer: + writer.write("params", params.values()) ALL_CONTAINERS = [] @@ -513,17 +515,17 @@ def test_writing_nan_defaults(): pass -@pytest.mark.parametrize('cls', ALL_CONTAINERS) +@pytest.mark.parametrize("cls", ALL_CONTAINERS) def test_write_default_container(cls): - with tempfile.NamedTemporaryFile(suffix='.hdf5') as f: - with HDF5TableWriter(f.name, mode='w') as writer: + with tempfile.NamedTemporaryFile(suffix=".hdf5") as f: + with HDF5TableWriter(f.name, mode="w") as writer: try: - writer.write('params', cls()) + writer.write("params", cls()) except ValueError as e: # some containers do not have writable members, 
# only subcontainers. For now, ignore them. - if 'cannot create an empty data type' in str(e): + if "cannot create an empty data type" in str(e): pytest.xfail() else: raise diff --git a/ctapipe/io/tests/test_simteleventsource.py b/ctapipe/io/tests/test_simteleventsource.py index 73d2e3224a8..30256ca58ce 100644 --- a/ctapipe/io/tests/test_simteleventsource.py +++ b/ctapipe/io/tests/test_simteleventsource.py @@ -217,8 +217,8 @@ def test_calibration_events(): def test_trigger_times(): source = SimTelEventSource(input_url=calib_events_path) - t0 = Time('2020-05-06T15:30:00') - t1 = Time('2020-05-06T15:40:00') + t0 = Time("2020-05-06T15:30:00") + t1 = Time("2020-05-06T15:40:00") for event in source: assert t0 <= event.trigger.time <= t1 diff --git a/ctapipe/io/tests/test_toysource.py b/ctapipe/io/tests/test_toysource.py index 279195142a6..e477f8234a1 100644 --- a/ctapipe/io/tests/test_toysource.py +++ b/ctapipe/io/tests/test_toysource.py @@ -4,10 +4,10 @@ from ctapipe.utils import get_dataset_path -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def subarray(): - lst = TelescopeDescription.from_name('LST', 'LSTCam') + lst = TelescopeDescription.from_name("LST", "LSTCam") tels = [lst] * 4 positions = { @@ -18,7 +18,7 @@ def subarray(): } descriptions = {i: t for i, t in enumerate(tels, start=1)} - return SubarrayDescription('test', positions, descriptions) + return SubarrayDescription("test", positions, descriptions) def test_toyeventsource(subarray): @@ -36,5 +36,7 @@ def test_toyeventsource(subarray): def test_is_compatible(): from ctapipe.io.toymodel import ToyEventSource - assert not ToyEventSource.is_compatible('test.fits.gz') - assert not ToyEventSource.is_compatible(get_dataset_path('gamma_test_large.simtel.gz')) + assert not ToyEventSource.is_compatible("test.fits.gz") + assert not ToyEventSource.is_compatible( + get_dataset_path("gamma_test_large.simtel.gz") + ) diff --git a/ctapipe/io/toymodel.py b/ctapipe/io/toymodel.py index ee3427c71c3..62521420000 100644 --- a/ctapipe/io/toymodel.py +++ b/ctapipe/io/toymodel.py @@ -8,7 +8,9 @@ import astropy.units as u from ..containers import ( - DataContainer, DL1CameraContainer, EventIndexContainer, + DataContainer, + DL1CameraContainer, + EventIndexContainer, ) from ..core import traits from ..core import TelescopeComponent @@ -22,26 +24,26 @@ class ToyEventSource(EventSource, TelescopeComponent): trigger_probability = traits.FloatTelescopeParameter( - default_value=0.5, help='Probability that the telescope has an event', + default_value=0.5, help="Probability that the telescope has an event", ).tag(config=True) min_length_m = traits.FloatTelescopeParameter( - default_value=0.05, help='Minimum length m', + default_value=0.05, help="Minimum length m", ).tag(config=True) max_length_m = traits.FloatTelescopeParameter( - default_value=0.3, help='Maximum length in m', + default_value=0.3, help="Maximum length in m", ).tag(config=True) min_eccentricity = traits.FloatTelescopeParameter( - default_value=0.8, help='Minimum eccentricity = sqrt(1 - width**2/length**2)', + default_value=0.8, help="Minimum eccentricity = sqrt(1 - width**2/length**2)", ).tag(config=True) max_eccentricity = traits.FloatTelescopeParameter( - default_value=0.98, help='Maximum eccentricity = sqrt(1 - width**2/length**2)', + default_value=0.98, help="Maximum eccentricity = sqrt(1 - width**2/length**2)", ).tag(config=True) min_skewness = traits.FloatTelescopeParameter( - default_value=0.1, help='Minimum skewness', + default_value=0.1, help="Minimum skewness", 
).tag(config=True) max_skewness = traits.FloatTelescopeParameter( - default_value=0.5, help='Maximum skewness', + default_value=0.5, help="Maximum skewness", ).tag(config=True) def __init__(self, subarray, config=None, parent=None, **kwargs): @@ -51,7 +53,7 @@ def __init__(self, subarray, config=None, parent=None, **kwargs): @staticmethod def calc_width(eccentricity, length): - return length * np.sqrt(1 - eccentricity**2) + return length * np.sqrt(1 - eccentricity ** 2) @property def subarray(self): @@ -67,7 +69,7 @@ def is_simulation(self): @property def datalevels(self): - return (DataLevel.DL1_IMAGES, ) + return (DataLevel.DL1_IMAGES,) @subarray.setter def subarray(self, value): @@ -115,20 +117,17 @@ def generate_event(self): # draw length length = np.random.uniform( - self.min_length_m.tel[tel_id], - self.max_length_m.tel[tel_id], + self.min_length_m.tel[tel_id], self.max_length_m.tel[tel_id], ) eccentricity = np.random.uniform( - self.min_eccentricity.tel[tel_id], - self.max_eccentricity.tel[tel_id], + self.min_eccentricity.tel[tel_id], self.max_eccentricity.tel[tel_id], ) width = self.calc_width(eccentricity, length) psi = np.random.randint(0, 360) intensity = np.random.poisson(int(1e5 * width * length)) skewness = np.random.uniform( - self.min_skewness.tel[tel_id], - self.max_skewness.tel[tel_id] + self.min_skewness.tel[tel_id], self.max_skewness.tel[tel_id] ) model = toymodel.SkewedGaussian( @@ -136,13 +135,10 @@ def generate_event(self): y=y, length=length * u.m, width=width * u.m, - psi=f'{psi}d', + psi=f"{psi}d", skewness=skewness, ) - image, _, _ = model.generate_image( - cam, - intensity, - ) + image, _, _ = model.generate_image(cam, intensity,) event.dl1.tel[tel_id] = DL1CameraContainer(image=image) diff --git a/ctapipe/plotting/bokeh_event_viewer.py b/ctapipe/plotting/bokeh_event_viewer.py index ea5a4d24e7c..d06dee5bc7a 100644 --- a/ctapipe/plotting/bokeh_event_viewer.py +++ b/ctapipe/plotting/bokeh_event_viewer.py @@ -19,18 +19,18 @@ def __init__(self, event_viewer, fig=None): Figure to store the bokeh plot onto (optional) """ self._event = None - self._view = 'r0' + self._view = "r0" self._telid = None self._channel = 0 self._time = 0 super().__init__(fig=fig) self._view_options = { - 'r0': lambda e, t, c, time: e.r0.tel[t].waveform[c, :, time], - 'r1': lambda e, t, c, time: e.r1.tel[t].waveform[:, time], - 'dl0': lambda e, t, c, time: e.dl0.tel[t].waveform[:, time], - 'dl1': lambda e, t, c, time: e.dl1.tel[t].image[:], - 'peak_time': lambda e, t, c, time: e.dl1.tel[t].peak_time[:], + "r0": lambda e, t, c, time: e.r0.tel[t].waveform[c, :, time], + "r1": lambda e, t, c, time: e.r1.tel[t].waveform[:, time], + "dl0": lambda e, t, c, time: e.dl0.tel[t].waveform[:, time], + "dl1": lambda e, t, c, time: e.dl1.tel[t].image[:], + "peak_time": lambda e, t, c, time: e.dl1.tel[t].peak_time[:], } self.w_view = None @@ -60,7 +60,7 @@ def _set_image(self): try: self.image = self._view_options[v](e, t, c, time) - self.fig.title.text = f'{v} (T = {time})' + self.fig.title.text = f"{v} (T = {time})" except TypeError: self.image = None @@ -142,7 +142,7 @@ def _on_pixel_click(self, pix_id): def create_view_widget(self): self.w_view = Select(title="View:", value="", options=[], width=5) - self.w_view.on_change('value', self.on_view_widget_change) + self.w_view.on_change("value", self.on_view_widget_change) self.layout = column([self.w_view, self.layout]) def update_view_widget(self): @@ -168,16 +168,16 @@ def __init__(self, event_viewer, fig=None): Figure to store the bokeh plot onto 
(optional) """ self._event = None - self._view = 'r0' + self._view = "r0" self._telid = None self._channel = 0 self._pixel = 0 super().__init__(fig=fig) self._view_options = { - 'r0': lambda e, t, c, p: e.r0.tel[t].waveform[c, p], - 'r1': lambda e, t, c, p: e.r1.tel[t].waveform[p], - 'dl0': lambda e, t, c, p: e.dl0.tel[t].waveform[p], + "r0": lambda e, t, c, p: e.r0.tel[t].waveform[c, p], + "r1": lambda e, t, c, p: e.r1.tel[t].waveform[p], + "dl0": lambda e, t, c, p: e.dl0.tel[t].waveform[p], } self.w_view = None @@ -206,7 +206,7 @@ def _set_waveform(self): try: self.waveform = self._view_options[v](e, t, c, p) - self.fig.title.text = f'{v} (Pixel = {p})' + self.fig.title.text = f"{v} (Pixel = {p})" except TypeError: self.waveform = None @@ -274,7 +274,7 @@ def _on_waveform_click(self, time): def create_view_widget(self): self.w_view = Select(title="View:", value="", options=[], width=5) - self.w_view.on_change('value', self.on_view_widget_change) + self.w_view.on_change("value", self.on_view_widget_change) self.layout = column([self.w_view, self.layout]) def update_view_widget(self): @@ -294,7 +294,7 @@ def __init__( parent=None, num_cameras=1, num_waveforms=2, - **kwargs + **kwargs, ): """ A class to organise the interface between @@ -321,7 +321,7 @@ def __init__( super().__init__(config=config, parent=parent, **kwargs) self._event = None - self._view = 'r0' + self._view = "r0" self._telid = None self._channel = 0 @@ -350,7 +350,7 @@ def create(self): for iwav in range(self.num_waveforms): wav = BokehEventViewerWaveform(self) active_color = self.cameras[0].active_colors[iwav] - wav.fig.select(name='line')[0].glyph.line_color = active_color + wav.fig.select(name="line")[0].glyph.line_color = active_color wav.enable_time_picker() wav.create_view_widget() wav.update_view_widget() @@ -358,9 +358,9 @@ def create(self): self.waveforms.append(wav) self.waveform_layouts.append(wav.layout) - self.layout = layout([ - [column(self.camera_layouts), column(self.waveform_layouts)], - ]) + self.layout = layout( + [[column(self.camera_layouts), column(self.waveform_layouts)],] + ) def enable_automatic_index_increment(self): for cam in self.cameras: diff --git a/ctapipe/plotting/charge_resolution.py b/ctapipe/plotting/charge_resolution.py index a4485f51f33..b64190314db 100644 --- a/ctapipe/plotting/charge_resolution.py +++ b/ctapipe/plotting/charge_resolution.py @@ -20,7 +20,7 @@ def root_mean_square(array): ------- float """ - return np.sqrt(np.mean(array**2)) + return np.sqrt(np.mean(array ** 2)) def bin_dataframe(df, n_bins): @@ -40,27 +40,25 @@ def bin_dataframe(df, n_bins): ------- pd.DataFrame """ - true = df['true'].values + true = df["true"].values min_ = true.min() max_ = true.max() bins = np.geomspace(min_, max_, n_bins) log_bin_width = np.diff(np.log10(bins))[0] - bins = np.append(bins, 10**(np.log10(bins[-1]) + log_bin_width)) - df['bin'] = np.digitize(true, bins, right=True) - 1 + bins = np.append(bins, 10 ** (np.log10(bins[-1]) + log_bin_width)) + df["bin"] = np.digitize(true, bins, right=True) - 1 return df class ChargeResolutionPlotter(Component): - output_path = Path( - help='Output path to save the plot.' 
- ).tag(config=True) + output_path = Path(help="Output path to save the plot.").tag(config=True) n_bins = Int( 40, - help='Number of bins for collecting true charges and combining ' - 'their resolution' + help="Number of bins for collecting true charges and combining " + "their resolution", ).tag(config=True) def __init__(self, config=None, parent=None, **kwargs): @@ -95,9 +93,7 @@ def __init__(self, config=None, parent=None, **kwargs): self.ax = self.fig.add_subplot(111) self.ax.set_xlabel("True Charge (p.e.)") - self.ax.set_ylabel( - r"Fractional Charge Resolution $\frac{{\sigma_Q}}{{Q}}$" - ) + self.ax.set_ylabel(r"Fractional Charge Resolution $\frac{{\sigma_Q}}{{Q}}$") if not self.output_path: raise ValueError("Output path must be specified") @@ -112,9 +108,9 @@ def _set_file(self, path): path : str Path to the charge resolution HDF5 file """ - with pd.HDFStore(path, 'r') as store: - self._df_pixel = store['charge_resolution_pixel'] - self._df_camera = store['charge_resolution_camera'] + with pd.HDFStore(path, "r") as store: + self._df_pixel = store["charge_resolution_pixel"] + self._df_camera = store["charge_resolution_camera"] def _plot(self, x, y, **kwargs): """ @@ -129,15 +125,14 @@ def _plot(self, x, y, **kwargs): label : str """ defaults = dict( - mew=1, capsize=1, elinewidth=0.5, markersize=2, - linewidth=0.5, fmt='.' + mew=1, capsize=1, elinewidth=0.5, markersize=2, linewidth=0.5, fmt="." ) kwargs = {**defaults, **kwargs} (_, caps, _) = self.ax.errorbar(x, y, **kwargs) for cap in caps: cap.set_markeredgewidth(0.5) - def plot_average(self, path, label='', **kwargs): + def plot_average(self, path, label="", **kwargs): """ Plot the average and standard deviation of the charge resolution across the pixels of the camera. @@ -152,14 +147,14 @@ def plot_average(self, path, label='', **kwargs): """ self._set_file(path) df_binned = bin_dataframe(self._df_pixel, self.n_bins) - agg = {'charge_resolution': ['mean', 'std'], 'true': 'mean'} - df_agg = df_binned.groupby(['bin']).agg(agg) - x = df_agg['true']['mean'].values - y = df_agg['charge_resolution']['mean'].values - yerr = df_agg['charge_resolution']['std'].values + agg = {"charge_resolution": ["mean", "std"], "true": "mean"} + df_agg = df_binned.groupby(["bin"]).agg(agg) + x = df_agg["true"]["mean"].values + y = df_agg["charge_resolution"]["mean"].values + yerr = df_agg["charge_resolution"]["std"].values self._plot(x, y, yerr=yerr, label=label, **kwargs) - def plot_pixel(self, path, pixel, label='', **kwargs): + def plot_pixel(self, path, pixel, label="", **kwargs): """ Plot a single pixel's charge resolution. @@ -176,16 +171,16 @@ def plot_pixel(self, path, pixel, label='', **kwargs): kwargs """ self._set_file(path) - df_p = self._df_pixel.loc[self._df_pixel['pixel'] == pixel] + df_p = self._df_pixel.loc[self._df_pixel["pixel"] == pixel] df_binned = bin_dataframe(df_p, self.n_bins) - agg = {'charge_resolution': 'mean', 'true': 'mean', 'n': 'sum'} - df_agg = df_binned.groupby(['bin']).agg(agg) - x = df_agg['true'].values - y = df_agg['charge_resolution'].values - yerr = 1 / np.sqrt(df_agg['n'].values) + agg = {"charge_resolution": "mean", "true": "mean", "n": "sum"} + df_agg = df_binned.groupby(["bin"]).agg(agg) + x = df_agg["true"].values + y = df_agg["charge_resolution"].values + yerr = 1 / np.sqrt(df_agg["n"].values) self._plot(x, y, yerr=yerr, label=label, **kwargs) - def plot_camera(self, path, label='', **kwargs): + def plot_camera(self, path, label="", **kwargs): """ Plot the charge resolution for the entire camera. 
@@ -201,23 +196,21 @@ def plot_camera(self, path, label='', **kwargs): """ self._set_file(path) df_binned = bin_dataframe(self._df_camera, self.n_bins) - agg = {'charge_resolution': 'mean', 'true': 'mean', 'n': 'sum'} - df_agg = df_binned.groupby(['bin']).agg(agg) - x = df_agg['true'].values - y = df_agg['charge_resolution'].values - yerr = 1 / np.sqrt(df_agg['n'].values) + agg = {"charge_resolution": "mean", "true": "mean", "n": "sum"} + df_agg = df_binned.groupby(["bin"]).agg(agg) + x = df_agg["true"].values + y = df_agg["charge_resolution"].values + yerr = 1 / np.sqrt(df_agg["n"].values) self._plot(x, y, yerr=yerr, label=label, **kwargs) def _finish(self): """ Perform the finishing touches to the figure before saving. """ - self.ax.set_xscale('log') - self.ax.get_xaxis().set_major_formatter( - FuncFormatter(lambda x, _: f'{x:g}')) - self.ax.set_yscale('log') - self.ax.get_yaxis().set_major_formatter( - FuncFormatter(lambda y, _: f'{y:g}')) + self.ax.set_xscale("log") + self.ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, _: f"{x:g}")) + self.ax.set_yscale("log") + self.ax.get_yaxis().set_major_formatter(FuncFormatter(lambda y, _: f"{y:g}")) def save(self): """ @@ -230,7 +223,7 @@ def save(self): print(f"Creating directory: {output_dir}") os.makedirs(output_dir) - self.fig.savefig(self.output_path, bbox_inches='tight') + self.fig.savefig(self.output_path, bbox_inches="tight") print(f"Figure saved to: {self.output_path}") Provenance().add_output_file(self.output_path) @@ -259,9 +252,9 @@ def limit_curves(q, nsb, t_w, n_e, sigma_g, enf): enf : float Excess noise factor. """ - sigma_0 = np.sqrt(nsb * t_w + n_e**2) + sigma_0 = np.sqrt(nsb * t_w + n_e ** 2) sigma_enf = 1 + enf - sigma_q = np.sqrt(sigma_0**2 + sigma_enf**2 * q + sigma_g**2 * q**2) + sigma_q = np.sqrt(sigma_0 ** 2 + sigma_enf ** 2 * q + sigma_g ** 2 * q ** 2) return sigma_q / q @staticmethod @@ -309,7 +302,7 @@ def plot_requirement(self, q): Charges to evaluate the requirement curve at """ req = self.requirement(q) - self.ax.plot(q, req, '--', color='black', label="Requirement") + self.ax.plot(q, req, "--", color="black", label="Requirement") def plot_poisson(self, q): """ @@ -321,7 +314,7 @@ def plot_poisson(self, q): Charges to evaluate the limit at """ poisson = self.poisson(q) - self.ax.plot(q, poisson, '--', color='grey', label="Poisson") + self.ax.plot(q, poisson, "--", color="grey", label="Poisson") class ChargeResolutionWRRPlotter(ChargeResolutionPlotter): @@ -348,21 +341,20 @@ def __init__(self, config=None, parent=None, **kwargs): def _plot(self, x, y, **kwargs): y = y / self.requirement(x) - if 'yerr' in kwargs: - kwargs['yerr'] /= self.requirement(x) + if "yerr" in kwargs: + kwargs["yerr"] /= self.requirement(x) super()._plot(x, y, **kwargs) def plot_requirement(self, q): req = self.requirement(q) req /= self.requirement(q) - self.ax.plot(q, req, '--', color='black', label="Requirement") + self.ax.plot(q, req, "--", color="black", label="Requirement") def plot_poisson(self, q): poisson = self.poisson(q) poisson /= self.requirement(q) - self.ax.plot(q, poisson, '--', color='grey', label="Poisson") + self.ax.plot(q, poisson, "--", color="grey", label="Poisson") def _finish(self): - self.ax.set_xscale('log') - self.ax.get_xaxis().set_major_formatter( - FuncFormatter(lambda x, _: f'{x:g}')) + self.ax.set_xscale("log") + self.ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, _: f"{x:g}")) diff --git a/ctapipe/plotting/tests/test_bokeh_event_viewer.py b/ctapipe/plotting/tests/test_bokeh_event_viewer.py 
index 4b448a36a1c..10480cfec80 100644 --- a/ctapipe/plotting/tests/test_bokeh_event_viewer.py +++ b/ctapipe/plotting/tests/test_bokeh_event_viewer.py @@ -140,11 +140,11 @@ def test_view_camera(example_event, example_subarray): t = list(example_event.r0.tels_with_data)[0] cam = viewer.cameras[0] - cam.view = 'r1' + cam.view = "r1" assert (cam.image == example_event.r1.tel[t].waveform[:, 0]).all() with pytest.raises(ValueError): - cam.view = 'q' + cam.view = "q" def test_view_wf(example_event, example_subarray): @@ -158,8 +158,8 @@ def test_view_wf(example_event, example_subarray): t = list(example_event.r0.tels_with_data)[0] wf = viewer.waveforms[0] - wf.view = 'r1' + wf.view = "r1" assert (wf.waveform == example_event.r1.tel[t].waveform[0, :]).all() with pytest.raises(ValueError): - wf.view = 'q' + wf.view = "q" diff --git a/ctapipe/plotting/tests/test_charge_resolution.py b/ctapipe/plotting/tests/test_charge_resolution.py index 8514d50e742..81cf2d08213 100644 --- a/ctapipe/plotting/tests/test_charge_resolution.py +++ b/ctapipe/plotting/tests/test_charge_resolution.py @@ -1,11 +1,12 @@ -from ctapipe.analysis.camera.charge_resolution import \ - ChargeResolutionCalculator +from ctapipe.analysis.camera.charge_resolution import ChargeResolutionCalculator import os import numpy as np import pandas as pd from ..charge_resolution import ( - root_mean_square, bin_dataframe, - ChargeResolutionPlotter, ChargeResolutionWRRPlotter + root_mean_square, + bin_dataframe, + ChargeResolutionPlotter, + ChargeResolutionWRRPlotter, ) from numpy.testing import assert_almost_equal import pytest @@ -20,9 +21,9 @@ def create_temp_cr_file(directory): df_p, df_c = chargeres.finish() output_path = os.path.join(str(directory), "cr.h5") - with pd.HDFStore(output_path, 'w') as store: - store['charge_resolution_pixel'] = df_p - store['charge_resolution_camera'] = df_c + with pd.HDFStore(output_path, "w") as store: + store["charge_resolution_pixel"] = df_p + store["charge_resolution_camera"] = df_c return output_path @@ -40,8 +41,8 @@ def test_bin_dataframe(): df_p, df_c = chargeres.finish() df = bin_dataframe(df_p, 20) - assert 'bin' in df.columns - assert np.unique(df['bin']).size <= 20 + assert "bin" in df.columns + assert np.unique(df["bin"]).size <= 20 def test_file_reading(tmpdir): @@ -56,8 +57,8 @@ def test_file_reading(tmpdir): def test_incorrect_file(tmpdir): path = os.path.join(str(tmpdir), "cr_incorrect.h5") output_path = os.path.join(str(tmpdir), "cr_incorrect.pdf") - with pd.HDFStore(path, 'w') as store: - store['test'] = pd.DataFrame(dict(a=[3])) + with pd.HDFStore(path, "w") as store: + store["test"] = pd.DataFrame(dict(a=[3])) plotter = ChargeResolutionPlotter(output_path=output_path) with pytest.raises(KeyError): diff --git a/ctapipe/reco/HillasReconstructor.py b/ctapipe/reco/HillasReconstructor.py index 7183943576a..2b29055980d 100644 --- a/ctapipe/reco/HillasReconstructor.py +++ b/ctapipe/reco/HillasReconstructor.py @@ -8,7 +8,7 @@ from ctapipe.reco.reco_algorithms import ( Reconstructor, InvalidWidthException, - TooFewTelescopesException + TooFewTelescopesException, ) from ctapipe.containers import ReconstructedShowerContainer from itertools import combinations @@ -31,7 +31,7 @@ from astropy import units as u -__all__ = ['HillasReconstructor', 'HillasPlane'] +__all__ = ["HillasReconstructor", "HillasPlane"] def angle(v1, v2): @@ -134,35 +134,36 @@ class are set to np.nan """ # filter warnings for missing obs time. 
this is needed because MC data has no obs time - warnings.filterwarnings(action='ignore', category=MissingFrameAttributeWarning) + warnings.filterwarnings(action="ignore", category=MissingFrameAttributeWarning) # stereoscopy needs at least two telescopes if len(hillas_dict) < 2: raise TooFewTelescopesException( - "need at least two telescopes, have {}" - .format(len(hillas_dict))) + "need at least two telescopes, have {}".format(len(hillas_dict)) + ) # check for np.nan or 0 width's as these screw up weights - if any([np.isnan(hillas_dict[tel]['width'].value) for tel in hillas_dict]): + if any([np.isnan(hillas_dict[tel]["width"].value) for tel in hillas_dict]): raise InvalidWidthException( - "A HillasContainer contains an ellipse of width==np.nan") + "A HillasContainer contains an ellipse of width==np.nan" + ) - if any([hillas_dict[tel]['width'].value == 0 for tel in hillas_dict]): + if any([hillas_dict[tel]["width"].value == 0 for tel in hillas_dict]): raise InvalidWidthException( - "A HillasContainer contains an ellipse of width==0") + "A HillasContainer contains an ellipse of width==0" + ) # use the single telescope pointing also for parallel pointing: code is more general if telescopes_pointings is None: - telescopes_pointings = {tel_id: array_pointing for tel_id in hillas_dict.keys()} + telescopes_pointings = { + tel_id: array_pointing for tel_id in hillas_dict.keys() + } else: self.divergent_mode = True self.corrected_angle_dict = {} self.initialize_hillas_planes( - hillas_dict, - subarray, - telescopes_pointings, - array_pointing + hillas_dict, subarray, telescopes_pointings, array_pointing ) # algebraic direction estimate @@ -189,17 +190,13 @@ class are set to np.nan average_intensity=np.mean([h.intensity for h in hillas_dict.values()]), is_valid=True, alt_uncert=err_est_dir, - h_max=h_max + h_max=h_max, ) return result def initialize_hillas_planes( - self, - hillas_dict, - subarray, - telescopes_pointings, - array_pointing + self, hillas_dict, subarray, telescopes_pointings, array_pointing ): """ Creates a dictionary of :class:`.HillasPlane` from a dictionary of @@ -233,15 +230,10 @@ def initialize_hillas_planes( ) camera_frame = CameraFrame( - focal_length=focal_length, - telescope_pointing=pointing + focal_length=focal_length, telescope_pointing=pointing ) - cog_coord = SkyCoord( - x=moments.x, - y=moments.y, - frame=camera_frame, - ) + cog_coord = SkyCoord(x=moments.x, y=moments.y, frame=camera_frame,) cog_coord = cog_coord.transform_to(horizon_frame) p2_coord = SkyCoord(x=p2_x, y=p2_y, frame=camera_frame) @@ -251,13 +243,14 @@ def initialize_hillas_planes( # then recalculate the psi angle if self.divergent_mode: camera_frame_parallel = CameraFrame( - focal_length=focal_length, - telescope_pointing=array_pointing + focal_length=focal_length, telescope_pointing=array_pointing ) cog_sky_to_parallel = cog_coord.transform_to(camera_frame_parallel) p2_sky_to_parallel = p2_coord.transform_to(camera_frame_parallel) - angle_psi_corr = np.arctan2(cog_sky_to_parallel.y - p2_sky_to_parallel.y, - cog_sky_to_parallel.x - p2_sky_to_parallel.x) + angle_psi_corr = np.arctan2( + cog_sky_to_parallel.y - p2_sky_to_parallel.y, + cog_sky_to_parallel.x - p2_sky_to_parallel.x, + ) self.corrected_angle_dict[tel_id] = angle_psi_corr circle = HillasPlane( @@ -300,8 +293,7 @@ def estimate_direction(self): off_angles = [angle(result, cross) for cross in crossings] * u.rad err_est_dir = np.average( - off_angles, - weights=[len(cross) for cross in crossings] + off_angles, weights=[len(cross) for cross in 
crossings] ) return result, err_est_dir @@ -348,9 +340,7 @@ def estimate_core_position(self, hillas_dict, array_pointing): core_position = line_line_intersection_3d(uvw_vectors, positions) core_pos_tilted = SkyCoord( - x=core_position[0] * u.m, - y=core_position[1] * u.m, - frame=tilted_frame + x=core_position[0] * u.m, y=core_position[1] * u.m, frame=tilted_frame ) core_pos = project_to_ground(core_pos_tilted) diff --git a/ctapipe/reco/ImPACT.py b/ctapipe/reco/ImPACT.py index 1445dae1fc7..e49ba6aa9ac 100644 --- a/ctapipe/reco/ImPACT.py +++ b/ctapipe/reco/ImPACT.py @@ -20,13 +20,17 @@ ) from ctapipe.image import poisson_likelihood_gaussian, mean_poisson_likelihood_gaussian from ctapipe.instrument import get_atmosphere_profile_functions -from ctapipe.containers import (ReconstructedShowerContainer, - ReconstructedEnergyContainer) +from ctapipe.containers import ( + ReconstructedShowerContainer, + ReconstructedEnergyContainer, +) from ctapipe.reco.reco_algorithms import Reconstructor -from ctapipe.utils.template_network_interpolator import TemplateNetworkInterpolator, \ - TimeGradientInterpolator +from ctapipe.utils.template_network_interpolator import ( + TemplateNetworkInterpolator, + TimeGradientInterpolator, +) -__all__ = ['ImPACTReconstructor', 'energy_prior', 'xmax_prior', 'guess_shower_depth'] +__all__ = ["ImPACTReconstructor", "energy_prior", "xmax_prior", "guess_shower_depth"] def guess_shower_depth(energy): @@ -50,7 +54,7 @@ def guess_shower_depth(energy): def energy_prior(energy, index=-1): - return -2 * np.log(energy**index) + return -2 * np.log(energy ** index) def xmax_prior(energy, xmax, width=100): @@ -85,15 +89,24 @@ class ImPACTReconstructor(Reconstructor): # pedestal distribution for each pixel # currently this is not availible from the calibration, # so for now lets hard code it in a dict - ped_table = {"LSTCam": 2.8, - "NectarCam": 2.3, - "FlashCam": 2.3, - "CHEC": 0.5, - "DUMMY": 0} + ped_table = { + "LSTCam": 2.8, + "NectarCam": 2.3, + "FlashCam": 2.3, + "CHEC": 0.5, + "DUMMY": 0, + } spe = 0.5 # Also hard code single p.e. 
distribution width - def __init__(self, root_dir=".", minimiser="minuit", prior="", - template_scale=1., xmax_offset=0, use_time_gradient=False): + def __init__( + self, + root_dir=".", + minimiser="minuit", + prior="", + template_scale=1.0, + xmax_offset=0, + use_time_gradient=False, + ): # First we create a dictionary of image template interpolators # for each telescope type @@ -101,18 +114,19 @@ def __init__(self, root_dir=".", minimiser="minuit", prior="", self.priors = prior self.minimiser_name = minimiser - self.file_names = {"CHEC": ["GCT_05deg_ada.template.gz", - "GCT_05deg_time.template.gz"], - "LSTCam": ["LST_05deg.template.gz", - "LST_05deg_time.template.gz"], - "NectarCam": ["MST_05deg.template.gz", - "MST_05deg_time.template.gz"], - "FlashCam": ["MST_xm_full.fits"]} + self.file_names = { + "CHEC": ["GCT_05deg_ada.template.gz", "GCT_05deg_time.template.gz"], + "LSTCam": ["LST_05deg.template.gz", "LST_05deg_time.template.gz"], + "NectarCam": ["MST_05deg.template.gz", "MST_05deg_time.template.gz"], + "FlashCam": ["MST_xm_full.fits"], + } # We also need a conversion function from height above ground to # depth of maximum To do this we need the conversion table from CORSIKA - self.thickness_profile, self.altitude_profile = \ - get_atmosphere_profile_functions('paranal', with_units=False) + ( + self.thickness_profile, + self.altitude_profile, + ) = get_atmosphere_profile_functions("paranal", with_units=False) # Next we need the position, area and amplitude from each pixel in the event # making this a class member makes passing them around much easier @@ -159,13 +173,13 @@ def initialise_templates(self, tel_type): if tel_type[t] in self.prediction.keys() or tel_type[t] == "DUMMY": continue - self.prediction[tel_type[t]] = \ - TemplateNetworkInterpolator(self.root_dir + "/" + - self.file_names[tel_type[t]][0]) + self.prediction[tel_type[t]] = TemplateNetworkInterpolator( + self.root_dir + "/" + self.file_names[tel_type[t]][0] + ) if self.use_time_gradient: - self.time_prediction[tel_type[t]] = \ - TimeGradientInterpolator(self.root_dir + "/" + - self.file_names[tel_type[t]][1]) + self.time_prediction[tel_type[t]] = TimeGradientInterpolator( + self.root_dir + "/" + self.file_names[tel_type[t]][1] + ) return True @@ -195,7 +209,7 @@ def get_hillas_mean(self): tel_num = 0 for hillas in self.hillas_parameters: - peak_x[tel_num] = hillas.x.to(u.rad).value # Fill up array + peak_x[tel_num] = hillas.x.to(u.rad).value # Fill up array peak_y[tel_num] = hillas.y.to(u.rad).value peak_amp[tel_num] = hillas.intensity tel_num += 1 @@ -232,15 +246,15 @@ def get_shower_max(self, source_x, source_y, core_x, core_y, zen): # Calculate displacement of image centroid from source position (in # rad) - disp = np.sqrt((self.peak_x - source_x)**2 + - (self.peak_y - source_y)**2) + disp = np.sqrt((self.peak_x - source_x) ** 2 + (self.peak_y - source_y) ** 2) # Calculate impact parameter of the shower - impact = np.sqrt((self.tel_pos_x - core_x)**2 + - (self.tel_pos_y - core_y)**2) + impact = np.sqrt( + (self.tel_pos_x - core_x) ** 2 + (self.tel_pos_y - core_y) ** 2 + ) # Distance above telescope is ratio of these two (small angle) height = impact / disp - weight = np.power(self.peak_amp, 0.) # weight average by sqrt amplitude + weight = np.power(self.peak_amp, 0.0) # weight average by sqrt amplitude # sqrt may not be the best option... 
# Take weighted mean of estimates @@ -289,11 +303,13 @@ def rotate_translate(pixel_pos_x, pixel_pos_y, x_trans, y_trans, phi): cosine_angle = np.cos(phi[..., np.newaxis]) sin_angle = np.sin(phi[..., np.newaxis]) - pixel_pos_trans_x = (x_trans - pixel_pos_x ) * cosine_angle - \ - (y_trans - pixel_pos_y ) * sin_angle + pixel_pos_trans_x = (x_trans - pixel_pos_x) * cosine_angle - ( + y_trans - pixel_pos_y + ) * sin_angle - pixel_pos_trans_y = (pixel_pos_x - x_trans) * sin_angle + \ - (pixel_pos_y - y_trans) * cosine_angle + pixel_pos_trans_y = (pixel_pos_x - x_trans) * sin_angle + ( + pixel_pos_y - y_trans + ) * cosine_angle return pixel_pos_trans_x, pixel_pos_trans_y def image_prediction(self, tel_type, energy, impact, x_max, pix_x, pix_y): @@ -345,8 +361,16 @@ def predict_time(self, tel_type, energy, impact, x_max): """ return self.time_prediction[tel_type](energy, impact, x_max) - def get_likelihood(self, source_x, source_y, core_x, core_y, - energy, x_max_scale, goodness_of_fit=False): + def get_likelihood( + self, + source_x, + source_y, + core_x, + core_y, + energy, + x_max_scale, + goodness_of_fit=False, + ): """Get the likelihood that the image predicted at the given test position matches the camera image. @@ -380,9 +404,7 @@ def get_likelihood(self, source_x, source_y, core_x, core_y, azimuth = self.array_direction.az # Geometrically calculate the depth of maximum given this test position - x_max = self.get_shower_max(source_x, source_y, - core_x, core_y, - zenith) + x_max = self.get_shower_max(source_x, source_y, core_x, core_y, zenith) x_max *= x_max_scale # Calculate expected Xmax given this energy @@ -399,20 +421,15 @@ def get_likelihood(self, source_x, source_y, core_x, core_y, # Calculate impact distance for all telescopes impact = np.sqrt( - (self.tel_pos_x - core_x)**2 + (self.tel_pos_y - core_y)**2 + (self.tel_pos_x - core_x) ** 2 + (self.tel_pos_y - core_y) ** 2 ) # And the expected rotation angle - phi = np.arctan2( - (self.tel_pos_x - core_x), - (self.tel_pos_y - core_y) - ) * u.rad + phi = np.arctan2((self.tel_pos_x - core_x), (self.tel_pos_y - core_y)) * u.rad # Rotate and translate all pixels such that they match the # template orientation pix_y_rot, pix_x_rot = self.rotate_translate( - self.pixel_x, - self.pixel_y, - source_x, source_y, phi + self.pixel_x, self.pixel_y, source_x, source_y, phi ) # In the interpolator class we can gain speed advantages by using masked arrays @@ -426,7 +443,8 @@ def get_likelihood(self, source_x, source_y, core_x, core_y, for tel_type in np.unique(self.tel_types).tolist(): type_mask = self.tel_types == tel_type prediction[type_mask] = self.image_prediction( - tel_type, energy * np.ones_like(impact[type_mask]), + tel_type, + energy * np.ones_like(impact[type_mask]), impact[type_mask], x_max_bin * np.ones_like(impact[type_mask]), -np.rad2deg(pix_x_rot[type_mask]), @@ -442,8 +460,7 @@ def get_likelihood(self, source_x, source_y, core_x, core_y, ) if self.use_time_gradient: - time_mask = np.logical_and(np.invert(ma.getmask(self.image)), - self.time > 0) + time_mask = np.logical_and(np.invert(ma.getmask(self.image)), self.time > 0) weight = np.sqrt(self.image) * time_mask rv = norm() @@ -453,8 +470,9 @@ def get_likelihood(self, source_x, source_y, core_x, core_y, sy = self.time * weight sxy = self.time * pix_x_rot * weight d = weight.sum(axis=1) * sxx.sum(axis=1) - sx.sum(axis=1) * sx.sum(axis=1) - time_fit = (weight.sum(axis=1) * sxy.sum(axis=1) - sx.sum(axis=1) * sy.sum( - axis=1)) / d + time_fit = ( + weight.sum(axis=1) * sxy.sum(axis=1) 
- sx.sum(axis=1) * sy.sum(axis=1) + ) / d time_fit /= -1 * (180 / math.pi) chi2 = -2 * np.log( rv.pdf((time_fit - time_gradients.T[0]) / time_gradients.T[1]) @@ -473,8 +491,9 @@ def get_likelihood(self, source_x, source_y, core_x, core_y, array_like = like if goodness_of_fit: - return np.sum(like - mean_poisson_likelihood_gaussian(prediction, self.spe, - self.ped)) + return np.sum( + like - mean_poisson_likelihood_gaussian(prediction, self.spe, self.ped) + ) prior_pen = 0 # Add prior penalities if we have them @@ -533,8 +552,18 @@ def get_likelihood_nlopt(self, x, grad): val = self.get_likelihood(x[0], x[1], x[2], x[3], x[4], x[5]) return val - def set_event_properties(self, image, time, pixel_x, pixel_y, type_tel, tel_x, tel_y, - array_direction, hillas): + def set_event_properties( + self, + image, + time, + pixel_x, + pixel_y, + type_tel, + tel_x, + tel_y, + array_direction, + hillas, + ): """The setter class is used to set the event properties within this class before minimisation can take place. This simply copies a bunch of useful properties to class members, so that we can @@ -605,8 +634,11 @@ class before minimisation can take place. This simply copies a # First allocate everything shape = (len(tel_x), max_pix_x) self.pixel_x, self.pixel_y = ma.zeros(shape), ma.zeros(shape) - self.image, self.time, self.ped = ma.zeros(shape), ma.zeros(shape),\ - ma.zeros(shape) + self.image, self.time, self.ped = ( + ma.zeros(shape), + ma.zeros(shape), + ma.zeros(shape), + ) self.tel_types = np.array(self.tel_types) # Copy everything into our masked arrays @@ -656,15 +688,12 @@ def predict(self, shower_seed, energy_seed): """ self.reset_interpolator() - horizon_seed = SkyCoord( - az=shower_seed.az, alt=shower_seed.alt, frame=AltAz() - ) + horizon_seed = SkyCoord(az=shower_seed.az, alt=shower_seed.alt, frame=AltAz()) nominal_seed = horizon_seed.transform_to(self.nominal_frame) source_x = nominal_seed.fov_lon.to_value(u.rad) source_y = nominal_seed.fov_lat.to_value(u.rad) - ground = GroundFrame(x=shower_seed.core_x, - y=shower_seed.core_y, z=0 * u.m) + ground = GroundFrame(x=shower_seed.core_x, y=shower_seed.core_y, z=0 * u.m) tilted = ground.transform_to( TiltedGroundFrame(pointing_direction=self.array_direction) ) @@ -672,17 +701,25 @@ def predict(self, shower_seed, energy_seed): tilt_y = tilted.y.to(u.m).value zenith = 90 * u.deg - self.array_direction.alt - seeds = spread_line_seed(self.hillas_parameters, - self.tel_pos_x, self.tel_pos_y, - source_x, source_y, tilt_x, tilt_y, - energy_seed.energy.value, - shift_frac=[1])[0] + seeds = spread_line_seed( + self.hillas_parameters, + self.tel_pos_x, + self.tel_pos_y, + source_x, + source_y, + tilt_x, + tilt_y, + energy_seed.energy.value, + shift_frac=[1], + )[0] # Perform maximum likelihood fit - fit_params, errors, like = self.minimise(params=seeds[0], - step=seeds[1], - limits=seeds[2], - minimiser_name=self.minimiser_name) + fit_params, errors, like = self.minimise( + params=seeds[0], + step=seeds[1], + limits=seeds[2], + minimiser_name=self.minimiser_name, + ) # Create a container class for reconstructed shower shower_result = ReconstructedShowerContainer() @@ -692,7 +729,7 @@ def predict(self, shower_seed, energy_seed): nominal = SkyCoord( fov_lon=fit_params[0] * u.rad, fov_lat=fit_params[1] * u.rad, - frame=self.nominal_frame + frame=self.nominal_frame, ) horizon = nominal.transform_to(AltAz()) @@ -700,7 +737,7 @@ def predict(self, shower_seed, energy_seed): tilted = TiltedGroundFrame( x=fit_params[2] * u.m, y=fit_params[3] * u.m, - 
pointing_direction=self.array_direction + pointing_direction=self.array_direction, ) ground = project_to_ground(tilted) @@ -715,11 +752,13 @@ def predict(self, shower_seed, energy_seed): shower_result.core_uncert = np.nan # Copy reconstructed Xmax - shower_result.h_max = fit_params[5] * self.get_shower_max(fit_params[0], - fit_params[1], - fit_params[2], - fit_params[3], - zenith.to(u.rad).value) + shower_result.h_max = fit_params[5] * self.get_shower_max( + fit_params[0], + fit_params[1], + fit_params[2], + fit_params[3], + zenith.to(u.rad).value, + ) shower_result.h_max *= np.cos(zenith) shower_result.h_max_uncert = errors[5] * shower_result.h_max @@ -757,22 +796,37 @@ def minimise(self, params, step, limits, minimiser_name="minuit", max_calls=0): limits = np.asarray(limits) if minimiser_name == "minuit": - self.min = Minuit(self.get_likelihood, - print_level=1, - source_x=params[0], error_source_x=step[0], - limit_source_x=limits[0], fix_source_x=False, - source_y=params[1], error_source_y=step[1], - limit_source_y=limits[1], fix_source_y=False, - core_x=params[2], error_core_x=step[2], - limit_core_x=limits[2], fix_core_x=False, - core_y=params[3], error_core_y=step[3], - limit_core_y=limits[3], fix_core_y=False, - energy=params[4], error_energy=step[4], - limit_energy=limits[4], fix_energy=False, - x_max_scale=params[5], error_x_max_scale=step[5], - limit_x_max_scale=limits[5], fix_x_max_scale=False, - goodness_of_fit=False, fix_goodness_of_fit=True, - errordef=1) + self.min = Minuit( + self.get_likelihood, + print_level=1, + source_x=params[0], + error_source_x=step[0], + limit_source_x=limits[0], + fix_source_x=False, + source_y=params[1], + error_source_y=step[1], + limit_source_y=limits[1], + fix_source_y=False, + core_x=params[2], + error_core_x=step[2], + limit_core_x=limits[2], + fix_core_x=False, + core_y=params[3], + error_core_y=step[3], + limit_core_y=limits[3], + fix_core_y=False, + energy=params[4], + error_energy=step[4], + limit_energy=limits[4], + fix_energy=False, + x_max_scale=params[5], + error_x_max_scale=step[5], + limit_x_max_scale=limits[5], + fix_x_max_scale=False, + goodness_of_fit=False, + fix_goodness_of_fit=True, + errordef=1, + ) self.min.tol *= 1000 self.min.set_strategy(1) @@ -781,15 +835,29 @@ def minimise(self, params, step, limits, minimiser_name="minuit", max_calls=0): fit_params = self.min.values errors = self.min.errors - return (fit_params["source_x"], fit_params["source_y"], fit_params["core_x"], - fit_params["core_y"], fit_params["energy"], fit_params[ - "x_max_scale"]), \ - (errors["source_x"], errors["source_y"], errors["core_x"], - errors["core_x"], errors["energy"], errors["x_max_scale"]), \ - self.min.fval + return ( + ( + fit_params["source_x"], + fit_params["source_y"], + fit_params["core_x"], + fit_params["core_y"], + fit_params["energy"], + fit_params["x_max_scale"], + ), + ( + errors["source_x"], + errors["source_y"], + errors["core_x"], + errors["core_x"], + errors["energy"], + errors["x_max_scale"], + ), + self.min.fval, + ) elif "nlopt" in minimiser_name: import nlopt + opt = nlopt.opt(nlopt.LN_BOBYQA, 6) opt.set_min_objective(self.get_likelihood_nlopt) opt.set_initial_step(step) @@ -807,27 +875,41 @@ def minimise(self, params, step, limits, minimiser_name="minuit", max_calls=0): elif minimiser_name in ("lm", "trf", "dogleg"): self.array_return = True - min = least_squares(self.get_likelihood_min, params, - method=minimiser_name, - x_scale=step, - xtol=1e-10, - ftol=1e-10, - ) + min = least_squares( + self.get_likelihood_min, + 
params, + method=minimiser_name, + x_scale=step, + xtol=1e-10, + ftol=1e-10, + ) return min.x, (0, 0, 0, 0, 0, 0), self.get_likelihood_min(min.x) else: - min = minimize(self.get_likelihood_min, np.array(params), - method=minimiser_name, - bounds=limits, - options={"disp": False}, - tol=1e-5 - ) + min = minimize( + self.get_likelihood_min, + np.array(params), + method=minimiser_name, + bounds=limits, + options={"disp": False}, + tol=1e-5, + ) return np.array(min.x), (0, 0, 0, 0, 0, 0), self.get_likelihood_min(min.x) -def spread_line_seed(hillas, tel_x, tel_y, source_x, source_y, tilt_x, tilt_y, energy, - shift_frac = [2, 1.5, 1, 0.5, 0 ,-0.5, -1, -1.5]): + +def spread_line_seed( + hillas, + tel_x, + tel_y, + source_x, + source_y, + tilt_x, + tilt_y, + energy, + shift_frac=[2, 1.5, 1, 0.5, 0, -0.5, -1, -1.5], +): """ Parameters ---------- @@ -874,10 +956,15 @@ def spread_line_seed(hillas, tel_x, tel_y, source_x, source_y, tilt_x, tilt_y, e seed_list = list() for shift in shift_frac: - seed_list.append(create_seed(centre_x + (diff_x*shift), - centre_y + (diff_y*shift), - centre_tel_x + (diff_tel_x * shift), - centre_tel_y + (diff_tel_y * shift), energy)) + seed_list.append( + create_seed( + centre_x + (diff_x * shift), + centre_y + (diff_y * shift), + centre_tel_x + (diff_tel_x * shift), + centre_tel_y + (diff_tel_y * shift), + energy, + ) + ) return seed_list @@ -912,18 +999,18 @@ def create_seed(source_x, source_y, tilt_x, tilt_y, energy): en_seed = 0.01 # Take the seed from Hillas-based reconstruction - seed = (source_x, source_y, tilt_x, - tilt_y, en_seed, 1) + seed = (source_x, source_y, tilt_x, tilt_y, en_seed, 1) # Take a reasonable first guess at step size step = [0.04 / 57.3, 0.04 / 57.3, 5, 5, en_seed * 0.1, 0.05] # And some sensible limits of the fit range - limits = [[source_x - 0.1, source_x + 0.1], - [source_y - 0.1, source_y + 0.1], - [tilt_x - 100, tilt_x + 100], - [tilt_y - 100, tilt_y + 100], - [lower_en_limit, en_seed * 2], - [0.5, 2] - ] + limits = [ + [source_x - 0.1, source_x + 0.1], + [source_y - 0.1, source_y + 0.1], + [tilt_x - 100, tilt_x + 100], + [tilt_y - 100, tilt_y + 100], + [lower_en_limit, en_seed * 2], + [0.5, 2], + ] return seed, step, limits diff --git a/ctapipe/reco/__init__.py b/ctapipe/reco/__init__.py index db467061ce7..3b8d37b0ead 100644 --- a/ctapipe/reco/__init__.py +++ b/ctapipe/reco/__init__.py @@ -5,5 +5,10 @@ from .shower_max import ShowerMaxEstimator -__all__ = ['HillasReconstructor', 'Reconstructor', 'ImPACTReconstructor', - 'EnergyRegressor', 'ShowerMaxEstimator'] +__all__ = [ + "HillasReconstructor", + "Reconstructor", + "ImPACTReconstructor", + "EnergyRegressor", + "ShowerMaxEstimator", +] diff --git a/ctapipe/reco/energy_regressor.py b/ctapipe/reco/energy_regressor.py index 45c3b0b9581..304a98b6abc 100644 --- a/ctapipe/reco/energy_regressor.py +++ b/ctapipe/reco/energy_regressor.py @@ -38,10 +38,10 @@ class EnergyRegressor(RegressorClassifierBase): """ - def __init__(self, regressor=RandomForestRegressor, - cam_id_list="cam", unit=u.TeV, **kwargs): - super().__init__(model=regressor, cam_id_list=cam_id_list, - unit=unit, **kwargs) + def __init__( + self, regressor=RandomForestRegressor, cam_id_list="cam", unit=u.TeV, **kwargs + ): + super().__init__(model=regressor, cam_id_list=cam_id_list, unit=unit, **kwargs) def predict_by_event(self, event_list): """expects a list of events where every "event" is a dictionary @@ -83,8 +83,11 @@ def predict_by_event(self, event_list): # QUESTION if there is no trained classifier for # `cam_id`, raise an 
error or just pass this # camera type? - raise KeyError("cam_id '{}' in event_list but no model defined: {}" - .format(cam_id, [k for k in self.model_dict])) + raise KeyError( + "cam_id '{}' in event_list but no model defined: {}".format( + cam_id, [k for k in self.model_dict] + ) + ) try: # if a `namedtuple` is provided, we can weight the different images @@ -98,9 +101,11 @@ def predict_by_event(self, event_list): predict_median.append(np.median(predicts)) predict_std.append(np.std(predicts)) - return {"mean": np.array(predict_mean) * self.unit, - "median": np.array(predict_median) * self.unit, - "std": np.array(predict_std) * self.unit} + return { + "mean": np.array(predict_mean) * self.unit, + "median": np.array(predict_median) * self.unit, + "std": np.array(predict_std) * self.unit, + } def predict_by_telescope_type(self, event_list): """same as `predict_dict` only that it returns a list of dictionaries diff --git a/ctapipe/reco/hillas_intersection.py b/ctapipe/reco/hillas_intersection.py index 9006b2ef56a..c5277c52c35 100644 --- a/ctapipe/reco/hillas_intersection.py +++ b/ctapipe/reco/hillas_intersection.py @@ -14,7 +14,7 @@ from ctapipe.reco.reco_algorithms import ( Reconstructor, InvalidWidthException, - TooFewTelescopesException + TooFewTelescopesException, ) from ctapipe.containers import ReconstructedShowerContainer from ctapipe.instrument import get_atmosphere_profile_functions @@ -26,14 +26,14 @@ TiltedGroundFrame, project_to_ground, GroundFrame, - MissingFrameAttributeWarning + MissingFrameAttributeWarning, ) import copy import warnings from ctapipe.core import traits -__all__ = ['HillasIntersection'] +__all__ = ["HillasIntersection"] class HillasIntersection(Reconstructor): @@ -57,15 +57,11 @@ class HillasIntersection(Reconstructor): """ atmosphere_profile_name = traits.CaselessStrEnum( - ['paranal', ], - default_value="paranal", - help="name of atmosphere profile to use" + ["paranal",], default_value="paranal", help="name of atmosphere profile to use" ).tag(config=True) weighting = traits.CaselessStrEnum( - ['Konrad', 'hess'], - default_value='Konrad', - help='Weighting Method name' + ["Konrad", "hess"], default_value="Konrad", help="Weighting Method name" ).tag(config=True) def __init__(self, config=None, parent=None, **kwargs): @@ -105,37 +101,45 @@ def predict(self, hillas_dict, subarray, array_pointing, telescopes_pointings=No """ # filter warnings for missing obs time. 
this is needed because MC data has no obs time - warnings.filterwarnings(action='ignore', category=MissingFrameAttributeWarning) + warnings.filterwarnings(action="ignore", category=MissingFrameAttributeWarning) # stereoscopy needs at least two telescopes if len(hillas_dict) < 2: raise TooFewTelescopesException( - "need at least two telescopes, have {}" - .format(len(hillas_dict))) + "need at least two telescopes, have {}".format(len(hillas_dict)) + ) # check for np.nan or 0 width's as these screw up weights - if any([np.isnan(hillas_dict[tel]['width'].value) for tel in hillas_dict]): + if any([np.isnan(hillas_dict[tel]["width"].value) for tel in hillas_dict]): raise InvalidWidthException( - "A HillasContainer contains an ellipse of width==np.nan") + "A HillasContainer contains an ellipse of width==np.nan" + ) - if any([hillas_dict[tel]['width'].value == 0 for tel in hillas_dict]): + if any([hillas_dict[tel]["width"].value == 0 for tel in hillas_dict]): raise InvalidWidthException( - "A HillasContainer contains an ellipse of width==0") + "A HillasContainer contains an ellipse of width==0" + ) if telescopes_pointings is None: - telescopes_pointings = {tel_id: array_pointing for tel_id in hillas_dict.keys()} + telescopes_pointings = { + tel_id: array_pointing for tel_id in hillas_dict.keys() + } tilted_frame = TiltedGroundFrame(pointing_direction=array_pointing) ground_positions = subarray.tel_coords - grd_coord = GroundFrame(x=ground_positions.x, - y=ground_positions.y, - z=ground_positions.z) + grd_coord = GroundFrame( + x=ground_positions.x, y=ground_positions.y, z=ground_positions.z + ) tilt_coord = grd_coord.transform_to(tilted_frame) - tel_x = {tel_id: tilt_coord.x[tel_id-1] for tel_id in list(hillas_dict.keys())} - tel_y = {tel_id: tilt_coord.y[tel_id-1] for tel_id in list(hillas_dict.keys())} + tel_x = { + tel_id: tilt_coord.x[tel_id - 1] for tel_id in list(hillas_dict.keys()) + } + tel_y = { + tel_id: tilt_coord.y[tel_id - 1] for tel_id in list(hillas_dict.keys()) + } nom_frame = NominalFrame(origin=array_pointing) @@ -143,7 +147,7 @@ def predict(self, hillas_dict, subarray, array_pointing, telescopes_pointings=No for tel_id, hillas in hillas_dict_mod.items(): # prevent from using rads instead of meters as inputs - assert hillas.x.to(u.m).unit == u.Unit('m') + assert hillas.x.to(u.m).unit == u.Unit("m") focal_length = subarray.tel[tel_id].optics.equivalent_focal_length @@ -158,41 +162,36 @@ def predict(self, hillas_dict, subarray, array_pointing, telescopes_pointings=No src_x, src_y, err_x, err_y = self.reconstruct_nominal(hillas_dict_mod) core_x, core_y, core_err_x, core_err_y = self.reconstruct_tilted( - hillas_dict_mod, tel_x, tel_y) + hillas_dict_mod, tel_x, tel_y + ) err_x *= u.rad err_y *= u.rad - nom = SkyCoord( - fov_lon=src_x * u.rad, - fov_lat=src_y * u.rad, - frame=nom_frame - ) + nom = SkyCoord(fov_lon=src_x * u.rad, fov_lat=src_y * u.rad, frame=nom_frame) # nom = sky_pos.transform_to(nom_frame) sky_pos = nom.transform_to(array_pointing.frame) - tilt = SkyCoord( - x=core_x * u.m, - y=core_y * u.m, - frame=tilted_frame, - ) + tilt = SkyCoord(x=core_x * u.m, y=core_y * u.m, frame=tilted_frame,) grd = project_to_ground(tilt) x_max = self.reconstruct_xmax( nom.fov_lon, nom.fov_lat, - tilt.x, tilt.y, + tilt.x, + tilt.y, hillas_dict_mod, - tel_x, tel_y, + tel_x, + tel_y, 90 * u.deg - array_pointing.alt, ) - src_error = np.sqrt(err_x**2 + err_y**2) + src_error = np.sqrt(err_x ** 2 + err_y ** 2) result = ReconstructedShowerContainer( alt=sky_pos.altaz.alt.to(u.rad), 
az=sky_pos.altaz.az.to(u.rad), core_x=grd.x, core_y=grd.y, - core_uncert=u.Quantity(np.sqrt(core_err_x**2 + core_err_y**2), u.m), + core_uncert=u.Quantity(np.sqrt(core_err_x ** 2 + core_err_y ** 2), u.m), tel_ids=[h for h in hillas_dict_mod.keys()], average_intensity=np.mean([h.intensity for h in hillas_dict_mod.values()]), is_valid=True, @@ -230,27 +229,34 @@ def reconstruct_nominal(self, hillas_parameters): # Copy parameters we need to a numpy array to speed things up h1 = list( map( - lambda h: [h[0].psi.to_value(u.rad), - h[0].x.to_value(u.rad), - h[0].y.to_value(u.rad), - h[0].intensity], hillas_pairs + lambda h: [ + h[0].psi.to_value(u.rad), + h[0].x.to_value(u.rad), + h[0].y.to_value(u.rad), + h[0].intensity, + ], + hillas_pairs, ) ) h1 = np.array(h1) h1 = np.transpose(h1) h2 = list( - map(lambda h: [h[1].psi.to_value(u.rad), - h[1].x.to_value(u.rad), - h[1].y.to_value(u.rad), - h[1].intensity], hillas_pairs) + map( + lambda h: [ + h[1].psi.to_value(u.rad), + h[1].x.to_value(u.rad), + h[1].y.to_value(u.rad), + h[1].intensity, + ], + hillas_pairs, + ) ) h2 = np.array(h2) h2 = np.transpose(h2) # Perform intersection - sx, sy = self.intersect_lines(h1[1], h1[2], h1[0], - h2[1], h2[2], h2[0]) + sx, sy = self.intersect_lines(h1[1], h1[2], h1[0], h2[1], h2[2], h2[0]) # Weight by chosen method weight = self._weight_method(h1[3], h2[3]) @@ -312,17 +318,22 @@ def reconstruct_tilted(self, hillas_parameters, tel_x, tel_y): tel_y = np.array(ty) # Copy parameters we need to a numpy array to speed things up - hillas1 = map(lambda h: [h[0].psi.to_value(u.rad), h[0].intensity], hillas_pairs) + hillas1 = map( + lambda h: [h[0].psi.to_value(u.rad), h[0].intensity], hillas_pairs + ) hillas1 = np.array(list(hillas1)) hillas1 = np.transpose(hillas1) - hillas2 = map(lambda h: [h[1].psi.to_value(u.rad), h[1].intensity], hillas_pairs) + hillas2 = map( + lambda h: [h[1].psi.to_value(u.rad), h[1].intensity], hillas_pairs + ) hillas2 = np.array(list(hillas2)) hillas2 = np.transpose(hillas2) # Perform intersection - crossing_x, crossing_y = self.intersect_lines(tel_x[:, 0], tel_y[:, 0], hillas1[0], - tel_x[:, 1], tel_y[:, 1], hillas2[0]) + crossing_x, crossing_y = self.intersect_lines( + tel_x[:, 0], tel_y[:, 0], hillas1[0], tel_x[:, 1], tel_y[:, 1], hillas2[0] + ) # Weight by chosen method weight = self._weight_method(hillas1[1], hillas2[1]) @@ -337,8 +348,9 @@ def reconstruct_tilted(self, hillas_parameters, tel_x, tel_y): return x_pos, y_pos, np.sqrt(var_x), np.sqrt(var_y) - def reconstruct_xmax(self, source_x, source_y, core_x, core_y, - hillas_parameters, tel_x, tel_y, zen): + def reconstruct_xmax( + self, source_x, source_y, core_x, core_y, hillas_parameters, tel_x, tel_y, zen + ): """ Geometrical depth of shower maximum reconstruction, assuming the shower maximum lies at the image centroid @@ -383,14 +395,16 @@ def reconstruct_xmax(self, source_x, source_y, core_x, core_y, tx.append(tel_x[tel].to_value(u.m)) ty.append(tel_y[tel].to_value(u.m)) - height = get_shower_height(source_x.to_value(u.rad), - source_y.to_value(u.rad), - np.array(cog_x), - np.array(cog_y), - core_x.to_value(u.m), - core_y.to_value(u.m), - np.array(tx), - np.array(ty)) + height = get_shower_height( + source_x.to_value(u.rad), + source_y.to_value(u.rad), + np.array(cog_x), + np.array(cog_y), + core_x.to_value(u.m), + core_y.to_value(u.m), + np.array(tx), + np.array(ty), + ) weight = np.array(amp) mean_height = np.sum(height * weight) / np.sum(weight) @@ -448,9 +462,9 @@ def intersect_lines(xp1, yp1, phi1, xp2, yp2, phi2): b2 = -1 * 
cos_2 c2 = yp2 * cos_2 - xp2 * sin_2 - det_ab = (a1 * b2 - a2 * b1) - det_bc = (b1 * c2 - b2 * c1) - det_ca = (c1 * a2 - c2 * a1) + det_ab = a1 * b2 - a2 * b1 + det_bc = b1 * c2 - b2 * c1 + det_ca = c1 * a2 - c2 * a1 # if math.fabs(det_ab) < 1e-14 : # /* parallel */ # return 0,0 @@ -468,8 +482,9 @@ def weight_sin(phi1, phi2): return np.abs(np.sin(phi1 - phi2)) -def get_shower_height(source_x, source_y, cog_x, cog_y, - core_x, core_y, tel_pos_x, tel_pos_y): +def get_shower_height( + source_x, source_y, cog_x, cog_y, core_x, core_y, tel_pos_x, tel_pos_y +): """ Function to calculate the depth of shower maximum geometrically under the assumption that the shower maximum lies at the brightest point of the camera image. @@ -498,9 +513,9 @@ def get_shower_height(source_x, source_y, cog_x, cog_y, """ # Calculate displacement of image centroid from source position (in rad) - disp = np.sqrt((cog_x - source_x)**2 + (cog_y - source_y)**2) + disp = np.sqrt((cog_x - source_x) ** 2 + (cog_y - source_y) ** 2) # Calculate impact parameter of the shower - impact = np.sqrt((tel_pos_x - core_x)**2 + (tel_pos_y - core_y)**2) + impact = np.sqrt((tel_pos_x - core_x) ** 2 + (tel_pos_y - core_y) ** 2) # Distance above telescope is ration of these two (small angle) height = impact / disp diff --git a/ctapipe/reco/reco_algorithms.py b/ctapipe/reco/reco_algorithms.py index cc0e5561f1b..c3bd7e47988 100644 --- a/ctapipe/reco/reco_algorithms.py +++ b/ctapipe/reco/reco_algorithms.py @@ -1,7 +1,7 @@ from ctapipe.core import Component from ctapipe.containers import ReconstructedShowerContainer -__all__ = ['Reconstructor', 'TooFewTelescopesException', 'InvalidWidthException'] +__all__ = ["Reconstructor", "TooFewTelescopesException", "InvalidWidthException"] class TooFewTelescopesException(Exception): diff --git a/ctapipe/reco/regressor_classifier_base.py b/ctapipe/reco/regressor_classifier_base.py index 4416fa34890..43ac4bcdcab 100644 --- a/ctapipe/reco/regressor_classifier_base.py +++ b/ctapipe/reco/regressor_classifier_base.py @@ -138,13 +138,15 @@ def reshuffle_event_list(self, X, y): # features-lists for this telescope type trainFeatures[cam_id] += tels except KeyError: - raise KeyError("cam_id '{}' in X but no model defined: {}" - .format(cam_id, [k for k in self.model_dict])) + raise KeyError( + "cam_id '{}' in X but no model defined: {}".format( + cam_id, [k for k in self.model_dict] + ) + ) try: # add a target-entry for every feature-list - trainTarget[cam_id] += \ - [target.to(self.unit).value] * len(tels) + trainTarget[cam_id] += [target.to(self.unit).value] * len(tels) except AttributeError: # in case the target is not given as an astropy # quantity let's hope that the user keeps proper @@ -187,12 +189,16 @@ def fit(self, X, y, sample_weight=None): for cam_id in X: if cam_id not in y: - raise KeyError("cam_id '{}' in X but not in y: {}" - .format(cam_id, [k for k in y])) + raise KeyError( + "cam_id '{}' in X but not in y: {}".format(cam_id, [k for k in y]) + ) if cam_id not in self.model_dict: - raise KeyError("cam_id '{}' in X but no model defined: {}" - .format(cam_id, [k for k in self.model_dict])) + raise KeyError( + "cam_id '{}' in X but no model defined: {}".format( + cam_id, [k for k in self.model_dict] + ) + ) # add a `None` entry in the weights dictionary in case there is no entry yet if cam_id not in sample_weight: @@ -201,8 +207,9 @@ def fit(self, X, y, sample_weight=None): # for every `cam_id` train one model (as long as there are events in `X`) if len(X[cam_id]): try: - 
self.model_dict[cam_id].fit(X[cam_id], y[cam_id], - sample_weight=sample_weight[cam_id]) + self.model_dict[cam_id].fit( + X[cam_id], y[cam_id], sample_weight=sample_weight[cam_id] + ) except (TypeError, ValueError): # some models do not like `sample_weight` in the `fit` call... # catch the exception and try again without the weights @@ -258,6 +265,7 @@ def save(self, path): """ import joblib + for cam_id, model in self.model_dict.items(): try: # assume that there is a `{cam_id}` keyword to replace @@ -369,6 +377,7 @@ def show_importances(self): """ import matplotlib.pyplot as plt + n_tel_types = len(self.model_dict) n_cols = np.ceil(np.sqrt(n_tel_types)).astype(int) n_rows = np.ceil(n_tel_types / n_cols).astype(int) @@ -381,21 +390,22 @@ def show_importances(self): try: importances = model.feature_importances_ except: - plt.gca().axis('off') + plt.gca().axis("off") continue bins = range(importances.shape[0]) - if cam_id in self.input_features_dict \ - and (len(self.input_features_dict[cam_id]) == len(bins)): + if cam_id in self.input_features_dict and ( + len(self.input_features_dict[cam_id]) == len(bins) + ): feature_labels = self.input_features_dict[cam_id] - importances, s_feature_labels = \ - zip(*sorted(zip(importances, feature_labels), reverse=True)) + importances, s_feature_labels = zip( + *sorted(zip(importances, feature_labels), reverse=True) + ) plt.xticks(bins, s_feature_labels, rotation=17) - plt.bar(bins, importances, - color='r', align='center') + plt.bar(bins, importances, color="r", align="center") # switch off superfluous axes for j in range(i + 1, n_rows * n_cols): - axs.ravel()[j].axis('off') + axs.ravel()[j].axis("off") return fig diff --git a/ctapipe/reco/shower_max.py b/ctapipe/reco/shower_max.py index 268126e8b4d..37134db42a1 100644 --- a/ctapipe/reco/shower_max.py +++ b/ctapipe/reco/shower_max.py @@ -23,8 +23,10 @@ class ShowerMaxEstimator: def __init__(self, atmosphere_profile_name): - self.thickness_profile, self.altitude_profile = \ - get_atmosphere_profile_functions(atmosphere_profile_name) + ( + self.thickness_profile, + self.altitude_profile, + ) = get_atmosphere_profile_functions(atmosphere_profile_name) def find_shower_max_height(self, energy, h_first_int, gamma_alt): """ diff --git a/ctapipe/reco/tests/test_HillasReconstructor.py b/ctapipe/reco/tests/test_HillasReconstructor.py index b88b5c2f835..b3d86136fcb 100644 --- a/ctapipe/reco/tests/test_HillasReconstructor.py +++ b/ctapipe/reco/tests/test_HillasReconstructor.py @@ -6,7 +6,10 @@ from ctapipe.image.hillas import hillas_parameters, HillasParameterizationError from ctapipe.io import event_source from ctapipe.reco.HillasReconstructor import HillasReconstructor, HillasPlane -from ctapipe.reco.reco_algorithms import TooFewTelescopesException, InvalidWidthException +from ctapipe.reco.reco_algorithms import ( + TooFewTelescopesException, + InvalidWidthException, +) from ctapipe.utils import get_dataset_path from astropy.coordinates import SkyCoord, AltAz @@ -100,11 +103,7 @@ def test_reconstruction(): reconstructed_events = 0 for event in source: - array_pointing = SkyCoord( - az=event.mc.az, - alt=event.mc.alt, - frame=horizon_frame - ) + array_pointing = SkyCoord(az=event.mc.az, alt=event.mc.alt, frame=horizon_frame) hillas_dict = {} telescope_pointings = {} @@ -120,8 +119,9 @@ def test_reconstruction(): ) pmt_signal = event.r0.tel[tel_id].waveform[0].sum(axis=1) - mask = tailcuts_clean(geom, pmt_signal, - picture_thresh=10., boundary_thresh=5.) 
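# --- Editor's note (illustrative sketch, not part of this patch) -------------
# The reformatted get_shower_height helper above rests on a small-angle
# relation: the image centroid is displaced from the source direction by an
# angle `disp` (in radians), the telescope sits `impact` metres from the shower
# core, and the height of shower maximum above the telescope is approximately
# impact / disp. A quick numerical check with made-up but typical values:
import numpy as np

disp = np.deg2rad(1.0)  # centroid offset from the source direction [rad]
impact = 150.0  # telescope impact parameter [m]
height = impact / disp  # height of shower maximum above the telescope [m]
print(f"h ~ {height:.0f} m")  # ~ 8594 m for these values
# ------------------------------------------------------------------------------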
+ mask = tailcuts_clean( + geom, pmt_signal, picture_thresh=10.0, boundary_thresh=5.0 + ) pmt_signal[mask == 0] = 0 try: @@ -141,7 +141,9 @@ def test_reconstruction(): fit_result_parall = fit.predict(hillas_dict, source.subarray, array_pointing) fit = HillasReconstructor() - fit_result_tel_point = fit.predict(hillas_dict, source.subarray, array_pointing, telescope_pointings) + fit_result_tel_point = fit.predict( + hillas_dict, source.subarray, array_pointing, telescope_pointings + ) for key in fit_result_parall.keys(): print(key, fit_result_parall[key], fit_result_tel_point[key]) @@ -190,8 +192,9 @@ def test_invalid_events(): pmt_signal = event.r0.tel[tel_id].waveform[0].sum(axis=1) - mask = tailcuts_clean(geom, pmt_signal, - picture_thresh=10., boundary_thresh=5.) + mask = tailcuts_clean( + geom, pmt_signal, picture_thresh=10.0, boundary_thresh=5.0 + ) pmt_signal[mask == 0] = 0 try: @@ -200,23 +203,23 @@ def test_invalid_events(): except HillasParameterizationError as e: continue - # construct a dict only containing the last telescope events + # construct a dict only containing the last telescope events # (#telescopes < 2) hillas_dict_only_one_tel = dict() hillas_dict_only_one_tel[tel_id] = hillas_dict[tel_id] with pytest.raises(TooFewTelescopesException): fit.predict(hillas_dict_only_one_tel, subarray, tel_azimuth, tel_altitude) - # construct a hillas dict with the width of the last event set to 0 + # construct a hillas dict with the width of the last event set to 0 # (any width == 0) hillas_dict_zero_width = hillas_dict.copy() - hillas_dict_zero_width[tel_id]['width'] = 0 * u.m + hillas_dict_zero_width[tel_id]["width"] = 0 * u.m with pytest.raises(InvalidWidthException): fit.predict(hillas_dict_zero_width, subarray, tel_azimuth, tel_altitude) - # construct a hillas dict with the width of the last event set to np.nan + # construct a hillas dict with the width of the last event set to np.nan # (any width == nan) hillas_dict_nan_width = hillas_dict.copy() - hillas_dict_zero_width[tel_id]['width'] = np.nan * u.m + hillas_dict_zero_width[tel_id]["width"] = np.nan * u.m with pytest.raises(InvalidWidthException): fit.predict(hillas_dict_nan_width, subarray, tel_azimuth, tel_altitude) diff --git a/ctapipe/reco/tests/test_ImPACT.py b/ctapipe/reco/tests/test_ImPACT.py index 68ae41d1246..4990d4f50a5 100644 --- a/ctapipe/reco/tests/test_ImPACT.py +++ b/ctapipe/reco/tests/test_ImPACT.py @@ -3,66 +3,76 @@ import pytest from numpy.testing import assert_allclose -from ctapipe.containers import (ReconstructedShowerContainer, - ReconstructedEnergyContainer) +from ctapipe.containers import ( + ReconstructedShowerContainer, + ReconstructedEnergyContainer, +) from ctapipe.reco.ImPACT import ImPACTReconstructor from ctapipe.containers import HillasParametersContainer from astropy.coordinates import Angle, AltAz, SkyCoord -class TestImPACT(): - +class TestImPACT: @classmethod def setup_class(self): self.impact_reco = ImPACTReconstructor(root_dir=".") self.horizon_frame = AltAz() - self.h1 = HillasParametersContainer(x=1 * u.deg, y=1 * u.deg, - r=1 * u.deg, phi=Angle(0 * u.rad), - intensity=100, - length=0.4 * u.deg, - width=0.4 * u.deg, - psi=Angle(0 * u.rad), - skewness=0, - kurtosis=0) - - #@pytest.mark.skip('need a dataset for this to work') + self.h1 = HillasParametersContainer( + x=1 * u.deg, + y=1 * u.deg, + r=1 * u.deg, + phi=Angle(0 * u.rad), + intensity=100, + length=0.4 * u.deg, + width=0.4 * u.deg, + psi=Angle(0 * u.rad), + skewness=0, + kurtosis=0, + ) + + # @pytest.mark.skip('need a dataset for 
this to work') def test_brightest_mean_average(self): """ Test that averaging of the brightest pixel position give a sensible outcome """ image = np.array([1, 1, 1, 1]) - pixel_x = np.array([0., 1., 0., -1.]) * u.deg - pixel_y = np.array([-1., 0., 1., 0.]) * u.deg - - array_pointing = SkyCoord(alt=0 * u.deg, - az=0 * u.deg, - frame=self.horizon_frame) - - self.impact_reco.set_event_properties({1: image}, {1: image}, - {1: pixel_x}, {1: pixel_y}, - {1: "DUMMY"}, {1: 0 * u.m}, - {1: 0 * u.m}, - array_direction=array_pointing, - hillas={1: self.h1}) + pixel_x = np.array([0.0, 1.0, 0.0, -1.0]) * u.deg + pixel_y = np.array([-1.0, 0.0, 1.0, 0.0]) * u.deg + + array_pointing = SkyCoord(alt=0 * u.deg, az=0 * u.deg, frame=self.horizon_frame) + + self.impact_reco.set_event_properties( + {1: image}, + {1: image}, + {1: pixel_x}, + {1: pixel_y}, + {1: "DUMMY"}, + {1: 0 * u.m}, + {1: 0 * u.m}, + array_direction=array_pointing, + hillas={1: self.h1}, + ) self.impact_reco.get_hillas_mean() - assert_allclose(self.impact_reco.peak_x[0]*(180/np.pi), 1, rtol=0, atol=0.001) - assert_allclose(self.impact_reco.peak_y[0]*(180/np.pi), 1, rtol=0, atol=0.001) + assert_allclose( + self.impact_reco.peak_x[0] * (180 / np.pi), 1, rtol=0, atol=0.001 + ) + assert_allclose( + self.impact_reco.peak_y[0] * (180 / np.pi), 1, rtol=0, atol=0.001 + ) def test_rotation(self): """Test pixel rotation function""" x = np.array([1]) y = np.array([0]) - xt, yt = ImPACTReconstructor.rotate_translate(x, y, 0, 0, - np.deg2rad(90)) + xt, yt = ImPACTReconstructor.rotate_translate(x, y, 0, 0, np.deg2rad(90)) assert_allclose(xt, 0, rtol=0, atol=0.001) assert_allclose(yt, 1, rtol=0, atol=0.001) - xt, yt = ImPACTReconstructor.rotate_translate(x, y, 0, 0, - np.deg2rad(180)) + xt, yt = ImPACTReconstructor.rotate_translate(x, y, 0, 0, np.deg2rad(180)) assert_allclose(xt, 1, rtol=0, atol=0.001) assert_allclose(yt, 0, rtol=0, atol=0.001) @@ -82,21 +92,24 @@ def test_xmax_calculation(self): pixel_x = np.array([1, 1, 1]) * u.deg pixel_y = np.array([1, 1, 1]) * u.deg - array_pointing = SkyCoord(alt=0 * u.deg, - az=0 * u.deg, - frame=self.horizon_frame) + array_pointing = SkyCoord(alt=0 * u.deg, az=0 * u.deg, frame=self.horizon_frame) - self.impact_reco.set_event_properties({1: image}, {1: image}, - {1: pixel_x},{1: pixel_y}, - {1: "DUMMY"}, {1: 0 * u.m}, - {1: 0 * u.m}, - array_direction=array_pointing, - hillas={1:self.h1}) + self.impact_reco.set_event_properties( + {1: image}, + {1: image}, + {1: pixel_x}, + {1: pixel_y}, + {1: "DUMMY"}, + {1: 0 * u.m}, + {1: 0 * u.m}, + array_direction=array_pointing, + hillas={1: self.h1}, + ) shower_max = self.impact_reco.get_shower_max(0, 0, 0, 100, 0) - assert_allclose(shower_max, 484.2442217190515 , rtol=0.01) + assert_allclose(shower_max, 484.2442217190515, rtol=0.01) - @pytest.mark.skip('need a dataset for this to work') + @pytest.mark.skip("need a dataset for this to work") def test_image_prediction(self): pixel_x = np.array([0]) * u.deg pixel_y = np.array([0]) * u.deg @@ -104,17 +117,28 @@ def test_image_prediction(self): image = np.array([1]) pixel_area = np.array([1]) * u.deg * u.deg - self.impact_reco.set_event_properties({1: image}, {1: pixel_x}, - {1: pixel_y}, {1: pixel_area}, - {1: "CHEC"}, {1: 0 * u.m}, - {1: 0 * u.m}, - array_direction=[0 * u.deg, - 0 * u.deg]) + self.impact_reco.set_event_properties( + {1: image}, + {1: pixel_x}, + {1: pixel_y}, + {1: pixel_area}, + {1: "CHEC"}, + {1: 0 * u.m}, + {1: 0 * u.m}, + array_direction=[0 * u.deg, 0 * u.deg], + ) """First check image prediction by 
directly accessing the function""" - pred = self.impact_reco.image_prediction("CHEC", zenith=0, azimuth=0, - energy=1, impact=50, x_max=0, - pix_x=pixel_x, pix_y=pixel_y) + pred = self.impact_reco.image_prediction( + "CHEC", + zenith=0, + azimuth=0, + energy=1, + impact=50, + x_max=0, + pix_x=pixel_x, + pix_y=pixel_y, + ) assert np.sum(pred) != 0 @@ -130,12 +154,13 @@ def test_image_prediction(self): energy = ReconstructedEnergyContainer() energy.is_valid = True energy.energy = 1 * u.TeV - pred2 = self.impact_reco.get_prediction(1, shower_reco=shower, - energy_reco=energy) + pred2 = self.impact_reco.get_prediction( + 1, shower_reco=shower, energy_reco=energy + ) print(pred, pred2) assert pred.all() == pred2.all() - @pytest.mark.skip('need a dataset for this to work') + @pytest.mark.skip("need a dataset for this to work") def test_likelihood(self): pixel_x = np.array([0]) * u.deg pixel_y = np.array([0]) * u.deg @@ -143,12 +168,16 @@ def test_likelihood(self): image = np.array([1]) pixel_area = np.array([1]) * u.deg * u.deg - self.impact_reco.set_event_properties({1: image}, {1: pixel_x}, - {1: pixel_y}, {1: pixel_area}, - {1: "CHEC"}, {1: 0 * u.m}, - {1: 0 * u.m}, - array_direction=[0 * u.deg, - 0 * u.deg]) + self.impact_reco.set_event_properties( + {1: image}, + {1: pixel_x}, + {1: pixel_y}, + {1: pixel_area}, + {1: "CHEC"}, + {1: 0 * u.m}, + {1: 0 * u.m}, + array_direction=[0 * u.deg, 0 * u.deg], + ) like = self.impact_reco.get_likelihood(0, 0, 0, 100, 1, 0) assert like is not np.nan and like > 0 diff --git a/ctapipe/reco/tests/test_energy_regressor.py b/ctapipe/reco/tests/test_energy_regressor.py index f749bf0f812..808a1a36bfa 100644 --- a/ctapipe/reco/tests/test_energy_regressor.py +++ b/ctapipe/reco/tests/test_energy_regressor.py @@ -10,12 +10,14 @@ def test_prepare_model(): cam_id_list = ["FlashCam", "ASTRICam"] - feature_list = {"FlashCam": [[1, 10], [2, 20], [3, 30], [0.9, 9], - ], - "ASTRICam": [[10, 1], [20, 2], [30, 3], [9, 0.9], - ]} - target_list = {"FlashCam": np.array([1, 2, 3, 0.9]) * u.TeV, - "ASTRICam": np.array([1, 2, 3, 0.9]) * u.TeV} + feature_list = { + "FlashCam": [[1, 10], [2, 20], [3, 30], [0.9, 9],], + "ASTRICam": [[10, 1], [20, 2], [30, 3], [9, 0.9],], + } + target_list = { + "FlashCam": np.array([1, 2, 3, 0.9]) * u.TeV, + "ASTRICam": np.array([1, 2, 3, 0.9]) * u.TeV, + } reg = EnergyRegressor(cam_id_list=cam_id_list, n_estimators=10) reg.fit(feature_list, target_list) @@ -35,12 +37,12 @@ def test_predict_by_event(): np.random.seed(3) reg, cam_id_list = test_fit_save_load() - prediction = reg.predict_by_event([{"ASTRICam": [[10, 1]]}, - {"ASTRICam": [[20, 2]]}, - {"ASTRICam": [[30, 3]]}]) + prediction = reg.predict_by_event( + [{"ASTRICam": [[10, 1]]}, {"ASTRICam": [[20, 2]]}, {"ASTRICam": [[30, 3]]}] + ) assert_allclose(prediction["mean"].value, [1, 2, 3], rtol=0.2) - prediction = reg.predict_by_event([{"FlashCam": [[1, 10]]}, - {"FlashCam": [[2, 20]]}, - {"FlashCam": [[3, 30]]}]) + prediction = reg.predict_by_event( + [{"FlashCam": [[1, 10]]}, {"FlashCam": [[2, 20]]}, {"FlashCam": [[3, 30]]}] + ) assert_allclose(prediction["mean"].value, [1, 2, 3], rtol=0.2) diff --git a/ctapipe/reco/tests/test_hillas_intersection.py b/ctapipe/reco/tests/test_hillas_intersection.py index dec3f5c77a7..3295d9d8dcf 100644 --- a/ctapipe/reco/tests/test_hillas_intersection.py +++ b/ctapipe/reco/tests/test_hillas_intersection.py @@ -64,14 +64,14 @@ def test_intersection_xmax_reco(): horizon_frame = AltAz() zen_pointing = 10 * u.deg - array_direction = SkyCoord(alt=90*u.deg - 
zen_pointing, - az=0 * u.deg, - frame=horizon_frame) + array_direction = SkyCoord( + alt=90 * u.deg - zen_pointing, az=0 * u.deg, frame=horizon_frame + ) nom_frame = NominalFrame(origin=array_direction) - source_sky_pos_reco = SkyCoord(alt=90 * u.deg - zen_pointing, - az=0 * u.deg, - frame=horizon_frame) + source_sky_pos_reco = SkyCoord( + alt=90 * u.deg - zen_pointing, az=0 * u.deg, frame=horizon_frame + ) nom_pos_reco = source_sky_pos_reco.transform_to(nom_frame) delta = 1.0 * u.m @@ -80,12 +80,16 @@ def test_intersection_xmax_reco(): focal_length = 28 * u.m hillas_dict = { - 1: HillasParametersContainer(x=-(delta/focal_length)*u.rad, - y=((0 * u.m)/focal_length) * u.rad, - intensity=1), - 2: HillasParametersContainer(x=((0 * u.m)/focal_length) * u.rad, - y=-(delta/focal_length) * u.rad, - intensity=1) + 1: HillasParametersContainer( + x=-(delta / focal_length) * u.rad, + y=((0 * u.m) / focal_length) * u.rad, + intensity=1, + ), + 2: HillasParametersContainer( + x=((0 * u.m) / focal_length) * u.rad, + y=-(delta / focal_length) * u.rad, + intensity=1, + ), } x_max = hill_inter.reconstruct_xmax( @@ -96,7 +100,7 @@ def test_intersection_xmax_reco(): hillas_parameters=hillas_dict, tel_x={1: (150 * u.m), 2: (0 * u.m)}, tel_y={1: (0 * u.m), 2: (150 * u.m)}, - zen=zen_pointing + zen=zen_pointing, ) print(x_max) @@ -116,13 +120,11 @@ def test_intersection_reco_impact_point_tilted(): hillas_dict = { 1: HillasParametersContainer(intensity=100, psi=-90 * u.deg), 2: HillasParametersContainer(intensity=100, psi=-45 * u.deg), - 3: HillasParametersContainer(intensity=100, psi=0 * u.deg) + 3: HillasParametersContainer(intensity=100, psi=0 * u.deg), } reco_konrad = hill_inter.reconstruct_tilted( - hillas_parameters=hillas_dict, - tel_x=tel_x_dict, - tel_y=tel_y_dict + hillas_parameters=hillas_dict, tel_x=tel_x_dict, tel_y=tel_y_dict ) np.testing.assert_allclose(reco_konrad[0], delta.to_value(u.m), atol=1e-8) @@ -143,13 +145,11 @@ def test_intersection_weighting_spoiled_parameters(): hillas_dict = { 1: HillasParametersContainer(intensity=10000, psi=-90 * u.deg), 2: HillasParametersContainer(intensity=1, psi=45 * u.deg), - 3: HillasParametersContainer(intensity=10000, psi=0 * u.deg) + 3: HillasParametersContainer(intensity=10000, psi=0 * u.deg), } reco_konrad_spoiled = hill_inter.reconstruct_tilted( - hillas_parameters=hillas_dict, - tel_x=tel_x_dict, - tel_y=tel_y_dict + hillas_parameters=hillas_dict, tel_x=tel_x_dict, tel_y=tel_y_dict ) np.testing.assert_allclose(reco_konrad_spoiled[0], delta.to_value(u.m), atol=1e-1) @@ -169,16 +169,15 @@ def test_intersection_nominal_reconstruction(): altitude = 70 * u.deg azimuth = 10 * u.deg - array_direction = SkyCoord(alt=altitude, - az=azimuth, - frame=horizon_frame) + array_direction = SkyCoord(alt=altitude, az=azimuth, frame=horizon_frame) nominal_frame = NominalFrame(origin=array_direction) focal_length = 28 * u.m - camera_frame = CameraFrame(focal_length=focal_length, - telescope_pointing=array_direction) + camera_frame = CameraFrame( + focal_length=focal_length, telescope_pointing=array_direction + ) cog_coords_camera_1 = SkyCoord(x=delta, y=0 * u.m, frame=camera_frame) cog_coords_camera_2 = SkyCoord(x=delta / 0.7, y=delta / 0.7, frame=camera_frame) @@ -189,20 +188,26 @@ def test_intersection_nominal_reconstruction(): cog_coords_nom_3 = cog_coords_camera_3.transform_to(nominal_frame) # x-axis is along the altitude and y-axis is along the azimuth - hillas_1 = HillasParametersContainer(x=cog_coords_nom_1.fov_lat, - y=cog_coords_nom_1.fov_lon, - intensity=100, - 
psi=0 * u.deg) + hillas_1 = HillasParametersContainer( + x=cog_coords_nom_1.fov_lat, + y=cog_coords_nom_1.fov_lon, + intensity=100, + psi=0 * u.deg, + ) - hillas_2 = HillasParametersContainer(x=cog_coords_nom_2.fov_lat, - y=cog_coords_nom_2.fov_lon, - intensity=100, - psi=45 * u.deg) + hillas_2 = HillasParametersContainer( + x=cog_coords_nom_2.fov_lat, + y=cog_coords_nom_2.fov_lon, + intensity=100, + psi=45 * u.deg, + ) - hillas_3 = HillasParametersContainer(x=cog_coords_nom_3.fov_lat, - y=cog_coords_nom_3.fov_lon, - intensity=100, - psi=90 * u.deg) + hillas_3 = HillasParametersContainer( + x=cog_coords_nom_3.fov_lat, + y=cog_coords_nom_3.fov_lon, + intensity=100, + psi=90 * u.deg, + ) hillas_dict = {1: hillas_1, 2: hillas_2, 3: hillas_3} @@ -211,11 +216,15 @@ def test_intersection_nominal_reconstruction(): nominal_pos = SkyCoord( fov_lon=u.Quantity(reco_nominal[0], u.rad), fov_lat=u.Quantity(reco_nominal[1], u.rad), - frame=nominal_frame + frame=nominal_frame, ) - np.testing.assert_allclose(nominal_pos.altaz.az.to_value(u.deg), azimuth.to_value(u.deg), atol=1e-8) - np.testing.assert_allclose(nominal_pos.altaz.alt.to_value(u.deg), altitude.to_value(u.deg), atol=1e-8) + np.testing.assert_allclose( + nominal_pos.altaz.az.to_value(u.deg), azimuth.to_value(u.deg), atol=1e-8 + ) + np.testing.assert_allclose( + nominal_pos.altaz.alt.to_value(u.deg), altitude.to_value(u.deg), atol=1e-8 + ) def test_reconstruction(): @@ -239,11 +248,7 @@ def test_reconstruction(): reconstructed_events = 0 for event in source: - array_pointing = SkyCoord( - az=event.mc.az, - alt=event.mc.alt, - frame=horizon_frame - ) + array_pointing = SkyCoord(az=event.mc.az, alt=event.mc.alt, frame=horizon_frame) hillas_dict = {} telescope_pointings = {} @@ -255,12 +260,13 @@ def test_reconstruction(): telescope_pointings[tel_id] = SkyCoord( alt=event.pointing.tel[tel_id].altitude, az=event.pointing.tel[tel_id].azimuth, - frame=horizon_frame + frame=horizon_frame, ) pmt_signal = event.r0.tel[tel_id].waveform[0].sum(axis=1) - mask = tailcuts_clean(geom, pmt_signal, - picture_thresh=10., boundary_thresh=5.) + mask = tailcuts_clean( + geom, pmt_signal, picture_thresh=10.0, boundary_thresh=5.0 + ) pmt_signal[mask == 0] = 0 try: @@ -276,7 +282,9 @@ def test_reconstruction(): reconstructed_events += 1 # divergent mode put to on even though the file has parallel pointing. 
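# --- Editor's note (illustrative sketch, not part of this patch) -------------
# test_intersection_nominal_reconstruction above relies on the camera-to-nominal
# coordinate chain: a Hillas centroid measured in metres on the camera plane
# maps to an angular offset from the pointing direction of roughly
# x / focal_length. The snippet mirrors the frames used in that test; the
# pointing, focal length and centroid position are arbitrary example values.
import astropy.units as u
from astropy.coordinates import AltAz, SkyCoord

from ctapipe.coordinates import CameraFrame, NominalFrame

pointing = SkyCoord(alt=70 * u.deg, az=10 * u.deg, frame=AltAz())
camera_frame = CameraFrame(focal_length=28 * u.m, telescope_pointing=pointing)
nominal_frame = NominalFrame(origin=pointing)

cog_camera = SkyCoord(x=1.0 * u.m, y=0 * u.m, frame=camera_frame)
cog_nominal = cog_camera.transform_to(nominal_frame)

# Small-angle expectation: 1 m / 28 m ~ 0.036 rad ~ 2.0 deg from the origin.
print(cog_nominal.fov_lon.to(u.deg), cog_nominal.fov_lat.to(u.deg))
# ------------------------------------------------------------------------------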
- fit_result = fit.predict(hillas_dict, source.subarray, array_pointing, telescope_pointings) + fit_result = fit.predict( + hillas_dict, source.subarray, array_pointing, telescope_pointings + ) print(fit_result) print(event.mc.core_x, event.mc.core_y) diff --git a/ctapipe/reco/tests/test_reconstruction_methods.py b/ctapipe/reco/tests/test_reconstruction_methods.py index cda5c6910e8..a072d0a6f70 100644 --- a/ctapipe/reco/tests/test_reconstruction_methods.py +++ b/ctapipe/reco/tests/test_reconstruction_methods.py @@ -38,11 +38,7 @@ def test_reconstructors(reconstructors): reconstructed_events = np.zeros((len(reconstructors))) for event in source: - array_pointing = SkyCoord( - az=event.mc.az, - alt=event.mc.alt, - frame=horizon_frame - ) + array_pointing = SkyCoord(az=event.mc.az, alt=event.mc.alt, frame=horizon_frame) hillas_dict = {} telescope_pointings = {} @@ -54,12 +50,13 @@ def test_reconstructors(reconstructors): telescope_pointings[tel_id] = SkyCoord( alt=event.pointing.tel[tel_id].altitude, az=event.pointing.tel[tel_id].azimuth, - frame=horizon_frame + frame=horizon_frame, ) pmt_signal = event.r0.tel[tel_id].waveform[0].sum(axis=1) - mask = tailcuts_clean(geom, pmt_signal, - picture_thresh=10., boundary_thresh=5.) + mask = tailcuts_clean( + geom, pmt_signal, picture_thresh=10.0, boundary_thresh=5.0 + ) pmt_signal[mask == 0] = 0 try: @@ -84,4 +81,6 @@ def test_reconstructors(reconstructors): reconstructor_out.core_x.to(u.m) assert reconstructor_out.is_valid - np.testing.assert_array_less(np.zeros_like(reconstructed_events), reconstructed_events) + np.testing.assert_array_less( + np.zeros_like(reconstructed_events), reconstructed_events + ) diff --git a/ctapipe/reco/tests/test_regressor_classifier_base.py b/ctapipe/reco/tests/test_regressor_classifier_base.py index 2f72af525c7..c131767e291 100644 --- a/ctapipe/reco/tests/test_regressor_classifier_base.py +++ b/ctapipe/reco/tests/test_regressor_classifier_base.py @@ -5,50 +5,49 @@ def test_reshuffle_event_list(): feature_list = [ - {"FlashCam": [[1, 10], [2, 20]], - "ASTRICam": [[30, 3], [40, 4]]}, - {"FlashCam": [[1.5, 15]], - "ASTRICam": [[35, 3.5], [25, 2.5], [30, 3]]} + {"FlashCam": [[1, 10], [2, 20]], "ASTRICam": [[30, 3], [40, 4]]}, + {"FlashCam": [[1.5, 15]], "ASTRICam": [[35, 3.5], [25, 2.5], [30, 3]]}, ] target_list = ["1", "2"] cam_id_list = ["FlashCam", "ASTRICam"] - my_base = RegressorClassifierBase(model=RandomForestClassifier, - cam_id_list=cam_id_list, unit=1, n_estimators=100) + my_base = RegressorClassifierBase( + model=RandomForestClassifier, cam_id_list=cam_id_list, unit=1, n_estimators=100 + ) feature_flattened, target_flattened = my_base.reshuffle_event_list( feature_list, target_list ) - assert feature_flattened == {'FlashCam': [[1, 10], [2, 20], [1.5, 15]], - 'ASTRICam': [[30, 3], [40, 4], [35, 3.5], - [25, 2.5], - [30, 3]]} + assert feature_flattened == { + "FlashCam": [[1, 10], [2, 20], [1.5, 15]], + "ASTRICam": [[30, 3], [40, 4], [35, 3.5], [25, 2.5], [30, 3]], + } - assert target_flattened == {'FlashCam': ['1', '1', '2'], - 'ASTRICam': ['1', '1', '2', '2', '2']} + assert target_flattened == { + "FlashCam": ["1", "1", "2"], + "ASTRICam": ["1", "1", "2", "2", "2"], + } assert len(str(my_base)) > 0 def test_failures(): cam_id_list = ["FlashCam", "ASTRICam"] - my_base = RegressorClassifierBase(model=RandomForestClassifier, - cam_id_list=cam_id_list, unit=1) + my_base = RegressorClassifierBase( + model=RandomForestClassifier, cam_id_list=cam_id_list, unit=1 + ) # some test data with a bad camera in it should raise 
a KeyError feature_list = [ - {"FlashCam": [[1, 10], [2, 20]], - "BadCam": [[30, 3], [40, 4]]}, - {"FlashCam": [[1.5, 15]], - "ASTRICam": [[35, 3.5], [25, 2.5], [30, 3]]} + {"FlashCam": [[1, 10], [2, 20]], "BadCam": [[30, 3], [40, 4]]}, + {"FlashCam": [[1.5, 15]], "ASTRICam": [[35, 3.5], [25, 2.5], [30, 3]]}, ] target_list = ["1", "2"] with pytest.raises(KeyError): feature_flattened, target_flattened = my_base.reshuffle_event_list( - feature_list, - target_list + feature_list, target_list ) assert feature_flattened is not None assert target_flattened is not None @@ -57,27 +56,23 @@ def test_failures(): def test_show_importances(): cam_id_list = ["FlashCam", "ASTRICam"] - feature_list = {"FlashCam": [[1, 10], [2, 20], [3, 30], [0.9, 9], - ], - "ASTRICam": [[10, 1], [20, 2], [30, 3], [9, 0.9], - ]} - target_list = {"FlashCam": [0, 1, 1, 0], - "ASTRICam": [1, 0, 0, 0]} + feature_list = { + "FlashCam": [[1, 10], [2, 20], [3, 30], [0.9, 9],], + "ASTRICam": [[10, 1], [20, 2], [30, 3], [9, 0.9],], + } + target_list = {"FlashCam": [0, 1, 1, 0], "ASTRICam": [1, 0, 0, 0]} reg = RegressorClassifierBase( - model=RandomForestClassifier, - cam_id_list=cam_id_list, - unit=1, - n_estimators=10, + model=RandomForestClassifier, cam_id_list=cam_id_list, unit=1, n_estimators=10, ) reg.fit(feature_list, target_list) reg.input_features_dict = { - "FlashCam": ['f1', 'f2'], - "ASTRICam": ['f1', 'f2'], + "FlashCam": ["f1", "f2"], + "ASTRICam": ["f1", "f2"], } fig = reg.show_importances() ax = fig.axes[0] assert len(ax.get_xticklabels()) == 2 for t in ax.get_xticklabels(): - assert t.get_text() in ['f1', 'f2'] + assert t.get_text() in ["f1", "f2"] diff --git a/ctapipe/reco/tests/test_showermaxestimator.py b/ctapipe/reco/tests/test_showermaxestimator.py index 8cb927fbe1b..ac8da3e5507 100644 --- a/ctapipe/reco/tests/test_showermaxestimator.py +++ b/ctapipe/reco/tests/test_showermaxestimator.py @@ -4,14 +4,18 @@ def test_showermaxestimator(en=5 * u.TeV, h_first_int=10 * u.km, az=70 * u.deg): - estim = ShowerMaxEstimator(atmosphere_profile_name='paranal') + estim = ShowerMaxEstimator(atmosphere_profile_name="paranal") h_max = estim.find_shower_max_height(en, h_first_int, az) assert h_max.unit.is_equivalent(u.m), "return value has not proper unit" return h_max + if __name__ == "__main__": en, h_first_int, az = 5 * u.TeV, 10 * u.km, 70 * u.deg print("ShowerMaxEstimator unit test:") - print("Energy = {}, hight of first interaction = {}, azimuth = {}" - .format(en, h_first_int, az)) + print( + "Energy = {}, hight of first interaction = {}, azimuth = {}".format( + en, h_first_int, az + ) + ) print("h_max:", test_showermaxestimator()) diff --git a/ctapipe/tests/setup_package.py b/ctapipe/tests/setup_package.py index f2fd9ed484f..17b092d110f 100644 --- a/ctapipe/tests/setup_package.py +++ b/ctapipe/tests/setup_package.py @@ -1,3 +1,2 @@ def get_package_data(): - return { - _ASTROPY_PACKAGE_NAME_ + '.tests': ['coveragerc']} + return {_ASTROPY_PACKAGE_NAME_ + ".tests": ["coveragerc"]} diff --git a/ctapipe/tools/__init__.py b/ctapipe/tools/__init__.py index fd1ab7fa9a7..2a475fa16b8 100644 --- a/ctapipe/tools/__init__.py +++ b/ctapipe/tools/__init__.py @@ -1,4 +1,3 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst """ctapipe command line tools. 
""" - diff --git a/ctapipe/tools/bokeh/file_viewer.py b/ctapipe/tools/bokeh/file_viewer.py index 071bfce7694..32827c694c0 100644 --- a/ctapipe/tools/bokeh/file_viewer.py +++ b/ctapipe/tools/bokeh/file_viewer.py @@ -18,35 +18,34 @@ class BokehFileViewer(Tool): name = "BokehFileViewer" - description = ("Interactively explore an event file using the bokeh " - "visualisation package") + description = ( + "Interactively explore an event file using the bokeh " "visualisation package" + ) port = Int(5006, help="Port to open bokeh server onto").tag(config=True) - disable_server = Bool(False, help="Do not start the bokeh server " - "(useful for testing)").tag(config=True) + disable_server = Bool( + False, help="Do not start the bokeh server " "(useful for testing)" + ).tag(config=True) default_url = get_dataset_path("gamma_test_large.simtel.gz") EventSource.input_url.default_value = default_url extractor_product = traits.create_class_enum_trait( - ImageExtractor, - default_value='NeighborPeakWindowSum' + ImageExtractor, default_value="NeighborPeakWindowSum" ) - aliases = Dict(dict( - port='BokehFileViewer.port', - disable_server='BokehFileViewer.disable_server', - f='EventSource.input_url', - max_events='EventSource.max_events', - extractor='BokehFileViewer.extractor_product', - )) - - classes = List( - [ - EventSource, - ] + traits.classes_with_traits(ImageExtractor) + aliases = Dict( + dict( + port="BokehFileViewer.port", + disable_server="BokehFileViewer.disable_server", + f="EventSource.input_url", + max_events="EventSource.max_events", + extractor="BokehFileViewer.extractor_product", + ) ) + classes = List([EventSource,] + traits.classes_with_traits(ImageExtractor)) + def __init__(self, **kwargs): super().__init__(**kwargs) self._event = None @@ -84,14 +83,10 @@ def setup(self): self.seeker = EventSeeker(self.reader, parent=self) self.extractor = ImageExtractor.from_name( - self.extractor_product, - parent=self, - subarray=self.reader.subarray, + self.extractor_product, parent=self, subarray=self.reader.subarray, ) self.calibrator = CameraCalibrator( - subarray=self.reader.subarray, - parent=self, - image_extractor=self.extractor, + subarray=self.reader.subarray, parent=self, image_extractor=self.extractor, ) self.viewer = BokehEventViewer(parent=self, subarray=self.reader.subarray) @@ -111,24 +106,27 @@ def setup(self): self.update_dl1_widget_values() # Setup layout - self.layout = layout([ - [self.viewer.layout], + self.layout = layout( [ - self.w_previous_event, - self.w_next_event, - self.w_goto_event_index, - self.w_goto_event_id - ], - [self.w_event_index, self.w_event_id], - [self.w_telid, self.w_channel], - [self.wb_extractor] - ]) + [self.viewer.layout], + [ + self.w_previous_event, + self.w_next_event, + self.w_goto_event_index, + self.w_goto_event_id, + ], + [self.w_event_index, self.w_event_id], + [self.w_telid, self.w_channel], + [self.wb_extractor], + ] + ) def start(self): self.event_index = 0 def finish(self): if not self.disable_server: + def modify_doc(doc): doc.add_root(self.layout) doc.title = self.name @@ -137,14 +135,13 @@ def modify_doc(doc): theme_path = os.path.join(directory, "theme.yaml") template_path = os.path.join(directory, "templates") doc.theme = Theme(filename=theme_path) - env = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_path) - ) - doc.template = env.get_template('index.html') + env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path)) + doc.template = env.get_template("index.html") - self.log.info('Opening Bokeh application 
on ' - 'http://localhost:{}/'.format(self.port)) - server = Server({'/': modify_doc}, num_procs=1, port=self.port) + self.log.info( + "Opening Bokeh application on " "http://localhost:{}/".format(self.port) + ) + server = Server({"/": modify_doc}, num_procs=1, port=self.port) server.start() server.io_loop.add_callback(server.show, "/") server.io_loop.start() @@ -232,9 +229,7 @@ def update_dl1_calibrator(self, extractor=None): self.extractor = extractor self.calibrator = CameraCalibrator( - subarray=self.reader.subarray, - parent=self, - image_extractor=self.extractor, + subarray=self.reader.subarray, parent=self, image_extractor=self.extractor, ) self.viewer.refresh() @@ -246,25 +241,21 @@ def on_next_event_widget_click(self): self.event_index += 1 def create_previous_event_widget(self): - self.w_previous_event = Button( - label="<", - button_type="default", - width=50 - ) + self.w_previous_event = Button(label="<", button_type="default", width=50) self.w_previous_event.on_click(self.on_previous_event_widget_click) def on_previous_event_widget_click(self): self.event_index -= 1 def create_event_index_widget(self): - self.w_event_index = TextInput(title="Event Index:", value='') + self.w_event_index = TextInput(title="Event Index:", value="") def update_event_index_widget(self): if self.w_event_index: self.w_event_index.value = str(self.event_index) def create_event_id_widget(self): - self.w_event_id = TextInput(title="Event ID:", value='') + self.w_event_id = TextInput(title="Event ID:", value="") def update_event_id_widget(self): if self.w_event_id: @@ -272,9 +263,7 @@ def update_event_id_widget(self): def create_goto_event_index_widget(self): self.w_goto_event_index = Button( - label="GOTO Index", - button_type="default", - width=100 + label="GOTO Index", button_type="default", width=100 ) self.w_goto_event_index.on_click(self.on_goto_event_index_widget_click) @@ -282,11 +271,7 @@ def on_goto_event_index_widget_click(self): self.event_index = int(self.w_event_index.value) def create_goto_event_id_widget(self): - self.w_goto_event_id = Button( - label="GOTO ID", - button_type="default", - width=70 - ) + self.w_goto_event_id = Button(label="GOTO ID", button_type="default", width=70) self.w_goto_event_id.on_click(self.on_goto_event_id_widget_click) def on_goto_event_id_widget_click(self): @@ -294,7 +279,7 @@ def on_goto_event_id_widget_click(self): def create_telid_widget(self): self.w_telid = Select(title="Telescope:", value="", options=[]) - self.w_telid.on_change('value', self.on_telid_widget_change) + self.w_telid.on_change("value", self.on_telid_widget_change) def update_telid_widget(self): if self.w_telid: @@ -308,7 +293,7 @@ def on_telid_widget_change(self, _, __, ___): def create_channel_widget(self): self.w_channel = Select(title="Channel:", value="", options=[]) - self.w_channel.on_change('value', self.on_channel_widget_change) + self.w_channel.on_change("value", self.on_channel_widget_change) def update_channel_widget(self): if self.w_channel: @@ -326,36 +311,42 @@ def on_channel_widget_change(self, _, __, ___): def create_dl1_widgets(self): self.w_dl1_dict = dict( - extractor=Select(title="Extractor:", value='', width=5, - options=BokehFileViewer.extractor_product.values), - extractor_window_start=TextInput(title="Window Start:", value=''), - extractor_window_width=TextInput(title="Window Width:", value=''), - extractor_window_shift=TextInput(title="Window Shift:", value=''), - extractor_lwt=TextInput(title="Local Pixel Weight:", value='')) + extractor=Select( + 
title="Extractor:", + value="", + width=5, + options=BokehFileViewer.extractor_product.values, + ), + extractor_window_start=TextInput(title="Window Start:", value=""), + extractor_window_width=TextInput(title="Window Width:", value=""), + extractor_window_shift=TextInput(title="Window Shift:", value=""), + extractor_lwt=TextInput(title="Local Pixel Weight:", value=""), + ) for val in self.w_dl1_dict.values(): - val.on_change('value', self.on_dl1_widget_change) + val.on_change("value", self.on_dl1_widget_change) self.wb_extractor = widgetbox( PreText(text="Charge Extractor Configuration"), - self.w_dl1_dict['extractor'], - self.w_dl1_dict['extractor_window_start'], - self.w_dl1_dict['extractor_window_width'], - self.w_dl1_dict['extractor_window_shift'], - self.w_dl1_dict['extractor_lwt']) + self.w_dl1_dict["extractor"], + self.w_dl1_dict["extractor_window_start"], + self.w_dl1_dict["extractor_window_width"], + self.w_dl1_dict["extractor_window_shift"], + self.w_dl1_dict["extractor_lwt"], + ) def update_dl1_widget_values(self): if self.w_dl1_dict: for key, val in self.w_dl1_dict.items(): - if 'extractor' in key: - if key == 'extractor': + if "extractor" in key: + if key == "extractor": val.value = self.extractor.__class__.__name__ else: key = key.replace("extractor_", "") try: val.value = str(getattr(self.extractor, key)) except AttributeError: - val.value = '' + val.value = "" def on_dl1_widget_change(self, _, __, ___): if self.event: @@ -365,11 +356,11 @@ def on_dl1_widget_change(self, _, __, ___): for key, val in self.w_dl1_dict.items(): k = key.replace("extractor_", "ImageExtractor.") if val.value: - cmdline.append(f'--{k}={val.value}') + cmdline.append(f"--{k}={val.value}") self.parse_command_line(cmdline) extractor = ImageExtractor.from_name( - self.extractor_product, - parent=self) + self.extractor_product, parent=self + ) self.update_dl1_calibrator(extractor) self.update_dl1_widget_values() self._updating_dl1 = False @@ -380,5 +371,5 @@ def main(): exe.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ctapipe/tools/camdemo.py b/ctapipe/tools/camdemo.py index 24a3cbb3ae3..ad59e19a058 100644 --- a/ctapipe/tools/camdemo.py +++ b/ctapipe/tools/camdemo.py @@ -11,9 +11,18 @@ from matplotlib.animation import FuncAnimation from ctapipe.core import Tool, traits -from ctapipe.image import toymodel, tailcuts_clean, dilate, \ - hillas_parameters, HillasParameterizationError -from ctapipe.instrument import TelescopeDescription, OpticsDescription, CameraDescription +from ctapipe.image import ( + toymodel, + tailcuts_clean, + dilate, + hillas_parameters, + HillasParameterizationError, +) +from ctapipe.instrument import ( + TelescopeDescription, + OpticsDescription, + CameraDescription, +) from ctapipe.visualization import CameraDisplay @@ -22,39 +31,49 @@ class CameraDemo(Tool): description = "Display fake events in a demo camera" delay = traits.Int(50, help="Frame delay in ms", min=20).tag(config=True) - cleanframes = traits.Int(20, help="Number of frames between turning on " - "cleaning", min=0).tag(config=True) - autoscale = traits.Bool(False, help='scale each frame to max if ' - 'True').tag(config=True) - blit = traits.Bool(False, help='use blit operation to draw on screen (' - 'much faster but may cause some draw ' - 'artifacts)').tag(config=True) + cleanframes = traits.Int( + 20, help="Number of frames between turning on " "cleaning", min=0 + ).tag(config=True) + autoscale = traits.Bool(False, help="scale each frame to max if " "True").tag( + config=True + ) + 
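# --- Editor's note (illustrative sketch, not part of this patch) -------------
# BokehFileViewer and CameraDemo above follow the same ctapipe Tool pattern:
# configurable attributes are traitlets tagged with config=True, and `aliases`
# maps short command-line flags onto fully qualified trait names. The tool and
# trait below are hypothetical, kept minimal to show only that wiring.
from ctapipe.core import Tool, traits


class GreeterTool(Tool):
    name = "ctapipe-greeter"
    description = "Minimal Tool showing a configurable trait and an alias"

    repeats = traits.Int(3, help="How many greetings to print").tag(config=True)

    aliases = traits.Dict({"repeats": "GreeterTool.repeats"})

    def setup(self):
        self.log.info("greeting %d times", self.repeats)

    def start(self):
        for _ in range(self.repeats):
            print("hello")

    def finish(self):
        self.log.info("done")


def main():
    GreeterTool().run()  # e.g. `ctapipe-greeter --repeats=5`


if __name__ == "__main__":
    main()
# ------------------------------------------------------------------------------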
blit = traits.Bool( + False, + help="use blit operation to draw on screen (" + "much faster but may cause some draw " + "artifacts)", + ).tag(config=True) camera = traits.CaselessStrEnum( CameraDescription.get_known_camera_names(), - default_value='NectarCam', - help='Name of camera to display').tag(config=True) + default_value="NectarCam", + help="Name of camera to display", + ).tag(config=True) optics = traits.CaselessStrEnum( OpticsDescription.get_known_optics_names(), - default_value='MST', - help='Telescope optics description name' + default_value="MST", + help="Telescope optics description name", ).tag(config=True) - num_events = traits.Int(0, help='events to show before exiting (0 for ' - 'unlimited)').tag(config=True) - - display = traits.Bool(True, "enable or disable display (for " - "testing)").tag(config=True) + num_events = traits.Int( + 0, help="events to show before exiting (0 for " "unlimited)" + ).tag(config=True) - aliases = traits.Dict({ - 'delay': 'CameraDemo.delay', - 'cleanframes': 'CameraDemo.cleanframes', - 'autoscale': 'CameraDemo.autoscale', - 'blit': 'CameraDemo.blit', - 'camera': 'CameraDemo.camera', - 'optics': 'CameraDemo.optics', - 'num-events': 'CameraDemo.num_events' - }) + display = traits.Bool(True, "enable or disable display (for " "testing)").tag( + config=True + ) + + aliases = traits.Dict( + { + "delay": "CameraDemo.delay", + "cleanframes": "CameraDemo.cleanframes", + "autoscale": "CameraDemo.autoscale", + "blit": "CameraDemo.blit", + "camera": "CameraDemo.camera", + "optics": "CameraDemo.optics", + "num-events": "CameraDemo.num_events", + } + ) def __init__(self): super().__init__() @@ -71,8 +90,9 @@ def _display_camera_animation(self): ax = plt.subplot(111) # load the camera - tel = TelescopeDescription.from_name(optics_name=self.optics, - camera_name=self.camera) + tel = TelescopeDescription.from_name( + optics_name=self.optics, camera_name=self.camera + ) geom = tel.camera.geometry # poor-man's coordinate transform from telscope to camera frame (it's @@ -87,8 +107,10 @@ def _display_camera_animation(self): self.log.debug(f"scale={scale} m, wid=({minwid}-{maxwid})") disp = CameraDisplay( - geom, ax=ax, autoupdate=True, - title=f"{tel}, f={tel.optics.equivalent_focal_length}" + geom, + ax=ax, + autoupdate=True, + title=f"{tel}, f={tel.optics.equivalent_focal_length}", ) disp.cmap = plt.cm.terrain @@ -107,14 +129,13 @@ def update(frame): ) self.log.debug( "Frame=%d width=%03f length=%03f intens=%03d", - frame, width, length, intens + frame, + width, + length, + intens, ) - image, _, _ = model.generate_image( - geom, - intensity=intens, - nsb_level_pe=3, - ) + image, _, _ = model.generate_image(geom, intensity=intens, nsb_level_pe=3,) # alternate between cleaned and raw images if self._counter == self.cleanframes: @@ -127,23 +148,32 @@ def update(frame): disp.clear_overlays() if self.imclean: - cleanmask = tailcuts_clean(geom, image, - picture_thresh=10.0, - boundary_thresh=5.0) + cleanmask = tailcuts_clean( + geom, image, picture_thresh=10.0, boundary_thresh=5.0 + ) for ii in range(2): dilate(geom, cleanmask) image[cleanmask == 0] = 0 # zero noise pixels try: hillas = hillas_parameters(geom, image) - disp.overlay_moments(hillas, with_label=False, - color='red', alpha=0.7, - linewidth=2, linestyle='dashed') + disp.overlay_moments( + hillas, + with_label=False, + color="red", + alpha=0.7, + linewidth=2, + linestyle="dashed", + ) except HillasParameterizationError: disp.clear_overlays() pass - self.log.debug("Frame=%d image_sum=%.3f max=%.3f", - 
self._counter, image.sum(), image.max()) + self.log.debug( + "Frame=%d image_sum=%.3f max=%.3f", + self._counter, + image.sum(), + image.max(), + ) disp.image = image if self.autoscale: @@ -153,17 +183,22 @@ def update(frame): disp.axes.figure.canvas.draw() self._counter += 1 - return [ax, ] + return [ + ax, + ] frames = None if self.num_events == 0 else self.num_events repeat = True if self.num_events == 0 else False self.log.info(f"Running for {frames} frames") - self.anim = FuncAnimation(fig, update, - interval=self.delay, - frames=frames, - repeat=repeat, - blit=self.blit) + self.anim = FuncAnimation( + fig, + update, + interval=self.delay, + frames=frames, + repeat=repeat, + blit=self.blit, + ) if self.display: plt.show() @@ -174,5 +209,5 @@ def main(): app.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ctapipe/tools/display_dl1.py b/ctapipe/tools/display_dl1.py index 3fc535a1db8..2b77e0211fe 100644 --- a/ctapipe/tools/display_dl1.py +++ b/ctapipe/tools/display_dl1.py @@ -80,7 +80,7 @@ def plot(self, event, telid): self.c_intensity = CameraDisplay(geom, ax=self.ax_intensity) self.c_peak_time = CameraDisplay(geom, ax=self.ax_peak_time) - if (peak_time != 0.).all(): + if (peak_time != 0.0).all(): tmaxmin = event.dl0.tel[telid].waveform.shape[1] t_chargemax = peak_time[image.argmax()] cmap_time = colors.LinearSegmentedColormap.from_list( @@ -170,22 +170,22 @@ class DisplayDL1Calib(Tool): def __init__(self, **kwargs): super().__init__(**kwargs) - self.config.EventSource.input_url = get_dataset_path('gamma_test_large.simtel.gz') + self.config.EventSource.input_url = get_dataset_path( + "gamma_test_large.simtel.gz" + ) self.eventsource = None self.calibrator = None self.plotter = None def setup(self): - self.eventsource = self.add_component( - EventSource.from_config(parent=self) - ) + self.eventsource = self.add_component(EventSource.from_config(parent=self)) - self.calibrator = self.add_component(CameraCalibrator( - parent=self, subarray=self.eventsource.subarray - )) - self.plotter = self.add_component(ImagePlotter( - subarray=self.eventsource.subarray, parent=self - )) + self.calibrator = self.add_component( + CameraCalibrator(parent=self, subarray=self.eventsource.subarray) + ) + self.plotter = self.add_component( + ImagePlotter(subarray=self.eventsource.subarray, parent=self) + ) def start(self): for event in self.eventsource: diff --git a/ctapipe/tools/display_events_single_tel.py b/ctapipe/tools/display_events_single_tel.py index 571eec59ad4..57d247cf9cd 100755 --- a/ctapipe/tools/display_events_single_tel.py +++ b/ctapipe/tools/display_events_single_tel.py @@ -18,9 +18,7 @@ from ctapipe.core import Tool from ctapipe.core.traits import Float, Dict, List, Path from ctapipe.core.traits import Unicode, Int, Bool -from ctapipe.image import ( - tailcuts_clean, hillas_parameters, HillasParameterizationError -) +from ctapipe.image import tailcuts_clean, hillas_parameters, HillasParameterizationError from ctapipe.io import EventSource from ctapipe.visualization import CameraDisplay @@ -29,40 +27,38 @@ class SingleTelEventDisplay(Tool): name = "ctapipe-display-televents" description = Unicode(__doc__) - infile = Path( - help="input file to read", exists=True, directory_ok=False - ).tag(config=True) - tel = Int(help='Telescope ID to display', default=0).tag(config=True) - write = Bool( - help="Write out images to PNG files", default=False - ).tag(config=True) + infile = Path(help="input file to read", exists=True, directory_ok=False).tag( + config=True + ) + 
tel = Int(help="Telescope ID to display", default=0).tag(config=True) + write = Bool(help="Write out images to PNG files", default=False).tag(config=True) clean = Bool(help="Apply image cleaning", default=False).tag(config=True) - hillas = Bool( - help="Apply and display Hillas parametrization", default=False - ).tag(config=True) + hillas = Bool(help="Apply and display Hillas parametrization", default=False).tag( + config=True + ) samples = Bool(help="Show each sample", default=False).tag(config=True) display = Bool( help="Display results in interactive window", default_value=True ).tag(config=True) - delay = Float( - help='delay between events in s', default_value=0.01, min=0.001 - ).tag(config=True) - progress = Bool( - help='display progress bar', default_value=True - ).tag(config=True) - - aliases = Dict({ - 'infile': 'SingleTelEventDisplay.infile', - 'tel': 'SingleTelEventDisplay.tel', - 'max-events': 'EventSource.max_events', - 'write': 'SingleTelEventDisplay.write', - 'clean': 'SingleTelEventDisplay.clean', - 'hillas': 'SingleTelEventDisplay.hillas', - 'samples': 'SingleTelEventDisplay.samples', - 'display': 'SingleTelEventDisplay.display', - 'delay': 'SingleTelEventDisplay.delay', - 'progress': 'SingleTelEventDisplay.progress' - }) + delay = Float(help="delay between events in s", default_value=0.01, min=0.001).tag( + config=True + ) + progress = Bool(help="display progress bar", default_value=True).tag(config=True) + + aliases = Dict( + { + "infile": "SingleTelEventDisplay.infile", + "tel": "SingleTelEventDisplay.tel", + "max-events": "EventSource.max_events", + "write": "SingleTelEventDisplay.write", + "clean": "SingleTelEventDisplay.clean", + "hillas": "SingleTelEventDisplay.hillas", + "samples": "SingleTelEventDisplay.samples", + "display": "SingleTelEventDisplay.display", + "delay": "SingleTelEventDisplay.delay", + "progress": "SingleTelEventDisplay.progress", + } + ) classes = List([EventSource, CameraCalibrator]) @@ -70,27 +66,29 @@ def __init__(self, **kwargs): super().__init__(**kwargs) def setup(self): - print('TOLLES INFILE', self.infile) + print("TOLLES INFILE", self.infile) self.event_source = self.add_component( EventSource.from_url(self.infile, parent=self) ) - self.event_source.allowed_tels = {self.tel, } + self.event_source.allowed_tels = { + self.tel, + } self.calibrator = self.add_component( CameraCalibrator(parent=self, subarray=self.event_source.subarray) ) - self.log.info(f'SELECTING EVENTS FROM TELESCOPE {self.tel}') + self.log.info(f"SELECTING EVENTS FROM TELESCOPE {self.tel}") def start(self): disp = None for event in tqdm( - self.event_source, - desc=f'Tel{self.tel}', - total=self.event_source.max_events, - disable=~self.progress + self.event_source, + desc=f"Tel{self.tel}", + total=self.event_source.max_events, + disable=~self.progress, ): self.log.debug(event.trigger) @@ -109,7 +107,7 @@ def start(self): # display the event disp.axes.set_title( - 'CT{:03d} ({}), event {:06d}'.format( + "CT{:03d} ({}), event {:06d}".format( self.tel, geom.camera_name, event.index.event_id ) ) @@ -125,8 +123,8 @@ def start(self): plt.pause(self.delay) if self.write: plt.savefig( - f'CT{self.tel:03d}_EV{event.index.event_id:10d}' - f'_S{ii:02d}.png' + f"CT{self.tel:03d}_EV{event.index.event_id:10d}" + f"_S{ii:02d}.png" ) else: # display integrated event: @@ -148,7 +146,7 @@ def start(self): params = hillas_parameters(geom, image=im) disp.overlay_moments( - params, color='pink', lw=3, with_label=False + params, color="pink", lw=3, with_label=False ) except 
HillasParameterizationError: pass @@ -156,17 +154,16 @@ def start(self): if self.display: plt.pause(self.delay) if self.write: - plt.savefig( - f'CT{self.tel:03d}_EV{event.index.event_id:010d}.png' - ) + plt.savefig(f"CT{self.tel:03d}_EV{event.index.event_id:010d}.png") self.log.info("FINISHED READING DATA FILE") if disp is None: self.log.warning( - 'No events for tel {} were found in {}. Try a ' - 'different EventIO file or another telescope' - .format(self.tel, self.infile), + "No events for tel {} were found in {}. Try a " + "different EventIO file or another telescope".format( + self.tel, self.infile + ), ) diff --git a/ctapipe/tools/display_integrator.py b/ctapipe/tools/display_integrator.py index b72c1dadb27..e61342bb2f0 100644 --- a/ctapipe/tools/display_integrator.py +++ b/ctapipe/tools/display_integrator.py @@ -214,8 +214,7 @@ class DisplayIntegrator(Tool): event_index = Int(0, help="Event index to view.").tag(config=True) use_event_id = Bool( - False, - help="event_index will obtain an event using event_id instead of index.", + False, help="event_index will obtain an event using event_id instead of index.", ).tag(config=True) telescope = Int( None, @@ -263,12 +262,16 @@ def setup(self): event_source = self.add_component(EventSource.from_config(parent=self)) self.subarray = event_source.subarray self.eventseeker = self.add_component(EventSeeker(event_source, parent=self)) - self.extractor = self.add_component(ImageExtractor.from_name( - self.extractor_product, parent=self, subarray=self.subarray - )) - self.calibrate = self.add_component(CameraCalibrator( - parent=self, image_extractor=self.extractor, subarray=self.subarray - )) + self.extractor = self.add_component( + ImageExtractor.from_name( + self.extractor_product, parent=self, subarray=self.subarray + ) + ) + self.calibrate = self.add_component( + CameraCalibrator( + parent=self, image_extractor=self.extractor, subarray=self.subarray + ) + ) def start(self): event_num = self.event_index diff --git a/ctapipe/tools/display_summed_images.py b/ctapipe/tools/display_summed_images.py index 4afc25806bc..7479fd19bf2 100644 --- a/ctapipe/tools/display_summed_images.py +++ b/ctapipe/tools/display_summed_images.py @@ -20,34 +20,34 @@ class ImageSumDisplayerTool(Tool): name = "ctapipe-display-imagesum" infile = Path( - help='input simtelarray file', - default_value=get_dataset_path('gamma_test_large.simtel.gz'), + help="input simtelarray file", + default_value=get_dataset_path("gamma_test_large.simtel.gz"), exists=True, directory_ok=False, ).tag(config=True) - telgroup = Integer( - help='telescope group number', default_value=1 - ).tag(config=True) + telgroup = Integer(help="telescope group number", default_value=1).tag(config=True) max_events = Integer( - help='stop after this many events if non-zero', default_value=0, min=0 + help="stop after this many events if non-zero", default_value=0, min=0 ).tag(config=True) output_suffix = Unicode( - help='suffix (file extension) of output ' - 'filenames to write images ' - 'to (no writing is done if blank). ' - 'Images will be named [EVENTID][suffix]', - default_value="" + help="suffix (file extension) of output " + "filenames to write images " + "to (no writing is done if blank). 
" + "Images will be named [EVENTID][suffix]", + default_value="", ).tag(config=True) - aliases = Dict({ - 'infile': 'ImageSumDisplayerTool.infile', - 'telgroup': 'ImageSumDisplayerTool.telgroup', - 'max-events': 'ImageSumDisplayerTool.max_events', - 'output-suffix': 'ImageSumDisplayerTool.output_suffix' - }) + aliases = Dict( + { + "infile": "ImageSumDisplayerTool.infile", + "telgroup": "ImageSumDisplayerTool.telgroup", + "max-events": "ImageSumDisplayerTool.max_events", + "output-suffix": "ImageSumDisplayerTool.output_suffix", + } + ) classes = List([CameraCalibrator, SimTelEventSource]) @@ -61,15 +61,16 @@ def setup(self): input_url=self.infile, max_events=self.max_events, back_seekable=True ) - camtypes = self.reader.subarray.to_table().group_by('camera_type') + camtypes = self.reader.subarray.to_table().group_by("camera_type") self.reader.subarray.info(printer=self.log.info) group = camtypes.groups[self.telgroup] - self._selected_tels = list(group['tel_id'].data) + self._selected_tels = list(group["tel_id"].data) self._base_tel = self._selected_tels[0] self.log.info( - "Telescope group %d: %s", self.telgroup, - str(self.reader.subarray.tel[self._selected_tels[0]]) + "Telescope group %d: %s", + self.telgroup, + str(self.reader.subarray.tel[self._selected_tels[0]]), ) self.log.info(f"SELECTED TELESCOPES:{self._selected_tels}") @@ -91,7 +92,7 @@ def start(self): imsum = np.zeros(shape=geom.pix_x.shape, dtype=np.float) disp = CameraDisplay(geom, title=geom.camera_name) disp.add_colorbar() - disp.cmap = 'viridis' + disp.cmap = "viridis" if len(event.dl0.tels_with_data) <= 2: continue @@ -102,17 +103,14 @@ def start(self): self.log.info( "event={} ntels={} energy={}".format( - event.index.event_id, len(event.dl0.tels_with_data), - event.mc.energy + event.index.event_id, len(event.dl0.tels_with_data), event.mc.energy ) ) disp.image = imsum plt.pause(0.1) if self.output_suffix != "": - filename = "{:020d}{}".format( - event.index.event_id, self.output_suffix - ) + filename = "{:020d}{}".format(event.index.event_id, self.output_suffix) self.log.info(f"saving: '{filename}'") plt.savefig(filename) diff --git a/ctapipe/tools/dump_instrument.py b/ctapipe/tools/dump_instrument.py index ed62750593c..a044b08fdfa 100644 --- a/ctapipe/tools/dump_instrument.py +++ b/ctapipe/tools/dump_instrument.py @@ -33,16 +33,19 @@ def get_camera_types(subarray): class DumpInstrumentTool(Tool): description = Unicode(__doc__) - name = 'ctapipe-dump-instrument' + name = "ctapipe-dump-instrument" - infile = Path(exists=True, help='input simtelarray file').tag(config=True) - format = Enum(['fits', 'ecsv', 'hdf5'], - default_value='fits', - help='Format of output file', - config=True) + infile = Path(exists=True, help="input simtelarray file").tag(config=True) + format = Enum( + ["fits", "ecsv", "hdf5"], + default_value="fits", + help="Format of output file", + config=True, + ) - aliases = Dict(dict(infile='DumpInstrumentTool.infile', - format='DumpInstrumentTool.format')) + aliases = Dict( + dict(infile="DumpInstrumentTool.infile", format="DumpInstrumentTool.format") + ) def setup(self): with event_source(self.infile) as source: @@ -60,12 +63,12 @@ def finish(self): def _get_file_format_info(format_name, table_type, table_name): """ returns file extension + dict of required parameters for Table.write""" - if format_name == 'fits': - return 'fits.gz', dict() - elif format_name == 'ecsv': - return 'ecsv.txt', dict(format='ascii.ecsv') - elif format_name == 'hdf5': - return 'h5', dict(path="/" + table_type + "/" + 
table_name) + if format_name == "fits": + return "fits.gz", dict() + elif format_name == "ecsv": + return "ecsv.txt", dict(format="ascii.ecsv") + elif format_name == "hdf5": + return "h5", dict(path="/" + table_type + "/" + table_name) else: raise NameError("format not supported") @@ -73,56 +76,50 @@ def write_camera_geometries(self): cam_types = get_camera_types(self.subarray) self.subarray.info(printer=self.log.info) for cam_name in cam_types: - ext, args = self._get_file_format_info(self.format, - 'CAMGEOM', - cam_name) + ext, args = self._get_file_format_info(self.format, "CAMGEOM", cam_name) self.log.debug(f"writing {cam_name}") tel_id = cam_types[cam_name].pop() geom = self.subarray.tel[tel_id].camera.geometry table = geom.to_table() - table.meta['SOURCE'] = self.infile + table.meta["SOURCE"] = self.infile filename = f"{cam_name}.camgeom.{ext}" try: table.write(filename, **args) - Provenance().add_output_file(filename, 'dl0.tel.svc.camera') + Provenance().add_output_file(filename, "dl0.tel.svc.camera") except IOError as err: self.log.warning( - "couldn't write camera definition '%s' because: %s", - filename, err + "couldn't write camera definition '%s' because: %s", filename, err ) def write_optics_descriptions(self): sub = self.subarray - ext, args = self._get_file_format_info(self.format, sub.name, 'optics') + ext, args = self._get_file_format_info(self.format, sub.name, "optics") - tab = sub.to_table(kind='optics') - tab.meta['SOURCE'] = self.infile - filename = f'{sub.name}.optics.{ext}' + tab = sub.to_table(kind="optics") + tab.meta["SOURCE"] = self.infile + filename = f"{sub.name}.optics.{ext}" try: tab.write(filename, **args) - Provenance().add_output_file(filename, 'dl0.sub.svc.optics') + Provenance().add_output_file(filename, "dl0.sub.svc.optics") except IOError as err: self.log.warning( - "couldn't write optics description '%s' because: %s", - filename, err + "couldn't write optics description '%s' because: %s", filename, err ) def write_subarray_description(self): sub = self.subarray - ext, args = self._get_file_format_info(self.format, sub.name, - 'subarray') - tab = sub.to_table(kind='subarray') - tab.meta['SOURCE'] = self.infile - filename = f'{sub.name}.subarray.{ext}' + ext, args = self._get_file_format_info(self.format, sub.name, "subarray") + tab = sub.to_table(kind="subarray") + tab.meta["SOURCE"] = self.infile + filename = f"{sub.name}.subarray.{ext}" try: tab.write(filename, **args) - Provenance().add_output_file(filename, 'dl0.sub.svc.subarray') + Provenance().add_output_file(filename, "dl0.sub.svc.subarray") except IOError as err: self.log.warning( - "couldn't write subarray description '%s' because: %s", - filename, err + "couldn't write subarray description '%s' because: %s", filename, err ) @@ -131,5 +128,5 @@ def main(): tool.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ctapipe/tools/dump_triggers.py b/ctapipe/tools/dump_triggers.py index 0a7cbecbccd..f19cfd64888 100644 --- a/ctapipe/tools/dump_triggers.py +++ b/ctapipe/tools/dump_triggers.py @@ -17,39 +17,47 @@ class DumpTriggersTool(Tool): description = Unicode(__doc__) - name = 'ctapipe-dump-triggers' + name = "ctapipe-dump-triggers" # ============================================= # configuration parameters: # ============================================= - infile = Path( - exists=True, directory_ok=False, help='input simtelarray file' - ).tag(config=True) + infile = Path(exists=True, directory_ok=False, help="input simtelarray file").tag( + config=True + ) outfile = 
Path( - default_value='triggers.fits', directory_ok=False, - help='output filename (*.fits, *.h5)', + default_value="triggers.fits", + directory_ok=False, + help="output filename (*.fits, *.h5)", ).tag(config=True) - overwrite = Bool(False, - help="overwrite existing output file" - ).tag(config=True) + overwrite = Bool(False, help="overwrite existing output file").tag(config=True) # ============================================= # map low-level options to high-level command-line options # ============================================= - aliases = Dict({'infile': 'DumpTriggersTool.infile', - 'outfile': 'DumpTriggersTool.outfile'}) - - flags = Dict({'overwrite': ({'DumpTriggersTool': {'overwrite': True}}, - 'Enable overwriting of output file')}) - - examples = ('ctapipe-dump-triggers --infile gamma.simtel.gz ' - '--outfile trig.fits --overwrite' - '\n\n' - 'If you want to see more output, use --log_level=DEBUG') + aliases = Dict( + {"infile": "DumpTriggersTool.infile", "outfile": "DumpTriggersTool.outfile"} + ) + + flags = Dict( + { + "overwrite": ( + {"DumpTriggersTool": {"overwrite": True}}, + "Enable overwriting of output file", + ) + } + ) + + examples = ( + "ctapipe-dump-triggers --infile gamma.simtel.gz " + "--outfile trig.fits --overwrite" + "\n\n" + "If you want to see more output, use --log_level=DEBUG" + ) # ============================================= # The methods of the Tool (initialize, start, finish): @@ -80,26 +88,32 @@ def add_event_to_table(self, event): # to 1 # insert the row into the table - self.events.add_row((event.index.event_id, relative_time.sec, delta_t.sec, - len(trigtels), - self._current_trigpattern)) + self.events.add_row( + ( + event.index.event_id, + relative_time.sec, + delta_t.sec, + len(trigtels), + self._current_trigpattern, + ) + ) def setup(self): """ setup function, called before `start()` """ - if self.infile == '': + if self.infile == "": raise ToolConfigurationError("No 'infile' parameter was specified. 
") - self.events = Table(names=['EVENT_ID', 'T_REL', 'DELTA_T', - 'N_TRIG', 'TRIGGERED_TELS'], - dtype=[np.int64, np.float64, np.float64, - np.int32, np.uint8]) + self.events = Table( + names=["EVENT_ID", "T_REL", "DELTA_T", "N_TRIG", "TRIGGERED_TELS"], + dtype=[np.int64, np.float64, np.float64, np.int32, np.uint8], + ) - self.events['TRIGGERED_TELS'].shape = (0, MAX_TELS) - self.events['T_REL'].unit = u.s - self.events['T_REL'].description = 'Time relative to first event' - self.events['DELTA_T'].unit = u.s - self.events.meta['INPUT'] = self.infile + self.events["TRIGGERED_TELS"].shape = (0, MAX_TELS) + self.events["T_REL"].unit = u.s + self.events["T_REL"].description = "Time relative to first event" + self.events["DELTA_T"].unit = u.s + self.events.meta["INPUT"] = self.infile self._current_trigpattern = np.zeros(MAX_TELS) self._current_starttime = None @@ -118,11 +132,11 @@ def finish(self): """ # write out the final table try: - if '.fits' in self.outfile.suffixes: + if ".fits" in self.outfile.suffixes: self.events.write(self.outfile, overwrite=self.overwrite) - elif self.outfile.suffix in ('.hdf5', '.h5', '.hdf'): + elif self.outfile.suffix in (".hdf5", ".h5", ".hdf"): self.events.write( - self.outfile, path='/events', overwrite=self.overwrite + self.outfile, path="/events", overwrite=self.overwrite ) else: self.events.write(self.outfile) @@ -131,7 +145,7 @@ def finish(self): except IOError as err: self.log.warning("Couldn't write output (%s)", err) - self.log.info('\n %s', self.events) + self.log.info("\n %s", self.events) def main(): diff --git a/ctapipe/tools/extract_charge_resolution.py b/ctapipe/tools/extract_charge_resolution.py index 191d6ef736f..2462d917e8d 100644 --- a/ctapipe/tools/extract_charge_resolution.py +++ b/ctapipe/tools/extract_charge_resolution.py @@ -34,7 +34,7 @@ class ChargeResolutionGenerator(Tool): output_path = Path( default_value="charge_resolution.h5", directory_ok=False, - help="Path to store the output HDF5 file" + help="Path to store the output HDF5 file", ).tag(config=True) extractor_product = traits.create_class_enum_trait( ImageExtractor, default_value="NeighborPeakWindowSum" @@ -63,17 +63,19 @@ def setup(self): self.eventsource = self.add_component(SimTelEventSource(parent=self)) - extractor = self.add_component(ImageExtractor.from_name( - self.extractor_product, - parent=self, - subarray=self.eventsource.subarray, - )) - - self.calibrator = self.add_component(CameraCalibrator( - parent=self, - image_extractor=extractor, - subarray=self.eventsource.subarray, - )) + extractor = self.add_component( + ImageExtractor.from_name( + self.extractor_product, parent=self, subarray=self.eventsource.subarray, + ) + ) + + self.calibrator = self.add_component( + CameraCalibrator( + parent=self, + image_extractor=extractor, + subarray=self.eventsource.subarray, + ) + ) self.calculator = ChargeResolutionCalculator() def start(self): diff --git a/ctapipe/tools/info.py b/ctapipe/tools/info.py index b6b57c88709..f622610c644 100644 --- a/ctapipe/tools/info.py +++ b/ctapipe/tools/info.py @@ -9,40 +9,50 @@ from ..core import Provenance, get_module_version from ..utils import datasets -__all__ = ['info'] +__all__ = ["info"] # TODO: this list should be global (or generated at install time) -_dependencies = sorted([ - 'astropy', 'matplotlib', - 'numpy', 'traitlets', - 'sklearn', 'scipy', 'numba', - 'pytest', 'iminuit', 'tables', - 'eventio', -]) - -_optional_dependencies = sorted([ - 'ctapipe_resources', - 'pytest', - 'graphviz', - 'targetio', - 'matplotlib' -]) 
+_dependencies = sorted( + [ + "astropy", + "matplotlib", + "numpy", + "traitlets", + "sklearn", + "scipy", + "numba", + "pytest", + "iminuit", + "tables", + "eventio", + ] +) + +_optional_dependencies = sorted( + ["ctapipe_resources", "pytest", "graphviz", "targetio", "matplotlib"] +) def main(args=None): parser = get_parser(info) - parser.add_argument('--version', action='store_true', - help='Print version number') - parser.add_argument('--tools', action='store_true', - help='Print available command line tools') - parser.add_argument('--dependencies', action='store_true', - help='Print available versions of dependencies') - parser.add_argument('--resources', action='store_true', - help='Print available versions of dependencies') - parser.add_argument('--system', action='store_true', - help='Print system info') - parser.add_argument('--all', dest='show_all', action='store_true', - help='show all info') + parser.add_argument("--version", action="store_true", help="Print version number") + parser.add_argument( + "--tools", action="store_true", help="Print available command line tools" + ) + parser.add_argument( + "--dependencies", + action="store_true", + help="Print available versions of dependencies", + ) + parser.add_argument( + "--resources", + action="store_true", + help="Print available versions of dependencies", + ) + parser.add_argument("--system", action="store_true", help="Print system info") + parser.add_argument( + "--all", dest="show_all", action="store_true", help="show all info" + ) args = parser.parse_args(args) if len(sys.argv) <= 1: @@ -52,14 +62,19 @@ def main(args=None): info(**vars(args)) -def info(version=False, tools=False, dependencies=False, - resources=False, system=False, show_all=False): +def info( + version=False, + tools=False, + dependencies=False, + resources=False, + system=False, + show_all=False, +): """Print various info to the console. TODO: explain. """ - logging.basicConfig(level=logging.INFO, - format='%(levelname)s - %(message)s') + logging.basicConfig(level=logging.INFO, format="%(levelname)s - %(message)s") if version or show_all: _info_version() @@ -80,18 +95,19 @@ def info(version=False, tools=False, dependencies=False, def _info_version(): """Print version info.""" import ctapipe - print('\n*** ctapipe version info ***\n') - print(f'version: {ctapipe.__version__}') + + print("\n*** ctapipe version info ***\n") + print(f"version: {ctapipe.__version__}") # print('release: {0}'.format(version.release)) # print('githash: {0}'.format(version.githash)) - print('') + print("") def _info_tools(): """Print info about command line tools.""" - print('\n*** ctapipe tools ***\n') - print('the following can be executed by typing ctapipe-:') - print('') + print("\n*** ctapipe tools ***\n") + print("the following can be executed by typing ctapipe-:") + print("") # TODO: how to get a one-line description or # full help text from the docstring or ArgumentParser? @@ -99,57 +115,57 @@ def _info_tools(): # that are defined in setup.py !??? 
from ctapipe.tools.utils import get_all_descriptions from textwrap import TextWrapper - wrapper = TextWrapper(width=80, - subsequent_indent=" " * 35) + + wrapper = TextWrapper(width=80, subsequent_indent=" " * 35) scripts = get_all_descriptions() for name, desc in sorted(scripts.items()): text = f"{name:<30s} - {desc}" print(wrapper.fill(text)) - print('') - print('') + print("") + print("") def _info_dependencies(): """Print info about dependencies.""" - print('\n*** ctapipe core dependencies ***\n') + print("\n*** ctapipe core dependencies ***\n") for name in _dependencies: version = get_module_version(name) - print(f'{name:>20s} -- {version}') + print(f"{name:>20s} -- {version}") - print('\n*** ctapipe optional dependencies ***\n') + print("\n*** ctapipe optional dependencies ***\n") for name in _optional_dependencies: try: module = importlib.import_module(name) version = module.__version__ except ImportError: - version = 'not installed' + version = "not installed" except AttributeError: version = "installed, but __version__ doesn't exist" - print(f'{name:>20s} -- {version}') + print(f"{name:>20s} -- {version}") def _info_resources(): """ display all known resources """ - print('\n*** ctapipe resources ***\n') + print("\n*** ctapipe resources ***\n") print("CTAPIPE_SVC_PATH: (directories where resources are searched)") - if os.getenv('CTAPIPE_SVC_PATH') is not None: + if os.getenv("CTAPIPE_SVC_PATH") is not None: for directory in datasets.get_searchpath_dirs(): print(f"\t * {directory}") else: print("\t no path is set") print("") - all_resources = sorted(datasets.find_all_matching_datasets(r'\w.*')) - locations = [os.path.dirname(datasets.get_dataset_path(name)) - for name in all_resources] + all_resources = sorted(datasets.find_all_matching_datasets(r"\w.*")) + locations = [ + os.path.dirname(datasets.get_dataset_path(name)) for name in all_resources + ] home = os.path.expanduser("~") - resource_dir = os.path.dirname(datasets.get_dataset_path( - "HESS-I.camgeom.fits.gz")) + resource_dir = os.path.dirname(datasets.get_dataset_path("HESS-I.camgeom.fits.gz")) fmt = "{name:<30.30s} : {loc:<30.30s}" print(fmt.format(name="RESOURCE NAME", loc="LOCATION")) @@ -165,19 +181,19 @@ def _info_resources(): def _info_system(): # collect system info using the ctapipe provenance system : - print('\n*** ctapipe system environment ***\n') + print("\n*** ctapipe system environment ***\n") prov = Provenance() - system_prov = prov.current_activity.provenance['system'] + system_prov = prov.current_activity.provenance["system"] - for section in ['platform', 'python']: + for section in ["platform", "python"]: - print('\n====== ', section, " ======== \n") + print("\n====== ", section, " ======== \n") sysinfo = system_prov[section] for name, val in sysinfo.items(): print("{:>20.20s} -- {:<60.60s}".format(name, str(val))) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ctapipe/tools/muon_reconstruction.py b/ctapipe/tools/muon_reconstruction.py index 606f88de64d..0ce35bf348d 100644 --- a/ctapipe/tools/muon_reconstruction.py +++ b/ctapipe/tools/muon_reconstruction.py @@ -21,7 +21,7 @@ ring_containment, ring_completeness, intensity_ratio_inside_ring, - mean_squared_error + mean_squared_error, ) @@ -33,47 +33,44 @@ class MuonAnalysis(Tool): The resulting output can be read e.g. 
using for example `pandas.read_hdf(filename, 'dl1/event/telescope/parameters/muon')` """ - name = 'ctapipe-reconstruct-muons' + + name = "ctapipe-reconstruct-muons" description = traits.Unicode(__doc__) - output = traits.Path( - directory_ok=False, - help='HDF5 output file name' - ).tag(config=True) + output = traits.Path(directory_ok=False, help="HDF5 output file name").tag( + config=True + ) completeness_threshold = traits.FloatTelescopeParameter( - default_value=30.0, - help='Threshold for calculating the ``ring_completeness``', + default_value=30.0, help="Threshold for calculating the ``ring_completeness``", ).tag(config=True) ratio_width = traits.FloatTelescopeParameter( default_value=1.5, help=( - 'Ring width for intensity ratio' - ' computation as multiple of pixel diameter' - ) + "Ring width for intensity ratio" + " computation as multiple of pixel diameter" + ), ).tag(config=True) overwrite = traits.Bool( - default_value=False, help='If true, overwrite outputfile without asking' + default_value=False, help="If true, overwrite outputfile without asking" ).tag(config=True) min_pixels = traits.IntTelescopeParameter( help=( - 'Minimum number of pixels after cleaning and ring finding' - 'required to process an event' + "Minimum number of pixels after cleaning and ring finding" + "required to process an event" ), default_value=100, ).tag(config=True) pedestal = traits.FloatTelescopeParameter( - help='Pedestal noise rms', - default_value=1.1, + help="Pedestal noise rms", default_value=1.1, ).tag(config=True) extractor_name = traits.create_class_enum_trait( - ImageExtractor, - default_value='GlobalPeakWindowSum', + ImageExtractor, default_value="GlobalPeakWindowSum", ).tag(config=True) classes = [ @@ -85,58 +82,59 @@ class MuonAnalysis(Tool): ] + traits.classes_with_traits(ImageExtractor) aliases = { - 'i': 'EventSource.input_url', - 'input': 'EventSource.input_url', - 'o': 'MuonAnalysis.output', - 'output': 'MuonAnalysis.output', - 'max-events': 'EventSource.max_events', - 'allowed-tels': 'EventSource.allowed_tels', + "i": "EventSource.input_url", + "input": "EventSource.input_url", + "o": "MuonAnalysis.output", + "output": "MuonAnalysis.output", + "max-events": "EventSource.max_events", + "allowed-tels": "EventSource.allowed_tels", } flags = { - 'overwrite': ({'MuonAnalysis': {'overwrite': True}}, 'overwrite output file') + "overwrite": ({"MuonAnalysis": {"overwrite": True}}, "overwrite output file") } def setup(self): if self.output is None: - raise ToolConfigurationError('You need to provide an --output file') + raise ToolConfigurationError("You need to provide an --output file") if self.output.exists() and not self.overwrite: raise ToolConfigurationError( - 'Outputfile {self.output} already exists, use `--overwrite` to overwrite' + "Outputfile {self.output} already exists, use `--overwrite` to overwrite" ) self.source = self.add_component(EventSource.from_config(parent=self)) - self.extractor = self.add_component(ImageExtractor.from_name( - self.extractor_name, parent=self, subarray=self.source.subarray - )) - self.calib = self.add_component(CameraCalibrator( - subarray=self.source.subarray, parent=self, - image_extractor=self.extractor, - )) - self.ring_fitter = self.add_component(MuonRingFitter( - parent=self, - )) - self.intensity_fitter = self.add_component(MuonIntensityFitter( - subarray=self.source.subarray, parent=self, - )) - self.cleaning = self.add_component(TailcutsImageCleaner( - parent=self, - subarray=self.source.subarray, - )) - self.writer = 
self.add_component(HDF5TableWriter( - self.output, "", add_prefix=True, parent=self, - mode='w', - )) + self.extractor = self.add_component( + ImageExtractor.from_name( + self.extractor_name, parent=self, subarray=self.source.subarray + ) + ) + self.calib = self.add_component( + CameraCalibrator( + subarray=self.source.subarray, + parent=self, + image_extractor=self.extractor, + ) + ) + self.ring_fitter = self.add_component(MuonRingFitter(parent=self,)) + self.intensity_fitter = self.add_component( + MuonIntensityFitter(subarray=self.source.subarray, parent=self,) + ) + self.cleaning = self.add_component( + TailcutsImageCleaner(parent=self, subarray=self.source.subarray,) + ) + self.writer = self.add_component( + HDF5TableWriter(self.output, "", add_prefix=True, parent=self, mode="w",) + ) self.pixels_in_tel_frame = {} self.field_of_view = {} self.pixel_widths = {} - for p in ['min_pixels', 'pedestal', 'ratio_width', 'completeness_threshold']: + for p in ["min_pixels", "pedestal", "ratio_width", "completeness_threshold"]: getattr(self, p).attach_subarray(self.source.subarray) def start(self): - for event in tqdm(self.source, desc='Processing events: '): + for event in tqdm(self.source, desc="Processing events: "): self.process_array_event(event) def process_array_event(self, event): @@ -145,26 +143,26 @@ def process_array_event(self, event): for tel_id, dl1 in event.dl1.tel.items(): self.process_telescope_event(event.index, tel_id, dl1) - self.writer.write('sim/event/subarray/shower', [event.index, event.mc]) + self.writer.write("sim/event/subarray/shower", [event.index, event.mc]) def process_telescope_event(self, event_index, tel_id, dl1): event_id = event_index.event_id if self.source.subarray.tel[tel_id].optics.num_mirrors != 1: self.log.warn( - f'Skipping non-single mirror telescope {tel_id}' - ' set --allowed_tels to get rid of this warning' + f"Skipping non-single mirror telescope {tel_id}" + " set --allowed_tels to get rid of this warning" ) return - self.log.debug(f'Processing event {event_id}, telescope {tel_id}') + self.log.debug(f"Processing event {event_id}, telescope {tel_id}") image = dl1.image clean_mask = self.cleaning(tel_id, image) if np.count_nonzero(clean_mask) <= self.min_pixels.tel[tel_id]: self.log.debug( - f'Skipping event {event_id}-{tel_id}:' - f' has less then {self.min_pixels.tel[tel_id]} pixels after cleaning' + f"Skipping event {event_id}-{tel_id}:" + f" has less then {self.min_pixels.tel[tel_id]} pixels after cleaning" ) return @@ -176,18 +174,22 @@ def process_telescope_event(self, event_index, tel_id, dl1): mask = clean_mask for i in range(3): ring = self.ring_fitter(x, y, image, mask) - dist = np.sqrt((x - ring.center_x)**2 + (y - ring.center_y)**2) + dist = np.sqrt((x - ring.center_x) ** 2 + (y - ring.center_y) ** 2) mask = np.abs(dist - ring.radius) / ring.radius < 0.4 if np.count_nonzero(mask) <= self.min_pixels.tel[tel_id]: self.log.debug( - f'Skipping event {event_id}-{tel_id}:' - f' Less then {self.min_pixels.tel[tel_id]} pixels on ring' + f"Skipping event {event_id}-{tel_id}:" + f" Less then {self.min_pixels.tel[tel_id]} pixels on ring" ) return - if np.isnan([ring.radius.value, ring.center_x.value, ring.center_y.value]).any(): - self.log.debug(f'Skipping event {event_id}-{tel_id}: Ring fit did not succeed') + if np.isnan( + [ring.radius.value, ring.center_x.value, ring.center_y.value] + ).any(): + self.log.debug( + f"Skipping event {event_id}-{tel_id}: Ring fit did not succeed" + ) return parameters = self.calculate_muon_parameters(tel_id, image, 
clean_mask, ring) @@ -205,19 +207,16 @@ def process_telescope_event(self, event_index, tel_id, dl1): ) self.log.info( - f'Muon fit: r={ring.radius:.2f}' - f', width={result.width:.4f}' - f', efficiency={result.optical_efficiency:.2%}', + f"Muon fit: r={ring.radius:.2f}" + f", width={result.width:.4f}" + f", efficiency={result.optical_efficiency:.2%}", ) - tel_event_index = TelEventIndexContainer( - **event_index, - tel_id=tel_id, - ) + tel_event_index = TelEventIndexContainer(**event_index, tel_id=tel_id,) self.writer.write( - 'dl1/event/telescope/parameters/muons', - [tel_event_index, ring, parameters, result] + "dl1/event/telescope/parameters/muons", + [tel_event_index, ring, parameters, result], ) def calculate_muon_parameters(self, tel_id, image, clean_mask, ring): @@ -226,29 +225,37 @@ def calculate_muon_parameters(self, tel_id, image, clean_mask, ring): # add ring containment, not filled in fit containment = ring_containment( - ring.radius, - ring.center_x, - ring.center_y, - fov_radius, + ring.radius, ring.center_x, ring.center_y, fov_radius, ) completeness = ring_completeness( - x, y, image, - ring.radius, ring.center_x, ring.center_y, + x, + y, + image, + ring.radius, + ring.center_x, + ring.center_y, threshold=self.completeness_threshold.tel[tel_id], ) pixel_width = self.get_pixel_width(tel_id) intensity_ratio = intensity_ratio_inside_ring( - x[clean_mask], y[clean_mask], + x[clean_mask], + y[clean_mask], image[clean_mask], - ring.radius, ring.center_x, ring.center_y, + ring.radius, + ring.center_x, + ring.center_y, width=self.ratio_width.tel[tel_id] * pixel_width, ) mse = mean_squared_error( - x[clean_mask], y[clean_mask], image[clean_mask], - ring.radius, ring.center_x, ring.center_y + x[clean_mask], + y[clean_mask], + image[clean_mask], + ring.radius, + ring.center_x, + ring.center_y, ) return MuonParametersContainer( @@ -259,19 +266,19 @@ def calculate_muon_parameters(self, tel_id, image, clean_mask, ring): ) def get_fov(self, tel_id): - '''Guesstimate fov radius for telescope with id `tel_id`''' + """Guesstimate fov radius for telescope with id `tel_id`""" # memoize fov calculation if tel_id not in self.field_of_view: cam = self.source.subarray.tel[tel_id].camera.geometry border = cam.get_border_pixel_mask() x, y = self.get_pixel_coords(tel_id) - self.field_of_view[tel_id] = np.sqrt(x[border]**2 + y[border]**2).mean() + self.field_of_view[tel_id] = np.sqrt(x[border] ** 2 + y[border] ** 2).mean() return self.field_of_view[tel_id] def get_pixel_width(self, tel_id): - '''Guesstimate fov radius for telescope with id `tel_id`''' + """Guesstimate fov radius for telescope with id `tel_id`""" # memoize fov calculation if tel_id not in self.pixel_widths: x, y = self.get_pixel_coords(tel_id) @@ -280,7 +287,7 @@ def get_pixel_width(self, tel_id): return self.pixel_widths[tel_id] def get_pixel_coords(self, tel_id): - '''Get pixel coords in telescope frame for telescope with id `tel_id`''' + """Get pixel coords in telescope frame for telescope with id `tel_id`""" # memoize transformation if tel_id not in self.pixels_in_tel_frame: telescope = self.source.subarray.tel[tel_id] @@ -298,8 +305,7 @@ def get_pixel_coords(self, tel_id): def finish(self): Provenance().add_output_file( - self.output, - role='muon_efficiency_parameters', + self.output, role="muon_efficiency_parameters", ) self.writer.close() @@ -309,5 +315,5 @@ def main(): tool.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ctapipe/tools/plot_charge_resolution.py 
b/ctapipe/tools/plot_charge_resolution.py index f62998b2e50..323a0b2eac1 100644 --- a/ctapipe/tools/plot_charge_resolution.py +++ b/ctapipe/tools/plot_charge_resolution.py @@ -10,22 +10,20 @@ class ChargeResolutionViewer(Tool): name = "ChargeResolutionViewer" - description = ("Plot charge resolutions generated by " - "ChargeResolutionCalculator.") + description = "Plot charge resolutions generated by " "ChargeResolutionCalculator." input_files = List( - Unicode(), None, - help='Input HDF5 files produced by ChargeResolutionCalculator' + Unicode(), None, help="Input HDF5 files produced by ChargeResolutionCalculator" ).tag(config=True) - aliases = Dict(dict( - f='ChargeResolutionViewer.input_files', - B='ChargeResolutionPlotter.n_bins', - o='ChargeResolutionPlotter.output_path', - )) - classes = List([ - ChargeResolutionPlotter, - ]) + aliases = Dict( + dict( + f="ChargeResolutionViewer.input_files", + B="ChargeResolutionPlotter.n_bins", + o="ChargeResolutionPlotter.output_path", + ) + ) + classes = List([ChargeResolutionPlotter,]) def __init__(self, **kwargs): super().__init__(**kwargs) @@ -51,5 +49,5 @@ def main(): exe.run() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ctapipe/tools/stage1.py b/ctapipe/tools/stage1.py index 3c74764e1f5..0a92f295719 100644 --- a/ctapipe/tools/stage1.py +++ b/ctapipe/tools/stage1.py @@ -296,7 +296,7 @@ def setup(self): ) # store last pointing to only write unique poitings - self._last_pointing_tel = defaultdict(lambda : (np.nan * u.deg, np.nan * u.deg)) + self._last_pointing_tel = defaultdict(lambda: (np.nan * u.deg, np.nan * u.deg)) def _write_simulation_configuration(self, writer): """ @@ -521,8 +521,8 @@ def _process_events(self, writer): p = event.pointing current_pointing = (p.array_azimuth, p.array_altitude) if current_pointing != last_pointing: - p.prefix = '' - writer.write('dl1/monitoring/subarray/pointing', [event.trigger, p]) + p.prefix = "" + writer.write("dl1/monitoring/subarray/pointing", [event.trigger, p]) last_pointing = current_pointing # write the subarray tables @@ -558,10 +558,10 @@ def _write_telescope_event(self, writer, event): p = event.pointing.tel[tel_id] current_pointing = (p.azimuth, p.altitude) if current_pointing != self._last_pointing_tel[tel_id]: - p.prefix = '' + p.prefix = "" writer.write( - f'dl1/monitoring/telescope/pointing/tel_{tel_id:03d}', - [event.trigger.tel[tel_id], p] + f"dl1/monitoring/telescope/pointing/tel_{tel_id:03d}", + [event.trigger.tel[tel_id], p], ) self._last_pointing_tel[tel_id] = current_pointing @@ -570,8 +570,7 @@ def _write_telescope_event(self, writer, event): ) writer.write( - 'dl1/event/telescope/trigger', - [tel_index, event.trigger.tel[tel_id]] + "dl1/event/telescope/trigger", [tel_index, event.trigger.tel[tel_id]] ) if self.event_source.is_simulation: @@ -668,9 +667,9 @@ def _setup_writer(self, writer): ) # exclude some columns that are not writable - writer.exclude("dl1/event/subarray/trigger", 'tel') - writer.exclude("dl1/monitoring/subarray/pointing", 'tel') - writer.exclude("dl1/monitoring/subarray/pointing", 'event_type') + writer.exclude("dl1/event/subarray/trigger", "tel") + writer.exclude("dl1/monitoring/subarray/pointing", "tel") + writer.exclude("dl1/monitoring/subarray/pointing", "event_type") for tel_id, telescope in self.event_source.subarray.tel.items(): tel_type = str(telescope) if self.split_datasets_by == "tel_id": @@ -683,7 +682,9 @@ def _setup_writer(self, writer): f"/dl1/event/telescope/images/{table_name}", "image_mask" ) 
writer.exclude(f"/dl1/event/telescope/images/{table_name}", "parameters") - writer.exclude(f"/dl1/monitoring/event/pointing/tel_{tel_id:03d}", 'event_type') + writer.exclude( + f"/dl1/monitoring/event/pointing/tel_{tel_id:03d}", "event_type" + ) if self.event_source.is_simulation: writer.exclude( f"/simulation/event/telescope/images/{table_name}", diff --git a/ctapipe/tools/tests/test_tools.py b/ctapipe/tools/tests/test_tools.py index 83fb005d54e..27ef5991a0c 100644 --- a/ctapipe/tools/tests/test_tools.py +++ b/ctapipe/tools/tests/test_tools.py @@ -18,26 +18,29 @@ GAMMA_TEST_LARGE = get_dataset_path("gamma_test_large.simtel.gz") -LST_MUONS = get_dataset_path('lst_muons.simtel.zst') +LST_MUONS = get_dataset_path("lst_muons.simtel.zst") def test_stage_1(): from ctapipe.tools.stage1 import Stage1ProcessorTool - with tempfile.NamedTemporaryFile(suffix='.hdf5') as f: - assert run_tool( - Stage1ProcessorTool(), - argv=[ - '--config=./examples/stage1_config.json', - f"--input={GAMMA_TEST_LARGE}", - f'--output={f.name}', - '--write-parameters', - '--overwrite', - ] - ) == 0 + with tempfile.NamedTemporaryFile(suffix=".hdf5") as f: + assert ( + run_tool( + Stage1ProcessorTool(), + argv=[ + "--config=./examples/stage1_config.json", + f"--input={GAMMA_TEST_LARGE}", + f"--output={f.name}", + "--write-parameters", + "--overwrite", + ], + ) + == 0 + ) # check tables were written - with tables.open_file(f.name, mode='r') as tf: + with tables.open_file(f.name, mode="r") as tf: assert tf.root.dl1 assert tf.root.dl1.event.telescope assert tf.root.dl1.event.subarray @@ -47,27 +50,34 @@ def test_stage_1(): assert tf.root.configuration.instrument.telescope.camera.readout_LSTCam # check we can read telescope parametrs - dl1_features = pd.read_hdf(f.name, '/dl1/event/telescope/parameters/tel_001') + dl1_features = pd.read_hdf(f.name, "/dl1/event/telescope/parameters/tel_001") features = ( - 'obs_id', 'event_id', 'tel_id', - 'hillas_intensity', 'concentration_cog', 'leakage_pixels_width_1' + "obs_id", + "event_id", + "tel_id", + "hillas_intensity", + "concentration_cog", + "leakage_pixels_width_1", ) for feature in features: assert feature in dl1_features.columns - with tempfile.NamedTemporaryFile(suffix='.hdf5') as f: - assert run_tool( - Stage1ProcessorTool(), - argv=[ - '--config=./examples/stage1_config.json', - f"--input={GAMMA_TEST_LARGE}", - f'--output={f.name}', - '--write-images', - '--overwrite', - ] - ) == 0 - - with tables.open_file(f.name, mode='r') as tf: + with tempfile.NamedTemporaryFile(suffix=".hdf5") as f: + assert ( + run_tool( + Stage1ProcessorTool(), + argv=[ + "--config=./examples/stage1_config.json", + f"--input={GAMMA_TEST_LARGE}", + f"--output={f.name}", + "--write-images", + "--overwrite", + ], + ) + == 0 + ) + + with tables.open_file(f.name, mode="r") as tf: assert tf.root.dl1 assert tf.root.dl1.event.telescope assert tf.root.dl1.event.subarray @@ -77,28 +87,27 @@ def test_stage_1(): assert tf.root.configuration.instrument.telescope.camera.readout_LSTCam assert tf.root.dl1.event.telescope.images.tel_001 dl1_image = tf.root.dl1.event.telescope.images.tel_001 - assert 'image_mask' in dl1_image.dtype.names - assert 'image' in dl1_image.dtype.names - assert 'peak_time' in dl1_image.dtype.names + assert "image_mask" in dl1_image.dtype.names + assert "image" in dl1_image.dtype.names + assert "peak_time" in dl1_image.dtype.names def test_muon_reconstruction(tmpdir): from ctapipe.tools.muon_reconstruction import MuonAnalysis - with tempfile.NamedTemporaryFile(suffix='.hdf5') as f: - assert 
run_tool( - MuonAnalysis(), - argv=[ - f"--input={LST_MUONS}", - f"--output={f.name}", - '--overwrite', - ] - ) == 0 + with tempfile.NamedTemporaryFile(suffix=".hdf5") as f: + assert ( + run_tool( + MuonAnalysis(), + argv=[f"--input={LST_MUONS}", f"--output={f.name}", "--overwrite",], + ) + == 0 + ) t = tables.open_file(f.name) table = t.root.dl1.event.telescope.parameters.muons[:] assert len(table) > 20 - assert np.count_nonzero(np.isnan(table['muonring_radius'])) == 0 + assert np.count_nonzero(np.isnan(table["muonring_radius"])) == 0 assert run_tool(MuonAnalysis(), ["--help-all"]) == 0 @@ -107,10 +116,13 @@ def test_display_summed_images(tmpdir): from ctapipe.tools.display_summed_images import ImageSumDisplayerTool mpl.use("Agg") - assert run_tool( - ImageSumDisplayerTool(), - argv=shlex.split(f"--infile={GAMMA_TEST_LARGE} " "--max-events=2 ") - ) == 0 + assert ( + run_tool( + ImageSumDisplayerTool(), + argv=shlex.split(f"--infile={GAMMA_TEST_LARGE} " "--max-events=2 "), + ) + == 0 + ) assert run_tool(ImageSumDisplayerTool(), ["--help-all"]) == 0 @@ -120,10 +132,13 @@ def test_display_integrator(tmpdir): mpl.use("Agg") - assert run_tool( - DisplayIntegrator(), - argv=shlex.split(f"--f={GAMMA_TEST_LARGE} " "--max_events=1 ") - ) == 0 + assert ( + run_tool( + DisplayIntegrator(), + argv=shlex.split(f"--f={GAMMA_TEST_LARGE} " "--max_events=1 "), + ) + == 0 + ) assert run_tool(DisplayIntegrator(), ["--help-all"]) == 0 @@ -133,14 +148,17 @@ def test_display_events_single_tel(tmpdir): mpl.use("Agg") - assert run_tool( - SingleTelEventDisplay(), - argv=shlex.split( - f"--infile={GAMMA_TEST_LARGE} " - "--tel=11 " - "--max-events=2 " # <--- inconsistent!!! + assert ( + run_tool( + SingleTelEventDisplay(), + argv=shlex.split( + f"--infile={GAMMA_TEST_LARGE} " + "--tel=11 " + "--max-events=2 " # <--- inconsistent!!! + ), ) - ) == 0 + == 0 + ) assert run_tool(SingleTelEventDisplay(), ["--help-all"]) == 0 @@ -150,10 +168,12 @@ def test_display_dl1(tmpdir): mpl.use("Agg") - assert run_tool( - DisplayDL1Calib(), - argv=shlex.split("--max_events=1 " "--telescope=11 ") - ) == 0 + assert ( + run_tool( + DisplayDL1Calib(), argv=shlex.split("--max_events=1 " "--telescope=11 ") + ) + == 0 + ) assert run_tool(DisplayDL1Calib(), ["--help-all"]) == 0 @@ -233,6 +253,6 @@ def test_plot_charge_resolution(tmpdir): output_path = os.path.join(str(tmpdir), "cr.pdf") tool = ChargeResolutionViewer() - assert run_tool(tool, ["-f", [path], "-o", output_path]) == 0 + assert run_tool(tool, ["-f", [path], "-o", output_path]) == 0 assert os.path.exists(output_path) assert run_tool(tool, ["--help-all"]) == 0 diff --git a/ctapipe/tools/utils.py b/ctapipe/tools/utils.py index e5efbdd886d..f635e82a6de 100644 --- a/ctapipe/tools/utils.py +++ b/ctapipe/tools/utils.py @@ -4,27 +4,31 @@ import importlib from collections import OrderedDict -__all__ = ['ArgparseFormatter', - 'get_parser', - 'get_installed_tools', - 'get_all_descriptions', - ] +__all__ = [ + "ArgparseFormatter", + "get_parser", + "get_installed_tools", + "get_all_descriptions", +] -class ArgparseFormatter(argparse.ArgumentDefaultsHelpFormatter, - argparse.RawTextHelpFormatter): +class ArgparseFormatter( + argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter +): """ArgumentParser formatter_class argument. """ + pass -def get_parser(function=None, description='N/A'): +def get_parser(function=None, description="N/A"): """Make an ArgumentParser how we like it. 
""" if function: - description = function.__doc__.split('\n')[0] - parser = argparse.ArgumentParser(description=description, - formatter_class=ArgparseFormatter) + description = function.__doc__.split("\n")[0] + parser = argparse.ArgumentParser( + description=description, formatter_class=ArgparseFormatter + ) return parser @@ -37,7 +41,8 @@ def get_installed_tools(): of installed packages matches the available scripts somehow? """ from pkg_resources import get_entry_map - console_tools = get_entry_map('ctapipe')['console_scripts'] + + console_tools = get_entry_map("ctapipe")["console_scripts"] return console_tools @@ -48,7 +53,7 @@ def get_all_descriptions(): descriptions = OrderedDict() for name, info in tools.items(): module = importlib.import_module(info.module_name) - if hasattr(module, '__doc__') and module.__doc__ is not None: + if hasattr(module, "__doc__") and module.__doc__ is not None: try: descrip = module.__doc__ descrip.replace("\n", "") @@ -59,4 +64,3 @@ def get_all_descriptions(): descriptions[name] = "[no documentation. Please add a docstring]" return descriptions - diff --git a/ctapipe/utils/__init__.py b/ctapipe/utils/__init__.py index 893839fdf01..1eb543295d7 100644 --- a/ctapipe/utils/__init__.py +++ b/ctapipe/utils/__init__.py @@ -3,7 +3,9 @@ from .table_interpolator import TableInterpolator from .unstructured_interpolator import UnstructuredInterpolator from .datasets import ( - find_all_matching_datasets, get_table_dataset, get_dataset_path, + find_all_matching_datasets, + get_table_dataset, + get_dataset_path, find_in_path, ) from .astro import get_bright_stars @@ -11,15 +13,15 @@ __all__ = [ - 'Histogram', - 'TableInterpolator', - 'UnstructuredInterpolator', - 'find_all_matching_datasets', - 'get_table_dataset', - 'get_dataset_path', - 'find_in_path', - 'get_bright_stars', - 'CutFlow', - 'PureCountingCut', - 'UndefinedCut', + "Histogram", + "TableInterpolator", + "UnstructuredInterpolator", + "find_all_matching_datasets", + "get_table_dataset", + "get_dataset_path", + "find_in_path", + "get_bright_stars", + "CutFlow", + "PureCountingCut", + "UndefinedCut", ] diff --git a/ctapipe/utils/astro.py b/ctapipe/utils/astro.py index caed19f8bf7..fb70ef59e5f 100644 --- a/ctapipe/utils/astro.py +++ b/ctapipe/utils/astro.py @@ -8,7 +8,7 @@ from astropy.coordinates import SkyCoord from astropy import units as u -__all__ = ['get_bright_stars'] +__all__ = ["get_bright_stars"] def get_bright_stars(pointing=None, radius=None, magnitude_cut=None): @@ -38,25 +38,29 @@ def get_bright_stars(pointing=None, radius=None, magnitude_cut=None): """ from ctapipe.utils import get_table_dataset - catalog = get_table_dataset("yale_bright_star_catalog5", - role="bright star catalog") + catalog = get_table_dataset("yale_bright_star_catalog5", role="bright star catalog") - starpositions = SkyCoord(ra=Angle(catalog['RAJ2000'], unit=u.deg), - dec=Angle(catalog['DEJ2000'], unit=u.deg), - frame='icrs', copy=False) - catalog['ra_dec'] = starpositions + starpositions = SkyCoord( + ra=Angle(catalog["RAJ2000"], unit=u.deg), + dec=Angle(catalog["DEJ2000"], unit=u.deg), + frame="icrs", + copy=False, + ) + catalog["ra_dec"] = starpositions if magnitude_cut is not None: - catalog = catalog[catalog['Vmag'] < magnitude_cut] + catalog = catalog[catalog["Vmag"] < magnitude_cut] if radius is not None: if pointing is None: - raise ValueError('Sky pointing, pointing=SkyCoord(), must be ' - 'provided if radius is given.') - separations = catalog['ra_dec'].separation(pointing) - catalog['separation'] = separations + 
raise ValueError( + "Sky pointing, pointing=SkyCoord(), must be " + "provided if radius is given." + ) + separations = catalog["ra_dec"].separation(pointing) + catalog["separation"] = separations catalog = catalog[separations < radius] - catalog.remove_columns(['RAJ2000', 'DEJ2000']) + catalog.remove_columns(["RAJ2000", "DEJ2000"]) return catalog diff --git a/ctapipe/utils/datasets.py b/ctapipe/utils/datasets.py index 37cc2d0436c..e39796abd2f 100644 --- a/ctapipe/utils/datasets.py +++ b/ctapipe/utils/datasets.py @@ -10,6 +10,7 @@ try: import ctapipe_resources + has_resources = True except ImportError: has_resources = False @@ -19,19 +20,17 @@ logger = logging.getLogger(__name__) -__all__ = ['get_dataset_path', 'find_in_path', 'find_all_matching_datasets'] +__all__ = ["get_dataset_path", "find_in_path", "find_all_matching_datasets"] def get_searchpath_dirs(searchpath=os.getenv("CTAPIPE_SVC_PATH")): """ returns a list of dirs in specified searchpath""" if searchpath == "" or searchpath is None: return [] - return os.path.expandvars(searchpath).split(':') + return os.path.expandvars(searchpath).split(":") -def find_all_matching_datasets(pattern, - searchpath=None, - regexp_group=None): +def find_all_matching_datasets(pattern, searchpath=None, regexp_group=None): """ Returns a list of resource names (or substrings) matching the given pattern, searching first in searchpath (a colon-separated list of @@ -72,7 +71,7 @@ def find_all_matching_datasets(pattern, # then check resources module if has_resources: - for resource in resource_listdir('ctapipe_resources', ''): + for resource in resource_listdir("ctapipe_resources", ""): match = re.match(pattern, resource) if match: if regexp_group is not None: @@ -136,8 +135,10 @@ def get_dataset_path(filename): return filepath if has_resources: - logger.debug("Resource '{}' not found in CTAPIPE_SVC_PATH, looking in " - "ctapipe_resources...".format(filename)) + logger.debug( + "Resource '{}' not found in CTAPIPE_SVC_PATH, looking in " + "ctapipe_resources...".format(filename) + ) return ctapipe_resources.get(filename) @@ -147,7 +148,7 @@ def get_dataset_path(filename): ) -def get_table_dataset(table_name, role='resource', **kwargs): +def get_table_dataset(table_name, role="resource", **kwargs): """ get a tabular dataset as an `astropy.table.Table` object @@ -170,10 +171,10 @@ def get_table_dataset(table_name, role='resource', **kwargs): # a mapping of types (keys) to any extra keyword args needed for # table.read() types_to_try = { - '.fits.gz': {}, - '.fits': {}, - '.ecsv': dict(format='ascii.ecsv'), - '.ecsv.txt': dict(format='ascii.ecsv'), + ".fits.gz": {}, + ".fits": {}, + ".ecsv": dict(format="ascii.ecsv"), + ".ecsv.txt": dict(format="ascii.ecsv"), } for table_type in types_to_try: @@ -189,11 +190,12 @@ def get_table_dataset(table_name, role='resource', **kwargs): except FileNotFoundError: pass - raise FileNotFoundError("couldn't locate table: {}[{}]".format( - table_name, ', '.join(types_to_try))) + raise FileNotFoundError( + "couldn't locate table: {}[{}]".format(table_name, ", ".join(types_to_try)) + ) -def get_structured_dataset(basename, role='resource', **kwargs): +def get_structured_dataset(basename, role="resource", **kwargs): """ find and return a YAML or JSON dataset as a dictionary @@ -215,9 +217,9 @@ def get_structured_dataset(basename, role='resource', **kwargs): # a mapping of types (keys) to any extra keyword args needed for # table.read() types_to_try = { - '.yaml': {}, - '.yml': {}, - '.json': {}, + ".yaml": {}, + ".yml": {}, + 
".json": {}, } for data_type in types_to_try: @@ -229,9 +231,9 @@ def get_structured_dataset(basename, role='resource', **kwargs): args.update(kwargs) with open(fullname) as infile: - if data_type == '.yaml' or data_type == '.yml': + if data_type == ".yaml" or data_type == ".yml": dataset = yaml.safe_load(infile, **args) - elif data_type == '.json': + elif data_type == ".json": dataset = json.load(infile, **args) Provenance().add_input_file(fullname, role) @@ -239,5 +241,8 @@ def get_structured_dataset(basename, role='resource', **kwargs): except FileNotFoundError: pass - raise FileNotFoundError("couldn't locate structed dataset: {}[{}]".format( - basename, ', '.join(types_to_try))) + raise FileNotFoundError( + "couldn't locate structed dataset: {}[{}]".format( + basename, ", ".join(types_to_try) + ) + ) diff --git a/ctapipe/utils/fitshistogram.py b/ctapipe/utils/fitshistogram.py index 12152111081..40805c2861f 100644 --- a/ctapipe/utils/fitshistogram.py +++ b/ctapipe/utils/fitshistogram.py @@ -4,7 +4,7 @@ from astropy.io import fits from astropy.wcs import WCS -__all__ = ['Histogram'] +__all__ = ["Histogram"] class Histogram: @@ -60,8 +60,7 @@ class Histogram: """ - def __init__(self, nbins=None, ranges=None, name="Histogram", - axis_names=None): + def __init__(self, nbins=None, ranges=None, name="Histogram", axis_names=None): """ Initialize an unfilled histogram (need to call fill() put data into it) see also @@ -85,8 +84,11 @@ def __init__(self, nbins=None, ranges=None, name="Histogram", if self.ndims < 1: raise ValueError("No dimensions specified") if self.ndims != len(self._ranges): - raise ValueError("Dimensions of ranges {} don't match bins {}" - .format(len(self._ranges), self.ndims)) + raise ValueError( + "Dimensions of ranges {} don't match bins {}".format( + len(self._ranges), self.ndims + ) + ) if self.axis_names is not None: # ensure the array is size ndims self.axis_names = np.array(self.axis_names) @@ -95,10 +97,15 @@ def __init__(self, nbins=None, ranges=None, name="Histogram", self.axis_names = [f"axis{x}" for x in range(self.ndims)] def __str__(self,): - return ("Histogram(name='{name}', axes={axnames}, " - "nbins={nbins}, ranges={ranges})" - .format(name=self.name, ranges=self._ranges, - nbins=self._nbins, axnames=self.axis_names)) + return ( + "Histogram(name='{name}', axes={axnames}, " + "nbins={nbins}, ranges={ranges})".format( + name=self.name, + ranges=self._ranges, + nbins=self._nbins, + axnames=self.axis_names, + ) + ) @property def bin_lower_edges(self): @@ -108,10 +115,12 @@ def bin_lower_edges(self): use in vector operations """ if self._bin_lower_edges is None: - self._bin_lower_edges = [np.linspace(self._ranges[ii][0], - self._ranges[ii][-1], - self._nbins[ii] + 1) - for ii in range(self.ndims)] + self._bin_lower_edges = [ + np.linspace( + self._ranges[ii][0], self._ranges[ii][-1], self._nbins[ii] + 1 + ) + for ii in range(self.ndims) + ] return self._bin_lower_edges @property @@ -150,8 +159,9 @@ def fill(self, datapoints, **kwargs): creating the histogram """ - hist, __ = np.histogramdd(datapoints, bins=self._nbins, - range=self._ranges, **kwargs) + hist, __ = np.histogramdd( + datapoints, bins=self._nbins, range=self._ranges, **kwargs + ) self.data += hist self._numsamples += len(datapoints) @@ -160,8 +170,9 @@ def bin_centers(self, index): """ returns array of bin centers for the given index """ - return 0.5 * (self.bin_lower_edges[index][1:] + - self.bin_lower_edges[index][0:-1]) + return 0.5 * ( + self.bin_lower_edges[index][1:] + 
self.bin_lower_edges[index][0:-1] + ) def to_fits(self): """ @@ -216,8 +227,9 @@ def to_fits(self): if self.value_zero: ohdu.header.set("BZERO", float(self.value_zero)) - ohdu.header.set("NSAMP", self._numsamples, "Number of samples " - "originally filled") + ohdu.header.set( + "NSAMP", self._numsamples, "Number of samples " "originally filled" + ) return ohdu @@ -306,9 +318,12 @@ def get_value(self, coords, outlier_value=None): world = np.array(coords, ndmin=2) # at least 2D ndims = len(self._nbins) - bins = np.array([np.digitize(world[:, ii], - self.bin_lower_edges[ii][1:]) - for ii in range(ndims)]) + bins = np.array( + [ + np.digitize(world[:, ii], self.bin_lower_edges[ii][1:]) + for ii in range(ndims) + ] + ) maxbin = np.array(self.data.shape) @@ -343,9 +358,12 @@ def draw_2d(self, dims=(0, 1), **kwargs): if len(dims) != 2: raise ValueError("dims must be a length-2 integer array") - pyplot.pcolormesh(self.bin_lower_edges[dims[0]], - self.bin_lower_edges[dims[1]], - self.data, **kwargs) + pyplot.pcolormesh( + self.bin_lower_edges[dims[0]], + self.bin_lower_edges[dims[1]], + self.data, + **kwargs, + ) pyplot.title(self.name) pyplot.xlabel(self.axis_names[dims[0]]) pyplot.ylabel(self.axis_names[dims[1]]) @@ -354,8 +372,7 @@ def draw_1d(self, dim=0, **kwargs): from matplotlib import pyplot # todo fix this to work properly with dim argument! - pyplot.plot(self.bin_centers(dim), self.data, drawstyle='steps-mid', - **kwargs) + pyplot.plot(self.bin_centers(dim), self.data, drawstyle="steps-mid", **kwargs) def resample_inplace(self, nbins): """ @@ -375,8 +392,9 @@ def resample_inplace(self, nbins): # iold = np.indices(oldbins) inew = np.indices(nbins) - coords = np.array([inew[X] * (oldbins[X]) / float(nbins[X]) - for X in range(len(nbins))]) + coords = np.array( + [inew[X] * (oldbins[X]) / float(nbins[X]) for X in range(len(nbins))] + ) self._nbins = nbins self.data = ndimage.map_coordinates(self.data, coords) diff --git a/ctapipe/utils/linalg.py b/ctapipe/utils/linalg.py index 30ed088a7de..b244ea363ba 100644 --- a/ctapipe/utils/linalg.py +++ b/ctapipe/utils/linalg.py @@ -4,7 +4,7 @@ import numpy as np from numpy import cos, sin, arctan2 as atan2, arccos as acos -__all__ = ['rotation_matrix_2d', 'length', 'normalise', 'angle'] +__all__ = ["rotation_matrix_2d", "length", "normalise", "angle"] def rotation_matrix_2d(angle): @@ -13,16 +13,14 @@ def rotation_matrix_2d(angle): into an `astropy.coordinates.Angle` """ psi = Angle(angle).rad - return np.array([[cos(psi), -sin(psi)], - [sin(psi), cos(psi)]]) - + return np.array([[cos(psi), -sin(psi)], [sin(psi), cos(psi)]]) def length(vec): """ returns the length/norm of a numpy array as the square root of the inner product with itself """ - return vec.dot(vec)**.5 + return vec.dot(vec) ** 0.5 def normalise(vec): diff --git a/ctapipe/utils/rgbtohex.py b/ctapipe/utils/rgbtohex.py index 8ef0130cb7d..0f016d70603 100644 --- a/ctapipe/utils/rgbtohex.py +++ b/ctapipe/utils/rgbtohex.py @@ -1,7 +1,8 @@ from matplotlib.cm import get_cmap import numpy as np import codecs -viridis = get_cmap('viridis') + +viridis = get_cmap("viridis") def intensity_to_rgb(array, minval=None, maxval=None): @@ -58,13 +59,13 @@ def intensity_to_hex(array, minval=None, maxval=None): hex strings representing the intensity as a color """ - hex_ = np.zeros((array.size, 9), dtype='B') + hex_ = np.zeros((array.size, 9), dtype="B") rgb = intensity_to_rgb(array, minval, maxval) - hex_encoded = codecs.encode(rgb, 'hex') - bytes_ = np.frombuffer(hex_encoded, 'B') + hex_encoded = 
codecs.encode(rgb, "hex") + bytes_ = np.frombuffer(hex_encoded, "B") bytes_2d = bytes_.reshape(-1, 8) - hex_[:, 0] = ord('#') + hex_[:, 0] = ord("#") hex_[:, 1:9] = bytes_2d - return hex_.view('S9').astype('U9')[:, 0] + return hex_.view("S9").astype("U9")[:, 0] diff --git a/ctapipe/utils/table_interpolator.py b/ctapipe/utils/table_interpolator.py index d86327ddff6..5128982ef8e 100644 --- a/ctapipe/utils/table_interpolator.py +++ b/ctapipe/utils/table_interpolator.py @@ -61,16 +61,18 @@ def __init__(self, filename, verbose=1): grid, bins, template = self.parse_fits_table(filename) x_bins, y_bins = bins - self.interpolator = interpolate.LinearNDInterpolator(grid, template, fill_value=0) + self.interpolator = interpolate.LinearNDInterpolator( + grid, template, fill_value=0 + ) self.nearest_interpolator = interpolate.NearestNDInterpolator(grid, template) - self.grid_interp = interpolate.RegularGridInterpolator((x_bins, y_bins), - np.zeros([x_bins.shape[0], - y_bins.shape[ - 0]]), - method="linear", - bounds_error=False, - fill_value=0) + self.grid_interp = interpolate.RegularGridInterpolator( + (x_bins, y_bins), + np.zeros([x_bins.shape[0], y_bins.shape[0]]), + method="linear", + bounds_error=False, + fill_value=0, + ) def parse_fits_table(self, filename): """ diff --git a/ctapipe/utils/template_network_interpolator.py b/ctapipe/utils/template_network_interpolator.py index 067e08f5ed2..11e6901ca51 100644 --- a/ctapipe/utils/template_network_interpolator.py +++ b/ctapipe/utils/template_network_interpolator.py @@ -9,6 +9,7 @@ class TemplateNetworkInterpolator: """ Class for interpolating between the the predictions """ + def __init__(self, template_file): """ @@ -20,8 +21,9 @@ def __init__(self, template_file): file_list = gzip.open(template_file) input_dict = pickle.load(file_list) - self.interpolator = UnstructuredInterpolator(input_dict, remember_last=True, - bounds=((-5, 1),(-1.5, 1.5))) + self.interpolator = UnstructuredInterpolator( + input_dict, remember_last=True, bounds=((-5, 1), (-1.5, 1.5)) + ) def reset(self): """ @@ -54,7 +56,7 @@ def __call__(self, energy, impact, xmax, xb, yb): points = ma.dstack((xb, yb)) interpolated_value = self.interpolator(array, points) - interpolated_value[interpolated_value<0] = 0 + interpolated_value[interpolated_value < 0] = 0 interpolated_value = interpolated_value return interpolated_value @@ -64,6 +66,7 @@ class TimeGradientInterpolator: """ Class for interpolating between the time gradient predictions """ + def __init__(self, template_file): """ @@ -98,4 +101,4 @@ def __call__(self, energy, impact, xmax): interpolated_value = self.interpolator(array) - return interpolated_value \ No newline at end of file + return interpolated_value diff --git a/ctapipe/utils/tests/test_CutFlow.py b/ctapipe/utils/tests/test_CutFlow.py index adfc888ce9d..ab15efc3363 100644 --- a/ctapipe/utils/tests/test_CutFlow.py +++ b/ctapipe/utils/tests/test_CutFlow.py @@ -39,7 +39,6 @@ def test_CutFlow(): except: pass - t = flow(sort_column=1) assert np.all(t["selected Events"] == [4, 3, 2, 1]) @@ -54,32 +53,30 @@ def test_set_cuts_clear(): flow = CutFlow("TestFlow") flow.set_cut("smaller5", smaller5) - flow.set_cuts(OrderedDict([ - ("smaller3", smaller3), - ("smaller2", smaller2) - ]), clear=True) + flow.set_cuts( + OrderedDict([("smaller3", smaller3), ("smaller2", smaller2)]), clear=True + ) - assert flow.cuts == OrderedDict([ - ("smaller3", [smaller3, 0]), - ("smaller2", [smaller2, 0]) - ]) + assert flow.cuts == OrderedDict( + [("smaller3", [smaller3, 0]), ("smaller2", 
[smaller2, 0])] + ) def test_set_cuts_no_clear(): flow = CutFlow("TestFlow") flow.set_cut("smaller5", smaller5) - flow.set_cuts(OrderedDict([ - ("smaller3", smaller3), - ("smaller2", smaller2) - ]), clear=False) - - - assert flow.cuts == OrderedDict([ - ("smaller5", [smaller5, 0]), - ("smaller3", [smaller3, 0]), - ("smaller2", [smaller2, 0]) - ]) + flow.set_cuts( + OrderedDict([("smaller3", smaller3), ("smaller2", smaller2)]), clear=False + ) + + assert flow.cuts == OrderedDict( + [ + ("smaller5", [smaller5, 0]), + ("smaller3", [smaller3, 0]), + ("smaller2", [smaller2, 0]), + ] + ) if __name__ == "__main__": diff --git a/ctapipe/utils/tests/test_astro.py b/ctapipe/utils/tests/test_astro.py index 6676b142363..f9851dd1311 100644 --- a/ctapipe/utils/tests/test_astro.py +++ b/ctapipe/utils/tests/test_astro.py @@ -6,14 +6,15 @@ from astropy.coordinates import SkyCoord from astropy import units as u + def test_get_bright_stars(): """ unit test for utils.astro.get_bright_stars(). Tests that only Zeta Tau is returned close to the Crab Nebula as object brighter than mag=3.5. """ - pointing = SkyCoord(ra=83.275 * u.deg, dec=21.791 * u.deg, frame='icrs') + pointing = SkyCoord(ra=83.275 * u.deg, dec=21.791 * u.deg, frame="icrs") - table = get_bright_stars(pointing, radius=2. * u.deg, magnitude_cut=3.5) + table = get_bright_stars(pointing, radius=2.0 * u.deg, magnitude_cut=3.5) assert len(table) == 1 - assert table[0]['Name'] == '123Zet Tau' + assert table[0]["Name"] == "123Zet Tau" diff --git a/ctapipe/utils/tests/test_datasets.py b/ctapipe/utils/tests/test_datasets.py index 966aba296fc..2537baa8583 100644 --- a/ctapipe/utils/tests/test_datasets.py +++ b/ctapipe/utils/tests/test_datasets.py @@ -16,8 +16,7 @@ def test_find_datasets(): assert os.path.exists(datasets.get_dataset_path(r[0])) # try using a pattern - r = datasets.find_all_matching_datasets(r"(.*)\.camgeom\.fits\.gz", - regexp_group=1) + r = datasets.find_all_matching_datasets(r"(.*)\.camgeom\.fits\.gz", regexp_group=1) assert not r[0].endswith("gz") @@ -26,9 +25,9 @@ def test_datasets_in_custom_path(tmpdir_factory): check that a dataset in a user-defined CTAPIPE_SVC_PATH is located """ - tmpdir1 = tmpdir_factory.mktemp('datasets1') - tmpdir2 = tmpdir_factory.mktemp('datasets2') - os.environ['CTAPIPE_SVC_PATH'] = ":".join([str(tmpdir1), str(tmpdir2)]) + tmpdir1 = tmpdir_factory.mktemp("datasets1") + tmpdir2 = tmpdir_factory.mktemp("datasets2") + os.environ["CTAPIPE_SVC_PATH"] = ":".join([str(tmpdir1), str(tmpdir2)]) # create a dummy dataset to search for: @@ -47,31 +46,31 @@ def test_datasets_in_custom_path(tmpdir_factory): # try using find_all_matching_datasets: - ds = datasets.find_all_matching_datasets("test.*", - searchpath=os.environ[ - 'CTAPIPE_SVC_PATH']) + ds = datasets.find_all_matching_datasets( + "test.*", searchpath=os.environ["CTAPIPE_SVC_PATH"] + ) assert dataset_name in ds def test_structured_datasets(tmpdir): basename = "test.yml" - test_data = dict(x=[1, 2, 3, 4, 5], y='test_json') + test_data = dict(x=[1, 2, 3, 4, 5], y="test_json") - os.environ['CTAPIPE_SVC_PATH'] = ":".join([str(tmpdir)]) + os.environ["CTAPIPE_SVC_PATH"] = ":".join([str(tmpdir)]) - with tmpdir.join("data_test.json").open(mode='w') as fp: + with tmpdir.join("data_test.json").open(mode="w") as fp: json.dump(test_data, fp) - data1 = datasets.get_structured_dataset('data_test') - assert data1['x'] == [1, 2, 3, 4, 5] - assert data1['y'] == 'test_json' + data1 = datasets.get_structured_dataset("data_test") + assert data1["x"] == [1, 2, 3, 4, 5] + assert 
data1["y"] == "test_json" tmpdir.join("data_test.json").remove() - test_data['y'] = 'test_yaml' - with tmpdir.join("data_test.yaml").open(mode='w') as fp: + test_data["y"] = "test_yaml" + with tmpdir.join("data_test.yaml").open(mode="w") as fp: yaml.dump(test_data, fp) - data1 = datasets.get_structured_dataset('data_test') - assert data1['x'] == [1, 2, 3, 4, 5] - assert data1['y'] == 'test_yaml' + data1 = datasets.get_structured_dataset("data_test") + assert data1["x"] == [1, 2, 3, 4, 5] + assert data1["y"] == "test_yaml" diff --git a/ctapipe/utils/tests/test_fitshistogram.py b/ctapipe/utils/tests/test_fitshistogram.py index 58a50c663d1..04ef6fbe7cc 100644 --- a/ctapipe/utils/tests/test_fitshistogram.py +++ b/ctapipe/utils/tests/test_fitshistogram.py @@ -11,15 +11,15 @@ def compare_histograms(hist1: Histogram, hist2: Histogram): assert (hist1.data == hist2.data).all for ii in range(hist1.ndims): - assert np.isclose(hist1.bin_lower_edges[ii], - hist2.bin_lower_edges[ii]).all() + assert np.isclose(hist1.bin_lower_edges[ii], hist2.bin_lower_edges[ii]).all() def test_histogram_str(): - hist = Histogram(nbins=[5, 10], - ranges=[[-2.5, 2.5], [-1, 1]], name="testhisto") - expected = ("Histogram(name='testhisto', axes=['axis0', 'axis1'], " - "nbins=[ 5 10], ranges=[[-2.5 2.5]\n [-1. 1. ]])") + hist = Histogram(nbins=[5, 10], ranges=[[-2.5, 2.5], [-1, 1]], name="testhisto") + expected = ( + "Histogram(name='testhisto', axes=['axis0', 'axis1'], " + "nbins=[ 5 10], ranges=[[-2.5 2.5]\n [-1. 1. ]])" + ) assert str(hist) == expected @@ -55,8 +55,7 @@ def test_histogram_range_fill_and_read(): for xx in np.arange(-2.0, 2.0, 0.1): pp = (xx + 0.01829384, 0.1) coords = np.ones((num, 2)) * np.array(pp) - hist = Histogram(nbins=[nxbins, 10], - ranges=[[-2.5, 2.5], [-1, 1]]) + hist = Histogram(nbins=[nxbins, 10], ranges=[[-2.5, 2.5], [-1, 1]]) hist.fill(coords) val = hist.get_value(pp)[0] assert val == num @@ -68,18 +67,18 @@ def test_outliers(): Check that out-of-range values work as expected """ H = Histogram(nbins=[5, 10], ranges=[[-2.5, 2.5], [-1, 1]]) - H.fill(np.array([[1, 1], ])) + H.fill(np.array([[1, 1],])) val1 = H.get_value((100, 100), outlier_value=-10000) val2 = H.get_value((-100, 0), outlier_value=None) assert val1 == -10000 assert val2 == 0 -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def histogram_file(tmpdir_factory): """ a fixture that fetches a temporary output dir/file for a test histogram""" - return str(tmpdir_factory.mktemp('data').join('histogram_test.fits')) + return str(tmpdir_factory.mktemp("data").join("histogram_test.fits")) def test_histogram_fits(histogram_file): @@ -88,8 +87,7 @@ def test_histogram_fits(histogram_file): """ hist = Histogram(nbins=[5, 11], ranges=[[-2.5, 2.5], [-1, 1]]) - hist.fill(np.array([[0, 0], - [0, 0.5]])) + hist.fill(np.array([[0, 0], [0, 0.5]])) hist.to_fits().writeto(histogram_file, overwrite=True) newhist = Histogram.from_fits(histogram_file) @@ -100,8 +98,7 @@ def test_histogram_fits(histogram_file): def test_histogram_resample_inplace(): hist = Histogram(nbins=[5, 11], ranges=[[-2.5, 2.5], [-1, 1]]) - hist.fill(np.array([[0, 0], - [0, 0.5]])) + hist.fill(np.array([[0, 0], [0, 0.5]])) for testpoint in [(0, 0), (0, 1), (1, 0), (3, 3)]: val0 = hist.get_value(testpoint) diff --git a/ctapipe/utils/tests/test_linalg.py b/ctapipe/utils/tests/test_linalg.py index c466f2931e8..5d2ca0135d9 100644 --- a/ctapipe/utils/tests/test_linalg.py +++ b/ctapipe/utils/tests/test_linalg.py @@ -5,15 +5,15 @@ def test_rotation_matrix_2d(): # test that 
360 rotation is back to the identity: - assert allclose(rotation_matrix_2d('360d'), identity(2)) + assert allclose(rotation_matrix_2d("360d"), identity(2)) # test that a vector can be rotated correcly: vec = [1, 0] - mat = rotation_matrix_2d('90d') + mat = rotation_matrix_2d("90d") vecp = dot(vec, mat) - assert allclose(vecp, [0, -1]), 'vector rotation is wrong' + assert allclose(vecp, [0, -1]), "vector rotation is wrong" # test that the rotation is Hermitian - m = rotation_matrix_2d('25d') + m = rotation_matrix_2d("25d") assert allclose(dot(m, m.T), identity(2)), "rotation should be Hermetian" diff --git a/ctapipe/utils/tests/test_rgbtohex.py b/ctapipe/utils/tests/test_rgbtohex.py index a2c2e72a2dd..b9b3e7a228e 100644 --- a/ctapipe/utils/tests/test_rgbtohex.py +++ b/ctapipe/utils/tests/test_rgbtohex.py @@ -27,4 +27,4 @@ def test_rgb_nan(): def test_hex_nan(): output = intensity_to_hex(np.array([np.nan, 2]), 0, 3) - assert output[0] == '#00000000' + assert output[0] == "#00000000" diff --git a/ctapipe/utils/tests/test_unstructured_interpolator.py b/ctapipe/utils/tests/test_unstructured_interpolator.py index 04b2350a897..32ca10580ae 100644 --- a/ctapipe/utils/tests/test_unstructured_interpolator.py +++ b/ctapipe/utils/tests/test_unstructured_interpolator.py @@ -3,25 +3,22 @@ from scipy.interpolate import LinearNDInterpolator, RegularGridInterpolator import numpy.ma as ma + def test_simple_interpolation(): """ Simple test function to asses the basic funtionality of the unstructured interpolator, check if we just spread points on a grid """ - interpolation_points = {(0, 0): 0., - (0, 1): 0., - (1, 0): 1., - (1, 1): 1.} + interpolation_points = {(0, 0): 0.0, (0, 1): 0.0, (1, 0): 1.0, (1, 1): 1.0} interpolator = UnstructuredInterpolator(interpolation_points) # OK lets first just check we get the values at the grid points back out again... 
- interpolated_point = interpolator([[0, 0], [0, 1], - [1, 0], [1, 1]]) - assert np.all(interpolated_point == [0., 0., 1., 1.]) + interpolated_point = interpolator([[0, 0], [0, 1], [1, 0], [1, 1]]) + assert np.all(interpolated_point == [0.0, 0.0, 1.0, 1.0]) - interpolated_point = interpolator([[0., 0.5], [0.5, 0.5], [1, 0.5]]) + interpolated_point = interpolator([[0.0, 0.5], [0.5, 0.5], [1, 0.5]]) assert np.all(interpolated_point == [0, 0.5, 1]) @@ -32,15 +29,18 @@ def test_linear_nd(): """ # First set up 4 grid points and fill them randomly - interpolation_points = {(0, 0): np.random.rand(2, 2), - (0, 1): np.random.rand(2, 2), - (1, 0): np.random.rand(2, 2), - (1, 1): np.random.rand(2, 2)} + interpolation_points = { + (0, 0): np.random.rand(2, 2), + (0, 1): np.random.rand(2, 2), + (1, 0): np.random.rand(2, 2), + (1, 1): np.random.rand(2, 2), + } # Create UnstructuredInterpolator and LinearNDInterpolator with these points interpolator = UnstructuredInterpolator(interpolation_points) - linear_nd = LinearNDInterpolator(list(interpolation_points.keys()), - list(interpolation_points.values())) + linear_nd = LinearNDInterpolator( + list(interpolation_points.keys()), list(interpolation_points.values()) + ) # Create some random coordinates in this space points = np.random.rand(10, 2) @@ -58,18 +58,19 @@ def test_remember_last(): """ # First set up 4 grid points and fill them randomly - interpolation_points = {(0, 0): np.random.rand(2, 2), - (0, 1): np.random.rand(2, 2), - (1, 0): np.random.rand(2, 2), - (1, 1): np.random.rand(2, 2)} + interpolation_points = { + (0, 0): np.random.rand(2, 2), + (0, 1): np.random.rand(2, 2), + (1, 0): np.random.rand(2, 2), + (1, 1): np.random.rand(2, 2), + } # Create UnstructuredInterpolator and LinearNDInterpolator with these points interpolator = UnstructuredInterpolator(interpolation_points, remember_last=True) # Create some random coordinates in this space random_nums = np.random.rand(2, 2) - points_mask = ma.masked_array(random_nums, mask=[[True, False], - [True, False]]) + points_mask = ma.masked_array(random_nums, mask=[[True, False], [True, False]]) # And interpolate... 
interpolated_points = interpolator(random_nums).T[0] @@ -85,15 +86,18 @@ def test_masked_input(): """ # First set up 4 grid points and fill them randomly - interpolation_points = {(0, 0): np.random.rand(2, 2), - (0, 1): np.random.rand(2, 2), - (1, 0): np.random.rand(2, 2), - (1, 1): np.random.rand(2, 2)} + interpolation_points = { + (0, 0): np.random.rand(2, 2), + (0, 1): np.random.rand(2, 2), + (1, 0): np.random.rand(2, 2), + (1, 1): np.random.rand(2, 2), + } # Create UnstructuredInterpolator and LinearNDInterpolator with these points interpolator = UnstructuredInterpolator(interpolation_points, remember_last=True) - linear_nd = LinearNDInterpolator(list(interpolation_points.keys()), - list(interpolation_points.values())) + linear_nd = LinearNDInterpolator( + list(interpolation_points.keys()), list(interpolation_points.values()) + ) # Create some random coordinates in this space points = np.random.rand(10, 2) @@ -120,10 +124,12 @@ def test_class_output(): rand_numbers = np.random.rand(4, 11, 11) # Create input for UnstructuredInterpolator - interpolation_points = {(0, 0): RegularGridInterpolator((x, x), rand_numbers[0]), - (0, 1): RegularGridInterpolator((x, x), rand_numbers[1]), - (1, 0): RegularGridInterpolator((x, x), rand_numbers[2]), - (1, 1): RegularGridInterpolator((x, x), rand_numbers[3])} + interpolation_points = { + (0, 0): RegularGridInterpolator((x, x), rand_numbers[0]), + (0, 1): RegularGridInterpolator((x, x), rand_numbers[1]), + (1, 0): RegularGridInterpolator((x, x), rand_numbers[2]), + (1, 1): RegularGridInterpolator((x, x), rand_numbers[3]), + } # Create some random points to evaluate our interpolators pts1 = np.random.rand(1, 2) @@ -132,14 +138,17 @@ def test_class_output(): interpolator = UnstructuredInterpolator(interpolation_points) unsort_value = interpolator(pts1, pts2) - interpolation_points = {(0, 0): rand_numbers[0], - (0, 1): rand_numbers[1], - (1, 0): rand_numbers[2], - (1, 1): rand_numbers[3]} + interpolation_points = { + (0, 0): rand_numbers[0], + (0, 1): rand_numbers[1], + (1, 0): rand_numbers[2], + (1, 1): rand_numbers[3], + } # Perform the same operation by interpolating the values of the full numpy array - linear_nd = LinearNDInterpolator(list(interpolation_points.keys()), - list(interpolation_points.values())) + linear_nd = LinearNDInterpolator( + list(interpolation_points.keys()), list(interpolation_points.values()) + ) array_out = linear_nd(pts1) # Then interpolate on this grid reg_interpolator = RegularGridInterpolator((x, x), array_out[0]) @@ -155,21 +164,17 @@ def test_out_of_bounds(): the interpolations bounds """ - interpolation_points = {(0, 0): 0., - (0, 1): 0., - (1, 0): 1., - (1, 1): 1.} + interpolation_points = {(0, 0): 0.0, (0, 1): 0.0, (1, 0): 1.0, (1, 1): 1.0} interpolator = UnstructuredInterpolator(interpolation_points) - interpolated_point = interpolator([[0,2],[1,2],[2,2]]) - assert np.all(interpolated_point == [0., 1., 2.]) + interpolated_point = interpolator([[0, 2], [1, 2], [2, 2]]) + assert np.all(interpolated_point == [0.0, 1.0, 2.0]) -if __name__ == '__main__': +if __name__ == "__main__": test_simple_interpolation() test_linear_nd() test_class_output() test_out_of_bounds() - diff --git a/ctapipe/utils/unstructured_interpolator.py b/ctapipe/utils/unstructured_interpolator.py index a17e3ec770d..7db04ac8efb 100644 --- a/ctapipe/utils/unstructured_interpolator.py +++ b/ctapipe/utils/unstructured_interpolator.py @@ -28,8 +28,15 @@ class UnstructuredInterpolator: In the case that a numpy array is passed as the interpolation values this 
class will behave exactly the same as the scipy LinearNDInterpolator """ - def __init__(self, interpolation_points, function_name=None, remember_last=False, - bounds=None, dtype=None): + + def __init__( + self, + interpolation_points, + function_name=None, + remember_last=False, + bounds=None, + dtype=None, + ): """ Parameters ---------- @@ -54,9 +61,9 @@ def __init__(self, interpolation_points, function_name=None, remember_last=False # OK this code is horrid and will need fixing self._numpy_input = ( - isinstance(self.values[0], np.ndarray) or - issubclass(type(self.values[0]), np.float) or - issubclass(type(self.values[0]), np.int) + isinstance(self.values[0], np.ndarray) + or issubclass(type(self.values[0]), np.float) + or issubclass(type(self.values[0]), np.int) ) if self._numpy_input is False and function_name is None: @@ -118,14 +125,16 @@ def __call__(self, points, eval_points=None): if np.all(eval_points is not None): self._previous_shape = eval_points.shape - # Here comes some serious numpy magic, it could be done with a loop but would # be pretty inefficient I had to rip this from stack overflow - RDP # For each interpolated point, take the the transform matrix and multiply it by # the vector p-r, where r=m[:,n,:] is one of the simplex vertices to which # the matrix m is related to - b = np.einsum('ijk,ik->ij', m[:, :self._num_dimensions, :self._num_dimensions], - points - m[:, self._num_dimensions, :]) + b = np.einsum( + "ijk,ik->ij", + m[:, : self._num_dimensions, : self._num_dimensions], + points - m[:, self._num_dimensions, :], + ) # Use the above array to get the weights for the vertices; `b` contains an # n-dimensional vector with weights for all but the last vertices of the simplex @@ -143,8 +152,8 @@ def __call__(self, points, eval_points=None): selected_points = self._call_class_function(v, eval_points) # Multiply point values by weight - p_values = np.einsum('ij...,ij...->i...', selected_points, w) - #print(time.time() - t) + p_values = np.einsum("ij...,ij...->i...", selected_points, w) + # print(time.time() - t) return p_values @@ -169,9 +178,8 @@ def _call_class_function(self, point_num, eval_points): three_dim = False if len(eval_points.shape) > 2: - first_index = ( - np.arange(point_num.shape[0])[..., np.newaxis] * - np.ones_like(point_num) + first_index = np.arange(point_num.shape[0])[..., np.newaxis] * np.ones_like( + point_num ) first_index = first_index.ravel() three_dim = True @@ -219,8 +227,7 @@ def _numpy_interpolation(self, point_num, eval_points): it = np.repeat(it, eval_points.shape[1], axis=0) eval_points = eval_points.reshape( - eval_points.shape[0] * eval_points.shape[1], - eval_points.shape[-1] + eval_points.shape[0] * eval_points.shape[1], eval_points.shape[-1] ) scaled_points = eval_points.T @@ -231,12 +238,12 @@ def _numpy_interpolation(self, point_num, eval_points): it = ma.masked_array(it, mask) scaled_points[0] = ( - (scaled_points[0] - (self._bounds[0][0])) / - (self._bounds[0][1] - self._bounds[0][0]) + (scaled_points[0] - (self._bounds[0][0])) + / (self._bounds[0][1] - self._bounds[0][0]) ) * (vals.shape[-2] - 1) scaled_points[1] += ( - (scaled_points[1] - (self._bounds[1][0])) / - (self._bounds[1][1] - self._bounds[1][0]) + (scaled_points[1] - (self._bounds[1][0])) + / (self._bounds[1][1] - self._bounds[1][0]) ) * (vals.shape[-1] - 1) scaled_points = np.vstack((it, scaled_points)) diff --git a/ctapipe/version.py b/ctapipe/version.py index a5dae0bac0c..d8f2b6d0687 100644 --- a/ctapipe/version.py +++ b/ctapipe/version.py @@ -35,6 +35,7 @@ 
GIT_COMMAND = "git" if name == "nt": + def find_git_on_windows(): """find the path to the git executable on windows""" # first see if git is in the path @@ -49,11 +50,11 @@ def find_git_on_windows(): possible_locations = [] # look in program files for msysgit if "PROGRAMFILES(X86)" in environ: - possible_locations.append("%s/Git/cmd/git.exe" % - environ["PROGRAMFILES(X86)"]) + possible_locations.append( + "%s/Git/cmd/git.exe" % environ["PROGRAMFILES(X86)"] + ) if "PROGRAMFILES" in environ: - possible_locations.append("%s/Git/cmd/git.exe" % - environ["PROGRAMFILES"]) + possible_locations.append("%s/Git/cmd/git.exe" % environ["PROGRAMFILES"]) # look for the github version of git if "LOCALAPPDATA" in environ: github_dir = "%s/GitHub" % environ["LOCALAPPDATA"] @@ -61,8 +62,9 @@ def find_git_on_windows(): for subdir in listdir(github_dir): if not subdir.startswith("PortableGit"): continue - possible_locations.append("%s/%s/bin/git.exe" % - (github_dir, subdir)) + possible_locations.append( + "%s/%s/bin/git.exe" % (github_dir, subdir) + ) for possible_location in possible_locations: if path.isfile(possible_location): return possible_location @@ -76,10 +78,12 @@ def get_git_describe_version(abbrev=7): """return the string output of git desribe""" try: with open(devnull, "w") as fnull: - arguments = [GIT_COMMAND, "describe", "--tags", - "--abbrev=%d" % abbrev] - return check_output(arguments, cwd=CURRENT_DIRECTORY, - stderr=fnull).decode("ascii").strip() + arguments = [GIT_COMMAND, "describe", "--tags", "--abbrev=%d" % abbrev] + return ( + check_output(arguments, cwd=CURRENT_DIRECTORY, stderr=fnull) + .decode("ascii") + .strip() + ) except (OSError, CalledProcessError): return None @@ -99,7 +103,7 @@ def format_git_describe(git_str, pep440=False): formatted_str = git_str.replace("-g", "+git") # need to remove the "v" to have a proper python version - if formatted_str.startswith('v'): + if formatted_str.startswith("v"): formatted_str = formatted_str[1:] return formatted_str @@ -109,6 +113,7 @@ def read_release_version(): """Read version information from VERSION file""" try: from ._version_cache import version + if len(version) == 0: version = None return version @@ -156,7 +161,7 @@ def get_version(pep440=False): raw_git_version = get_git_describe_version() if not raw_git_version: # not a git repository - return read_release_version() + return read_release_version() git_version = format_git_describe(raw_git_version, pep440=pep440) diff --git a/ctapipe/visualization/__init__.py b/ctapipe/visualization/__init__.py index 61f81a51ff7..9927340420e 100644 --- a/ctapipe/visualization/__init__.py +++ b/ctapipe/visualization/__init__.py @@ -10,4 +10,4 @@ pass -__all__ = ['CameraDisplay', 'ArrayDisplay'] +__all__ = ["CameraDisplay", "ArrayDisplay"] diff --git a/ctapipe/visualization/bokeh.py b/ctapipe/visualization/bokeh.py index 7f3fb39bbc3..3f5a3b2a04c 100644 --- a/ctapipe/visualization/bokeh.py +++ b/ctapipe/visualization/bokeh.py @@ -12,7 +12,7 @@ ) from ctapipe.utils.rgbtohex import intensity_to_hex -PLOTARGS = dict(tools="", toolbar_location=None, outline_line_color='#595959') +PLOTARGS = dict(tools="", toolbar_location=None, outline_line_color="#595959") class CameraDisplay: @@ -46,10 +46,15 @@ def __init__(self, geometry=None, image=None, fig=None): self.cm = None self.cb = None - cdsource_d = dict(image=[], - x=[], y=[], - width=[], height=[], - outline_color=[], outline_alpha=[]) + cdsource_d = dict( + image=[], + x=[], + y=[], + width=[], + height=[], + outline_color=[], + outline_alpha=[], + ) 
self.cdsource = ColumnDataSource(data=cdsource_d) self._active_pixels = [] @@ -92,20 +97,24 @@ def geom(self, val): self._pix_y = val.pix_y.value self._n_pixels = self._pix_x.size - if self._n_pixels == len(self.cdsource.data['x']): - self.cdsource.data['x'] = self._pix_x - self.cdsource.data['y'] = self._pix_y - self.cdsource.data['width'] = self._pix_sizes - self.cdsource.data['height'] = self._pix_sizes + if self._n_pixels == len(self.cdsource.data["x"]): + self.cdsource.data["x"] = self._pix_x + self.cdsource.data["y"] = self._pix_y + self.cdsource.data["width"] = self._pix_sizes + self.cdsource.data["height"] = self._pix_sizes else: self._image = np.empty(self._pix_x.shape) alpha = [0] * self._n_pixels - color = ['black'] * self._n_pixels - cdsource_d = dict(image=self.image, - x=self._pix_x, y=self._pix_y, - width=self._pix_sizes, height=self._pix_sizes, - outline_color=color, outline_alpha=alpha - ) + color = ["black"] * self._n_pixels + cdsource_d = dict( + image=self.image, + x=self._pix_x, + y=self._pix_y, + width=self._pix_sizes, + height=self._pix_sizes, + outline_color=color, + outline_alpha=alpha, + ) self.cdsource.data = cdsource_d self.active_pixels = [0] * len(self.active_pixels) @@ -133,12 +142,13 @@ def image(self, val): if len(colors) == self._n_pixels: with warnings.catch_warnings(): - warnings.simplefilter(action='ignore', category=FutureWarning) - self.cdsource.data['image'] = colors + warnings.simplefilter(action="ignore", category=FutureWarning) + self.cdsource.data["image"] = colors else: - raise ValueError("Image has a different size {} than the current " - "CameraGeometry n_pixels {}" - .format(colors.size, self._n_pixels)) + raise ValueError( + "Image has a different size {} than the current " + "CameraGeometry n_pixels {}".format(colors.size, self._n_pixels) + ) @property def image_min(self): @@ -170,8 +180,7 @@ def active_pixels(self, listval): palette = palettes.Set1[9] palette = tuple([palette[0]] + list(palette[3:])) - self.active_colors = [palette[i % (len(palette))] - for i in range(len(listval))] + self.active_colors = [palette[i % (len(palette))] for i in range(len(listval))] self.highlight_pixels() def reset_pixels(self): @@ -181,15 +190,19 @@ def _draw_camera(self): # TODO: Support other pixel shapes OR switch to ellipse # after https://github.com/bokeh/bokeh/issues/6985 self.glyphs = self.fig.ellipse( - 'x', 'y', color='image', width='width', height='height', - line_color='outline_color', - line_alpha='outline_alpha', + "x", + "y", + color="image", + width="width", + height="height", + line_color="outline_color", + line_alpha="outline_alpha", line_width=2, - nonselection_fill_color='image', + nonselection_fill_color="image", nonselection_fill_alpha=1, - nonselection_line_color='outline_color', - nonselection_line_alpha='outline_alpha', - source=self.cdsource + nonselection_line_color="outline_color", + nonselection_line_alpha="outline_alpha", + source=self.cdsource, ) def enable_pixel_picker(self, n_active): @@ -216,7 +229,7 @@ def source_change_response(_, __, val): if self.automatic_index_increment: self.active_index = (ai + 1) % len(self.active_pixels) - self.cdsource.selected.on_change('indices', source_change_response) + self.cdsource.selected.on_change("indices", source_change_response) def _on_pixel_click(self, pix_id): print(f"Clicked pixel_id: {pix_id}") @@ -224,22 +237,25 @@ def _on_pixel_click(self, pix_id): def highlight_pixels(self): alpha = [0] * self._n_pixels - color = ['black'] * self._n_pixels + color = ["black"] * self._n_pixels 
for i, pix in enumerate(self.active_pixels): alpha[pix] = 1 color[pix] = self.active_colors[i] - self.cdsource.data['outline_alpha'] = alpha - self.cdsource.data['outline_color'] = color + self.cdsource.data["outline_alpha"] = alpha + self.cdsource.data["outline_color"] = color def add_colorbar(self): - self.cm = LinearColorMapper(palette="Viridis256", low=0, high=100, - low_color='white', high_color='red') - self.cb = ColorBar(color_mapper=self.cm, - border_line_color=None, - background_fill_alpha=0, - major_label_text_color='green', - location=(0, 0)) - self.fig.add_layout(self.cb, 'right') + self.cm = LinearColorMapper( + palette="Viridis256", low=0, high=100, low_color="white", high_color="red" + ) + self.cb = ColorBar( + color_mapper=self.cm, + border_line_color=None, + background_fill_alpha=0, + major_label_text_color="green", + location=(0, 0), + ) + self.fig.add_layout(self.cb, "right") self.cm.low = self.image_min.item() self.cm.high = self.image_max.item() @@ -262,12 +278,18 @@ def __init__(self, x_pix, y_pix, pix_size): self._image = None n_pix = x_pix.size - cdsource_d = dict(image=np.empty(n_pix, dtype='=1.17 - numpydoc - pandas + - pre-commit - psutil - pytables - pytest diff --git a/examples/calc_pedestals.py b/examples/calc_pedestals.py index f2a3406a29c..5502fe30984 100755 --- a/examples/calc_pedestals.py +++ b/examples/calc_pedestals.py @@ -22,7 +22,7 @@ def plot_peds(peds, pedvars): plt.title(f"Ped Variances for event {event.index.event_id}") -if __name__ == '__main__': +if __name__ == "__main__": # if a filename is specified, use it, otherwise load sample data if len(sys.argv) > 1: @@ -52,9 +52,7 @@ def plot_peds(peds, pedvars): if traces.shape[1] == 1: continue - peds, pedvars = pedestals.calc_pedestals_from_traces( - traces, start, end - ) + peds, pedvars = pedestals.calc_pedestals_from_traces(traces, start, end) print("Number of samples: {}".format(traces.shape[1])) print(f"Calculate over window:({start},{end})") diff --git a/examples/camera_animation.py b/examples/camera_animation.py index 9c94f745f07..17c300aa7c5 100755 --- a/examples/camera_animation.py +++ b/examples/camera_animation.py @@ -15,7 +15,7 @@ from ctapipe.instrument import TelescopeDescription from ctapipe.visualization import CameraDisplay -if __name__ == '__main__': +if __name__ == "__main__": plt.style.use("ggplot") fig, ax = plt.subplots() @@ -29,7 +29,7 @@ maxlen = 0.1 disp = CameraDisplay(geom, ax=ax) - disp.cmap = 'inferno' + disp.cmap = "inferno" disp.add_colorbar(ax=ax) def update(frame): @@ -46,11 +46,7 @@ def update(frame): length=length * u.m, psi=angle * u.deg, ) - image, _, _ = model.generate_image( - geom, - intensity=intens, - nsb_level_pe=5, - ) + image, _, _ = model.generate_image(geom, intensity=intens, nsb_level_pe=5,) disp.image = image anim = FuncAnimation(fig, update, interval=500) diff --git a/examples/camera_display_multi.py b/examples/camera_display_multi.py index 6bb459523ec..540a49e3b30 100755 --- a/examples/camera_display_multi.py +++ b/examples/camera_display_multi.py @@ -17,17 +17,11 @@ def draw_several_cams(geom, ncams=4): - cmaps = ['jet', 'afmhot', 'terrain', 'autumn'] - fig, axs = plt.subplots( - 1, ncams, figsize=(15, 4), - ) + cmaps = ["jet", "afmhot", "terrain", "autumn"] + fig, axs = plt.subplots(1, ncams, figsize=(15, 4),) for ii in range(ncams): - disp = CameraDisplay( - geom, - ax=axs[ii], - title="CT{}".format(ii + 1), - ) + disp = CameraDisplay(geom, ax=axs[ii], title="CT{}".format(ii + 1),) disp.cmap = cmaps[ii] model = toymodel.Gaussian( @@ -38,17 +32,13 
@@ def draw_several_cams(geom, ncams=4): psi=ii * 20 * u.deg, ) - image, _, _ = model.generate_image( - geom, - intensity=1500, - nsb_level_pe=5, - ) + image, _, _ = model.generate_image(geom, intensity=1500, nsb_level_pe=5,) mask = tailcuts_clean( geom, image, picture_thresh=6 * image.mean(), - boundary_thresh=4 * image.mean() + boundary_thresh=4 * image.mean(), ) cleaned = image.copy() cleaned[~mask] = 0 @@ -59,10 +49,10 @@ def draw_several_cams(geom, ncams=4): disp.add_colorbar(ax=axs[ii]) disp.set_limits_percent(95) - disp.overlay_moments(hillas, linewidth=3, color='blue') + disp.overlay_moments(hillas, linewidth=3, color="blue") -if __name__ == '__main__': +if __name__ == "__main__": hexgeom = CameraGeometry.from_name("LSTCam") recgeom = CameraGeometry.make_rectangular() diff --git a/examples/camera_norms.py b/examples/camera_norms.py index 720da02fdb8..220eb22aa27 100755 --- a/examples/camera_norms.py +++ b/examples/camera_norms.py @@ -11,35 +11,27 @@ from ctapipe.instrument import CameraGeometry from ctapipe.visualization import CameraDisplay -if __name__ == '__main__': +if __name__ == "__main__": - use('ggplot') + use("ggplot") # load the camera fig, axs = plt.subplots(1, 3, figsize=(15, 5)) geom = CameraGeometry.from_name("LSTCam") - titles = 'Linear Scale', 'Log-Scale', 'PowerNorm(gamma=2)' + titles = "Linear Scale", "Log-Scale", "PowerNorm(gamma=2)" model = toymodel.Gaussian( - x=0.2 * u.m, - y=0.0 * u.m, - width=0.05 * u.m, - length=0.15 * u.m, - psi='35d', + x=0.2 * u.m, y=0.0 * u.m, width=0.05 * u.m, length=0.15 * u.m, psi="35d", ) - image, sig, bg = model.generate_image( - geom, - intensity=1500, - nsb_level_pe=5, - ) + image, sig, bg = model.generate_image(geom, intensity=1500, nsb_level_pe=5,) disps = [] for ax, title in zip(axs, titles): disps.append(CameraDisplay(geom, ax=ax, image=image, title=title)) - disps[0].norm = 'lin' - disps[1].norm = 'log' + disps[0].norm = "lin" + disps[1].norm = "log" disps[2].norm = PowerNorm(2) for disp in disps: diff --git a/examples/camera_rotation.py b/examples/camera_rotation.py index aa07f90d312..2bbc20eed9b 100644 --- a/examples/camera_rotation.py +++ b/examples/camera_rotation.py @@ -5,7 +5,7 @@ from ctapipe.instrument import CameraGeometry from ctapipe.visualization import CameraDisplay -if __name__ == '__main__': +if __name__ == "__main__": geom = CameraGeometry.from_name("Whipple109") image = np.random.uniform(size=geom.pix_id.shape) @@ -17,7 +17,7 @@ for ii in range(N): plt.subplot(1, N, ii + 1) geom.rotate(ii * (geom.pix_rotation + 30 * u.deg)) - d2 = CameraDisplay(geom, image=image, cmap='viridis') + d2 = CameraDisplay(geom, image=image, cmap="viridis") plt.tight_layout() plt.show() diff --git a/examples/coordinate_transformations.py b/examples/coordinate_transformations.py index 5d04e1a9d40..c760994ac45 100755 --- a/examples/coordinate_transformations.py +++ b/examples/coordinate_transformations.py @@ -58,15 +58,12 @@ def cam_to_nom(): pointing_direction = SkyCoord(alt=70 * u.deg, az=180 * u.deg, frame=AltAz()) camera_frame = CameraFrame( - focal_length=15 * u.m, - telescope_pointing=pointing_direction + focal_length=15 * u.m, telescope_pointing=pointing_direction ) camera_coord = SkyCoord(pix_x, pix_y, frame=camera_frame) # In this case we bypass the telescope system - nominal_frame = NominalFrame( - origin=AltAz(alt=75 * u.deg, az=180 * u.deg) - ) + nominal_frame = NominalFrame(origin=AltAz(alt=75 * u.deg, az=180 * u.deg)) nom_coord = camera_coord.transform_to(nominal_frame) horizon = camera_coord.transform_to(AltAz()) @@ 
-82,7 +79,7 @@ def nominal_to_altaz(): nom = SkyCoord( fov_lon=0 * u.deg, fov_lat=0 * u.deg, - frame=NominalFrame(origin=AltAz(alt=75 * u.deg, az=180 * u.deg)) + frame=NominalFrame(origin=AltAz(alt=75 * u.deg, az=180 * u.deg)), ) alt_az = nom.transform_to(AltAz()) print("HorizonCoordinate", alt_az) @@ -95,15 +92,13 @@ def nominal_to_altaz(): def grd_to_tilt(): grd_coord = GroundFrame(x=1 * u.m, y=2 * u.m, z=0 * u.m) tilt_coord = grd_coord.transform_to( - TiltedGroundFrame( - pointing_direction=AltAz(alt=90 * u.deg, az=180 * u.deg) - ) + TiltedGroundFrame(pointing_direction=AltAz(alt=90 * u.deg, az=180 * u.deg)) ) print(project_to_ground(tilt_coord)) print("Tilted Coordinate", tilt_coord) -if __name__ == '__main__': +if __name__ == "__main__": cam_to_tel() cam_to_nom() nominal_to_altaz() diff --git a/examples/highlight_pixel.py b/examples/highlight_pixel.py index cbf3a8928b8..3a8f9adf877 100755 --- a/examples/highlight_pixel.py +++ b/examples/highlight_pixel.py @@ -5,32 +5,26 @@ from ctapipe.instrument import CameraGeometry from ctapipe.visualization import CameraDisplay -if __name__ == '__main__': +if __name__ == "__main__": - plt.style.use('ggplot') + plt.style.use("ggplot") fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(1, 1, 1) - geom = CameraGeometry.from_name('NectarCam') + geom = CameraGeometry.from_name("NectarCam") disp = CameraDisplay(geom, ax=ax) disp.add_colorbar() model = toymodel.Gaussian( - x=0.05 * u.m, - y=0 * u.m, - width=0.05 * u.m, - length=0.15 * u.m, - psi='35d' + x=0.05 * u.m, y=0 * u.m, width=0.05 * u.m, length=0.15 * u.m, psi="35d" ) - image, sig, bg = model.generate_image( - geom, intensity=1500, nsb_level_pe=5 - ) + image, sig, bg = model.generate_image(geom, intensity=1500, nsb_level_pe=5) disp.image = image mask = disp.image > 10 - disp.highlight_pixels(mask, linewidth=2, color='crimson') + disp.highlight_pixels(mask, linewidth=2, color="crimson") plt.show() diff --git a/examples/instrument_info.py b/examples/instrument_info.py index c8f61831105..016661a7cde 100644 --- a/examples/instrument_info.py +++ b/examples/instrument_info.py @@ -5,7 +5,7 @@ from ctapipe.io import event_source from ctapipe.utils import get_dataset_path -if __name__ == '__main__': +if __name__ == "__main__": # load up one event so that we get the instrument info infile = get_dataset_path("gamma_test_large.simtel.gz") @@ -21,6 +21,6 @@ print("\n---------- Subarray Table:-----------") print(subarray.to_table()) print("\n---------- Subarray Optics:----------") - print(subarray.to_table(kind='optics')) + print(subarray.to_table(kind="optics")) print("\n---------- Mirror Area: -------------") print(subarray.tel[1].optics.mirror_area) diff --git a/examples/load_one_event.py b/examples/load_one_event.py index 3184ca51223..c606f0ed37f 100644 --- a/examples/load_one_event.py +++ b/examples/load_one_event.py @@ -8,7 +8,7 @@ from ctapipe.io import event_source from ctapipe.utils import get_dataset_path -if __name__ == '__main__': +if __name__ == "__main__": if len(sys.argv) >= 2: filename = sys.argv[1] diff --git a/examples/plot_all_cameras.py b/examples/plot_all_cameras.py index 47aa1e98df1..4c1922eace3 100644 --- a/examples/plot_all_cameras.py +++ b/examples/plot_all_cameras.py @@ -4,7 +4,7 @@ from ctapipe.instrument import CameraDescription, CameraGeometry from ctapipe.visualization import CameraDisplay -if __name__ == '__main__': +if __name__ == "__main__": plt.style.use("bmh") @@ -20,7 +20,7 @@ ax = plt.subplot(n_rows, n_cols, ii + 1) disp = CameraDisplay(geom) disp.image = 
np.random.uniform(size=geom.pix_id.shape) - disp.cmap = 'viridis' + disp.cmap = "viridis" plt.xlabel("") plt.ylabel("") diff --git a/examples/plot_array_hillas.py b/examples/plot_array_hillas.py index df34747f704..5a03f1d1446 100644 --- a/examples/plot_array_hillas.py +++ b/examples/plot_array_hillas.py @@ -11,17 +11,15 @@ from ctapipe.calib import CameraCalibrator from ctapipe.coordinates import TiltedGroundFrame, MissingFrameAttributeWarning -from ctapipe.image import ( - hillas_parameters, tailcuts_clean, HillasParameterizationError -) +from ctapipe.image import hillas_parameters, tailcuts_clean, HillasParameterizationError from ctapipe.image import timing_parameters from ctapipe.io import event_source from ctapipe.utils import datasets from ctapipe.visualization import ArrayDisplay import warnings -if __name__ == '__main__': - warnings.filterwarnings('ignore', category=MissingFrameAttributeWarning) +if __name__ == "__main__": + warnings.filterwarnings("ignore", category=MissingFrameAttributeWarning) # importing data from avaiable datasets in ctapipe filename = datasets.get_dataset_path("gamma_test_large.simtel.gz") @@ -62,23 +60,19 @@ # plot the core position, which must be transformed from the tilted # system to the system that the ArrayDisplay is in (default # GroundFrame) - point_dir = SkyCoord( - *event.mcheader.run_array_direction, - frame=AltAz() - ) + point_dir = SkyCoord(*event.mcheader.run_array_direction, frame=AltAz()) tiltedframe = TiltedGroundFrame(pointing_direction=point_dir) if markers: for marker in markers: marker.remove() core_coord = SkyCoord( - x=event.mc.core_x, - y=event.mc.core_y, - frame=tiltedframe + x=event.mc.core_x, y=event.mc.core_y, frame=tiltedframe ).transform_to(array_disp.frame) - markers = ax.plot([core_coord.x.value, ], [core_coord.y.value, ], - "r+", markersize=10) + markers = ax.plot( + [core_coord.x.value,], [core_coord.y.value,], "r+", markersize=10 + ) # plot the hit pattern (triggered tels). # first expand the tels_with_data list into a fixed-length vector, diff --git a/examples/plot_camera_frames.py b/examples/plot_camera_frames.py index a36ef581051..4ce20be9de9 100644 --- a/examples/plot_camera_frames.py +++ b/examples/plot_camera_frames.py @@ -1,7 +1,7 @@ -''' +""" Plot the same event in two camera displays showing the different coordinate frames for camera coordinates. 
-''' +""" from ctapipe.instrument import CameraGeometry from ctapipe.visualization import CameraDisplay import matplotlib.pyplot as plt @@ -14,21 +14,19 @@ def main(): fig, axs = plt.subplots(1, 2, constrained_layout=True, figsize=(6, 3)) model = Gaussian(0 * u.m, 0.1 * u.m, 0.3 * u.m, 0.05 * u.m, 25 * u.deg) - cam = CameraGeometry.from_name('FlashCam') + cam = CameraGeometry.from_name("FlashCam") image, *_ = model.generate_image(cam, 2500) CameraDisplay(cam, ax=axs[0], image=image) CameraDisplay( - cam.transform_to(EngineeringCameraFrame()), - ax=axs[1], - image=image, + cam.transform_to(EngineeringCameraFrame()), ax=axs[1], image=image, ) - axs[0].set_title('CameraFrame') - axs[1].set_title('EngineeringCameraFrame') + axs[0].set_title("CameraFrame") + axs[1].set_title("EngineeringCameraFrame") plt.show() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/examples/plot_hillas_parameters.py b/examples/plot_hillas_parameters.py index 529ccb32e8a..fd55801d057 100755 --- a/examples/plot_hillas_parameters.py +++ b/examples/plot_hillas_parameters.py @@ -11,7 +11,7 @@ from ctapipe.visualization import CameraDisplay -if __name__ == '__main__': +if __name__ == "__main__": # Load the camera geom = CameraGeometry.from_name("LSTCam") @@ -20,19 +20,13 @@ # Create a fake camera image to display: model = toymodel.Gaussian( - x=0.2 * u.m, y=0.0 * u.m, - width=0.05 * u.m, length=0.15 * u.m, - psi='35d' + x=0.2 * u.m, y=0.0 * u.m, width=0.05 * u.m, length=0.15 * u.m, psi="35d" ) - image, sig, bg = model.generate_image( - geom, intensity=1500, nsb_level_pe=2 - ) + image, sig, bg = model.generate_image(geom, intensity=1500, nsb_level_pe=2) # Apply image cleaning - cleanmask = tailcuts_clean( - geom, image, picture_thresh=10, boundary_thresh=5 - ) + cleanmask = tailcuts_clean(geom, image, picture_thresh=10, boundary_thresh=5) clean = image.copy() clean[~cleanmask] = 0.0 @@ -42,8 +36,8 @@ # Show the camera image and overlay Hillas ellipse and clean pixels disp.image = image - disp.cmap = 'inferno' - disp.highlight_pixels(cleanmask, color='crimson') - disp.overlay_moments(hillas, color='cyan', linewidth=1) + disp.cmap = "inferno" + disp.highlight_pixels(cleanmask, color="crimson") + disp.overlay_moments(hillas, color="cyan", linewidth=1) plt.show() diff --git a/examples/plot_showers_in_nominal.py b/examples/plot_showers_in_nominal.py index d8272b6d49f..8d414214bfd 100644 --- a/examples/plot_showers_in_nominal.py +++ b/examples/plot_showers_in_nominal.py @@ -11,17 +11,17 @@ from ctapipe.coordinates import CameraFrame, NominalFrame, MissingFrameAttributeWarning -warnings.filterwarnings('ignore', category=MissingFrameAttributeWarning) +warnings.filterwarnings("ignore", category=MissingFrameAttributeWarning) cleaning_level = { - 'LSTCam': (3.5, 7.5, 2), # ?? (3, 6) for Abelardo... - 'FlashCam': (4, 8, 2), # there is some scaling missing? - 'ASTRICam': (5, 7, 2), + "LSTCam": (3.5, 7.5, 2), # ?? (3, 6) for Abelardo... + "FlashCam": (4, 8, 2), # there is some scaling missing? 
+    "ASTRICam": (5, 7, 2),
 }

-input_url = get_dataset_path('gamma_test_large.simtel.gz')
+input_url = get_dataset_path("gamma_test_large.simtel.gz")


 with event_source(input_url=input_url) as source:
@@ -61,13 +61,11 @@
             image,
             boundary_thresh=boundary,
             picture_thresh=picture,
-            min_number_picture_neighbors=min_neighbors
+            min_number_picture_neighbors=min_neighbors,
         )

         cam_coords = SkyCoord(
-            geom.pix_x[clean],
-            geom.pix_y[clean],
-            frame=camera_frame
+            geom.pix_x[clean], geom.pix_y[clean], frame=camera_frame
         )
         nom = cam_coords.transform_to(nominal_frame)
         nom_fov_lon.append(nom.fov_lon.to_value(u.deg))
@@ -82,7 +80,7 @@
     nom_fov_lat = np.repeat(nom_fov_lat, photons.astype(int))

     plt.hexbin(nom_fov_lon, nom_fov_lat, gridsize=50, extent=[-5, 5, -5, 5])
-    plt.xlabel('fov_lon / deg')
-    plt.ylabel('fov_lat / deg')
+    plt.xlabel("fov_lon / deg")
+    plt.ylabel("fov_lat / deg")
     plt.gca().set_aspect(1)
     plt.show()
diff --git a/examples/plot_theta_square.py b/examples/plot_theta_square.py
index 4302b546f9d..9c535486a56 100644
--- a/examples/plot_theta_square.py
+++ b/examples/plot_theta_square.py
@@ -51,7 +51,7 @@
         telescope_pointings[tel_id] = SkyCoord(
             alt=event.pointing.tel[tel_id].altitude,
             az=event.pointing.tel[tel_id].azimuth,
-            frame=horizon_frame
+            frame=horizon_frame,
         )

     # Camera Geometry required for hillas parametrization
@@ -63,9 +63,7 @@
         # Cleaning  of the image
         cleaned_image = image
         # create a clean mask of pixels above the threshold
-        cleanmask = tailcuts_clean(
-            camgeom, image, picture_thresh=10, boundary_thresh=5
-        )
+        cleanmask = tailcuts_clean(camgeom, image, picture_thresh=10, boundary_thresh=5)
         # set all rejected pixels to zero
         cleaned_image[~cleanmask] = 0

@@ -79,29 +77,23 @@
         if params.width > 0:
             hillas_params[tel_id] = params

-
-
     array_pointing = SkyCoord(
         az=event.mcheader.run_array_direction[0],
         alt=event.mcheader.run_array_direction[1],
-        frame=horizon_frame
+        frame=horizon_frame,
     )

     if len(hillas_params) < 2:
         continue

     reco_result = reco.predict(
-        hillas_params, source.subarray,
-        array_pointing, telescope_pointings
+        hillas_params, source.subarray, array_pointing, telescope_pointings
     )

     # get angular offset between reconstructed shower direction and MC
     # generated shower direction
     off_angle = angular_separation(
-        event.mc.az,
-        event.mc.alt,
-        reco_result.az,
-        reco_result.alt
+        event.mc.az, event.mc.alt, reco_result.az, reco_result.alt
     )

     # Appending all estimated off angles
@@ -109,14 +101,14 @@

 # calculate theta square for angles which are not nan
 off_angles = np.array(off_angles)
-thetasquare = off_angles[np.isfinite(off_angles)]**2
+thetasquare = off_angles[np.isfinite(off_angles)] ** 2

 # To plot thetasquare The number of events in th data files for LSTCam is not
 # significantly high to give a nice thetasquare plot for gammas One can use
 # dedicated MC file for LST get nice plot
 plt.figure(figsize=(10, 8))
 plt.hist(thetasquare, bins=np.linspace(0, 1, 50))
-plt.title(r'$\theta^2$ plot')
-plt.xlabel(r'$\theta^2$ (deg)')
-plt.ylabel('# of events')
+plt.title(r"$\theta^2$ plot")
+plt.xlabel(r"$\theta^2$ (deg)")
+plt.ylabel("# of events")
 plt.show()
diff --git a/examples/simple_event_writer.py b/examples/simple_event_writer.py
index a3a075d2e9c..7fc3a0a3c63 100755
--- a/examples/simple_event_writer.py
+++ b/examples/simple_event_writer.py
@@ -20,34 +20,37 @@


 class SimpleEventWriter(Tool):
-    name = 'ctapipe-simple-event-writer'
+    name = "ctapipe-simple-event-writer"
     description = Unicode(__doc__)

     infile = Path(
-        default_value=get_dataset_path('lst_prod3_calibration_and_mcphotons.simtel.zst'),
-        help='input file to read', directory_ok=False, exists=True,
+        default_value=get_dataset_path(
+            "lst_prod3_calibration_and_mcphotons.simtel.zst"
+        ),
+        help="input file to read",
+        directory_ok=False,
+        exists=True,
     ).tag(config=True)
     outfile = Path(
-        help='output file name', directory_ok=False, default_value='output.h5'
+        help="output file name", directory_ok=False, default_value="output.h5"
     ).tag(config=True)
-    progress = Bool(help='display progress bar', default_value=True).tag(config=True)
-
-    aliases = Dict({
-        'infile': 'EventSource.input_url',
-        'outfile': 'SimpleEventWriter.outfile',
-        'max-events': 'EventSource.max_events',
-        'progress': 'SimpleEventWriter.progress'
-    })
+    progress = Bool(help="display progress bar", default_value=True).tag(config=True)
+
+    aliases = Dict(
+        {
+            "infile": "EventSource.input_url",
+            "outfile": "SimpleEventWriter.outfile",
+            "max-events": "EventSource.max_events",
+            "progress": "SimpleEventWriter.progress",
+        }
+    )
     classes = List([EventSource, CameraCalibrator])

     def setup(self):
-        self.log.info('Configure EventSource...')
+        self.log.info("Configure EventSource...")

         self.event_source = self.add_component(
-            EventSource.from_url(
-                self.infile,
-                parent=self
-            )
+            EventSource.from_url(self.infile, parent=self)
         )

         self.calibrator = self.add_component(
@@ -56,20 +59,19 @@ def setup(self):

         self.writer = self.add_component(
             HDF5TableWriter(
-                filename=self.outfile,
-                group_name='image_infos',
-                overwrite=True
+                filename=self.outfile, group_name="image_infos", overwrite=True
             )
         )

     def start(self):
-        self.log.info('Loop on events...')
+        self.log.info("Loop on events...")

         for event in tqdm(
-                self.event_source,
-                desc='EventWriter',
-                total=self.event_source.max_events,
-                disable=~self.progress):
+            self.event_source,
+            desc="EventWriter",
+            total=self.event_source.max_events,
+            disable=~self.progress,
+        ):

             self.calibrator(event)

@@ -91,11 +93,10 @@ def start(self):
             self.writer.write(geom.camera_name, [event.r0, event.mc, params])

     def finish(self):
-        self.log.info('End of job.')
+        self.log.info("End of job.")
         self.writer.close()


-if __name__ == '__main__':
+if __name__ == "__main__":
     tool = SimpleEventWriter()
     tool.run()
-
diff --git a/examples/stereo_reconstruction.py b/examples/stereo_reconstruction.py
index 056c2b9b219..f04874dfc9a 100644
--- a/examples/stereo_reconstruction.py
+++ b/examples/stereo_reconstruction.py
@@ -16,13 +16,13 @@
 # unoptimized cleaning levels, copied from
 # https://github.com/tudo-astroparticlephysics/cta_preprocessing
 cleaning_level = {
-    'LSTCam': (3.5, 7.5, 2),    # ?? (3, 6) for Abelardo...
-    'FlashCam': (4, 8, 2),      # there is some scaling missing?
-    'ASTRICam': (5, 7, 2),
+    "LSTCam": (3.5, 7.5, 2),  # ?? (3, 6) for Abelardo...
+    "FlashCam": (4, 8, 2),  # there is some scaling missing?
+    "ASTRICam": (5, 7, 2),
 }

-input_url = get_dataset_path('gamma_test_large.simtel.gz')
+input_url = get_dataset_path("gamma_test_large.simtel.gz")
 event_source = event_source(input_url)

 calibrator = CameraCalibrator(subarray=event_source.subarray)
@@ -31,7 +31,7 @@
 reco = HillasReconstructor()

 for event in event_source:
-    print('Event', event.count)
+    print("Event", event.count)
     calibrator(event)

     # mapping of telescope_id to parameters for stereo reconstruction
@@ -52,7 +52,7 @@
             image,
             boundary_thresh=boundary,
             picture_thresh=picture,
-            min_number_picture_neighbors=min_neighbors
+            min_number_picture_neighbors=min_neighbors,
         )

         # ignore images with less than 5 pixels after cleaning
@@ -73,7 +73,7 @@

         # store timegradients for plotting
         # ASTRI has no timing in PROD3b, so we use skewness instead
-        if geom.camera_name != 'ASTRICam':
+        if geom.camera_name != "ASTRICam":
             time_gradients[telescope_id] = timing_c.slope.value
         else:
             time_gradients[telescope_id] = hillas_c.skewness
@@ -89,13 +89,9 @@
     array_pointing = SkyCoord(
         az=event.mcheader.run_array_direction[0],
         alt=event.mcheader.run_array_direction[1],
-        frame=horizon_frame
-    )
-    stereo = reco.predict(
-        hillas_containers,
-        event_source.subarray,
-        array_pointing,
+        frame=horizon_frame,
     )
+    stereo = reco.predict(hillas_containers, event_source.subarray, array_pointing,)

     plt.figure()
     angle_offset = event.mcheader.run_array_direction[0]
@@ -105,15 +101,18 @@
         hillas_containers,
         time_gradient=time_gradients,
         angle_offset=angle_offset,
-        length=500
+        length=500,
     )

     plt.scatter(
-        event.mc.core_x, event.mc.core_y,
-        s=200, c='k', marker='x', label='True Impact',
+        event.mc.core_x, event.mc.core_y, s=200, c="k", marker="x", label="True Impact",
     )
     plt.scatter(
-        stereo.core_x, stereo.core_y,
-        s=200, c='r', marker='x', label='Estimated Impact',
+        stereo.core_x,
+        stereo.core_y,
+        s=200,
+        c="r",
+        marker="x",
+        label="Estimated Impact",
     )

     plt.legend()