[pre-commit.ci] pre-commit autoupdate #365

Closed · wants to merge 2 commits
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -5,20 +5,20 @@ exclude: tests/integration/data
 repos:
   # Normalise all Python code. (Black + isort + pyupgrade + autoflake)
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.3.5
+    rev: v0.9.6
     hooks:
       - id: ruff
         args: [--fix, --show-fixes, --output-format, grouped]
         exclude: '_version.py|versioneer.py'
       - id: ruff-format
   # Common Python security checks. (this is complementary to dlint in flake8)
   - repo: https://github.com/PyCQA/bandit
-    rev: '1.7.8'
+    rev: '1.8.2'
     hooks:
       - id: bandit
         exclude: '^tests/|_version.py|versioneer.py'
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: check-added-large-files # We don't want huge files. (Cut down test data!)
         args: ['--maxkb=3000']
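
Note: these three bumps are pre-commit.ci's scheduled autoupdate: ruff-pre-commit v0.3.5 → v0.9.6, bandit 1.7.8 → 1.8.2, and pre-commit-hooks v4.6.0 → v5.0.0. Everything below in the Python files is the mechanical output of re-running the newer ruff hooks; assuming a standard pre-commit setup, the same result should be reproducible locally with `pre-commit autoupdate` followed by `pre-commit run --all-files`.
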
4 changes: 2 additions & 2 deletions eodatasets3/__init__.py
@@ -12,6 +12,7 @@
 del get_versions
 
 __all__ = (
+    "REPO_URL",
     "DatasetAssembler",
     "DatasetDoc",
     "DatasetPrepare",
@@ -20,10 +21,9 @@
     "IfExists",
     "IncompleteDatasetError",
     "NamingConventions",
-    "namer",
-    "REPO_URL",
     "ValidDataMethod",
     "__version__",
+    "namer",
 )
 
 __version__ = _version.get_versions()["version"]
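
Note: the `__all__` reshuffle looks like ruff's `__all__`-sorting fixer (RUF022), which by default applies an isort-style ordering rather than a plain ASCII sort. A minimal sketch of that ordering, under the assumption that this rule is what fired here (module contents invented for illustration):

    # Hypothetical module: RUF022's isort-style sort (assumed) places
    # SCREAMING_SNAKE_CASE constants first, CamelCase classes second,
    # and remaining names last in ASCII order ("__version__" < "namer").
    REPO_URL = "https://example.com/repo"  # placeholder value


    class DatasetAssembler: ...


    def namer() -> None: ...


    __version__ = "0.0.0"

    __all__ = (
        "REPO_URL",
        "DatasetAssembler",
        "__version__",
        "namer",
    )
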
4 changes: 2 additions & 2 deletions eodatasets3/assemble.py
@@ -1018,7 +1018,7 @@ def format_list(items: list, max_len=60):
 
     return dedent(
         f"""
-        Assembling {product_name or ''} ({status})
+        Assembling {product_name or ""} ({status})
         - {len(measurements)} measurements: {format_list(measurements)}
         - {len(properties)} properties: {format_list(properties)}
         Writing to location: {output_location}
@@ -1420,7 +1420,7 @@ def _write_measurement(
         if file_format != self.properties["odc:file_format"]:
             raise RuntimeError(
                 f"Inconsistent file formats between bands. "
-                f"Was {self.properties['odc:file_format']!r}, now {file_format !r}"
+                f"Was {self.properties['odc:file_format']!r}, now {file_format!r}"
             )
 
         self._measurements.record_image(
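
Note: both hunks here (and the matching one-character fixes in validate.py and wagl.py below) come from ruff-format now formatting inside f-string replacement fields: quotes are normalised toward the project's double-quote preference, and incidental whitespace before a conversion such as `!r` is stripped. A small standalone illustration (values invented):

    file_format = "GeoTIFF"  # invented value for this demo

    # ruff-format previously left f-string internals untouched, so forms like
    #   f"now {file_format !r}"      (stray space before the conversion)
    #   f"""{product_name or ''}"""  (single quotes inside the expression)
    # survived. The 0.9.x formatter rewrites both:
    print(f"now {file_format!r}")  # -> now 'GeoTIFF'

    product_name = None  # invented
    print(f"""Assembling {product_name or ""} (status)""")
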
4 changes: 1 addition & 3 deletions eodatasets3/properties.py
@@ -457,9 +457,7 @@ def normalise_and_set(self, key, value, allow_override=True, expect_override=Fal
                 self.normalise_and_set(k, v, allow_override=allow_override)
 
         if key in self._props and value != self[key] and (not expect_override):
-            message = (
-                f"Overriding property {key!r} " f"(from {self[key]!r} to {value!r})"
-            )
+            message = f"Overriding property {key!r} (from {self[key]!r} to {value!r})"
             if allow_override:
                 warnings.warn(message, category=PropertyOverrideWarning)
             else:
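
Note: this hunk, and the near-identical ones in packagewagl.py, recompress.py, tostac.py and tests/common.py below, drop implicit string concatenation that only ever existed for line-wrapping: once the pieces are merged, the single literal fits within the line limit, so the enclosing parentheses often go too. The pattern, as a sketch with invented values:

    key, old, new = "odc:producer", "ga.gov.au", "usgs.gov"  # invented values

    # Before: adjacent f-string literals, a relic of black-era wrapping.
    message = (
        f"Overriding property {key!r} " f"(from {old!r} to {new!r})"
    )
    # After: one literal, and no parentheses needed.
    message = f"Overriding property {key!r} (from {old!r} to {new!r})"
    print(message)
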
2 changes: 1 addition & 1 deletion eodatasets3/scripts/packagewagl.py
@@ -78,7 +78,7 @@
 )
 @click.option(
     "--contiguity-resolution",
-    help="Resolution choice for contiguity " "(default: automatic based on sensor)",
+    help="Resolution choice for contiguity (default: automatic based on sensor)",
     type=float,
     default=None,
 )
5 changes: 2 additions & 3 deletions eodatasets3/scripts/recompress.py
@@ -346,8 +346,7 @@ def _recompress_image(
 @click.option(
     "--output-base",
     type=PathPath(file_okay=False, writable=True),
-    help="The base output directory "
-    "(default to same dir as input if --clean-inputs).",
+    help="The base output directory (default to same dir as input if --clean-inputs).",
 )
 @click.option(
     "--zlevel", type=click.IntRange(0, 9), default=5, help="Deflate compression level."
@@ -417,7 +416,7 @@ def main(
         )
     else:
         raise ValueError(
-            f"Expected either tar.gz or a dataset folder. " f"Got: {path!r}"
+            f"Expected either tar.gz or a dataset folder. Got: {path!r}"
         )
 
     if not success:
6 changes: 2 additions & 4 deletions eodatasets3/scripts/tostac.py
@@ -63,7 +63,7 @@ def run(
         json.dump(jsonify_document(item_doc), f, indent=4, default=json_fallback)
 
     if verbose:
-        echo(f'Wrote {style(output_path.as_posix(), "green")}')
+        echo(f"Wrote {style(output_path.as_posix(), 'green')}")
 
 
 def dc_to_stac(
@@ -118,9 +118,7 @@ def json_fallback(o):
         return str(o)
 
     raise TypeError(
-        f"Unhandled type for json conversion: "
-        f"{o.__class__.__name__!r} "
-        f"(object {o!r})"
+        f"Unhandled type for json conversion: {o.__class__.__name__!r} (object {o!r})"
     )
2 changes: 1 addition & 1 deletion eodatasets3/validate.py
@@ -466,7 +466,7 @@ def _error(code: str, reason: str, hint: str = None):
         yield _error(
             "different_nodata",
             f"{name} nodata: "
-            f"product {expected_nodata !r} != dataset {ds_nodata !r}",
+            f"product {expected_nodata!r} != dataset {ds_nodata!r}",
         )
2 changes: 1 addition & 1 deletion eodatasets3/wagl.py
@@ -442,7 +442,7 @@ def get_oa_resolution_group(
         oa_resolution = (20.0, 20.0)
     else:
         raise NotImplementedError(
-            f"Don't know how to choose a default OA resolution for platform {platform !r}"
+            f"Don't know how to choose a default OA resolution for platform {platform!r}"
         )
 
     res_grp = resolution_groups.get(oa_resolution)
6 changes: 3 additions & 3 deletions tests/__init__.py
@@ -56,9 +56,9 @@ def assert_file_structure(folder, expected_structure, root=""):
     optional_filenames = {
         name for name, option in expected_structure.items() if option == "optional"
     }
-    assert (
-        folder.exists()
-    ), f"Expected base folder doesn't even exist! {folder.as_posix()!r}"
+    assert folder.exists(), (
+        f"Expected base folder doesn't even exist! {folder.as_posix()!r}"
+    )
 
     actual_filenames = {f.name for f in folder.iterdir()}
 
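
Note: this hunk and essentially every remaining test-file hunk are the same 2025-style change: when an `assert` fits except for its message, ruff-format now keeps the condition on the `assert` line and parenthesises the message, instead of wrapping the condition. Roughly (a runnable sketch, not the project's code):

    from pathlib import Path

    folder = Path.cwd()  # stand-in for the folder under test

    # Old output: the condition was wrapped, which buried the actual check.
    assert (
        folder.exists()
    ), f"Expected base folder doesn't even exist! {folder.as_posix()!r}"

    # New output: the check stays visible; only the message is wrapped.
    assert folder.exists(), (
        f"Expected base folder doesn't even exist! {folder.as_posix()!r}"
    )
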
18 changes: 9 additions & 9 deletions tests/common.py
@@ -29,7 +29,7 @@ def check_prepare_outputs(
     try:
         assert_expected_eo3_path(expected_doc, expected_metadata_path, ignore_fields)
     except AssertionError:
-        print(f'Output:\n{indent(res.output, " ")}')
+        print(f"Output:\n{indent(res.output, ' ')}")
         raise
 
 
@@ -44,9 +44,9 @@ def assert_expected_eo3_path(
     This is slightly smarter about doing geometry equality etc within the document.
     """
     __tracebackhide__ = operator.methodcaller("errisinstance", AssertionError)
-    assert (
-        expected_path.exists()
-    ), f"Expected output EO3 path doesn't exist: {expected_path}"
+    assert expected_path.exists(), (
+        f"Expected output EO3 path doesn't exist: {expected_path}"
+    )
     assert_same_as_file(
         expected_doc,
         expected_path,
@@ -112,9 +112,9 @@ def assert_shapes_mostly_equal(
     shape2 = shape(shape2)
 
     # Check area first, as it's a nicer error message when they're wildly different.
-    assert shape1.area == pytest.approx(
-        shape2.area, abs=threshold
-    ), f"Shapes have different areas: {shape1.area} != {shape2.area}"
+    assert shape1.area == pytest.approx(shape2.area, abs=threshold), (
+        f"Shapes have different areas: {shape1.area} != {shape2.area}"
+    )
 
     s1 = shape1.simplify(tolerance=threshold)
     s2 = shape2.simplify(tolerance=threshold)
@@ -194,8 +194,8 @@ def clean_offset(offset: str):
         out.extend(
             (
                 f" {clean_offset(offset)}: ",
-                f' {change["old_value"]!r}',
-                f' != {change["new_value"]!r}',
+                f" {change['old_value']!r}",
+                f" != {change['new_value']!r}",
             )
         )
     if "dictionary_item_added" in doc_diffs:
12 changes: 6 additions & 6 deletions tests/integration/__init__.py
@@ -28,18 +28,18 @@ def assert_image(
     assert d.count == bands, f"Expected {bands} band{'s' if bands > 1 else ''}"
 
     if overviews is not allow_anything:
-        assert (
-            d.overviews(1) == overviews
-        ), f"Unexpected overview: {d.overviews(1)!r} != {overviews!r}"
+        assert d.overviews(1) == overviews, (
+            f"Unexpected overview: {d.overviews(1)!r} != {overviews!r}"
+        )
     if nodata is not allow_anything:
         assert d.nodata == nodata, f"Unexpected nodata: {d.nodata!r} != {nodata!r}"
 
     if unique_pixel_counts is not allow_anything:
         array = d.read(1)
         value_counts = dict(zip(*numpy.unique(array, return_counts=True)))
-        assert (
-            value_counts == unique_pixel_counts
-        ), f"Unexpected pixel counts: {value_counts!r} != {unique_pixel_counts!r}"
+        assert value_counts == unique_pixel_counts, (
+            f"Unexpected pixel counts: {value_counts!r} != {unique_pixel_counts!r}"
+        )
 
     if shape:
         assert shape == d.shape, f"Unexpected shape: {shape!r} != {d.shape!r}"
12 changes: 6 additions & 6 deletions tests/integration/prepare/test_prepare_landsat_l1.py
@@ -596,9 +596,9 @@ def test_skips_old_datasets(l1_ls7_tarball):
         datetime.now().isoformat(),
         str(l1_ls7_tarball),
     )
-    assert (
-        not expected_metadata_path.exists()
-    ), "Dataset should have been skipped due to age"
+    assert not expected_metadata_path.exists(), (
+        "Dataset should have been skipped due to age"
+    )
 
     # It should work with an old date.
     run_prepare_cli(
@@ ... @@
         "2014-05-04",
         str(l1_ls7_tarball),
     )
-    assert (
-        expected_metadata_path.exists()
-    ), "Dataset should have been packaged when using an ancient date cutoff"
+    assert expected_metadata_path.exists(), (
+        "Dataset should have been packaged when using an ancient date cutoff"
+    )
 
 
 def expected_lc08_l2_c2_post_20210507_folder(
24 changes: 12 additions & 12 deletions tests/integration/prepare/test_prepare_sentinel_l1.py
@@ -477,9 +477,9 @@ def test_filter_folder_structure_info(
         output_folder,
         input_dataset_path,
     )
-    assert (
-        expected_metadata_path.exists()
-    ), f"Expected dataset to be processed (it's within the region file)! {res.output}"
+    assert expected_metadata_path.exists(), (
+        f"Expected dataset to be processed (it's within the region file)! {res.output}"
+    )
     expected_metadata_path.unlink()
 
     # Run with a region list that doesn't include our dataset region.
@@ ... @@
         output_folder,
         input_dataset_path,
     )
-    assert (
-        not expected_metadata_path.exists()
-    ), f"Expected dataset to be filtered out! {res.output}"
+    assert not expected_metadata_path.exists(), (
+        f"Expected dataset to be filtered out! {res.output}"
+    )
 
     # Filter the time period
     res = run_prepare_cli(
@@ ... @@
         output_folder,
         input_dataset_path,
     )
-    assert (
-        not expected_metadata_path.exists()
-    ), f"Expected dataset to be filtered out! {res.output}"
+    assert not expected_metadata_path.exists(), (
+        f"Expected dataset to be filtered out! {res.output}"
+    )
 
     # Filter the time period
     res = run_prepare_cli(
@@ ... @@
         output_folder,
         input_dataset_path,
     )
-    assert (
-        not expected_metadata_path.exists()
-    ), f"Expected dataset to be filtered out! {res.output}"
+    assert not expected_metadata_path.exists(), (
+        f"Expected dataset to be filtered out! {res.output}"
+    )
 
     # Now run for real, expect an output.
     check_prepare_outputs(
6 changes: 3 additions & 3 deletions tests/integration/test_image.py
@@ -27,9 +27,9 @@ def test_rescale_intensity():
     )
     unmodified = original_image.copy()
 
-    assert np.array_equal(
-        original_image, unmodified
-    ), "rescale_intensity modified the input image"
+    assert np.array_equal(original_image, unmodified), (
+        "rescale_intensity modified the input image"
+    )
 
     staticly_rescaled = images.rescale_intensity(
         original_image, in_range=(4000, 6000), out_range=(100, 255), image_nodata=-999
12 changes: 6 additions & 6 deletions tests/integration/test_packagewagl.py
@@ -111,9 +111,9 @@ def test_whole_landsat_wagl_package(
     )
     [output_metadata] = expected_folder.rglob("*.odc-metadata.yaml")
 
-    assert reported_metadata == str(
-        output_metadata
-    ), "Cli didn't report the expected output path"
+    assert reported_metadata == str(output_metadata), (
+        "Cli didn't report the expected output path"
+    )
 
     # Checksum should include all files other than itself.
     [checksum_file] = expected_folder.rglob("*.sha1")
@@ -496,9 +496,9 @@ def _run_wagl(args):
     # The last line of output ends with the dataset path.
     words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1)
 
-    assert (
-        res.exit_code == 0
-    ), f"WAGL returned error code. Output:\n{indent(res.output, ' ' * 4)}"
+    assert res.exit_code == 0, (
+        f"WAGL returned error code. Output:\n{indent(res.output, ' ' * 4)}"
+    )
 
     return reported_metadata
21 changes: 10 additions & 11 deletions tests/integration/test_recompress.py
@@ -67,9 +67,9 @@ def test_recompress_dataset(base_in_path: Path, in_offset: str, tmp_path: Path):
     )
     assert all_output_files == {str(expected_output.relative_to(output_base))}
 
-    assert (
-        expected_output.exists()
-    ), f"No output produced in expected location {expected_output}."
+    assert expected_output.exists(), (
+        f"No output produced in expected location {expected_output}."
+    )
 
     # It should contain all of our files
     checksums, members = _get_checksums_members(expected_output)
@@ -139,9 +139,9 @@ def test_recompress_gap_mask_dataset(tmp_path: Path):
     )
     assert all_output_files == [str(expected_output)]
 
-    assert (
-        expected_output.exists()
-    ), f"No output produced in expected location {expected_output}."
+    assert expected_output.exists(), (
+        f"No output produced in expected location {expected_output}."
+    )
 
     # It should contain all of our files
     checksums, members = _get_checksums_members(expected_output)
@@ -222,9 +222,9 @@ def test_recompress_dirty_dataset(tmp_path: Path):
     )
     assert all_output_files == [str(expected_output)]
 
-    assert (
-        expected_output.exists()
-    ), f"No output produced in expected location {expected_output}."
+    assert expected_output.exists(), (
+        f"No output produced in expected location {expected_output}."
+    )
 
     checksums, members = _get_checksums_members(expected_output)
 
@@ -359,8 +359,7 @@ def test_calculate_out_path(tmp_path: Path):
     mtl.write_text("fake mtl")
     assert_path_eq(
         out_base.joinpath(
-            "L1/092_091/LT50920911991126/"
-            "LT05_L1GS_092091_19910506_20170126_01_T2.tar"
+            "L1/092_091/LT50920911991126/LT05_L1GS_092091_19910506_20170126_01_T2.tar"
         ),
         recompress._output_tar_path_from_directory(out_base, path),
     )