diff --git a/conftest.py b/conftest.py
index da1dbc274fb..862a1a1d0bc 100644
--- a/conftest.py
+++ b/conftest.py
@@ -19,7 +19,7 @@ def pytest_runtest_setup(item):
         pytest.skip("set --run-flaky option to run flaky tests")
     if "network" in item.keywords and not item.config.getoption("--run-network-tests"):
         pytest.skip(
-            "set --run-network-tests to run test requiring an " "internet connection"
+            "set --run-network-tests to run test requiring an internet connection"
         )
 
 
diff --git a/doc/examples/apply_ufunc_vectorize_1d.ipynb b/doc/examples/apply_ufunc_vectorize_1d.ipynb
index 6d18d48fdb5..a79a4868b63 100644
--- a/doc/examples/apply_ufunc_vectorize_1d.ipynb
+++ b/doc/examples/apply_ufunc_vectorize_1d.ipynb
@@ -333,7 +333,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Now our function currently only works on one vector of data which is not so useful given our 3D dataset. \n",
+    "Now our function currently only works on one vector of data which is not so useful given our 3D dataset.\n",
    "Let's try passing the whole dataset. We add a `print` statement so we can see what our function receives."
   ]
  },
diff --git a/doc/plotting.rst b/doc/plotting.rst
index 3903ea5cde9..07cc524d869 100644
--- a/doc/plotting.rst
+++ b/doc/plotting.rst
@@ -106,7 +106,7 @@ The simplest way to make a plot is to call the :py:func:`DataArray.plot()` metho
     @savefig plotting_1d_simple.png width=4in
     air1d.plot()
 
-xarray uses the coordinate name along with metadata ``attrs.long_name``, ``attrs.standard_name``, ``DataArray.name`` and ``attrs.units`` (if available) to label the axes. The names ``long_name``, ``standard_name`` and ``units`` are copied from the `CF-conventions spec `_. When choosing names, the order of precedence is ``long_name``, ``standard_name`` and finally ``DataArray.name``. The y-axis label in the above plot was constructed from the ``long_name`` and ``units`` attributes of ``air1d``. 
+xarray uses the coordinate name along with metadata ``attrs.long_name``, ``attrs.standard_name``, ``DataArray.name`` and ``attrs.units`` (if available) to label the axes. The names ``long_name``, ``standard_name`` and ``units`` are copied from the `CF-conventions spec `_. When choosing names, the order of precedence is ``long_name``, ``standard_name`` and finally ``DataArray.name``. The y-axis label in the above plot was constructed from the ``long_name`` and ``units`` attributes of ``air1d``.
 
 .. ipython:: python
diff --git a/doc/reshaping.rst b/doc/reshaping.rst
index 40de9ea799a..d07929b3b8f 100644
--- a/doc/reshaping.rst
+++ b/doc/reshaping.rst
@@ -237,7 +237,7 @@ of multi-index levels:
     mda.reorder_levels(x=["wavenumber", "band"])
 
 As of xarray v0.9 coordinate labels for each dimension are optional.
-You can also use ``.set_index`` / ``.reset_index`` to add / remove 
+You can also use ``.set_index`` / ``.reset_index`` to add / remove
 labels for one or several dimensions:
 
 .. ipython:: python
diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index 448ab1355b0..62a5faeb396 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -52,7 +52,8 @@ Documentation
 
 Internal Changes
 ~~~~~~~~~~~~~~~~
-
+- Removed stray spaces that stem from black removing new lines (:pull:`4504`).
+  By `Mathias Hauser `_.
 
 .. _whats-new.0.16.1:
@@ -178,7 +179,7 @@ Internal Changes
   older than 2.9)
   - all versions of other packages released in the last 12 months
 
-  All are up from 6 months (:issue:`4295`) 
+  All are up from 6 months (:issue:`4295`)
   `Guido Imperiale `_.
 
 - Use :py:func:`dask.array.apply_gufunc` instead of :py:func:`dask.array.blockwise` in
   :py:func:`xarray.apply_ufunc` when using ``dask='parallelized'``. (:pull:`4060`, :pull:`4391`, :pull:`4392`)
@@ -2516,7 +2517,7 @@ Breaking changes
 - A new resampling interface to match pandas' groupby-like API was added to
   :py:meth:`Dataset.resample` and :py:meth:`DataArray.resample` (:issue:`1272`).
   :ref:`Timeseries resampling ` is
-  fully supported for data with arbitrary dimensions as is both downsampling 
+  fully supported for data with arbitrary dimensions as is both downsampling
   and upsampling (including linear, quadratic, cubic, and spline interpolation).
 
   Old syntax:
@@ -3647,7 +3648,7 @@ Bug fixes
 - Restore checks for shape consistency between data and coordinates in the
   DataArray constructor (:issue:`758`).
 - Single dimension variables no longer transpose as part of a broader
-  ``.transpose``. This behavior was causing ``pandas.PeriodIndex`` dimensions 
+  ``.transpose``. This behavior was causing ``pandas.PeriodIndex`` dimensions
   to lose their type (:issue:`749`)
 - :py:class:`~xarray.Dataset` labels remain as their native type on ``.to_dataset``.
   Previously they were coerced to strings (:issue:`745`)
diff --git a/doc/why-xarray.rst b/doc/why-xarray.rst
index 7d14a6c9f9e..a5093a1ff2a 100644
--- a/doc/why-xarray.rst
+++ b/doc/why-xarray.rst
@@ -49,7 +49,7 @@ Core data structures
 --------------------
 
 xarray has two core data structures, which build upon and extend the core
-strengths of NumPy_ and pandas_. Both data structures are fundamentally N-dimensional: 
+strengths of NumPy_ and pandas_. Both data structures are fundamentally N-dimensional:
 
 - :py:class:`~xarray.DataArray` is our implementation of a labeled,
   N-dimensional array. It is an N-D generalization of a
   :py:class:`pandas.Series`. The name
diff --git a/xarray/backends/api.py b/xarray/backends/api.py
index 0317c61ab6f..56bffa05076 100644
--- a/xarray/backends/api.py
+++ b/xarray/backends/api.py
@@ -87,7 +87,7 @@ def _get_default_engine_grib():
     if msgs:
         raise ValueError(" or\n".join(msgs))
     else:
-        raise ValueError("PyNIO or cfgrib is required for accessing " "GRIB files")
+        raise ValueError("PyNIO or cfgrib is required for accessing GRIB files")
 
 
 def _get_default_engine_gz():
@@ -1228,7 +1228,7 @@ def save_mfdataset(
     """
     if mode == "w" and len(set(paths)) < len(paths):
         raise ValueError(
-            "cannot use mode='w' when writing multiple " "datasets to the same path"
+            "cannot use mode='w' when writing multiple datasets to the same path"
         )
 
     for obj in datasets:
diff --git a/xarray/backends/h5netcdf_.py b/xarray/backends/h5netcdf_.py
index 2d5292f5d10..163062664cb 100644
--- a/xarray/backends/h5netcdf_.py
+++ b/xarray/backends/h5netcdf_.py
@@ -280,7 +280,7 @@ def prepare_variable(
             and "compression_opts" in encoding
             and encoding["complevel"] != encoding["compression_opts"]
         ):
-            raise ValueError("'complevel' and 'compression_opts' encodings " "mismatch")
+            raise ValueError("'complevel' and 'compression_opts' encodings mismatch")
         complevel = encoding.pop("complevel", 0)
         if complevel != 0:
             encoding.setdefault("compression_opts", complevel)
diff --git a/xarray/backends/netcdf3.py b/xarray/backends/netcdf3.py
index 51d7fce22a0..001af0bf8e1 100644
--- a/xarray/backends/netcdf3.py
+++ b/xarray/backends/netcdf3.py
@@ -20,7 +20,8 @@
     "uint",
     "int64",
     "uint64",
-    "float" "real",
+    "float",
+    "real",
     "double",
     "bool",
     "string",
diff --git a/xarray/backends/scipy_.py b/xarray/backends/scipy_.py
index b7d91a840fe..3e1c7e139aa 100644
--- a/xarray/backends/scipy_.py
+++ b/xarray/backends/scipy_.py
@@ -70,9 +70,7 @@ def _open_scipy_netcdf(filename, mode, mmap, version):
     except TypeError as e:
         # TODO: gzipped loading only works with NetCDF3 files.
         if "is not a valid NetCDF 3 file" in e.message:
-            raise ValueError(
-                "gzipped file loading only supports " "NetCDF 3 files."
-            )
+            raise ValueError("gzipped file loading only supports NetCDF 3 files.")
         else:
             raise
 
@@ -110,9 +108,7 @@ def __init__(
         self, filename_or_obj, mode="r", format=None, group=None, mmap=None, lock=None
     ):
         if group is not None:
-            raise ValueError(
-                "cannot save to a group with the " "scipy.io.netcdf backend"
-            )
+            raise ValueError("cannot save to a group with the scipy.io.netcdf backend")
 
         if format is None or format == "NETCDF3_64BIT":
             version = 2
diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py
index 8482a1ba269..4b46eb2e1c0 100644
--- a/xarray/backends/zarr.py
+++ b/xarray/backends/zarr.py
@@ -207,7 +207,7 @@ def extract_zarr_variable_encoding(variable, raise_on_invalid=False, name=None):
         invalid = [k for k in encoding if k not in valid_encodings]
         if invalid:
             raise ValueError(
-                "unexpected encoding parameters for zarr " "backend: %r" % invalid
+                "unexpected encoding parameters for zarr backend: %r" % invalid
             )
     else:
         for k in list(encoding):
diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py
index 99c7d041671..79efc16cc8d 100644
--- a/xarray/coding/cftime_offsets.py
+++ b/xarray/coding/cftime_offsets.py
@@ -102,7 +102,7 @@ def __sub__(self, other):
         import cftime
 
         if isinstance(other, cftime.datetime):
-            raise TypeError("Cannot subtract a cftime.datetime " "from a time offset.")
+            raise TypeError("Cannot subtract a cftime.datetime from a time offset.")
         elif type(other) == type(self):
             return type(self)(self.n - other.n)
         else:
@@ -122,7 +122,7 @@ def __radd__(self, other):
 
     def __rsub__(self, other):
         if isinstance(other, BaseCFTimeOffset) and type(self) != type(other):
-            raise TypeError("Cannot subtract cftime offsets of differing " "types")
+            raise TypeError("Cannot subtract cftime offsets of differing types")
         return -self + other
 
     def __apply__(self):
diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py
index faf212e86ee..02d8ca00bf9 100644
--- a/xarray/core/accessor_str.py
+++ b/xarray/core/accessor_str.py
@@ -496,7 +496,7 @@ def zfill(self, width):
 
         Strings in the array are padded with '0' characters on the
         left of the string to reach a total string length `width`. Strings
-        in the array with length greater or equal to `width` are unchanged. 
+        in the array with length greater or equal to `width` are unchanged.
 
         Parameters
         ----------
@@ -879,7 +879,7 @@ def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
         if is_compiled_re:
             if (case is not None) or (flags != 0):
                 raise ValueError(
-                    "case and flags cannot be set" " when pat is a compiled regex"
+                    "case and flags cannot be set when pat is a compiled regex"
                 )
         else:
             # not a compiled regex
@@ -903,9 +903,7 @@ def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
                     "pattern with regex=False"
                 )
             if callable(repl):
-                raise ValueError(
-                    "Cannot use a callable replacement when " "regex=False"
-                )
+                raise ValueError("Cannot use a callable replacement when regex=False")
 
         f = lambda x: x.replace(pat, repl, n)
         return self._apply(f)
diff --git a/xarray/core/common.py b/xarray/core/common.py
index b7ae9121700..eda31a16558 100644
--- a/xarray/core/common.py
+++ b/xarray/core/common.py
@@ -626,7 +626,7 @@ def pipe(
             func, target = func
             if target in kwargs:
                 raise ValueError(
-                    "%s is both the pipe target and a keyword " "argument" % target
+                    "%s is both the pipe target and a keyword argument" % target
                 )
             kwargs[target] = self
         return func(*args, **kwargs)
diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py
index 94b7f702920..bf17a2da90f 100644
--- a/xarray/core/dataarray.py
+++ b/xarray/core/dataarray.py
@@ -2565,7 +2565,7 @@ def from_dict(cls, d: dict) -> "DataArray":
             }
 
         where "t" is the name of the dimesion, "a" is the name of the array,
-        and x and t are lists, numpy.arrays, or pandas objects. 
+        and x and t are lists, numpy.arrays, or pandas objects.
 
         Parameters
         ----------
@@ -2949,7 +2949,7 @@ def roll(
             Positive offsets roll to the right; negative offsets roll to the
             left.
         roll_coords : bool
-            Indicates whether to roll the coordinates by the offset 
+            Indicates whether to roll the coordinates by the offset
             The current default of roll_coords (None, equivalent to True) is
             deprecated and will change to False in a future version.
             Explicitly pass roll_coords to silence the warning.
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py
index 126c3deceb8..4b12652f630 100644
--- a/xarray/core/dataset.py
+++ b/xarray/core/dataset.py
@@ -197,7 +197,7 @@ def calculate_dimensions(variables: Mapping[Hashable, Variable]) -> Dict[Hashabl
         for dim, size in zip(var.dims, var.shape):
             if dim in scalar_vars:
                 raise ValueError(
-                    "dimension %r already exists as a scalar " "variable" % dim
+                    "dimension %r already exists as a scalar variable" % dim
                 )
             if dim not in dims:
                 dims[dim] = size
@@ -286,7 +286,7 @@ def merge_indexes(
     new_variables = {k: v for k, v in variables.items() if k not in vars_to_remove}
     new_variables.update(vars_to_replace)
 
-    # update dimensions if necessary GH: 3512
+    # update dimensions if necessary, GH: 3512
     for k, v in new_variables.items():
         if any(d in dims_to_replace for d in v.dims):
             new_dims = [dims_to_replace.get(d, d) for d in v.dims]
@@ -1314,7 +1314,7 @@ def __setitem__(self, key: Hashable, value) -> None:
         """
         if utils.is_dict_like(key):
             raise NotImplementedError(
-                "cannot yet use a dictionary as a key " "to set Dataset values"
+                "cannot yet use a dictionary as a key to set Dataset values"
             )
         self.update({key: value})
 
@@ -1673,7 +1673,7 @@ def to_zarr(
         if mode not in ["w", "w-", "a"]:
             # TODO: figure out how to handle 'r+'
             raise ValueError(
-                "The only supported options for mode are 'w'," "'w-' and 'a'."
+                "The only supported options for mode are 'w', 'w-' and 'a'."
             )
 
         from ..backends.api import to_zarr
@@ -5107,9 +5107,7 @@ def diff(self, dim, n=1, label="upper"):
         elif label == "lower":
             kwargs_new = kwargs_start
         else:
-            raise ValueError(
-                "The 'label' argument has to be either " "'upper' or 'lower'"
-            )
+            raise ValueError("The 'label' argument has to be either 'upper' or 'lower'")
 
         variables = {}
diff --git a/xarray/core/indexes.py b/xarray/core/indexes.py
index 84cf35d3b4f..a5d1896e74c 100644
--- a/xarray/core/indexes.py
+++ b/xarray/core/indexes.py
@@ -100,7 +100,7 @@ def isel_variable_and_index(
 
     if len(variable.dims) > 1:
         raise NotImplementedError(
-            "indexing multi-dimensional variable with indexes is not " "supported yet"
+            "indexing multi-dimensional variable with indexes is not supported yet"
         )
 
     new_variable = variable.isel(indexers)
diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py
index 9627f431cb6..d695c8c8085 100644
--- a/xarray/core/indexing.py
+++ b/xarray/core/indexing.py
@@ -121,7 +121,7 @@ def convert_label_indexer(index, label, index_name="", method=None, tolerance=No
     if isinstance(label, slice):
         if method is not None or tolerance is not None:
             raise NotImplementedError(
-                "cannot use ``method`` argument if any indexers are " "slice objects"
+                "cannot use ``method`` argument if any indexers are slice objects"
             )
         indexer = index.slice_indexer(
             _sanitize_slice_element(label.start),
diff --git a/xarray/core/options.py b/xarray/core/options.py
index 5a78aa10b90..a14473c9b97 100644
--- a/xarray/core/options.py
+++ b/xarray/core/options.py
@@ -71,7 +71,7 @@ def _get_keep_attrs(default):
             return global_choice
         else:
             raise ValueError(
-                "The global option keep_attrs must be one of" " True, False or 'default'."
+                "The global option keep_attrs must be one of True, False or 'default'."
             )
diff --git a/xarray/core/resample.py b/xarray/core/resample.py
index a00dedc8d05..0a20d918bf1 100644
--- a/xarray/core/resample.py
+++ b/xarray/core/resample.py
@@ -270,7 +270,7 @@ def __init__(self, *args, dim=None, resample_dim=None, **kwargs):
 
     def map(self, func, args=(), shortcut=None, **kwargs):
         """Apply a function over each Dataset in the groups generated for
-        resampling and concatenate them together into a new Dataset. 
+        resampling and concatenate them together into a new Dataset.
 
         `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
         in this group.
diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py
index 31d3238e978..b80a4d313d9 100644
--- a/xarray/core/rolling_exp.py
+++ b/xarray/core/rolling_exp.py
@@ -31,7 +31,7 @@ def _get_center_of_mass(comass, span, halflife, alpha):
     """
     valid_count = count_not_none(comass, span, halflife, alpha)
     if valid_count > 1:
-        raise ValueError("comass, span, halflife, and alpha " "are mutually exclusive")
+        raise ValueError("comass, span, halflife, and alpha are mutually exclusive")
 
     # Convert to center of mass; domain checks ensure 0 < alpha <= 1
     if comass is not None:
diff --git a/xarray/core/variable.py b/xarray/core/variable.py
index 57dfe3d9091..26b9835387e 100644
--- a/xarray/core/variable.py
+++ b/xarray/core/variable.py
@@ -1060,7 +1060,7 @@ def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA):
         """
         import sparse
 
-        # TODO what to do if dask-backended?
+        # TODO: what to do if dask-backended?
         if fill_value is dtypes.NA:
             dtype, fill_value = dtypes.maybe_promote(self.dtype)
         else:
@@ -1286,7 +1286,7 @@ def pad(
         if isinstance(end_values, dict):
             end_values = self._pad_options_dim_to_index(end_values)
 
-        # workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303 
+        # workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303
         if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]:
             stat_length = [(n, n) for n in self.data.shape]  # type: ignore
@@ -2135,7 +2135,7 @@ def func(self, other):
                 raise TypeError("cannot add a Dataset to a Variable in-place")
             self_data, other_data, dims = _broadcast_compat_data(self, other)
             if dims != self.dims:
-                raise ValueError("dimensions cannot change for in-place " "operations")
+                raise ValueError("dimensions cannot change for in-place operations")
             with np.errstate(all="ignore"):
                 self.values = f(self_data, other_data)
             return self
diff --git a/xarray/plot/dataset_plot.py b/xarray/plot/dataset_plot.py
index c6158163b4d..7ba0f93f33a 100644
--- a/xarray/plot/dataset_plot.py
+++ b/xarray/plot/dataset_plot.py
@@ -38,7 +38,7 @@ def _infer_meta_data(ds, x, y, hue, hue_style, add_guide):
 
         if not hue_is_numeric and (hue_style == "continuous"):
             raise ValueError(
-                "Cannot create a colorbar for a non numeric" " coordinate: " + hue
+                f"Cannot create a colorbar for a non numeric coordinate: {hue}"
             )
 
         if add_guide is None or add_guide is True:
@@ -54,9 +54,7 @@ def _infer_meta_data(ds, x, y, hue, hue_style, add_guide):
             add_colorbar = False
 
     if hue_style is not None and hue_style not in ["discrete", "continuous"]:
-        raise ValueError(
-            "hue_style must be either None, 'discrete' " "or 'continuous'."
-        )
+        raise ValueError("hue_style must be either None, 'discrete' or 'continuous'.")
 
     if hue:
         hue_label = label_from_attrs(ds[hue])
@@ -131,7 +129,7 @@ def _parse_size(data, norm):
     elif isinstance(norm, tuple):
         norm = mpl.colors.Normalize(*norm)
     elif not isinstance(norm, mpl.colors.Normalize):
-        err = "``size_norm`` must be None, tuple, " "or Normalize object."
+        err = "``size_norm`` must be None, tuple, or Normalize object."
         raise ValueError(err)
 
     norm.clip = True
diff --git a/xarray/plot/facetgrid.py b/xarray/plot/facetgrid.py
index 7860a082ab2..8ed8815a060 100644
--- a/xarray/plot/facetgrid.py
+++ b/xarray/plot/facetgrid.py
@@ -131,7 +131,7 @@ def __init__(
             ncol = len(data[col])
             nfacet = nrow * ncol
             if col_wrap is not None:
-                warnings.warn("Ignoring col_wrap since both col and row " "were passed")
+                warnings.warn("Ignoring col_wrap since both col and row were passed")
         elif row and not col:
             single_group = row
         elif not row and col:
diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py
index 0d44d5fd64a..2f10240e1b7 100644
--- a/xarray/plot/plot.py
+++ b/xarray/plot/plot.py
@@ -357,7 +357,7 @@ def step(darray, *args, where="pre", drawstyle=None, ds=None, **kwargs):
         Additional arguments following :py:func:`xarray.plot.line`
     """
     if where not in {"pre", "post", "mid"}:
-        raise ValueError("'where' argument to step must be " "'pre', 'post' or 'mid'")
+        raise ValueError("'where' argument to step must be 'pre', 'post' or 'mid'")
 
     if ds is not None:
         if drawstyle is None:
@@ -876,7 +876,7 @@ def imshow(x, y, z, ax, **kwargs):
 
     if x.ndim != 1 or y.ndim != 1:
         raise ValueError(
-            "imshow requires 1D coordinates, try using " "pcolormesh or contour(f)"
+            "imshow requires 1D coordinates, try using pcolormesh or contour(f)"
         )
 
     # Centering the pixels- Assumes uniform spacing
diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py
index 6cd44dbc098..e1793a0b6d4 100644
--- a/xarray/plot/utils.py
+++ b/xarray/plot/utils.py
@@ -421,9 +421,9 @@ def get_axis(figsize=None, size=None, aspect=None, ax=None, **kwargs):
 
     if figsize is not None:
         if ax is not None:
-            raise ValueError("cannot provide both `figsize` and " "`ax` arguments")
+            raise ValueError("cannot provide both `figsize` and `ax` arguments")
         if size is not None:
-            raise ValueError("cannot provide both `figsize` and " "`size` arguments")
+            raise ValueError("cannot provide both `figsize` and `size` arguments")
         _, ax = plt.subplots(figsize=figsize)
     elif size is not None:
         if ax is not None:
diff --git a/xarray/tests/test_combine.py b/xarray/tests/test_combine.py
index fa1b749b1a7..deaf58b2e59 100644
--- a/xarray/tests/test_combine.py
+++ b/xarray/tests/test_combine.py
@@ -169,7 +169,7 @@ def test_coord_not_monotonic(self):
         ds1 = Dataset({"x": [3, 2]})
         with raises_regex(
             ValueError,
-            "Coordinate variable x is neither " "monotonically increasing nor",
+            "Coordinate variable x is neither monotonically increasing nor",
         ):
             _infer_concat_order_from_coords([ds1, ds0])
 
@@ -556,11 +556,11 @@ def test_invalid_hypercube_input(self):
         ds = create_test_data
 
         datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4)]]
-        with raises_regex(ValueError, "sub-lists do not have " "consistent lengths"):
+        with raises_regex(ValueError, "sub-lists do not have consistent lengths"):
             combine_nested(datasets, concat_dim=["dim1", "dim2"])
 
         datasets = [[ds(0), ds(1)], [[ds(3), ds(4)]]]
-        with raises_regex(ValueError, "sub-lists do not have " "consistent depths"):
+        with raises_regex(ValueError, "sub-lists do not have consistent depths"):
            combine_nested(datasets, concat_dim=["dim1", "dim2"])
 
         datasets = [[ds(0), ds(1)], [ds(3), ds(4)]]
@@ -798,7 +798,7 @@ def test_check_for_impossible_ordering(self):
         ds0 = Dataset({"x": [0, 1, 5]})
         ds1 = Dataset({"x": [2, 3]})
         with raises_regex(
-            ValueError, "does not have monotonic global indexes" " along dimension x"
+            ValueError, "does not have monotonic global indexes along dimension x"
         ):
             combine_by_coords([ds1, ds0])
diff --git a/xarray/tests/test_conventions.py b/xarray/tests/test_conventions.py
index f7b113d0110..dcd9541dd79 100644
--- a/xarray/tests/test_conventions.py
+++ b/xarray/tests/test_conventions.py
@@ -371,7 +371,7 @@ def roundtrip(
         data.dump_to_store(store, **save_kwargs)
         yield open_dataset(store, **open_kwargs)
 
-    @pytest.mark.skip("cannot roundtrip coordinates yet for " "CFEncodedInMemoryStore")
+    @pytest.mark.skip("cannot roundtrip coordinates yet for CFEncodedInMemoryStore")
     def test_roundtrip_coordinates(self):
         pass
diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py
index 40e2bdfc6de..08caf750687 100644
--- a/xarray/tests/test_dataset.py
+++ b/xarray/tests/test_dataset.py
@@ -1413,7 +1413,7 @@ def test_sel_dataarray_mindex(self):
 
         with raises_regex(
             ValueError,
-            "Vectorized selection is " "not available along MultiIndex variable:" " x",
+            "Vectorized selection is not available along MultiIndex variable: x",
         ):
             mds.sel(
                 x=xr.DataArray(
@@ -4250,7 +4250,7 @@ def test_to_and_from_dict(self):
             "t": {"data": t, "dims": "t"},
             "b": {"dims": "t", "data": y},
         }
-        with raises_regex(ValueError, "cannot convert dict " "without the key 'dims'"):
+        with raises_regex(ValueError, "cannot convert dict without the key 'dims'"):
             Dataset.from_dict(d)
 
     def test_to_and_from_dict_with_time_dim(self):
@@ -4841,9 +4841,7 @@ def mean_only_one_axis(x, axis):
         actual = ds.reduce(mean_only_one_axis, "y")
         assert_identical(expected, actual)
 
-        with raises_regex(
-            TypeError, "missing 1 required positional argument: " "'axis'"
-        ):
+        with raises_regex(TypeError, "missing 1 required positional argument: 'axis'"):
             ds.reduce(mean_only_one_axis)
 
         with raises_regex(TypeError, "non-integer axis"):
diff --git a/xarray/tests/test_duck_array_ops.py b/xarray/tests/test_duck_array_ops.py
index 44b2ed23357..1342950f3e5 100644
--- a/xarray/tests/test_duck_array_ops.py
+++ b/xarray/tests/test_duck_array_ops.py
@@ -482,9 +482,7 @@ def test_argmin_max(dim_num, dtype, contains_nan, dask, func, skipna, aggdim):
 
     if contains_nan:
         if not skipna:
-            pytest.skip(
-                "numpy's argmin (not nanargmin) does not handle " "object-dtype"
-            )
+            pytest.skip("numpy's argmin (not nanargmin) does not handle object-dtype")
     if skipna and np.dtype(dtype).kind in "iufc":
         pytest.skip("numpy's nanargmin raises ValueError for all nan axis")
     da = construct_dataarray(dim_num, dtype, contains_nan=contains_nan, dask=dask)
diff --git a/xarray/tests/test_merge.py b/xarray/tests/test_merge.py
index 26a1a82ae26..0994ed9a485 100644
--- a/xarray/tests/test_merge.py
+++ b/xarray/tests/test_merge.py
@@ -64,7 +64,7 @@ def test_merge_arrays_attrs_default(self):
         assert actual.identical(expected)
 
     @pytest.mark.parametrize(
-        "combine_attrs, var1_attrs, var2_attrs, expected_attrs, " "expect_exception",
+        "combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception",
        [
            (
                "no_conflicts",
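
A note on why this cleanup is safe: Python concatenates adjacent string literals at compile time, so fusing the split literals that black left behind never changes the resulting message, except in the two places where the split itself had introduced a bug. The short standalone Python sketch below (illustrative only, not part of the patch; the variable names are made up for the example) reproduces those failure modes:

# Adjacent string literals are joined at compile time; black's line-joining
# merely exposed the split on a single line, and this patch fuses them.
msg = "cannot use mode='w' when writing multiple " "datasets to the same path"
assert msg == "cannot use mode='w' when writing multiple datasets to the same path"

# Bug fixed in xarray/backends/netcdf3.py: a missing comma silently fused two
# reserved words into the single bogus entry "floatreal".
before = ["float" "real", "double"]  # == ["floatreal", "double"]
after = ["float", "real", "double"]  # both names survive as separate entries
assert "float" not in before and "float" in after

# Bug fixed in Dataset.to_zarr: the join point lacked a space after the comma.
before_msg = "The only supported options for mode are 'w'," "'w-' and 'a'."
assert before_msg == "The only supported options for mode are 'w','w-' and 'a'."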