
Commit 361e730

igorsugak authored and facebook-github-bot committed
replace uses of np.ndarray with npt.NDArray (pytorch#2584)
Summary:
X-link: pytorch/audio#3846
X-link: pytorch/opacus#680
X-link: pytorch/captum#1387
X-link: pytorch/audio#3845

This replaces uses of `numpy.ndarray` in type annotations with `numpy.typing.NDArray`. In NumPy 1.24.0+, `numpy.ndarray` is annotated as a generic type. Without template parameters it triggers static analysis errors:

```counterexample
Generic type `ndarray` expects 2 type parameters.
```

`numpy.typing.NDArray` is an alias that provides default template parameters.

Differential Revision: D64619891
1 parent d2c6f5e commit 361e730
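For context, here is a minimal sketch of the annotation pattern the diffs below apply. It sits outside the BoTorch codebase and the function names are hypothetical; it only illustrates why a strict type checker may flag a bare `np.ndarray` annotation under the NumPy 1.24+ stubs, and how `npt.NDArray` avoids that while staying behaviorally identical at runtime.

```python
import numpy as np
import numpy.typing as npt


# Before: bare `np.ndarray` is a generic type under newer NumPy stubs, so a
# strict checker may demand explicit type parameters for it.
def scale_before(x: np.ndarray, factor: float) -> np.ndarray:
    return x * factor


# After: `npt.NDArray` carries a default dtype parameter and can optionally be
# narrowed, e.g. `npt.NDArray[np.float64]`, when a specific dtype is wanted.
def scale_after(x: npt.NDArray, factor: float) -> npt.NDArray:
    return x * factor


if __name__ == "__main__":
    print(scale_after(np.arange(3, dtype=np.float64), 2.0))
```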

12 files changed: +64 −53 lines changed


botorch/exceptions/errors.py (+3 −3)

@@ -10,7 +10,7 @@
 
 from typing import Any
 
-import numpy as np
+import numpy.typing as npt
 
 
 class BotorchError(Exception):
@@ -59,7 +59,7 @@ class OptimizationTimeoutError(BotorchError):
     r"""Exception raised when optimization times out."""
 
     def __init__(
-        self, /, *args: Any, current_x: np.ndarray, runtime: float, **kwargs: Any
+        self, /, *args: Any, current_x: npt.NDArray, runtime: float, **kwargs: Any
     ) -> None:
         r"""
         Args:
@@ -77,7 +77,7 @@ def __init__(
 class OptimizationGradientError(BotorchError, RuntimeError):
     r"""Exception raised when gradient array `gradf` containts NaNs."""
 
-    def __init__(self, /, *args: Any, current_x: np.ndarray, **kwargs: Any) -> None:
+    def __init__(self, /, *args: Any, current_x: npt.NDArray, **kwargs: Any) -> None:
         r"""
         Args:
             *args: Standard args to `BoTorchError`.

botorch/generation/gen.py (+3 −2)

@@ -17,6 +17,7 @@
 from typing import Any, NoReturn
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from botorch.acquisition import AcquisitionFunction
 from botorch.exceptions.errors import OptimizationGradientError
@@ -191,7 +192,7 @@ def gen_candidates_scipy(
     with_grad = options.get("with_grad", True)
     if with_grad:
 
-        def f_np_wrapper(x: np.ndarray, f: Callable):
+        def f_np_wrapper(x: npt.NDArray, f: Callable):
             """Given a torch callable, compute value + grad given a numpy array."""
             if np.isnan(x).any():
                 raise RuntimeError(
@@ -223,7 +224,7 @@ def f_np_wrapper(x: np.ndarray, f: Callable):
 
     else:
 
-        def f_np_wrapper(x: np.ndarray, f: Callable):
+        def f_np_wrapper(x: npt.NDArray, f: Callable):
             X = torch.from_numpy(x).to(initial_conditions).view(shapeX).contiguous()
             with torch.no_grad():
                 X_fix = fix_features(X=X, fixed_features=fixed_features)

botorch/models/pairwise_gp.py (+5 −4)

@@ -26,6 +26,7 @@
 from typing import Any
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from botorch.acquisition.objective import PosteriorTransform
 from botorch.exceptions import UnsupportedError
@@ -397,13 +398,13 @@ def _prior_predict(self, X: Tensor) -> tuple[Tensor, Tensor]:
 
     def _grad_posterior_f(
         self,
-        utility: Tensor | np.ndarray,
+        utility: Tensor | npt.NDArray,
         datapoints: Tensor,
         D: Tensor,
         covar_chol: Tensor,
         covar_inv: Tensor | None = None,
         ret_np: bool = False,
-    ) -> Tensor | np.ndarray:
+    ) -> Tensor | npt.NDArray:
         r"""Compute the gradient of S loss wrt to f/utility in [Chu2005preference]_.
 
         For finding f_map, which is negative of the log posterior, i.e., -log(p(f|D))
@@ -441,13 +442,13 @@ def _grad_posterior_f(
 
     def _hess_posterior_f(
         self,
-        utility: Tensor | np.ndarray,
+        utility: Tensor | npt.NDArray,
         datapoints: Tensor,
         D: Tensor,
         covar_chol: Tensor,
         covar_inv: Tensor,
         ret_np: bool = False,
-    ) -> Tensor | np.ndarray:
+    ) -> Tensor | npt.NDArray:
         r"""Compute the hessian of S loss wrt utility for finding f_map.
 
         which is negative of the log posterior, i.e., -log(p(f|D))

botorch/optim/closures/core.py (+13 −11)

@@ -13,6 +13,8 @@
 from functools import partial
 from typing import Any
 
+import numpy.typing as npt
+
 import torch
 from botorch.optim.utils import (
     _handle_numerical_errors,
@@ -21,7 +23,7 @@
 )
 from botorch.optim.utils.numpy_utils import as_ndarray
 from botorch.utils.context_managers import zero_grad_ctx
-from numpy import float64 as np_float64, full as np_full, ndarray, zeros as np_zeros
+from numpy import float64 as np_float64, full as np_full, zeros as np_zeros
 from torch import Tensor
 
 
@@ -82,10 +84,10 @@ def __init__(
         self,
         closure: Callable[[], tuple[Tensor, Sequence[Tensor | None]]],
         parameters: dict[str, Tensor],
-        as_array: Callable[[Tensor], ndarray] = None,  # pyre-ignore [9]
-        as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor,
-        get_state: Callable[[], ndarray] = None,  # pyre-ignore [9]
-        set_state: Callable[[ndarray], None] = None,  # pyre-ignore [9]
+        as_array: Callable[[Tensor], npt.NDArray] = None,  # pyre-ignore [9]
+        as_tensor: Callable[[npt.NDArray], Tensor] = torch.as_tensor,
+        get_state: Callable[[], npt.NDArray] = None,  # pyre-ignore [9]
+        set_state: Callable[[npt.NDArray], None] = None,  # pyre-ignore [9]
         fill_value: float = 0.0,
         persistent: bool = True,
     ) -> None:
@@ -140,11 +142,11 @@ def __init__(
 
         self.fill_value = fill_value
         self.persistent = persistent
-        self._gradient_ndarray: ndarray | None = None
+        self._gradient_ndarray: npt.NDArray | None = None
 
     def __call__(
-        self, state: ndarray | None = None, **kwargs: Any
-    ) -> tuple[ndarray, ndarray]:
+        self, state: npt.NDArray | None = None, **kwargs: Any
+    ) -> tuple[npt.NDArray, npt.NDArray]:
         if state is not None:
             self.state = state
 
@@ -164,14 +166,14 @@ def __call__(
         return value, grads
 
     @property
-    def state(self) -> ndarray:
+    def state(self) -> npt.NDArray:
         return self._get_state()
 
     @state.setter
-    def state(self, state: ndarray) -> None:
+    def state(self, state: npt.NDArray) -> None:
         self._set_state(state)
 
-    def _get_gradient_ndarray(self, fill_value: float | None = None) -> ndarray:
+    def _get_gradient_ndarray(self, fill_value: float | None = None) -> npt.NDArray:
         if self.persistent and self._gradient_ndarray is not None:
             if fill_value is not None:
                 self._gradient_ndarray.fill(fill_value)

botorch/optim/core.py (+5 −3)

@@ -17,10 +17,12 @@
 from time import monotonic
 from typing import Any
 
+import numpy.typing as npt
+
 from botorch.optim.closures import NdarrayOptimizationClosure
 from botorch.optim.utils.numpy_utils import get_bounds_as_ndarray
 from botorch.optim.utils.timeout import minimize_with_timeout
-from numpy import asarray, float64 as np_float64, ndarray
+from numpy import asarray, float64 as np_float64
 from torch import Tensor
 from torch.optim.adam import Adam
 from torch.optim.optimizer import Optimizer
@@ -60,7 +62,7 @@ def scipy_minimize(
     parameters: dict[str, Tensor],
     bounds: dict[str, tuple[float | None, float | None]] | None = None,
     callback: Callable[[dict[str, Tensor], OptimizationResult], None] | None = None,
-    x0: ndarray | None = None,
+    x0: npt.NDArray | None = None,
     method: str = "L-BFGS-B",
     options: dict[str, Any] | None = None,
     timeout_sec: float | None = None,
@@ -98,7 +100,7 @@ def scipy_minimize(
     else:
         call_counter = count(1)  # callbacks are typically made at the end of each iter
 
-        def wrapped_callback(x: ndarray):
+        def wrapped_callback(x: npt.NDArray):
            result = OptimizationResult(
                step=next(call_counter),
                fval=float(wrapped_closure(x)[0]),

botorch/optim/parameter_constraints.py (+5 −4)

@@ -16,6 +16,7 @@
 from typing import Union
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from botorch.exceptions.errors import CandidateGenerationError, UnsupportedError
 from scipy.optimize import Bounds
@@ -131,7 +132,7 @@ def make_scipy_linear_constraints(
 
 
 def eval_lin_constraint(
-    x: np.ndarray, flat_idxr: list[int], coeffs: np.ndarray, rhs: float
+    x: npt.NDArray, flat_idxr: list[int], coeffs: npt.NDArray, rhs: float
 ) -> np.float64:
     r"""Evaluate a single linear constraint.
 
@@ -148,8 +149,8 @@ def eval_lin_constraint(
 
 
 def lin_constraint_jac(
-    x: np.ndarray, flat_idxr: list[int], coeffs: np.ndarray, n: int
-) -> np.ndarray:
+    x: npt.NDArray, flat_idxr: list[int], coeffs: npt.NDArray, n: int
+) -> npt.NDArray:
     r"""Return the Jacobian associated with a linear constraint.
 
     Args:
@@ -167,7 +168,7 @@ def lin_constraint_jac(
     return jac
 
 
-def _arrayify(X: Tensor) -> np.ndarray:
+def _arrayify(X: Tensor) -> npt.NDArray:
     r"""Convert a torch.Tensor (any dtype or device) to a numpy (double) array.
 
     Args:

botorch/optim/utils/common.py (+3 −2)

@@ -14,12 +14,13 @@
 from warnings import warn_explicit, WarningMessage
 
 import numpy as np
+import numpy.typing as npt
 from linear_operator.utils.errors import NanError, NotPSDError
 
 
 def _handle_numerical_errors(
-    error: RuntimeError, x: np.ndarray, dtype: np.dtype | None = None
-) -> tuple[np.ndarray, np.ndarray]:
+    error: RuntimeError, x: npt.NDArray, dtype: np.dtype | None = None
+) -> tuple[npt.NDArray, npt.NDArray]:
     if isinstance(error, NotPSDError):
         raise error
     error_message = error.args[0] if len(error.args) > 0 else ""

botorch/optim/utils/numpy_utils.py (+8 −8)

@@ -13,8 +13,8 @@
 from itertools import tee
 
 import numpy as np
+import numpy.typing as npt
 import torch
-from numpy import ndarray
 from torch import Tensor
 
 
@@ -35,7 +35,7 @@
 
 def as_ndarray(
     values: Tensor, dtype: np.dtype | None = None, inplace: bool = True
-) -> ndarray:
+) -> npt.NDArray:
     r"""Helper for going from torch.Tensor to numpy.ndarray.
 
     Args:
@@ -67,10 +67,10 @@ def as_ndarray(
 
 def get_tensors_as_ndarray_1d(
     tensors: Iterator[Tensor] | dict[str, Tensor],
-    out: ndarray | None = None,
+    out: npt.NDArray | None = None,
     dtype: np.dtype | str | None = None,
-    as_array: Callable[[Tensor], ndarray] = as_ndarray,
-) -> ndarray:
+    as_array: Callable[[Tensor], npt.NDArray] = as_ndarray,
+) -> npt.NDArray:
     # Create a pair of iterators, one for setup and one for data transfer
     named_tensors_iter, named_tensors_iter2 = tee(
         iter(tensors.items()) if isinstance(tensors, dict) else enumerate(tensors), 2
@@ -112,8 +112,8 @@ def get_tensors_as_ndarray_1d(
 
 def set_tensors_from_ndarray_1d(
     tensors: Iterator[Tensor] | dict[str, Tensor],
-    array: ndarray,
-    as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor,
+    array: npt.NDArray,
+    as_tensor: Callable[[npt.NDArray], Tensor] = torch.as_tensor,
 ) -> None:
     r"""Sets the values of one more tensors based off of a vector of assignments."""
     named_tensors_iter = (
@@ -137,7 +137,7 @@ def set_tensors_from_ndarray_1d(
 def get_bounds_as_ndarray(
     parameters: dict[str, Tensor],
     bounds: dict[str, tuple[float | Tensor | None, float | Tensor | None]],
-) -> np.ndarray | None:
+) -> npt.NDArray | None:
     r"""Helper method for converting bounds into an ndarray.
 
     Args:

botorch/optim/utils/timeout.py (+6 −6)

@@ -11,14 +11,14 @@
 from collections.abc import Callable, Sequence
 from typing import Any
 
-import numpy as np
+import numpy.typing as npt
 from botorch.exceptions.errors import OptimizationTimeoutError
 from scipy import optimize
 
 
 def minimize_with_timeout(
-    fun: Callable[[np.ndarray, ...], float],
-    x0: np.ndarray,
+    fun: Callable[[npt.NDArray, ...], float],
+    x0: npt.NDArray,
     args: tuple[Any, ...] = (),
     method: str | None = None,
     jac: str | Callable | bool | None = None,
@@ -45,7 +45,7 @@ def minimize_with_timeout(
     start_time = time.monotonic()
     callback_data = {"num_iterations": 0}  # update from withing callback below
 
-    def timeout_callback(xk: np.ndarray) -> bool:
+    def timeout_callback(xk: npt.NDArray) -> bool:
         runtime = time.monotonic() - start_time
         callback_data["num_iterations"] += 1
         if runtime > timeout_sec:
@@ -63,14 +63,14 @@ def timeout_callback(xk: np.ndarray) -> bool:
     elif method == "trust-constr":  # special signature
 
         def wrapped_callback(
-            xk: np.ndarray, state: optimize.OptimizeResult
+            xk: npt.NDArray, state: optimize.OptimizeResult
         ) -> bool:
             # order here is important to make sure base callback gets executed
             return callback(xk, state) or timeout_callback(xk=xk)
 
     else:
 
-        def wrapped_callback(xk: np.ndarray) -> None:
+        def wrapped_callback(xk: npt.NDArray) -> None:
             timeout_callback(xk=xk)
             callback(xk)
 

botorch/utils/sampling.py (+6 −5)

@@ -24,6 +24,7 @@
 from typing import Any, TYPE_CHECKING
 
 import numpy as np
+import numpy.typing as npt
 import scipy
 import torch
 from botorch.exceptions.errors import BotorchError
@@ -374,11 +375,11 @@ def _convert_bounds_to_inequality_constraints(bounds: Tensor) -> tuple[Tensor, T
 
 
 def find_interior_point(
-    A: np.ndarray,
-    b: np.ndarray,
-    A_eq: np.ndarray | None = None,
-    b_eq: np.ndarray | None = None,
-) -> np.ndarray:
+    A: npt.NDArray,
+    b: npt.NDArray,
+    A_eq: npt.NDArray | None = None,
+    b_eq: npt.NDArray | None = None,
+) -> npt.NDArray:
     r"""Find an interior point of a polytope via linear programming.
 
     Args:

test/optim/test_parameter_constraints.py (+3 −2)

@@ -8,6 +8,7 @@
 from itertools import product
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from botorch.exceptions.errors import CandidateGenerationError, UnsupportedError
 from botorch.optim.parameter_constraints import (
@@ -55,7 +56,7 @@ def test_make_nonlinear_constraints(self):
         def nlc(x):
             return 4 - x.sum()
 
-        def f_np_wrapper(x: np.ndarray, f: Callable):
+        def f_np_wrapper(x: npt.NDArray, f: Callable):
             """Given a torch callable, compute value + grad given a numpy array."""
             X = (
                 torch.from_numpy(x)
@@ -114,7 +115,7 @@ def test_make_scipy_nonlinear_inequality_constraints(self):
         def nlc(x):
             return 4 - x.sum()
 
-        def f_np_wrapper(x: np.ndarray, f: Callable):
+        def f_np_wrapper(x: npt.NDArray, f: Callable):
             """Given a torch callable, compute value + grad given a numpy array."""
             X = (
                 torch.from_numpy(x)
