diff --git a/libs/infinity_emb/infinity_emb/inference/batch_handler.py b/libs/infinity_emb/infinity_emb/inference/batch_handler.py index e6d2bfd7..ca4a3511 100644 --- a/libs/infinity_emb/infinity_emb/inference/batch_handler.py +++ b/libs/infinity_emb/infinity_emb/inference/batch_handler.py @@ -91,7 +91,7 @@ def __init__( async def embed( self, sentences: List[str] - ) -> tuple[List[EmbeddingReturnType], int]: + ) -> Tuple[List[EmbeddingReturnType], int]: """Schedule a sentence to be embedded. Awaits until embedded. Args: @@ -113,7 +113,7 @@ async def embed( async def rerank( self, query: str, docs: List[str], raw_scores: bool = False - ) -> tuple[List[float], int]: + ) -> Tuple[List[float], int]: """Schedule a query to be reranked with documents. Awaits until reranked. Args: diff --git a/libs/infinity_emb/infinity_emb/transformer/embedder/neuron.py b/libs/infinity_emb/infinity_emb/transformer/embedder/neuron.py new file mode 100644 index 00000000..64fc9ce8 --- /dev/null +++ b/libs/infinity_emb/infinity_emb/transformer/embedder/neuron.py @@ -0,0 +1,153 @@ +import copy +import json +import subprocess +from typing import Dict, List, Union + +import numpy as np + +from infinity_emb.args import EngineArgs +from infinity_emb.primitives import EmbeddingReturnType, PoolingMethod +from infinity_emb.transformer.abstract import BaseEmbedder +from infinity_emb.transformer.utils_optimum import ( + cls_token_pooling, + mean_pooling, + normalize, +) + +try: + import torch + from optimum.neuron import NeuronModelForFeatureExtraction # type: ignore + from transformers import AutoConfig, AutoTokenizer # type: ignore + + OPTIMUM_AVAILABLE = True +except (ImportError, RuntimeError): + OPTIMUM_AVAILABLE = False + +__all__ = [ + "NeuronOptimumEmbedder", +] + + +def get_nc_count() -> Union[int, None]: + """Returns the number of neuron cores on the current instance.""" + try: + cmd = "neuron-ls --json-output" + result = subprocess.run(cmd, shell=True, capture_output=True) + print("inferring nc_count from `neuron-ls`") + print(result.stdout.decode("utf-8")) + json_output = json.loads(result.stdout) + count = sum([x["nc_count"] for x in json_output]) + print(f"nc_count={count}") + return count + except Exception: + return None + + +def pad_up_to_size(desired_max_bs, input_ids): + """input_ids a 2D array with batch_size on dim=0 + + makes sure the func runs with self.batch_size + """ + # access a from TestSample + batch_size = input_ids.shape[0] + + if batch_size < desired_max_bs: + # handle the event of input_ids.shape[0] != batch_size + # Neuron cores expect constant batch_size + input_ids = torch.concat( + ( + input_ids, + # add missing_batch_size dummy + torch.zeros( + [desired_max_bs - batch_size, *input_ids.size()[1:]], + dtype=input_ids.dtype, + device=input_ids.device, + ), + ), + dim=0, + ) + elif batch_size > desired_max_bs: + raise ValueError( + f"The specified batch_size ({batch_size}) exceeds the model static batch size ({desired_max_bs})" + ) + # return the forward pass that requires constant batch size + return input_ids + + +class NeuronOptimumEmbedder(BaseEmbedder): + def __init__(self, *, engine_args: EngineArgs): + if not OPTIMUM_AVAILABLE: + raise ImportError( + "optimum.neuron is not installed." 
"run this somewhere with neuron" + ) + + self.pooling = ( + mean_pooling + if engine_args.pooling_method == PoolingMethod.mean + else cls_token_pooling + ) + + self.tokenizer = AutoTokenizer.from_pretrained(engine_args.model_name_or_path) + self.config = AutoConfig.from_pretrained(engine_args.model_name_or_path) + self._infinity_tokenizer = copy.deepcopy(self.tokenizer) + + compiler_args = {"num_cores": get_nc_count(), "auto_cast_type": "fp16"} + input_shapes = { + "batch_size": 4, + "sequence_length": ( + self.config.max_position_embeddings + if hasattr(self.config, "max_position_embeddings") + else 512 + ), + } + self.model = NeuronModelForFeatureExtraction.from_pretrained( + model_id=engine_args.model_name_or_path, + export=True, + **compiler_args, + **input_shapes, + ) + self.batch_size = self.model.neuron_config.input_shapes["batch_size"] + + def encode_pre(self, sentences: List[str]) -> Dict[str, np.ndarray]: + input_dict = self.tokenizer( + sentences, + max_length=self.config.max_position_embeddings, + padding=True, + truncation="longest_first", + return_tensors="pt", + ) + input_dict.pop("token_type_ids", None) + return input_dict + + def encode_core(self, input_dict: Dict[str, np.ndarray]) -> dict: + """requires constant batch size, which is a bit of extra work""" + for key, tensor in input_dict.items(): + actual_bsize = tensor.shape[0] + input_dict[key] = pad_up_to_size(self.batch_size, tensor) + with torch.inference_mode(): + outputs = self.model(**input_dict) + return { + "token_embeddings": outputs["last_hidden_state"][:actual_bsize], + "attention_mask": input_dict["attention_mask"][:actual_bsize], + } + + def encode_post(self, embedding: dict) -> EmbeddingReturnType: + embedding = self.pooling( # type: ignore + embedding["token_embeddings"].numpy(), embedding["attention_mask"].numpy() + ) + + return normalize(embedding).astype(np.float32) + + def tokenize_lengths(self, sentences: List[str]) -> List[int]: + if hasattr(self._infinity_tokenizer, "encode_batch"): + tks = self._infinity_tokenizer.encode_batch( + sentences, + padding=False, + truncation="longest_first", + ) + else: + tks = self._infinity_tokenizer( + sentences, padding=False, truncation="longest_first" + ) + + return [len(t) for t in tks["input_ids"]] diff --git a/libs/infinity_emb/infinity_emb/transformer/embedder/optimum.py b/libs/infinity_emb/infinity_emb/transformer/embedder/optimum.py index 9108e1c2..9a4397e4 100644 --- a/libs/infinity_emb/infinity_emb/transformer/embedder/optimum.py +++ b/libs/infinity_emb/infinity_emb/transformer/embedder/optimum.py @@ -8,8 +8,10 @@ from infinity_emb.primitives import EmbeddingReturnType, PoolingMethod from infinity_emb.transformer.abstract import BaseEmbedder from infinity_emb.transformer.utils_optimum import ( + cls_token_pooling, device_to_onnx, get_onnx_files, + mean_pooling, normalize, optimize_model, ) @@ -23,21 +25,6 @@ OPTIMUM_AVAILABLE = False -def mean_pooling(last_hidden_states: np.ndarray, attention_mask: np.ndarray): - input_mask_expanded = (np.expand_dims(attention_mask, axis=-1)).astype(float) - - sum_embeddings = np.sum( - last_hidden_states.astype(float) * input_mask_expanded, axis=1 - ) - mask_sum = np.maximum(np.sum(input_mask_expanded, axis=1), 1e-9) - - return sum_embeddings / mask_sum - - -def cls_token_pooling(model_output, *args): - return model_output[:, 0] - - class OptimumEmbedder(BaseEmbedder): def __init__(self, *, engine_args: EngineArgs): if not OPTIMUM_AVAILABLE: diff --git a/libs/infinity_emb/infinity_emb/transformer/utils.py 
b/libs/infinity_emb/infinity_emb/transformer/utils.py index 8709c63f..32228c88 100644 --- a/libs/infinity_emb/infinity_emb/transformer/utils.py +++ b/libs/infinity_emb/infinity_emb/transformer/utils.py @@ -11,6 +11,7 @@ ) from infinity_emb.transformer.embedder.ct2 import CT2SentenceTransformer from infinity_emb.transformer.embedder.dummytransformer import DummyTransformer +from infinity_emb.transformer.embedder.neuron import NeuronOptimumEmbedder from infinity_emb.transformer.embedder.optimum import OptimumEmbedder from infinity_emb.transformer.embedder.sentence_transformer import ( SentenceTransformerPatched, @@ -28,6 +29,7 @@ class EmbedderEngine(Enum): ctranslate2 = CT2SentenceTransformer debugengine = DummyTransformer optimum = OptimumEmbedder + neuron = NeuronOptimumEmbedder @staticmethod def from_inference_engine(engine: InferenceEngine): @@ -39,6 +41,8 @@ def from_inference_engine(engine: InferenceEngine): return EmbedderEngine.debugengine elif engine == InferenceEngine.optimum: return EmbedderEngine.optimum + elif engine == InferenceEngine.neuron: + return EmbedderEngine.neuron else: raise NotImplementedError(f"EmbedderEngine for {engine} not implemented") diff --git a/libs/infinity_emb/infinity_emb/transformer/utils_optimum.py b/libs/infinity_emb/infinity_emb/transformer/utils_optimum.py index 045ed673..8df73b49 100644 --- a/libs/infinity_emb/infinity_emb/transformer/utils_optimum.py +++ b/libs/infinity_emb/infinity_emb/transformer/utils_optimum.py @@ -22,6 +22,21 @@ torch = None # type: ignore +def mean_pooling(last_hidden_states: np.ndarray, attention_mask: np.ndarray): + input_mask_expanded = (np.expand_dims(attention_mask, axis=-1)).astype(float) + + sum_embeddings = np.sum( + last_hidden_states.astype(float) * input_mask_expanded, axis=1 + ) + mask_sum = np.maximum(np.sum(input_mask_expanded, axis=1), 1e-9) + + return sum_embeddings / mask_sum + + +def cls_token_pooling(model_output, *args): + return model_output[:, 0] + + def normalize(input_array, p=2, dim=1, eps=1e-12): # Calculate the Lp norm along the specified dimension norm = np.linalg.norm(input_array, ord=p, axis=dim, keepdims=True) diff --git a/libs/infinity_emb/poetry.lock b/libs/infinity_emb/poetry.lock index f3ec5a03..fc7e1aeb 100644 --- a/libs/infinity_emb/poetry.lock +++ b/libs/infinity_emb/poetry.lock @@ -1797,21 +1797,21 @@ types-protobuf = ">=4.23.0.2" [[package]] name = "networkx" -version = "3.2.1" +version = "3.1" description = "Python package for creating and manipulating graphs and networks" optional = true -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, - {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, + {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, + {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, ] [package.extras] -default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] -developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] -doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy 
(>=1.10)"] -test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] +default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "numpy" @@ -3200,45 +3200,50 @@ torch = ["safetensors[numpy]", "torch (>=1.10)"] [[package]] name = "scikit-learn" -version = "1.4.1.post1" +version = "1.3.2" description = "A set of python modules for machine learning and data mining" optional = true -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "scikit-learn-1.4.1.post1.tar.gz", hash = "sha256:93d3d496ff1965470f9977d05e5ec3376fb1e63b10e4fda5e39d23c2d8969a30"}, - {file = "scikit_learn-1.4.1.post1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c540aaf44729ab5cd4bd5e394f2b375e65ceaea9cdd8c195788e70433d91bbc5"}, - {file = "scikit_learn-1.4.1.post1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4310bff71aa98b45b46cd26fa641309deb73a5d1c0461d181587ad4f30ea3c36"}, - {file = "scikit_learn-1.4.1.post1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f43dd527dabff5521af2786a2f8de5ba381e182ec7292663508901cf6ceaf6e"}, - {file = "scikit_learn-1.4.1.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c02e27d65b0c7dc32f2c5eb601aaf5530b7a02bfbe92438188624524878336f2"}, - {file = "scikit_learn-1.4.1.post1-cp310-cp310-win_amd64.whl", hash = "sha256:629e09f772ad42f657ca60a1a52342eef786218dd20cf1369a3b8d085e55ef8f"}, - {file = "scikit_learn-1.4.1.post1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6145dfd9605b0b50ae72cdf72b61a2acd87501369a763b0d73d004710ebb76b5"}, - {file = "scikit_learn-1.4.1.post1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1afed6951bc9d2053c6ee9a518a466cbc9b07c6a3f9d43bfe734192b6125d508"}, - {file = "scikit_learn-1.4.1.post1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce03506ccf5f96b7e9030fea7eb148999b254c44c10182ac55857bc9b5d4815f"}, - {file = "scikit_learn-1.4.1.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ba516fcdc73d60e7f48cbb0bccb9acbdb21807de3651531208aac73c758e3ab"}, - {file = "scikit_learn-1.4.1.post1-cp311-cp311-win_amd64.whl", hash = "sha256:78cd27b4669513b50db4f683ef41ea35b5dddc797bd2bbd990d49897fd1c8a46"}, - {file = "scikit_learn-1.4.1.post1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a1e289f33f613cefe6707dead50db31930530dc386b6ccff176c786335a7b01c"}, - {file = "scikit_learn-1.4.1.post1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0df87de9ce1c0140f2818beef310fb2e2afdc1e66fc9ad587965577f17733649"}, - {file = "scikit_learn-1.4.1.post1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:712c1c69c45b58ef21635360b3d0a680ff7d83ac95b6f9b82cf9294070cda710"}, - {file = "scikit_learn-1.4.1.post1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1754b0c2409d6ed5a3380512d0adcf182a01363c669033a2b55cca429ed86a81"}, - {file = "scikit_learn-1.4.1.post1-cp312-cp312-win_amd64.whl", hash = "sha256:1d491ef66e37f4e812db7e6c8286520c2c3fc61b34bf5e59b67b4ce528de93af"}, - {file = "scikit_learn-1.4.1.post1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:aa0029b78ef59af22cfbd833e8ace8526e4df90212db7ceccbea582ebb5d6794"}, - {file = "scikit_learn-1.4.1.post1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:14e4c88436ac96bf69eb6d746ac76a574c314a23c6961b7d344b38877f20fee1"}, - {file = "scikit_learn-1.4.1.post1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7cd3a77c32879311f2aa93466d3c288c955ef71d191503cf0677c3340ae8ae0"}, - {file = "scikit_learn-1.4.1.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a3ee19211ded1a52ee37b0a7b373a8bfc66f95353af058a210b692bd4cda0dd"}, - {file = "scikit_learn-1.4.1.post1-cp39-cp39-win_amd64.whl", hash = "sha256:234b6bda70fdcae9e4abbbe028582ce99c280458665a155eed0b820599377d25"}, + {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"}, + {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"}, + {file = "scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"}, + {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"}, + {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"}, + {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"}, + {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"}, + {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"}, + {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"}, + {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"}, + {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"}, + {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"}, + {file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"}, + {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"}, + {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"}, + {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"}, + {file = "scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"}, + {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"}, + {file = 
"scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"}, + {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"}, + {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"}, + {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"}, + {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"}, + {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"}, + {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"}, + {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"}, ] [package.dependencies] -joblib = ">=1.2.0" -numpy = ">=1.19.5,<2.0" -scipy = ">=1.6.0" +joblib = ">=1.1.1" +numpy = ">=1.17.3,<2.0" +scipy = ">=1.5.0" threadpoolctl = ">=2.0.0" [package.extras] -benchmark = ["matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "pandas (>=1.1.5)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.15.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] -examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] -tests = ["black (>=23.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.19.12)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.17.2)"] +benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] [[package]] name = "scipy"