Skip to content

Commit ed7c6a2

Browse files
committed
Initial Commit
0 parents  commit ed7c6a2

File tree

149 files changed

+15924
-0
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

149 files changed

+15924
-0
lines changed

.gitignore

+174
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,174 @@
1+
# Project related
2+
datasets
3+
!anomalib/datasets
4+
!tests/pre_merge/datasets
5+
results
6+
!anomalib/core/results
7+
8+
# Jupyter Notebooks
9+
notebooks/500_use_cases/501_dobot/
10+
!notebooks/500_use_cases/501_dobot/*.ipynb
11+
12+
# VENV
13+
.python-version
14+
.anomalib
15+
.toxbase
16+
17+
# IDE
18+
.vscode
19+
.idea
20+
21+
# Byte-compiled / optimized / DLL files
22+
__pycache__/
23+
*.py[cod]
24+
*$py.class
25+
26+
# C extensions
27+
*.so
28+
29+
# Distribution / packaging
30+
.Python
31+
build/
32+
develop-eggs/
33+
dist/
34+
downloads/
35+
eggs/
36+
.eggs/
37+
lib/
38+
lib64/
39+
parts/
40+
sdist/
41+
var/
42+
wheels/
43+
share/python-wheels/
44+
*.egg-info/
45+
.installed.cfg
46+
*.egg
47+
MANIFEST
48+
49+
# PyInstaller
50+
# Usually these files are written by a python script from a template
51+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
52+
*.manifest
53+
*.spec
54+
55+
# Installer logs
56+
pip-log.txt
57+
pip-delete-this-directory.txt
58+
59+
# Unit test / coverage reports
60+
htmlcov/
61+
.tox/
62+
.nox/
63+
.coverage
64+
.coverage.*
65+
.cache
66+
nosetests.xml
67+
coverage.xml
68+
*.cover
69+
*.py,cover
70+
.hypothesis/
71+
.pytest_cache/
72+
cover/
73+
74+
# Translations
75+
*.mo
76+
*.pot
77+
78+
# Django stuff:
79+
*.log
80+
local_settings.py
81+
db.sqlite3
82+
db.sqlite3-journal
83+
84+
# Flask stuff:
85+
instance/
86+
.webassets-cache
87+
88+
# Scrapy stuff:
89+
.scrapy
90+
91+
# Sphinx documentation
92+
docs/build/
93+
docs/source/_build/
94+
95+
# PyBuilder
96+
.pybuilder/
97+
target/
98+
99+
# Jupyter Notebook
100+
.ipynb_checkpoints
101+
102+
# IPython
103+
profile_default/
104+
ipython_config.py
105+
106+
# pyenv
107+
# For a library or package, you might want to ignore these files since the code is
108+
# intended to run in multiple environments; otherwise, check them in:
109+
# .python-version
110+
111+
# pipenv
112+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
113+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
114+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
115+
# install all needed dependencies.
116+
#Pipfile.lock
117+
118+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
119+
__pypackages__/
120+
121+
# Celery stuff
122+
celerybeat-schedule
123+
celerybeat.pid
124+
125+
# SageMath parsed files
126+
*.sage.py
127+
128+
# Environments
129+
.env
130+
.venv
131+
env/
132+
venv/
133+
ENV/
134+
env.bak/
135+
venv.bak/
136+
137+
# Spyder project settings
138+
.spyderproject
139+
.spyproject
140+
141+
# Rope project settings
142+
.ropeproject
143+
144+
# mkdocs documentation
145+
/site
146+
147+
# mypy
148+
.mypy_cache/
149+
.dmypy.json
150+
dmypy.json
151+
152+
# Pyre type checker
153+
.pyre/
154+
155+
# pytype static type analyzer
156+
.pytype/
157+
158+
# Cython debug symbols
159+
cython_debug/
160+
161+
162+
# Documentations
163+
docs/source/generated
164+
docs/source/api
165+
docs/source/models
166+
docs/build/
167+
docs/source/_build/
168+
169+
# Misc
170+
.DS_Store
171+
172+
# logs
173+
wandb/
174+
lightning_logs/

.markdownlint.yaml

+34
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# Default state for all rules
2+
default: true
3+
4+
# Path to configuration file to extend
5+
extends: null
6+
7+
# MD001/heading-increment/header-increment - Heading levels should only increment by one level at a time
8+
MD001: true
9+
10+
# MD013/line-length - Line length
11+
MD013:
12+
# Number of characters
13+
line_length: 1000
14+
15+
# This is not useful for some files such as `CHANGELOG.md`
16+
MD024:
17+
# Only check sibling headings
18+
allow_different_nesting: true
19+
# Only check sibling headings
20+
siblings_only: true
21+
22+
MD033: false
23+
24+
# If a page is printed, it helps if the URL is viewable.
25+
MD034: false # Bare URL used
26+
27+
# This is needed for the anomalib tagline in the README
28+
MD036: false # Emphasis used instead of a header
29+
30+
# Some md files have comments or links at the top of the files.
31+
MD041: false # First line in file should be a top level header
32+
33+
# Badges have empty links
34+
MD042: false # No empty links

.pre-commit-config.yaml

+63
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
default_language_version:
2+
node: system
3+
4+
repos:
5+
- repo: https://github.com/pre-commit/pre-commit-hooks
6+
rev: v4.4.0
7+
hooks:
8+
# list of supported hooks: https://pre-commit.com/hooks.html
9+
- id: trailing-whitespace
10+
- id: end-of-file-fixer
11+
- id: check-yaml
12+
- id: check-added-large-files
13+
- id: debug-statements
14+
- id: detect-private-key
15+
16+
# python code formatting
17+
- repo: https://github.com/psf/black
18+
rev: 23.1.0
19+
hooks:
20+
- id: black
21+
22+
# Ruff version.
23+
- repo: https://github.com/charliermarsh/ruff-pre-commit
24+
rev: "v0.0.253"
25+
hooks:
26+
- id: ruff
27+
exclude: "tests"
28+
29+
# python static type checking
30+
- repo: https://github.com/pre-commit/mirrors-mypy
31+
rev: "v1.0.1"
32+
hooks:
33+
- id: mypy
34+
additional_dependencies: [types-PyYAML]
35+
exclude: "tests"
36+
37+
# notebooks.
38+
- repo: https://github.com/nbQA-dev/nbQA
39+
rev: 1.6.3
40+
hooks:
41+
- id: nbqa-black
42+
- id: nbqa-ruff
43+
# Ignore unsorted imports. This is because jupyter notebooks can import
44+
# packages in a different order than the rest of the codebase.
45+
args: ["--ignore=I001"]
46+
47+
- repo: https://github.com/pre-commit/mirrors-prettier
48+
rev: v3.0.0-alpha.4
49+
hooks:
50+
- id: prettier
51+
52+
- repo: https://github.com/igorshubovych/markdownlint-cli
53+
rev: v0.33.0
54+
hooks:
55+
- id: markdownlint
56+
57+
- repo: https://github.com/AleksaC/hadolint-py
58+
rev: v2.12.0.2
59+
hooks:
60+
- id: hadolint
61+
name: Lint Dockerfiles
62+
description: Runs hadolint to lint Dockerfiles
63+
args: ["--ignore", "DL3008"]

MANIFEST.in

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
recursive-include requirements *

README.md

+53
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
# Optimizing PatchCore for Few/many-shot Anomaly Detection
2+
3+
This repository contains the implementation of our paper titled: **["Optimizing PatchCore for Few/many-shot Anomaly Detection"](https://arxiv.org/abs/2307.10792)**.
4+
We achieve a new state-of-the-art in few-shot Anomaly Detection on VisA by optimizing PatchCore, and furthermore won the few-shot Anomaly Detection track of the [VAND challenge at CVPR 2023](https://sites.google.com/view/vand-cvpr23/challenge) with this approach.
5+
Most notably, we leverage a more constrained (i.e. anti-aliased) feature extractor to do this.
6+
For more details, please check the paper.
7+
8+
<p align="center">
9+
<img src="./figures/sota-comparison.png" alt="drawing" width="600"/>
10+
</p>
11+
12+
## Computing environment
13+
For our experiments, we used a machine with an Intel(R) Xeon(R) Silver 4110 CPU @ 2.10GHz and a GeForce GTX 1080 Ti GPU with 11GB of VRAM running on the Ubuntu 22.04 OS.
14+
Parameters like batch size were determined based on this hardware, so feel free to change them.
15+
The instructions below were made considering an Ubuntu distribution.
16+
17+
By default the code requires having a GPU installed. To use only the CPU, change the parameter `trainer.devices` to `1` inside `src/anomalib/models/config.yaml`
18+
19+
## Installation and running
20+
* Extract the code into your home directory and change directory: `cd ~/patchcore-few-shot/`
21+
* (Optional, but recommended) Create a virtual environment with python 3.8, you can use pyenv or another python environment manager.
22+
* Install dependencies in editable mode: `pip install -e .`
23+
* The experiments done in the paper can be reproduced by running the bash scripts inside the `experiments` directory.
24+
25+
## Code structure
26+
We based our code on the open-source package [anomalib](https://github.com/openvinotoolkit/anomalib) and modified it according to our needs.
27+
Anomalib is a great package with a lot of different algorithm implementations, so do check it out!
28+
29+
```
30+
patchcore-few-shot/
31+
├── experiments # Bash scripts to reproduce our paper's experiments
32+
33+
├── requirements # python package dependencies
34+
|
35+
├── scripts # python scripts called in our experiments
36+
37+
└── src # main source code
38+
```
39+
40+
## Citation
41+
42+
If you find our work useful, please consider citing it
43+
44+
```
45+
@misc{santos2023optimizing,
46+
title={Optimizing PatchCore for Few/many-shot Anomaly Detection},
47+
author={João Santos and Triet Tran and Oliver Rippel},
48+
year={2023},
49+
eprint={2307.10792},
50+
archivePrefix={arXiv},
51+
primaryClass={cs.CV}
52+
}
53+
```
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
# mvtec dataset
2+
3+
export CUDA_VISIBLE_DEVICES=0
4+
python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 16 --k-shots 1 5 10 --batch-size 4 4 1 --coreset-ratio 25088
5+
python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 16 --k-shots 1 5 10 --batch-size 4 4 1 --coreset-ratio 12544
6+
python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 16 --k-shots 1 5 10 --batch-size 4 4 1 --coreset-ratio 3136
7+
8+
# python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 8 --k-shots 1 5 10 --batch-size 4 4 1 --coreset-ratio 25088
9+
# python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 8 --k-shots 1 5 10 --batch-size 4 4 1 --coreset-ratio 12544
10+
# python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 8 --k-shots 1 5 10 --batch-size 4 4 1 --coreset-ratio 3136
11+
12+
# python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 16 --k-shots 1 5 10 --batch-size 4 4 1 --coreset-ratio 47040
13+
# python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 16 --k-shots 1 5 10 --batch-size 4 4 1 --coreset-ratio 31360
14+
# python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 16 --k-shots 1 5 10 --batch-size 4 4 1 --coreset-ratio 15680
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
# MVTec dataset
2+
3+
# + Apply the same number of augmentations
4+
# + Vary for different combinations of augmentations
5+
# for different k-shot
6+
7+
export CUDA_VISIBLE_DEVICES=0
8+
9+
# anti aliased wide resnet
10+
python scripts/few_shot_training_iterate_augmentations.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 8 --k-shots 1 5 10 --batch-size 4 2 1
11+
12+
# python scripts/few_shot_training_iterate_augmentations.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 6 --k-shots 1 5 10 --batch-size 4 2 1
13+
# python scripts/few_shot_training_iterate_augmentations.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --augment True --number-transforms 4 --k-shots 1 5 10 --batch-size 4 2 1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
## mvtec experiments with many shot with different backbone & image size
2+
3+
export CUDA_VISIBLE_DEVICES=0
4+
# efficientnet b4
5+
python scripts/few_shot_training.py --backbone efficientnet_b4 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 190
6+
python scripts/few_shot_training.py --backbone efficientnet_b4 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 380
7+
python scripts/few_shot_training.py --backbone efficientnet_b4 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 570
8+
python scripts/few_shot_training.py --backbone efficientnet_b4 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 760
9+
10+
# convnext small
11+
python scripts/few_shot_training.py --backbone convnext_small_384_in22ft1k --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 192
12+
python scripts/few_shot_training.py --backbone convnext_small_384_in22ft1k --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 384
13+
python scripts/few_shot_training.py --backbone convnext_small_384_in22ft1k --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 576
14+
python scripts/few_shot_training.py --backbone convnext_small_384_in22ft1k --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 768
15+
16+
# convnext base
17+
python scripts/few_shot_training.py --backbone convnext_base_384_in22ft1k --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 192
18+
python scripts/few_shot_training.py --backbone convnext_base_384_in22ft1k --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 384
19+
python scripts/few_shot_training.py --backbone convnext_base_384_in22ft1k --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 576
20+
python scripts/few_shot_training.py --backbone convnext_base_384_in22ft1k --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 768
21+
22+
# wide resnet
23+
python scripts/few_shot_training.py --backbone wide_resnet50_2 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 112
24+
python scripts/few_shot_training.py --backbone wide_resnet50_2 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 224
25+
python scripts/few_shot_training.py --backbone wide_resnet50_2 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 336
26+
python scripts/few_shot_training.py --backbone wide_resnet50_2 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 448
27+
28+
# anti aliased wide resnet
29+
python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 112
30+
python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 224
31+
python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 336
32+
python scripts/few_shot_training.py --backbone antialiased_wide_resnet50_2 --dataset mvtec --k-shots 1 5 10 25 50 --batch-size 16 4 4 1 1 --image-size 448

0 commit comments

Comments
 (0)