Skip to content

Commit de4bb97

Browse files
authored
Remove accelerate 0.23.0 install command in readme and docker (#11333)
ipex-llm's accelerate has been upgraded to 0.23.0. Remove the accelerate 0.23.0 install command from the README and docker files.
1 parent ef4b651 commit de4bb97

File tree

17 files changed

+2
-17
lines changed

17 files changed

+2
-17
lines changed

.github/workflows/llm_unit_tests.yml

+1-1
Original file line number | Diff line number | Diff line change
@@ -381,7 +381,7 @@ jobs:
381381
shell: bash
382382
run: |
383383
python -m pip uninstall datasets -y
384-
python -m pip install transformers==4.36.0 datasets peft==0.10.0 accelerate==0.23.0
384+
python -m pip install transformers==4.36.0 datasets peft==0.10.0
385385
python -m pip install bitsandbytes scipy
386386
# Specific oneapi position on arc ut test machines
387387
if [[ "$RUNNER_OS" == "Linux" ]]; then

docker/llm/finetune/qlora/cpu/docker/Dockerfile

-1
Original file line number | Diff line number | Diff line change
@@ -50,7 +50,6 @@ RUN mkdir -p /ipex_llm/data && mkdir -p /ipex_llm/model && \
5050
# install huggingface dependencies
5151
pip install datasets transformers==4.36.0 && \
5252
pip install fire peft==0.10.0 && \
53-
pip install accelerate==0.23.0 && \
5453
pip install bitsandbytes && \
5554
# get qlora example code
5655
cd /ipex_llm && \

docker/llm/finetune/qlora/cpu/docker/Dockerfile.k8s

-1
Original file line number | Diff line number | Diff line change
@@ -63,7 +63,6 @@ RUN mkdir -p /ipex_llm/data && mkdir -p /ipex_llm/model && \
6363
# install huggingface dependencies
6464
pip install datasets transformers==4.36.0 && \
6565
pip install fire peft==0.10.0 && \
66-
pip install accelerate==0.23.0 && \
6766
# install basic dependencies
6867
apt-get update && apt-get install -y curl wget gpg gpg-agent && \
6968
# Install Intel oneAPI keys.

docker/llm/finetune/xpu/Dockerfile

+1-1
Original file line number | Diff line number | Diff line change
@@ -41,7 +41,7 @@ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRO
4141
rm -rf IPEX-LLM && \
4242
# install transformers & peft dependencies
4343
pip install transformers==4.36.0 && \
44-
pip install peft==0.10.0 datasets accelerate==0.23.0 && \
44+
pip install peft==0.10.0 datasets && \
4545
pip install bitsandbytes scipy fire && \
4646
# Prepare accelerate config
4747
mkdir -p /root/.cache/huggingface/accelerate && \

docs/readthedocs/source/doc/LLM/Quickstart/axolotl_quickstart.md

-1
Original file line number | Diff line number | Diff line change
@@ -216,7 +216,6 @@ pip install -e .
216216
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
217217
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
218218
# install transformers etc
219-
pip install accelerate==0.23.0
220219
# to avoid https://github.com/OpenAccess-AI-Collective/axolotl/issues/1544
221220
pip install datasets==2.15.0
222221
pip install transformers==4.37.0

python/llm/example/CPU/QLoRA-FineTuning/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -22,7 +22,6 @@ pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pyt
2222
pip install transformers==4.36.0
2323
pip install peft==0.10.0
2424
pip install datasets
25-
pip install accelerate==0.23.0
2625
pip install bitsandbytes scipy
2726
```
2827

python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -10,7 +10,6 @@ conda activate llm
1010
pip install --pre --upgrade ipex-llm[all]
1111
pip install datasets transformers==4.36.0
1212
pip install fire peft==0.10.0
13-
pip install accelerate==0.23.0
1413
pip install bitsandbytes scipy
1514
```
1615

python/llm/example/GPU/LLM-Finetuning/DPO/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -19,7 +19,6 @@ conda activate llm
1919
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
2020
pip install transformers==4.36.0 datasets
2121
pip install trl peft==0.10.0
22-
pip install accelerate==0.23.0
2322
pip install bitsandbytes
2423
```
2524

python/llm/example/GPU/LLM-Finetuning/HF-PEFT/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -17,7 +17,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
1717
pip install transformers==4.36.0 datasets
1818
pip install fire peft==0.10.0
1919
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
20-
pip install accelerate==0.23.0
2120
pip install bitsandbytes scipy
2221
```
2322

python/llm/example/GPU/LLM-Finetuning/LISA/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -13,7 +13,6 @@ conda create -n llm python=3.11
1313
conda activate llm
1414
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
1515
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
16-
pip install accelerate==0.23.0
1716
pip install bitsandbytes==0.43.0
1817
pip install datasets==2.18.0
1918
pip install --upgrade transformers==4.36.0

python/llm/example/GPU/LLM-Finetuning/LoRA/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
1515
pip install transformers==4.36.0 datasets
1616
pip install fire peft==0.10.0
1717
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
18-
pip install accelerate==0.23.0
1918
pip install bitsandbytes scipy
2019
```
2120

python/llm/example/GPU/LLM-Finetuning/QA-LoRA/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
1515
pip install transformers==4.36.0 datasets
1616
pip install fire peft==0.10.0
1717
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
18-
pip install accelerate==0.23.0
1918
pip install bitsandbytes scipy
2019
```
2120

python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
1818
pip install transformers==4.36.0 datasets
1919
pip install fire peft==0.10.0
2020
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
21-
pip install accelerate==0.23.0
2221
pip install bitsandbytes scipy
2322
# configures OneAPI environment variables
2423
source /opt/intel/oneapi/setvars.sh # necessary to run before installing deepspeed

python/llm/example/GPU/LLM-Finetuning/QLoRA/simple-example/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -19,7 +19,6 @@ conda activate llm
1919
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
2020
pip install transformers==4.36.0 datasets
2121
pip install peft==0.10.0
22-
pip install accelerate==0.23.0
2322
pip install bitsandbytes scipy
2423
```
2524

python/llm/example/GPU/LLM-Finetuning/QLoRA/trl-example/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -19,7 +19,6 @@ conda activate llm
1919
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
2020
pip install transformers==4.36.0 datasets
2121
pip install peft==0.10.0
22-
pip install accelerate==0.23.0
2322
pip install bitsandbytes scipy trl
2423
```
2524

python/llm/example/GPU/LLM-Finetuning/ReLora/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
1515
pip install transformers==4.36.0 datasets
1616
pip install fire peft==0.10.0
1717
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
18-
pip install accelerate==0.23.0
1918
pip install bitsandbytes scipy
2019
```
2120

python/llm/example/GPU/LLM-Finetuning/axolotl/README.md

-1
Original file line number | Diff line number | Diff line change
@@ -132,7 +132,6 @@ pip install -e .
132132
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
133133
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
134134
# install transformers etc
135-
pip install accelerate==0.23.0
136135
# to avoid https://github.com/OpenAccess-AI-Collective/axolotl/issues/1544
137136
pip install datasets==2.15.0
138137
pip install transformers==4.37.0

0 commit comments

Comments (0)