Commit 8aea531

update more lora example (#12785)
1 parent: fd28cf1

3 files changed: +30 -30 lines changed

python/llm/example/GPU/LLM-Finetuning/LoRA/README.md (+10 -10)

@@ -12,10 +12,10 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.36.0 datasets
+pip install transformers==4.45.0 "trl<0.12.0" datasets
 pip install fire peft==0.10.0
+pip install bitsandbytes==0.45.1 scipy
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
-pip install bitsandbytes scipy
 ```
 
 ### 2. Configures OneAPI environment variables
@@ -75,14 +75,14 @@ python ./alpaca_lora_finetuning.py \
 
 ### 5. Sample Output
 ```log
-{'loss': 1.9231, 'learning_rate': 2.9999945367033285e-05, 'epoch': 0.0}
-{'loss': 1.8622, 'learning_rate': 2.9999781468531096e-05, 'epoch': 0.01}
-{'loss': 1.9043, 'learning_rate': 2.9999508305687345e-05, 'epoch': 0.01}
-{'loss': 1.8967, 'learning_rate': 2.999912588049185e-05, 'epoch': 0.01}
-{'loss': 1.9658, 'learning_rate': 2.9998634195730358e-05, 'epoch': 0.01}
-{'loss': 1.8386, 'learning_rate': 2.9998033254984483e-05, 'epoch': 0.02}
-{'loss': 1.809, 'learning_rate': 2.999732306263172e-05, 'epoch': 0.02}
-{'loss': 1.8552, 'learning_rate': 2.9996503623845395e-05, 'epoch': 0.02}
+{'loss': 1.9231, 'learning_rate': 2.9999945367033285e-05, 'epoch': 0.0}
+{'loss': 1.8622, 'learning_rate': 2.9999781468531096e-05, 'epoch': 0.01}
+{'loss': 1.9043, 'learning_rate': 2.9999508305687345e-05, 'epoch': 0.01}
+{'loss': 1.8967, 'learning_rate': 2.999912588049185e-05, 'epoch': 0.01}
+{'loss': 1.9658, 'learning_rate': 2.9998634195730358e-05, 'epoch': 0.01}
+{'loss': 1.8386, 'learning_rate': 2.9998033254984483e-05, 'epoch': 0.02}
+{'loss': 1.809, 'learning_rate': 2.999732306263172e-05, 'epoch': 0.02}
+{'loss': 1.8552, 'learning_rate': 2.9996503623845395e-05, 'epoch': 0.02}
 1%|█          | 8/1164 [xx:xx<xx:xx:xx, xx s/it]
 ```
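
The same dependency update is applied to each of the three READMEs in this commit. For reference, the install sequence as it reads after the change (the `conda create -n llm python=3.11` step and the unchanged lines are taken from the surrounding README context shown in the hunk):

```bash
# environment setup from the README (unchanged by this commit)
conda create -n llm python=3.11
conda activate llm

# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/

# updated in this commit: newer transformers, a trl pin, and bitsandbytes pinned to 0.45.1
pip install transformers==4.45.0 "trl<0.12.0" datasets
pip install fire peft==0.10.0
pip install bitsandbytes==0.45.1 scipy

# necessary to run distributed finetuning
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
```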

python/llm/example/GPU/LLM-Finetuning/QA-LoRA/README.md (+10 -10)

@@ -12,10 +12,10 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.36.0 datasets
+pip install transformers==4.45.0 "trl<0.12.0" datasets
 pip install fire peft==0.10.0
+pip install bitsandbytes==0.45.1 scipy
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
-pip install bitsandbytes scipy
 ```
 
 ### 2. Configures OneAPI environment variables
@@ -57,14 +57,14 @@ python ./alpaca_qalora_finetuning.py \
 
 ### 5. Sample Output
 ```log
-{'loss': 1.9231, 'learning_rate': 2.9999945367033285e-05, 'epoch': 0.0}
-{'loss': 1.8622, 'learning_rate': 2.9999781468531096e-05, 'epoch': 0.01}
-{'loss': 1.9043, 'learning_rate': 2.9999508305687345e-05, 'epoch': 0.01}
-{'loss': 1.8967, 'learning_rate': 2.999912588049185e-05, 'epoch': 0.01}
-{'loss': 1.9658, 'learning_rate': 2.9998634195730358e-05, 'epoch': 0.01}
-{'loss': 1.8386, 'learning_rate': 2.9998033254984483e-05, 'epoch': 0.02}
-{'loss': 1.809, 'learning_rate': 2.999732306263172e-05, 'epoch': 0.02}
-{'loss': 1.8552, 'learning_rate': 2.9996503623845395e-05, 'epoch': 0.02}
+{'loss': 1.9231, 'learning_rate': 2.9999945367033285e-05, 'epoch': 0.0}
+{'loss': 1.8622, 'learning_rate': 2.9999781468531096e-05, 'epoch': 0.01}
+{'loss': 1.9043, 'learning_rate': 2.9999508305687345e-05, 'epoch': 0.01}
+{'loss': 1.8967, 'learning_rate': 2.999912588049185e-05, 'epoch': 0.01}
+{'loss': 1.9658, 'learning_rate': 2.9998634195730358e-05, 'epoch': 0.01}
+{'loss': 1.8386, 'learning_rate': 2.9998033254984483e-05, 'epoch': 0.02}
+{'loss': 1.809, 'learning_rate': 2.999732306263172e-05, 'epoch': 0.02}
+{'loss': 1.8552, 'learning_rate': 2.9996503623845395e-05, 'epoch': 0.02}
 1%|█          | 8/1164 [xx:xx<xx:xx:xx, xx s/it]
 ```

python/llm/example/GPU/LLM-Finetuning/ReLora/README.md (+10 -10)

@@ -12,10 +12,10 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.36.0 datasets
+pip install transformers==4.45.0 "trl<0.12.0" datasets
 pip install fire peft==0.10.0
+pip install bitsandbytes==0.45.1 scipy
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
-pip install bitsandbytes scipy
 ```
 
 ### 2. Configures OneAPI environment variables
@@ -63,14 +63,14 @@ python ./alpaca_relora_finetuning.py \
 
 ### 5. Sample Output
 ```log
-{'loss': 1.9231, 'learning_rate': 2.9999945367033285e-05, 'epoch': 0.0}
-{'loss': 1.8622, 'learning_rate': 2.9999781468531096e-05, 'epoch': 0.01}
-{'loss': 1.9043, 'learning_rate': 2.9999508305687345e-05, 'epoch': 0.01}
-{'loss': 1.8967, 'learning_rate': 2.999912588049185e-05, 'epoch': 0.01}
-{'loss': 1.9658, 'learning_rate': 2.9998634195730358e-05, 'epoch': 0.01}
-{'loss': 1.8386, 'learning_rate': 2.9998033254984483e-05, 'epoch': 0.02}
-{'loss': 1.809, 'learning_rate': 2.999732306263172e-05, 'epoch': 0.02}
-{'loss': 1.8552, 'learning_rate': 2.9996503623845395e-05, 'epoch': 0.02}
+{'loss': 1.9231, 'learning_rate': 2.9999945367033285e-05, 'epoch': 0.0}
+{'loss': 1.8622, 'learning_rate': 2.9999781468531096e-05, 'epoch': 0.01}
+{'loss': 1.9043, 'learning_rate': 2.9999508305687345e-05, 'epoch': 0.01}
+{'loss': 1.8967, 'learning_rate': 2.999912588049185e-05, 'epoch': 0.01}
+{'loss': 1.9658, 'learning_rate': 2.9998634195730358e-05, 'epoch': 0.01}
+{'loss': 1.8386, 'learning_rate': 2.9998033254984483e-05, 'epoch': 0.02}
+{'loss': 1.809, 'learning_rate': 2.999732306263172e-05, 'epoch': 0.02}
+{'loss': 1.8552, 'learning_rate': 2.9996503623845395e-05, 'epoch': 0.02}
 1%|█          | 8/1164 [xx:xx<xx:xx:xx, xx s/it]
 ```
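
Not part of the commit itself, but a quick sanity check after following any of the updated READMEs; a minimal sketch assuming the pins above installed cleanly into the `llm` environment:

```bash
# show the pinned packages actually installed; expect transformers 4.45.0,
# peft 0.10.0, bitsandbytes 0.45.1, and a trl release below 0.12.0
pip list | grep -iE "^(transformers|peft|bitsandbytes|trl|datasets) "

# confirm the packages import cleanly in the activated environment
python -c "import transformers, peft, bitsandbytes, trl, datasets; print(transformers.__version__)"
```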
