@@ -13,10 +13,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install bitsandbytes==0.43.0
-pip install datasets==2.18.0
-pip install --upgrade transformers==4.36.0
-pip install scipy fire
+pip install transformers==4.45.0 "trl<0.12.0" datasets
+pip install bitsandbytes==0.45.1 scipy fire
 ```

 ### 2. LISA Finetune
@@ -51,23 +49,23 @@ Optional parameters for `lisa_finetuning.py`:

 ``` log
 ......
-{'loss': 1.8391, 'learning_rate': 1.9967238104745695e-05, 'epoch': 0.03}
-{'loss': 1.8242, 'learning_rate': 1.9869167087338908e-05, 'epoch': 0.05}
+{'loss': 1.8391, 'learning_rate': 1.9967238104745695e-05, 'epoch': 0.03}
+{'loss': 1.8242, 'learning_rate': 1.9869167087338908e-05, 'epoch': 0.05}
   5%|██████▉  | 20/388 [xx:xx<x:xx:xx, x.xxs/it]
 Activating layers at indices: [10] for the next steps.
-{'loss': 1.8128, 'learning_rate': 1.9706429546259592e-05, 'epoch': 0.08}
-{'loss': 1.775, 'learning_rate': 1.9480091799562706e-05, 'epoch': 0.1}
+{'loss': 1.8128, 'learning_rate': 1.9706429546259592e-05, 'epoch': 0.08}
+{'loss': 1.775, 'learning_rate': 1.9480091799562706e-05, 'epoch': 0.1}
  10%|██████████████ | 40/388 [xx:xx<xx:xx, x.xxs/it]
 Activating layers at indices: [30] for the next steps.
-{'loss': 1.7669, 'learning_rate': 1.9191636897958123e-05, 'epoch': 0.13}
-{'loss': 1.7749, 'learning_rate': 1.8842954907300236e-05, 'epoch': 0.15}
+{'loss': 1.7669, 'learning_rate': 1.9191636897958123e-05, 'epoch': 0.13}
+{'loss': 1.7749, 'learning_rate': 1.8842954907300236e-05, 'epoch': 0.15}
  15%|█████████████████████ | 60/388 [xx:xx<xx:xx, x.xxs/it]
 Activating layers at indices: [26] for the next steps.
-{'loss': 1.7735, 'learning_rate': 1.8436330524160048e-05, 'epoch': 0.18}
-{'loss': 1.7199, 'learning_rate': 1.797442810562721e-05, 'epoch': 0.21}
+{'loss': 1.7735, 'learning_rate': 1.8436330524160048e-05, 'epoch': 0.18}
+{'loss': 1.7199, 'learning_rate': 1.797442810562721e-05, 'epoch': 0.21}
  21%|████████████████████████████ | 80/388 [xx:xx<xx:xx, x.xxs/it]
 Activating layers at indices: [17] for the next steps.
-{'loss': 1.7328, 'learning_rate': 1.7460274211432463e-05, 'epoch': 0.23}
+{'loss': 1.7328, 'learning_rate': 1.7460274211432463e-05, 'epoch': 0.23}
  25%|█████████████████████████████████▋ | 96/388 [xx:xx<xx:xx, x.xxs/it]
 ......
0 commit comments