forked from mallik3006/LLM_fine_tuning_llama3_8b
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path llama.yml
138 lines (137 loc) · 3.36 KB
/
llama.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
# Conda environment spec for Llama-3-8B fine-tuning (Python 3.9, CUDA 12.1 wheels).
# Recreate with: conda env create -f llama.yml
name: llama
channels:
  - defaults
  - https://conda.anaconda.org/pytorch
  - conda-forge
dependencies:
  # Conda-managed base toolchain and interpreter (build strings pinned).
  - _libgcc_mutex=0.1=main
  - _openmp_mutex=5.1=1_gnu
  - ca-certificates=2024.3.11=h06a4308_0
  - ld_impl_linux-64=2.38=h1181459_1
  - libffi=3.4.4=h6a678d5_1
  - libgcc-ng=11.2.0=h1234567_1
  - libgomp=11.2.0=h1234567_1
  - libstdcxx-ng=11.2.0=h1234567_1
  - ncurses=6.4=h6a678d5_0
  - openssl=3.0.13=h7f8727e_1
  - pip=24.0=py39h06a4308_0
  - python=3.9.19=h955ad1f_1
  - readline=8.2=h5eee18b_0
  - setuptools=69.5.1=py39h06a4308_0
  - sqlite=3.45.3=h5eee18b_0
  - tk=8.6.14=h39e8969_0
  - wheel=0.43.0=py39h06a4308_0
  - xz=5.4.6=h5eee18b_1
  - zlib=1.2.13=h5eee18b_1
  # Pip-managed packages. These MUST be nested under "- pip:" — at the top
  # level conda would (wrongly) try to resolve them itself and fail on the
  # "==" pip syntax.
  - pip:
      - absl-py==2.1.0
      - accelerate==0.30.1
      - aiohttp==3.9.5
      - aiosignal==1.3.1
      - annotated-types==0.7.0
      - asttokens==2.4.1
      - async-timeout==4.0.3
      - attrs==23.2.0
      - bitsandbytes==0.43.1
      - certifi==2024.2.2
      - charset-normalizer==3.3.2
      - comm==0.2.2
      - datasets==2.19.1
      - debugpy==1.8.1
      - decorator==5.1.1
      - deepspeed==0.14.2
      - dill==0.3.8
      - docstring-parser==0.16
      - eval-type-backport==0.2.0
      - evaluate==0.4.1
      - exceptiongroup==1.2.1
      - executing==2.0.1
      - filelock==3.14.0
      - frozenlist==1.4.1
      - fsspec==2024.2.0
      - grpcio==1.64.0
      - hjson==3.1.0
      - huggingface-hub==0.23.1
      - idna==3.7
      - importlib-metadata==7.1.0
      - ipykernel==6.29.4
      - ipython==8.18.1
      - jedi==0.19.1
      - jinja2==3.1.4
      - jupyter-client==8.6.1
      - jupyter-core==5.7.2
      - markdown==3.6
      - markdown-it-py==3.0.0
      - markupsafe==2.1.5
      - matplotlib-inline==0.1.7
      - mdurl==0.1.2
      - mpmath==1.3.0
      - multidict==6.0.5
      - multiprocess==0.70.16
      - nest-asyncio==1.6.0
      - networkx==3.2.1
      - ninja==1.11.1.1
      - numpy==1.26.4
      # NVIDIA CUDA 12.1 runtime wheels pulled in by torch==2.2.2.
      - nvidia-cublas-cu12==12.1.3.1
      - nvidia-cuda-cupti-cu12==12.1.105
      - nvidia-cuda-nvrtc-cu12==12.1.105
      - nvidia-cuda-runtime-cu12==12.1.105
      - nvidia-cudnn-cu12==8.9.2.26
      - nvidia-cufft-cu12==11.0.2.54
      - nvidia-curand-cu12==10.3.2.106
      - nvidia-cusolver-cu12==11.4.5.107
      - nvidia-cusparse-cu12==12.1.0.106
      - nvidia-nccl-cu12==2.19.3
      - nvidia-nvjitlink-cu12==12.5.40
      - nvidia-nvtx-cu12==12.1.105
      - packaging==24.0
      - pandas==2.2.2
      - parso==0.8.4
      - peft==0.11.1
      - pexpect==4.9.0
      - platformdirs==4.2.2
      - prompt-toolkit==3.0.43
      - protobuf==5.26.1
      - psutil==5.9.8
      - ptyprocess==0.7.0
      - pure-eval==0.2.2
      - py-cpuinfo==9.0.0
      - pyarrow==16.1.0
      - pyarrow-hotfix==0.6
      - pydantic==2.7.1
      - pydantic-core==2.18.2
      - pygments==2.18.0
      - pynvml==11.5.0
      - python-dateutil==2.9.0.post0
      - pytz==2024.1
      - pyyaml==6.0.1
      - pyzmq==26.0.3
      - regex==2024.5.15
      - requests==2.32.2
      - responses==0.18.0
      - rich==13.7.1
      - safetensors==0.4.3
      - shtab==1.7.1
      - six==1.16.0
      - stack-data==0.6.3
      - sympy==1.12
      - tensorboard==2.16.2
      - tensorboard-data-server==0.7.2
      - tokenizers==0.19.1
      - torch==2.2.2
      - tornado==6.4
      - tqdm==4.66.4
      - traitlets==5.14.3
      - transformers==4.41.0
      - triton==2.2.0
      - trl==0.8.6
      - typing-extensions==4.11.0
      - tyro==0.8.4
      - tzdata==2024.1
      - urllib3==2.2.1
      - wcwidth==0.2.13
      - werkzeug==3.0.3
      - xxhash==3.4.1
      - yarl==1.9.4
      - zipp==3.18.2
# NOTE: machine-specific path from `conda env export`; harmless elsewhere
# (conda ignores it when -n/-p is given) but consider removing before sharing.
prefix: /home/seoi0215/.conda/envs/llama