
Commit 32ee427

Make variable names more consistent (#45)
* Make variable names more consistent
* update
1 parent dc6ee26 commit 32ee427
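
The rename standardizes local path variables on a *_path suffix (tokenizer_path and model_path replace tokenizer_file_path, tokenizer_file, and model_file); the tokenizer_file_path= keyword argument of Qwen3Tokenizer itself is unchanged. A minimal sketch of the resulting loading pattern, pieced together from the hunks below — note that the import location of download_qwen3_small is an assumption, as it is not shown in this diff:

from pathlib import Path

import torch

from reasoning_from_scratch.qwen3 import Qwen3Model, Qwen3Tokenizer, QWEN_CONFIG_06_B
# Assumption: download_qwen3_small's module is not shown in this diff
from reasoning_from_scratch.ch02 import download_qwen3_small

# Fetch the base checkpoint and tokenizer into ./qwen3
download_qwen3_small(kind="base", tokenizer_only=False, out_dir="qwen3")

# Path variables now consistently end in *_path
tokenizer_path = Path("qwen3") / "tokenizer-base.json"
model_path = Path("qwen3") / "qwen3-0.6B-base.pth"

tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_path)
model = Qwen3Model(QWEN_CONFIG_06_B)
model.load_state_dict(torch.load(model_path, map_location="cpu"))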

File tree

8 files changed, +61 -61 lines changed


ch02/01_main-chapter-code/ch02_exercise-solutions.ipynb

Lines changed: 10 additions & 10 deletions
@@ -112,8 +112,8 @@
 "\n",
 "download_qwen3_small(kind=\"base\", tokenizer_only=True, out_dir=\"qwen3\")\n",
 "\n",
-"tokenizer_file_path = Path(\"qwen3\") / \"tokenizer-base.json\"\n",
-"tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_file_path)\n",
+"tokenizer_path = Path(\"qwen3\") / \"tokenizer-base.json\"\n",
+"tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_path)\n",
 "\n",
 "prompt = \"Hello, Ardwarklethyrx. Haus und Garten.\"\n",
 "input_token_ids_list = tokenizer.encode(prompt)\n",
@@ -174,12 +174,12 @@
 "\n",
 "download_qwen3_small(kind=\"base\", tokenizer_only=False, out_dir=\"qwen3\")\n",
 "\n",
-"tokenizer_file_path = Path(\"qwen3\") / \"tokenizer-base.json\"\n",
-"model_file = Path(\"qwen3\") / \"qwen3-0.6B-base.pth\"\n",
+"tokenizer_path = Path(\"qwen3\") / \"tokenizer-base.json\"\n",
+"model_path = Path(\"qwen3\") / \"qwen3-0.6B-base.pth\"\n",
 "\n",
-"tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_file_path)\n",
+"tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_path)\n",
 "model = Qwen3Model(QWEN_CONFIG_06_B)\n",
-"model.load_state_dict(torch.load(model_file))\n",
+"model.load_state_dict(torch.load(model_path))\n",
 "\n",
 "model.to(device)\n",
 "\n",
@@ -372,12 +372,12 @@
 "\n",
 "download_qwen3_small(kind=\"base\", tokenizer_only=False, out_dir=\"qwen3\")\n",
 "\n",
-"tokenizer_file_path = Path(\"qwen3\") / \"tokenizer-base.json\"\n",
-"model_file = Path(\"qwen3\") / \"qwen3-0.6B-base.pth\"\n",
+"tokenizer_path = Path(\"qwen3\") / \"tokenizer-base.json\"\n",
+"model_path = Path(\"qwen3\") / \"qwen3-0.6B-base.pth\"\n",
 "\n",
-"tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_file_path)\n",
+"tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_path)\n",
 "model = Qwen3Model(QWEN_CONFIG_06_B)\n",
-"model.load_state_dict(torch.load(model_file))\n",
+"model.load_state_dict(torch.load(model_path))\n",
 "\n",
 "model.to(device);"
 ]

ch02/01_main-chapter-code/ch02_main.ipynb

Lines changed: 4 additions & 4 deletions
@@ -367,8 +367,8 @@
 "from pathlib import Path\n",
 "from reasoning_from_scratch.qwen3 import Qwen3Tokenizer\n",
 "\n",
-"tokenizer_file_path = Path(\"qwen3\") / \"tokenizer-base.json\"\n",
-"tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_file_path)"
+"tokenizer_path = Path(\"qwen3\") / \"tokenizer-base.json\"\n",
+"tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_path)"
 ]
 },
 {
@@ -640,10 +640,10 @@
 "source": [
 "from reasoning_from_scratch.qwen3 import Qwen3Model, QWEN_CONFIG_06_B\n",
 "\n",
-"model_file = Path(\"qwen3\") / \"qwen3-0.6B-base.pth\"\n",
+"model_path = Path(\"qwen3\") / \"qwen3-0.6B-base.pth\"\n",
 "\n",
 "model = Qwen3Model(QWEN_CONFIG_06_B)\n",
-"model.load_state_dict(torch.load(model_file))\n",
+"model.load_state_dict(torch.load(model_path))\n",
 "\n",
 "model.to(device)"
 ]

ch02/03_optimized-LLM/compare_inference.py

Lines changed: 7 additions & 7 deletions
@@ -80,23 +80,23 @@
 
 if args.reasoning:
     download_qwen3_small(kind="reasoning", tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file_path = Path("qwen3") / "tokenizer-reasoning.json"
-    model_file = Path("qwen3") / "qwen3-0.6B-reasoning.pth"
+    tokenizer_path = Path("qwen3") / "tokenizer-reasoning.json"
+    model_path = Path("qwen3") / "qwen3-0.6B-reasoning.pth"
     tokenizer = Qwen3Tokenizer(
-        tokenizer_file_path=tokenizer_file_path,
+        tokenizer_file_path=tokenizer_path,
         apply_chat_template=True,
         add_generation_prompt=True,
         add_thinking=True
     )
 
 else:
     download_qwen3_small(kind="base", tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file_path = Path("qwen3") / "tokenizer-base.json"
-    model_file = Path("qwen3") / "qwen3-0.6B-base.pth"
-    tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_file_path)
+    tokenizer_path = Path("qwen3") / "tokenizer-base.json"
+    model_path = Path("qwen3") / "qwen3-0.6B-base.pth"
+    tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_path)
 
 model = Qwen3Model(QWEN_CONFIG_06_B)
-model.load_state_dict(torch.load(model_file, map_location=device))
+model.load_state_dict(torch.load(model_path, map_location=device))
 
 model.to(device)
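
For the reasoning checkpoint, the renamed variables feed the chat-template tokenizer setup that recurs in the scripts below. A condensed sketch of that configuration; the flag comments describe apparent intent inferred from the names, not documented behavior:

from pathlib import Path
from reasoning_from_scratch.qwen3 import Qwen3Tokenizer

tokenizer_path = Path("qwen3") / "tokenizer-reasoning.json"

tokenizer = Qwen3Tokenizer(
    tokenizer_file_path=tokenizer_path,  # keyword name unchanged by this commit
    apply_chat_template=True,    # wrap prompts in the chat template
    add_generation_prompt=True,  # append the assistant-turn marker
    add_thinking=True,           # enable the thinking/reasoning tokens
)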

chG/01_main-chapter-code/qwen3_chat_interface.py

Lines changed: 7 additions & 7 deletions
@@ -32,23 +32,23 @@
 def get_model_and_tokenizer(qwen3_config, local_dir, device, use_compile, use_reasoning):
     if use_reasoning:
         download_qwen3_small(kind="reasoning", tokenizer_only=False, out_dir=local_dir)
-        tokenizer_file_path = Path(local_dir) / "tokenizer-reasoning.json"
-        model_file = Path(local_dir) / "qwen3-0.6B-reasoning.pth"
+        tokenizer_path = Path(local_dir) / "tokenizer-reasoning.json"
+        model_path = Path(local_dir) / "qwen3-0.6B-reasoning.pth"
         tokenizer = Qwen3Tokenizer(
-            tokenizer_file_path=tokenizer_file_path,
+            tokenizer_file_path=tokenizer_path,
             apply_chat_template=True,
             add_generation_prompt=True,
             add_thinking=True
         )
 
     else:
         download_qwen3_small(kind="base", tokenizer_only=False, out_dir=local_dir)
-        tokenizer_file_path = Path(local_dir) / "tokenizer-base.json"
-        model_file = Path(local_dir) / "qwen3-0.6B-base.pth"
-        tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_file_path)
+        tokenizer_path = Path(local_dir) / "tokenizer-base.json"
+        model_path = Path(local_dir) / "qwen3-0.6B-base.pth"
+        tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_path)
 
     model = Qwen3Model(qwen3_config)
-    model.load_state_dict(torch.load(model_file, map_location=device))
+    model.load_state_dict(torch.load(model_path, map_location=device))
     model.to(device)
     if use_compile:
         model = torch.compile(model)

chG/01_main-chapter-code/qwen3_chat_interface_multiturn.py

Lines changed: 7 additions & 7 deletions
@@ -43,23 +43,23 @@ def trim_input_tensor(input_ids_tensor, context_len, max_new_tokens):
 def get_model_and_tokenizer(qwen3_config, local_dir, device, use_compile, use_reasoning):
     if use_reasoning:
         download_qwen3_small(kind="reasoning", tokenizer_only=False, out_dir=local_dir)
-        tokenizer_file_path = Path(local_dir) / "tokenizer-reasoning.json"
-        model_file = Path(local_dir) / "qwen3-0.6B-reasoning.pth"
+        tokenizer_path = Path(local_dir) / "tokenizer-reasoning.json"
+        model_path = Path(local_dir) / "qwen3-0.6B-reasoning.pth"
         tokenizer = Qwen3Tokenizer(
-            tokenizer_file_path=tokenizer_file_path,
+            tokenizer_file_path=tokenizer_path,
             apply_chat_template=True,
             add_generation_prompt=True,
             add_thinking=True
         )
 
     else:
         download_qwen3_small(kind="base", tokenizer_only=False, out_dir=local_dir)
-        tokenizer_file_path = Path(local_dir) / "tokenizer-base.json"
-        model_file = Path(local_dir) / "qwen3-0.6B-base.pth"
-        tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_file_path)
+        tokenizer_path = Path(local_dir) / "tokenizer-base.json"
+        model_path = Path(local_dir) / "qwen3-0.6B-base.pth"
+        tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_path)
 
     model = Qwen3Model(qwen3_config)
-    model.load_state_dict(torch.load(model_file, map_location=device))
+    model.load_state_dict(torch.load(model_path, map_location=device))
     model.to(device)
     if use_compile:
         model = torch.compile(model)

tests/examples/01_quick-example.py

Lines changed: 7 additions & 7 deletions
@@ -76,23 +76,23 @@
 
 if args.reasoning:
     download_qwen3_small(kind="reasoning", tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file_path = Path("qwen3") / "tokenizer-reasoning.json"
-    model_file = Path("qwen3") / "qwen3-0.6B-reasoning.pth"
+    tokenizer_path = Path("qwen3") / "tokenizer-reasoning.json"
+    model_path = Path("qwen3") / "qwen3-0.6B-reasoning.pth"
     tokenizer = Qwen3Tokenizer(
-        tokenizer_file_path=tokenizer_file_path,
+        tokenizer_file_path=tokenizer_path,
         apply_chat_template=True,
         add_generation_prompt=True,
         add_thinking=True
     )
 
 else:
     download_qwen3_small(kind="base", tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file_path = Path("qwen3") / "tokenizer-base.json"
-    model_file = Path("qwen3") / "qwen3-0.6B-base.pth"
-    tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_file_path)
+    tokenizer_path = Path("qwen3") / "tokenizer-base.json"
+    model_path = Path("qwen3") / "qwen3-0.6B-base.pth"
+    tokenizer = Qwen3Tokenizer(tokenizer_file_path=tokenizer_path)
 
 model = Qwen3Model(QWEN_CONFIG_06_B)
-model.load_state_dict(torch.load(model_file, map_location=device))
+model.load_state_dict(torch.load(model_path, map_location=device))
 
 model.to(device)

tests/test_qwen3_batched.py

Lines changed: 14 additions & 14 deletions
@@ -37,22 +37,22 @@ def test_batched_vs_nonbatched_equivalence_with_batched_model(reasoning):
     # Download and init tokenizer
     kind = "reasoning" if reasoning else "base"
     download_qwen3_small(kind=kind, tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file = Path("qwen3") / (
+    tokenizer_path = Path("qwen3") / (
         "tokenizer-reasoning.json" if reasoning else "tokenizer-base.json"
     )
-    model_file = Path("qwen3") / (
+    model_path = Path("qwen3") / (
         "qwen3-0.6B-reasoning.pth" if reasoning else "qwen3-0.6B-base.pth"
     )
     tokenizer = Qwen3Tokenizer(
-        tokenizer_file_path=tokenizer_file,
+        tokenizer_file_path=tokenizer_path,
         apply_chat_template=True if reasoning else False,
         add_generation_prompt=True if reasoning else False,
         add_thinking=True if reasoning else False,
     )
 
     # Models
     model_batched = Qwen3ModelBatched(QWEN_CONFIG_06_B)
-    model_batched.load_state_dict(torch.load(model_file, map_location=device))
+    model_batched.load_state_dict(torch.load(model_path, map_location=device))
     model_batched.to(device)
     model_batched.eval()
 
@@ -197,27 +197,27 @@ def test_batched_vs_nonbatched_equivalence_with_single_versus_batched_model(reasoning):
     # Download and init tokenizer
     kind = "reasoning" if reasoning else "base"
     download_qwen3_small(kind=kind, tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file = Path("qwen3") / (
+    tokenizer_path = Path("qwen3") / (
         "tokenizer-reasoning.json" if reasoning else "tokenizer-base.json"
     )
-    model_file = Path("qwen3") / (
+    model_path = Path("qwen3") / (
        "qwen3-0.6B-reasoning.pth" if reasoning else "qwen3-0.6B-base.pth"
     )
     tokenizer = Qwen3Tokenizer(
-        tokenizer_file_path=tokenizer_file,
+        tokenizer_file_path=tokenizer_path,
        apply_chat_template=True if reasoning else False,
         add_generation_prompt=True if reasoning else False,
         add_thinking=True if reasoning else False,
     )
 
     # Models
     model = Qwen3Model(QWEN_CONFIG_06_B)
-    model.load_state_dict(torch.load(model_file, map_location=device))
+    model.load_state_dict(torch.load(model_path, map_location=device))
     model.to(device)
     model.eval()
 
     model_batched = Qwen3ModelBatched(QWEN_CONFIG_06_B)
-    model_batched.load_state_dict(torch.load(model_file, map_location=device))
+    model_batched.load_state_dict(torch.load(model_path, map_location=device))
     model_batched.to(device)
     model_batched.eval()
 
@@ -290,27 +290,27 @@ def test_plain_vs_streaming_generation(reasoning):
     # Download and init tokenizer
     kind = "reasoning" if reasoning else "base"
     download_qwen3_small(kind=kind, tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file = Path("qwen3") / (
+    tokenizer_path = Path("qwen3") / (
         "tokenizer-reasoning.json" if reasoning else "tokenizer-base.json"
     )
-    model_file = Path("qwen3") / (
+    model_path = Path("qwen3") / (
         "qwen3-0.6B-reasoning.pth" if reasoning else "qwen3-0.6B-base.pth"
     )
     tokenizer = Qwen3Tokenizer(
-        tokenizer_file_path=tokenizer_file,
+        tokenizer_file_path=tokenizer_path,
         apply_chat_template=True if reasoning else False,
         add_generation_prompt=True if reasoning else False,
         add_thinking=True if reasoning else False,
     )
 
     # Models
     model = Qwen3Model(QWEN_CONFIG_06_B)
-    model.load_state_dict(torch.load(model_file, map_location=device))
+    model.load_state_dict(torch.load(model_path, map_location=device))
     model.to(device)
     model.eval()
 
     model_batched = Qwen3ModelBatched(QWEN_CONFIG_06_B)
-    model_batched.load_state_dict(torch.load(model_file, map_location=device))
+    model_batched.load_state_dict(torch.load(model_path, map_location=device))
     model_batched.to(device)
     model_batched.eval()

tests/test_qwen3_optimized.py

Lines changed: 5 additions & 5 deletions
@@ -86,27 +86,27 @@ def test_qwen3_vs_optimized_qwen3(reasoning):
     # Download and init tokenizer
     kind = "reasoning" if reasoning else "base"
     download_qwen3_small(kind=kind, tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file = Path("qwen3") / (
+    tokenizer_path = Path("qwen3") / (
         "tokenizer-reasoning.json" if reasoning else "tokenizer-base.json"
     )
-    model_file = Path("qwen3") / (
+    model_path = Path("qwen3") / (
         "qwen3-0.6B-reasoning.pth" if reasoning else "qwen3-0.6B-base.pth"
     )
     tokenizer = Qwen3Tokenizer(
-        tokenizer_file_path=tokenizer_file,
+        tokenizer_file_path=tokenizer_path,
         apply_chat_template=True if reasoning else False,
         add_generation_prompt=True if reasoning else False,
         add_thinking=True if reasoning else False,
     )
 
     # Models
     model = Qwen3Model(QWEN_CONFIG_06_B)
-    model.load_state_dict(torch.load(model_file, map_location=device))
+    model.load_state_dict(torch.load(model_path, map_location=device))
     model.to(device)
     model.eval()
 
     model_optimized = Qwen3ModelOptimized(QWEN_CONFIG_06_B, exact=True)
-    model_optimized.load_state_dict(torch.load(model_file, map_location=device))
+    model_optimized.load_state_dict(torch.load(model_path, map_location=device))
     model_optimized.to(device)
     model_optimized.eval()
