@@ -37,22 +37,22 @@ def test_batched_vs_nonbatched_equivalence_with_batched_model(reasoning):
     # Download and init tokenizer
     kind = "reasoning" if reasoning else "base"
     download_qwen3_small(kind=kind, tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file = Path("qwen3") / (
+    tokenizer_path = Path("qwen3") / (
         "tokenizer-reasoning.json" if reasoning else "tokenizer-base.json"
     )
-    model_file = Path("qwen3") / (
+    model_path = Path("qwen3") / (
         "qwen3-0.6B-reasoning.pth" if reasoning else "qwen3-0.6B-base.pth"
     )
     tokenizer = Qwen3Tokenizer(
-        tokenizer_file_path=tokenizer_file,
+        tokenizer_file_path=tokenizer_path,
         apply_chat_template=True if reasoning else False,
         add_generation_prompt=True if reasoning else False,
         add_thinking=True if reasoning else False,
     )
 
     # Models
     model_batched = Qwen3ModelBatched(QWEN_CONFIG_06_B)
-    model_batched.load_state_dict(torch.load(model_file, map_location=device))
+    model_batched.load_state_dict(torch.load(model_path, map_location=device))
     model_batched.to(device)
     model_batched.eval()
@@ -197,27 +197,27 @@ def test_batched_vs_nonbatched_equivalence_with_single_versus_batched_model(reas
     # Download and init tokenizer
     kind = "reasoning" if reasoning else "base"
     download_qwen3_small(kind=kind, tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file = Path("qwen3") / (
+    tokenizer_path = Path("qwen3") / (
         "tokenizer-reasoning.json" if reasoning else "tokenizer-base.json"
     )
-    model_file = Path("qwen3") / (
+    model_path = Path("qwen3") / (
         "qwen3-0.6B-reasoning.pth" if reasoning else "qwen3-0.6B-base.pth"
     )
     tokenizer = Qwen3Tokenizer(
-        tokenizer_file_path=tokenizer_file,
+        tokenizer_file_path=tokenizer_path,
         apply_chat_template=True if reasoning else False,
         add_generation_prompt=True if reasoning else False,
         add_thinking=True if reasoning else False,
     )
 
     # Models
     model = Qwen3Model(QWEN_CONFIG_06_B)
-    model.load_state_dict(torch.load(model_file, map_location=device))
+    model.load_state_dict(torch.load(model_path, map_location=device))
     model.to(device)
     model.eval()
 
     model_batched = Qwen3ModelBatched(QWEN_CONFIG_06_B)
-    model_batched.load_state_dict(torch.load(model_file, map_location=device))
+    model_batched.load_state_dict(torch.load(model_path, map_location=device))
     model_batched.to(device)
     model_batched.eval()
 
@@ -290,27 +290,27 @@ def test_plain_vs_streaming_generation(reasoning):
     # Download and init tokenizer
     kind = "reasoning" if reasoning else "base"
     download_qwen3_small(kind=kind, tokenizer_only=False, out_dir="qwen3")
-    tokenizer_file = Path("qwen3") / (
+    tokenizer_path = Path("qwen3") / (
         "tokenizer-reasoning.json" if reasoning else "tokenizer-base.json"
     )
-    model_file = Path("qwen3") / (
+    model_path = Path("qwen3") / (
         "qwen3-0.6B-reasoning.pth" if reasoning else "qwen3-0.6B-base.pth"
     )
     tokenizer = Qwen3Tokenizer(
-        tokenizer_file_path=tokenizer_file,
+        tokenizer_file_path=tokenizer_path,
         apply_chat_template=True if reasoning else False,
         add_generation_prompt=True if reasoning else False,
         add_thinking=True if reasoning else False,
     )
 
     # Models
     model = Qwen3Model(QWEN_CONFIG_06_B)
-    model.load_state_dict(torch.load(model_file, map_location=device))
+    model.load_state_dict(torch.load(model_path, map_location=device))
     model.to(device)
     model.eval()
 
     model_batched = Qwen3ModelBatched(QWEN_CONFIG_06_B)
-    model_batched.load_state_dict(torch.load(model_file, map_location=device))
+    model_batched.load_state_dict(torch.load(model_path, map_location=device))
     model_batched.to(device)
     model_batched.eval()
 
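
The download/tokenizer/model-path setup block touched by this rename is duplicated verbatim across all three tests. As a minimal sketch (not part of this commit), the renamed paths could be produced once by a parametrized pytest fixture; the fixture name qwen3_paths and the parametrization are assumptions, while download_qwen3_small, the file names, and the path logic come straight from the diff:

import pytest
from pathlib import Path

# download_qwen3_small is assumed importable from the module under test.

@pytest.fixture(params=[True, False], ids=["reasoning", "base"])
def qwen3_paths(request):
    # Download the assets once per parametrization and hand the
    # renamed tokenizer_path/model_path to each test that needs them.
    reasoning = request.param
    kind = "reasoning" if reasoning else "base"
    download_qwen3_small(kind=kind, tokenizer_only=False, out_dir="qwen3")
    tokenizer_path = Path("qwen3") / (
        "tokenizer-reasoning.json" if reasoning else "tokenizer-base.json"
    )
    model_path = Path("qwen3") / (
        "qwen3-0.6B-reasoning.pth" if reasoning else "qwen3-0.6B-base.pth"
    )
    return reasoning, tokenizer_path, model_path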