 from pathlib import Path
 from typing import Optional
 
-from torchchat.cli.convert_hf_checkpoint import convert_hf_checkpoint, convert_hf_checkpoint_to_tune
+from torchchat.cli.convert_hf_checkpoint import (
+    convert_hf_checkpoint,
+    convert_hf_checkpoint_to_tune,
+)
 from torchchat.model_config.model_config import (
     load_model_configs,
     ModelConfig,
@@ -57,7 +60,6 @@ def _download_hf_snapshot(
         snapshot_download(
             model_config.distribution_path,
             local_dir=artifact_dir,
-            local_dir_use_symlinks=False,
             token=hf_token,
             ignore_patterns=ignore_patterns,
         )
@@ -77,9 +79,14 @@ def _download_hf_snapshot(
             raise e
 
     # Convert the Multimodal Llama model to the torchtune format.
-    if model_config.name in {"meta-llama/Llama-3.2-11B-Vision-Instruct", "meta-llama/Llama-3.2-11B-Vision"}:
+    if model_config.name in {
+        "meta-llama/Llama-3.2-11B-Vision-Instruct",
+        "meta-llama/Llama-3.2-11B-Vision",
+    }:
         print(f"Converting {model_config.name} to torchtune format...", file=sys.stderr)
-        convert_hf_checkpoint_to_tune(model_dir=artifact_dir, model_name=model_config.name)
+        convert_hf_checkpoint_to_tune(
+            model_dir=artifact_dir, model_name=model_config.name
+        )
 
     else:
         # Convert the model to the torchchat format.
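
For context, a minimal sketch of the download call as it stands after this change, assuming only that huggingface_hub is installed and a token is available in the environment; the repo id, target directory, and ignore pattern below are illustrative placeholders, not values taken from torchchat's model config. Recent huggingface_hub releases deprecate local_dir_use_symlinks and no longer act on it, so dropping the argument leaves the download behavior unchanged.

import os
from pathlib import Path

from huggingface_hub import snapshot_download

# Hypothetical stand-ins for model_config.distribution_path, artifact_dir,
# hf_token, and ignore_patterns used in _download_hf_snapshot above.
artifact_dir = Path("downloads/meta-llama/Llama-3.2-11B-Vision-Instruct")

snapshot_download(
    "meta-llama/Llama-3.2-11B-Vision-Instruct",  # model_config.distribution_path
    local_dir=artifact_dir,                      # files are written here directly
    token=os.environ.get("HF_TOKEN"),            # hf_token
    ignore_patterns=["*.pth"],                   # hypothetical filter
)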