
Commit 8d959a7 (parent: ce24f5e)

make it work with pythia in the cloud

File tree

7 files changed: +352 -70 lines changed


Diff for: .gitattributes (+1)

@@ -0,0 +1 @@
+data/*.jsonl filter=lfs diff=lfs merge=lfs -text

Diff for: configs/pythia_1_2B_alpaca.yml (+13, -12)

@@ -3,35 +3,36 @@ model_type: GPTNeoXForCausalLM
 tokenizer_type: AutoTokenizer
 load_in_8bit: true
 datasets:
-  - path: ./data/alpaca_data_gpt4.jsonl
+  - path: data/alpaca_data_gpt4.jsonl
     type: alpaca
-  - path: ./data/vicuna_cleaned.jsonl
+  - path: data/vicuna_cleaned.jsonl
    type: sharegpt
-  - path: ./data/gpt4-instruct-similarity-0.6-dataset.jsonl
+  - path: data/gpt4-instruct-similarity-0.6-dataset.jsonl
    type: gpteacher
-  - path: ./data/roleplay-similarity_0.6-instruct-dataset.jsonl
+  - path: data/roleplay-similarity_0.6-instruct-dataset.jsonl
    type: gpteacher
 val_set_size: 0.05
 adapter: lora
 sequence_len: 2048
-lora_r: 16
+lora_r: 8
 lora_alpha: 32
 lora_dropout: 0.05
 lora_target_modules:
-  - q_proj
-  - v_proj
-wandb_project:
+  - query_key_value
+lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
+wandb_project: pythia-1.4b-lora
 wandb_watch:
-wandb:run_name:
+wandb_run_name:
 wandb_log_model: checkpoint
 output_dir: ./lora-alpaca
-batch_size: 128
-micro_batch_size: 8
+batch_size: 32
+micro_batch_size: 4
 num_epochs: 5
 learning_rate: 0.0003
 train_on_inputs: false
+group_by_length: false
 bf16: True
-fp16: True
+tf32: True
 resume_from_checkpoint:
 local_rank:
 deepspeed:
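
The finetune script below consumes this YAML by loading it into an AttrDict, so every key becomes an attribute on cfg. A minimal sketch of that round trip, assuming PyYAML and attrdict are installed (the printed values just echo the config above):

# Sketch only: how scripts/finetune.py reads this config.
import yaml
from attrdict import AttrDict

with open("configs/pythia_1_2B_alpaca.yml", "r") as f:
    cfg = AttrDict(yaml.load(f, Loader=yaml.Loader))

# GPTNeoX/pythia-specific LoRA settings introduced in this commit:
print(cfg.lora_target_modules)               # ['query_key_value']
print(cfg.lora_fan_in_fan_out)               # True
print(cfg.batch_size, cfg.micro_batch_size)  # 32 4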

Diff for: scripts/finetune.py (+116, -12)

@@ -1,26 +1,32 @@
+import math
 import os
+import signal
 import sys
 from pathlib import Path
 
+import bitsandbytes as bnb
 import fire
 import torch
 import transformers
 import yaml
 from attrdict import AttrDict
-from datasets import load_dataset, IterableDataset
+from datasets import load_dataset, IterableDataset, Dataset
 from peft import (
     LoraConfig,
     get_peft_model,
-    prepare_model_for_int8_training,
+    prepare_model_for_int8_training, get_peft_model_state_dict,
 )
+from torch import nn
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 # add src to the pythonpath so we don't need to pip install this
+from transformers.trainer_pt_utils import get_parameter_names
+
 project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
 src_dir = os.path.join(project_root, 'src')
 sys.path.insert(0, src_dir)
 
-from axolotl.datasets import TokenizedPromptDataset
+from axolotl.datasets import TokenizedPromptDataset, ConstantLengthDataset
 from axolotl.prompt_tokenizers import AlpacaPromptTokenizingStrategy, ShareGPTPromptTokenizingStrategy, \
     LLAMA_DEFAULT_PAD_TOKEN, GPTeacherPromptTokenizingStrategy
 from axolotl.prompters import AlpacaPrompter, GPTeacherPrompter, ShareGPTPrompter

@@ -29,9 +35,9 @@ def setup_wandb_env_vars(cfg):
     if len(cfg.wandb_project) > 0:
         os.environ["WANDB_PROJECT"] = cfg.wandb_project
         cfg.use_wandb = True
-    if len(cfg.wandb_watch) > 0:
+    if cfg.wandb_watch and len(cfg.wandb_watch) > 0:
         os.environ["WANDB_WATCH"] = cfg.wandb_watch
-    if len(cfg.wandb_log_model) > 0:
+    if cfg.wandb_log_model and len(cfg.wandb_log_model) > 0:
         os.environ["WANDB_LOG_MODEL"] = cfg.wandb_log_model
 
 
@@ -61,6 +67,10 @@ def load_model(base_model, model_type, tokenizer_type, cfg, adapter="lora"):
     if tokenizer.__class__.__name__ == "LlamaTokenizer":
         tokenizer.pad_token = LLAMA_DEFAULT_PAD_TOKEN
 
+    if tokenizer.__class__.__name__ == "GPTNeoXTokenizerFast":
+        tokenizer.add_special_tokens({'pad_token': '[PAD]'})
+        os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
     if cfg.load_in_8bit:
         model = prepare_model_for_int8_training(model)
 
@@ -69,6 +79,7 @@ def load_model(base_model, model_type, tokenizer_type, cfg, adapter="lora"):
         lora_alpha=cfg.lora_alpha,
         target_modules=cfg.lora_target_modules,
         lora_dropout=cfg.lora_dropout,
+        fan_in_fan_out=cfg.lora_fan_in_fan_out,
         bias="none",
         task_type="CAUSAL_LM",
     )
@@ -79,7 +90,7 @@ def load_model(base_model, model_type, tokenizer_type, cfg, adapter="lora"):
     # TODO resume_from_checkpoint handling
 
     model.print_trainable_parameters()
-    return model, tokenizer
+    return model, tokenizer, lora_config
 
 
 def train(
@@ -88,7 +99,7 @@ def train(
 ):
     # load the config from the yaml file
     with open(config, 'r') as f:
-        cfg: AttrDict = AttrDict(yaml.load(f))
+        cfg: AttrDict = AttrDict(yaml.load(f, Loader=yaml.Loader))
     # if there are any options passed in the cli, if it is something that seems valid from the yaml,
     # then overwrite the value
     for k, v in enumerate(kwargs):
@@ -107,23 +118,116 @@
     setup_wandb_env_vars(cfg)
 
     # Load the model and tokenizer
-    model, tokenizer = load_model(cfg.base_model, cfg.model_type, cfg.tokenizer_type, cfg, adapter=cfg.adapter)
+    model, tokenizer, lora_config = load_model(cfg.base_model, cfg.model_type, cfg.tokenizer_type, cfg, adapter=cfg.adapter)
     datasets = []
     for d in cfg.datasets:
-        ds: IterableDataset = load_dataset("json", data_files=d.path, streaming=True, num_proc=4, split=None)
+        ds: IterableDataset = load_dataset("json", data_files=d.path, streaming=True, split=None)
         if d.type == "alpaca":
             ds_strategy = AlpacaPromptTokenizingStrategy(AlpacaPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len)
-            ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
+            ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
             datasets.append(ds_wrapper)
         elif d.type == "gpteacher":
             ds_strategy = GPTeacherPromptTokenizingStrategy(GPTeacherPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len)
-            ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
+            ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
             datasets.append(ds_wrapper)
         elif d.type == "sharegpt":
             ds_strategy = ShareGPTPromptTokenizingStrategy(ShareGPTPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len)
-            ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
+            ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
             datasets.append(ds_wrapper)
+    constant_len_dataset = ConstantLengthDataset(tokenizer, datasets, seq_length=cfg.sequence_len)
+    constant_len_dataset = Dataset.from_list([_ for _ in constant_len_dataset]).train_test_split(
+        test_size=cfg.val_set_size, shuffle=True, seed=42
+    )
+
+    print(constant_len_dataset)
+    train_dataset = constant_len_dataset["train"]
+    eval_dataset = constant_len_dataset["test"]
+
+    total_num_steps = int(math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size))
+    warmup_steps = min(int(0.03 * total_num_steps), 100)
+    logging_steps = min(int(0.005 * total_num_steps), 10)
+    save_steps = eval_steps = min(int(0.05 * total_num_steps), 200)
+
+    training_args = transformers.TrainingArguments(
+        per_device_train_batch_size=cfg.micro_batch_size,
+        gradient_accumulation_steps=cfg.gradient_accumulation_steps,
+        warmup_steps=warmup_steps,
+        num_train_epochs=cfg.num_epochs,
+        learning_rate=cfg.learning_rate,
+        bf16=cfg.bf16,
+        tf32=cfg.tf32,
+        logging_steps=logging_steps,
+        evaluation_strategy="steps" if cfg.val_set_size > 0 else "no",
+        save_strategy="steps",
+        eval_steps=eval_steps if cfg.val_set_size > 0 else None,
+        save_steps=save_steps,
+        output_dir=cfg.output_dir,
+        save_total_limit=3,
+        load_best_model_at_end=True if cfg.val_set_size > 0 else False,
+        ddp_find_unused_parameters=False if cfg.ddp else None,
+        group_by_length=cfg.group_by_length,
+        report_to="wandb" if cfg.use_wandb else None,
+        run_name=cfg.wandb_run_name if cfg.use_wandb else None,
+    )
+
+    decay_parameters = get_parameter_names(model, [nn.LayerNorm])
+    decay_parameters = [name for name in decay_parameters if "bias" not in name]
+    optimizer_grouped_parameters = [
+        {
+            "params": [p for n, p in model.named_parameters() if n in decay_parameters],
+            "weight_decay": training_args.weight_decay,
+        },
+        {
+            "params": [p for n, p in model.named_parameters() if n not in decay_parameters],
+            "weight_decay": 0.0,
+        },
+    ]
+
+    adam_bnb_optim = bnb.optim.Adam8bit(
+        optimizer_grouped_parameters,
+        betas=(training_args.adam_beta1, training_args.adam_beta2),
+        eps=training_args.adam_epsilon,
+        lr=training_args.learning_rate,
+    )
+
+    lr_scheduler = transformers.get_cosine_schedule_with_warmup(
+        adam_bnb_optim,
+        training_args.warmup_steps,
+        total_num_steps,
+    )
+
+    trainer = transformers.Trainer(
+        model=model,
+        train_dataset=train_dataset,
+        eval_dataset=eval_dataset,
+        args=training_args,
+        optimizers=(adam_bnb_optim, lr_scheduler),
+        data_collator=transformers.DataCollatorForSeq2Seq(
+            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
+        ),
+    )
+    model.config.use_cache = False
+
+    old_state_dict = model.state_dict
+    model.state_dict = (
+        lambda self, *_, **__: get_peft_model_state_dict(
+            self, old_state_dict()
+        )
+    ).__get__(model, type(model))
+
+    if torch.__version__ >= "2" and sys.platform != "win32":
+        model = torch.compile(model)
+
+    signal.signal(signal.SIGINT, lambda signal, frame: (
+        model.save_pretrained(cfg.output_dir),
+        exit(0)
+    ))
+
+    # go ahead and presave the adapter config
+    lora_config.save_pretrained(cfg.output_dir)
+    trainer.train(resume_from_checkpoint=cfg.resume_from_checkpoint)
 
+    model.save_pretrained(cfg.output_dir)
 
 if __name__ == "__main__":
     fire.Fire(train)
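
The heart of the new training setup is the hand-built optimizer: parameter names are split into decay / no-decay groups, fed to bitsandbytes' 8-bit Adam, paired with a cosine warmup schedule, and handed to transformers.Trainer via optimizers=. A standalone sketch of that pattern, with a toy nn.Module standing in for the LoRA-wrapped model and illustrative step counts:

# Sketch only: decay/no-decay grouping + 8-bit Adam + cosine schedule.
import bitsandbytes as bnb
import transformers
from torch import nn
from transformers.trainer_pt_utils import get_parameter_names

model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 16))  # toy stand-in

# Weight-decay everything except biases and LayerNorm weights.
decay_parameters = [
    n for n in get_parameter_names(model, [nn.LayerNorm]) if "bias" not in n
]
optimizer_grouped_parameters = [
    {"params": [p for n, p in model.named_parameters() if n in decay_parameters],
     "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if n not in decay_parameters],
     "weight_decay": 0.0},
]

total_num_steps = 1000                                # illustrative
warmup_steps = min(int(0.03 * total_num_steps), 100)  # same rule as the script

adam_bnb_optim = bnb.optim.Adam8bit(optimizer_grouped_parameters, lr=3e-4)
lr_scheduler = transformers.get_cosine_schedule_with_warmup(
    adam_bnb_optim, warmup_steps, total_num_steps
)
# finetune.py then passes both to Trainer: optimizers=(adam_bnb_optim, lr_scheduler)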

Diff for: src/axolotl/convert.py (+1)

@@ -44,6 +44,7 @@ def __init__(self, file_reader, file_writer, json_parser, jsonl_serializer):
     def convert(self, input_file_path, output_file_path):
         content = self.file_reader.read(input_file_path)
         data = self.json_parser.parse(content)
+        # data = [r for r in data if r["conversations"]] # vicuna cleaned has rows with empty conversations
         jsonl_content = self.jsonl_serializer.serialize(data)
         self.file_writer.write(jsonl_content)
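
The commented-out filter guards against ShareGPT-style rows whose conversations list is empty (per the inline note, the cleaned vicuna data contains such rows), presumably because they leave the prompt tokenizer nothing to work with. As a standalone list comprehension, with made-up sample records:

# Sketch only: made-up records shaped like data/vicuna_cleaned.jsonl entries.
data = [
    {"id": "a", "conversations": [{"from": "human", "value": "hi"}]},
    {"id": "b", "conversations": []},  # row with an empty conversation
]
data = [r for r in data if r["conversations"]]  # drop the empty ones
print(len(data))  # -> 1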

Diff for: src/axolotl/datasets.py (+56, -38)

@@ -2,7 +2,7 @@
 
 import torch
 from datasets import IterableDataset
-from .prompt_tokenizers import PromptTokenizingStrategy
+from .prompt_tokenizers import PromptTokenizingStrategy, InvalidDataException
 
 
 # We want this to be a wrapper for an existing dataset that we have loaded
@@ -23,7 +23,12 @@ def __init__(
 
     def __iter__(self):
         iterator = iter(self.dataset)
-        yield self.prompt_tokenizer.tokenize_prompt(next(iterator))
+        # Loop through the entire dataset
+        for example in iterator:
+            try:
+                yield self.prompt_tokenizer.tokenize_prompt(example)
+            except InvalidDataException:
+                pass
 
 
 class ConstantLengthDataset(IterableDataset):
@@ -32,55 +37,68 @@ class ConstantLengthDataset(IterableDataset):
     Args:
         tokenizer (Tokenizer): The processor used for proccessing the data.
         dataset (dataset.Dataset): Dataset with text files.
-        infinite (bool): If True the iterator is reset after dataset reaches end else stops.
         seq_length (int): Length of token sequences to return.
-        chars_per_token (int): Number of characters per token used to estimate number of tokens in text buffer.
     """
 
     def __init__(
         self,
         tokenizer,
         datasets,
-        infinite=False,
         seq_length=2048,
-        num_of_sequences=1024,
-        chars_per_token=3.6,
     ):
         self.tokenizer = tokenizer
-        self.concat_token_id = tokenizer.eos_token_id if tokenizer.eos_token_id else args.eos_token_id
+        self.concat_token_id = tokenizer.eos_token_id
         self.datasets: List[IterableDataset] = datasets
         self.seq_length = seq_length
-        self.infinite = infinite
-        self.current_size = 0
-        self.max_buffer_size = seq_length * chars_per_token * num_of_sequences
 
     def __iter__(self):
-        iterator = iter(self.datasets)
-        more_examples = True
-        while more_examples:
-            buffer, buffer_len = [], 0
-            while True:
-                if buffer_len >= self.max_buffer_size:
-                    break
+        buffer = {"input_ids": [], "attention_mask": [], "labels": []}
+        buffer_len = 0
+        for dataset in self.datasets:
+            iterator = iter(dataset)
+            more_examples = True
+            while more_examples:
                try:
-                    buffer.append(next(iterator))
-                    buffer_len += len(buffer[-1])
+                    example = next(iterator)
                except StopIteration:
-                    if self.infinite:
-                        iterator = iter(self.datasets)
-                    else:
-                        more_examples = False
-                        break
-            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
-            all_token_ids = []
-            for tokenized_input in tokenized_inputs:
-                all_token_ids.extend(tokenized_input + [self.concat_token_id])
-            for i in range(0, len(all_token_ids), self.seq_length):
-                input_ids = all_token_ids[i : i + self.seq_length]
-                if len(input_ids) == self.seq_length:
-                    self.current_size += 1
-                    yield {
-                        "input_ids": torch.LongTensor(input_ids),
-                        "labels": torch.LongTensor(input_ids),
-                        "attention_masks": torch.LongTensor(input_ids),
-                    }
+                    more_examples = False
+                    example = None
+
+                add_concat_token = False
+                if example:
+                    example_len = len(example["input_ids"])
+                    add_concat_token = example["input_ids"][-1] != self.concat_token_id
+                else:
+                    example_len = 0
+
+                if not example_len or buffer_len + int(add_concat_token) + example_len > self.seq_length:
+                    if buffer["input_ids"]:
+                        input_ids = torch.cat(buffer["input_ids"], dim=-1)[: self.seq_length]
+                        attention_mask = torch.cat(buffer["attention_mask"], dim=-1)[: self.seq_length]
+                        labels = torch.cat(buffer["labels"], dim=-1)[: self.seq_length]
+                        yield {
+                            "input_ids": input_ids,
+                            "labels": labels,
+                            "attention_mask": attention_mask,
+                        }
+                    buffer = {"input_ids": [], "attention_mask": [], "labels": []}
+                    buffer_len = 0
+
+                if example:
+                    input_ids = example["input_ids"]
+                    attention_mask = example["attention_mask"]
+                    labels = example["labels"]
+
+                    if add_concat_token:
+                        input_ids.append(self.concat_token_id)
+                        attention_mask.append(1)
+                        labels.append(self.concat_token_id)
+
+                    input_ids_with_concat = torch.tensor(input_ids, dtype=torch.long)
+                    attention_mask_with_concat = torch.tensor(attention_mask, dtype=torch.long)
+                    labels_with_concat = torch.tensor(labels, dtype=torch.long)
+
+                    buffer["input_ids"].append(input_ids_with_concat)
+                    buffer["attention_mask"].append(attention_mask_with_concat)
+                    buffer["labels"].append(labels_with_concat)
+                    buffer_len += len(input_ids)
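
The rewritten ConstantLengthDataset no longer re-tokenizes text: it packs already-tokenized examples back to back, appending the EOS token after any example that does not end with it, and yields one packed sample whenever the next example would push the buffer past seq_length. A toy usage sketch, assuming src/ is on PYTHONPATH and using a plain list of pre-tokenized dicts in place of the TokenizedPromptDataset wrappers (the pythia model id is an assumption):

# Sketch only: pack two short pre-tokenized examples into one sample.
from transformers import AutoTokenizer
from axolotl.datasets import ConstantLengthDataset  # requires src/ on PYTHONPATH

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-1.4b-deduped")  # assumed model id

fake_tokenized = [
    {"input_ids": [5, 6, 7], "attention_mask": [1, 1, 1], "labels": [5, 6, 7]},
    {"input_ids": [8, 9], "attention_mask": [1, 1], "labels": [8, 9]},
]

packed = ConstantLengthDataset(tokenizer, [fake_tokenized], seq_length=16)
for sample in packed:
    # Both examples fit in one 7-token tensor, separated by tokenizer.eos_token_id.
    print(sample["input_ids"].tolist())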
