import multiprocessing
import os
import random
import uuid
import warnings
from functools import partial

import torch
from accelerate import PartialState
from accelerate.logging import get_logger
from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
from transformers.integrations import is_deepspeed_zero3_enabled
from trl import CPOConfig, CPOTrainer, ModelConfig, get_peft_config

from src.callbacks.generate_examples import GenerateExamplesCallback
from src.configs.additional.cpo_args import CPOScriptArguments
from src.utils.datasets import load_datasets, prepare_generative_row
from src.utils.logger import setup_logging
from src.utils.model_preparation import setup_model_and_tokenizer
from src.utils.yaml_args_parser import H4ArgumentParser

logger = get_logger(__name__)
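
# Unique identifiers so each launch shows up as a fresh run in W&B and as a matching task in ClearML.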
LOGGING_TASK_NAME = str(uuid.uuid4())
os.environ['WANDB_RUN_ID'] = str(random.randint(100000, 999999))
os.environ['WANDB_NAME'] = LOGGING_TASK_NAME
os.environ['CLEARML_TASK'] = LOGGING_TASK_NAME
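
# Use half of the available CPU cores, capped at 16, for dataset map() preprocessing.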
DATASET_PROCESSING_THREADS = min(multiprocessing.cpu_count() // 2, 16)


def main():
parser = H4ArgumentParser((CPOScriptArguments, CPOConfig, ModelConfig))
args, cpo_config, model_config = parser.parse()
setup_logging(logger, cpo_config)
    set_seed(cpo_config.seed)  # in case new tokens were added without initialized weights
os.environ["WANDB_PROJECT"] = args.project_name
os.environ['CLEARML_PROJECT'] = args.project_name
################
# Model & Tokenizer
################
tokenizer = AutoTokenizer.from_pretrained(model_config.model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
model_config.model_name_or_path,
torch_dtype=torch.bfloat16 if cpo_config.bf16 else torch.float16,
        # max_position_embeddings=cpo_config.max_length,
attn_implementation=model_config.attn_implementation
)
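    # When PEFT is enabled, freeze the base model so only the adapter weights train;
    # otherwise keep all parameters trainable for full fine-tuning.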
    for param in model.parameters():
        param.requires_grad = not model_config.use_peft
peft_config = get_peft_config(model_config)
if model_config.lora_task_type != "CAUSAL_LM":
        warnings.warn(
            "You are using a `task_type` different from `CAUSAL_LM` for PEFT, which can lead to silent bugs."
            " Make sure to pass --lora_task_type CAUSAL_LM when using this script."
        )
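
    # Apply project-specific model/tokenizer preparation (implemented in src.utils.model_preparation).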
setup_model_and_tokenizer(args, model, tokenizer)
if PartialState().is_main_process:
logger.info(f'Tokenizer: {tokenizer}')
logger.info(f'Model config: {model.config}')
################
# Dataset
################
ds = load_datasets(args.dataset, args.test_size)
generate_dataset = ds['test']
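
    # Render the prompt/chosen/rejected message lists into plain strings with the tokenizer's chat template.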
def apply_chat_templates(row):
row["prompt"] = tokenizer.apply_chat_template(row["prompt"], tokenize=False)
row["chosen"] = tokenizer.apply_chat_template(row["chosen"], tokenize=False)
row["rejected"] = tokenizer.apply_chat_template(row["rejected"], tokenize=False)
return row
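
    # Preprocess on the main process first so the remaining ranks reuse the cached datasets.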
with PartialState().main_process_first():
ds = ds.map(
apply_chat_templates,
num_proc=DATASET_PROCESSING_THREADS,
load_from_cache_file=True,
)
generate_dataset = generate_dataset.map(
partial(prepare_generative_row, tokenizer=tokenizer, max_length=cpo_config.max_prompt_length),
num_proc=DATASET_PROCESSING_THREADS,
load_from_cache_file=True
)
train_dataset = ds["train"]
eval_dataset = ds["test"]
if PartialState().is_main_process:
logger.info('Example from train dataset:')
logger.info(train_dataset[0])
logger.info('Example from test dataset:')
logger.info(eval_dataset[0])
logger.info('Example from gen dataset:')
logger.info(generate_dataset[0])
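
    # Callback that generates sample completions from the test split during training
    # and logs them to the first configured reporting backend.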
generate_callback = GenerateExamplesCallback(
preprocessed_dataset=generate_dataset,
tokenizer=tokenizer,
num_examples=args.num_gen_examples,
is_deepspeed_zero3=is_deepspeed_zero3_enabled(),
logger_backend=cpo_config.report_to[0]
)
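    # Make sure all processes have finished preprocessing before training starts.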
PartialState().wait_for_everyone()
################
# Training
################
trainer = CPOTrainer(
model,
args=cpo_config,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
peft_config=peft_config,
callbacks=[generate_callback] if args.generate_eval_examples else []
)
# train and save the model
trainer.train()
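    # FSDP saves sharded state dicts by default; switch to FULL_STATE_DICT so the saved checkpoint is complete.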
if trainer.is_fsdp_enabled:
trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT")
trainer.save_model(cpo_config.output_dir)


if __name__ == '__main__':
main()