# train_human.py
# %%
from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import load_dataset, Dataset
from trl import DPOTrainer, DPOConfig
from peft import LoraConfig
from peft import prepare_model_for_kbit_training
import torch
import pandas as pd
# %%
dataset = load_dataset("Undi95/Weyaxi-humanish-dpo-project-noemoji")["train"]
model_name = "Undi95/Meta-Llama-3.1-8B-Claude-bf16"
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.padding_side = "right"
tokenizer.pad_token = tokenizer.eos_token
# %%
tokenizer.chat_template = "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"
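# %%
# Optional sanity check (not part of the original script): render the template on a toy
# conversation. Note that this template always appends a trailing assistant header,
# which the mapping steps below strip off again.
example_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(tokenizer.apply_chat_template(example_messages, tokenize=False))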
# %%
dataset2 = load_dataset("ResplendentAI/NSFW_RP_Format_DPO")['train']
# %%
# mix 400 samples from the humanish DPO set with the RP-format set, then shuffle
dataset = dataset.to_pandas()
dataset2 = dataset2.to_pandas()
dataset = Dataset.from_pandas(pd.concat([dataset.sample(400), dataset2]).sample(frac=1))
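# %%
# Optional check (assumes both source datasets expose the prompt/chosen/rejected columns
# that DPOTrainer expects): inspect the size and schema of the merged mixture.
print(len(dataset), dataset.column_names)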
# %%
def template_prompt(system, prompt):
    """Format a (system, user) pair with the chat template, without tokenizing."""
    if system is None:
        messages = [
            {"role": "user", "content": prompt},
        ]
    else:
        messages = [
            {"role": "system", "content": system},
            {"role": "user", "content": prompt},
        ]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=False
    )
    return prompt
def template_answer(answer):
    """Format a single assistant turn with the chat template, without tokenizing."""
    messages = [
        {"role": "assistant", "content": answer},
    ]
    answer = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=False
    )
    return answer
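# %%
# Quick usage example (not in the original script): show what the two helpers produce
# for a toy prompt/answer pair before they are applied to the whole dataset.
print(template_prompt(None, "How are you today?"))
print(template_answer("I'm doing well, thank you!"))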
# %%
# create new columns
dataset = dataset.map(
    lambda x: {
        # drop the trailing assistant header so the prompt ends with the user turn's <|eot_id|>
        "prompt": template_prompt(None, x["prompt"]).replace(
            "<|start_header_id|>assistant<|end_header_id|>\n\n", ""
        )
    },  # change this according to the dataset!!!
)
# %%
# strip the bos token and the trailing assistant header the template always appends,
# so chosen/rejected each end with the assistant turn's <|eot_id|>
dataset = dataset.map(
    lambda x: {"chosen": template_answer(x["chosen"]).replace("<|begin_of_text|>", "").replace("><|start_header_id|>assistant<|end_header_id|>\n\n", ">")},
)
dataset = dataset.map(
    lambda x: {"rejected": template_answer(x["rejected"]).replace("<|begin_of_text|>", "").replace("><|start_header_id|>assistant<|end_header_id|>\n\n", ">")},
)
# %%
dataset[0]  # inspect one formatted example
# %%
# LoRA configuration
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    # adapt every attention and MLP projection in the Llama blocks
    target_modules=[
        "k_proj",
        "gate_proj",
        "v_proj",
        "up_proj",
        "q_proj",
        "o_proj",
        "down_proj",
    ],
)
# Model to fine-tune
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    load_in_4bit=True,  # recent transformers versions prefer quantization_config=BitsAndBytesConfig(load_in_4bit=True)
    device_map="auto",
)
model.config.use_cache = False
model.gradient_checkpointing_enable()
model = prepare_model_for_kbit_training(model)
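# %%
# Optional check: report the quantized model's memory footprint before training;
# get_memory_footprint is a standard transformers PreTrainedModel helper.
print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")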
# %%
output_name = f"checkpoints/exp_human_{model_name}"
training_args = DPOConfig(
    per_device_train_batch_size=1,
    gradient_accumulation_steps=4,
    num_train_epochs=1,
    gradient_checkpointing=True,
    output_dir=output_name,
    logging_steps=1,
    max_steps=50,  # caps the run at 50 optimizer steps regardless of num_train_epochs
)
trainer = DPOTrainer(
    model,
    ref_model=None,  # with a PEFT config, TRL uses the frozen base model as the implicit reference
    train_dataset=dataset,
    tokenizer=tokenizer,
    args=training_args,
    peft_config=peft_config,
)
trainer.train()
trainer.save_model(output_name)
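# %%
# Follow-up sketch (an assumption, not part of the original script): merge the saved LoRA
# adapter into the base weights for standalone inference. This reloads the model in fp16,
# so it needs enough memory to hold the full 8B model unquantized.
from peft import AutoPeftModelForCausalLM

merged = AutoPeftModelForCausalLM.from_pretrained(output_name, torch_dtype=torch.float16)
merged = merged.merge_and_unload()
merged.save_pretrained(f"{output_name}-merged")
tokenizer.save_pretrained(f"{output_name}-merged")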