import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, Trainer, TrainingArguments, DataCollatorForLanguageModeling
from datasets import load_dataset
import transformers

transformers.logging.set_verbosity_info()

# Load the fine-tuning data from a local JSON Lines file.
fine_tune_ds = load_dataset('json', data_files='seed_tasks_5MB.jsonl', split='train')

# Load the model weights from a local checkpoint; the tokenizer comes from the
# base GPT-2 release.
checkpoint_dir = '/Users/kharazmimac/PycharmProjects/Curiosity-Test14/results/checkpoint-1500'
model_name = 'gpt2'

tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(checkpoint_dir)

# GPT-2 has no pad token, so reuse the end-of-text token for padding.
tokenizer.pad_token = tokenizer.eos_token


def preprocess_function(examples):
    """Tokenize a batch by picking the first text-like column that exists."""
    text_fields = ['text', 'prompt', 'response', 'chosen', 'rejected', 'content',
                   'sentence', 'concept_name', 'context',
                   'column', 'id', 'name', 'instruction', 'instances',
                   'input', 'noinput', 'output']
    for field in text_fields:
        if field in examples:
            texts = examples[field]
            break
    else:
        raise ValueError(f"No usable text field was found among: {list(examples.keys())}")

    # Coerce non-string values to str and replace missing values with "".
    texts = [str(text) if text is not None else "" for text in texts]
    return tokenizer(texts, truncation=True, padding='max_length', max_length=256)


# Tokenize the whole dataset and drop the raw columns.
tokenized_datasets = fine_tune_ds.map(preprocess_function, batched=True, remove_columns=fine_tune_ds.column_names)
tokenized_datasets.set_format('torch', columns=['input_ids', 'attention_mask'])
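
# Optional sanity check (an illustrative addition, not required for training):
# every example is now a fixed-length tensor because preprocess_function pads
# to max_length=256.
print("Tokenized example shape:", tokenized_datasets[0]['input_ids'].shape)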

dataset_size = len(tokenized_datasets)

# Use up to 200 shuffled examples for evaluation. Note that they are drawn
# from the training data itself, so the eval loss is not a held-out estimate.
eval_size = min(200, dataset_size)
shuffled_dataset = tokenized_datasets.shuffle(seed=42)
small_eval_dataset = shuffled_dataset.select(range(eval_size))

training_args = TrainingArguments(
    output_dir='./fine_tuned_results',
    num_train_epochs=3,
    per_device_train_batch_size=2,
    save_total_limit=2,
    learning_rate=2e-5,
    weight_decay=0.01,
    eval_strategy='epoch',
    logging_dir='./logs',
    logging_steps=10,
    save_steps=500,
)

data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=False,
)
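
# Quick illustrative check (an addition to the original script, safe to remove):
# with mlm=False the collator performs causal-LM batching, copying input_ids
# into labels and masking padded positions with -100 so the loss ignores them.
_sample_batch = data_collator([tokenized_datasets[i] for i in range(min(2, dataset_size))])
print("Collated batch keys:", list(_sample_batch.keys()))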

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets,
    data_collator=data_collator,
    tokenizer=tokenizer,
    eval_dataset=small_eval_dataset,
)

# Resume from a checkpoint left in the output directory by a previous run
# rather than starting from step 0.
trainer.train(resume_from_checkpoint="/Users/kharazmimac/PycharmProjects/Curiosity-Test14/fine_tuned_results/checkpoint-21000")

trainer.save_model('./fine_tuned_model')

# Final evaluation on the sampled eval set.
eval_results = trainer.evaluate()
print("Evaluation results:", eval_results)