ably.do/gpt.py

import os
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
)
from datasets import Dataset

# Configuration
os.environ["TOKENIZERS_PARALLELISM"] = "false"
MODEL_NAME = "gpt2"
SPECIAL_TOKENS = ["[CITATION_START]", "[CITATION_END]"]


def prepare_simple_dataset():
    # Toy corpus: two Polish Labour Code ("Kodeks Pracy") snippets with the
    # citation wrapped in the custom marker tokens.
    return [
        {"text": "[CITATION_START] Kodeks Pracy, Art. 1 [CITATION_END] Tekst artykułu..."},
        {"text": "[CITATION_START] Kodeks Pracy, Art. 2 [CITATION_END] Inny tekst..."},
    ]


def main():
    # Initialize the tokenizer and register the citation markers
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    tokenizer.add_special_tokens({"additional_special_tokens": SPECIAL_TOKENS})
    # GPT-2 ships without a pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token

    # Prepare the data
    data = prepare_simple_dataset()
    dataset = Dataset.from_dict({"text": [d["text"] for d in data]})

    # Tokenize with labels for causal-LM training. Dataset.map works on plain
    # Python lists; tensor conversion happens in the data collator at batch
    # time, so no return_tensors is needed here.
    def tokenize_function(examples):
        tokenized = tokenizer(
            examples["text"],
            truncation=True,
            padding="max_length",
            max_length=128,
        )
        tokenized["labels"] = tokenized["input_ids"].copy()
        return tokenized

    tokenized_dataset = dataset.map(tokenize_function, batched=True)
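
    # Optional check, a sketch not in the original script: each added marker
    # should now map to a single token id rather than being split into pieces.
    print(tokenizer.convert_ids_to_tokens(tokenized_dataset[0]["input_ids"][:8]))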

    # Model and data collator
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    # Grow the embedding matrix to cover the new special tokens.
    # mean_resizing=False (transformers >= 4.46) gives the new rows plain
    # random init instead of sampling around the mean of existing embeddings.
    model.resize_token_embeddings(len(tokenizer), mean_resizing=False)
    # mlm=False selects causal-LM collation; the collator rebuilds labels from
    # input_ids (with pad positions set to -100), so the manual label copy
    # above is redundant but harmless.
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    # Training configuration
    training_args = TrainingArguments(
        output_dir="./results",
        num_train_epochs=1,
        per_device_train_batch_size=2,
        remove_unused_columns=True,  # drop the raw "text" column before collation
        logging_steps=1,
        report_to="none",
    )

    # Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_dataset,
        data_collator=data_collator,
    )

    print("Starting training...")
    trainer.train()
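
    # Post-training sanity check, a sketch not in the original script: greedy
    # decode from a citation-style prompt to confirm the marker tokens
    # round-trip through tokenization. The prompt text is illustrative.
    prompt = "[CITATION_START] Kodeks Pracy, Art. 1 [CITATION_END]"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    generated = model.generate(
        **inputs,
        max_new_tokens=30,
        do_sample=False,
        pad_token_id=tokenizer.eos_token_id,
    )
    print(tokenizer.decode(generated[0], skip_special_tokens=False))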


if __name__ == "__main__":
    main()