ably.do/allegro.py

from transformers import MarianMTModel, MarianTokenizer, Trainer, TrainingArguments
from datasets import load_dataset
# Load the model and tokenizer
model_name = "allegro/multislav-5lang"
model = MarianMTModel.from_pretrained(model_name)
tokenizer = MarianTokenizer.from_pretrained(model_name)
# Load the data (example: Romanian-to-English translation)
dataset = load_dataset("wmt16", "ro-en")
# Preprocess the data into the format the model expects
def tokenize_function(examples):
    # 'translation' is a list of dicts, e.g. [{'en': 'text1', 'ro': 'text1_translated'}, ...]
    # The source side is Romanian ('ro'); text_target tokenizes the English side as labels
    return tokenizer([example['ro'] for example in examples['translation']],
                     text_target=[example['en'] for example in examples['translation']],
                     truncation=True, padding='max_length', max_length=128)

tokenized_datasets = dataset.map(tokenize_function, batched=True)
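# Optional sanity check (not in the original script): inspect one tokenized
# example to confirm that input_ids (source) and labels (target) were produced
print(tokenized_datasets['train'][0]['input_ids'][:10])
print(tokenized_datasets['train'][0]['labels'][:10])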
# Configure the trainer
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",
    learning_rate=5e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=3,
    weight_decay=0.01,
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    # Evaluate on the validation split; keep the test split for final evaluation
    eval_dataset=tokenized_datasets["validation"],
)
# Train the model
trainer.train()
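
# A minimal inference sketch after fine-tuning. The ">>en<<" target-language
# prefix is an assumption based on the usual convention for multilingual
# Marian checkpoints; check the allegro/multislav-5lang model card for the
# exact prefix it expects.
sample = ">>en<< Aceasta este o propoziție de test."
inputs = tokenizer(sample, return_tensors="pt")
generated = model.generate(**inputs, max_length=128)
print(tokenizer.decode(generated[0], skip_special_tokens=True))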