From b9d7a7adc5826a62f324de002a134f9a70aeb5f8 Mon Sep 17 00:00:00 2001 From: "l.gabrysiak" Date: Wed, 26 Feb 2025 13:56:35 +0100 Subject: [PATCH] gemma: raise max_length to 1024, lower LR to 1e-5, train 32 epochs --- gemma.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gemma.py b/gemma.py index 1116d8d..8a5d12f 100644 --- a/gemma.py +++ b/gemma.py @@ -65,7 +65,7 @@ lora_config = LoraConfig( model = get_peft_model(model, lora_config) # 7️⃣ Tokenizacja danych -max_length = 128 +max_length = 1024 def tokenize_function(examples): return tokenizer( @@ -85,10 +85,10 @@ training_args = TrainingArguments( eval_steps=500, # Ewaluacja co 500 kroków save_strategy="steps", # Zapis modelu co określoną liczbę kroków save_steps=500, # Zapis modelu co 500 kroków - learning_rate=2e-5, + learning_rate=1e-5, per_device_train_batch_size=2, per_device_eval_batch_size=2, - num_train_epochs=5, + num_train_epochs=32, weight_decay=0.01, load_best_model_at_end=True, # Wczytaj najlepszy model na końcu metric_for_best_model="loss", # Kryterium wyboru najlepszego modelu