mod gemma

parent b822c32206
commit 51828191cc

gemma.py | 19
@@ -64,17 +64,18 @@ tokenized_dataset = dataset.map(tokenize_function, batched=True)
 # 8️⃣ Training parameters
 training_args = TrainingArguments(
     output_dir="./results",
-    per_device_train_batch_size=2,
-    gradient_accumulation_steps=4,  # Simulates a larger batch size
-    num_train_epochs=5,
-    logging_dir="./logs",
-    save_strategy="epoch",
+    evaluation_strategy="steps",  # Changed to "steps"
+    eval_steps=500,  # Added
+    save_strategy="steps",  # Changed to "steps"
+    save_steps=500,  # Added; must equal eval_steps or be a multiple of it
     learning_rate=2e-5,
-    warmup_steps=100,
-    fp16=True,  # Uses mixed precision training
-    evaluation_strategy="steps",
-    eval_steps=500,
+    per_device_train_batch_size=2,
+    per_device_eval_batch_size=2,
+    num_train_epochs=5,
+    weight_decay=0.01,
     load_best_model_at_end=True,
+    metric_for_best_model="loss",  # or another metric you want to optimize
+    greater_is_better=False,  # Set to True if a higher metric value is better
 )

 # 9️⃣ Data Collator
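Context on why the commit aligns the two strategies: in 🤗 Transformers, `load_best_model_at_end=True` requires `evaluation_strategy` to match `save_strategy`, and `save_steps` must be a multiple of `eval_steps`, which is why both are set to "steps" / 500 here. Below is a minimal sketch of how the updated `training_args` would feed the trainer at the "Data Collator" step; the `model`, `tokenizer`, and dataset split names are assumptions, since only the `TrainingArguments` hunk is shown in this commit.

```python
# Minimal sketch, assuming gemma.py already defines `model`, `tokenizer`,
# and `tokenized_dataset` (the hunk context suggests the latter exists).
from transformers import Trainer, DataCollatorForLanguageModeling

# Gemma is a causal LM, so masked-language-modeling is disabled.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

trainer = Trainer(
    model=model,
    args=training_args,                           # the TrainingArguments from this diff
    train_dataset=tokenized_dataset["train"],     # assumed split name
    eval_dataset=tokenized_dataset["test"],       # assumed split name
    data_collator=data_collator,
)

# With evaluation_strategy="steps" and eval_steps=500, evaluation runs every
# 500 optimizer steps; matching save_steps lets the best checkpoint (by loss,
# lower is better) be restored at the end of training.
trainer.train()
```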