diff --git a/gpt.py b/gpt.py index 5f8b18e..487cace 100644 --- a/gpt.py +++ b/gpt.py @@ -37,19 +37,17 @@ def main(): tokenized_dataset = dataset.map(tokenize_function, batched=True) # Model - model = AutoModelForCausalLM.from_pretrained( - MODEL_NAME, - mean_resizing=False # Wyłączenie ostrzeżenia - ) + model = AutoModelForCausalLM.from_pretrained(MODEL_NAME) model.resize_token_embeddings(len(tokenizer)) # Konfiguracja treningu training_args = TrainingArguments( output_dir="./results", - num_train_epochs=1, # Poprawiona nazwa parametru + num_train_epochs=1, per_device_train_batch_size=2, remove_unused_columns=True, - logging_steps=1 + logging_steps=1, + report_to="none" # Disable reporting integrations (wandb/tensorboard) ) # Trainer