From e3a94fa5aee1ed698b6925d2318edefe13916099 Mon Sep 17 00:00:00 2001
From: "l.gabrysiak"
Date: Fri, 28 Feb 2025 22:41:35 +0100
Subject: [PATCH] mod allegro

---
 allegro.py | 23 +++++++++--------------
 1 file changed, 9 insertions(+), 14 deletions(-)

diff --git a/allegro.py b/allegro.py
index 9e1aec3..d5a1fe6 100644
--- a/allegro.py
+++ b/allegro.py
@@ -1,18 +1,13 @@
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-from safetensors.torch import load_file # Załaduj plik safetensors
 
-# Załaduj tokenizer
-tokenizer = AutoTokenizer.from_pretrained("./models/ably")
+# Load the model from its local safetensors checkpoint.
+# NOTE: the valid from_pretrained kwarg is use_safetensors; "from_safetensors" does not exist.
+model = AutoModelForSeq2SeqLM.from_pretrained("./models/ably", local_files_only=True, use_safetensors=True)
+tokenizer = AutoTokenizer.from_pretrained("./models/ably")
 
-# Załaduj model z pliku safetensors
-model = AutoModelForSeq2SeqLM.from_pretrained("./models/ably", local_files_only=True)
+# Save model and tokenizer as a PyTorch .bin checkpoint; safe_serialization=False is
+# required because save_pretrained writes safetensors by default (transformers >= 4.35).
+model.save_pretrained("./models/ably_bin", safe_serialization=False)
+tokenizer.save_pretrained("./models/ably_bin")
 
-# Załaduj model.wagi z pliku safetensors
-model_weights = load_file("./models/ably/model.safetensors")
-model.load_state_dict(model_weights)
-
-# Zapisz model w formacie binarnym .bin dla PyTorch
-model.save_pretrained("./models/ably_bin")
-tokenizer.save_pretrained("./models/ably_bin")
-
-print("✅ Model został zapisany w formacie binarnym!")
\ No newline at end of file
+print("✅ Model zapisany w formacie binarnym!")