diff --git a/allegro.py b/allegro.py
index 9e1aec3..d5a1fe6 100644
--- a/allegro.py
+++ b/allegro.py
@@ -1,18 +1,11 @@
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-from safetensors.torch import load_file # Załaduj plik safetensors
 
-# Załaduj tokenizer
-tokenizer = AutoTokenizer.from_pretrained("./models/ably")
+# Load the model from its safetensors checkpoint.
+# NOTE: the kwarg is `use_safetensors` — `from_safetensors` is not a
+# from_pretrained parameter and would be silently ignored.
+model = AutoModelForSeq2SeqLM.from_pretrained("ścieżka_do_modelu", from_tf=False, use_safetensors=True)
+tokenizer = AutoTokenizer.from_pretrained("ścieżka_do_modelu")
 
-# Załaduj model z pliku safetensors
-model = AutoModelForSeq2SeqLM.from_pretrained("./models/ably", local_files_only=True)
+# Re-save model and tokenizer as a PyTorch .bin checkpoint.
+# save_pretrained defaults to safetensors serialization, so
+# safe_serialization=False is required to actually emit pytorch_model.bin.
+model.save_pretrained("./models/ably", safe_serialization=False)
+tokenizer.save_pretrained("./models/ably")
 
-# Załaduj model.wagi z pliku safetensors
-model_weights = load_file("./models/ably/model.safetensors")
-model.load_state_dict(model_weights)
-
-# Zapisz model w formacie binarnym .bin dla PyTorch
-model.save_pretrained("./models/ably_bin")
-tokenizer.save_pretrained("./models/ably_bin")
-
-print("✅ Model został zapisany w formacie binarnym!")
\ No newline at end of file
+print("✅ Model zapisany w formacie binarnym!")
\ No newline at end of file