from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from safetensors.torch import load_file  # for loading the safetensors weights file

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("./models/ably")

# Load the model from the local safetensors checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("./models/ably", local_files_only=True)

# Load the model weights from the safetensors file
model_weights = load_file("./models/ably/model.safetensors")
model.load_state_dict(model_weights)  # explicitly re-apply the weights (from_pretrained above already loads them)

# Save the model in PyTorch's binary .bin format
model.save_pretrained("./models/ably_bin", safe_serialization=False)  # safe_serialization=False forces pytorch_model.bin (recent transformers versions default to safetensors)
tokenizer.save_pretrained("./models/ably_bin")

print("✅ Model saved in binary .bin format!")
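
# Optional sanity check, a minimal sketch: reload the exported .bin model and run a
# short generation to confirm the conversion round-trips. The prompt below is a
# placeholder assumption; substitute an input that matches what this model expects.
reloaded_tokenizer = AutoTokenizer.from_pretrained("./models/ably_bin")
reloaded_model = AutoModelForSeq2SeqLM.from_pretrained("./models/ably_bin", local_files_only=True)
inputs = reloaded_tokenizer("test input", return_tensors="pt")
output_ids = reloaded_model.generate(**inputs, max_new_tokens=20)
print(reloaded_tokenizer.decode(output_ids[0], skip_special_tokens=True))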