from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from safetensors.torch import load_file  # loads tensors from a .safetensors file

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("./models/ably")

# Load the model from the local safetensors checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("./models/ably", local_files_only=True)

# Load the model weights from the safetensors file and re-apply them to the model
model_weights = load_file("./models/ably/model.safetensors")
model.load_state_dict(model_weights)

# Save the model in PyTorch's binary .bin format
# (safe_serialization=False forces a pytorch_model.bin export; recent transformers versions default to safetensors)
model.save_pretrained("./models/ably_bin")
tokenizer.save_pretrained("./models/ably_bin")

print("✅ Model has been saved in binary .bin format!")