mod gemma
This commit is contained in:
parent e41cb52136
commit 30a6350071
gemma.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
@@ -36,7 +36,7 @@ dataset = create_training_data()
 # 5️⃣ Loading the Gemma 2 7B model
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_name = "google/gemma-7b"
+model_name = "google/gemma-2b"
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to(device)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
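For context, a minimal, self-contained sketch of the loading step as it reads after this commit; it adds the torch and transformers imports that live outside this hunk, and it assumes the rest of gemma.py (e.g. create_training_data and the training loop) is unchanged.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Pick the GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# This commit switches from "google/gemma-7b" to the smaller "google/gemma-2b".
model_name = "google/gemma-2b"
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)

Loading in float16 and moving to the detected device mirrors the committed code; access to the Gemma weights on Hugging Face still requires accepting the model license and authenticating.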