From 30a63500717a206b22417f282b16006deac70a30 Mon Sep 17 00:00:00 2001
From: "l.gabrysiak"
Date: Wed, 26 Feb 2025 13:09:06 +0100
Subject: [PATCH] mod gemma

---
 gemma.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gemma.py b/gemma.py
index aeb8eed..f10d752 100644
--- a/gemma.py
+++ b/gemma.py
@@ -36,7 +36,7 @@ dataset = create_training_data()
 
 # 5️⃣ Loading the Gemma 2 7B model
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_name = "google/gemma-7b"
+model_name = "google/gemma-2b"
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to(device)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
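
For reference, a minimal smoke test of the checkpoint this patch switches to. This sketch is not part of gemma.py; it assumes the same transformers/torch stack the script already imports and that the gated google/gemma-2b weights are accessible from your Hugging Face account. The prompt text and token count below are illustrative only.

    # Smoke-test sketch: load the smaller checkpoint the same way gemma.py does
    # and generate a few tokens to confirm it runs on the selected device.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model_name = "google/gemma-2b"  # checkpoint introduced by this patch

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype=torch.float16
    ).to(device)

    # Short generation as a sanity check that model and tokenizer agree.
    inputs = tokenizer("Hello, Gemma!", return_tensors="pt").to(device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))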