Mod allegro

This commit is contained in:
l.gabrysiak 2025-02-28 20:54:02 +01:00
parent 124e904c31
commit 4007d446e3
1 changed file with 15 additions and 7 deletions

View File

@ -8,24 +8,32 @@ from datasets import Dataset
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments, Trainer, DataCollatorForSeq2Seq
import weaviate
from weaviate.client import WeaviateClient
from weaviate.connect import ConnectionParams
# 1⃣ Inicjalizacja modelu do embeddingów
embed_model = SentenceTransformer("all-MiniLM-L6-v2")
# 2⃣ Połączenie z Weaviate i pobranie dokumentów
client = weaviate.Client(
url="http://weaviate:8080" # Dostosuj URL do swojego środowiska
client = WeaviateClient(
connection_params=ConnectionParams.from_params(
http_host="weaviate",
http_port=8080,
http_secure=False,
grpc_host="weaviate",
grpc_port=50051,
grpc_secure=False,
)
)
collection_name = "Document" # Zakładam, że to jest nazwa Twojej kolekcji
response = (
client.query
.get(collection_name, ["content"])
result = (
client.query.get(collection_name, ["content"])
.with_additional(["id"])
.do()
)
documents = [item['content'] for item in response['data']['Get'][collection_name]]
documents = [item['content'] for item in result['data']['Get'][collection_name]]
# 3⃣ Generowanie embeddingów
embeddings = embed_model.encode(documents)
@ -74,7 +82,7 @@ tokenized_eval = eval_dataset.map(tokenize_function, batched=True)
# 8⃣ Parametry treningu
training_args = TrainingArguments(
output_dir="./results",
evaluation_strategy="steps",
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,