diff --git a/allegro.py b/allegro.py
index f4bfb67..978af6f 100644
--- a/allegro.py
+++ b/allegro.py
@@ -8,24 +8,33 @@ from datasets import Dataset
 from peft import LoraConfig, get_peft_model
 from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments, Trainer, DataCollatorForSeq2Seq
 import weaviate
+from weaviate.client import WeaviateClient
+from weaviate.connect import ConnectionParams
 
 # 1️⃣ Initialize the embedding model
 embed_model = SentenceTransformer("all-MiniLM-L6-v2")
 
 # 2️⃣ Connect to Weaviate and fetch the documents
-client = weaviate.Client(
-    url="http://weaviate:8080"  # Adjust the URL to your environment
+client = WeaviateClient(
+    connection_params=ConnectionParams.from_params(
+        http_host="weaviate",  # adjust host and ports to your environment
+        http_port=8080,
+        http_secure=False,
+        grpc_host="weaviate",
+        grpc_port=50051,
+        grpc_secure=False,
+    )
 )
+client.connect()  # the v4 client must be connected explicitly before use
 
 collection_name = "Document"  # Assuming this is the name of your collection
-response = (
-    client.query
-    .get(collection_name, ["content"])
-    .with_additional(["id"])
-    .do()
-)
-documents = [item['content'] for item in response['data']['Get'][collection_name]]
+# The v3 query.get(...).do() chain does not exist on the v4 client;
+# use the collections API instead (object IDs come back as obj.uuid).
+result = client.collections.get(collection_name).query.fetch_objects(
+    return_properties=["content"],
+)
+documents = [obj.properties["content"] for obj in result.objects]
 
 # 3️⃣ Generate embeddings
 embeddings = embed_model.encode(documents)
 
@@ -74,7 +83,7 @@ tokenized_eval = eval_dataset.map(tokenize_function, batched=True)
 # 8️⃣ Training parameters
 training_args = TrainingArguments(
     output_dir="./results",
-    evaluation_strategy="steps",
+    eval_strategy="steps",
     eval_steps=500,
     save_strategy="steps",
     save_steps=500,
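
A minimal follow-up sketch, assuming weaviate-client v4: the same fetch can also be written with the convenience helper weaviate.connect_to_custom(), which builds the ConnectionParams and connects in one call. The host, ports, and the "Document" collection name are taken from the patch above and may need adjusting to your environment:

    import weaviate

    # connect_to_custom() wraps ConnectionParams.from_params() plus connect()
    client = weaviate.connect_to_custom(
        http_host="weaviate", http_port=8080, http_secure=False,
        grpc_host="weaviate", grpc_port=50051, grpc_secure=False,
    )
    try:
        collection = client.collections.get("Document")
        result = collection.query.fetch_objects(return_properties=["content"])
        documents = [obj.properties["content"] for obj in result.objects]
    finally:
        client.close()  # release the underlying HTTP and gRPC connections

The eval_strategy keyword in the second hunk assumes transformers >= 4.41, where evaluation_strategy was deprecated in favor of eval_strategy; on older versions, keep the original name.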