diff --git a/allegro.py b/allegro.py
index 978af6f..f4bfb67 100644
--- a/allegro.py
+++ b/allegro.py
@@ -8,32 +8,24 @@ from datasets import Dataset
 from peft import LoraConfig, get_peft_model
 from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments, Trainer, DataCollatorForSeq2Seq
 import weaviate
-from weaviate.client import WeaviateClient
-from weaviate.connect import ConnectionParams
 
 # 1️⃣ Initialize the embedding model
 embed_model = SentenceTransformer("all-MiniLM-L6-v2")
 
 # 2️⃣ Connect to Weaviate and fetch the documents
-client = WeaviateClient(
-    connection_params=ConnectionParams.from_params(
-        http_host="weaviate",
-        http_port=8080,
-        http_secure=False,
-        grpc_host="weaviate",
-        grpc_port=50051,
-        grpc_secure=False,
-    )
+client = weaviate.Client(
+    url="http://weaviate:8080"  # Adjust the URL to your environment
 )
 
 collection_name = "Document"  # Assuming this is the name of your collection
-result = (
-    client.query.get(collection_name, ["content"])
+response = (
+    client.query
+    .get(collection_name, ["content"])
     .with_additional(["id"])
     .do()
 )
-documents = [item['content'] for item in result['data']['Get'][collection_name]]
+documents = [item['content'] for item in response['data']['Get'][collection_name]]
 
 # 3️⃣ Generate embeddings
 embeddings = embed_model.encode(documents)
@@ -82,7 +74,7 @@ tokenized_eval = eval_dataset.map(tokenize_function, batched=True)
 # 8️⃣ Training parameters
 training_args = TrainingArguments(
     output_dir="./results",
-    eval_strategy="steps",
+    evaluation_strategy="steps",
     eval_steps=500,
     save_strategy="steps",
     save_steps=500,