Mod allegro

commit 4007d446e3
parent 124e904c31

allegro.py (22 changed lines)
@@ -8,24 +8,32 @@ from datasets import Dataset
 from peft import LoraConfig, get_peft_model
 from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments, Trainer, DataCollatorForSeq2Seq
 import weaviate
+from weaviate.client import WeaviateClient
+from weaviate.connect import ConnectionParams
 
 # 1️⃣ Initialize the embedding model
 embed_model = SentenceTransformer("all-MiniLM-L6-v2")
 
 # 2️⃣ Connect to Weaviate and fetch documents
-client = weaviate.Client(
-    url="http://weaviate:8080"  # Adjust the URL to your environment
+client = WeaviateClient(
+    connection_params=ConnectionParams.from_params(
+        http_host="weaviate",
+        http_port=8080,
+        http_secure=False,
+        grpc_host="weaviate",
+        grpc_port=50051,
+        grpc_secure=False,
+    )
 )
 
 collection_name = "Document"  # Assuming this is the name of your collection
-response = (
-    client.query
-    .get(collection_name, ["content"])
+result = (
+    client.query.get(collection_name, ["content"])
     .with_additional(["id"])
     .do()
 )
 
-documents = [item['content'] for item in response['data']['Get'][collection_name]]
+documents = [item['content'] for item in result['data']['Get'][collection_name]]
 
 # 3️⃣ Generate embeddings
 embeddings = embed_model.encode(documents)
 
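Note: `WeaviateClient` with `ConnectionParams` is the v4-style client of the `weaviate-client` package, while the query above keeps the v3 GraphQL builder (`client.query.get(...).do()`), which the v4 client object does not appear to expose. Below is a minimal sketch (not part of this commit) of the same fetch using the v4 collections API, assuming the "Document" collection and "content" property from the diff; the `limit` value and the explicit connect/close calls are illustrative assumptions.

# Sketch only: v4 collections API equivalent of the fetch above.
from weaviate.client import WeaviateClient
from weaviate.connect import ConnectionParams

client = WeaviateClient(
    connection_params=ConnectionParams.from_params(
        http_host="weaviate",
        http_port=8080,
        http_secure=False,
        grpc_host="weaviate",
        grpc_port=50051,
        grpc_secure=False,
    )
)
client.connect()  # the v4 client must be connected explicitly before querying

collection = client.collections.get("Document")        # collection name taken from the diff
response = collection.query.fetch_objects(limit=100)   # limit chosen for illustration
documents = [obj.properties["content"] for obj in response.objects]

client.close()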
@@ -74,7 +82,7 @@ tokenized_eval = eval_dataset.map(tokenize_function, batched=True)
 # 8️⃣ Training parameters
 training_args = TrainingArguments(
     output_dir="./results",
-    evaluation_strategy="steps",
+    eval_strategy="steps",
     eval_steps=500,
     save_strategy="steps",
     save_steps=500,
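Note: recent `transformers` releases deprecate the `TrainingArguments` keyword `evaluation_strategy` in favor of `eval_strategy`, which is what this rename tracks. A minimal compatibility sketch for environments that may still run an older `transformers` (the `inspect`-based check is an assumption, not part of this commit):

# Sketch only: pick whichever keyword the installed transformers version accepts.
import inspect
from transformers import TrainingArguments

common_args = dict(output_dir="./results", eval_steps=500, save_strategy="steps", save_steps=500)

if "eval_strategy" in inspect.signature(TrainingArguments.__init__).parameters:
    training_args = TrainingArguments(eval_strategy="steps", **common_args)
else:
    training_args = TrainingArguments(evaluation_strategy="steps", **common_args)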
||||||