# ably.do/herbert.py
import os
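# disable tokenizers parallelism to silence fork-related warnings from HF tokenizers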
os.environ["TOKENIZERS_PARALLELISM"] = "false"
import torch
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
from datasets import Dataset
from peft import LoraConfig, get_peft_model, PeftModel
from transformers import (AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer,
                          DataCollatorForLanguageModeling, LlamaTokenizer, LlamaForCausalLM)
import bitsandbytes as bnb

# 1) Initialize the embedding model
embed_model = SentenceTransformer("all-MiniLM-L6-v2")

# 2) Load the documents and compute embeddings
def read_documents_from_file(file_path):
    with open(file_path, 'r', encoding='utf-8') as file:
        content = file.read()
    articles = content.split('\n\n')
    # keep only paragraphs that start with 'Art.' (individual articles of the code)
    return [article.strip() for article in articles if article.strip().startswith('Art.')]

file_path = './docs/kodekspracy.txt'  # change to the correct path
documents = read_documents_from_file(file_path)
embeddings = embed_model.encode(documents)

# 3) Initialize FAISS and add the vectors
dim = embeddings.shape[1]
index = faiss.IndexFlatL2(dim)
index.add(np.array(embeddings, dtype=np.float32))
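
# The index above is only built; this script never queries it. A minimal retrieval
# sketch, left commented out and purely illustrative (query text and k are assumptions):
# query_vec = np.array(embed_model.encode(["example query about the Labour Code"]), dtype=np.float32)
# _, hit_ids = index.search(query_vec, k=3)
# retrieved_articles = [documents[i] for i in hit_ids[0]]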

# 4) Prepare the training data
def create_training_data():
    return Dataset.from_dict({"text": documents, "embedding": embeddings.tolist()})

dataset = create_training_data()
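# Note: the "embedding" column is carried along in the dataset but is not fed to the model;
# Trainer drops columns the model's forward() does not accept (remove_unused_columns=True by default).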
split_dataset = dataset.train_test_split(test_size=0.25)
train_dataset, eval_dataset = split_dataset["train"], split_dataset["test"]

# 5) Load the base model and the fine-tuned adapter
base_model = "decapoda-research/llama-7b-hf"
finetuned_model = "mmosiolek/polpaca-lora-7b"
tokenizer = LlamaTokenizer.from_pretrained(base_model)
tokenizer.pad_token_id = 0
tokenizer.padding_side = "left"
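# load the fp16 base model and attach the polpaca LoRA adapter on top of the base weights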
model = LlamaForCausalLM.from_pretrained(base_model, torch_dtype=torch.float16).to("cuda")
model = PeftModel.from_pretrained(model, finetuned_model).to("cuda")

# 6) LoRA configuration
lora_config = LoraConfig(
    r=8, lora_alpha=32, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM")
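# target_modules is not set here; for LLaMA, peft typically falls back to the q_proj/v_proj
# attention projections by default (an assumption about the installed peft version)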
model = get_peft_model(model, lora_config)

# 7) Tokenization
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=384)

tokenized_train = train_dataset.map(tokenize_function, batched=True)
tokenized_eval = eval_dataset.map(tokenize_function, batched=True)

# 8) Training parameters
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="steps",
    eval_steps=500,
    save_strategy="steps",
    save_steps=500,
    learning_rate=1e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    num_train_epochs=16,
    weight_decay=0.01,
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    greater_is_better=False,
)
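# load_best_model_at_end requires eval and save to line up; both use the "steps"
# strategy with the same interval (500) above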

# 9) Data collator
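# mlm=False: the collator builds labels for causal LM (labels mirror input_ids, with padding masked as -100)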
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

# 10) Training
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_train,
    eval_dataset=tokenized_eval,
    data_collator=data_collator,
)
trainer.train()

# 11) Save the model locally
model.save_pretrained("./models/finetuned_llama")
tokenizer.save_pretrained("./models/finetuned_llama")

print("✅ The model has been trained and saved!")
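
# A minimal inference sketch (assumed usage, not part of the original script): reload the
# saved adapter on top of the base weights and generate an answer with a retrieved article
# as context. Prompt wording and generation parameters below are illustrative assumptions.
# inf_model = LlamaForCausalLM.from_pretrained(base_model, torch_dtype=torch.float16).to("cuda")
# inf_model = PeftModel.from_pretrained(inf_model, "./models/finetuned_llama").to("cuda")
# prompt = f"Context: {documents[0]}\nQuestion: How many days of leave is an employee entitled to?\nAnswer:"
# inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
# output_ids = inf_model.generate(**inputs, max_new_tokens=128)
# print(tokenizer.decode(output_ids[0], skip_special_tokens=True))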