ably.do/hft.py

import os
import torch
import random
import re
import json
import PyPDF2
import docx2txt
import pytesseract
import numpy as np
from PIL import Image
from collections import defaultdict
from multiprocessing import cpu_count
from concurrent.futures import ThreadPoolExecutor
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from datasets import Dataset
from nlpaug.augmenter.word import SynonymAug
from huggingface_hub import login

# Configuration
os.environ["TOKENIZERS_PARALLELISM"] = "false"
login(token="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")  # Replace with your own token


class SourceMapper:
    """Maps document source labels to stable integer indices."""

    def __init__(self):
        # defaultdict assigns the next free index the first time a source is seen
        self.source_to_idx = defaultdict(lambda: len(self.source_to_idx))
        self.idx_to_source = {}

    def add_source(self, source):
        if source and source not in self.source_to_idx:
            idx = self.source_to_idx[source]
            self.idx_to_source[idx] = source

    def get_idx(self, source):
        return self.source_to_idx[source] if source else -1

    def get_source(self, idx):
        return self.idx_to_source.get(idx, "Unknown")


class LegalProcessor:
    """Extracts text from source files and splits it into training fragments."""

    def __init__(self, catalog_path):
        self.catalog = self.load_catalog(catalog_path)
        self.augmenter = SynonymAug(aug_src='wordnet', aug_max=3)

    def load_catalog(self, path):
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError):
            # Missing or malformed catalog: fall back to an empty mapping
            return defaultdict(str)

    def process_file(self, file_path):
        text = self.extract_text(file_path)
        if not text:
            return []
        doc_type = self.identify_doc_type(file_path)
        return self.split_content(text, doc_type)

    def extract_text(self, file_path):
        ext = os.path.splitext(file_path)[1].lower()
        try:
            if ext == '.pdf':
                return self.extract_pdf(file_path)
            elif ext in ['.doc', '.docx']:
                return docx2txt.process(file_path)
            elif ext in ['.jpg', '.jpeg', '.png']:
                return self.extract_image(file_path)
            else:
                with open(file_path, 'r', encoding='utf-8') as f:
                    return f.read()
        except Exception as e:
            print(f"Error while processing {file_path}: {str(e)}")
            return ""

    def extract_pdf(self, path):
        text = ""
        with open(path, 'rb') as f:
            reader = PyPDF2.PdfReader(f)
            for page in reader.pages:
                text += page.extract_text() + "\n"
        return re.sub(r'\s+', ' ', text)

    def extract_image(self, path):
        # --psm 4: assume a single column of text of variable sizes; --oem 3: default engine
        return pytesseract.image_to_string(
            Image.open(path),
            config='--psm 4 --oem 3 -c preserve_interword_spaces=1'
        )

    def identify_doc_type(self, file_path):
        base = os.path.splitext(os.path.basename(file_path))[0].lower()
        return self.catalog.get(base, "Custom")

    def split_content(self, text, doc_type):
        if doc_type == "Custom":
            return self.split_custom(text)
        return self.split_legal(text, doc_type)

    def split_legal(self, text, doc_type):
        # Split on legal unit markers: articles, paragraphs (§) and chapters
        pattern = r'(?i)(Art[\.\s]*\d+[a-z]*|§\s*\d+|Rozdział\s+[IVXLCDM]+)'
        parts = re.split(pattern, text)
        results = []
        current_header = ""

        for part in parts:
            if not part:
                continue
            if re.match(pattern, part):
                if current_header:
                    results.append(current_header)
                current_header = f"[{doc_type}] {part.strip()}"
            else:
                if current_header:
                    results.append(f"{current_header}: {part.strip()}")
                    current_header = ""
                else:
                    results.append(part.strip())

        return [chunk for chunk in results if len(chunk) > 50]
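
    # Note: re.split keeps the captured markers, so `parts` alternates between
    # headers ("Art. 5", "§ 2", "Rozdział III", ...) and the text that follows
    # them; the loop above re-attaches each body to its header as
    # "[<doc_type>] <header>: <body>".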

    def split_custom(self, text):
        # Sliding-window chunking: fixed-size windows with a small overlap
        clean_text = re.sub(r'\s+', ' ', text).strip()
        chunk_size = 384
        overlap = 64
        chunks = []
        start = 0
        while start < len(clean_text):
            end = start + chunk_size
            chunks.append(clean_text[start:end])
            start = end - overlap
        return [f"[Custom] {chunk}" for chunk in chunks if chunk.strip()]


def main():
    # Initialize components
    source_mapper = SourceMapper()
    processor = LegalProcessor("file_catalog.json")
    tokenizer = AutoTokenizer.from_pretrained("crumb/nano-mistral")
    tokenizer.pad_token = tokenizer.eos_token

    # Data processing
    data = []

    def process_and_augment(file_path):
        try:
            items = processor.process_file(file_path)
            for text in items:
                # Source label is the tag between the leading "[" and "]"
                source = text.split("]")[0][1:]
                source_mapper.add_source(source)
                # Original text
                data.append({
                    "text": text,
                    "source_idx": source_mapper.get_idx(source)
                })
                # Augmentation (recent nlpaug versions return a list of strings)
                augmented = processor.augmenter.augment(text)
                if isinstance(augmented, list):
                    augmented = augmented[0] if augmented else text
                if augmented != text:
                    data.append({
                        "text": augmented,
                        "source_idx": source_mapper.get_idx(source)
                    })
        except Exception as e:
            print(f"Error while processing {file_path}: {str(e)}")

    # Multithreaded processing
    with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
        futures = []
        for root, _, files in os.walk("files"):  # changed to "files"
            for file in files:
                file_path = os.path.join(root, file)
                futures.append(executor.submit(process_and_augment, file_path))
        for future in futures:
            future.result()

    # The rest of the code remains unchanged...
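
    # A minimal sketch of the elided continuation (an assumption, not the original
    # code): the imports above suggest `data` is wrapped in a `datasets.Dataset`,
    # tokenized, and fed to a causal-LM Trainer. Hyperparameters and output paths
    # below are illustrative placeholders.
    dataset = Dataset.from_list(data)

    def tokenize_fn(batch):
        return tokenizer(batch["text"], truncation=True, max_length=512)

    tokenized = dataset.map(
        tokenize_fn, batched=True, remove_columns=["text", "source_idx"]
    )

    model = AutoModelForCausalLM.from_pretrained("crumb/nano-mistral")
    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    training_args = TrainingArguments(
        output_dir="./hft_output",      # placeholder path
        num_train_epochs=3,             # placeholder value
        per_device_train_batch_size=4,  # placeholder value
        logging_steps=50,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized,
        data_collator=collator,
    )
    trainer.train()
    trainer.save_model("./hft_output")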


if __name__ == "__main__":
    main()