from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned model and its tokenizer from the local checkpoint.
model_path = "./trained_model/gpt"
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)
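
# Put the model in eval mode (assuming inference-only use; disables dropout).
model.eval()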

# GPT-style tokenizers ship without a pad token; reuse the EOS token so
# padded batches and generation work.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id


def generate_response(prompt, max_length=1000):
    # Tokenize the prompt into PyTorch tensors; padding/truncation keep shapes valid.
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        pad_token_id=tokenizer.pad_token_id,
        max_length=max_length,
    )
    # Decode the generated ids back to text, dropping special tokens.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
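

# A sampling-based variant, sketched under the assumption that the same
# model/tokenizer objects are reused; temperature/top_p/max_new_tokens are
# illustrative values, not tuned settings.
def generate_response_sampling(prompt, max_new_tokens=200):
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        pad_token_id=tokenizer.pad_token_id,
        max_new_tokens=max_new_tokens,  # caps only the newly generated tokens
        do_sample=True,                 # nucleus sampling instead of greedy decoding
        temperature=0.7,
        top_p=0.9,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)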


# Prompt in Polish: "Quote the first paragraph of Article 154 of the Labour Code."
prompt = "Zacytuj paragraf pierwszy artykułu 154 Kodeksu pracy."
response = generate_response(prompt)
print(response)
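
# Usage of the sampling variant sketched above; its output varies run to run.
print(generate_response_sampling(prompt))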