from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned model and its tokenizer from the local checkpoint directory
model_path = "./trained_model/gpt"
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)


def generate_response(prompt, max_length=100):
    # Tokenize the prompt and return PyTorch tensors
    inputs = tokenizer(prompt, return_tensors="pt")
    # Generate a single sampled continuation of up to max_length tokens
    outputs = model.generate(
        **inputs,
        max_length=max_length,
        num_return_sequences=1,
        do_sample=True,
    )
    # Decode the generated token IDs back into text
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response


# Example prompt (Polish): "What are the employee's rights under the Labor Code?"
prompt = "Jakie są prawa pracownika zgodnie z Kodeksem pracy?"
response = generate_response(prompt)
print(response)
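Because do_sample=True enables stochastic decoding, the output will vary between runs. If the answers ramble, the sampling can be constrained with the standard generation parameters. A minimal sketch, reusing the model and tokenizer loaded above; the temperature and top_p values are illustrative, not taken from the original:

# Optional: tighter sampling for more focused answers (illustrative values)
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_length=100,
    do_sample=True,
    temperature=0.7,  # lower temperature -> less random token choices
    top_p=0.9,        # nucleus sampling: keep only the top 90% of probability mass
    pad_token_id=tokenizer.eos_token_id,  # avoids the missing-pad-token warning on GPT-style models
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))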