from transformers import AutoModelForCausalLM, AutoTokenizer

# Path to the locally fine-tuned GPT-style causal language model.
model_path = "./trained_model/gpt"

model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

# GPT-style tokenizers ship without a pad token; reuse EOS so that
# padding in tokenizer(...) and generate(...) works without warnings.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id


def generate_response(prompt: str, max_length: int = 100) -> str:
    """Generate a text completion for *prompt* with the loaded model.

    Args:
        prompt: Input text fed to the model.
        max_length: Maximum total sequence length (prompt + generated
            tokens) passed to ``model.generate``.

    Returns:
        The decoded model output (prompt included), with special
        tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        pad_token_id=tokenizer.pad_token_id,
        # BUG FIX: was hard-coded to 100, silently ignoring the
        # max_length parameter; now respects the caller's value.
        max_length=max_length,
    )
    # outputs[0]: the single generated sequence (batch size is 1 here).
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


prompt = "Zacytuj art. 154 kodeksu pracy"
response = generate_response(prompt)
print(response)