diff --git a/hft.py b/hft.py
index 756535e..d1e432a 100644
--- a/hft.py
+++ b/hft.py
@@ -10,6 +10,7 @@
 import pytesseract
 import docx2txt
 import PyPDF2
 import json
+from torch.cuda.amp import autocast
 from collections import defaultdict
 from huggingface_hub import login
@@ -165,6 +166,8 @@
 tokenized_dataset = dataset.map(tokenize_function, batched=True, batch_size=32)
 config = AutoModelForCausalLM.from_pretrained(model_name).config
 #model = CustomModel.from_pretrained(model_name, config=config)
 model = CustomModel.from_pretrained(model_name)
+model.config.gradient_checkpointing = True
+model.config.use_cache = False
 model.resize_token_embeddings(len(tokenizer))
 model.gradient_checkpointing_enable()
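
The diff imports `autocast` but the shown hunks do not include where it is used. Below is a minimal sketch of the usual `torch.cuda.amp` pattern that pairs `autocast` with `GradScaler` in a training step, assuming that is the intended use here; `model`, `optimizer`, and `batch` are hypothetical stand-ins for objects defined elsewhere in hft.py, not code from this diff.

```python
import torch
from torch.cuda.amp import autocast, GradScaler

# GradScaler rescales the loss so fp16 gradients don't underflow.
scaler = GradScaler()

def train_step(model, optimizer, batch, device="cuda"):
    optimizer.zero_grad()
    # Forward pass in mixed precision; with use_cache=False and gradient
    # checkpointing enabled, activations are recomputed during backward
    # instead of being kept in memory.
    with autocast():
        outputs = model(
            input_ids=batch["input_ids"].to(device),
            attention_mask=batch["attention_mask"].to(device),
            labels=batch["labels"].to(device),
        )
        loss = outputs.loss
    # Scale, backprop, step, then update the scale factor.
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()
    return loss.item()
```

Disabling `use_cache` alongside gradient checkpointing is the standard pairing, since the KV cache is incompatible with checkpointed recomputation during training.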