diff --git a/hft.py b/hft.py
index 984f23a..dcfdf35 100644
--- a/hft.py
+++ b/hft.py
@@ -109,7 +109,8 @@
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = CustomModel.from_pretrained(model_name)
 
 # Dataset preparation
-data = prepare_dataset("files", "file_catalog.json")
+catalog_path = "file_catalog.json"
+data = prepare_dataset("files", catalog_path)
 dataset = load_dataset("dict", data=data)
 tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)
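
For context, here is a minimal, self-contained sketch of the dataset-preparation step this hunk touches. The bodies of `prepare_dataset` and `tokenize_function` below are hypothetical (only their names appear in the diff), `model_name` is a placeholder, and the sketch assumes the dataset comes from the Hugging Face `datasets` library, so it builds directly from an in-memory dict with `Dataset.from_dict` rather than the `load_dataset("dict", data=data)` call shown in the hunk:

```python
# Hypothetical sketch of the flow around the changed lines; prepare_dataset and
# tokenize_function are assumed helpers (their real bodies live elsewhere in hft.py).
import json
from pathlib import Path

from datasets import Dataset
from transformers import AutoTokenizer

model_name = "bert-base-uncased"  # placeholder; the real model_name is set earlier in hft.py
tokenizer = AutoTokenizer.from_pretrained(model_name)


def prepare_dataset(files_dir: str, catalog_path: str) -> dict:
    """Assumed helper: read the catalog and collect raw texts into a column dict."""
    catalog = json.loads(Path(catalog_path).read_text())
    texts = [Path(files_dir, entry["file"]).read_text() for entry in catalog]
    return {"text": texts}


def tokenize_function(batch: dict) -> dict:
    """Assumed helper: tokenize a batch of texts."""
    return tokenizer(batch["text"], truncation=True, padding="max_length")


catalog_path = "file_catalog.json"
data = prepare_dataset("files", catalog_path)

# Build a Dataset from the in-memory dict; since there is no train/test split here,
# column_names is read from the Dataset itself rather than dataset["train"].
dataset = Dataset.from_dict(data)
tokenized_dataset = dataset.map(
    tokenize_function, batched=True, remove_columns=dataset.column_names
)
```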