import json
import os
import pathlib

import datasets

_DESCRIPTION = """\
CtkFactsNLI is an NLI version of the Czech CTKFacts dataset.
"""

_CITATION = """\
todo
"""


class CtkfactsNli(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "label": datasets.ClassLabel(names=["SUPPORTS", "REFUTES", "NOT ENOUGH INFO"]),
                    "evidence": datasets.Value("string"),
                    "claim": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (as we have to pass both the claim
            # and the evidence as input).
            supervised_keys=None,
            homepage="https://fcheck.fel.cvut.cz/dataset/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The JSONL splits are shipped alongside this script in a `data/` folder.
        data_dir = pathlib.Path(__file__).parent.resolve()
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, "data", "train.jsonl")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": os.path.join(data_dir, "data", "validation.jsonl")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, "data", "test.jsonl")},
            ),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        key = 0
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                datapoint = json.loads(line)
                yield key, {
                    "id": datapoint["id"],
                    # Evidence is stored as a list of sentences; join it into a single string.
                    "evidence": " ".join(datapoint["evidence"]),
                    "claim": datapoint["claim"],
                    "label": datapoint["label"],
                }
                key += 1
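

# Usage sketch (not part of the original script): once this file sits next to a
# `data/` directory containing train.jsonl, validation.jsonl, and test.jsonl, it
# can be loaded locally through the generic `datasets.load_dataset` API by passing
# the path to this script. The path handling and the printed fields below are
# illustrative assumptions; recent versions of `datasets` may additionally require
# `trust_remote_code=True` for script-based datasets.
if __name__ == "__main__":
    dataset = datasets.load_dataset(str(pathlib.Path(__file__).resolve()))
    print(dataset)                 # split names and sizes
    print(dataset["train"][0])     # one example: id, label, evidence, claim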