# ctkfacts_nli/ctkfacts_nli.py
import json

import datasets

_VERSION = "0.0.6"

_URL = "data/"
_URLS = {
    "train": _URL + "train.jsonl",
    "validation": _URL + "validation.jsonl",
    "test": _URL + "test.jsonl",
}
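# The relative paths above are resolved by the DownloadManager against the
# location of this script, so the data/ folder is expected to sit next to it
# in the dataset repository (an assumption about the repo layout).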

_DESCRIPTION = """\
CTKFactsNLI is an NLI version of the Czech CTKFacts dataset.
"""

_CITATION = """\
@article{DBLP:journals/corr/abs-2201-11115,
  author     = {Jan Drchal and
                Herbert Ullrich and
                Martin R{\'{y}}par and
                Hana Vincourov{\'{a}} and
                V{\'{a}}clav Moravec},
  title      = {CsFEVER and CTKFacts: Czech Datasets for Fact Verification},
  journal    = {CoRR},
  volume     = {abs/2201.11115},
  year       = {2022},
  url        = {https://arxiv.org/abs/2201.11115},
  eprinttype = {arXiv},
  eprint     = {2201.11115},
  timestamp  = {Tue, 01 Feb 2022 14:59:01 +0100},
  biburl     = {https://dblp.org/rec/journals/corr/abs-2201-11115.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
"""


class CtkfactsNli(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "label": datasets.ClassLabel(names=["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"]),
                    "evidence": datasets.Value("string"),
                    "claim": datasets.Value("string"),
                }
            ),
            # No default supervised_keys: both the claim and the evidence
            # have to be passed to the model as inputs.
            supervised_keys=None,
            version=_VERSION,
            homepage="https://fcheck.fel.cvut.cz/dataset/",
            citation=_CITATION,
        )
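
    # Note: datasets.ClassLabel encodes the label as an integer following the
    # order of the names list, i.e. 0 = "REFUTES", 1 = "NOT ENOUGH INFO",
    # 2 = "SUPPORTS"; features["label"].int2str() maps it back to the string.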

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]
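
    # Each gen_kwargs dict above is forwarded as keyword arguments to
    # _generate_examples below, so its keys must match that signature.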

    def _generate_examples(self, filepath):
        """Yields examples in raw (text) form, one per JSONL line."""
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                datapoint = json.loads(line)
                yield key, {
                    "id": datapoint["id"],
                    # "evidence" is stored as a list of sentences in the
                    # source JSONL; join it into the single string feature.
                    "evidence": " ".join(datapoint["evidence"]),
                    "claim": datapoint["claim"],
                    "label": datapoint["label"],
                }
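
# A minimal usage sketch (assumption: the script is loaded from a local
# checkout that also contains the data/ folder; the path below is
# illustrative, not part of this repository):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/ctkfacts_nli.py")
#     print(ds["train"][0])  # {"id": ..., "label": ..., "evidence": ..., "claim": ...}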