ShkalikovOleh committed on
Commit
3e3575b
1 Parent(s): ef1dbcb

Convert dataset to Parquet (#9)

Browse files

- Convert dataset to Parquet (549279667653b12a7377001295615ea6cec3d369)
- Delete loading script (c8c9ca6ed35d48368a8e835afa15626ae6bbfd9e)

Files changed (3) hide show
  1. README.md +38 -8
  2. en/test-00000-of-00001.parquet +3 -0
  3. europarl-ner.py +0 -137
README.md CHANGED
@@ -1,17 +1,47 @@
1
  ---
2
- license: other
3
- license_name: europarl-custom
4
- license_link: https://www.statmt.org/europarl/
5
- task_categories:
6
- - token-classification
7
  language:
8
  - en
9
  - it
10
  - de
11
  - es
 
 
 
 
 
 
 
12
  tags:
13
  - NER
14
  - Europarl
15
- size_categories:
16
- - 1K<n<10K
17
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
 
 
 
 
 
2
  language:
3
  - en
4
  - it
5
  - de
6
  - es
7
+ license: other
8
+ size_categories:
9
+ - 1K<n<10K
10
+ task_categories:
11
+ - token-classification
12
+ license_name: europarl-custom
13
+ license_link: https://www.statmt.org/europarl/
14
  tags:
15
  - NER
16
  - Europarl
17
+ dataset_info:
18
+ config_name: en
19
+ features:
20
+ - name: tokens
21
+ sequence: string
22
+ - name: ner_tags
23
+ sequence:
24
+ class_label:
25
+ names:
26
+ '0': O
27
+ '1': B-PER
28
+ '2': I-PER
29
+ '3': B-ORG
30
+ '4': I-ORG
31
+ '5': B-LOC
32
+ '6': I-LOC
33
+ '7': B-MISC
34
+ '8': I-MISC
35
+ splits:
36
+ - name: test
37
+ num_bytes: 374649
38
+ num_examples: 799
39
+ download_size: 64713
40
+ dataset_size: 374649
41
+ configs:
42
+ - config_name: en
43
+ data_files:
44
+ - split: test
45
+ path: en/test-*
46
+ default: true
47
+ ---
en/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac5dd43f41ca544dfbcd4b1ad643626b8d6973313b2c5bf6655e22798b35f742
3
+ size 64713
europarl-ner.py DELETED
@@ -1,137 +0,0 @@
1
- # coding=utf-8
2
-
3
- """The HF Datasets adapter for Evaluation Corpus for Named Entity Recognition using Europarl"""
4
-
5
- import datasets
6
-
7
- _CITATION = """@inproceedings{agerri-etal-2018-building,
8
- title = "Building Named Entity Recognition Taggers via Parallel Corpora",
9
- author = "Agerri, Rodrigo and
10
- Chung, Yiling and
11
- Aldabe, Itziar and
12
- Aranberri, Nora and
13
- Labaka, Gorka and
14
- Rigau, German",
15
- editor = "Calzolari, Nicoletta and
16
- Choukri, Khalid and
17
- Cieri, Christopher and
18
- Declerck, Thierry and
19
- Goggi, Sara and
20
- Hasida, Koiti and
21
- Isahara, Hitoshi and
22
- Maegaard, Bente and
23
- Mariani, Joseph and
24
- Mazo, H{\'e}l{\`e}ne and
25
- Moreno, Asuncion and
26
- Odijk, Jan and
27
- Piperidis, Stelios and
28
- Tokunaga, Takenobu",
29
- booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
30
- month = may,
31
- year = "2018",
32
- address = "Miyazaki, Japan",
33
- publisher = "European Language Resources Association (ELRA)",
34
- url = "https://aclanthology.org/L18-1557",
35
- }"""
36
-
37
- _DESCRIPTION = """This dataset contains a gold-standard test set created from the
38
- Europarl corpus. The test set consists of 799 sentences manually annotated using
39
- four entity types and following the CoNLL 2002 and 2003 guidelines for 4 languages:
40
- English, German, Italian and Spanish."""
41
-
42
- _DATA_URLs = {
43
- "en": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/en-europarl.test.conll02",
44
- "de": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/de-europarl.test.conll02",
45
- "es": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/es-europarl.test.conll02",
46
- "it": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/it-europarl.test.conll02",
47
- }
48
- _HOMEPAGE = "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl"
49
- _VERSION = "1.0.0"
50
- _LANGS = ["en", "de", "es", "it"]
51
-
52
-
53
class EuroparlNERConfig(datasets.BuilderConfig):
    """BuilderConfig for a single EuroparlNER language subset.

    Forwards all keyword arguments to ``datasets.BuilderConfig`` while pinning
    the config to the dataset-wide ``_VERSION`` string.
    """

    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
60
class EuroparlNER(datasets.GeneratorBasedBuilder):
    """EuroparlNER is a multilingual named entity recognition dataset consisting of
    a manually annotated part of the European Parliament Proceedings Parallel Corpus
    1996-2011 with LOC, PER, ORG and MISC tags."""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        EuroparlNERConfig(
            name=lang, description=f"EuroparlNER examples in language {lang}"
        )
        for lang in _LANGS
    ]
    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        """Return dataset metadata: token/NER-tag features plus description,
        homepage and citation."""
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                            "B-MISC",
                            "I-MISC",
                        ]
                    )
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the CoNLL-02 file for the configured language.

        Only a test split exists upstream, so a single SplitGenerator is
        returned.
        """
        lang = self.config.name
        dl_dir = dl_manager.download(_DATA_URLs[lang])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs parsed from a CoNLL-style file.

        Sentences are separated by blank lines; each non-blank line is
        tab-separated ``token<TAB>tag``. A line without a tag column is
        labelled ``"O"``.
        """
        guid_index = 1
        with open(filepath, encoding="utf-8") as f:
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    if tokens:
                        yield guid_index, {
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid_index += 1
                        tokens = []
                        ner_tags = []
                else:
                    # EuroparlNER data is tab separated
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    if len(splits) > 1:
                        ner_tags.append(splits[1].replace("\n", ""))
                    else:
                        # examples have no label in test set
                        ner_tags.append("O")
            # BUGFIX: flush the trailing sentence when the file does not end
            # with a blank line — the original version silently dropped it.
            if tokens:
                yield guid_index, {
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }