SpeechTaxi / SpeechTaxi.py
from pathlib import Path
import datasets
from datasets import (
    Features,
    SplitGenerator,
)
import pandas as pd

DATA_ARCHIVE = "data.zip"
TABLE_ARCHIVE = "tables.zip"
NAMES = [
    "all",
    "asm",
    "bgc",
    "bht",
    "ckb",
    "ewe",
    "guj",
    "ibo",
    "kan",
    "lin",
    "luo",
    "mal",
    "mar",
    "nag",
    "nde",
    "nlx",
    "pan",
    "peg",
    "tam",
    "tel",
    "tw-akuapem",
    "tw-asante",
    "ukr",
    "urd",
    "vie",
    "yor",
]
DESCRIPTION = """"""
CITATION = """@misc{keller2024speechtaximultilingualsemanticspeech,
title={SpeechTaxi: On Multilingual Semantic Speech Classification},
author={Lennart Keller and Goran Glavaš},
year={2024},
eprint={2409.06372},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2409.06372},
}"""
HOMPAGE = "https://huggingface.co/datasets/LennartKeller/SpeechTaxi"
class SpeechTaxiConfig(datasets.BuilderConfig):
def __init__(
self, name, description, citation, homepage
):
super().__init__(
name=name,
version=datasets.Version("0.0.1"),
description=description,
)
self.name = name
self.description = description
self.citation = citation
self.homepage = homepage
def get_config(name):
return SpeechTaxiConfig(name=name, description=DESCRIPTION, citation=CITATION, homepage=HOMPAGE)


class SpeechTaxi(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [get_config(name) for name in NAMES]
    BUILDER_CONFIG_CLASS = SpeechTaxiConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=DESCRIPTION,
            features=Features(
                {
                    "verse_ref": datasets.features.Value("string"),
                    "text_en": datasets.features.Value("string"),
                    "language": datasets.features.Value("string"),
                    "transcription": datasets.features.Value("string"),
                    "transcription_romanized": datasets.features.Value("string"),
                    "label": datasets.features.ClassLabel(
                        names=[
                            "Recommendation",
                            "Faith",
                            "Description",
                            "Sin",
                            "Grace",
                            "Violence",
                        ]
                    ),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "transcription_mms-zeroshot-300m": datasets.features.Value("string"),
                    "transcription_whisper-large-v3": datasets.features.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=HOMEPAGE,
            citation=CITATION,
        )
    def _split_generators(self, dl_manager):
        # Download and extract the metadata tables and the audio archive.
        table_dir = dl_manager.download_and_extract(TABLE_ARCHIVE)
        data_dir = dl_manager.download_and_extract(DATA_ARCHIVE)
        return [
            SplitGenerator(name="train", gen_kwargs={"split": "train", "table_dir": table_dir, "data_dir": data_dir}),
            SplitGenerator(name="test", gen_kwargs={"split": "test", "table_dir": table_dir, "data_dir": data_dir}),
            SplitGenerator(name="dev", gen_kwargs={"split": "dev", "table_dir": table_dir, "data_dir": data_dir}),
        ]
    def _generate_examples(self, split, table_dir, data_dir):
        table_dir = Path(table_dir)
        data_dir = Path(data_dir)
        idx = 0
        # The "all" config concatenates every per-language table; otherwise only
        # the table of the selected language is read.
        if self.config.name != "all":
            table_files = [table_dir / f"{self.config.name}.tsv"]
        else:
            table_files = sorted(table_dir.glob("*.tsv"))
        for table_file in table_files:
            # Load the per-language TSV table; the file stem is the language code.
            df = pd.read_table(table_file)
            df["language"] = table_file.stem
            df = df.query("split == @split").reset_index(drop=True)
            for _, row in df.iterrows():
                # Resolve the relative audio path against the extracted data archive.
                row["audio"] = str(data_dir / row["audio"])
                yield idx, row.to_dict()
                idx += 1
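

# A minimal usage sketch, not part of the builder itself: it assumes the script is
# consumed through the Hub repository referenced by HOMEPAGE ("LennartKeller/SpeechTaxi")
# and that your `datasets` version requires trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the Malayalam ("mal") configuration; pass "all" to combine every language.
    ds = load_dataset("LennartKeller/SpeechTaxi", "mal", trust_remote_code=True)
    sample = ds["train"][0]
    # "label" is a ClassLabel index; "audio" decodes to an array plus sampling rate.
    print(sample["verse_ref"], sample["label"], sample["audio"]["sampling_rate"])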