Upload alorese.py with huggingface_hub
alorese.py
ADDED
@@ -0,0 +1,259 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The Alorese Corpus is a collection of language data in two Alorese varieties (Alor and Pantar Alorese). The collection is available in video, audio, and text formats,
with genres spanning experiments and tasks, stimuli, discourse, and written materials.
"""
import xml.etree.ElementTree as ET
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.sea_datasets.alorese.alorese_url import _URLS_DICT
from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

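# _URLS_DICT (imported above) is expected to map each media id to a dict with
# a "text_path" (annotation XML) entry and an "audio_path" entry; that is the
# shape the download and parsing logic below relies on.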
_CITATION = """\
@article{Moro2018-ms,
    title = "The plural word hire in Alorese: Contact-induced change from
             neighboring Alor-Pantar languages",
    author = "Moro, Francesca R",
    journal = "Oceanic Linguistics",
    publisher = "University of Hawai'i Press",
    volume = 57,
    number = 1,
    pages = "177--198",
    year = 2018,
    language = "en"
}
"""

_DATASETNAME = "alorese"
_DESCRIPTION = """\
The Alorese Corpus is a collection of language data in two Alorese varieties (Alor and Pantar Alorese). The collection is available in video, audio, and text formats,
with genres spanning experiments and tasks, stimuli, discourse, and written materials.
"""
_HOMEPAGE = "https://hdl.handle.net/1839/e10d7de5-0a6d-4926-967b-0a8cc6d21fb1"
_LANGUAGES = ["aol", "ind"]
_LICENSE = Licenses.UNKNOWN.value
_LOCAL = False

_URLS = _URLS_DICT

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION, Tasks.MACHINE_TRANSLATION]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

class AloreseDataset(datasets.GeneratorBasedBuilder):
    """The Alorese Corpus is a collection of language data in two Alorese varieties (Alor and Pantar Alorese). The collection is available in video, audio, and text
    formats, with genres spanning experiments and tasks, stimuli, discourse, and written materials."""

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_t2t",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd text2text schema",
            schema="seacrowd_t2t",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_sptext",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd sptext schema",
            schema="seacrowd_sptext",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

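    # Feature layout per schema:
    #   * source          - one row per time-aligned utterance, with audio and timing info
    #   * seacrowd_t2t    - Alorese ("aol") / Indonesian ("ind") sentence pairs for machine translation
    #   * seacrowd_sptext - one concatenated transcript per recording for speech recognition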
    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "nr": datasets.Value("int64"),
                    "media_id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16000),
                    "annotation_aol": datasets.Value("string"),
                    "annotation_ind": datasets.Value("string"),
                    "begin_time": datasets.Value("int64"),
                    "end_time": datasets.Value("int64"),
                }
            )
        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features
        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features
        else:
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        if self.config.schema == "seacrowd_t2t":
            # The text-to-text schema needs only the annotation XML files, not the audio.
            filepath = {k: v["text_path"] for k, v in _URLS.items()}
            paths = dl_manager.download(filepath)
        else:
            paths = dl_manager.download(_URLS)

        # The corpus has no predefined splits, so everything goes into TRAIN.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": paths,
                },
            ),
        ]

    def _generate_examples(self, filepath) -> Tuple[int, Dict]:
        if self.config.schema == "source":
            source_df = self._get_source_df(filepath)

            for k, row in source_df.iterrows():
                yield k, {
                    "nr": k + 1,
                    "media_id": row["media_id"],
                    "speaker_id": row["speaker_id"],
                    "audio": row["audio_path"],
                    "annotation_aol": row["annotation_aol"],
                    "annotation_ind": row["annotation_ind"],
                    "begin_time": row["begin_time"],
                    "end_time": row["end_time"],
                }

        elif self.config.schema == "seacrowd_t2t":
            caption_df = self._merge_text_dfs(filepath)

            for k, row in caption_df.iterrows():
                yield k, {
                    "id": k + 1,
                    "text_1": row["annotation_aol"],
                    "text_2": row["annotation_ind"],
                    "text_1_name": _LANGUAGES[0],
                    "text_2_name": _LANGUAGES[1],
                }

        elif self.config.schema == "seacrowd_sptext":
            sptext_df = self._get_sptext_df(filepath)

            for k, row in sptext_df.iterrows():
                yield k, {
                    "id": k + 1,
                    "path": row["audio_path"],
                    "audio": row["audio_path"],
                    "text": row["annotation_aol"],
                    "speaker_id": row["speaker_id"],
                    "metadata": {
                        "speaker_age": None,
                        "speaker_gender": None,
                    },
                }

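    # The annotation files are ELAN-style XML. The helpers below assume the
    # following simplified layout, inferred from the tags they query:
    #
    #   <TIME_ORDER>
    #     <TIME_SLOT TIME_SLOT_ID="ts1" TIME_VALUE="1500"/>          <!-- milliseconds -->
    #   </TIME_ORDER>
    #   <ALIGNABLE_ANNOTATION ANNOTATION_ID="a1"
    #       TIME_SLOT_REF1="ts1" TIME_SLOT_REF2="ts2">               <!-- Alorese tier -->
    #     <ANNOTATION_VALUE>...</ANNOTATION_VALUE>
    #   </ALIGNABLE_ANNOTATION>
    #   <REF_ANNOTATION ANNOTATION_ID="a2" ANNOTATION_REF="a1">      <!-- Indonesian tier -->
    #     <ANNOTATION_VALUE>...</ANNOTATION_VALUE>
    #   </REF_ANNOTATION>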
    def _get_time_df(self, xml_tree) -> pd.DataFrame:
        time_slot_values = [(time_slot.attrib["TIME_SLOT_ID"], int(time_slot.attrib["TIME_VALUE"])) for time_slot in xml_tree.iter(tag="TIME_SLOT")]

        return pd.DataFrame({"time_slot_id": [v[0] for v in time_slot_values], "time_value": [v[1] for v in time_slot_values]})

    def _get_aol_annotations(self, xml_tree) -> pd.DataFrame:
        aol_annotations = [(annotation.attrib["ANNOTATION_ID"], annotation.attrib["TIME_SLOT_REF1"], annotation.attrib["TIME_SLOT_REF2"], annotation.find("ANNOTATION_VALUE").text) for annotation in xml_tree.iter(tag="ALIGNABLE_ANNOTATION")]

        return pd.DataFrame({"annotation_id": [v[0] for v in aol_annotations], "time_slot_ref1": [v[1] for v in aol_annotations], "time_slot_ref2": [v[2] for v in aol_annotations], "annotation_value": [v[3] for v in aol_annotations]})

    def _get_ind_annotations(self, xml_tree) -> pd.DataFrame:
        ind_annotations = [(annotation.attrib["ANNOTATION_ID"], annotation.attrib["ANNOTATION_REF"], annotation.find("ANNOTATION_VALUE").text) for annotation in xml_tree.iter(tag="REF_ANNOTATION")]

        return pd.DataFrame({"annotation_id": [v[0] for v in ind_annotations], "annotation_ref_id": [v[1] for v in ind_annotations], "annotation_value": [v[2] for v in ind_annotations]})

    def _get_text_df(self, xml_tree) -> pd.DataFrame:
        time_df = self._get_time_df(xml_tree)
        aol_df = self._get_aol_annotations(xml_tree)
        ind_df = self._get_ind_annotations(xml_tree)

        # Resolve the two time-slot references into begin/end times, then attach
        # each Indonesian translation to its Alorese source annotation.
        df1 = aol_df.merge(time_df, left_on="time_slot_ref1", right_on="time_slot_id", how="left").rename(columns={"time_value": "begin_time", "annotation_value": "annotation_aol"}).drop(columns=["time_slot_ref1", "time_slot_id"])
        df2 = df1.merge(time_df, left_on="time_slot_ref2", right_on="time_slot_id", how="left").rename(columns={"time_value": "end_time"}).drop(columns=["time_slot_ref2", "time_slot_id"])
        final_df = df2.merge(ind_df, left_on="annotation_id", right_on="annotation_ref_id", how="left").rename(columns={"annotation_value": "annotation_ind"}).drop(columns=["annotation_ref_id", "annotation_id_y", "annotation_id_x"])

        return final_df[["annotation_aol", "annotation_ind", "begin_time", "end_time"]]

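    # Parse every annotation XML file, stack the per-file utterance tables, and
    # tag each block of rows with the media id it came from.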
    def _merge_text_dfs(self, xml_dict) -> pd.DataFrame:
        final_df = pd.DataFrame()
        len_tracker = []
        media_ids = []

        xml_trees = [ET.parse(xml_path) for xml_path in xml_dict.values()]
        for xml_tree in xml_trees:
            cur_df = self._get_text_df(xml_tree)
            final_df = pd.concat([final_df, cur_df], axis=0)
            len_tracker.append(len(cur_df))

        media_id_list = list(xml_dict.keys())
        for i in range(len(len_tracker)):
            media_ids.extend([media_id_list[i]] * len_tracker[i])

        final_df["media_id"] = media_ids

        return final_df.reset_index()

    def _groupby_caption_by_media_ids(self, caption_df: pd.DataFrame) -> pd.DataFrame:
        # Collapse utterance-level rows into one transcript per recording,
        # substituting "<NONE>" for missing annotation values.
        caption_df = (
            caption_df.groupby("media_id")
            .agg({"annotation_aol": lambda x: " ".join([str(value) if value is not None else "<NONE>" for value in x]), "annotation_ind": lambda x: " ".join([str(value) if value is not None else "<NONE>" for value in x])})
            .reset_index()
        )
        return caption_df

    def _get_sptext_df(self, complete_dict) -> pd.DataFrame:
        xml_dict = {k: v["text_path"] for k, v in complete_dict.items()}

        # The speaker id is encoded as the last underscore-separated token of the media id.
        audio_df = pd.DataFrame({"media_id": [k for k in complete_dict.keys()], "speaker_id": [k.split("_")[-1] for k in complete_dict.keys()], "audio_path": [v["audio_path"] for v in complete_dict.values()]})
        caption_df = self._groupby_caption_by_media_ids(self._merge_text_dfs(xml_dict))

        df = caption_df.merge(audio_df, on="media_id", how="inner")

        return df[["media_id", "speaker_id", "audio_path", "annotation_aol", "annotation_ind"]]

    def _get_source_df(self, complete_dict) -> pd.DataFrame:
        xml_dict = {k: v["text_path"] for k, v in complete_dict.items()}

        audio_df = pd.DataFrame({"media_id": [k for k in complete_dict.keys()], "speaker_id": [k.split("_")[-1] for k in complete_dict.keys()], "audio_path": [v["audio_path"] for v in complete_dict.values()]})
        text_df = self._merge_text_dfs(xml_dict)

        df = text_df.merge(audio_df, on="media_id", how="inner")

        return df[["media_id", "speaker_id", "audio_path", "annotation_aol", "annotation_ind", "begin_time", "end_time"]]
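
For quick inspection, a minimal usage sketch. This is illustrative only: it assumes the SEACrowd dependencies imported above are installed, that the script sits locally as alorese.py, and a datasets version that still supports script-based loaders; the config name comes from BUILDER_CONFIGS.

from datasets import load_dataset

# Machine-translation view: Alorese ("aol") / Indonesian ("ind") sentence pairs.
alorese = load_dataset("alorese.py", name="alorese_seacrowd_t2t", trust_remote_code=True)
print(alorese["train"][0])  # keys: id, text_1, text_2, text_1_name, text_2_name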