fix: loading script
Changed files:
- .gitignore       +2   -0
- asr_dataset.py   +0   -149
- audio_dataset.py +113 -0
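This commit drops the old asr_dataset.py loading script, whose metadata-parsing loop referenced undefined variables, and replaces it with audio_dataset.py, which indexes tar members up front and streams audio bytes from locally stored archives.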
.gitignore
CHANGED
@@ -4,3 +4,5 @@ debug_dataset.py
 test_loading_script.py
 asr_dataset-backup.py
 **/__pycache__
+asr_dataset.py
+*.ipynb
asr_dataset.py
DELETED
@@ -1,149 +0,0 @@
-import os
-import tarfile
-import pandas as pd
-import datasets
-from datasets import Audio, Value, Features
-import logging
-from typing import Dict, Generator, Tuple
-
-logger = logging.getLogger(__name__)
-
-
-_DESCRIPTION = """
-This dataset consists of various Youtube videos in Persian language.
-
-Note: This dataset contains raw, unvalidated transcriptions. Users are advised to:
-1. Perform their own quality assessment
-2. Create their own train/validation/test splits based on their specific needs
-3. Validate a subset of the data if needed for their use case
-"""
-
-_CITATION = """
-Use this repo info/link for citation.
-"""
-
-_LICENSE = "MIT"
-
-_DATA_URL = [
-    "clips/clips_001.tar",
-    "clips/clips_002.tar",
-    "clips/clips_003.tar",
-    "clips/clips_004.tar",
-    "clips/clips_005.tar",
-    "clips/clips_006.tar",
-    "clips/clips_007.tar",
-    "clips/clips_008.tar",
-    "clips/clips_009.tar",
-    "clips/clips_010.tar",
-    "clips/clips_011.tar",
-    "clips/clips_012.tar",
-    "clips/clips_013.tar",
-    "clips/clips_014.tar",
-    "clips/clips_015.tar",
-    "clips/clips_016.tar",
-    "clips/clips_017.tar",
-    "clips/clips_018.tar",
-    "clips/clips_019.tar",
-    "clips/clips_020.tar",
-    "clips/clips_021.tar",
-]
-#_DATA_URL = ["ytDataset/" + x for x in _DATA_URL][:2]
-
-_PROMPTS_URLS = {
-    #"train": "ytDataset/clips/metadata.csv"
-    "train": "clips/metadata.csv"
-}
-
-class ASRDataset(datasets.GeneratorBasedBuilder):
-    """ASR dataset with audio files stored in tar archives."""
-
-    VERSION = datasets.Version("1.0.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features({
-                "file_name": datasets.Value("string"),
-                "audio": datasets.Audio(sampling_rate=16000),
-                "sentence": datasets.Value("string"),
-                #"tar_file": datasets.Value("string"),
-            }),
-            supervised_keys=None,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators with added error handling."""
-
-        prompts_paths = dl_manager.download_and_extract(_PROMPTS_URLS)
-        archive = dl_manager.download(_DATA_URL)
-        train_dir = "clips"
-
-        try:
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        #"split": "train",
-                        #"data_dir": self.config.data_dir,
-                        "prompts_path": prompts_paths["train"],
-                        "path_to_clips": train_dir,
-                        "audio_files": dl_manager.iter_archive(archive)
-                    },
-                ),
-            ]
-        except Exception as e:
-            logger.error(f"Error in _split_generators: {e}")
-            logger.error(traceback.format_exc())
-            raise
-
-    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
-        """Yields examples as (key, example) tuples."""
-
-        sentence_map = {}
-        with open(prompts_path, encoding="utf-8") as f:
-            for row in f:
-                data = row.strip().split("\t", 1)
-                file_name = data[0].strip()
-                sentence_map[audio_path] = sentence
-
-        id_ = 0
-
-        tar_files = [
-            "clips/clips_001.tar",
-            "clips/clips_002.tar",
-            "clips/clips_003.tar",
-            "clips/clips_004.tar",
-            "clips/clips_005.tar",
-            "clips/clips_006.tar",
-            "clips/clips_007.tar",
-            "clips/clips_008.tar",
-            "clips/clips_009.tar",
-            "clips/clips_010.tar",
-            "clips/clips_011.tar",
-            "clips/clips_012.tar",
-            "clips/clips_013.tar",
-            "clips/clips_014.tar",
-            "clips/clips_015.tar",
-            "clips/clips_016.tar",
-            "clips/clips_017.tar",
-            "clips/clips_018.tar",
-            "clips/clips_019.tar",
-            "clips/clips_020.tar",
-            "clips/clips_021.tar",
-        ]
-        for tar_file in tar_files:
-            with tarfile.open(tar_file, 'r') as tar:
-                for member in tar.getmembers():
-                    file_name = member.name #"/".join([path_to_clips, member.name])
-
-                    sentence = sentence_map.get(file_name, "")
-
-                    audio_file = tar.extractfile(member)
-                    audio = {"path": file_name, "bytes": audio_file.read()}
-                    yield id_, {
-                        "file_name": file_name,
-                        "audio": audio,
-                        "sentence": sentence
-                    }
-                    id_ += 1
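The deleted script never worked as written: its parsing loop assigns `sentence_map[audio_path] = sentence` although neither `audio_path` nor `sentence` is defined, the `except` block calls `traceback.format_exc()` without importing `traceback`, and the downloaded `audio_files` iterator is accepted by `_generate_examples` but never used. For reference, a minimal sketch of what that loop presumably intended, assuming tab-separated `file_name<TAB>sentence` rows in the prompts file (the path is a stand-in for the downloaded `prompts_path`):

# Hypothetical reconstruction of the intended metadata parsing.
sentence_map = {}
with open("clips/metadata.csv", encoding="utf-8") as f:
    for row in f:
        parts = row.strip().split("\t", 1)
        if len(parts) != 2:
            continue  # skip header or malformed rows
        file_name, sentence = parts[0].strip(), parts[1].strip()
        sentence_map[file_name] = sentence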
audio_dataset.py
ADDED
@@ -0,0 +1,113 @@
+import os
+import tarfile
+import datasets
+import pandas as pd
+from typing import Dict, List
+import io
+
+_DESCRIPTION = """
+This dataset consists of various Youtube videos in Persian language.
+
+Note: This dataset contains raw, unvalidated transcriptions. Users are advised to:
+1. Perform their own quality assessment
+2. Create their own train/validation/test splits based on their specific needs
+3. Validate a subset of the data if needed for their use case
+"""
+
+_CITATION = """
+Use this repo info/link for citation.
+"""
+
+_LICENSE = "CC0"
+
+class AudioTarDataset(datasets.GeneratorBasedBuilder):
+    """Audio dataset with files stored in tar archives."""
+
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            features=datasets.Features({
+                "audio": datasets.Audio(sampling_rate=16_000),  # Adjust sampling rate as needed
+                "text": datasets.Value("string"),
+                "file_name": datasets.Value("string"),
+            }),
+            supervised_keys=None,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # Paths to your tar files and metadata
+        tar_dir = "./clips"  # Update this
+        metadata_path = "./clips/metadata.csv"  # Update this
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,  # Or adjust splits as needed
+                gen_kwargs={
+                    "tar_dir": tar_dir,
+                    "metadata_path": metadata_path,
+                },
+            ),
+        ]
+
+    def _read_tar_index(self, tar_paths: List[str]) -> Dict[str, str]:
+        """Create an index mapping audio filenames to their tar files."""
+        file_to_tar = {}
+        for tar_path in tar_paths:
+            with tarfile.open(tar_path, "r") as tar:
+                for member in tar.getmembers():
+                    if member.isfile():
+                        file_to_tar[os.path.basename(member.name)] = tar_path
+        return file_to_tar
+
+    def _generate_examples(self, tar_dir, metadata_path):
+        """Yields examples."""
+        # Load CSV metadata
+        metadata_df = pd.read_csv(metadata_path, sep=',', encoding='utf-8')
+
+        # Get all tar files
+        tar_paths = [os.path.join(tar_dir, f) for f in os.listdir(tar_dir) if f.endswith('.tar')]
+
+        # Create index of which audio file is in which tar
+        file_to_tar = self._read_tar_index(tar_paths)
+
+        # Keep tar files open during iteration
+        tar_handles = {path: tarfile.open(path, "r") for path in tar_paths}
+
+        try:
+            for idx, row in metadata_df.iterrows():
+                audio_filename = row['file_name']
+                tar_path = file_to_tar.get(audio_filename)
+
+                if not tar_path:
+                    print(f"Warning: Could not find tar file for {audio_filename}")
+                    continue
+
+                # Extract audio file from tar
+                tar = tar_handles[tar_path]
+                try:
+                    audio_member = tar.getmember(audio_filename)
+                except KeyError:
+                    print(f"Warning: Could not find {audio_filename} in tar file")
+                    continue
+
+                audio_file = tar.extractfile(audio_member)
+
+                if audio_file is None:
+                    print(f"Warning: Could not extract {audio_filename}")
+                    continue
+
+                # Read audio data into memory
+                audio_data = audio_file.read()
+
+                yield idx, {
+                    "audio": {"path": audio_filename, "bytes": audio_data},
+                    "text": row['sentence'],
+                    "file_name": audio_filename,
+                }
+
+        finally:
+            # Clean up tar handles
+            for tar in tar_handles.values():
+                tar.close()
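To try the new loader, the script file can be passed straight to load_dataset; the local path and split name below are assumptions, and recent versions of datasets require trust_remote_code for script-based datasets:

from datasets import load_dataset

# Assumes audio_dataset.py and ./clips/ (the tar shards plus metadata.csv)
# sit in the current working directory.
ds = load_dataset("./audio_dataset.py", split="train", trust_remote_code=True)
print(ds[0]["file_name"], ds[0]["text"][:80])

One caveat worth noting: _read_tar_index keys its index on basenames, while tar.getmember is later called with the metadata filename verbatim, so archives whose members carry directory prefixes would fall into the KeyError branch and be skipped.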