import json

import datasets

_VERSION = "1.0.0"
_DESCRIPTION = (
    "Deepset's germanDPR dataset made compatible with BEIR benchmark framework. One version contains "
    "the original dataset 1:1 and the other dataset is preprocessed. See official dataset card for "
    "usage of dataset with BEIR."
)
_SUBSETS = ["queries-original", "corpus-original", "queries-processed", "corpus-processed", "qrels"]

# Single place for the remote data location; each subset file lives below this path.
_BASE_URL = "https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data"


class GermanDPRBeir(datasets.GeneratorBasedBuilder):
    """BEIR-compatible loader for deepset/germanDPR.

    Exposes five configurations (see ``_SUBSETS``): original and text-processed
    variants of the queries and corpus (JSONL files), plus the train/test
    qrels (TSV relevance judgments).
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="queries-original",
            description="BEIR queries created 1:1 from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="corpus-original",
            description="BEIR corpus created 1:1 from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="queries-processed",
            description="BEIR queries created and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="corpus-processed",
            description="BEIR corpus created and further text-processed from deepset/germanDPR.",
            version=_VERSION,
        ),
        datasets.BuilderConfig(
            name="qrels",
            description="BEIR qrels created from deepset/germanDPR for train and test split.",
            version=_VERSION,
        ),
    ]
    DEFAULT_CONFIG_NAME = "qrels"

    def _info(self):
        """Return the DatasetInfo whose feature schema depends on the selected config."""
        name = self.config.name
        if name.startswith("queries"):
            features = {
                "_id": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        elif name.startswith("corpus"):
            features = {
                "_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        else:  # name == "qrels"
            features = {
                "query-id": datasets.Value("string"),
                "corpus-id": datasets.Value("string"),
                "score": datasets.Value("int32"),
            }
        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/PM-AI/germandpr-beir",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        name = self.config.name
        if name == "qrels":
            # qrels ship as two separate TSV files, one per split.
            dl_path = dl_manager.download([
                f"{_BASE_URL}/qrels/train.tsv",
                f"{_BASE_URL}/qrels/test.tsv",
            ])
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path[0]}),
                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": dl_path[1]}),
            ]
        else:
            # Queries/corpus are a single JSONL file shared by both splits;
            # the qrels select which entries belong to train vs. test.
            dl_path = dl_manager.download(f"{_BASE_URL}/{name}.jsonl")
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path}),
                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": dl_path}),
            ]

    def _generate_queries_data(self, filepath):
        """Yield (index, example) pairs from a queries JSONL file."""
        with open(filepath, "r", encoding="utf-8") as in_file:
            for idx, line in enumerate(in_file):
                yield idx, json.loads(line)

    def _generate_corpus_data(self, filepath):
        """Yield (index, example) pairs from a corpus JSONL file.

        Drops the optional "metadata" field, which is not part of the
        declared corpus feature schema.
        """
        with open(filepath, "r", encoding="utf-8") as in_file:
            for idx, line in enumerate(in_file):
                data = json.loads(line)
                data.pop("metadata", None)
                yield idx, data

    def _generate_qrel_data(self, filepath):
        """Yield (index, example) pairs from a qrels TSV file (header skipped)."""
        with open(filepath, "r", encoding="utf-8") as in_file:
            in_file.readline()  # first line is header
            for idx, line in enumerate(in_file):
                qid, cid, score = line.rstrip().split("\t")
                # Cast explicitly: the feature schema declares score as int32.
                yield idx, {"query-id": qid, "corpus-id": cid, "score": int(score)}

    def _generate_examples(self, filepath):
        """Yields examples."""
        name = self.config.name
        if name.startswith("queries"):
            return self._generate_queries_data(filepath)
        elif name.startswith("corpus"):
            return self._generate_corpus_data(filepath)
        else:  # name == "qrels"
            return self._generate_qrel_data(filepath)