import random
from pathlib import Path

import datasets
from datasets import Value, Sequence, ClassLabel, Features

_CITATION = """\
coming soon
"""
_DESCRIPTION = """\
German Legal Sentences (GLS) is an automatically generated training dataset for semantic sentence
matching in the domain of German legal documents. It follows the concept of weak supervision, where
imperfect labels are generated using multiple heuristics. For this purpose we use a combination of
legal citation matching and BM25 similarity. The contained sentences and their citations are parsed
from real judicial decisions provided by [Open Legal Data](http://openlegaldata.io/).
"""
_VERSION = "0.0.2"
_DATA_URL = f"http://lavis.cs.hs-rm.de/storage/german-legal-sentences/GermanLegalSentences_v{_VERSION}.zip"
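
# Expected contents of the downloaded archive, as read by the loader helpers
# at the bottom of this script (all files are tab-separated):
#   {train,valid,test}.sentences.tsv  sent_id <TAB> doc_id <TAB> text
#   {train,valid,test}.pairs.tsv      query sent_id <TAB> related sent_id
#   refs.tsv                          ref_id <TAB> type <TAB> name
#   sent_ref_map.tsv                  sent_id <TAB> space-separated ref_ids
#   es_neighbors.tsv                  sent_id <TAB> space-separated sent_ids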


class GLSConfig(datasets.BuilderConfig):
    """BuilderConfig for German Legal Sentences."""

    def __init__(
        self,
        load_collection,
        load_es_neighbors=None,
        n_es_neighbors=None,
        **kwargs,
    ):
        """BuilderConfig for German Legal Sentences.

        Args:
            load_collection: whether to load the full sentence collection in
                order to resolve sentence ids into texts and document ids.
            load_es_neighbors: whether to load the precomputed Elasticsearch
                (BM25) neighbors for each query sentence.
            n_es_neighbors: number of ES neighbors to sample per query sentence.
            **kwargs: keyword arguments forwarded to super.
        """
        super(GLSConfig, self).__init__(**kwargs)
        self.load_collection = load_collection
        self.load_es_neighbors = load_es_neighbors
        self.n_es_neighbors = n_es_neighbors


class GermanLegalSentences(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        GLSConfig(
            name="sentences",
            load_es_neighbors=False,
            load_collection=False,
            version=datasets.Version(_VERSION, ""),
            description="Just the sentences and their masked references",
        ),
        GLSConfig(
            name="pairs",
            load_es_neighbors=False,
            load_collection=True,
            version=datasets.Version(_VERSION, ""),
            description="Sentence pairs sharing references",
        ),
        GLSConfig(
            name="pairs+es",
            load_es_neighbors=True,
            load_collection=True,
            n_es_neighbors=5,
            version=datasets.Version(_VERSION, ""),
            description="Sentence pairs sharing references plus ES neighbors",
        ),
    ]
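
    # A config is selected via the `name` argument of `datasets.load_dataset`;
    # see the usage sketch at the end of this file.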

    def _features(self):
        if self.config.name == "sentences":
            return Features(
                {
                    "sent_id": Value("uint32"),
                    "doc_id": Value("uint32"),
                    "text": Value("string"),
                    "references": Sequence(
                        {
                            "ref_id": Value("uint32"),
                            "name": Value("string"),
                            "type": ClassLabel(names=["AZ", "LAW"]),
                        }
                    ),
                }
            )
        elif self.config.name == "pairs":
            return Features(
                {
                    "query.sent_id": Value("uint32"),
                    "query.doc_id": Value("uint32"),
                    "query.text": Value("string"),
                    "query.ref_ids": Sequence(Value("uint32")),
                    "related.sent_id": Value("uint32"),
                    "related.doc_id": Value("uint32"),
                    "related.text": Value("string"),
                    "related.ref_ids": Sequence(Value("uint32")),
                }
            )
        elif self.config.name == "pairs+es":
            return Features(
                {
                    "query.sent_id": Value("uint32"),
                    "query.doc_id": Value("uint32"),
                    "query.text": Value("string"),
                    "query.ref_ids": Sequence(Value("uint32")),
                    "related.sent_id": Value("uint32"),
                    "related.doc_id": Value("uint32"),
                    "related.text": Value("string"),
                    "related.ref_ids": Sequence(Value("uint32")),
                    "es_neighbors.text": Sequence(Value("string")),
                    "es_neighbors.sent_id": Sequence(Value("uint32")),
                    "es_neighbors.doc_id": Sequence(Value("uint32")),
                    "es_neighbors.ref_ids": Sequence(Sequence(Value("uint32"))),
                }
            )
        raise ValueError(f"Unsupported config name: {self.config.name}")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self._features(),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        if dl_manager.manual_dir:
            data_dir = Path(dl_manager.manual_dir)
        else:
            data_dir = Path(dl_manager.download_and_extract(_DATA_URL))
        collection = _load_collection(data_dir) if self.config.load_collection else None
        sent_ref_map = _load_sent_references(data_dir)
        references = (
            _load_reference_info(data_dir) if self.config.name == "sentences" else None
        )
        es_neighbors = (
            _load_es_neighbors(data_dir) if self.config.load_es_neighbors else None
        )
        gen_kwargs = dict()
        for split in ("train", "valid", "test"):
            gen_kwargs[split] = {
                "collection": collection,
                "pair_id_file": data_dir / f"{split}.pairs.tsv",
                "sentence_file": data_dir / f"{split}.sentences.tsv",
                "references": references,
                "sent_ref_map": sent_ref_map,
                "es_neighbors": es_neighbors,
            }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs=gen_kwargs["train"]
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs=gen_kwargs["valid"]
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs=gen_kwargs["test"]
            ),
        ]

    def _generate_examples(self, **kwargs):
        if self.config.name.startswith("pairs"):
            yield from self._generate_pairs(**kwargs)
        elif self.config.name == "sentences":
            yield from self._generate_sentences(**kwargs)
        else:
            raise ValueError(f"Unsupported config name: {self.config.name}")

    def _generate_pairs(
        self, pair_id_file, collection, sent_ref_map, es_neighbors, **kwargs
    ):
        # Fixed seed so the sampled ES neighbors are reproducible across runs.
        random.seed(17)
        with open(pair_id_file, encoding="utf-8") as r:
            idx = 0
            for line in r:
                stripped = line.rstrip()
                if stripped:
                    a, b = stripped.split("\t")
                    features = {
                        "query.sent_id": int(a),
                        "query.doc_id": int(collection[a]["doc_id"]),
                        "query.text": collection[a]["text"],
                        "query.ref_ids": sent_ref_map[a],
                        "related.sent_id": int(b),
                        "related.doc_id": int(collection[b]["doc_id"]),
                        "related.text": collection[b]["text"],
                        "related.ref_ids": sent_ref_map[b],
                    }
                    if self.config.name == "pairs+es":
                        # Skip pairs whose query has fewer than `n_es_neighbors`
                        # precomputed neighbors, so every emitted example
                        # carries a fixed-size neighbor list.
                        curr_es_neighbors = es_neighbors.get(a) or []
                        if len(curr_es_neighbors) < self.config.n_es_neighbors:
                            continue
                        es_sent_ids = random.sample(
                            curr_es_neighbors, k=self.config.n_es_neighbors
                        )
                        additional_features = {
                            "es_neighbors.sent_id": [int(i) for i in es_sent_ids],
                            "es_neighbors.doc_id": [
                                int(collection[i]["doc_id"]) for i in es_sent_ids
                            ],
                            "es_neighbors.text": [
                                collection[i]["text"] for i in es_sent_ids
                            ],
                            "es_neighbors.ref_ids": [
                                sent_ref_map[i] for i in es_sent_ids
                            ],
                        }
                        features.update(additional_features)
                    yield idx, features
                    idx += 1

    def _generate_sentences(
        self,
        sentence_file,
        references,
        sent_ref_map,
        **kwargs,
    ):
        with open(sentence_file, encoding="utf-8") as r:
            for idx, line in enumerate(r):
                stripped = line.rstrip()
                if stripped == "":
                    continue
                s_id, doc_id, text = stripped.split("\t", maxsplit=2)
                yield idx, {
                    "sent_id": int(s_id),
                    "doc_id": int(doc_id),
                    "text": text,
                    "references": [
                        {
                            "ref_id": int(r_id),
                            "name": references[r_id][1],
                            "type": references[r_id][0],
                        }
                        for r_id in sent_ref_map[s_id]
                    ],
                }


def _load_collection(data_dir):
    collection = dict()
    for split in ("train", "valid", "test"):
        with open(data_dir / f"{split}.sentences.tsv", encoding="utf-8") as r:
            for line in r:
                stripped = line.strip()
                if not stripped:
                    # Tolerate blank lines, mirroring `_generate_sentences`.
                    continue
                s_id, d_id, sent = stripped.split("\t", maxsplit=2)
                collection[s_id] = {"doc_id": d_id, "text": sent}
    return collection


def _load_reference_info(data_dir):
    with open(data_dir / "refs.tsv", encoding="utf-8") as r:
        references = {
            r_id: (r_type, r_name.rstrip())
            for r_id, r_type, r_name in (
                line.split("\t", maxsplit=2) for line in r if len(line) > 2
            )
        }
    return references


def _load_sent_references(data_dir):
    with open(data_dir / "sent_ref_map.tsv", encoding="utf-8") as r:
        sent_ref_map = {
            s_id: r_ids.rstrip().split()
            for s_id, r_ids in (
                line.split("\t", maxsplit=1) for line in r if len(line) > 2
            )
        }
    return sent_ref_map


def _load_es_neighbors(data_dir):
    with open(data_dir / "es_neighbors.tsv", encoding="utf-8") as r:
        es_neighbors = {
            s_id: other_s_ids.rstrip().split()
            for s_id, other_s_ids in (
                line.split("\t", maxsplit=1) for line in r if len(line) > 2
            )
        }
    return es_neighbors
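

# Usage sketch (the local file name is an assumption; it presumes this script
# is saved as `german_legal_sentences.py`, or that the archive was downloaded
# manually and its directory is passed via `data_dir=`):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("./german_legal_sentences.py", "pairs")
#   print(ds["train"][0]["query.text"])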