import json

import datasets
from huggingface_hub.file_download import hf_hub_url

# Prefer the stdlib lzma; fall back to pylzma where it is unavailable.
try:
    import lzma as xz
except ImportError:
    import pylzma as xz

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
    """

_HOMEPAGE = ""

_LICENSE = ""

_CITATION = ""

# Relative directory inside the dataset repository that holds the data files.
_URL = "data/"

_LANGUAGES = [
    "fr", "it", "es", "en", "de", "pt"
]

_TYPES = [
    "laws", "judgements"
]

_SOURCES = [
    "MultiLegalPile", "Wipolex", "Jug", "BVA", "CC", "IP", "SCOTUS", "SwissJudgementPrediction",
    "Gesetz", "Constitution", "CivilCode", "CriminalCode",
]


# See https://huggingface.co/datasets/joelito/MultiLegalPile_Wikipedia_Filtered/blob/main/MultiLegalPile_Wikipedia_Filtered.py
# for the shard-probing pattern this loader follows.
_HIGHEST_NUMBER_OF_SHARDS = 4
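
# Expected file layout on the Hub, as implied by download_url() in
# _split_generators() below (inferred from this script, not verified against
# the repo): data/{language}_{type}_{shard}.jsonl.xz, with shard indices
# 0 .. _HIGHEST_NUMBER_OF_SHARDS - 1, e.g. data/fr_laws_0.jsonl.xz.
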
class MultilingualSBDConfig(datasets.BuilderConfig):
    """BuilderConfig whose name encodes language and document type, e.g. "fr_laws"."""

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)
        self.language = name.split("_")[0]
        self.type = name.split("_")[1]
        # self.source = name.split("_")[2]

class MultilingualSBD(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIG_CLASS = MultilingualSBDConfig

    # One config per (language, type) combination, plus "all" wildcards,
    # e.g. "fr_laws", "de_judgements", "all_all".
    BUILDER_CONFIGS = [
        MultilingualSBDConfig(f"{language}_{type}")
        for language in _LANGUAGES + ["all"]
        for type in _TYPES + ["all"]
    ]
    DEFAULT_CONFIG_NAME = "all_all"

    def _info(self):
        features = datasets.Features(
            {
                # "language": datasets.Value("string"),  # in _LANGUAGES
                # "type": datasets.Value("string"),  # in _TYPES
                # "subtype": datasets.Value("string"),  # in _SUBTYPES
                "text": datasets.Value("string"),
                "source": datasets.Value("string"),
                # Sentence spans with character and token offsets.
                "spans": datasets.Sequence(
                    {
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "token_start": datasets.Value("int64"),
                        "token_end": datasets.Value("int64"),
                        "label": datasets.Value("string"),
                    }
                ),
                # Token list with character offsets; "ws" conventionally marks
                # whether the token is followed by whitespace.
                "tokens": datasets.Sequence(
                    {
                        "text": datasets.Value("string"),
                        "start": datasets.Value("int64"),
                        "end": datasets.Value("int64"),
                        "id": datasets.Value("int64"),
                        "ws": datasets.Value("bool"),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
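
    # Illustrative shape of a single yielded example, implied by the schema
    # above; the concrete values and the "sentence" label are hypothetical:
    #   {
    #       "text": "Art. 1",
    #       "source": "MultiLegalPile",
    #       "spans": [{"start": 0, "end": 6, "token_start": 0, "token_end": 2, "label": "sentence"}],
    #       "tokens": [
    #           {"text": "Art", "start": 0, "end": 3, "id": 0, "ws": False},
    #           {"text": ".", "start": 3, "end": 4, "id": 1, "ws": True},
    #           {"text": "1", "start": 5, "end": 6, "id": 2, "ws": False},
    #       ],
    #   }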
    
    def _split_generators(self, dl_manager):
        def download_url(filename):
            url = hf_hub_url(
                repo_id="tbrugger/Multilingual-SBD",
                filename=f"data/{filename}.jsonl.xz",
                repo_type="dataset",
            )
            return dl_manager.download(url)

        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        types = _TYPES if self.config.type == "all" else [self.config.type]
        # sources = _SOURCES if self.config.source == "all" else [self.config.source]

        split_generators = []
        for split in [datasets.Split.TRAIN]:
            filepaths = []
            for language in languages:
                for type in types:
                    # Probe shards in order and stop at the first missing one;
                    # a (language, type) pair need not use all shards.
                    for shard in range(_HIGHEST_NUMBER_OF_SHARDS):
                        try:
                            filepaths.append(download_url(f"{language}_{type}_{shard}"))
                        except Exception:
                            break

            split_generators.append(
                datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": filepaths})
            )

        return split_generators

    def _generate_examples(self, filepaths):
        id_ = 0
        for filepath in filepaths:
            if not filepath:
                continue
            logger.info("Generating examples from = %s", filepath)
            try:
                # Stream the compressed JSONL line by line instead of loading
                # it whole; xz.open on the path also ensures the underlying
                # file handle is closed (the original wrapped an unclosed open()).
                with xz.open(filepath, "rt", encoding="utf-8") as f:
                    for line in f:
                        example = json.loads(line)
                        if isinstance(example, dict):
                            yield id_, example
                            id_ += 1
            except Exception:
                logger.exception("Error while processing file %s", filepath)
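

# A minimal usage sketch, not part of the loader itself. It assumes this script
# is hosted as the dataset "tbrugger/Multilingual-SBD" on the Hugging Face Hub
# (the repo_id used in _split_generators) and that "fr_laws" resolves to one of
# the configs generated in BUILDER_CONFIGS. Newer versions of `datasets` may
# additionally require trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    dataset = datasets.load_dataset("tbrugger/Multilingual-SBD", "fr_laws", split="train")
    print(dataset[0]["text"][:100])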