qanastek committed on
Commit 335fde2
Parent: 869eef2

Upload 2 files

Files changed (2)
  1. MantraGSC.py +411 -0
  2. test_mantra_gsc.py +8 -0
MantraGSC.py ADDED
@@ -0,0 +1,411 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import ast
+ from pathlib import Path
+ from itertools import product
+ from dataclasses import dataclass
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ _CITATION = """\
+ @article{10.1093/jamia/ocv037,
+     author = {Kors, Jan A and Clematide, Simon and Akhondi, Saber A and
+               van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
+     title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
+     journal = {Journal of the American Medical Informatics Association},
+     volume = {22},
+     number = {5},
+     pages = {948-956},
+     year = {2015},
+     month = {05},
+     abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition. Materials
+     and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
+     biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
+     independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
+     covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
+     preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
+     cross-language consistency checks were carried out to arrive at the final annotations. Results The number of final
+     annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
+     similar to those between individual annotators and the gold standard. The automatically generated harmonized
+     annotation set for each language performed equally well as the best annotator for that language. Discussion The use
+     of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
+     efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
+     of automatic annotation techniques. Conclusion To our knowledge, this is the first gold-standard corpus for
+     biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
+     of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
+     issn = {1067-5027},
+     doi = {10.1093/jamia/ocv037},
+     url = {https://doi.org/10.1093/jamia/ocv037},
+     eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
+ }
+ """
+
+ _DESCRIPTION = """\
+ We selected text units from different parallel corpora (Medline abstract titles, drug labels, biomedical patent claims)
+ in English, French, German, Spanish, and Dutch. Three annotators per language independently annotated the biomedical
+ concepts, based on a subset of the Unified Medical Language System and covering a wide range of semantic groups.
+ """
+
+ _HOMEPAGE = "https://biosemantics.erasmusmc.nl/index.php/resources/mantra-gsc"
+
+ _LICENSE = "CC_BY_4p0"
+
+ _URL = "http://biosemantics.org/MantraGSC/Mantra-GSC.zip"
+
+ _LANGUAGES_2 = {
+     "es": "Spanish",
+     "fr": "French",
+     "de": "German",
+     "nl": "Dutch",
+     "en": "English",
+ }
+
+ _DATASET_TYPES = {
+     "emea": "EMEA",
+     "medline": "Medline",
+     "patents": "Patents",
+ }
+
+
+ @dataclass
+ class DrBenchmarkConfig(datasets.BuilderConfig):
+     """BuilderConfig for a Mantra GSC subset."""
+     name: str = None
+     version: datasets.Version = None
+     description: str = None
+     schema: str = None
+     subset_id: str = None
+
+
+ class MantraGSC(datasets.GeneratorBasedBuilder):
+
+     SOURCE_VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = []
+
+     for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
+
+         # No Dutch or Spanish subsets exist for the patents corpus.
+         if dataset_type == "patents" and language in ["nl", "es"]:
+             continue
+
+         BUILDER_CONFIGS.append(
+             DrBenchmarkConfig(
+                 name=f"{language}_{dataset_type}",
+                 version=SOURCE_VERSION,
+                 description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
+                 schema="source",
+                 subset_id=f"{language}_{_DATASET_TYPES[dataset_type]}",
+             )
+         )
+
+     DEFAULT_CONFIG_NAME = "fr_medline"
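+     # The grid above yields 13 configurations: every language paired with
+     # "emea" and "medline", plus "en", "fr", and "de" paired with "patents"
+     # (e.g. "fr_medline", "en_patents", "nl_emea").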
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         # All configurations use the "source" schema.
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "entities": [
+                         {
+                             "entity_id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "cui": datasets.Value("string"),
+                             "preferred_term": datasets.Value("string"),
+                             "semantic_type": datasets.Value("string"),
+                             "normalized": [
+                                 {
+                                     "db_name": datasets.Value("string"),
+                                     "db_id": datasets.Value("string"),
+                                 }
+                             ],
+                         }
+                     ],
+                 }
+             )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+
+         data_dir = dl_manager.download_and_extract(_URL)
+         data_dir = Path(data_dir) / "Mantra-GSC"
+
+         language, dataset_type = self.config.name.split("_")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "language": language,
+                     "dataset_type": dataset_type,
+                 },
+             ),
+         ]
+
+     def remove_prefix(self, a: str, prefix: str) -> str:
+         # Equivalent of str.removeprefix, which requires Python >= 3.9.
+         if a.startswith(prefix):
+             a = a[len(prefix) :]
+         return a
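+     # For reference, Mantra GSC ships brat standoff annotations. The line
+     # types handled below look roughly like this (illustrative values only,
+     # fields are tab-separated):
+     #   T1 <TAB> C0004057 0 7 <TAB> Aspirin        text-bound annotation
+     #   #1 <TAB> AnnotatorNotes T1 <TAB> (...)     annotator note
+     # Event (E), relation (R), equivalence (*), attribute (A/M), and
+     # normalization (N) lines are parsed as well for completeness.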
+
+     def parse_brat_file(self, txt_file: Path, annotation_file_suffixes: List[str] = None, parse_notes: bool = False) -> Dict:
+
+         example = {}
+         example["document_id"] = txt_file.with_suffix("").name
+         with txt_file.open() as f:
+             example["text"] = f.read()
+
+         # If no suffixes for the annotation files are given, fall back to the
+         # standard brat suffixes.
+         if annotation_file_suffixes is None:
+             annotation_file_suffixes = [".a1", ".a2", ".ann"]
+
+         if len(annotation_file_suffixes) == 0:
+             raise AssertionError(
+                 "At least one suffix for the to-be-read annotation files should be given!"
+             )
+
+         ann_lines = []
+         for suffix in annotation_file_suffixes:
+             annotation_file = txt_file.with_suffix(suffix)
+             if annotation_file.exists():
+                 with annotation_file.open() as f:
+                     ann_lines.extend(f.readlines())
+
+         example["text_bound_annotations"] = []
+         example["events"] = []
+         example["relations"] = []
+         example["equivalences"] = []
+         example["attributes"] = []
+         example["normalizations"] = []
+
+         if parse_notes:
+             example["notes"] = []
+
+         for line in ann_lines:
+             line = line.strip()
+             if not line:
+                 continue
+
+             if line.startswith("T"):  # Text-bound annotation
+                 ann = {}
+                 fields = line.split("\t")
+
+                 ann["id"] = fields[0]
+                 ann["type"] = fields[1].split()[0]
+                 ann["offsets"] = []
+                 span_str = self.remove_prefix(fields[1], (ann["type"] + " "))
+                 text = fields[2]
+                 for span in span_str.split(";"):
+                     start, end = span.split()
+                     ann["offsets"].append([int(start), int(end)])
+
+                 # Heuristically split text of discontiguous entities into chunks
+                 ann["text"] = []
+                 if len(ann["offsets"]) > 1:
+                     i = 0
+                     for start, end in ann["offsets"]:
+                         chunk_len = end - start
+                         ann["text"].append(text[i : chunk_len + i])
+                         i += chunk_len
+                         while i < len(text) and text[i] == " ":
+                             i += 1
+                 else:
+                     ann["text"] = [text]
+
+                 example["text_bound_annotations"].append(ann)
+
+             elif line.startswith("E"):  # Event annotation
+                 ann = {}
+                 fields = line.split("\t")
+
+                 ann["id"] = fields[0]
+
+                 ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
+
+                 ann["arguments"] = []
+                 for role_ref_id in fields[1].split()[1:]:
+                     argument = {
+                         "role": (role_ref_id.split(":"))[0],
+                         "ref_id": (role_ref_id.split(":"))[1],
+                     }
+                     ann["arguments"].append(argument)
+
+                 example["events"].append(ann)
+
+             elif line.startswith("R"):  # Relation annotation
+                 ann = {}
+                 fields = line.split("\t")
+
+                 ann["id"] = fields[0]
+                 ann["type"] = fields[1].split()[0]
+
+                 ann["head"] = {
+                     "role": fields[1].split()[1].split(":")[0],
+                     "ref_id": fields[1].split()[1].split(":")[1],
+                 }
+                 ann["tail"] = {
+                     "role": fields[1].split()[2].split(":")[0],
+                     "ref_id": fields[1].split()[2].split(":")[1],
+                 }
+
+                 example["relations"].append(ann)
+
+             # "*" seems to be the legacy way to mark equivalences, but there is
+             # little documentation on the current way; this may need to be
+             # adapted depending on the brat version used for the annotation.
+             elif line.startswith("*"):  # Equivalence annotation
+                 ann = {}
+                 fields = line.split("\t")
+
+                 ann["id"] = fields[0]
+                 ann["ref_ids"] = fields[1].split()[1:]
+
+                 example["equivalences"].append(ann)
+
+             elif line.startswith("A") or line.startswith("M"):  # Attribute annotation
+                 ann = {}
+                 fields = line.split("\t")
+
+                 ann["id"] = fields[0]
+
+                 info = fields[1].split()
+                 ann["type"] = info[0]
+                 ann["ref_id"] = info[1]
+
+                 if len(info) > 2:
+                     ann["value"] = info[2]
+                 else:
+                     ann["value"] = ""
+
+                 example["attributes"].append(ann)
+
+             elif line.startswith("N"):  # Normalization annotation
+                 ann = {}
+                 fields = line.split("\t")
+
+                 ann["id"] = fields[0]
+                 ann["text"] = fields[2]
+
+                 info = fields[1].split()
+
+                 ann["type"] = info[0]
+                 ann["ref_id"] = info[1]
+                 ann["resource_name"] = info[2].split(":")[0]
+                 ann["cuid"] = info[2].split(":")[1]
+                 example["normalizations"].append(ann)
+
+             elif parse_notes and line.startswith("#"):  # Annotator note
+                 ann = {}
+                 fields = line.split("\t")
+
+                 ann["id"] = fields[0]
+                 ann["text"] = fields[2] if len(fields) == 3 else "<BB_NULL_STR>"
+
+                 info = fields[1].split()
+
+                 ann["type"] = info[0]
+                 ann["ref_id"] = info[1]
+                 example["notes"].append(ann)
+
+         return example
+
+
+     def _generate_examples(
+         self, data_dir: Path, language: str, dataset_type: str
+     ) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         data_dir = data_dir / f"{_LANGUAGES_2[language]}"
+
+         if dataset_type in ["patents", "emea"]:
+             data_dir = data_dir / f"{_DATASET_TYPES[dataset_type]}_ec22-cui-best_man"
+         else:
+             # Medline: each non-English subset lives in a single EN-<language>
+             # directory, while the English subset is spread over the four
+             # EN-<other language> directories.
+             if language != "en":
+                 data_dir = (
+                     data_dir
+                     / f"{_DATASET_TYPES[dataset_type]}_EN_{language.upper()}_ec22-cui-best_man"
+                 )
+             else:
+                 data_dir = [
+                     data_dir
+                     / f"{_DATASET_TYPES[dataset_type]}_EN_{_lang.upper()}_ec22-cui-best_man"
+                     for _lang in _LANGUAGES_2
+                     if _lang != "en"
+                 ]
+
+         if not isinstance(data_dir, list):
+             data_dir: List[Path] = [data_dir]
+
+         raw_files = [raw_file for _dir in data_dir for raw_file in _dir.glob("*.txt")]
+
+         if self.config.schema == "source":
+             for i, raw_file in enumerate(raw_files):
+                 brat_example = self.parse_brat_file(raw_file, parse_notes=True)
+                 source_example = self._to_source_example(brat_example)
+                 yield i, source_example
+
+     def _to_source_example(self, brat_example: Dict) -> Dict:
+
+         source_example = {
+             "document_id": brat_example["document_id"],
+             "text": brat_example["text"],
+         }
+
+         source_example["entities"] = []
+
+         for entity_annotation, ann_notes in zip(
+             brat_example["text_bound_annotations"], brat_example["notes"]
+         ):
+             entity_ann = entity_annotation.copy()
+
+             # Rename the "id" property
+             entity_ann["entity_id"] = entity_ann["id"]
+             entity_ann.pop("id")
+
+             # Get values from the annotator notes; each note holds a literal
+             # tuple of (CUI, preferred term, semantic type, semantic group),
+             # where the CUI is sometimes omitted and stored as the brat type
+             # instead.
+             assert entity_ann["entity_id"] == ann_notes["ref_id"]
+             notes_values = ast.literal_eval(ann_notes["text"])
+             if len(notes_values) == 4:
+                 cui, preferred_term, semantic_type, semantic_group = notes_values
+             else:
+                 preferred_term, semantic_type, semantic_group = notes_values
+                 cui = entity_ann["type"]
+             entity_ann["cui"] = cui
+             entity_ann["preferred_term"] = preferred_term
+             entity_ann["semantic_type"] = semantic_type
+             entity_ann["type"] = semantic_group
+             entity_ann["normalized"] = [{"db_name": "UMLS", "db_id": cui}]
+
+             # Add the entity annotation to the sample
+             source_example["entities"].append(entity_ann)
+
+         return source_example
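
A quick way to see what `parse_brat_file` consumes and returns is to feed it a tiny synthetic brat document. The sketch below is not part of the commit; the file contents, the CUI, and the `config_name` keyword (spelled `name` in older `datasets` releases) are illustrative assumptions.

```python
from pathlib import Path
import tempfile

from MantraGSC import MantraGSC

# Hypothetical one-document brat pair: a .txt file plus a .ann file with one
# text-bound annotation (T1) and its annotator note (#1).
tmp = Path(tempfile.mkdtemp())
(tmp / "doc1.txt").write_text("Aspirin treats headache.")
(tmp / "doc1.ann").write_text(
    "T1\tC0004057 0 7\tAspirin\n"
    "#1\tAnnotatorNotes T1\t('C0004057', 'aspirin', 'T109', 'CHEM')\n"
)

builder = MantraGSC(config_name="fr_medline")  # "name=..." on older datasets
example = builder.parse_brat_file(tmp / "doc1.txt", parse_notes=True)
print(example["text_bound_annotations"])  # offsets [[0, 7]], text ['Aspirin']
print(example["notes"])                   # the parsed AnnotatorNotes line
```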
test_mantra_gsc.py ADDED
@@ -0,0 +1,8 @@
+ import json
+
+ from datasets import load_dataset
+
+ dataset = load_dataset("./MantraGSC.py", name="fr_emea")
+ print(dataset)
+ # print(dataset["train"][0])
+ print(json.dumps(dataset["train"][0], sort_keys=True, indent=4))
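
To exercise every configuration rather than just `fr_emea`, a sketch along these lines (again assuming the script sits in the working directory) mirrors the config grid built in `MantraGSC.BUILDER_CONFIGS` and prints per-subset counts:

```python
from datasets import load_dataset

# Mirror of the config grid in MantraGSC.py: all language/corpus pairs except
# Dutch and Spanish patents, which do not exist.
CONFIGS = [
    f"{lang}_{kind}"
    for lang in ["es", "fr", "de", "nl", "en"]
    for kind in ["emea", "medline", "patents"]
    if not (kind == "patents" and lang in ["nl", "es"])
]

for name in CONFIGS:
    dataset = load_dataset("./MantraGSC.py", name=name)
    n_docs = len(dataset["train"])
    n_entities = sum(len(doc["entities"]) for doc in dataset["train"])
    print(f"{name}: {n_docs} documents, {n_entities} entities")
```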