qanastek committed on
Commit
924f6f2
1 Parent(s): e0ffc32

Update MANTRAGSC.py

Files changed (1)
  1. MANTRAGSC.py +76 -354
MANTRAGSC.py CHANGED
@@ -13,19 +13,19 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
- import re
- import ast
- import json
  import random
  from pathlib import Path
  from itertools import product
  from dataclasses import dataclass
  from typing import Dict, List, Tuple
 
- import datasets
-
  import numpy as np
 
  _CITATION = """\
  @article{10.1093/jamia/ocv037,
  author = {Kors, Jan A and Clematide, Simon and Akhondi,
@@ -69,7 +69,7 @@ _HOMEPAGE = "https://biosemantics.erasmusmc.nl/index.php/resources/mantra-gsc"
 
  _LICENSE = "CC_BY_4p0"
 
- _URL = "http://biosemantics.org/MantraGSC/Mantra-GSC.zip"
 
  _LANGUAGES_2 = {
      "es": "Spanish",
@@ -82,7 +82,7 @@ _LANGUAGES_2 = {
  _DATASET_TYPES = {
      "emea": "EMEA",
      "medline": "Medline",
-     "patents": "Patents",
  }
 
  @dataclass
@@ -118,23 +118,26 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
 
      def _info(self):
 
-         if self.config.name.find("emea") != -1:
-             names = ['O', 'DISO', 'CHEM|PHEN', 'DEVI', 'PHEN', 'PROC', 'OBJC', 'ANAT', 'LIVB', 'CHEM', 'PHYS']
-         elif self.config.name.find("medline") != -1:
-             names = ['O', 'DISO', 'GEOG', 'DEVI', 'Manufactured Object', 'PHEN', 'PROC', 'Research Device', 'OBJC', 'Mental or Behavioral Dysfunction', 'Research Activity', 'ANAT', 'LIVB', 'CHEM', 'PHYS']
-         elif self.config.name.find("patents") != -1:
-             names = ['O', 'PROC', 'DISO', 'LIVB', 'PHYS', 'PHEN', 'ANAT', 'OBJC', 'Amino Acid, Peptide, or Protein|Enzyme|Receptor', 'DEVI', 'CHEM']
 
          features = datasets.Features(
              {
                  "id": datasets.Value("string"),
-                 "document_id": datasets.Value("string"),
                  "tokens": [datasets.Value("string")],
                  "ner_tags": datasets.Sequence(
-                     datasets.features.ClassLabel(
-                         names = names,
-                     )
                  ),
              }
          )
@@ -148,11 +151,11 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
 
      def _split_generators(self, dl_manager):
 
          data_dir = dl_manager.download_and_extract(_URL)
 
-         data_dir = Path(data_dir) / "Mantra-GSC"
-
-         language, dataset_type = self.config.name.split("_")
 
          return [
              datasets.SplitGenerator(
@@ -184,328 +187,83 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
          ),
      ]
 
-     def convert_to_prodigy(self, json_object):
-
-         def prepare_split(text):
-
-             rep_before = ['?', '!', ';', '*']
-             rep_after = ['’', "'"]
-             rep_both = ['-', '/', '[', ']', ':', ')', '(', ',', '.']
-
-             for i in rep_before:
-                 text = text.replace(i, ' '+i)
-
-             for i in rep_after:
-                 text = text.replace(i, i+' ')
-
-             for i in rep_both:
-                 text = text.replace(i, ' '+i+' ')
-
-             text_split = text.split()
-
-             punctuations = [',', '.']
-             for j in range(0, len(text_split)-1):
-                 if j-1 >= 0 and j+1 <= len(text_split)-1 and text_split[j-1][-1].isdigit() and text_split[j+1][0].isdigit():
-                     if text_split[j] in punctuations:
-                         text_split[j-1:j+2] = [''.join(text_split[j-1:j+2])]
-
-             text = ' '.join(text_split)
-
-             return text
-
-         new_json = []
 
-         for ex in [json_object]:
 
-             text = prepare_split(ex['text'])
 
-             tokenized_text = text.split()
 
-             list_spans = []
 
-             cpt = 0
 
-             for a in ex['entities']:
 
-                 for o in range(len(a['offsets'])):
 
-                     text_annot = prepare_split(a['text'][o])
 
-                     offset_start = a['offsets'][o][0]
-                     offset_end = a['offsets'][o][1]
 
-                     nb_tokens_annot = len(text_annot.split())
 
-                     txt_offsetstart = prepare_split(ex['text'][:offset_start])
 
-                     nb_tokens_before_annot = len(txt_offsetstart.split())
 
-                     token_start = nb_tokens_before_annot
-                     token_end = token_start + nb_tokens_annot - 1
 
-                     list_spans.append({
-                         'start': offset_start,
-                         'end': offset_end,
-                         'token_start': token_start,
-                         'token_end': token_end,
-                         'label': a['type'],
-                         'id': ex['document_id'] + "_" + str(cpt),
-                         'text': a['text'][o],
                      })
 
-                     cpt += 1
-
-             res = {
-                 'id': ex['document_id'],
-                 'document_id': ex['document_id'],
-                 'text': ex['text'],
-                 'tokens': tokenized_text,
-                 'spans': list_spans
-             }
-
-             new_json.append(res)
-
-         return new_json
-
-     def convert_to_hf_format(self, json_object):
-         """
-         The format handles multilabel annotations by concatenating the labels with "_"
-         """
-
-         dict_out = []
-
-         for i in json_object:
-
-             nb_tokens = len(i['tokens'])
-
-             ner_tags = ['O']*nb_tokens
-
-             if 'spans' in i:
-
-                 for j in i['spans']:
-
-                     for x in range(j['token_start'], j['token_end']+1, 1):
-
-                         if i['tokens'][x] not in j['text'] and i['tokens'][x] != "Matériovigilance":
-
-                             if ner_tags[x-1] == 'O':
-                                 ner_tags[x-1] = j['label']
-                             else:
-                                 pass
-                         else:
-                             if ner_tags[x] == 'O':
-                                 ner_tags[x] = j['label']
-                             else:
-                                 # Comment out this line and use pass if only one label per token is wanted
-                                 pass
-
-             dict_out.append({
-                 'id': i['id'],
-                 'document_id': i['document_id'],
-                 "ner_tags": ner_tags,
-                 "tokens": i['tokens'],
-             })
-
-         return dict_out
-
-     def remove_prefix(self, a: str, prefix: str) -> str:
-         if a.startswith(prefix):
-             a = a[len(prefix) :]
-         return a
-
-     def parse_brat_file(self, txt_file: Path, annotation_file_suffixes: List[str] = None, parse_notes: bool = False):
-
-         example = {}
-         example["document_id"] = txt_file.with_suffix("").name
-         with txt_file.open() as f:
-             example["text"] = f.read()
-
-         if annotation_file_suffixes is None:
-             annotation_file_suffixes = [".a1", ".a2", ".ann"]
-
-         if len(annotation_file_suffixes) == 0:
-             raise AssertionError("At least one suffix for the to-be-read annotation files should be given!")
-
-         ann_lines = []
-         for suffix in annotation_file_suffixes:
-             annotation_file = txt_file.with_suffix(suffix)
-             if annotation_file.exists():
-                 with annotation_file.open() as f:
-                     ann_lines.extend(f.readlines())
-
-         example["text_bound_annotations"] = []
-         example["events"] = []
-         example["relations"] = []
-         example["equivalences"] = []
-         example["attributes"] = []
-         example["normalizations"] = []
-
-         if parse_notes:
-             example["notes"] = []
-
-         for line in ann_lines:
-             line = line.strip()
-             if not line:
-                 continue
-
-             if line.startswith("T"):  # Text bound
-                 ann = {}
-                 fields = line.split("\t")
-
-                 ann["id"] = fields[0]
-                 ann["type"] = fields[1].split()[0]
-                 ann["offsets"] = []
-                 span_str = self.remove_prefix(fields[1], (ann["type"] + " "))
-                 text = fields[2]
-                 for span in span_str.split(";"):
-                     start, end = span.split()
-                     ann["offsets"].append([int(start), int(end)])
-
-                 # Heuristically split text of discontiguous entities into chunks
-                 ann["text"] = []
-                 if len(ann["offsets"]) > 1:
-                     i = 0
-                     for start, end in ann["offsets"]:
-                         chunk_len = end - start
-                         ann["text"].append(text[i : chunk_len + i])
-                         i += chunk_len
-                         while i < len(text) and text[i] == " ":
-                             i += 1
-                 else:
-                     ann["text"] = [text]
-
-                 example["text_bound_annotations"].append(ann)
-
-             elif line.startswith("E"):
-                 ann = {}
-                 fields = line.split("\t")
-
-                 ann["id"] = fields[0]
 
-                 ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
 
-                 ann["arguments"] = []
-                 for role_ref_id in fields[1].split()[1:]:
-                     argument = {
-                         "role": (role_ref_id.split(":"))[0],
-                         "ref_id": (role_ref_id.split(":"))[1],
-                     }
-                     ann["arguments"].append(argument)
 
-                 example["events"].append(ann)
 
-             elif line.startswith("R"):
-                 ann = {}
-                 fields = line.split("\t")
 
-                 ann["id"] = fields[0]
-                 ann["type"] = fields[1].split()[0]
 
-                 ann["head"] = {
-                     "role": fields[1].split()[1].split(":")[0],
-                     "ref_id": fields[1].split()[1].split(":")[1],
                  }
-                 ann["tail"] = {
-                     "role": fields[1].split()[2].split(":")[0],
-                     "ref_id": fields[1].split()[2].split(":")[1],
-                 }
-
-                 example["relations"].append(ann)
-
-             elif line.startswith("*"):
-                 ann = {}
-                 fields = line.split("\t")
-
-                 ann["id"] = fields[0]
-                 ann["ref_ids"] = fields[1].split()[1:]
-
-                 example["equivalences"].append(ann)
-
-             elif line.startswith("A") or line.startswith("M"):
-                 ann = {}
-                 fields = line.split("\t")
-
-                 ann["id"] = fields[0]
-
-                 info = fields[1].split()
-                 ann["type"] = info[0]
-                 ann["ref_id"] = info[1]
-
-                 if len(info) > 2:
-                     ann["value"] = info[2]
-                 else:
-                     ann["value"] = ""
-
-                 example["attributes"].append(ann)
-
-             elif line.startswith("N"):
-                 ann = {}
-                 fields = line.split("\t")
-
-                 ann["id"] = fields[0]
-                 ann["text"] = fields[2]
-
-                 info = fields[1].split()
 
-                 ann["type"] = info[0]
-                 ann["ref_id"] = info[1]
-                 ann["resource_name"] = info[2].split(":")[0]
-                 ann["cuid"] = info[2].split(":")[1]
-                 example["normalizations"].append(ann)
-
-             elif parse_notes and line.startswith("#"):
-                 ann = {}
-                 fields = line.split("\t")
-
-                 ann["id"] = fields[0]
-                 ann["text"] = fields[2] if len(fields) == 3 else "<BB_NULL_STR>"
-
-                 info = fields[1].split()
-
-                 ann["type"] = info[0]
-                 ann["ref_id"] = info[1]
-                 example["notes"].append(ann)
-
-         return example
-
-     def _generate_examples(self, data_dir, language, dataset_type, split):
-         """Yields examples as (key, example) tuples."""
-
-         data_dir = data_dir / f"{_LANGUAGES_2[language]}"
-
-         if dataset_type in ["patents", "emea"]:
-             data_dir = data_dir / f"{_DATASET_TYPES[dataset_type]}_ec22-cui-best_man"
-         else:
-             # Medline
-             if language != "en":
-                 data_dir = (
-                     data_dir
-                     / f"{_DATASET_TYPES[dataset_type]}_EN_{language.upper()}_ec22-cui-best_man"
-                 )
-             else:
-                 data_dir = [
-                     data_dir
-                     / f"{_DATASET_TYPES[dataset_type]}_EN_{_lang.upper()}_ec22-cui-best_man"
-                     for _lang in _LANGUAGES_2
-                     if _lang != "en"
-                 ]
-
-         if not isinstance(data_dir, list):
-             data_dir: List[Path] = [data_dir]
-
-         raw_files = [raw_file for _dir in data_dir for raw_file in _dir.glob("*.txt")]
-
-         all_res = []
-
-         for i, raw_file in enumerate(raw_files):
-             brat_example = self.parse_brat_file(raw_file, parse_notes=True)
-             source_example = self._to_source_example(brat_example)
-
-             prod_format = self.convert_to_prodigy(source_example)
-
-             hf_format = self.convert_to_hf_format(prod_format)[0]
-             all_res.append(hf_format)
-
      ids = [r["id"] for r in all_res]
 
      random.seed(4)
@@ -526,39 +284,3 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
              identifier = r["id"]
              if identifier in allowed_ids:
                  yield identifier, r
-
-     def _to_source_example(self, brat_example):
-
-         source_example = {
-             "document_id": brat_example["document_id"],
-             "text": brat_example["text"],
-         }
-
-         source_example["entities"] = []
-
-         for entity_annotation, ann_notes in zip(brat_example["text_bound_annotations"], brat_example["notes"]):
-
-             entity_ann = entity_annotation.copy()
-
-             entity_ann["entity_id"] = entity_ann["id"]
-             entity_ann.pop("id")
-
-             # Get values from annotator notes
-             assert entity_ann["entity_id"] == ann_notes["ref_id"]
-             notes_values = ast.literal_eval(ann_notes["text"])
-
-             if len(notes_values) == 4:
-                 cui, preferred_term, semantic_type, semantic_group = notes_values
-             else:
-                 preferred_term, semantic_type, semantic_group = notes_values
-                 cui = entity_ann["type"]
-
-             entity_ann["cui"] = cui
-             entity_ann["preferred_term"] = preferred_term
-             entity_ann["semantic_type"] = semantic_type
-             entity_ann["type"] = semantic_group
-             entity_ann["normalized"] = [{"db_name": "UMLS", "db_id": cui}]
-
-             source_example["entities"].append(entity_ann)
-
-         return source_example
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
+ # pip install xmltodict
+
  import random
  from pathlib import Path
  from itertools import product
  from dataclasses import dataclass
  from typing import Dict, List, Tuple
 
+ import xmltodict
  import numpy as np
 
+ import datasets
+
  _CITATION = """\
  @article{10.1093/jamia/ocv037,
  author = {Kors, Jan A and Clematide, Simon and Akhondi,
 
 
  _LICENSE = "CC_BY_4p0"
 
+ _URL = "https://files.ifi.uzh.ch/cl/mantra/gsc/GSC-v1.1.zip"
 
  _LANGUAGES_2 = {
      "es": "Spanish",
 
  _DATASET_TYPES = {
      "emea": "EMEA",
      "medline": "Medline",
+     "patents": "Patent",
  }
 
  @dataclass
 
 
      def _info(self):
 
+         # if self.config.name.find("emea") != -1:
+         #     names = ['O', 'DISO', 'CHEM|PHEN', 'DEVI', 'PHEN', 'PROC', 'OBJC', 'ANAT', 'LIVB', 'CHEM', 'PHYS']
+         # elif self.config.name.find("medline") != -1:
+         #     names = ['O', 'DISO', 'GEOG', 'DEVI', 'Manufactured Object', 'PHEN', 'PROC', 'Research Device', 'OBJC', 'Mental or Behavioral Dysfunction', 'Research Activity', 'ANAT', 'LIVB', 'CHEM', 'PHYS']
+         # elif self.config.name.find("patents") != -1:
+         #     names = ['O', 'PROC', 'DISO', 'LIVB', 'PHYS', 'PHEN', 'ANAT', 'OBJC', 'Amino Acid, Peptide, or Protein|Enzyme|Receptor', 'DEVI', 'CHEM']
 
          features = datasets.Features(
              {
                  "id": datasets.Value("string"),
+                 # "document_id": datasets.Value("string"),
                  "tokens": [datasets.Value("string")],
                  "ner_tags": datasets.Sequence(
+                     datasets.Value("string")
                  ),
+                 # "ner_tags": datasets.Sequence(
+                 #     datasets.features.ClassLabel(
+                 #         names = names,
+                 #     )
+                 # ),
              }
          )
 
 
 
      def _split_generators(self, dl_manager):
 
+         language, dataset_type = self.config.name.split("_")
+
          data_dir = dl_manager.download_and_extract(_URL)
 
+         data_dir = Path(data_dir) / "GSC-v1.1" / f"{_DATASET_TYPES[dataset_type]}_GSC_{language}_man.xml"
 
          return [
              datasets.SplitGenerator(
 
          ),
      ]
 
+     def _generate_examples(self, data_dir, language, dataset_type, split):
+         """Yields examples as (key, example) tuples."""
 
+         with open(data_dir) as fd:
+             doc = xmltodict.parse(fd.read())
 
+         all_res = []
 
+         for d in doc["Corpus"]["document"]:
 
+             # print(d)
+             # print()
 
+             if type(d["unit"]) != type(list()):
+                 d["unit"] = [d["unit"]]
 
+             for u in d["unit"]:
 
+                 text = u["text"]
 
+                 if "e" in u.keys():
 
+                     if type(u["e"]) != type(list()):
+                         u["e"] = [u["e"]]
+
+                     tags = [{
+                         "label": current["@grp"].upper(),
+                         "offset_start": int(current["@offset"]),
+                         "offset_end": int(current["@offset"]) + int(current["@len"]),
+                     } for current in u["e"]]
 
+                 else:
+                     tags = []
 
+                 _tokens = text.split(" ")
+                 tokens = []
+                 for i, t in enumerate(_tokens):
 
+                     concat = " ".join(_tokens[0:i+1])
 
+                     offset_start = len(concat) - len(t)
+                     offset_end = len(concat)
 
+                     tokens.append({
+                         "token": t,
+                         "offset_start": offset_start,
+                         "offset_end": offset_end,
                      })
 
+                 ner_tags = ["O" for o in tokens]
 
+                 for tag in tags:
 
+                     for idx, token in enumerate(tokens):
 
+                         # Token and tag character ranges
+                         rtok = range(token["offset_start"], token["offset_end"]+1)
+                         rtag = range(tag["offset_start"], tag["offset_end"]+1)
 
+                         # Check if the ranges are overlapping
+                         if bool(set(rtok) & set(rtag)):
 
+                             # if ner_tags[idx] != "O" and ner_tags[idx] != tag['label']:
+                             #     print(f"{token} - currently: {ner_tags[idx]} - after: {tag['label']}")
+
+                             ner_tags[idx] = tag["label"]
 
+                 obj = {
+                     "id": u["@id"],
+                     "tokens": [t["token"] for t in tokens],
+                     "ner_tags": ner_tags,
                  }
+                 # print(obj)
+                 # print("*"*50)
 
+                 all_res.append(obj)
+
      ids = [r["id"] for r in all_res]
 
      random.seed(4)
 
          identifier = r["id"]
          if identifier in allowed_ids:
              yield identifier, r
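
For reference, a minimal sketch of what the new XML-based `_generate_examples` does with a single annotated unit. It assumes `xmltodict` is installed; the element and attribute names (`Corpus`/`document`/`unit`, `<e>` with `@grp`, `@offset`, `@len`) are taken from the parsing code above, while the sample sentence, offsets, and labels are invented for illustration:

import xmltodict

# Hypothetical excerpt mirroring the structure read by _generate_examples;
# the content is made up for this example.
sample = """
<Corpus>
  <document id="d1">
    <unit id="u1">
      <text>Headache and nausea reported</text>
      <e grp="diso" offset="0" len="8"/>
      <e grp="diso" offset="13" len="6"/>
    </unit>
  </document>
</Corpus>
"""

unit = xmltodict.parse(sample)["Corpus"]["document"]["unit"]
text = unit["text"]
entities = unit["e"] if isinstance(unit["e"], list) else [unit["e"]]

# Same whitespace tokenization, character-offset bookkeeping, and
# range-overlap labelling as in the script above.
tokens, ner_tags = [], []
for i, t in enumerate(text.split(" ")):
    offset_end = len(" ".join(text.split(" ")[: i + 1]))
    offset_start = offset_end - len(t)
    label = "O"
    for e in entities:
        e_start = int(e["@offset"])
        e_end = e_start + int(e["@len"])
        if set(range(offset_start, offset_end + 1)) & set(range(e_start, e_end + 1)):
            label = e["@grp"].upper()
    tokens.append(t)
    ner_tags.append(label)

print(tokens)    # ['Headache', 'and', 'nausea', 'reported']
print(ner_tags)  # ['DISO', 'O', 'DISO', 'O']

Note that because the per-config `names` lists are now commented out in `_info`, the emitted `ner_tags` are plain strings (`datasets.Value("string")`) rather than `ClassLabel` indices.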