parquet-converter committed
Commit: 68ca029
Parent(s): 5e6ac7b
Update parquet files
Files changed:
- .gitattributes +0 -54
- bigbiohub.py +0 -556
- spl_adr_200db.py +0 -402
- spl_adr_200db_train_bigbio_kb/spl_adr_200db-train.parquet +3 -0
- spl_adr_200db_train_source/spl_adr_200db-train.parquet +3 -0
- spl_adr_200db_unannotated_bigbio_kb/spl_adr_200db-train.parquet +3 -0
- spl_adr_200db_unannotated_source/spl_adr_200db-train.parquet +3 -0
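With the loading script removed, the dataset is served straight from the Parquet shards listed above. A minimal sketch of loading one of the converted configurations with the `datasets` library (the repository id shown is an assumption; substitute the actual namespace/name of this repo):

from datasets import load_dataset

# Repo id is assumed for illustration; use the actual dataset repo path.
ds = load_dataset(
    "bigbio/spl_adr_200db",
    name="spl_adr_200db_train_bigbio_kb",
    split="train",
)
print(ds[0]["document_id"])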
.gitattributes
DELETED
@@ -1,54 +0,0 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.lz4 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
# Audio files - uncompressed
*.pcm filter=lfs diff=lfs merge=lfs -text
*.sam filter=lfs diff=lfs merge=lfs -text
*.raw filter=lfs diff=lfs merge=lfs -text
# Audio files - compressed
*.aac filter=lfs diff=lfs merge=lfs -text
*.flac filter=lfs diff=lfs merge=lfs -text
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.ogg filter=lfs diff=lfs merge=lfs -text
*.wav filter=lfs diff=lfs merge=lfs -text
# Image files - uncompressed
*.bmp filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.tiff filter=lfs diff=lfs merge=lfs -text
# Image files - compressed
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
bigbiohub.py
DELETED
@@ -1,556 +0,0 @@
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
import logging
from pathlib import Path
from types import SimpleNamespace
from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple

import datasets

if TYPE_CHECKING:
    import bioc

logger = logging.getLogger(__name__)


BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")


@dataclass
class BigBioConfig(datasets.BuilderConfig):
    """BuilderConfig for BigBio."""

    name: str = None
    version: datasets.Version = None
    description: str = None
    schema: str = None
    subset_id: str = None


class Tasks(Enum):
    NAMED_ENTITY_RECOGNITION = "NER"
    NAMED_ENTITY_DISAMBIGUATION = "NED"
    EVENT_EXTRACTION = "EE"
    RELATION_EXTRACTION = "RE"
    COREFERENCE_RESOLUTION = "COREF"
    QUESTION_ANSWERING = "QA"
    TEXTUAL_ENTAILMENT = "TE"
    SEMANTIC_SIMILARITY = "STS"
    TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
    PARAPHRASING = "PARA"
    TRANSLATION = "TRANSL"
    SUMMARIZATION = "SUM"
    TEXT_CLASSIFICATION = "TXTCLASS"


entailment_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "premise": datasets.Value("string"),
        "hypothesis": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
)

pairs_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "text_1": datasets.Value("string"),
        "text_2": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
)

qa_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "question_id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "question": datasets.Value("string"),
        "type": datasets.Value("string"),
        "choices": [datasets.Value("string")],
        "context": datasets.Value("string"),
        "answer": datasets.Sequence(datasets.Value("string")),
    }
)

text_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "text": datasets.Value("string"),
        "labels": [datasets.Value("string")],
    }
)

text2text_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "text_1": datasets.Value("string"),
        "text_2": datasets.Value("string"),
        "text_1_name": datasets.Value("string"),
        "text_2_name": datasets.Value("string"),
    }
)

kb_features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "document_id": datasets.Value("string"),
        "passages": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "text": datasets.Sequence(datasets.Value("string")),
                "offsets": datasets.Sequence([datasets.Value("int32")]),
            }
        ],
        "entities": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "text": datasets.Sequence(datasets.Value("string")),
                "offsets": datasets.Sequence([datasets.Value("int32")]),
                "normalized": [
                    {
                        "db_name": datasets.Value("string"),
                        "db_id": datasets.Value("string"),
                    }
                ],
            }
        ],
        "events": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                # refers to the text_bound_annotation of the trigger
                "trigger": {
                    "text": datasets.Sequence(datasets.Value("string")),
                    "offsets": datasets.Sequence([datasets.Value("int32")]),
                },
                "arguments": [
                    {
                        "role": datasets.Value("string"),
                        "ref_id": datasets.Value("string"),
                    }
                ],
            }
        ],
        "coreferences": [
            {
                "id": datasets.Value("string"),
                "entity_ids": datasets.Sequence(datasets.Value("string")),
            }
        ],
        "relations": [
            {
                "id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "arg1_id": datasets.Value("string"),
                "arg2_id": datasets.Value("string"),
                "normalized": [
                    {
                        "db_name": datasets.Value("string"),
                        "db_id": datasets.Value("string"),
                    }
                ],
            }
        ],
    }
)


def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:

    offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]

    text = ann.text

    if len(offsets) > 1:
        i = 0
        texts = []
        for start, end in offsets:
            chunk_len = end - start
            texts.append(text[i : chunk_len + i])
            i += chunk_len
            while i < len(text) and text[i] == " ":
                i += 1
    else:
        texts = [text]

    return offsets, texts


def remove_prefix(a: str, prefix: str) -> str:
    if a.startswith(prefix):
        a = a[len(prefix) :]
    return a


def parse_brat_file(
    txt_file: Path,
    annotation_file_suffixes: List[str] = None,
    parse_notes: bool = False,
) -> Dict:
    """
    Parse a brat file into the schema defined below.
    `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
    Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
    e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
    Will include annotator notes, when `parse_notes == True`.
    brat_features = datasets.Features(
        {
            "id": datasets.Value("string"),
            "document_id": datasets.Value("string"),
            "text": datasets.Value("string"),
            "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
                {
                    "offsets": datasets.Sequence([datasets.Value("int32")]),
                    "text": datasets.Sequence(datasets.Value("string")),
                    "type": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ],
            "events": [  # E line in brat
                {
                    "trigger": datasets.Value(
                        "string"
                    ),  # refers to the text_bound_annotation of the trigger,
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "arguments": datasets.Sequence(
                        {
                            "role": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                        }
                    ),
                }
            ],
            "relations": [  # R line in brat
                {
                    "id": datasets.Value("string"),
                    "head": {
                        "ref_id": datasets.Value("string"),
                        "role": datasets.Value("string"),
                    },
                    "tail": {
                        "ref_id": datasets.Value("string"),
                        "role": datasets.Value("string"),
                    },
                    "type": datasets.Value("string"),
                }
            ],
            "equivalences": [  # Equiv line in brat
                {
                    "id": datasets.Value("string"),
                    "ref_ids": datasets.Sequence(datasets.Value("string")),
                }
            ],
            "attributes": [  # M or A lines in brat
                {
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "ref_id": datasets.Value("string"),
                    "value": datasets.Value("string"),
                }
            ],
            "normalizations": [  # N lines in brat
                {
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "ref_id": datasets.Value("string"),
                    "resource_name": datasets.Value(
                        "string"
                    ),  # Name of the resource, e.g. "Wikipedia"
                    "cuid": datasets.Value(
                        "string"
                    ),  # ID in the resource, e.g. 534366
                    "text": datasets.Value(
                        "string"
                    ),  # Human readable description/name of the entity, e.g. "Barack Obama"
                }
            ],
            ### OPTIONAL: Only included when `parse_notes == True`
            "notes": [  # # lines in brat
                {
                    "id": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "ref_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ],
        },
    )
    """

    example = {}
    example["document_id"] = txt_file.with_suffix("").name
    with txt_file.open() as f:
        example["text"] = f.read()

    # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
    # for event extraction
    if annotation_file_suffixes is None:
        annotation_file_suffixes = [".a1", ".a2", ".ann"]

    if len(annotation_file_suffixes) == 0:
        raise AssertionError(
            "At least one suffix for the to-be-read annotation files should be given!"
        )

    ann_lines = []
    for suffix in annotation_file_suffixes:
        annotation_file = txt_file.with_suffix(suffix)
        if annotation_file.exists():
            with annotation_file.open() as f:
                ann_lines.extend(f.readlines())

    example["text_bound_annotations"] = []
    example["events"] = []
    example["relations"] = []
    example["equivalences"] = []
    example["attributes"] = []
    example["normalizations"] = []

    if parse_notes:
        example["notes"] = []

    for line in ann_lines:
        line = line.strip()
        if not line:
            continue

        if line.startswith("T"):  # Text bound
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["type"] = fields[1].split()[0]
            ann["offsets"] = []
            span_str = remove_prefix(fields[1], (ann["type"] + " "))
            text = fields[2]
            for span in span_str.split(";"):
                start, end = span.split()
                ann["offsets"].append([int(start), int(end)])

            # Heuristically split text of discontiguous entities into chunks
            ann["text"] = []
            if len(ann["offsets"]) > 1:
                i = 0
                for start, end in ann["offsets"]:
                    chunk_len = end - start
                    ann["text"].append(text[i : chunk_len + i])
                    i += chunk_len
                    while i < len(text) and text[i] == " ":
                        i += 1
            else:
                ann["text"] = [text]

            example["text_bound_annotations"].append(ann)

        elif line.startswith("E"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]

            ann["type"], ann["trigger"] = fields[1].split()[0].split(":")

            ann["arguments"] = []
            for role_ref_id in fields[1].split()[1:]:
                argument = {
                    "role": (role_ref_id.split(":"))[0],
                    "ref_id": (role_ref_id.split(":"))[1],
                }
                ann["arguments"].append(argument)

            example["events"].append(ann)

        elif line.startswith("R"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["type"] = fields[1].split()[0]

            ann["head"] = {
                "role": fields[1].split()[1].split(":")[0],
                "ref_id": fields[1].split()[1].split(":")[1],
            }
            ann["tail"] = {
                "role": fields[1].split()[2].split(":")[0],
                "ref_id": fields[1].split()[2].split(":")[1],
            }

            example["relations"].append(ann)

        # '*' seems to be the legacy way to mark equivalences,
        # but I couldn't find any info on the current way
        # this might have to be adapted dependent on the brat version
        # of the annotation
        elif line.startswith("*"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["ref_ids"] = fields[1].split()[1:]

            example["equivalences"].append(ann)

        elif line.startswith("A") or line.startswith("M"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]

            info = fields[1].split()
            ann["type"] = info[0]
            ann["ref_id"] = info[1]

            if len(info) > 2:
                ann["value"] = info[2]
            else:
                ann["value"] = ""

            example["attributes"].append(ann)

        elif line.startswith("N"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["text"] = fields[2]

            info = fields[1].split()

            ann["type"] = info[0]
            ann["ref_id"] = info[1]
            ann["resource_name"] = info[2].split(":")[0]
            ann["cuid"] = info[2].split(":")[1]
            example["normalizations"].append(ann)

        elif parse_notes and line.startswith("#"):
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL

            info = fields[1].split()

            ann["type"] = info[0]
            ann["ref_id"] = info[1]
            example["notes"].append(ann)

    return example


def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
    """
    Transform a brat parse (conforming to the standard brat schema) obtained with
    `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
    :param brat_parse:
    """

    unified_example = {}

    # Prefix all ids with document id to ensure global uniqueness,
    # because brat ids are only unique within their document
    id_prefix = brat_parse["document_id"] + "_"

    # identical
    unified_example["document_id"] = brat_parse["document_id"]
    unified_example["passages"] = [
        {
            "id": id_prefix + "_text",
            "type": "abstract",
            "text": [brat_parse["text"]],
            "offsets": [[0, len(brat_parse["text"])]],
        }
    ]

    # get normalizations
    ref_id_to_normalizations = defaultdict(list)
    for normalization in brat_parse["normalizations"]:
        ref_id_to_normalizations[normalization["ref_id"]].append(
            {
                "db_name": normalization["resource_name"],
                "db_id": normalization["cuid"],
            }
        )

    # separate entities and event triggers
    unified_example["events"] = []
    non_event_ann = brat_parse["text_bound_annotations"].copy()
    for event in brat_parse["events"]:
        event = event.copy()
        event["id"] = id_prefix + event["id"]
        trigger = next(
            tr
            for tr in brat_parse["text_bound_annotations"]
            if tr["id"] == event["trigger"]
        )
        if trigger in non_event_ann:
            non_event_ann.remove(trigger)
        event["trigger"] = {
            "text": trigger["text"].copy(),
            "offsets": trigger["offsets"].copy(),
        }
        for argument in event["arguments"]:
            argument["ref_id"] = id_prefix + argument["ref_id"]

        unified_example["events"].append(event)

    unified_example["entities"] = []
    anno_ids = [ref_id["id"] for ref_id in non_event_ann]
    for ann in non_event_ann:
        entity_ann = ann.copy()
        entity_ann["id"] = id_prefix + entity_ann["id"]
        entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
        unified_example["entities"].append(entity_ann)

    # massage relations
    unified_example["relations"] = []
    skipped_relations = set()
    for ann in brat_parse["relations"]:
        if (
            ann["head"]["ref_id"] not in anno_ids
            or ann["tail"]["ref_id"] not in anno_ids
        ):
            skipped_relations.add(ann["id"])
            continue
        unified_example["relations"].append(
            {
                "arg1_id": id_prefix + ann["head"]["ref_id"],
                "arg2_id": id_prefix + ann["tail"]["ref_id"],
                "id": id_prefix + ann["id"],
                "type": ann["type"],
                "normalized": [],
            }
        )
    if len(skipped_relations) > 0:
        example_id = brat_parse["document_id"]
        logger.info(
            f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
            f" Skip (for now): "
            f"{list(skipped_relations)}"
        )

    # get coreferences
    unified_example["coreferences"] = []
    for i, ann in enumerate(brat_parse["equivalences"], start=1):
        is_entity_cluster = True
        for ref_id in ann["ref_ids"]:
            if not ref_id.startswith("T"):  # not textbound -> no entity
                is_entity_cluster = False
            elif ref_id not in anno_ids:  # event trigger -> no entity
                is_entity_cluster = False
        if is_entity_cluster:
            entity_ids = [id_prefix + i for i in ann["ref_ids"]]
            unified_example["coreferences"].append(
                {"id": id_prefix + str(i), "entity_ids": entity_ids}
            )
    return unified_example
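For reference, the two parsing helpers at the end of this module were meant to be chained: `parse_brat_file` reads a brat `.txt` document together with its `.a1`/`.a2`/`.ann` annotation files, and `brat_parse_to_bigbio_kb` lifts the result into the `bigbio_kb` schema. A minimal usage sketch, reusing the hypothetical `data/1234.txt` path from the docstring:

from pathlib import Path

# Hypothetical document from the docstring: data/1234.txt with a
# sibling data/1234.ann (and/or .a1/.a2) file next to it.
brat_parse = parse_brat_file(Path("data/1234.txt"), parse_notes=True)
kb_example = brat_parse_to_bigbio_kb(brat_parse)

print(kb_example["document_id"])
print(len(kb_example["entities"]), "entities,",
      len(kb_example["relations"]), "relations")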
spl_adr_200db.py
DELETED
@@ -1,402 +0,0 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Dataset containing standardised information about known adverse reactions for 200
FDA-approved drugs using information from the respective Structured Product Labels (SPLs).
This data resulted from a partnership between the United States Food and Drug Administration
(FDA) and the National Library of Medicine.

Structured Product Labels (SPLs) are the documents FDA uses to exchange information
about drugs and other products. For this dataset, SPLs were manually annotated for
adverse reactions at the mention level to facilitate development and evaluation of
text mining tools for extraction of ADRs from all SPLs. The ADRs were then normalised
to the Unified Medical Language System (UMLS) and to the Medical Dictionary for
Regulatory Activities (MedDRA).

These data were used for the adverse event challenge at TAC 2017 (Text Analysis Conference)
in four different tasks:
* Task 1: Extract AdverseReactions and related mentions (Severity, Factor, DrugClass,
  Negation, Animal). This is similar to many NLP Named Entity Recognition (NER) evaluations.
* Task 2: Identify the relations between AdverseReactions and related mentions (i.e.,
  Negated, Hypothetical, and Effect). This is similar to many NLP relation
  identification evaluations.
* Task 3: Identify the positive AdverseReaction mention names in the labels.
  For the purposes of this task, positive will be defined as the caseless strings
  of all the AdverseReactions that have not been negated and are not related by
  a Hypothetical relation to a DrugClass or Animal. Note that this means Factors
  related via a Hypothetical relation are considered positive (e.g., "[unknown risk]
  Factor of [stroke]AdverseReaction") for the purposes of this task. The result of
  this task will be a list of unique strings corresponding to the positive ADRs
  as they were written in the label.
* Task 4: Provide MedDRA PT(s) and LLT(s) for each positive AdverseReaction (occasionally,
  two or more PTs are necessary to fully describe the reaction). For participants
  approaching the tasks sequentially, this can be viewed as normalization of the terms
  extracted in Task 3 to MedDRA LLTs/PTs. Because MedDRA is not publicly available,
  and contains several versions, a standard version of MedDRA v18.1 will be provided
  to the participants. Other resources such as the UMLS Terminology Services may be
  used to aid with the normalization process.

For more information regarding the challenge at TAC 2017, please visit:
https://bionlp.nlm.nih.gov/tac2017adversereactions/
"""

import xml.etree.ElementTree as ET
from collections import defaultdict
from itertools import accumulate
from typing import BinaryIO, Dict, Iterable, List, Tuple

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English']
_PUBMED = False
_LOCAL = False
_CITATION = """\
@article{demner2018dataset,
    author = {Demner-Fushman, Dina and Shooshan, Sonya and Rodriguez, Laritza and Aronson,
    Alan and Lang, Francois and Rogers, Willie and Roberts, Kirk and Tonning, Joseph},
    title = {A dataset of 200 structured product labels annotated for adverse drug reactions},
    journal = {Scientific Data},
    volume = {5},
    year = {2018},
    month = {01},
    pages = {180001},
    url = {
        https://www.researchgate.net/publication/322810855_A_dataset_of_200_structured_product_labels_annotated_for_adverse_drug_reactions
    },
    doi = {10.1038/sdata.2018.1}
}
"""

_DATASETNAME = "spl_adr_200db"
_DISPLAYNAME = "SPL ADR"

_DESCRIPTION = """\
The United States Food and Drug Administration (FDA) partnered with the National Library
of Medicine to create a pilot dataset containing standardised information about known
adverse reactions for 200 FDA-approved drugs. The Structured Product Labels (SPLs),
the documents FDA uses to exchange information about drugs and other products, were
manually annotated for adverse reactions at the mention level to facilitate development
and evaluation of text mining tools for extraction of ADRs from all SPLs. The ADRs were
then normalised to the Unified Medical Language System (UMLS) and to the Medical
Dictionary for Regulatory Activities (MedDRA).
"""

_HOMEPAGE = "https://bionlp.nlm.nih.gov/tac2017adversereactions/"

# NOTE: Source: https://osf.io/6h9q4/
_LICENSE = 'Creative Commons Zero v1.0 Universal'
_URLS = {
    _DATASETNAME: {
        "train": "https://bionlp.nlm.nih.gov/tac2017adversereactions/train_xml.tar.gz",
        "unannotated": "https://bionlp.nlm.nih.gov/tac2017adversereactions/unannotated_xml.tar.gz",
    }
}

_SUPPORTED_TASKS = [
    Tasks.NAMED_ENTITY_RECOGNITION,
    Tasks.NAMED_ENTITY_DISAMBIGUATION,
    Tasks.RELATION_EXTRACTION,
]

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"


class SplAdr200DBDataset(datasets.GeneratorBasedBuilder):
    """
    The United States Food and Drug Administration (FDA) partnered with the National Library
    of Medicine to create a pilot dataset containing standardised information about known
    adverse reactions for 200 FDA-approved drugs.

    These data were used in the adverse event challenge at TAC 2017 (Text Analysis Conference).
    For more information on the tasks, see: https://bionlp.nlm.nih.gov/tac2017adversereactions/
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = []

    for subset_name in _URLS[_DATASETNAME]:
        BUILDER_CONFIGS.extend(
            [
                BigBioConfig(
                    name=f"spl_adr_200db_{subset_name}_source",
                    version=SOURCE_VERSION,
                    description=f"SPL ADR 200db source {subset_name} schema",
                    schema="source",
                    subset_id=f"spl_adr_200db_{subset_name}",
                ),
                BigBioConfig(
                    name=f"spl_adr_200db_{subset_name}_bigbio_kb",
                    version=BIGBIO_VERSION,
                    description=f"SPL ADR 200db BigBio {subset_name} schema",
                    schema="bigbio_kb",
                    subset_id=f"spl_adr_200db_{subset_name}",
                ),
            ]
        )

    DEFAULT_CONFIG_NAME = "spl_adr_200db_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            unannotated_features = {
                "drug_name": datasets.Value("string"),
                "text": [datasets.Value("string")],
                "sections": [
                    {
                        "id": datasets.Value("string"),
                        "name": datasets.Value("string"),
                        "text": datasets.Value("string"),
                    }
                ],
            }
            features = datasets.Features(
                {
                    **unannotated_features,
                    "mentions": [
                        {
                            "id": datasets.Value("string"),
                            "section": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "start": datasets.Value("string"),
                            "len": datasets.Value("string"),
                            "str": datasets.Value("string"),
                        }
                    ],
                    "relations": [
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "arg1": datasets.Value("string"),
                            "arg2": datasets.Value("string"),
                        }
                    ],
                    "reactions": [
                        {
                            "id": datasets.Value("string"),
                            "str": datasets.Value("string"),
                            "normalizations": [
                                {
                                    "id": datasets.Value("string"),
                                    "meddra_pt": datasets.Value("string"),
                                    "meddra_pt_id": datasets.Value("string"),
                                    "meddra_llt": datasets.Value("string"),
                                    "meddra_llt_id": datasets.Value("string"),
                                    "flag": datasets.Value("string"),
                                }
                            ],
                        }
                    ],
                }
            )

        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        *_, subset_name = self.config.subset_id.split("_")

        urls = _URLS[_DATASETNAME][subset_name]

        data_dir = dl_manager.download(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": dl_manager.iter_archive(data_dir),
                },
            ),
        ]

    def _source_features_from_xml(self, element_tree):
        root = element_tree.getroot()
        drug_name = root.attrib["drug"]

        sections = root.findall(".//Text/Section")
        relations = root.findall(".//Relations/Relation")
        reactions = [
            {
                "id": reaction.attrib["id"],
                "str": reaction.attrib["str"],
                "normalizations": [
                    {
                        # NOTE: Default features to `None` as not all of them
                        # will be present in all reactions.
                        "meddra_pt": None,
                        "meddra_pt_id": None,
                        "meddra_llt": None,
                        "meddra_llt_id": None,
                        "flag": None,
                        **normalization.attrib,
                    }
                    for normalization in reaction.findall("Normalization")
                ],
            }
            for reaction in root.findall(".//Reactions/Reaction")
        ]

        mentions = root.findall(".//Mentions/Mention")
        return {
            "drug_name": drug_name,
            "text": [section.text for section in sections],
            "mentions": [mention.attrib for mention in mentions],
            "relations": [relation.attrib for relation in relations],
            "reactions": reactions,
            "sections": [
                {**section.attrib, "text": section.text} for section in sections
            ],
        }

    def _bigbio_kb_features_from_xml(self, element_tree):
        source_features = self._source_features_from_xml(
            element_tree=element_tree,
        )
        entity_normalizations = defaultdict(list)

        for reaction in source_features["reactions"]:
            entity_name = reaction["str"]
            for normalization in reaction["normalizations"]:

                # commenting this out for now
                # if there is no db_name then its not a useful normalization
                # if normalization["meddra_pt_id"]:
                #     entity_normalizations[entity_name].append(
                #         {"db_name": None, "db_id": f"pt_{normalization['meddra_pt_id']}"}
                #     )

                if normalization["meddra_llt_id"]:
                    entity_normalizations[entity_name].append(
                        {
                            "db_name": "MedDRA v18.1",
                            "db_id": f"llt_{normalization['meddra_llt_id']}",
                        }
                    )

        section_lengths = list(
            accumulate(len(section["text"]) for section in source_features["sections"])
        )

        section_offsets = [
            (start + index, end + index)
            for index, (start, end) in enumerate(
                zip([0] + section_lengths[:-1], section_lengths)
            )
        ]

        section_start_offset_map = {
            f"S{section_index}": offsets[0]
            for section_index, offsets in enumerate(section_offsets, 1)
        }

        entities = []

        for mention in source_features["mentions"]:
            start_points = [
                int(start_point) + section_start_offset_map[mention["section"]]
                for start_point in mention["start"].split(",")
            ]

            lens = [int(len_) for len_ in mention["len"].split(",")]

            offsets = [
                (start_point, start_point + len_)
                for start_point, len_ in zip(start_points, lens)
            ]

            text = " ".join(section["text"] for section in source_features["sections"])

            entity_strings = [
                text[start_point : start_point + len_]
                for start_point, len_ in zip(start_points, lens)
            ]

            entities.append(
                {
                    "id": f"{source_features['drug_name']}_entity_{mention['id']}",
                    "type": mention["type"],
                    "text": entity_strings,
                    "offsets": offsets,
                    "normalized": entity_normalizations[mention["str"]],
                }
            )

        return {
            "document_id": source_features["drug_name"],
            "passages": [
                {
                    "id": f"{source_features['drug_name']}_section_{section['id']}",
                    "type": section["name"],
                    "text": [section["text"]],
                    "offsets": [offsets],
                }
                for section, offsets in zip(
                    source_features["sections"], section_offsets
                )
            ],
            "entities": entities,
            "relations": [
                {
                    "id": f"{source_features['drug_name']}_relation_{relation['id']}",
                    "type": relation["type"],
                    "arg1_id": relation["arg1"],
                    "arg2_id": relation["arg2"],
                    "normalized": [],
                }
                for relation in source_features["relations"]
            ],
            "events": [],
            "coreferences": [],
        }

    def _generate_examples(self, filepaths: Iterable[Tuple[str, BinaryIO]]) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        for file_index, (drug_filename, drug_file) in enumerate(filepaths):
            element_tree = ET.parse(drug_file)

            if self.config.schema == "source":
                features = self._source_features_from_xml(
                    element_tree=element_tree,
                )
            elif self.config.schema == "bigbio_kb":
                features = self._bigbio_kb_features_from_xml(
                    element_tree=element_tree,
                )
                features["id"] = file_index
            else:
                raise ValueError(
                    f"Unsupported schema '{self.config.schema}' requested for "
                    f"dataset with name '{_DATASETNAME}'."
                )

            yield file_index, features
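The section-offset arithmetic in `_bigbio_kb_features_from_xml` is the subtle part of this script: mention offsets are section-local, and the sections are later joined with a single space, so each section's global start is its cumulative length shifted by the number of preceding separators. A self-contained sketch of the same computation on made-up section texts:

from itertools import accumulate

# Made-up stand-ins for SPL label sections.
sections = ["ADVERSE REACTIONS ...", "WARNINGS ...", "OVERDOSAGE ..."]

lengths = list(accumulate(len(s) for s in sections))
# The "+ index" accounts for the single space inserted between sections
# by ' '.join(...) when the full document text is assembled.
offsets = [
    (start + index, end + index)
    for index, (start, end) in enumerate(zip([0] + lengths[:-1], lengths))
]

joined = " ".join(sections)
for (start, end), section in zip(offsets, sections):
    assert joined[start:end] == section  # each section maps back exactly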
spl_adr_200db_train_bigbio_kb/spl_adr_200db-train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:10a18828787c1a65d5ef064d423d30918b8c2de6f1ded2e1fd27e506eb64d8df
size 1107985
spl_adr_200db_train_source/spl_adr_200db-train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:72d9eca2af019ae6687f14b82610b7db55699b28fbd5c2245630a2f090af811e
size 1757206
spl_adr_200db_unannotated_bigbio_kb/spl_adr_200db-train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f50d0c0147579a38ee5674e7ccb5bcae393a1f7ee1b701fc729a11531b72b82d
size 11716379
spl_adr_200db_unannotated_source/spl_adr_200db-train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9e45398de8b2e2cc3643059d65a74bbf0696327c26b74cb77349e1734c43eac
size 23194338
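The four files above are Git LFS pointers rather than the Parquet bytes themselves: `oid sha256:...` addresses the blob in LFS storage, and `size` is the real file size in bytes (roughly 1.1 MB to 23 MB here). A minimal sketch of fetching and inspecting one shard without cloning the repository (`hf_hub_download` resolves LFS pointers transparently; the repo id is an assumption):

import pandas as pd
from huggingface_hub import hf_hub_download

# Assumed repo id; substitute the actual dataset namespace/name.
local_path = hf_hub_download(
    repo_id="bigbio/spl_adr_200db",
    repo_type="dataset",
    filename="spl_adr_200db_train_source/spl_adr_200db-train.parquet",
)
df = pd.read_parquet(local_path)  # ~1.8 MB shard per the pointer above
print(df.shape, list(df.columns)[:5])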