parquet-converter committed
Commit 8a1c6c2
1 Parent(s): f1025a3

Update parquet files

.gitattributes DELETED
@@ -1,54 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
- *.model filter=lfs diff=lfs merge=lfs -text
14
- *.msgpack filter=lfs diff=lfs merge=lfs -text
15
- *.npy filter=lfs diff=lfs merge=lfs -text
16
- *.npz filter=lfs diff=lfs merge=lfs -text
17
- *.onnx filter=lfs diff=lfs merge=lfs -text
18
- *.ot filter=lfs diff=lfs merge=lfs -text
19
- *.parquet filter=lfs diff=lfs merge=lfs -text
20
- *.pb filter=lfs diff=lfs merge=lfs -text
21
- *.pickle filter=lfs diff=lfs merge=lfs -text
22
- *.pkl filter=lfs diff=lfs merge=lfs -text
23
- *.pt filter=lfs diff=lfs merge=lfs -text
24
- *.pth filter=lfs diff=lfs merge=lfs -text
25
- *.rar filter=lfs diff=lfs merge=lfs -text
26
- *.safetensors filter=lfs diff=lfs merge=lfs -text
27
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
- *.tar.* filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
36
- # Audio files - uncompressed
37
- *.pcm filter=lfs diff=lfs merge=lfs -text
38
- *.sam filter=lfs diff=lfs merge=lfs -text
39
- *.raw filter=lfs diff=lfs merge=lfs -text
40
- # Audio files - compressed
41
- *.aac filter=lfs diff=lfs merge=lfs -text
42
- *.flac filter=lfs diff=lfs merge=lfs -text
43
- *.mp3 filter=lfs diff=lfs merge=lfs -text
44
- *.ogg filter=lfs diff=lfs merge=lfs -text
45
- *.wav filter=lfs diff=lfs merge=lfs -text
46
- # Image files - uncompressed
47
- *.bmp filter=lfs diff=lfs merge=lfs -text
48
- *.gif filter=lfs diff=lfs merge=lfs -text
49
- *.png filter=lfs diff=lfs merge=lfs -text
50
- *.tiff filter=lfs diff=lfs merge=lfs -text
51
- # Image files - compressed
52
- *.jpg filter=lfs diff=lfs merge=lfs -text
53
- *.jpeg filter=lfs diff=lfs merge=lfs -text
54
- *.webp filter=lfs diff=lfs merge=lfs -text
 
bigbiohub.py DELETED
@@ -1,556 +0,0 @@
1
- from collections import defaultdict
2
- from dataclasses import dataclass
3
- from enum import Enum
4
- import logging
5
- from pathlib import Path
6
- from types import SimpleNamespace
7
- from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
8
-
9
- import datasets
10
-
11
- if TYPE_CHECKING:
12
- import bioc
13
-
14
- logger = logging.getLogger(__name__)
15
-
16
-
17
- BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
18
-
19
-
20
- @dataclass
21
- class BigBioConfig(datasets.BuilderConfig):
22
- """BuilderConfig for BigBio."""
23
-
24
- name: str = None
25
- version: datasets.Version = None
26
- description: str = None
27
- schema: str = None
28
- subset_id: str = None
29
-
30
-
31
- class Tasks(Enum):
32
- NAMED_ENTITY_RECOGNITION = "NER"
33
- NAMED_ENTITY_DISAMBIGUATION = "NED"
34
- EVENT_EXTRACTION = "EE"
35
- RELATION_EXTRACTION = "RE"
36
- COREFERENCE_RESOLUTION = "COREF"
37
- QUESTION_ANSWERING = "QA"
38
- TEXTUAL_ENTAILMENT = "TE"
39
- SEMANTIC_SIMILARITY = "STS"
40
- TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
41
- PARAPHRASING = "PARA"
42
- TRANSLATION = "TRANSL"
43
- SUMMARIZATION = "SUM"
44
- TEXT_CLASSIFICATION = "TXTCLASS"
45
-
46
-
47
- entailment_features = datasets.Features(
48
- {
49
- "id": datasets.Value("string"),
50
- "premise": datasets.Value("string"),
51
- "hypothesis": datasets.Value("string"),
52
- "label": datasets.Value("string"),
53
- }
54
- )
55
-
56
- pairs_features = datasets.Features(
57
- {
58
- "id": datasets.Value("string"),
59
- "document_id": datasets.Value("string"),
60
- "text_1": datasets.Value("string"),
61
- "text_2": datasets.Value("string"),
62
- "label": datasets.Value("string"),
63
- }
64
- )
65
-
66
- qa_features = datasets.Features(
67
- {
68
- "id": datasets.Value("string"),
69
- "question_id": datasets.Value("string"),
70
- "document_id": datasets.Value("string"),
71
- "question": datasets.Value("string"),
72
- "type": datasets.Value("string"),
73
- "choices": [datasets.Value("string")],
74
- "context": datasets.Value("string"),
75
- "answer": datasets.Sequence(datasets.Value("string")),
76
- }
77
- )
78
-
79
- text_features = datasets.Features(
80
- {
81
- "id": datasets.Value("string"),
82
- "document_id": datasets.Value("string"),
83
- "text": datasets.Value("string"),
84
- "labels": [datasets.Value("string")],
85
- }
86
- )
87
-
88
- text2text_features = datasets.Features(
89
- {
90
- "id": datasets.Value("string"),
91
- "document_id": datasets.Value("string"),
92
- "text_1": datasets.Value("string"),
93
- "text_2": datasets.Value("string"),
94
- "text_1_name": datasets.Value("string"),
95
- "text_2_name": datasets.Value("string"),
96
- }
97
- )
98
-
99
- kb_features = datasets.Features(
100
- {
101
- "id": datasets.Value("string"),
102
- "document_id": datasets.Value("string"),
103
- "passages": [
104
- {
105
- "id": datasets.Value("string"),
106
- "type": datasets.Value("string"),
107
- "text": datasets.Sequence(datasets.Value("string")),
108
- "offsets": datasets.Sequence([datasets.Value("int32")]),
109
- }
110
- ],
111
- "entities": [
112
- {
113
- "id": datasets.Value("string"),
114
- "type": datasets.Value("string"),
115
- "text": datasets.Sequence(datasets.Value("string")),
116
- "offsets": datasets.Sequence([datasets.Value("int32")]),
117
- "normalized": [
118
- {
119
- "db_name": datasets.Value("string"),
120
- "db_id": datasets.Value("string"),
121
- }
122
- ],
123
- }
124
- ],
125
- "events": [
126
- {
127
- "id": datasets.Value("string"),
128
- "type": datasets.Value("string"),
129
- # refers to the text_bound_annotation of the trigger
130
- "trigger": {
131
- "text": datasets.Sequence(datasets.Value("string")),
132
- "offsets": datasets.Sequence([datasets.Value("int32")]),
133
- },
134
- "arguments": [
135
- {
136
- "role": datasets.Value("string"),
137
- "ref_id": datasets.Value("string"),
138
- }
139
- ],
140
- }
141
- ],
142
- "coreferences": [
143
- {
144
- "id": datasets.Value("string"),
145
- "entity_ids": datasets.Sequence(datasets.Value("string")),
146
- }
147
- ],
148
- "relations": [
149
- {
150
- "id": datasets.Value("string"),
151
- "type": datasets.Value("string"),
152
- "arg1_id": datasets.Value("string"),
153
- "arg2_id": datasets.Value("string"),
154
- "normalized": [
155
- {
156
- "db_name": datasets.Value("string"),
157
- "db_id": datasets.Value("string"),
158
- }
159
- ],
160
- }
161
- ],
162
- }
163
- )
164
-
165
-
166
- def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
167
-
168
- offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
169
-
170
- text = ann.text
171
-
172
- if len(offsets) > 1:
173
- i = 0
174
- texts = []
175
- for start, end in offsets:
176
- chunk_len = end - start
177
- texts.append(text[i : chunk_len + i])
178
- i += chunk_len
179
- while i < len(text) and text[i] == " ":
180
- i += 1
181
- else:
182
- texts = [text]
183
-
184
- return offsets, texts
185
-
186
-
187
- def remove_prefix(a: str, prefix: str) -> str:
188
- if a.startswith(prefix):
189
- a = a[len(prefix) :]
190
- return a
191
-
192
-
193
- def parse_brat_file(
194
- txt_file: Path,
195
- annotation_file_suffixes: List[str] = None,
196
- parse_notes: bool = False,
197
- ) -> Dict:
198
- """
199
- Parse a brat file into the schema defined below.
200
- `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
201
- Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
202
- e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
203
- Will include annotator notes, when `parse_notes == True`.
204
- brat_features = datasets.Features(
205
- {
206
- "id": datasets.Value("string"),
207
- "document_id": datasets.Value("string"),
208
- "text": datasets.Value("string"),
209
- "text_bound_annotations": [ # T line in brat, e.g. type or event trigger
210
- {
211
- "offsets": datasets.Sequence([datasets.Value("int32")]),
212
- "text": datasets.Sequence(datasets.Value("string")),
213
- "type": datasets.Value("string"),
214
- "id": datasets.Value("string"),
215
- }
216
- ],
217
- "events": [ # E line in brat
218
- {
219
- "trigger": datasets.Value(
220
- "string"
221
- ), # refers to the text_bound_annotation of the trigger,
222
- "id": datasets.Value("string"),
223
- "type": datasets.Value("string"),
224
- "arguments": datasets.Sequence(
225
- {
226
- "role": datasets.Value("string"),
227
- "ref_id": datasets.Value("string"),
228
- }
229
- ),
230
- }
231
- ],
232
- "relations": [ # R line in brat
233
- {
234
- "id": datasets.Value("string"),
235
- "head": {
236
- "ref_id": datasets.Value("string"),
237
- "role": datasets.Value("string"),
238
- },
239
- "tail": {
240
- "ref_id": datasets.Value("string"),
241
- "role": datasets.Value("string"),
242
- },
243
- "type": datasets.Value("string"),
244
- }
245
- ],
246
- "equivalences": [ # Equiv line in brat
247
- {
248
- "id": datasets.Value("string"),
249
- "ref_ids": datasets.Sequence(datasets.Value("string")),
250
- }
251
- ],
252
- "attributes": [ # M or A lines in brat
253
- {
254
- "id": datasets.Value("string"),
255
- "type": datasets.Value("string"),
256
- "ref_id": datasets.Value("string"),
257
- "value": datasets.Value("string"),
258
- }
259
- ],
260
- "normalizations": [ # N lines in brat
261
- {
262
- "id": datasets.Value("string"),
263
- "type": datasets.Value("string"),
264
- "ref_id": datasets.Value("string"),
265
- "resource_name": datasets.Value(
266
- "string"
267
- ), # Name of the resource, e.g. "Wikipedia"
268
- "cuid": datasets.Value(
269
- "string"
270
- ), # ID in the resource, e.g. 534366
271
- "text": datasets.Value(
272
- "string"
273
- ), # Human readable description/name of the entity, e.g. "Barack Obama"
274
- }
275
- ],
276
- ### OPTIONAL: Only included when `parse_notes == True`
277
- "notes": [ # # lines in brat
278
- {
279
- "id": datasets.Value("string"),
280
- "type": datasets.Value("string"),
281
- "ref_id": datasets.Value("string"),
282
- "text": datasets.Value("string"),
283
- }
284
- ],
285
- },
286
- )
287
- """
288
-
289
- example = {}
290
- example["document_id"] = txt_file.with_suffix("").name
291
- with txt_file.open() as f:
292
- example["text"] = f.read()
293
-
294
- # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
295
- # for event extraction
296
- if annotation_file_suffixes is None:
297
- annotation_file_suffixes = [".a1", ".a2", ".ann"]
298
-
299
- if len(annotation_file_suffixes) == 0:
300
- raise AssertionError(
301
- "At least one suffix for the to-be-read annotation files should be given!"
302
- )
303
-
304
- ann_lines = []
305
- for suffix in annotation_file_suffixes:
306
- annotation_file = txt_file.with_suffix(suffix)
307
- if annotation_file.exists():
308
- with annotation_file.open() as f:
309
- ann_lines.extend(f.readlines())
310
-
311
- example["text_bound_annotations"] = []
312
- example["events"] = []
313
- example["relations"] = []
314
- example["equivalences"] = []
315
- example["attributes"] = []
316
- example["normalizations"] = []
317
-
318
- if parse_notes:
319
- example["notes"] = []
320
-
321
- for line in ann_lines:
322
- line = line.strip()
323
- if not line:
324
- continue
325
-
326
- if line.startswith("T"): # Text bound
327
- ann = {}
328
- fields = line.split("\t")
329
-
330
- ann["id"] = fields[0]
331
- ann["type"] = fields[1].split()[0]
332
- ann["offsets"] = []
333
- span_str = remove_prefix(fields[1], (ann["type"] + " "))
334
- text = fields[2]
335
- for span in span_str.split(";"):
336
- start, end = span.split()
337
- ann["offsets"].append([int(start), int(end)])
338
-
339
- # Heuristically split text of discontiguous entities into chunks
340
- ann["text"] = []
341
- if len(ann["offsets"]) > 1:
342
- i = 0
343
- for start, end in ann["offsets"]:
344
- chunk_len = end - start
345
- ann["text"].append(text[i : chunk_len + i])
346
- i += chunk_len
347
- while i < len(text) and text[i] == " ":
348
- i += 1
349
- else:
350
- ann["text"] = [text]
351
-
352
- example["text_bound_annotations"].append(ann)
353
-
354
- elif line.startswith("E"):
355
- ann = {}
356
- fields = line.split("\t")
357
-
358
- ann["id"] = fields[0]
359
-
360
- ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
361
-
362
- ann["arguments"] = []
363
- for role_ref_id in fields[1].split()[1:]:
364
- argument = {
365
- "role": (role_ref_id.split(":"))[0],
366
- "ref_id": (role_ref_id.split(":"))[1],
367
- }
368
- ann["arguments"].append(argument)
369
-
370
- example["events"].append(ann)
371
-
372
- elif line.startswith("R"):
373
- ann = {}
374
- fields = line.split("\t")
375
-
376
- ann["id"] = fields[0]
377
- ann["type"] = fields[1].split()[0]
378
-
379
- ann["head"] = {
380
- "role": fields[1].split()[1].split(":")[0],
381
- "ref_id": fields[1].split()[1].split(":")[1],
382
- }
383
- ann["tail"] = {
384
- "role": fields[1].split()[2].split(":")[0],
385
- "ref_id": fields[1].split()[2].split(":")[1],
386
- }
387
-
388
- example["relations"].append(ann)
389
-
390
- # '*' seems to be the legacy way to mark equivalences,
391
- # but I couldn't find any info on the current way
392
- # this might have to be adapted dependent on the brat version
393
- # of the annotation
394
- elif line.startswith("*"):
395
- ann = {}
396
- fields = line.split("\t")
397
-
398
- ann["id"] = fields[0]
399
- ann["ref_ids"] = fields[1].split()[1:]
400
-
401
- example["equivalences"].append(ann)
402
-
403
- elif line.startswith("A") or line.startswith("M"):
404
- ann = {}
405
- fields = line.split("\t")
406
-
407
- ann["id"] = fields[0]
408
-
409
- info = fields[1].split()
410
- ann["type"] = info[0]
411
- ann["ref_id"] = info[1]
412
-
413
- if len(info) > 2:
414
- ann["value"] = info[2]
415
- else:
416
- ann["value"] = ""
417
-
418
- example["attributes"].append(ann)
419
-
420
- elif line.startswith("N"):
421
- ann = {}
422
- fields = line.split("\t")
423
-
424
- ann["id"] = fields[0]
425
- ann["text"] = fields[2]
426
-
427
- info = fields[1].split()
428
-
429
- ann["type"] = info[0]
430
- ann["ref_id"] = info[1]
431
- ann["resource_name"] = info[2].split(":")[0]
432
- ann["cuid"] = info[2].split(":")[1]
433
- example["normalizations"].append(ann)
434
-
435
- elif parse_notes and line.startswith("#"):
436
- ann = {}
437
- fields = line.split("\t")
438
-
439
- ann["id"] = fields[0]
440
- ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
441
-
442
- info = fields[1].split()
443
-
444
- ann["type"] = info[0]
445
- ann["ref_id"] = info[1]
446
- example["notes"].append(ann)
447
-
448
- return example
449
-
450
-
451
- def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
452
- """
453
- Transform a brat parse (conforming to the standard brat schema) obtained with
454
- `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
455
- :param brat_parse:
456
- """
457
-
458
- unified_example = {}
459
-
460
- # Prefix all ids with document id to ensure global uniqueness,
461
- # because brat ids are only unique within their document
462
- id_prefix = brat_parse["document_id"] + "_"
463
-
464
- # identical
465
- unified_example["document_id"] = brat_parse["document_id"]
466
- unified_example["passages"] = [
467
- {
468
- "id": id_prefix + "_text",
469
- "type": "abstract",
470
- "text": [brat_parse["text"]],
471
- "offsets": [[0, len(brat_parse["text"])]],
472
- }
473
- ]
474
-
475
- # get normalizations
476
- ref_id_to_normalizations = defaultdict(list)
477
- for normalization in brat_parse["normalizations"]:
478
- ref_id_to_normalizations[normalization["ref_id"]].append(
479
- {
480
- "db_name": normalization["resource_name"],
481
- "db_id": normalization["cuid"],
482
- }
483
- )
484
-
485
- # separate entities and event triggers
486
- unified_example["events"] = []
487
- non_event_ann = brat_parse["text_bound_annotations"].copy()
488
- for event in brat_parse["events"]:
489
- event = event.copy()
490
- event["id"] = id_prefix + event["id"]
491
- trigger = next(
492
- tr
493
- for tr in brat_parse["text_bound_annotations"]
494
- if tr["id"] == event["trigger"]
495
- )
496
- if trigger in non_event_ann:
497
- non_event_ann.remove(trigger)
498
- event["trigger"] = {
499
- "text": trigger["text"].copy(),
500
- "offsets": trigger["offsets"].copy(),
501
- }
502
- for argument in event["arguments"]:
503
- argument["ref_id"] = id_prefix + argument["ref_id"]
504
-
505
- unified_example["events"].append(event)
506
-
507
- unified_example["entities"] = []
508
- anno_ids = [ref_id["id"] for ref_id in non_event_ann]
509
- for ann in non_event_ann:
510
- entity_ann = ann.copy()
511
- entity_ann["id"] = id_prefix + entity_ann["id"]
512
- entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
513
- unified_example["entities"].append(entity_ann)
514
-
515
- # massage relations
516
- unified_example["relations"] = []
517
- skipped_relations = set()
518
- for ann in brat_parse["relations"]:
519
- if (
520
- ann["head"]["ref_id"] not in anno_ids
521
- or ann["tail"]["ref_id"] not in anno_ids
522
- ):
523
- skipped_relations.add(ann["id"])
524
- continue
525
- unified_example["relations"].append(
526
- {
527
- "arg1_id": id_prefix + ann["head"]["ref_id"],
528
- "arg2_id": id_prefix + ann["tail"]["ref_id"],
529
- "id": id_prefix + ann["id"],
530
- "type": ann["type"],
531
- "normalized": [],
532
- }
533
- )
534
- if len(skipped_relations) > 0:
535
- example_id = brat_parse["document_id"]
536
- logger.info(
537
- f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
538
- f" Skip (for now): "
539
- f"{list(skipped_relations)}"
540
- )
541
-
542
- # get coreferences
543
- unified_example["coreferences"] = []
544
- for i, ann in enumerate(brat_parse["equivalences"], start=1):
545
- is_entity_cluster = True
546
- for ref_id in ann["ref_ids"]:
547
- if not ref_id.startswith("T"): # not textbound -> no entity
548
- is_entity_cluster = False
549
- elif ref_id not in anno_ids: # event trigger -> no entity
550
- is_entity_cluster = False
551
- if is_entity_cluster:
552
- entity_ids = [id_prefix + i for i in ann["ref_ids"]]
553
- unified_example["coreferences"].append(
554
- {"id": id_prefix + str(i), "entity_ids": entity_ids}
555
- )
556
- return unified_example
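For orientation, a minimal usage sketch of the two helpers deleted above, assuming bigbiohub.py is importable and a brat document exists at the illustrative path data/1234.txt with a matching data/1234.ann beside it:

from pathlib import Path

from bigbiohub import parse_brat_file, brat_parse_to_bigbio_kb

# Parse the .txt/.ann pair into the generic brat schema documented above,
# then lift the result into the bigbio-kb schema.
brat_example = parse_brat_file(Path("data/1234.txt"), annotation_file_suffixes=[".ann"])
kb_example = brat_parse_to_bigbio_kb(brat_example)
print(kb_example["document_id"], len(kb_example["entities"]), len(kb_example["relations"]))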
 
chia.py DELETED
@@ -1,649 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """
16
- A large annotated corpus of patient eligibility criteria extracted from 1,000
17
- interventional, Phase IV clinical trials registered in ClinicalTrials.gov. This
18
- dataset includes 12,409 annotated eligibility criteria, represented by 41,487
19
- distinctive entities of 15 entity types and 25,017 relationships of 12
20
- relationship types."""
21
- from pathlib import Path
22
- from typing import Dict, Iterator, List, Tuple
23
-
24
- import datasets
25
-
26
- from .bigbiohub import kb_features
27
- from .bigbiohub import BigBioConfig
28
- from .bigbiohub import Tasks
29
- from .bigbiohub import remove_prefix
30
-
31
-
32
- _LANGUAGES = ['English']
33
- _PUBMED = False
34
- _LOCAL = False
35
- _CITATION = """\
36
- @article{kury2020chia,
37
- title = {Chia, a large annotated corpus of clinical trial eligibility criteria},
38
- author = {
39
- Kury, Fabr{\'\\i}cio and Butler, Alex and Yuan, Chi and Fu, Li-heng and
40
- Sun, Yingcheng and Liu, Hao and Sim, Ida and Carini, Simona and Weng,
41
- Chunhua
42
- },
43
- year = 2020,
44
- journal = {Scientific data},
45
- publisher = {Nature Publishing Group},
46
- volume = 7,
47
- number = 1,
48
- pages = {1--11}
49
- }
50
- """
51
-
52
- _DATASETNAME = "chia"
53
- _DISPLAYNAME = "CHIA"
54
-
55
- _DESCRIPTION = """\
56
- A large annotated corpus of patient eligibility criteria extracted from 1,000
57
- interventional, Phase IV clinical trials registered in ClinicalTrials.gov. This
58
- dataset includes 12,409 annotated eligibility criteria, represented by 41,487
59
- distinctive entities of 15 entity types and 25,017 relationships of 12
60
- relationship types.
61
- """
62
-
63
- _HOMEPAGE = "https://github.com/WengLab-InformaticsResearch/CHIA"
64
-
65
- _LICENSE = 'Creative Commons Attribution 4.0 International'
66
-
67
- _URLS = {
68
- _DATASETNAME: "https://figshare.com/ndownloader/files/21728850",
69
- _DATASETNAME + "_wo_scope": "https://figshare.com/ndownloader/files/21728853",
70
- }
71
-
72
- _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
73
-
74
- _SOURCE_VERSION = "2.0.0"
75
- _BIGBIO_VERSION = "1.0.0"
76
-
77
- # For further information see appendix of the publication
78
- _DOMAIN_ENTITY_TYPES = [
79
- "Condition",
80
- "Device",
81
- "Drug",
82
- "Measurement",
83
- "Observation",
84
- "Person",
85
- "Procedure",
86
- "Visit",
87
- ]
88
-
89
- # For further information see appendix of the publication
90
- _FIELD_ENTITY_TYPES = [
91
- "Temporal",
92
- "Value",
93
- ]
94
-
95
- # For further information see appendix of the publication
96
- _CONSTRUCT_ENTITY_TYPES = [
97
- "Scope", # Not part of the "without scope" schema / version
98
- "Negation",
99
- "Multiplier",
100
- "Qualifier",
101
- "Reference_point",
102
- "Mood",
103
- ]
104
-
105
- _ALL_ENTITY_TYPES = _DOMAIN_ENTITY_TYPES + _FIELD_ENTITY_TYPES + _CONSTRUCT_ENTITY_TYPES
106
-
107
- _RELATION_TYPES = [
108
- "AND",
109
- "OR",
110
- "SUBSUMES",
111
- "HAS_NEGATION",
112
- "HAS_MULTIPLIER",
113
- "HAS_QUALIFIER",
114
- "HAS_VALUE",
115
- "HAS_TEMPORAL",
116
- "HAS_INDEX",
117
- "HAS_MOOD",
118
- "HAS_CONTEXT ",
119
- "HAS_SCOPE", # Not part of the "without scope" schema / version
120
- ]
121
-
122
- _MAX_OFFSET_CORRECTION = 100
123
-
124
-
125
- class ChiaDataset(datasets.GeneratorBasedBuilder):
126
- """
127
- A large annotated corpus of patient eligibility criteria extracted from 1,000 interventional,
128
- Phase IV clinical trials registered in ClinicalTrials.gov.
129
- """
130
-
131
- SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
132
- BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
133
-
134
- BUILDER_CONFIGS = [
135
- BigBioConfig(
136
- name="chia_source",
137
- version=SOURCE_VERSION,
138
- description="Chia source schema",
139
- schema="source",
140
- subset_id="chia",
141
- ),
142
- BigBioConfig(
143
- name="chia_fixed_source",
144
- version=SOURCE_VERSION,
145
- description="Chia source schema (with fixed entity offsets)",
146
- schema="source",
147
- subset_id="chia_fixed",
148
- ),
149
- BigBioConfig(
150
- name="chia_without_scope_source",
151
- version=SOURCE_VERSION,
152
- description="Chia without scope source schema",
153
- schema="source",
154
- subset_id="chia_without_scope",
155
- ),
156
- BigBioConfig(
157
- name="chia_without_scope_fixed_source",
158
- version=SOURCE_VERSION,
159
- description="Chia without scope source schema (with fixed entity offsets)",
160
- schema="source",
161
- subset_id="chia_without_scope_fixed",
162
- ),
163
- BigBioConfig(
164
- name="chia_bigbio_kb",
165
- version=BIGBIO_VERSION,
166
- description="Chia BigBio schema",
167
- schema="bigbio_kb",
168
- subset_id="chia",
169
- ),
170
- ]
171
-
172
- DEFAULT_CONFIG_NAME = "chia_source"
173
-
174
- def _info(self):
175
- if self.config.schema == "source":
176
- features = datasets.Features(
177
- {
178
- "id": datasets.Value("string"),
179
- "document_id": datasets.Value(
180
- "string"
181
- ), # NCT ID from clinicaltrials.gov
182
- "text": datasets.Value("string"),
183
- "text_type": datasets.Value(
184
- "string"
185
- ), # inclusion or exclusion (criteria)
186
- "entities": [
187
- {
188
- "id": datasets.Value("string"),
189
- "type": datasets.Value("string"),
190
- "text": datasets.Sequence(datasets.Value("string")),
191
- "offsets": datasets.Sequence([datasets.Value("int32")]),
192
- "normalized": [
193
- {
194
- "db_name": datasets.Value("string"),
195
- "db_id": datasets.Value("string"),
196
- }
197
- ],
198
- }
199
- ],
200
- "relations": [
201
- {
202
- "id": datasets.Value("string"),
203
- "type": datasets.Value("string"),
204
- "arg1_id": datasets.Value("string"),
205
- "arg2_id": datasets.Value("string"),
206
- "normalized": [
207
- {
208
- "db_name": datasets.Value("string"),
209
- "db_id": datasets.Value("string"),
210
- }
211
- ],
212
- }
213
- ],
214
- }
215
- )
216
-
217
- elif self.config.schema == "bigbio_kb":
218
- features = kb_features
219
-
220
- return datasets.DatasetInfo(
221
- description=_DESCRIPTION,
222
- features=features,
223
- homepage=_HOMEPAGE,
224
- license=str(_LICENSE),
225
- citation=_CITATION,
226
- )
227
-
228
- def _split_generators(self, dl_manager):
229
- url_key = _DATASETNAME
230
-
231
- if self.config.subset_id.startswith("chia_without_scope"):
232
- url_key += "_wo_scope"
233
-
234
- urls = _URLS[url_key]
235
- data_dir = Path(dl_manager.download_and_extract(urls))
236
-
237
- return [
238
- datasets.SplitGenerator(
239
- name=datasets.Split.TRAIN,
240
- gen_kwargs={"data_dir": data_dir},
241
- )
242
- ]
243
-
244
- def _generate_examples(self, data_dir: Path) -> Iterator[Tuple[str, Dict]]:
245
- if self.config.schema == "source":
246
- fix_offsets = "fixed" in self.config.subset_id
247
-
248
- for file in data_dir.iterdir():
249
- if not file.name.endswith(".txt"):
250
- continue
251
-
252
- brat_example = parse_brat_file(file, [".ann"])
253
- source_example = self._to_source_example(
254
- file, brat_example, fix_offsets
255
- )
256
- yield source_example["id"], source_example
257
-
258
- elif self.config.schema == "bigbio_kb":
259
- for file in data_dir.iterdir():
260
- if not file.name.endswith(".txt"):
261
- continue
262
-
263
- brat_example = parse_brat_file(file, [".ann"])
264
- source_example = self._to_source_example(file, brat_example, True)
265
-
266
- bigbio_example = {
267
- "id": source_example["id"],
268
- "document_id": source_example["document_id"],
269
- "passages": [
270
- {
271
- "id": source_example["id"] + "_text",
272
- "type": source_example["text_type"],
273
- "text": [source_example["text"]],
274
- "offsets": [[0, len(source_example["text"])]],
275
- }
276
- ],
277
- "entities": source_example["entities"],
278
- "relations": source_example["relations"],
279
- "events": [],
280
- "coreferences": [],
281
- }
282
-
283
- yield bigbio_example["id"], bigbio_example
284
-
285
- def _to_source_example(
286
- self, input_file: Path, brat_example: Dict, fix_offsets: bool
287
- ) -> Dict:
288
- """
289
- Converts the generic brat example to the source schema format.
290
- """
291
- example_id = str(input_file.stem)
292
- document_id = example_id.split("_")[0]
293
- criteria_type = "inclusion" if "_inc" in input_file.stem else "exclusion"
294
-
295
- text = brat_example["text"]
296
-
297
- source_example = {
298
- "id": example_id,
299
- "document_id": document_id,
300
- "text_type": criteria_type,
301
- "text": text,
302
- "entities": [],
303
- "relations": [],
304
- }
305
-
306
- example_prefix = example_id + "_"
307
- entity_ids = {}
308
-
309
- for tb_annotation in brat_example["text_bound_annotations"]:
310
- if tb_annotation["type"].capitalize() not in _ALL_ENTITY_TYPES:
311
- continue
312
-
313
- entity_ann = tb_annotation.copy()
314
- entity_ann["id"] = example_prefix + entity_ann["id"]
315
- entity_ids[entity_ann["id"]] = True
316
-
317
- if fix_offsets:
318
- if len(entity_ann["offsets"]) > 1:
319
- entity_ann["text"] = self._get_texts_for_multiple_offsets(
320
- text, entity_ann["offsets"]
321
- )
322
-
323
- fixed_offsets = []
324
- fixed_texts = []
325
- for entity_text, offsets in zip(
326
- entity_ann["text"], entity_ann["offsets"]
327
- ):
328
- fixed_offset = self._fix_entity_offsets(text, entity_text, offsets)
329
- fixed_offsets.append(fixed_offset)
330
- fixed_texts.append(text[fixed_offset[0] : fixed_offset[1]])
331
-
332
- entity_ann["offsets"] = fixed_offsets
333
- entity_ann["text"] = fixed_texts
334
-
335
- entity_ann["normalized"] = []
336
- source_example["entities"].append(entity_ann)
337
-
338
- for base_rel_annotation in brat_example["relations"]:
339
- if base_rel_annotation["type"].upper() not in _RELATION_TYPES:
340
- continue
341
-
342
- head_id = example_prefix + base_rel_annotation["head"]["ref_id"]
343
- tail_id = example_prefix + base_rel_annotation["tail"]["ref_id"]
344
-
345
- if head_id not in entity_ids or tail_id not in entity_ids:
346
- continue
347
-
348
- relation = {
349
- "id": example_prefix + base_rel_annotation["id"],
350
- "type": base_rel_annotation["type"],
351
- "arg1_id": head_id,
352
- "arg2_id": tail_id,
353
- "normalized": [],
354
- }
355
-
356
- source_example["relations"].append(relation)
357
-
358
- relation_id = len(brat_example["relations"]) + 10
359
- for base_co_reference in brat_example["equivalences"]:
360
- ref_ids = base_co_reference["ref_ids"]
361
- for i, arg1 in enumerate(ref_ids[:-1]):
362
- for arg2 in ref_ids[i + 1 :]:
363
- if arg1 not in entity_ids or arg2 not in entity_ids:
364
- continue
365
-
366
- or_relation = {
367
- "id": example_prefix + f"R{relation_id}",
368
- "type": "OR",
369
- "arg1_id": example_prefix + arg1,
370
- "arg2_id": example_prefix + arg2,
371
- "normalized": [],
372
- }
373
-
374
- source_example["relations"].append(or_relation)
375
- relation_id += 1
376
-
377
- return source_example
378
-
379
- def _fix_entity_offsets(
380
- self, doc_text: str, entity_text: str, given_offsets: List[int]
381
- ) -> List[int]:
382
- """
383
- Fixes incorrect mention offsets by checking whether the given entity mention text can be
384
- found to the left or right of the given offsets by considering incrementally larger shifts.
385
- """
386
- left = given_offsets[0]
387
- right = given_offsets[1]
388
-
389
- # Some annotations contain whitespaces - we ignore them
390
- clean_entity_text = entity_text.strip()
391
-
392
- i = 0
393
- while i <= _MAX_OFFSET_CORRECTION:
394
- # Move mention window to the left
395
- if doc_text[left - i : right - i].strip() == clean_entity_text:
396
- return [left - i, left - i + len(clean_entity_text)]
397
-
398
- # Move mention window to the right
399
- elif doc_text[left + i : right + i].strip() == clean_entity_text:
400
- return [left + i, left + i + len(clean_entity_text)]
401
-
402
- i += 1
403
-
404
- # We can't find any better offsets
405
- return given_offsets
406
-
407
- def _get_texts_for_multiple_offsets(
408
- self, document_text: str, offsets: List[List[int]]
409
- ) -> List[str]:
410
- """
411
- Extracts one text span per offset in the given list of offsets.
412
- """
413
- texts = []
414
- for offset in offsets:
415
- texts.append(document_text[offset[0] : offset[1]])
416
- return texts
417
-
418
-
419
- def parse_brat_file(txt_file: Path, annotation_file_suffixes: List[str] = None) -> Dict:
420
- """
421
- Parse a brat file into the schema defined below.
422
- `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
423
- Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
424
- e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
425
-
426
- Schema of the parse:
427
- features = datasets.Features(
428
- {
429
- "id": datasets.Value("string"),
430
- "document_id": datasets.Value("string"),
431
- "text": datasets.Value("string"),
432
- "text_bound_annotations": [ # T line in brat, e.g. type or event trigger
433
- {
434
- "offsets": datasets.Sequence([datasets.Value("int32")]),
435
- "text": datasets.Sequence(datasets.Value("string")),
436
- "type": datasets.Value("string"),
437
- "id": datasets.Value("string"),
438
- }
439
- ],
440
- "events": [ # E line in brat
441
- {
442
- "trigger": datasets.Value(
443
- "string"
444
- ), # refers to the text_bound_annotation of the trigger,
445
- "id": datasets.Value("string"),
446
- "type": datasets.Value("string"),
447
- "arguments": datasets.Sequence(
448
- {
449
- "role": datasets.Value("string"),
450
- "ref_id": datasets.Value("string"),
451
- }
452
- ),
453
- }
454
- ],
455
- "relations": [ # R line in brat
456
- {
457
- "id": datasets.Value("string"),
458
- "head": {
459
- "ref_id": datasets.Value("string"),
460
- "role": datasets.Value("string"),
461
- },
462
- "tail": {
463
- "ref_id": datasets.Value("string"),
464
- "role": datasets.Value("string"),
465
- },
466
- "type": datasets.Value("string"),
467
- }
468
- ],
469
- "equivalences": [ # Equiv line in brat
470
- {
471
- "id": datasets.Value("string"),
472
- "ref_ids": datasets.Sequence(datasets.Value("string")),
473
- }
474
- ],
475
- "attributes": [ # M or A lines in brat
476
- {
477
- "id": datasets.Value("string"),
478
- "type": datasets.Value("string"),
479
- "ref_id": datasets.Value("string"),
480
- "value": datasets.Value("string"),
481
- }
482
- ],
483
- "normalizations": [ # N lines in brat
484
- {
485
- "id": datasets.Value("string"),
486
- "type": datasets.Value("string"),
487
- "ref_id": datasets.Value("string"),
488
- "resource_name": datasets.Value(
489
- "string"
490
- ), # Name of the resource, e.g. "Wikipedia"
491
- "cuid": datasets.Value(
492
- "string"
493
- ), # ID in the resource, e.g. 534366
494
- "text": datasets.Value(
495
- "string"
496
- ), # Human readable description/name of the entity, e.g. "Barack Obama"
497
- }
498
- ],
499
- },
500
- )
501
- """
502
-
503
- example = {}
504
- example["document_id"] = txt_file.with_suffix("").name
505
- with txt_file.open() as f:
506
- example["text"] = f.read()
507
-
508
- # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
509
- # for event extraction
510
- if annotation_file_suffixes is None:
511
- annotation_file_suffixes = [".a1", ".a2", ".ann"]
512
-
513
- if len(annotation_file_suffixes) == 0:
514
- raise AssertionError(
515
- "At least one suffix for the to-be-read annotation files should be given!"
516
- )
517
-
518
- ann_lines = []
519
- for suffix in annotation_file_suffixes:
520
- annotation_file = txt_file.with_suffix(suffix)
521
- if annotation_file.exists():
522
- with annotation_file.open() as f:
523
- ann_lines.extend(f.readlines())
524
-
525
- example["text_bound_annotations"] = []
526
- example["events"] = []
527
- example["relations"] = []
528
- example["equivalences"] = []
529
- example["attributes"] = []
530
- example["normalizations"] = []
531
-
532
- prev_tb_annotation = None
533
-
534
- for line in ann_lines:
535
- orig_line = line
536
- line = line.strip()
537
- if not line:
538
- continue
539
-
540
- # If an (entity) annotation spans multiple lines, this will result in multiple
541
- # lines also in the annotation file
542
- if "\t" not in line and prev_tb_annotation is not None:
543
- prev_tb_annotation["text"][0] += "\n" + orig_line[:-1]
544
- continue
545
-
546
- if line.startswith("T"): # Text bound
547
- ann = {}
548
- fields = line.split("\t")
549
-
550
- ann["id"] = fields[0]
551
- ann["text"] = [fields[2]]
552
- ann["type"] = fields[1].split()[0]
553
- ann["offsets"] = []
554
- span_str = remove_prefix(fields[1], (ann["type"] + " "))
555
- for span in span_str.split(";"):
556
- start, end = span.split()
557
- ann["offsets"].append([int(start), int(end)])
558
-
559
- example["text_bound_annotations"].append(ann)
560
- prev_tb_annotation = ann
561
-
562
- elif line.startswith("E"):
563
- ann = {}
564
- fields = line.split("\t")
565
-
566
- ann["id"] = fields[0]
567
-
568
- ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
569
-
570
- ann["arguments"] = []
571
- for role_ref_id in fields[1].split()[1:]:
572
- argument = {
573
- "role": (role_ref_id.split(":"))[0],
574
- "ref_id": (role_ref_id.split(":"))[1],
575
- }
576
- ann["arguments"].append(argument)
577
-
578
- example["events"].append(ann)
579
- prev_tb_annotation = None
580
-
581
- elif line.startswith("R"):
582
- ann = {}
583
- fields = line.split("\t")
584
-
585
- ann["id"] = fields[0]
586
- ann["type"] = fields[1].split()[0]
587
-
588
- ann["head"] = {
589
- "role": fields[1].split()[1].split(":")[0],
590
- "ref_id": fields[1].split()[1].split(":")[1],
591
- }
592
- ann["tail"] = {
593
- "role": fields[1].split()[2].split(":")[0],
594
- "ref_id": fields[1].split()[2].split(":")[1],
595
- }
596
-
597
- example["relations"].append(ann)
598
- prev_tb_annotation = None
599
-
600
- # '*' seems to be the legacy way to mark equivalences,
601
- # but I couldn't find any info on the current way
602
- # this might have to be adapted dependent on the brat version
603
- # of the annotation
604
- elif line.startswith("*"):
605
- ann = {}
606
- fields = line.split("\t")
607
-
608
- ann["id"] = fields[0]
609
- ann["ref_ids"] = fields[1].split()[1:]
610
-
611
- example["equivalences"].append(ann)
612
- prev_tb_annotation = None
613
-
614
- elif line.startswith("A") or line.startswith("M"):
615
- ann = {}
616
- fields = line.split("\t")
617
-
618
- ann["id"] = fields[0]
619
-
620
- info = fields[1].split()
621
- ann["type"] = info[0]
622
- ann["ref_id"] = info[1]
623
-
624
- if len(info) > 2:
625
- ann["value"] = info[2]
626
- else:
627
- ann["value"] = ""
628
-
629
- example["attributes"].append(ann)
630
- prev_tb_annotation = None
631
-
632
- elif line.startswith("N"):
633
- ann = {}
634
- fields = line.split("\t")
635
-
636
- ann["id"] = fields[0]
637
- ann["text"] = fields[2]
638
-
639
- info = fields[1].split()
640
-
641
- ann["type"] = info[0]
642
- ann["ref_id"] = info[1]
643
- ann["resource_name"] = info[2].split(":")[0]
644
- ann["cuid"] = info[2].split(":")[1]
645
-
646
- example["normalizations"].append(ann)
647
- prev_tb_annotation = None
648
-
649
- return example
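For context, before this conversion the configurations defined above (chia_source, chia_fixed_source, chia_without_scope_source, chia_without_scope_fixed_source, chia_bigbio_kb) were loaded through this script. A minimal sketch, assuming chia.py and bigbiohub.py sit together in a local directory; newer datasets releases may additionally require trust_remote_code=True:

import datasets

# Path and configuration name are illustrative; any of the BUILDER_CONFIGS names above works.
ds = datasets.load_dataset("path/to/chia.py", name="chia_bigbio_kb")
print(ds["train"].num_rows, ds["train"][0]["document_id"])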
 
chia_bigbio_kb/chia-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a417dc3d8812440f53bd17eed23f77ee74583520a8da760830e721c3635259b
3
+ size 2083711
chia_fixed_source/chia-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c232d9360ef03b97c6bfb24d64ef355cfbec46483b3db30b91ac18fa71dc260b
3
+ size 2049327
chia_source/chia-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee45ed69ac2e9cd0a98d938157f39cab901d2fb6f525d8469c5eef120ec703ea
3
+ size 2037942
chia_without_scope_fixed_source/chia-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74d9425fc0d1be2c931b06c909089107de02f52890d18cee4c34a8415634a254
3
+ size 1905007
chia_without_scope_source/chia-train.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5070d9d8eb2aa1838e213e98595a2d4e6e3dbba93b06cb9a7f6df94c864d95e2
3
+ size 1893793
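After the conversion, the splits can be read directly from the parquet files added above. A minimal sketch, assuming the repository has been cloned with git-lfs so the pointer files above resolve to the actual parquet payloads:

import pandas as pd
import datasets

# Each configuration directory contains a single train split.
df = pd.read_parquet("chia_bigbio_kb/chia-train.parquet")

# Equivalent route through the datasets library, using the generic parquet builder.
ds = datasets.load_dataset("parquet", data_files={"train": "chia_source/chia-train.parquet"})
print(len(df), ds["train"].num_rows)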