gabrielaltay committed on
Commit
01e74eb
1 Parent(s): 3318c39

upload hubscripts/bionlp_st_2013_pc_hub.py to hub from bigbio repo

Files changed (1)
  1. bionlp_st_2013_pc.py +273 -0
bionlp_st_2013_pc.py ADDED
@@ -0,0 +1,273 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+ # brat parsing helpers used in _generate_examples (provided by bigbiohub in the hub layout)
+ from .bigbiohub import parse_brat_file, brat_parse_to_bigbio_kb
+
+ _DATASETNAME = "bionlp_st_2013_pc"
+ _DISPLAYNAME = "BioNLP 2013 PC"
+
+ _UNIFIED_VIEW_NAME = "bigbio"
+
+ _LANGUAGES = ["English"]
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @inproceedings{ohta-etal-2013-overview,
+     title = "Overview of the Pathway Curation ({PC}) task of {B}io{NLP} Shared Task 2013",
+     author = "Ohta, Tomoko and
+       Pyysalo, Sampo and
+       Rak, Rafal and
+       Rowley, Andrew and
+       Chun, Hong-Woo and
+       Jung, Sung-Jae and
+       Choi, Sung-Pil and
+       Ananiadou, Sophia and
+       Tsujii, Jun{'}ichi",
+     booktitle = "Proceedings of the {B}io{NLP} Shared Task 2013 Workshop",
+     month = aug,
+     year = "2013",
+     address = "Sofia, Bulgaria",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/W13-2009",
+     pages = "67--75",
+ }
+ """
+
+ _DESCRIPTION = """\
+ The Pathway Curation (PC) task is a main event extraction task of the BioNLP Shared Task (ST) 2013.
+ The PC task concerns the automatic extraction of biomolecular reactions from text.
+ The task setting, representation and semantics are defined with respect to pathway
+ model standards and ontologies (SBML, BioPAX, SBO) and documents selected by relevance
+ to specific model reactions. Two BioNLP ST 2013 participants successfully completed
+ the PC task. The highest achieved F-score, 52.8%, indicates that event extraction is
+ a promising approach to supporting pathway curation efforts.
+ """
+
+ _HOMEPAGE = "https://github.com/openbiocorpora/bionlp-st-2013-pc"
+
+ _LICENSE = "GENIA Project License for Annotated Corpora"
+
+ _URLs = {
+     "bionlp_st_2013_pc": "https://github.com/openbiocorpora/bionlp-st-2013-pc/archive/refs/heads/master.zip",
+ }
+
+ _SUPPORTED_TASKS = [
+     Tasks.EVENT_EXTRACTION,
+     Tasks.NAMED_ENTITY_RECOGNITION,
+     Tasks.COREFERENCE_RESOLUTION,
+ ]
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class bionlp_st_2013_pc(datasets.GeneratorBasedBuilder):
+     """The Pathway Curation (PC) task is a main event extraction task of the BioNLP Shared Task (ST) 2013."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="bionlp_st_2013_pc_source",
+             version=SOURCE_VERSION,
+             description="bionlp_st_2013_pc source schema",
+             schema="source",
+             subset_id="bionlp_st_2013_pc",
+         ),
+         BigBioConfig(
+             name="bionlp_st_2013_pc_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="bionlp_st_2013_pc BigBio schema",
+             schema="bigbio_kb",
+             subset_id="bionlp_st_2013_pc",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "bionlp_st_2013_pc_source"
+
+     _ROLE_MAPPING = {
+         "Theme2": "Theme",
+         "Theme3": "Theme",
+         "Theme4": "Theme",
+         "Participant2": "Participant",
+         "Participant3": "Participant",
+         "Participant4": "Participant",
+         "Participant5": "Participant",
+         "Product2": "Product",
+         "Product3": "Product",
+         "Product4": "Product",
+     }
+
+     def _info(self):
+         """
+         - `features` defines the schema of the parsed dataset. The schema depends on the
+           chosen `config`: for the `source` schema it is the schema of the original brat
+           data; for the `bigbio_kb` schema it is the canonical BigBio KB-task schema
+           (`kb_features` from `bigbiohub`).
+         """
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
+                         {
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "text": datasets.Sequence(datasets.Value("string")),
+                             "type": datasets.Value("string"),
+                             "id": datasets.Value("string"),
+                         }
+                     ],
+                     "events": [  # E line in brat
+                         {
+                             "trigger": datasets.Value("string"),  # refers to the text_bound_annotation of the trigger
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "arguments": datasets.Sequence(
+                                 {
+                                     "role": datasets.Value("string"),
+                                     "ref_id": datasets.Value("string"),
+                                 }
+                             ),
+                         }
+                     ],
+                     "relations": [  # R line in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "head": {
+                                 "ref_id": datasets.Value("string"),
+                                 "role": datasets.Value("string"),
+                             },
+                             "tail": {
+                                 "ref_id": datasets.Value("string"),
+                                 "role": datasets.Value("string"),
+                             },
+                             "type": datasets.Value("string"),
+                         }
+                     ],
+                     "equivalences": [  # Equiv line in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "ref_ids": datasets.Sequence(datasets.Value("string")),
+                         }
+                     ],
+                     "attributes": [  # M or A lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                             "value": datasets.Value("string"),
+                         }
+                     ],
+                     "normalizations": [  # N lines in brat
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "ref_id": datasets.Value("string"),
+                             "resource_name": datasets.Value("string"),  # name of the resource, e.g. "Wikipedia"
+                             "cuid": datasets.Value("string"),  # ID in the resource, e.g. 534366
+                             "text": datasets.Value("string"),  # human-readable name of the entity, e.g. "Barack Obama"
+                         }
+                     ],
+                 },
+             )
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment the
+             # supervised_keys line below; it is used when as_supervised=True in builder.as_dataset.
+             # Supervised keys are not applicable for this dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=str(_LICENSE),
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager
+     ) -> List[datasets.SplitGenerator]:
+         my_urls = _URLs[_DATASETNAME]
+         data_dir = Path(dl_manager.download_and_extract(my_urls))
+         data_files = {
+             "train": data_dir / "bionlp-st-2013-pc-master" / "original-data" / "train",
+             "dev": data_dir / "bionlp-st-2013-pc-master" / "original-data" / "devel",
+             "test": data_dir / "bionlp-st-2013-pc-master" / "original-data" / "test",
+         }
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_files": data_files["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"data_files": data_files["dev"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"data_files": data_files["test"]},
+             ),
+         ]
+
+     def _standardize_arguments_roles(self, kb_example: Dict) -> Dict:
+         """Collapse numbered argument roles (e.g. Theme2, Product3) onto their base role names."""
+         for event in kb_example["events"]:
+             for argument in event["arguments"]:
+                 role = argument["role"]
+                 argument["role"] = self._ROLE_MAPPING.get(role, role)
+
+         return kb_example
+
+     def _generate_examples(self, data_files: Path):
+         if self.config.schema == "source":
+             txt_files = list(data_files.glob("*.txt"))
+             for guid, txt_file in enumerate(txt_files):
+                 example = parse_brat_file(txt_file)
+                 example["id"] = str(guid)
+                 yield guid, example
+         elif self.config.schema == "bigbio_kb":
+             txt_files = list(data_files.glob("*.txt"))
+             for guid, txt_file in enumerate(txt_files):
+                 example = brat_parse_to_bigbio_kb(parse_brat_file(txt_file))
+                 example = self._standardize_arguments_roles(example)
+                 example["id"] = str(guid)
+                 yield guid, example
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
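
For reference, a minimal usage sketch. The `bigbio/bionlp_st_2013_pc` repo id is an assumption based on the commit message ("upload ... to hub from bigbio repo"); the config names come from `BUILDER_CONFIGS` above, and depending on your `datasets` version you may need to pass `trust_remote_code=True`:

    import datasets

    # Source view: mirrors the original brat annotations (T/E/R lines).
    source = datasets.load_dataset("bigbio/bionlp_st_2013_pc", name="bionlp_st_2013_pc_source")

    # Unified BigBio KB view: entities, events, relations, coreferences.
    kb = datasets.load_dataset("bigbio/bionlp_st_2013_pc", name="bionlp_st_2013_pc_bigbio_kb")

    # In the KB view, numbered roles (Theme2, Participant3, ...) have been collapsed
    # to their base names by _standardize_arguments_roles.
    print(kb["train"][0]["events"][:2])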