gabrielaltay committed on
Commit
a9cc57b
1 Parent(s): fc490e7

upload hubscripts/ask_a_patient_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. ask_a_patient.py +170 -0
ask_a_patient.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import glob
17
+ import os
18
+ import re
19
+
20
+ import datasets
21
+
22
+ from .bigbiohub import kb_features
23
+ from .bigbiohub import BigBioConfig
24
+ from .bigbiohub import Tasks
25
+
26
# Canonical dataset identifiers used by the BigBio tooling.
_DATASETNAME = "ask_a_patient"
_DISPLAYNAME = "AskAPatient"

# Dataset-level metadata flags: language(s) covered, whether the source
# is indexed in PubMed, and whether the data must be obtained locally.
_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
# BibTeX entry for the paper introducing the dataset (ACL 2016).
_CITATION = """
@inproceedings{limsopatham-collier-2016-normalising,
title = "Normalising Medical Concepts in Social Media Texts by Learning Semantic Representation",
author = "Limsopatham, Nut and
Collier, Nigel",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P16-1096",
doi = "10.18653/v1/P16-1096",
pages = "1014--1023",
}
"""

# Human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """
The AskAPatient dataset contains medical concepts written on social media \
mapped to how they are formally written in medical ontologies (SNOMED-CT and AMT).
"""

_HOMEPAGE = "https://zenodo.org/record/55013"

_LICENSE = 'Creative Commons Attribution 4.0 International'

# Single zip archive on Zenodo holding all cross-validation folds.
_URLs = "https://zenodo.org/record/55013/files/datasets.zip"

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"
62
+
63
+
64
class AskAPatient(datasets.GeneratorBasedBuilder):
    """AskAPatient: Dataset for Normalising Medical Concepts in Social Media Text.

    Exposes two configurations: the original ``source`` schema (one record
    per annotated line) and the harmonised ``bigbio_kb`` schema, where each
    social-media phrase becomes a single-passage document with one entity
    normalised to a SNOMED-CT/AMT concept ID.
    """

    DEFAULT_CONFIG_NAME = "ask_a_patient_source"
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="ask_a_patient_source",
            version=SOURCE_VERSION,
            description="AskAPatient source schema",
            schema="source",
            subset_id="ask_a_patient",
        ),
        BigBioConfig(
            name="ask_a_patient_bigbio_kb",
            version=BIGBIO_VERSION,
            description="AskAPatient simplified BigBio schema",
            schema="bigbio_kb",
            subset_id="ask_a_patient",
        ),
    ]

    def _info(self):
        """Build the DatasetInfo for the active config's schema.

        Raises:
            ValueError: if the config carries an unsupported schema name
                (the original code would have hit a NameError on the
                unbound ``features`` variable instead).
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "cui": datasets.Value("string"),
                    "medical_concept": datasets.Value("string"),
                    "social_media_text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features
        else:
            raise ValueError(f"Unsupported schema: {self.config.schema}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Create one SplitGenerator per (split, fold) pair.

        The corpus is distributed as k cross-validation folds, so split
        names are e.g. ``train_0`` ... ``train_9`` rather than a plain
        ``train``/``validation``/``test`` triple.
        """
        dl_dir = dl_manager.download_and_extract(_URLs)
        dataset_dir = os.path.join(dl_dir, "datasets", "AskAPatient")
        # Raw string: "\." and "\d" are invalid escape sequences in a plain
        # string literal (SyntaxWarning since Python 3.12). Compiled once,
        # outside the loops.
        fold_pattern = re.compile(r"AskAPatient\.fold-(\d)\.")
        splits = []
        for split_name in [
            datasets.Split.TRAIN,
            datasets.Split.VALIDATION,
            datasets.Split.TEST,
        ]:
            # `split_name` formats as "train"/"validation"/"test" in the glob.
            for fold_filepath in glob.glob(
                os.path.join(dataset_dir, f"AskAPatient.fold-*.{split_name}.txt")
            ):
                fold_id = fold_pattern.search(fold_filepath).group(1)
                split_id = f"{split_name}_{fold_id}"
                splits.append(
                    datasets.SplitGenerator(
                        name=split_id,
                        gen_kwargs={"filepath": fold_filepath, "split_id": split_id},
                    )
                )
        return splits

    def _generate_examples(self, filepath, split_id):
        """Yield (key, example) pairs from one tab-separated fold file.

        Each line holds: CUI <TAB> medical concept <TAB> social-media text.
        The source files are latin-1 encoded.
        """
        with open(filepath, "r", encoding="latin-1") as f:
            for line_idx, line in enumerate(f):
                # Renamed from `id` to avoid shadowing the builtin.
                example_id = f"{split_id}_{line_idx}"
                cui, medical_concept, social_media_text = line.strip().split("\t")
                if self.config.schema == "source":
                    yield example_id, {
                        "cui": cui,
                        "medical_concept": medical_concept,
                        "social_media_text": social_media_text,
                    }
                elif self.config.schema == "bigbio_kb":
                    text_type = "social_media_text"
                    # The single entity spans the whole passage.
                    offset = (0, len(social_media_text))
                    yield example_id, {
                        "id": example_id,
                        "document_id": example_id,
                        "passages": [
                            {
                                "id": f"{example_id}_passage",
                                "type": text_type,
                                "text": [social_media_text],
                                "offsets": [offset],
                            }
                        ],
                        "entities": [
                            {
                                "id": f"{example_id}_entity",
                                "type": text_type,
                                "text": [social_media_text],
                                "offsets": [offset],
                                # Concept IDs come from SNOMED-CT or AMT;
                                # the source does not say which per row.
                                "normalized": [
                                    {"db_name": "SNOMED-CT|AMT", "db_id": cui}
                                ],
                            }
                        ],
                        "events": [],
                        "coreferences": [],
                        "relations": [],
                    }