gabrielaltay commited on
Commit
d5d71fd
1 Parent(s): c63bdb3

upload hubscripts/paramed_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. paramed.py +197 -0
paramed.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ NEJM is a Chinese-English parallel corpus crawled from the New England Journal of Medicine website.
18
+ English articles are distributed through https://www.nejm.org/ and Chinese articles are distributed through
19
+ http://nejmqianyan.cn/. The corpus contains all article pairs (around 2000 pairs) since 2011.
20
+ The script loads dataset in bigbio schema (using schemas/text-to-text) AND/OR source (default) schema
21
+ """
22
+ import os # useful for paths
23
+ from typing import Dict, Iterable, List
24
+
25
+ import datasets
26
+
27
+ from .bigbiohub import text2text_features
28
+ from .bigbiohub import BigBioConfig
29
+ from .bigbiohub import Tasks
30
+
31
logger = datasets.logging.get_logger(__name__)


# Language/provenance flags consumed by the BigBio loader infrastructure.
_LANGUAGES = ['English', 'Chinese']
_PUBMED = False  # corpus is crawled from NEJM websites, not PubMed
_LOCAL = False   # data is publicly downloadable; no local files required

# BibTeX entry for the ParaMed paper (Liu & Huang, 2021).
_CITATION = """\
@article{liu2021paramed,
author = {Liu, Boxiang and Huang, Liang},
title = {ParaMed: a parallel corpus for English–Chinese translation in the biomedical domain},
journal = {BMC Medical Informatics and Decision Making},
volume = {21},
year = {2021},
url = {https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-021-01621-8},
doi = {10.1186/s12911-021-01621-8}
}
"""

_DATASETNAME = "paramed"
_DISPLAYNAME = "ParaMed"

_DESCRIPTION = """\
NEJM is a Chinese-English parallel corpus crawled from the New England Journal of Medicine website.
English articles are distributed through https://www.nejm.org/ and Chinese articles are distributed through
http://nejmqianyan.cn/. The corpus contains all article pairs (around 2000 pairs) since 2011.
"""

_HOMEPAGE = "https://github.com/boxiangliu/ParaMed"

_LICENSE = 'Creative Commons Attribution 4.0 International'

# Both schemas are built from the same open-access tarball.
_URLs = {
    "source": "https://github.com/boxiangliu/ParaMed/blob/master/data/nejm-open-access.tar.gz?raw=true",
    "bigbio_t2t": "https://github.com/boxiangliu/ParaMed/blob/master/data/nejm-open-access.tar.gz?raw=true",
}
_SUPPORTED_TASKS = [Tasks.TRANSLATION]
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"

# Relative path of the aligned sentence files inside the extracted archive.
_DATA_DIR = "./processed_data/open_access/open_access"

72
class ParamedDataset(datasets.GeneratorBasedBuilder):
    """ParaMed (NEJM): a Chinese-English parallel corpus crawled from the New
    England Journal of Medicine website (~2000 article pairs since 2011).

    Provides two configurations: the ``source`` schema (default) and the
    BigBio text-to-text schema (``bigbio_t2t``). Examples are aligned
    Chinese/English sentence pairs read from ``nejm.<split>.zh`` /
    ``nejm.<split>.en`` files.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="paramed_source",
            version=SOURCE_VERSION,
            description="Paramed source schema",
            schema="source",
            subset_id="paramed",
        ),
        BigBioConfig(
            name="paramed_bigbio_t2t",
            version=BIGBIO_VERSION,
            description="Paramed BigBio schema",
            schema="bigbio_t2t",
            subset_id="paramed",
        ),
    ]

    DEFAULT_CONFIG_NAME = "paramed_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features matching the selected schema.

        Raises:
            ValueError: if the config carries an unknown schema name (the
                original code would die with an unbound-variable NameError).
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "document_id": datasets.Value("string"),
                    "text_1": datasets.Value("string"),
                    "text_2": datasets.Value("string"),
                    "text_1_name": datasets.Value("string"),
                    "text_2_name": datasets.Value("string"),
                }
            )
        elif self.config.schema == "bigbio_t2t":
            features = text2text_features
        else:
            raise ValueError("Unsupported schema: {}".format(self.config.schema))

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download and extract the corpus, then declare train/dev/test splits."""
        my_urls = _URLs[self.config.schema]
        data_dir = os.path.join(dl_manager.download_and_extract(my_urls), _DATA_DIR)
        # Was a stray debug print(data_dir); route through the module logger.
        logger.info("data_dir = %s", data_dir)

        # (HF split name, filename infix); the files are nejm.<infix>.{zh,en}.
        split_specs = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": data_dir,
                    "zh_file": os.path.join(data_dir, "nejm.{}.zh".format(infix)),
                    "en_file": os.path.join(data_dir, "nejm.{}.en".format(infix)),
                    "split": infix,
                },
            )
            for split_name, infix in split_specs
        ]

    def _generate_examples(self, filepath, zh_file, en_file, split):
        """Yield (key, example) pairs of aligned zh/en sentence lines.

        Fixes vs. the original:
        - files are opened with explicit ``encoding="utf-8"`` (the zh side is
          Chinese text; the platform default encoding is not reliable),
        - context managers guarantee both handles are closed even if the
          consumer abandons the generator or an error occurs (previously the
          handles leaked on any exception, and were closed only after full
          exhaustion),
        - the length check raises ValueError instead of a bare assert, which
          is stripped under ``python -O``.
        """
        logger.info("generating examples from = %s", filepath)
        with open(zh_file, "r", encoding="utf-8") as zf:
            zh_lines = zf.readlines()
        with open(en_file, "r", encoding="utf-8") as ef:
            en_lines = ef.readlines()

        if len(en_lines) != len(zh_lines):
            raise ValueError("Line mismatch")

        if self.config.schema == "source":
            for key, (zh_line, en_line) in enumerate(zip(zh_lines, en_lines)):
                yield key, {
                    "document_id": str(key),
                    "text_1": zh_line,
                    "text_2": en_line,
                    "text_1_name": "zh",
                    "text_2_name": "en",
                }

        elif self.config.schema == "bigbio_t2t":
            # "id" is 1-based while "document_id" is the 0-based pair index,
            # matching the original uid counter (incremented before yield).
            for key, (zh_line, en_line) in enumerate(zip(zh_lines, en_lines)):
                yield key, {
                    "id": str(key + 1),
                    "document_id": str(key),
                    "text_1": zh_line,
                    "text_2": en_line,
                    "text_1_name": "zh",
                    "text_2_name": "en",
                }