qanastek committed on
Commit
d32d58e
1 Parent(s): 297b7fb

Create E3C.py

Files changed (1)
  1. E3C.py +302 -0
E3C.py ADDED
@@ -0,0 +1,302 @@
# pip install beautifulsoup4 lxml syntok

import os
import random

import datasets

import numpy as np
from bs4 import BeautifulSoup, ResultSet
from syntok.tokenizer import Tokenizer

tokenizer = Tokenizer()

_CITATION = """\
@report{Magnini2021, \
author = {Bernardo Magnini and Begoña Altuna and Alberto Lavelli and Manuela Speranza \
and Roberto Zanoli and Fondazione Bruno Kessler}, \
keywords = {Clinical data,clinical entities,corpus,multilingual,temporal information}, \
title = {The E3C Project: \
European Clinical Case Corpus El proyecto E3C: European Clinical Case Corpus}, \
url = {https://uts.nlm.nih.gov/uts/umls/home}, \
year = {2021}, \
}
"""

_DESCRIPTION = """\
E3C is a freely available multilingual corpus (English, French, Italian, Spanish, and Basque) \
of semantically annotated clinical narratives to allow for the linguistic analysis, benchmarking, \
and training of information extraction systems. It consists of two types of annotations: \
(i) clinical entities (e.g., pathologies), (ii) temporal information and factuality (e.g., events). \
Researchers can use the benchmark training and test splits of our corpus to develop and test \
their own models.
"""

_URL = "https://github.com/hltfbk/E3C-Corpus/archive/refs/tags/v2.0.0.zip"

_LANGUAGES = ["English", "Spanish", "Basque", "French", "Italian"]

class E3C(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=f"{lang}_clinical",
            version="1.0.0",
            description=f"The {lang} subset of the E3C corpus (clinical entity annotations)",
        )
        for lang in _LANGUAGES
    ]

    BUILDER_CONFIGS += [
        datasets.BuilderConfig(
            name=f"{lang}_temporal",
            version="1.0.0",
            description=f"The {lang} subset of the E3C corpus (temporal information annotations)",
        )
        for lang in _LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = "French_clinical"

    def _info(self):

        if self.config.name == "default":
            self.config.name = self.DEFAULT_CONFIG_NAME

        if self.config.name.find("clinical") != -1:
            names = ["O", "B-CLINENTITY", "I-CLINENTITY"]
        elif self.config.name.find("temporal") != -1:
            names = [
                "O",
                "B-EVENT", "B-ACTOR", "B-BODYPART", "B-TIMEX3", "B-RML",
                "I-EVENT", "I-ACTOR", "I-BODYPART", "I-TIMEX3", "I-RML",
            ]

        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=names,
                    ),
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            citation=_CITATION,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):

        data_dir = dl_manager.download_and_extract(_URL)

        if self.config.name.find("clinical") != -1:

            # Clinical configs: layer2 is split into train/validation,
            # while layer1 is kept as the test set.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name.replace("_clinical", ""), "layer2"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name.replace("_clinical", ""), "layer2"),
                        "split": "validation",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name.replace("_clinical", ""), "layer1"),
                        "split": "test",
                    },
                ),
            ]

        elif self.config.name.find("temporal") != -1:

            # Temporal configs: all three splits are drawn from layer1 and
            # partitioned later in _generate_examples.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name.replace("_temporal", ""), "layer1"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name.replace("_temporal", ""), "layer1"),
                        "split": "validation",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "E3C-Corpus-2.0.0/data_annotation", self.config.name.replace("_temporal", ""), "layer1"),
                        "split": "test",
                    },
                ),
            ]

    @staticmethod
    def get_annotations(entities: ResultSet, text: str) -> list:
        # Map each XML annotation to [begin_offset, end_offset, covered_text].
        return [
            [
                int(entity.get("begin")),
                int(entity.get("end")),
                text[int(entity.get("begin")) : int(entity.get("end"))],
            ]
            for entity in entities
        ]

    def get_clinical_annotations(self, entities: ResultSet, text: str) -> list:
        # Same as get_annotations, plus the concept identifier stored in "entityID".
        return [
            [
                int(entity.get("begin")),
                int(entity.get("end")),
                text[int(entity.get("begin")) : int(entity.get("end"))],
                entity.get("entityID"),
            ]
            for entity in entities
        ]

    def get_parsed_data(self, filepath: str):
        # Walk the annotation directory and parse every UIMA CAS XML file.
        for root, _, files in os.walk(filepath):

            for file in files:

                with open(os.path.join(root, file)) as soup_file:

                    soup = BeautifulSoup(soup_file, "xml")
                    text = soup.find("cas:Sofa").get("sofaString")

                    yield {
                        "CLINENTITY": self.get_clinical_annotations(soup.find_all("custom:CLINENTITY"), text),
                        "EVENT": self.get_annotations(soup.find_all("custom:EVENT"), text),
                        "ACTOR": self.get_annotations(soup.find_all("custom:ACTOR"), text),
                        "BODYPART": self.get_annotations(soup.find_all("custom:BODYPART"), text),
                        "TIMEX3": self.get_annotations(soup.find_all("custom:TIMEX3"), text),
                        "RML": self.get_annotations(soup.find_all("custom:RML"), text),
                        "SENTENCE": self.get_annotations(soup.find_all("type4:Sentence"), text),
                        "TOKENS": self.get_annotations(soup.find_all("type4:Token"), text),
                    }

    def _generate_examples(self, filepath, split):

        all_res = []

        key = 0

        parsed_content = self.get_parsed_data(filepath)

        for content in parsed_content:

            for sentence in content["SENTENCE"]:

                # Re-tokenize the sentence with syntok and project the token
                # offsets back into document coordinates.
                tokens = [
                    (
                        token.offset + sentence[0],
                        token.offset + sentence[0] + len(token.value),
                        token.value,
                    )
                    for token in list(tokenizer.tokenize(sentence[-1]))
                ]

                filtered_tokens = list(
                    filter(
                        lambda token: token[0] >= sentence[0] and token[1] <= sentence[1],
                        tokens,
                    )
                )

                tokens_offsets = [
                    [token[0] - sentence[0], token[1] - sentence[0]] for token in filtered_tokens
                ]

                clinical_labels = ["O"] * len(filtered_tokens)
                clinical_cuid = ["CUI_LESS"] * len(filtered_tokens)
                temporal_information_labels = ["O"] * len(filtered_tokens)

                # Assign IOB2 tags: a token belongs to an entity when its span
                # falls entirely inside the entity span.
                for entity_type in ["CLINENTITY", "EVENT", "ACTOR", "BODYPART", "TIMEX3", "RML"]:

                    if len(content[entity_type]) != 0:

                        for entities in list(content[entity_type]):

                            annotated_tokens = [
                                idx_token
                                for idx_token, token in enumerate(filtered_tokens)
                                if token[0] >= entities[0] and token[1] <= entities[1]
                            ]

                            for idx_token in annotated_tokens:

                                if entity_type == "CLINENTITY":
                                    if idx_token == annotated_tokens[0]:
                                        clinical_labels[idx_token] = f"B-{entity_type}"
                                    else:
                                        clinical_labels[idx_token] = f"I-{entity_type}"
                                    clinical_cuid[idx_token] = entities[-1]
                                else:
                                    if idx_token == annotated_tokens[0]:
                                        temporal_information_labels[idx_token] = f"B-{entity_type}"
                                    else:
                                        temporal_information_labels[idx_token] = f"I-{entity_type}"

                if self.config.name.find("clinical") != -1:
                    _labels = clinical_labels
                elif self.config.name.find("temporal") != -1:
                    _labels = temporal_information_labels

                all_res.append({
                    "id": str(key),
                    "text": sentence[-1],
                    "tokens": list(map(lambda token: token[2], filtered_tokens)),
                    "ner_tags": _labels,
                })

                key += 1

        if self.config.name.find("clinical") != -1:

            if split != "test":

                # Deterministically shuffle the layer2 sentences and split them
                # into train/validation; the layer1 test split is yielded as-is.
                ids = [r["id"] for r in all_res]

                random.seed(4)
                random.shuffle(ids)
                random.shuffle(ids)
                random.shuffle(ids)

                train, validation = np.split(ids, [int(len(ids) * 0.8738)])

                if split == "train":
                    allowed_ids = list(train)
                elif split == "validation":
                    allowed_ids = list(validation)

                for r in all_res:
                    if r["id"] in allowed_ids:
                        yield r["id"], r
            else:

                for r in all_res:
                    yield r["id"], r

        elif self.config.name.find("temporal") != -1:

            # Temporal configs: deterministic 70/10/20 train/validation/test split
            # over the layer1 sentences.
            ids = [r["id"] for r in all_res]

            random.seed(4)
            random.shuffle(ids)
            random.shuffle(ids)
            random.shuffle(ids)

            train, validation, test = np.split(ids, [int(len(ids) * 0.70), int(len(ids) * 0.80)])

            if split == "train":
                allowed_ids = list(train)
            elif split == "validation":
                allowed_ids = list(validation)
            elif split == "test":
                allowed_ids = list(test)

            for r in all_res:
                if r["id"] in allowed_ids:
                    yield r["id"], r
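
For reference, a minimal usage sketch of the script above, assuming it has been saved locally as E3C.py (the local path and the inspected fields are illustrative, not part of this commit):

# Load the French clinical-entity config from the local loading script.
from datasets import load_dataset

dataset = load_dataset("E3C.py", "French_clinical")

# Each example is one sentence with IOB2-encoded ner_tags.
print(dataset["train"][0]["tokens"])
print(dataset["train"][0]["ner_tags"])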