qanastek committed
Commit
d4f2789
1 Parent(s): 3b9a4d8

Create CAS.py

Files changed (1)
  1. CAS.py +202 -0
CAS.py ADDED
@@ -0,0 +1,202 @@
+ import os
+ import random
+
+ import datasets
+ import numpy as np
+
+ _CITATION = """\
+ @inproceedings{grabar-etal-2018-cas,
+     title = {{CAS}: {F}rench Corpus with Clinical Cases},
+     author = {Grabar, Natalia and Claveau, Vincent and Dalloux, Cl{\'e}ment},
+     year = 2018,
+     month = oct,
+     booktitle = {
+         Proceedings of the Ninth International Workshop on Health Text Mining and
+         Information Analysis
+     },
+     publisher = {Association for Computational Linguistics},
+     address = {Brussels, Belgium},
+     pages = {122--128},
+     doi = {10.18653/v1/W18-5614},
+     url = {https://aclanthology.org/W18-5614},
+     abstract = {
+         Textual corpora are extremely important for various NLP applications as
+         they provide information necessary for creating, setting and testing these
+         applications and the corresponding tools. They are also crucial for
+         designing reliable methods and reproducible results. Yet, in some areas,
+         such as the medical area, due to confidentiality or to ethical reasons, it
+         is complicated and even impossible to access textual data representative of
+         those produced in these areas. We propose the CAS corpus built with
+         clinical cases, such as they are reported in the published scientific
+         literature in French. We describe this corpus, currently containing over
+         397,000 word occurrences, and the existing linguistic and semantic
+         annotations.
+     }
+ }
+ """
+
+ _DESCRIPTION = """\
+ We manually annotated two corpora from the biomedical field. The ESSAI corpus \
+ contains clinical trial protocols in French. They were mainly obtained from the \
+ National Cancer Institute. The typical protocol consists of two parts: the \
+ summary of the trial, which indicates the purpose of the trial and the methods \
+ applied; and a detailed description of the trial with the inclusion and \
+ exclusion criteria. The CAS corpus contains clinical cases published in \
+ scientific literature and training material. They are published in different \
+ journals from French-speaking countries (France, Belgium, Switzerland, Canada, \
+ African countries, tropical countries) and are related to various medical \
+ specialties (cardiology, urology, oncology, obstetrics, pulmonology, \
+ gastro-enterology). The purpose of clinical cases is to describe clinical \
+ situations of patients. Hence, their content is close to the content of clinical \
+ narratives (description of diagnoses, treatments or procedures, evolution, \
+ family history, expected audience, etc.). In clinical cases, negation is \
+ frequently used to describe the patient's signs, symptoms, and diagnosis. \
+ Speculation is present as well, but less frequently.
+ This version only contains the annotated CAS corpus.
+ """
+
+ _HOMEPAGE = "https://clementdalloux.fr/?page_id=28"
+
+ _LICENSE = "Data User Agreement"
+
+ class CAS(datasets.GeneratorBasedBuilder):
+
+     DEFAULT_CONFIG_NAME = "source"
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="source", version="1.0.0", description="The CAS corpora"),
+     ]
+
+     def _info(self):
+
+         # POS tags follow the French TreeTagger tagset used to annotate the corpus.
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "document_id": datasets.Value("string"),
+                 "tokens": [datasets.Value("string")],
+                 "lemmas": [datasets.Value("string")],
+                 "pos_tags": [datasets.features.ClassLabel(
+                     names=['VER:ppre', 'VER:infi', 'VER:impf', 'VER:simp', 'PUN', 'DET:POS', 'ADV', 'DET:ART', 'PRO:DEM', 'INT', 'VER:futu', 'VER:subp', 'VER:cond', 'VER:pper', 'KON', 'NAM', 'PRO:IND', 'VER:con', 'PRP', 'SYM', 'SENT', 'PUN:cit', 'VER:pres', 'PRP:det', 'PRO:REL', 'PRO:PER', 'VER:subi', 'ADJ', 'NUM', 'NOM', 'ABR'],
+                 )],
+                 "label": datasets.features.ClassLabel(
+                     names=['negation', 'speculation'],
+                 ),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+
+         # The corpus is not downloadable: the caller must point data_dir at the
+         # directory containing CAS_neg.txt and CAS_spec.txt.
+         if self.config.data_dir is None:
+             raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")
+         else:
+             data_dir = self.config.data_dir
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "datadir": data_dir,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "datadir": data_dir,
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "datadir": data_dir,
+                     "split": "test",
+                 },
+             ),
+         ]
+
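+     # Each corpus file is tab-separated; the first five columns are the
+     # document id, token id, token, lemma, and POS tag.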
+     def _generate_examples(self, datadir, split):
+
+         all_res = []
+
+         key = 0
+
+         for file in ["CAS_neg.txt", "CAS_spec.txt"]:
+
+             label = "negation" if "neg" in file else "speculation"
+             id_docs = []
+             id_words = []
+             words = []
+             lemmas = []
+             POS_tags = []
+
+             with open(os.path.join(datadir, file)) as f:
+
+                 for line in f:
+
+                     # Strip the trailing newline so the POS tag in column 5 is clean.
+                     fields = line.rstrip("\n").split("\t")
+                     if len(fields) < 5:
+                         continue
+
+                     id_doc, id_word, word, lemma, tag = fields[0:5]
+
+                     id_docs.append(id_doc)
+                     id_words.append(id_word)
+                     words.append(word)
+                     lemmas.append(lemma)
+                     POS_tags.append(tag)
+
+             dic = {
+                 "id_docs": np.array(list(map(int, id_docs))),
+                 "id_words": id_words,
+                 "words": words,
+                 "lemmas": lemmas,
+                 "POS_tags": POS_tags,
+             }
+
+             # Group the token-level rows back into one example per document.
+             for doc_id in set(dic["id_docs"]):
+
+                 indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                 tokens = [dic["words"][id] for id in indexes]
+                 text_lemmas = [dic["lemmas"][id] for id in indexes]
+                 pos_tags = [dic["POS_tags"][id] for id in indexes]
+
+                 all_res.append({
+                     "id": str(key),
+                     "document_id": str(doc_id),
+                     "tokens": tokens,
+                     "lemmas": text_lemmas,
+                     "pos_tags": pos_tags,
+                     "label": label,
+                 })
+
+                 key += 1
+
+         ids = [r["id"] for r in all_res]
+
+         # Fixed seed so every split call shuffles the ids identically.
+         random.seed(4)
+         random.shuffle(ids)
+         random.shuffle(ids)
+         random.shuffle(ids)
+
+         # 70% train / 10% validation / 20% test.
+         train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
+
+         if split == "train":
+             allowed_ids = list(train)
+         elif split == "validation":
+             allowed_ids = list(validation)
+         elif split == "test":
+             allowed_ids = list(test)
+
+         for r in all_res:
+             if r["id"] in allowed_ids:
+                 yield r["id"], r
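
A minimal usage sketch, not part of the commit (assumptions: the script is saved locally as CAS.py, and CAS_neg.txt and CAS_spec.txt sit in a local directory; /path/to/CAS is a placeholder path):

    import datasets

    # data_dir is mandatory: the corpus is distributed under a Data User
    # Agreement, so the script cannot download the files itself.
    dataset = datasets.load_dataset("CAS.py", data_dir="/path/to/CAS")

    print(dataset["train"][0])  # id, document_id, tokens, lemmas, pos_tags, label

The splits are deterministic: the example ids are shuffled with a fixed seed, then cut 70/10/20 into train, validation, and test, so repeated loads yield the same partition.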