qanastek committed on
Commit 4195ef1
1 Parent(s): 983bf43

Keep only the largest layer
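The title refers to QUAERO's nested ("layered") entity annotations: when two spans overlap, only the longest one is kept. Below is a minimal sketch of that selection rule on hypothetical toy spans; the committed implementation lives in convert_to_hf_format in the diff, and differs in that it compares each span only against the spans that follow it in the list, whereas this sketch compares symmetrically against all other spans.

# Sketch of the "keep only the largest layer" rule, on hypothetical spans.
# Offsets are character positions, as in the committed code.
spans = [
    {"start": 0, "end": 22, "label": "DISO", "text": "infection de l'oreille"},
    {"start": 15, "end": 22, "label": "ANAT", "text": "oreille"},  # nested inside the DISO span
]

selected = []
for j in spans:
    range_j = set(range(j["start"], j["end"]))
    # Drop a span if any other span overlaps it and is strictly longer.
    overlapped_by_longer = any(
        range_j & set(range(k["start"], k["end"])) and len(range_j) < k["end"] - k["start"]
        for k in spans
        if k is not j
    )
    if not overlapped_by_longer:
        selected.append(j)

print([s["label"] for s in selected])  # ['DISO'] -- the nested ANAT span is dropped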

Files changed (1)
  1. QUAERO.py +264 -223
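For the ner_tags feature, the net effect is a switch from the old flat multilabel scheme, where overlapping labels were joined with "_", to IOB2 tags over the surviving longest spans. The values below are illustrative only, not taken from the corpus.

# Hypothetical tagging of "infection de l'oreille" under both schemes.
tokens = ["infection", "de", "l'oreille"]

# Before this commit: one flat tag per token, overlaps merged into 'ANAT_DISO' etc.
old_tags = ["DISO", "DISO", "ANAT_DISO"]

# After this commit: the nested ANAT span is dropped and B-/I- prefixes mark span boundaries.
new_tags = ["B-DISO", "I-DISO", "I-DISO"]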
QUAERO.py CHANGED
@@ -13,12 +13,12 @@ logger = datasets.logging.get_logger(__name__)
 _CITATION = """
 @InProceedings{neveol14quaero,
   author = {Névéol, Aurélie and Grouin, Cyril and Leixa, Jeremy
-  and Rosset, Sophie and Zweigenbaum, Pierre},
+  and Rosset, Sophie and Zweigenbaum, Pierre},
   title = {The {QUAERO} {French} Medical Corpus: A Ressource for
-  Medical Entity Recognition and Normalization},
+  Medical Entity Recognition and Normalization},
   OPTbooktitle = {Proceedings of the Fourth Workshop on Building
-  and Evaluating Ressources for Health and Biomedical
-  Text Processing},
+  and Evaluating Ressources for Health and Biomedical
+  Text Processing},
   booktitle = {Proc of BioTextMining Work},
   OPTseries = {BioTxtM 2014},
   year = {2014},
@@ -52,225 +52,266 @@ BioCreative IV track 1 - BioC: The BioCreative Interoperability Initiative, 2013
 Please note that the original version of the QUAERO corpus distributed in the CLEF eHealth challenge 2015 and 2016 came in the BRAT stand alone format. It was distributed with the CLEF eHealth evaluation tool. This original distribution of the QUAERO French Medical corpus is available separately from https://quaerofrenchmed.limsi.fr
 All questions regarding the task or data should be addressed to [email protected]
 """
-
+
 _LABELS_BASE = ['DISO', 'DEVI', 'CHEM', 'GEOG', 'OBJC', 'PHEN', 'PHYS', 'LIVB', 'PROC', 'ANAT']
 
 class QUAERO(datasets.GeneratorBasedBuilder):
- """QUAERO dataset."""
60
-
61
- VERSION = datasets.Version("1.0.0")
62
-
63
- BUILDER_CONFIGS = [
64
- datasets.BuilderConfig(name="emea", version=VERSION, description="The EMEA QUAERO corpora"),
65
- datasets.BuilderConfig(name="medline", version=VERSION, description="The MEDLINE QUAERO corpora"),
66
- ]
67
-
68
- DEFAULT_CONFIG_NAME = "emea"
69
-
70
- def _info(self):
71
-
72
- if self.config.name == "emea":
73
-
74
- return datasets.DatasetInfo(
75
- description=_DESCRIPTION,
76
- features=datasets.Features(
77
- {
78
- "id": datasets.Value("string"),
79
- "document_id": datasets.Value("string"),
80
- "tokens": datasets.Sequence(datasets.Value("string")),
81
- "ner_tags": datasets.Sequence(
82
- datasets.features.ClassLabel(
83
- names = ['O', 'LIVB', 'PROC', 'ANAT', 'DEVI', 'CHEM', 'GEOG', 'PHYS', 'PHEN', 'DISO', 'OBJC', 'CHEM_PHYS', 'ANAT_LIVB', 'ANAT_PROC', 'ANAT_DISO', 'DISO_PHYS', 'CHEM_OBJC', 'CHEM_LIVB', 'LIVB_PHYS', 'CHEM_PROC', 'PHEN_PROC', 'OBJC_PHEN', 'ANAT_CHEM', 'PHEN_PHYS', 'GEOG_LIVB', 'DISO_LIVB', 'CHEM_DISO', 'DISO_PROC', 'ANAT_PHYS', 'CHEM_DISO_PHYS', 'ANAT_DISO_PHYS', 'DISO_LIVB_PHYS'],
84
- )
85
- ),
86
- }
87
- ),
88
- supervised_keys=None,
89
- homepage="https://quaerofrenchmed.limsi.fr/",
90
- citation=_CITATION,
91
- license=_LICENSE,
92
- )
93
-
94
- elif self.config.name == "medline":
95
- return datasets.DatasetInfo(
96
- description=_DESCRIPTION,
97
- features=datasets.Features(
98
- {
99
- "id": datasets.Value("string"),
100
- "document_id": datasets.Value("string"),
101
- "tokens": datasets.Sequence(datasets.Value("string")),
102
- "ner_tags": datasets.Sequence(
103
- datasets.features.ClassLabel(
104
- names = ['O', 'LIVB', 'PROC', 'ANAT', 'DEVI', 'CHEM', 'GEOG', 'PHYS', 'PHEN', 'DISO', 'OBJC', 'CHEM_DEVI', 'CHEM_PHYS', 'ANAT_LIVB', 'ANAT_PROC', 'DEVI_OBJC', 'ANAT_DEVI', 'ANAT_PHEN', 'PHYS_PROC', 'ANAT_DISO', 'DEVI_DISO', 'DISO_PHYS', 'CHEM_OBJC', 'CHEM_LIVB', 'LIVB_PHYS', 'CHEM_PROC', 'LIVB_OBJC', 'PHEN_PROC', 'DISO_OBJC', 'OBJC_PHEN', 'LIVB_PROC', 'ANAT_CHEM', 'ANAT_OBJC', 'PHEN_PHYS', 'GEOG_LIVB', 'DEVI_PROC', 'DEVI_PHEN', 'DISO_LIVB', 'DEVI_PHYS', 'CHEM_PHEN', 'DISO_PHEN', 'CHEM_DISO', 'OBJC_PROC', 'DISO_PROC', 'ANAT_PHYS', 'ANAT_PHYS_PROC', 'CHEM_DISO_LIVB', 'ANAT_DISO_PHYS', 'ANAT_CHEM_PROC', 'ANAT_DISO_LIVB', 'ANAT_DEVI_PROC', 'DISO_PHEN_PHYS', 'DISO_LIVB_PHYS', 'DISO_PHEN_PROC', 'ANAT_PHEN_PROC'],
105
- )
106
- ),
107
- }
108
- ),
109
- supervised_keys=None,
110
- homepage="https://quaerofrenchmed.limsi.fr/",
111
- citation=_CITATION,
112
- license=_LICENSE,
113
- )
114
-
115
- def _split_generators(self, dl_manager):
116
-
117
- return [
118
- datasets.SplitGenerator(
119
- name=datasets.Split.TRAIN,
120
- gen_kwargs={
121
- "split": "train",
122
- }
123
- ),
124
- datasets.SplitGenerator(
125
- name=datasets.Split.VALIDATION,
126
- gen_kwargs={
127
- "split": "validation",
128
- }
129
- ),
130
- datasets.SplitGenerator(
131
- name=datasets.Split.TEST,
132
- gen_kwargs={
133
- "split": "test",
134
- }
135
- ),
136
- ]
137
-
138
-    def split_sentences(self, json_o):
-        """
-        Split the corpus into shorter sentences so that they fit into BERT-like models.
-
-        The split is done on the period character "."
-        """
-
-        final_json = []
-
-        for i in json_o:
-
-            ind_punc = [index for index, value in enumerate(i['tokens']) if value=='.'] + [len(i['tokens'])]
-            # ind_punc = [index for index, value in enumerate(i['tokens']) if value=='.' and not str(i['tokens'][index-1]).isnumeric()]
-
-            for index, value in enumerate(ind_punc):
-
-                if index==0:
-                    final_json.append({'id': i['id']+'_'+str(index),
-                                       'document_id': i['id']+'_'+str(index),
-                                       'ner_tags': i['ner_tags'][:value+1],
-                                       'tokens': i['tokens'][:value+1]
-                                       })
-                else:
-                    prev_value = ind_punc[index-1]
-                    final_json.append({'id': i['id']+'_'+str(index),
-                                       'document_id': i['document_id']+'_'+str(index),
-                                       'ner_tags': i['ner_tags'][prev_value+1:value+1],
-                                       'tokens': i['tokens'][prev_value+1:value+1]
-                                       })
-
-        return final_json
-
-    def convert_to_prodigy(self, json_object):
-
-        new_json = []
-
-        for ex in json_object:
-
-            tokenized_text = ex['text'].split()
-
-            list_spans = []
-
-            for a in ex['text_bound_annotations']:
-
-                for o in range(len(a['offsets'])):
-
-                    offset_start = a['offsets'][o][0]
-                    offset_end = a['offsets'][o][1]
-
-                    nb_tokens_annot = len(a['text'][o].split())
-
-                    nb_tokens_before_annot = len(ex['text'][:offset_start].split())
-                    nb_tokens_after_annot = len(ex['text'][offset_end:].split())
-
-                    token_start = nb_tokens_before_annot
-                    token_end = token_start + nb_tokens_annot - 1
-
-                    list_spans.append({
-                        'start': offset_start,
-                        'end': offset_end,
-                        'token_start': token_start,
-                        'token_end': token_end,
-                        'label': a['type'],
-                        'id': a['id'],
-                        'text': a['text'][o],
-                    })
-
-            res = {
-                'id': ex['id'],
-                'document_id': ex['document_id'],
-                'text': ex['text'],
-                'tokens': tokenized_text,
-                'spans': list_spans
-            }
-
-            new_json.append(res)
-
-        return new_json
-
-    def convert_to_hf_format(self, json_object, list_label):
-        """
-        The format supports multilabel annotations by joining overlapping labels with "_".
-        """
-
-        dict_out = []
-
-        for i in json_object:
-
-            nb_tokens = len(i['tokens'])
-
-            ner_tags = ['O']*nb_tokens
-
-            if 'spans' in i:
-
-                for j in i['spans']:
-
-                    for x in range(j['token_start'], j['token_end']+1, 1):
-
-                        if j['label'] in list_label:
-
-                            if ner_tags[x] == 'O':
-                                ner_tags[x] = j['label']
-                            else:
-                                # To keep only one label per token, comment out the line below and use `pass` instead
-                                # pass
-                                ner_tags[x] = '_'.join(sorted(list(set(ner_tags[x].split('_')+[j['label']]))))
-
-            dict_out.append({
-                'id': i['id'],
-                'document_id': i['document_id'],
-                "ner_tags": ner_tags,
-                "tokens": i['tokens'],
-            })
-
-        return dict_out
-
-    def _generate_examples(self, split):
-
-        ds = load_dataset("bigbio/quaero", f"quaero_{self.config.name}_source")[split]
-
-        if self.config.name == "emea":
-
-            ds = self.split_sentences(
-                self.convert_to_hf_format(
-                    self.convert_to_prodigy(ds),
-                    _LABELS_BASE,
-                )
-            )
-
-        else:
-
-            ds = self.convert_to_hf_format(
-                self.convert_to_prodigy(ds),
-                _LABELS_BASE,
-            )
-
-        for d in ds:
-            yield d["id"], d
+ """QUAERO dataset."""
60
+
61
+ VERSION = datasets.Version("1.0.0")
62
+
63
+ BUILDER_CONFIGS = [
64
+ datasets.BuilderConfig(name="emea", version=VERSION, description="The EMEA QUAERO corpora"),
65
+ datasets.BuilderConfig(name="medline", version=VERSION, description="The MEDLINE QUAERO corpora"),
66
+ ]
67
+
68
+ DEFAULT_CONFIG_NAME = "emea"
69
+
70
+ def _info(self):
71
+
72
+ if self.config.name == "emea":
73
+
74
+ return datasets.DatasetInfo(
75
+ description=_DESCRIPTION,
76
+ features=datasets.Features(
77
+ {
78
+ "id": datasets.Value("string"),
79
+ "document_id": datasets.Value("string"),
80
+ "tokens": datasets.Sequence(datasets.Value("string")),
81
+ "ner_tags": datasets.Sequence(
82
+ datasets.features.ClassLabel(
83
+ names = ['O', 'B-LIVB', 'I-LIVB', 'B-PROC', 'I-PROC', 'B-ANAT', 'I-ANAT', 'B-DEVI', 'I-DEVI', 'B-CHEM', 'I-CHEM', 'B-GEOG', 'I-GEOG', 'B-PHYS', 'I-PHYS', 'B-PHEN', 'I-PHEN', 'B-DISO', 'I-DISO', 'B-OBJC', 'I-OBJC'],
84
+ )
85
+ ),
86
+ }
87
+ ),
88
+ supervised_keys=None,
89
+ homepage="https://quaerofrenchmed.limsi.fr/",
90
+ citation=_CITATION,
91
+ license=_LICENSE,
92
+ )
93
+
94
+ elif self.config.name == "medline":
95
+ return datasets.DatasetInfo(
96
+ description=_DESCRIPTION,
97
+ features=datasets.Features(
98
+ {
99
+ "id": datasets.Value("string"),
100
+ "document_id": datasets.Value("string"),
101
+ "tokens": datasets.Sequence(datasets.Value("string")),
102
+ "ner_tags": datasets.Sequence(
103
+ datasets.features.ClassLabel(
104
+ names = ['O', 'B-LIVB', 'I-LIVB', 'B-PROC', 'I-PROC', 'B-ANAT', 'I-ANAT', 'B-DEVI', 'I-DEVI', 'B-CHEM', 'I-CHEM', 'B-GEOG', 'I-GEOG', 'B-PHYS', 'I-PHYS', 'B-PHEN', 'I-PHEN', 'B-DISO', 'I-DISO', 'B-OBJC', 'I-OBJC'],
105
+ )
106
+ ),
107
+ }
108
+ ),
109
+ supervised_keys=None,
110
+ homepage="https://quaerofrenchmed.limsi.fr/",
111
+ citation=_CITATION,
112
+ license=_LICENSE,
113
+ )
114
+
115
+ def _split_generators(self, dl_manager):
116
+
117
+ return [
118
+ datasets.SplitGenerator(
119
+ name=datasets.Split.TRAIN,
120
+ gen_kwargs={
121
+ "split": "train",
122
+ }
123
+ ),
124
+ datasets.SplitGenerator(
125
+ name=datasets.Split.VALIDATION,
126
+ gen_kwargs={
127
+ "split": "validation",
128
+ }
129
+ ),
130
+ datasets.SplitGenerator(
131
+ name=datasets.Split.TEST,
132
+ gen_kwargs={
133
+ "split": "test",
134
+ }
135
+ ),
136
+ ]
137
+
138
+    def split_sentences(self, json_o):
+        """
+        Split the corpus into shorter sentences so that they fit into BERT-like models.
+
+        The split is done on the period character "."
+        """
+
+        final_json = []
+
+        for i in json_o:
+
+            ind_punc = [index for index, value in enumerate(i['tokens']) if value=='.'] + [len(i['tokens'])]
+            # ind_punc = [index for index, value in enumerate(i['tokens']) if value=='.' and not str(i['tokens'][index-1]).isnumeric()]
+
+            for index, value in enumerate(ind_punc):
+
+                if index==0:
+                    final_json.append({'id': i['id']+'_'+str(index),
+                                       'document_id': i['id']+'_'+str(index),
+                                       'ner_tags': i['ner_tags'][:value+1],
+                                       'tokens': i['tokens'][:value+1]
+                                       })
+                else:
+                    prev_value = ind_punc[index-1]
+                    final_json.append({'id': i['id']+'_'+str(index),
+                                       'document_id': i['document_id']+'_'+str(index),
+                                       'ner_tags': i['ner_tags'][prev_value+1:value+1],
+                                       'tokens': i['tokens'][prev_value+1:value+1]
+                                       })
+
+        return final_json
+
+    def convert_to_prodigy(self, json_object):
+
+        new_json = []
+
+        for ex in json_object:
+
+            tokenized_text = ex['text'].split()
+
+            list_spans = []
+
+            for a in ex['text_bound_annotations']:
+
+                for o in range(len(a['offsets'])):
+
+                    offset_start = a['offsets'][o][0]
+                    offset_end = a['offsets'][o][1]
+
+                    nb_tokens_annot = len(a['text'][o].split())
+
+                    nb_tokens_before_annot = len(ex['text'][:offset_start].split())
+                    nb_tokens_after_annot = len(ex['text'][offset_end:].split())
+
+                    token_start = nb_tokens_before_annot
+                    token_end = token_start + nb_tokens_annot - 1
+
+                    list_spans.append({
+                        'start': offset_start,
+                        'end': offset_end,
+                        'token_start': token_start,
+                        'token_end': token_end,
+                        'label': a['type'],
+                        'id': a['id'],
+                        'text': a['text'][o],
+                    })
+
+            res = {
+                'id': ex['id'],
+                'document_id': ex['document_id'],
+                'text': ex['text'],
+                'tokens': tokenized_text,
+                'spans': list_spans
+            }
+
+            new_json.append(res)
+
+        return new_json
+
+    def convert_to_hf_format(self, json_object, list_label):
+        """
+        Convert the annotations to the IOB2 format.
+        Nested annotations are resolved by keeping only the longest span.
+        """
+
+        dict_out = []
+
+        for i in json_object:
+
+            # Filter the annotations to keep only the longest annotated spans when there are nested annotations
+            selected_annotations = []
+
+            if 'spans' in i:
+
+                # print(len(i['spans']))
+
+                for idx_j, j in enumerate(i['spans']):
+
+                    len_j = int(j['end'])-int(j['start'])
+                    range_j = [l for l in range(int(j['start']),int(j['end']),1)]
+
+                    keep = True
+
+                    for idx_k, k in enumerate(i['spans'][idx_j+1:]):
+
+                        len_k = int(k['end'])-int(k['start'])
+                        range_k = [l for l in range(int(k['start']),int(k['end']),1)]
+
+                        inter = list(set(range_k).intersection(set(range_j)))
+                        if len(inter) > 0 and len_j < len_k:
+                            keep = False
+
+                    if keep:
+                        selected_annotations.append(j)
+
+            # Create a list of labels + annotation ids to separate the different annotations and prepare the IOB2 format
+            nb_tokens = len(i['tokens'])
+            ner_tags = ['O']*nb_tokens
+
+            for slct in selected_annotations:
+
+                for x in range(slct['token_start'], slct['token_end']+1, 1):
+
+                    if slct['label'] in list_label:
+
+                        if ner_tags[x] == 'O':
+                            ner_tags[x] = slct['label']+'-'+slct['id']
+
+            # Make the IOB2 format
+            ner_tags_IOB2 = []
+            for idx_l, label in enumerate(ner_tags):
+                # print(label)
+
+                if label == 'O':
+                    ner_tags_IOB2.append('O')
+                else:
+                    current_label = label.split('-')[0]
+                    current_id = label.split('-')[1]
+                    if idx_l == 0:
+                        ner_tags_IOB2.append('B-'+current_label)
+                    elif current_label in ner_tags[idx_l-1]:
+                        if current_id == ner_tags[idx_l-1].split('-')[1]:
+                            ner_tags_IOB2.append('I-'+current_label)
+                        else:
+                            ner_tags_IOB2.append('B-'+current_label)
+                    else:
+                        ner_tags_IOB2.append('B-'+current_label)
+
+            # print(ner_tags_IOB2)
+            dict_out.append({
+                'id': i['id'],
+                'document_id': i['document_id'],
+                "ner_tags": ner_tags_IOB2,
+                "tokens": i['tokens'],
+            })
+
+        return dict_out
+
+    def _generate_examples(self, split):
+
+        ds = load_dataset("bigbio/quaero", f"quaero_{self.config.name}_source")[split]
+
+        if self.config.name == "emea":
+
+            ds = self.split_sentences(
+                self.convert_to_hf_format(
+                    self.convert_to_prodigy(ds),
+                    _LABELS_BASE,
+                )
+            )
+
+        else:
+
+            ds = self.convert_to_hf_format(
+                self.convert_to_prodigy(ds),
+                _LABELS_BASE,
+            )
+
+        for d in ds:
+            yield d["id"], d
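A usage sketch for the updated script. The repository id qanastek/QUAERO is an assumption based on the committer's handle; point load_dataset at wherever this script is actually hosted.

from datasets import load_dataset

# Assumed repo id (unverified); the script defines the "emea" and "medline" configs.
dataset = load_dataset("qanastek/QUAERO", "emea")

example = dataset["train"][0]
label_names = dataset["train"].features["ner_tags"].feature.names

# Tokens paired with their IOB2 tags, e.g. ("infection", "B-DISO").
for token, tag_id in zip(example["tokens"], example["ner_tags"]):
    print(token, label_names[tag_id])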