|
"""OAB Exams dataset""" |
|
|
|
import datasets |
|
import pandas as pd |
|
import re |
|
|
|
# BibTeX entry for the paper that introduced this corpus (arXiv:1712.05128).
_CITATION = """@misc{delfino2017passing,
title={Passing the Brazilian OAB Exam: data preparation and some experiments},
author={Pedro Delfino and Bruno Cuconato and Edward Hermann Haeusler and Alexandre Rademaker},
year={2017},
eprint={1712.05128},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

# Human-readable summary shown on the dataset card.
_DESCRIPTION = """
This repository contains the bar exams from the Ordem dos Advogados do Brasil (OAB) in Brazil from 2010 to 2018.
In Brazil, all legal professionals must demonstrate their knowledge of the law and its application by passing the OAB exams, the national bar exams. The OAB exams therefore provide an excellent benchmark for the performance of legal information systems since passing the exam would arguably signal that the system has acquired capacity of legal reasoning comparable to that of a human lawyer.
"""

# Project homepage; the same repository hosts the raw exam files.
_HOMEPAGE="https://github.com/legal-nlp/oab-exams"

# Raw exam text files are fetched straight from the GitHub repository.
BASE_URL = "https://raw.githubusercontent.com/legal-nlp/oab-exams/master/official/raw/"

# One plain-text file per exam, named "<year>-<edition>.txt".
# NOTE(review): the "a" suffix (e.g. 2012-06a, 2016-20a) appears to mark a
# second application of the same edition — TODO confirm against the repo.
FILES = [
    '2010-01.txt',
    '2010-02.txt',
    '2011-03.txt',
    '2011-04.txt',
    '2011-05.txt',
    '2012-06.txt',
    '2012-06a.txt',
    '2012-07.txt',
    '2012-08.txt',
    '2012-09.txt',
    '2013-10.txt',
    '2013-11.txt',
    '2013-12.txt',
    '2014-13.txt',
    '2014-14.txt',
    '2014-15.txt',
    '2015-16.txt',
    '2015-17.txt',
    '2015-18.txt',
    '2016-19.txt',
    '2016-20.txt',
    '2016-20a.txt',
    '2016-21.txt',
    '2017-22.txt',
    '2017-23.txt',
    '2017-24.txt',
    '2018-25.txt'
]
|
|
|
def join_lines(lines):
    """Collapse raw text lines into a single normalized string.

    Runs of consecutive non-blank lines are joined with single spaces;
    one or more blank lines collapse to a single ``"\\n"`` (paragraph
    break). Leading/trailing whitespace of the result is removed.

    Fixes a defect in the previous implementation where two or more
    consecutive blank lines left an empty string in the accumulator,
    producing a stray space after the paragraph break ("a\\n b").

    Args:
        lines: iterable of raw text lines (trailing newlines allowed).

    Returns:
        The joined, stripped text; "" for empty/all-blank input.
    """
    paragraphs = []
    current = []  # words of the paragraph being accumulated
    for line in lines:
        stripped = line.strip()
        if stripped:
            current.append(stripped)
        elif current:
            # Blank line ends the current paragraph; extra blanks are ignored.
            paragraphs.append(" ".join(current))
            current = []
    if current:
        paragraphs.append(" ".join(current))
    return "\n".join(paragraphs)
|
|
|
class OABExams(datasets.GeneratorBasedBuilder):
    """Brazilian OAB bar exams (2010-2018) as multiple-choice questions.

    Each example is one exam question with its statement, lettered
    choices and gold answer; questions officially nullified by the OAB
    are parsed but not emitted.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Declare the example schema, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question_number": datasets.Value("int32"),
                    "exam_id": datasets.Value("string"),
                    "exam_year": datasets.Value("string"),
                    "question_type": datasets.Value("string"),
                    "nullified": datasets.Value("bool"),
                    "question": datasets.Value("string"),
                    "choices": datasets.Sequence(
                        feature={
                            "text": datasets.Value("string"),
                            "label": datasets.Value("string"),
                        }
                    ),
                    "answerKey": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every raw exam file; all exams go into a single train split."""
        links = [BASE_URL + file for file in FILES]
        downloaded_files = dl_manager.download(links)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": downloaded_files,
                    "filenames": FILES,
                },
            )
        ]

    def _generate_examples(self, filepaths, filenames):
        """Parse raw exam files and yield one example per non-nullified question.

        Args:
            filepaths: local paths of the downloaded exam files.
            filenames: the matching original file names ("<year>-<edition>.txt"),
                used to derive ``exam_id`` and ``exam_year``.

        Yields:
            ``(question_id, example_dict)`` pairs matching the features
            declared in ``_info``.

        Raises:
            ValueError: if a question has no ``OPTIONS`` marker (previously
                this only printed the question and then crashed with an
                opaque ``TypeError``).
        """
        # Question header: e.g. "Questão 12" or "Questão 12 NULL" (nullified).
        question_header_re = re.compile(r"Questão \d{1,2}(\sNULL)?")
        for filepath, filename in zip(filepaths, filenames):
            exam_id = filename.replace(".txt", "")
            # Kept as a string to match the declared `exam_year` feature type
            # (the previous int() cast contradicted the schema in _info).
            exam_year = filename.split("-")[0]
            questions_temp = []
            with open(filepath, encoding="utf-8") as f:
                lines = f.readlines()
            # First pass: group the raw lines of each question.
            for line in lines:
                if question_header_re.match(line.strip()):
                    nullified = "NULL" in line
                    question_number = int(line.strip().split(" ")[1])
                    question_id = exam_id + "_" + str(question_number)
                    questions_temp.append(
                        {
                            "question_id": question_id,
                            "question_number": question_number,
                            "exam_id": exam_id,
                            "exam_year": exam_year,
                            "lines": [line],
                            "nullified": nullified,
                        }
                    )
                elif questions_temp:
                    questions_temp[-1]["lines"].append(line)
                # else: preamble before the first question header — previously
                # this raised IndexError on questions_temp[-1]; now skipped.

            # Second pass: parse each question's grouped lines.
            for question_temp in questions_temp:
                question_lines = question_temp["lines"]

                # The AREA marker is usually the third line (after the header
                # and an ENUM line) but occasionally follows the header directly.
                area_index = 2
                if question_lines[1].startswith("AREA"):
                    area_index = 1
                area_line = question_lines[area_index].strip()
                # "AREA ETHICS" -> "ETHICS"; a bare "AREA" means unknown type.
                question_type = None if area_line == "AREA" else area_line.split(" ")[1]

                # Locate the OPTIONS marker separating statement from choices.
                index_options = None
                for i, line in enumerate(question_lines):
                    if line.strip() == "OPTIONS":
                        index_options = i
                        break
                if index_options is None:
                    raise ValueError(
                        "No OPTIONS marker found for question "
                        + question_temp["question_id"]
                    )

                question = join_lines(question_lines[3:index_options])

                # Choices start two lines after the OPTIONS marker; an option
                # begins with "X) " or "X:CORRECT) ", continuation lines are
                # appended to the current option.
                choices = {"text": [], "label": []}
                answerKey = None
                temp_question_text = None
                for line in question_lines[index_options + 2:]:
                    if "CORRECT)" in line:
                        answerKey = line[0]
                    if line[0] in ("A", "B", "C", "D", "E") and (
                        line[1:3] == ") " or line[1:11] == ":CORRECT) "
                    ):
                        if temp_question_text is not None:
                            choices["text"].append(join_lines(temp_question_text))
                        temp_question_text = [line[line.find(")") + 2:]]
                        choices["label"].append(line[0])
                    elif temp_question_text is not None:
                        temp_question_text.append(line)
                if temp_question_text is not None:
                    choices["text"].append(join_lines(temp_question_text))
                    temp_question_text = None

                # Nullified questions have no reliable gold answer; skip them.
                if question_temp["nullified"]:
                    continue

                yield question_temp["question_id"], {
                    "id": question_temp["question_id"],
                    "question_number": question_temp["question_number"],
                    "exam_id": question_temp["exam_id"],
                    "exam_year": question_temp["exam_year"],
                    "question_type": question_type,
                    "nullified": question_temp["nullified"],
                    "question": question,
                    "choices": choices,
                    "answerKey": answerKey,
                }