# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Scientific Lay Summarization Datasets."""
import json
import os
import datasets
_CITATION = """
@misc{Goldsack_2022,
doi = {10.48550/ARXIV.2210.09932},
url = {https://arxiv.org/abs/2210.09932},
author = {Goldsack, Tomas and Zhang, Zhihao and Lin, Chenghua and Scarton, Carolina},
title = {Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature},
publisher = {arXiv},
year = {2022},
copyright = {arXiv.org perpetual, non-exclusive license}
}
"""
_DESCRIPTION = """
This repository contains the PLOS and eLife datasets, introduced in the EMNLP 2022 paper "[Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature](https://arxiv.org/abs/2210.09932)".
Each dataset contains full biomedical research articles paired with expert-written lay summaries (i.e., non-technical summaries). PLOS articles are derived from various journals published by [the Public Library of Science (PLOS)](https://plos.org/), whereas eLife articles are derived from the [eLife](https://elifesciences.org/) journal. More details and analysis of the content of each dataset are provided in the paper.
Both "elife" and "plos" have 6 features:
- "article": the body of the document (including the abstract), sections seperated by "/n".
- "section_headings": the title of each section, seperated by "/n".
- "keywords": keywords describing the topic of the article, seperated by "/n".
- "title" : the title of the article.
- "year" : the year the article was published.
- "summary": the lay summary of the document.
"""
_DOCUMENT = "article"
_SUMMARY = "summary"
_URLS = {
"plos": "https://drive.usercontent.google.com/download?id=1lZ6PCAtXvmGjRZyp3vQQCEgO_yerH62Q&export=download&authuser=1&confirm=t&uuid=dc63dea1-0814-450f-9234-8bff2b9d1a94&at=APZUnTUfgwJ5Tdiin4ppFPPLWhMX%3A1716450460802",
"elife": "https://drive.usercontent.google.com/download?id=1WKW8BAqluOlXrpy1B9mV3j3CtAK3JdnE&export=download&authuser=1&confirm=t&uuid=1332bc11-7cbf-4c4d-8561-85621060f397&at=APZUnTVLLKAGVSBpQlYKojrJ57xb%3A1716450570186",
}
class ScientificLaySummarisationConfig(datasets.BuilderConfig):
"""BuilderConfig for Scientific Papers."""
def __init__(self, filename=None, **kwargs):
"""BuilderConfig for ScientificPapers
Args:
filename: filename of different configs for the dataset.
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
self.filename = filename
class ScientificLaySummarisation(datasets.GeneratorBasedBuilder):
"""Scientific Papers."""
BUILDER_CONFIGS = [
ScientificLaySummarisationConfig(name="plos", description="Documents and lay summaries from PLOS journals."),
ScientificLaySummarisationConfig(name="elife", description="Documents and lay summaries from the eLife journal."),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
_DOCUMENT: datasets.Value("string"),
_SUMMARY: datasets.Value("string"),
"section_headings": datasets.Value("string"),
"keywords": datasets.Value("string"),
"year": datasets.Value("string"),
"title": datasets.Value("string"),
}
),
supervised_keys=None,
homepage="https://github.com/TGoldsack1/Corpora_for_Lay_Summarisation",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_paths = dl_manager.download_and_extract(_URLS)
path = dl_paths[self.config.name]
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"path": os.path.join(path, "train.json")},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"path": os.path.join(path, "val.json")},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"path": os.path.join(path, "test.json")},
),
]
def _generate_examples(self, path=None):
"""Yields examples."""
        with open(path, encoding="utf-8") as f:
            data = json.load(f)
            for d in data:
# Possible keys are:
# "id": str, # unique identifier
# "year": int, # year of publication
# "title": str, # title
# "sections": List[List[str]], # main text, divided in to sections/sentences
# "headings" List[str], # headings of each section
# "abstract": List[str], # abstract, in sentences
# "summary": List[str], # lay summary, in sentences
# "keywords": List[str] # keywords/topic of article
sections = [" ".join(s).strip() for s in d["sections"]]
abstract = " ".join(d['abstract']).strip()
full_doc = [abstract] + sections
summary = " ".join(d["summary"]).strip()
yield d["id"], {
_DOCUMENT: "\n".join(full_doc),
_SUMMARY: summary,
"section_headings": "\n".join(["Abstract"] + d["headings"]),
"keywords": "\n".join(d["keywords"]),
"year": d["year"],
"title": d["title"]
}
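# Note for consumers: "article" is built as "\n".join([abstract] + sections) and
# "section_headings" as "\n".join(["Abstract"] + headings), so the two fields can
# be realigned by splitting on newlines. Illustrative sketch (this relies on the
# section text itself containing no newlines, which holds here because sentences
# are joined with spaces):
#
#     texts = example["article"].split("\n")
#     headings = example["section_headings"].split("\n")
#     for heading, text in zip(headings, texts):
#         print(heading, len(text))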