"""New York Times Ingredient Phrase Tagger Dataset"""
import json

import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@misc{nytimesTaggedIngredients,
author = {Erica Greene and Adam Mckaig},
title = {{O}ur {T}agged {I}ngredients {D}ata is {N}ow on {G}it{H}ub --- archive.nytimes.com},
howpublished = {\\url{https://archive.nytimes.com/open.blogs.nytimes.com/2016/04/27/structured-ingredients-data-tagging/}},
year = {2016},
note = {[Accessed 03-10-2023]},
}
"""
_DESCRIPTION = """\
New York Times Ingredient Phrase Tagger Dataset
We use a conditional random field model (CRF) to extract tags from labelled training data, which was tagged by human news assistants.
We wrote about our approach on the [New York Times Open blog](http://open.blogs.nytimes.com/2015/04/09/extracting-structured-data-from-recipes-using-conditional-random-fields/).
This repo contains scripts to extract the Quantity, Unit, Name, and Comments from unstructured ingredient phrases.
We use it on NYT Cooking to format incoming recipes. Given the following input:
```
1 pound carrots, young ones if possible
Kosher salt, to taste
2 tablespoons sherry vinegar
2 tablespoons honey
2 tablespoons extra-virgin olive oil
1 medium-size shallot, peeled and finely diced
1/2 teaspoon fresh thyme leaves, finely chopped
Black pepper, to taste
```
the tagger assigns each token a tag such as QTY, UNIT, NAME, or COMMENT (plus RANGE_END and O; see the `label` feature below).
"""
_URL = "https://github.com/nytimes/ingredient-phrase-tagger"
_URLS = {
"train": "https://huggingface.co/datasets/napsternxg/nyt_ingredients/resolve/main/nyt-ingredients.crf.jsonl"
}
class NYTIngredientsConfig(datasets.BuilderConfig):
"""The NYT Ingredients Dataset."""
def __init__(self, **kwargs):
"""BuilderConfig for NYT Ingredients.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(NYTIngredientsConfig, self).__init__(**kwargs)
class NYTIngredients(datasets.GeneratorBasedBuilder):
"""The NYT Ingredients Dataset."""
BUILDER_CONFIGS = [
NYTIngredientsConfig(
name="nyt_ingredients",
version=datasets.Version("1.0.0"),
description="The NYT Ingredients Dataset",
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"input": datasets.Value("string"),
"display_input": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"index": datasets.Sequence(datasets.Value("string")),
"lengthGroup": datasets.Sequence(datasets.Value("string")),
"isCapitalized": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"NoCAP",
"YesCAP"
]
)
),
"insideParenthesis": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"NoPAREN",
"YesPAREN",
]
)
),
"label": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-COMMENT",
"I-COMMENT",
"B-NAME",
"I-NAME",
"B-RANGE_END",
"I-RANGE_END",
"B-QTY",
"I-QTY",
"B-UNIT",
"I-UNIT",
]
)
),
}
),
supervised_keys=None,
homepage="https://github.com/nytimes/ingredient-phrase-tagger",
citation=_CITATION,
)
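    # Illustrative record shape (field values are hypothetical and shown only to
    # make the schema above concrete; ClassLabel fields are stored as integer ids
    # that decode to the names listed above):
    #   {
    #     "input": "1 pound carrots, young ones if possible",
    #     "tokens": ["1", "pound", "carrots", ",", "young", "ones", "if", "possible"],
    #     "label": ["B-QTY", "B-UNIT", "B-NAME", "O", "B-COMMENT", "I-COMMENT", "I-COMMENT", "I-COMMENT"],
    #     "isCapitalized": ["NoCAP", ...], "insideParenthesis": ["NoPAREN", ...],
    #     ...
    #   }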
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
downloaded_files = dl_manager.download_and_extract(_URLS)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": downloaded_files["train"]},
),
]
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as fp:
for i, line in enumerate(fp):
yield i, json.loads(line)
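

# A minimal smoke test, kept out of the `datasets` import path by the __main__
# guard. This is a sketch: it assumes the script is hosted in the
# napsternxg/nyt_ingredients repo on the Hugging Face Hub (see _URLS above);
# newer `datasets` versions may additionally require trust_remote_code=True.
if __name__ == "__main__":
    ds = datasets.load_dataset("napsternxg/nyt_ingredients", split="train")
    example = ds[0]
    # "label" is a Sequence(ClassLabel); decode the integer ids back to tag names.
    label_feature = ds.features["label"].feature
    print(example["input"])
    print(list(zip(example["tokens"], [label_feature.int2str(i) for i in example["label"]])))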