Columns: prompt (string, 131 to 11.8k characters), completion (string, 7 to 173 characters), api (string, 11 to 48 characters). Each record below gives a flattened notebook-code prompt, the code completion that follows it, and the fully qualified DSPy API that the completion exercises.
import dspy from dspy.evaluate.evaluate import Evaluate from dspy.teleprompt import BootstrapFewShotWithRandomSearch colbertv2 =
dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.ColBERTv2
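The completion above instantiates dspy.ColBERTv2, a client for a hosted ColBERTv2 endpoint that can also be called directly as a retriever. A minimal sketch, not part of the dataset; the endpoint URL is taken from the record, and the 'long_text' key on returned passages is an assumption about the client's passage dicts:

```python
import dspy

# Hosted ColBERTv2 index over 2017 Wikipedia abstracts (availability not guaranteed).
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')

# Calling the client directly returns the top-k passages for a query.
topk = colbertv2("When was the first FIFA World Cup held?", k=3)
for passage in topk:
    print(passage['long_text'])  # key name assumed; simplify=True returns plain strings
```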
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_QuizGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dspy.settings.configure
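dspy.settings.configure sets process-wide defaults that DSPy modules read implicitly. A minimal sketch mirroring the record above (same models and kwargs; assumes an OpenAI key in the environment):

```python
import dspy

turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')

# dspy.Predict, dspy.ChainOfThought, dspy.Retrieve, etc. pick these up implicitly.
dspy.settings.configure(lm=turbo, rm=colbertv2, trace=[], temperature=0.7)
```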
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import dspy get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy') import dspy from dspy.evaluate import Evaluate from dspy.datasets.hotpotqa import HotPotQA from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150) colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2, lm=llama) train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'), ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'), ('In what year was the star of To Hell and Back born?', '1925'), ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'), ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'), ('Which author is English: John Braine or Studs Terkel?', 'John Braine'), ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')] train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train] dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'), ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'), ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'), ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'), ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'), ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'), ('Kyle Moran was born in the town on what river?', 'Castletown River'), ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'), ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'), ('What year was the father of the Princes in the Tower born?', '1442'), ('What river is near the Crichton Collegiate Church?', 'the River Tyne'), ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'), ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')] dev = [
dspy.Example(question=question, answer=answer)
dspy.Example
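dspy.Example is the basic data container used throughout these records. A short sketch of the input/label split that with_inputs establishes (the example values are illustrative):

```python
import dspy

qa = dspy.Example(question="What is the capital of Germany?", answer="Berlin")

# with_inputs marks which fields are inputs; the remaining fields act as labels.
qa = qa.with_inputs('question')
print(qa.inputs())  # Example holding only 'question'
print(qa.labels())  # Example holding only 'answer'
```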
import dspy from dsp.utils import deduplicate from dspy.datasets import HotPotQA from dspy.predict.retry import Retry from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler import os import openai openai.api_key = os.getenv('OPENAI_API_KEY') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] def validate_query_distinction_local(previous_queries, query): """check if query is distinct from previous queries""" if previous_queries == []: return True if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8): return False return True def validate_context_and_answer_and_hops(example, pred, trace=None): if not dspy.evaluate.answer_exact_match(example, pred): return False if not dspy.evaluate.answer_passage_match(example, pred): return False return True def gold_passages_retrieved(example, pred, trace=None): gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles'])) found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context])) return gold_titles.issubset(found_titles) class GenerateAnswer(dspy.Signature): """Answer questions with short factoid answers.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() def all_queries_distinct(prev_queries): query_distinct = True for i, query in enumerate(prev_queries): if validate_query_distinction_local(prev_queries[:i], query) == False: query_distinct = False break return query_distinct class SimplifiedBaleen(dspy.Module): def __init__(self, passages_per_hop=2, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve =
dspy.Retrieve(k=passages_per_hop)
dspy.Retrieve
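dspy.Retrieve queries whatever rm was set via dspy.settings.configure. A minimal sketch, assuming a retrieval model is already configured as in the record above:

```python
import dspy

retrieve = dspy.Retrieve(k=2)  # k matches passages_per_hop in the record
result = retrieve("When was the first FIFA World Cup held?")
for passage in result.passages:
    print(passage)
```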
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_QuizGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question', 'answer') for x in dataset.train] devset = [x.with_inputs('question', 'answer') for x in dataset.dev] class GenerateAnswerChoices(dspy.Signature): """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question.""" question = dspy.InputField() correct_answer = dspy.InputField() number_of_choices = dspy.InputField() answer_choices = dspy.OutputField(desc='JSON key-value pairs') class QuizAnswerGenerator(dspy.Module): def __init__(self): super().__init__() self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices) def forward(self, question, answer): choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices return dspy.Prediction(choices = choices) number_of_choices = '4' quiz_generator = QuizAnswerGenerator() def format_checker(choice_string): try: choices = json.loads(choice_string) if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()): return True except json.JSONDecodeError: return False return False def is_correct_answer_included(correct_answer, generated_choices): try: choices_dict = json.loads(generated_choices) return correct_answer in choices_dict.values() except json.JSONDecodeError: return False def is_plausibility_yes(assessment_answer): """Check if the first word of the assessment answer is 'yes'.""" return assessment_answer.split()[0].lower() == 'yes' class AssessQuizChoices(dspy.Signature): """Assess the quality of quiz answer choices along specified dimensions.""" question = dspy.InputField() 
answer_choices = dspy.InputField() assessment_question = dspy.InputField() assessment_answer =
dspy.OutputField(desc="Yes or No")
dspy.OutputField
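dspy.InputField and dspy.OutputField declare the fields of a dspy.Signature, and their desc hints end up in the generated prompt. A minimal end-to-end sketch, assuming an lm has been configured:

```python
import dspy

class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")

pred = dspy.Predict(GenerateAnswer)(question="What is the capital of Germany?")
print(pred.answer)
```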
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import dspy from dspy.evaluate import Evaluate from dspy.datasets.hotpotqa import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune ports = [7140, 7141, 7142, 7143, 7144, 7145] llamaChat =
dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150)
dspy.HFClientTGI
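dspy.HFClientTGI talks to one or more text-generation-inference servers. A sketch mirroring the record; it assumes TGI instances serving the named model are already running on these ports:

```python
import dspy

ports = [7140, 7141, 7142, 7143, 7144, 7145]
llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf",
                             port=ports, max_tokens=150)
dspy.settings.configure(lm=llamaChat)  # requests are spread across the ports
```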
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache') get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_LongFormQA_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import EM, normalize_text from dspy.primitives.assertions import assert_transform_module, backtrack_handler get_ipython().run_line_magic('cd', 'dspy/examples/longformqa') from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") print(f"Relevant Wikipedia Titles: {train_example.gold_titles}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") from dsp.utils import deduplicate class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() class GenerateCitedParagraph(dspy.Signature): """Generate a paragraph with citations.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() paragraph = dspy.OutputField(desc="includes citations") class LongFormQA(dspy.Module): def __init__(self, passages_per_hop=3, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph) self.max_hops = max_hops def forward(self, question): context = [] for hop in range(self.max_hops): query = self.generate_query[hop](context=context, question=question).query passages = self.retrieve(query).passages context 
= deduplicate(context + passages) pred = self.generate_cited_paragraph(context=context, question=question) pred = dspy.Prediction(context=context, paragraph=pred.paragraph) return pred class CheckCitationFaithfulness(dspy.Signature): """Verify that the text is based on the provided context.""" context = dspy.InputField(desc="may contain relevant facts") text = dspy.InputField(desc="between 1 to 2 sentences") faithfulness = dspy.OutputField(desc="boolean indicating if text is faithful to context") def citation_faithfulness(example, pred, trace): paragraph, context = pred.paragraph, pred.context citation_dict = extract_text_by_citation(paragraph) if not citation_dict: return False, None context_dict = {str(i): context[i].split(' | ')[1] for i in range(len(context))} faithfulness_results = [] unfaithful_citations = [] check_citation_faithfulness = dspy.ChainOfThought(CheckCitationFaithfulness) for citation_num, texts in citation_dict.items(): if citation_num not in context_dict: continue current_context = context_dict[citation_num] for text in texts: try: result = check_citation_faithfulness(context=current_context, text=text) is_faithful = result.faithfulness.lower() == 'true' faithfulness_results.append(is_faithful) if not is_faithful: unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'context': current_context}) except ValueError as e: faithfulness_results.append(False) unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'error': str(e)}) final_faithfulness = all(faithfulness_results) if not faithfulness_results: return False, None return final_faithfulness, unfaithful_citations def extract_cited_titles_from_paragraph(paragraph, context): cited_indices = [int(m.group(1)) for m in re.finditer(r'\[(\d+)\]\.', paragraph)] cited_indices = [index - 1 for index in cited_indices if index <= len(context)] cited_titles = [context[index].split(' | ')[0] for index in cited_indices] return cited_titles def calculate_recall(example, pred, trace=None): gold_titles = set(example['gold_titles']) found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context)) intersection = gold_titles.intersection(found_cited_titles) recall = len(intersection) / len(gold_titles) if gold_titles else 0 return recall def calculate_precision(example, pred, trace=None): gold_titles = set(example['gold_titles']) found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context)) intersection = gold_titles.intersection(found_cited_titles) precision = len(intersection) / len(found_cited_titles) if found_cited_titles else 0 return precision def answer_correctness(example, pred, trace=None): assert hasattr(example, 'answer'), "Example does not have 'answer'." 
normalized_context = normalize_text(pred.paragraph) if isinstance(example.answer, str): gold_answers = [example.answer] elif isinstance(example.answer, list): gold_answers = example.answer else: raise ValueError("'example.answer' is not string or list.") return 1 if any(normalize_text(answer) in normalized_context for answer in gold_answers) else 0 def evaluate(module): correctness_values = [] recall_values = [] precision_values = [] citation_faithfulness_values = [] for i in range(len(devset)): example = devset[i] try: pred = module(question=example.question) correctness_values.append(answer_correctness(example, pred)) citation_faithfulness_score, _ = citation_faithfulness(None, pred, None) citation_faithfulness_values.append(citation_faithfulness_score) recall = calculate_recall(example, pred) precision = calculate_precision(example, pred) recall_values.append(recall) precision_values.append(precision) except Exception as e: print(f"Failed generation with error: {e}") average_correctness = sum(correctness_values) / len(devset) if correctness_values else 0 average_recall = sum(recall_values) / len(devset) if recall_values else 0 average_precision = sum(precision_values) / len(devset) if precision_values else 0 average_citation_faithfulness = sum(citation_faithfulness_values) / len(devset) if citation_faithfulness_values else 0 print(f"Average Correctness: {average_correctness}") print(f"Average Recall: {average_recall}") print(f"Average Precision: {average_precision}") print(f"Average Citation Faithfulness: {average_citation_faithfulness}") longformqa = LongFormQA() evaluate(longformqa) question = devset[6].question pred = longformqa(question) citation_faithfulness_score, _ = citation_faithfulness(None, pred, None) print(f"Question: {question}") print(f"Predicted Paragraph: {pred.paragraph}") print(f"Citation Faithfulness: {citation_faithfulness_score}") class LongFormQAWithAssertions(dspy.Module): def __init__(self, passages_per_hop=3, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve =
dspy.Retrieve(k=passages_per_hop)
dspy.Retrieve
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_TweetGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import deduplicate from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question', 'answer') for x in dataset.train] devset = [x.with_inputs('question', 'answer') for x in dataset.dev] class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() class GenerateTweet(dspy.Signature): """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags.""" question = dspy.InputField() context = dspy.InputField(desc="may contain relevant facts") tweet = dspy.OutputField() class Tweeter(dspy.Module): def __init__(self): super().__init__() self.generate_tweet = dspy.ChainOfThought(GenerateTweet) def forward(self, question, answer): context = [] max_hops=2 passages_per_hop=3 generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] retrieve = dspy.Retrieve(k=passages_per_hop) for hop in range(max_hops): query = generate_query[hop](context=context, question=question).query passages = retrieve(query).passages context = deduplicate(context + passages) generated_tweet = self.generate_tweet(question=question, context=context).tweet return dspy.Prediction(generated_tweet=generated_tweet, context=context) tweeter = Tweeter() def has_no_hashtags(text): return len(re.findall(r"#\w+", text)) == 0 def is_within_length_limit(text, length_limit=280): return len(text) <= length_limit def is_assessment_yes(assessment_answer): """Check if the first word of the assessment answer is 'yes'.""" return 
assessment_answer.split()[0].lower() == 'yes' def has_correct_answer(text, answer): return answer in text class AssessTweet(dspy.Signature): """Assess the quality of a tweet along the specified dimension.""" context = dspy.InputField(desc='ignore if N/A') assessed_text =
dspy.InputField()
dspy.InputField
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import pkg_resources try: # When on Colab, let's install pyserini, Pytorch, and Faiss import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') get_ipython().run_line_magic('cd', '$repo_path') get_ipython().system('pip install -e .') if not "pyserini" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install pyserini') if not "torch" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install torch') if not "faiss-cpu" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install faiss-cpu') except: repo_path = '.' if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') if repo_path not in sys.path: sys.path.append(repo_path) import dspy pys_ret_prebuilt = dspy.Pyserini(index='beir-v1.0.0-nfcorpus.contriever-msmarco', query_encoder='facebook/contriever-msmarco', id_field='_id', text_fields=['title', 'text'])
dspy.settings.configure(rm=pys_ret_prebuilt)
dspy.settings.configure
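dspy.Pyserini wraps a Pyserini index as a retrieval model. A sketch mirroring the record's arguments; it assumes pyserini, torch, and faiss-cpu are installed, and the final Retrieve call with its query is an illustrative addition:

```python
import dspy

pys_ret_prebuilt = dspy.Pyserini(index='beir-v1.0.0-nfcorpus.contriever-msmarco',
                                 query_encoder='facebook/contriever-msmarco',
                                 id_field='_id',
                                 text_fields=['title', 'text'])
dspy.settings.configure(rm=pys_ret_prebuilt)

# Illustrative query against the NFCorpus medical collection.
print(dspy.Retrieve(k=3)("How does diet affect heart disease?").passages)
```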
import dspy from dspy.evaluate import Evaluate from dspy.datasets.gsm8k import GSM8K, gsm8k_metric from dspy.teleprompt import BootstrapFewShotWithRandomSearch gsm8k = GSM8K() turbo =
dspy.OpenAI(model='gpt-3.5-turbo-instruct', max_tokens=250)
dspy.OpenAI
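dspy.OpenAI is the LM client used by most records here. A minimal sketch; it assumes OPENAI_API_KEY is set in the environment:

```python
import dspy

turbo = dspy.OpenAI(model='gpt-3.5-turbo-instruct', max_tokens=250)
dspy.settings.configure(lm=turbo)

# The client is also directly callable and returns a list of completions.
print(turbo("Say hello in one word."))
```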
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') import dspy turbo = dspy.OpenAI(model='gpt-3.5-turbo') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts) from dspy.datasets import HotPotQA dataset =
HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
dspy.datasets.HotPotQA
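The HotPotQA loader returns ready-made dspy.Example splits. A minimal sketch mirroring the record's arguments:

```python
from dspy.datasets import HotPotQA

dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023,
                   dev_size=50, test_size=0)

# Mark 'question' as the input field; 'answer' (and any kept details) act as labels.
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
print(len(trainset), len(devset))  # 20 50
```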
import dspy from dsp.utils import deduplicate from dspy.datasets import HotPotQA from dspy.predict.retry import Retry from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler import os import openai openai.api_key = os.getenv('OPENAI_API_KEY') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] def validate_query_distinction_local(previous_queries, query): """check if query is distinct from previous queries""" if previous_queries == []: return True if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8): return False return True def validate_context_and_answer_and_hops(example, pred, trace=None): if not dspy.evaluate.answer_exact_match(example, pred): return False if not dspy.evaluate.answer_passage_match(example, pred): return False return True def gold_passages_retrieved(example, pred, trace=None): gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles'])) found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context])) return gold_titles.issubset(found_titles) class GenerateAnswer(dspy.Signature): """Answer questions with short factoid answers.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() def all_queries_distinct(prev_queries): query_distinct = True for i, query in enumerate(prev_queries): if validate_query_distinction_local(prev_queries[:i], query) == False: query_distinct = False break return query_distinct class SimplifiedBaleen(dspy.Module): def __init__(self, passages_per_hop=2, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_answer =
dspy.ChainOfThought(GenerateAnswer)
dspy.ChainOfThought
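dspy.ChainOfThought wraps a signature and adds an intermediate rationale before the output fields. A minimal sketch using the shorthand string signature, assuming an lm is configured:

```python
import dspy

cot = dspy.ChainOfThought('question -> answer')
pred = cot(question="What is 7 times 6?")
print(pred.rationale)  # reasoning text added by ChainOfThought
print(pred.answer)
```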
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install -e $repo_path') get_ipython().system('pip install transformers') import dspy from dspy.evaluate import Evaluate from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune llama =
dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
dspy.HFClientTGI
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') import dspy turbo = dspy.OpenAI(model='gpt-3.5-turbo') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts) from dspy.datasets import HotPotQA dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] len(trainset), len(devset) train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}") print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}") class BasicQA(dspy.Signature): """Answer questions with short factoid answers.""" question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") generate_answer = dspy.Predict(BasicQA) pred = generate_answer(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Predicted Answer: {pred.answer}") turbo.inspect_history(n=1) generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA) pred = generate_answer_with_chain_of_thought(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}") print(f"Predicted Answer: {pred.answer}") retrieve = dspy.Retrieve(k=3) topK_passages = retrieve(dev_example.question).passages print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n') for idx, passage in enumerate(topK_passages): print(f'{idx+1}]', passage, '\n') retrieve("When was the first FIFA World Cup held?").passages[0] class GenerateAnswer(dspy.Signature): """Answer questions with short factoid answers.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") class RAG(dspy.Module): def __init__(self, num_passages=3): super().__init__() self.retrieve = dspy.Retrieve(k=num_passages) self.generate_answer = dspy.ChainOfThought(GenerateAnswer) def forward(self, question): context = self.retrieve(question).passages prediction = self.generate_answer(context=context, question=question) return dspy.Prediction(context=context, answer=prediction.answer) from dspy.teleprompt import BootstrapFewShot def validate_context_and_answer(example, pred, trace=None): 
answer_EM = dspy.evaluate.answer_exact_match(example, pred) answer_PM = dspy.evaluate.answer_passage_match(example, pred) return answer_EM and answer_PM teleprompter = BootstrapFewShot(metric=validate_context_and_answer) compiled_rag = teleprompter.compile(RAG(), trainset=trainset) my_question = "What castle did David Gregory inherit?" pred = compiled_rag(my_question) print(f"Question: {my_question}") print(f"Predicted Answer: {pred.answer}") print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in pred.context]}") turbo.inspect_history(n=1) for name, parameter in compiled_rag.named_predictors(): print(name) print(parameter.demos[0]) print() from dspy.evaluate.evaluate import Evaluate evaluate_on_hotpotqa = Evaluate(devset=devset, num_threads=1, display_progress=True, display_table=5) metric = dspy.evaluate.answer_exact_match evaluate_on_hotpotqa(compiled_rag, metric=metric) def gold_passages_retrieved(example, pred, trace=None): gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles'])) found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context])) return gold_titles.issubset(found_titles) compiled_rag_retrieval_score = evaluate_on_hotpotqa(compiled_rag, metric=gold_passages_retrieved) class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question =
dspy.InputField()
dspy.InputField
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') import dspy turbo = dspy.OpenAI(model='gpt-3.5-turbo') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts) from dspy.datasets import HotPotQA dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] len(trainset), len(devset) train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}") print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}") class BasicQA(dspy.Signature): """Answer questions with short factoid answers.""" question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") generate_answer = dspy.Predict(BasicQA) pred = generate_answer(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Predicted Answer: {pred.answer}") turbo.inspect_history(n=1) generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA) pred = generate_answer_with_chain_of_thought(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}") print(f"Predicted Answer: {pred.answer}") retrieve = dspy.Retrieve(k=3) topK_passages = retrieve(dev_example.question).passages print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n') for idx, passage in enumerate(topK_passages): print(f'{idx+1}]', passage, '\n') retrieve("When was the first FIFA World Cup held?").passages[0] class GenerateAnswer(dspy.Signature): """Answer questions with short factoid answers.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() answer =
dspy.OutputField(desc="often between 1 and 5 words")
dspy.OutputField
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import dspy get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy') import dspy from dspy.evaluate import Evaluate from dspy.datasets.hotpotqa import HotPotQA from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune llama =
dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
dspy.HFClientTGI
import dspy from dspy.evaluate import Evaluate from dspy.datasets.gsm8k import GSM8K, gsm8k_metric from dspy.teleprompt import BootstrapFewShotWithRandomSearch gsm8k = GSM8K() turbo = dspy.OpenAI(model='gpt-3.5-turbo-instruct', max_tokens=250) trainset, devset = gsm8k.train, gsm8k.dev dspy.settings.configure(lm=turbo) NUM_THREADS = 4 evaluate = Evaluate(devset=devset[:], metric=gsm8k_metric, num_threads=NUM_THREADS, display_progress=True, display_table=0) class CoT(dspy.Module): def __init__(self): super().__init__() self.prog = dspy.ChainOfThought("question -> answer") def forward(self, question): return self.prog(question=question) RUN_FROM_SCRATCH = False if RUN_FROM_SCRATCH: config = dict(max_bootstrapped_demos=8, max_labeled_demos=8, num_candidate_programs=10, num_threads=NUM_THREADS) teleprompter =
BootstrapFewShotWithRandomSearch(metric=gsm8k_metric, **config)
dspy.teleprompt.BootstrapFewShotWithRandomSearch
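BootstrapFewShotWithRandomSearch compiles a program by bootstrapping demonstrations and searching over candidate few-shot programs. A self-contained sketch reusing the record's CoT module and config; it assumes an lm is configured and the GSM8K download succeeds:

```python
import dspy
from dspy.datasets.gsm8k import GSM8K, gsm8k_metric
from dspy.teleprompt import BootstrapFewShotWithRandomSearch

gsm8k = GSM8K()

class CoT(dspy.Module):
    def __init__(self):
        super().__init__()
        self.prog = dspy.ChainOfThought("question -> answer")

    def forward(self, question):
        return self.prog(question=question)

config = dict(max_bootstrapped_demos=8, max_labeled_demos=8,
              num_candidate_programs=10, num_threads=4)
teleprompter = BootstrapFewShotWithRandomSearch(metric=gsm8k_metric, **config)
compiled_cot = teleprompter.compile(CoT(), trainset=gsm8k.train, valset=gsm8k.dev)
```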
import dspy from dspy.evaluate import Evaluate from dspy.datasets.gsm8k import GSM8K, gsm8k_metric from dspy.teleprompt import BootstrapFewShotWithRandomSearch gsm8k =
GSM8K()
dspy.datasets.gsm8k.GSM8K
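The GSM8K loader pairs grade-school math questions with numeric answers and ships a matching metric. A minimal sketch:

```python
from dspy.datasets.gsm8k import GSM8K, gsm8k_metric

gsm8k = GSM8K()  # downloads and caches the dataset on first use
trainset, devset = gsm8k.train, gsm8k.dev

example = trainset[0]
print(example.question)
print(example.answer)  # gsm8k_metric compares final numeric answers
```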
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install -e $repo_path') get_ipython().system('pip install transformers') import dspy from dspy.evaluate import Evaluate from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150) colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2, lm=llama) train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'), ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'), ('In what year was the star of To Hell and Back born?', '1925'), ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'), ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'), ('Which author is English: John Braine or Studs Terkel?', 'John Braine'), ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')] train = [
dspy.Example(question=question, answer=answer)
dspy.Example
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import dspy get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy') import dspy from dspy.evaluate import Evaluate from dspy.datasets.hotpotqa import HotPotQA from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150) colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2, lm=llama) train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'), ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'), ('In what year was the star of To Hell and Back born?', '1925'), ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'), ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'), ('Which author is English: John Braine or Studs Terkel?', 'John Braine'), ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')] train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train] dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'), ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'), ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'), ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'), ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'), ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'), ('Kyle Moran was born in the town on what river?', 'Castletown River'), ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'), ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'), ('What year was the father of the Princes in the Tower born?', '1442'), ('What river is near the Crichton Collegiate Church?', 'the River Tyne'), ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'), ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')] dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev] predict = dspy.Predict('question -> answer') predict(question="What is the capital of Germany?") class CoT(dspy.Module): # let's define a new module def __init__(self): super().__init__() self.generate_answer =
dspy.ChainOfThought('question -> answer')
dspy.ChainOfThought
import dspy from dspy.evaluate import Evaluate from dspy.datasets.gsm8k import GSM8K, gsm8k_metric from dspy.teleprompt import BootstrapFewShotWithRandomSearch gsm8k = GSM8K() turbo = dspy.OpenAI(model='gpt-3.5-turbo-instruct', max_tokens=250) trainset, devset = gsm8k.train, gsm8k.dev dspy.settings.configure(lm=turbo) NUM_THREADS = 4 evaluate = Evaluate(devset=devset[:], metric=gsm8k_metric, num_threads=NUM_THREADS, display_progress=True, display_table=0) class CoT(dspy.Module): def __init__(self): super().__init__() self.prog =
dspy.ChainOfThought("question -> answer")
dspy.ChainOfThought
get_ipython().system('pip install clarifai') get_ipython().system('pip install dspy-ai') import dspy from dspy.retrieve.clarifai_rm import ClarifaiRM MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat" PAT = "CLARIFAI_PAT" USER_ID = "YOUR_ID" APP_ID = "YOUR_APP" from langchain.text_splitter import CharacterTextSplitter from langchain.document_loaders import TextLoader from langchain.vectorstores import Clarifai as clarifaivectorstore loader = TextLoader("YOUR_TEXT_FILE") #replace with your file path documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200) docs = text_splitter.split_documents(documents) clarifai_vector_db = clarifaivectorstore.from_documents( user_id=USER_ID, app_id=APP_ID, documents=docs, pat=PAT ) llm=dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens":100,'temperature':0.6}) retriever_model=ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2) dspy.settings.configure(lm=llm, rm=retriever_model) sentence = "disney again ransacks its archives for a quick-buck sequel ." # example from the SST-2 dataset. classify = dspy.Predict('sentence -> sentiment') print(classify(sentence=sentence).sentiment) retrieve =
dspy.Retrieve()
dspy.Retrieve
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_QuizGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question', 'answer') for x in dataset.train] devset = [x.with_inputs('question', 'answer') for x in dataset.dev] class GenerateAnswerChoices(dspy.Signature): """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question.""" question = dspy.InputField() correct_answer = dspy.InputField() number_of_choices = dspy.InputField() answer_choices = dspy.OutputField(desc='JSON key-value pairs') class QuizAnswerGenerator(dspy.Module): def __init__(self): super().__init__() self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices) def forward(self, question, answer): choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices return dspy.Prediction(choices = choices) number_of_choices = '4' quiz_generator = QuizAnswerGenerator() def format_checker(choice_string): try: choices = json.loads(choice_string) if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()): return True except json.JSONDecodeError: return False return False def is_correct_answer_included(correct_answer, generated_choices): try: choices_dict = json.loads(generated_choices) return correct_answer in choices_dict.values() except json.JSONDecodeError: return False def is_plausibility_yes(assessment_answer): """Check if the first word of the assessment answer is 'yes'.""" return assessment_answer.split()[0].lower() == 'yes' class AssessQuizChoices(dspy.Signature): """Assess the quality of quiz answer choices along specified dimensions.""" question = dspy.InputField() 
answer_choices = dspy.InputField() assessment_question = dspy.InputField() assessment_answer = dspy.OutputField(desc="Yes or No") def format_valid_metric(gold, pred, trace=None): generated_choices = pred.choices format_valid = format_checker(generated_choices) score = format_valid return score def is_correct_metric(gold, pred, trace=None): correct_answer, generated_choices = gold.answer, pred.choices correct_included = is_correct_answer_included(correct_answer, generated_choices) score = correct_included return score def plausibility_metric(gold, pred, trace=None): question, generated_choices = gold.question, pred.choices plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?" plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question) plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes' score = plausibility_result return score def overall_metric(gold, pred, trace=None): question, correct_answer, generated_choices = gold.question, gold.answer, pred.choices format_valid = format_checker(generated_choices) correct_included = is_correct_answer_included(correct_answer, generated_choices) plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?" plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question) plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes' score = (format_valid + correct_included + plausibility_result) / 3.0 if correct_included and format_valid else 0 return score metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric] for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5) evaluate(quiz_generator) example = devset[38] quiz_choices = quiz_generator(question=example.question, answer = example.answer) print(f'Generated Quiz Choices: ', quiz_choices.choices) for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=5) evaluate(quiz_generator) class QuizAnswerGeneratorWithAssertions(dspy.Module): def __init__(self): super().__init__() self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices) def forward(self, question, answer): choice_string = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices dspy.Suggest(format_checker(choice_string), "The format of the answer choices should be in JSON format. Please revise accordingly.", target_module=GenerateAnswerChoices) dspy.Suggest(is_correct_answer_included(answer, choice_string), "The answer choices do not include the correct answer to the question. Please revise accordingly.", target_module=GenerateAnswerChoices) plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?" plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=choice_string, assessment_question=plausibility_question) dspy.Suggest(is_plausibility_yes(plausibility_assessment.assessment_answer), "The answer choices are not plausible distractors or are too easily identifiable as incorrect. 
Please revise to provide more challenging and plausible distractors.", target_module=GenerateAnswerChoices) return dspy.Prediction(choices = choice_string) number_of_choices = '4' quiz_generator_with_assertions = assert_transform_module(QuizAnswerGeneratorWithAssertions().map_named_predictors(Retry), backtrack_handler) metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric] for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5) evaluate(quiz_generator_with_assertions) example = devset[38] quiz_choices = quiz_generator_with_assertions(question=example.question, answer = example.answer) print(f'Generated Quiz Choices: ', quiz_choices.choices) for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=30) evaluate(quiz_generator_with_assertions) teleprompter = BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6) compiled_quiz_generator = teleprompter.compile(student = quiz_generator, teacher = quiz_generator, trainset=trainset, valset=devset[:100]) for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5) evaluate(compiled_quiz_generator) teleprompter = BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6) compiled_with_assertions_quiz_generator = teleprompter.compile(student=quiz_generator, teacher = quiz_generator_with_assertions, trainset=trainset, valset=devset[:100]) for metric in metrics: evaluate =
Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
dspy.evaluate.evaluate.Evaluate
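A minimal sketch of the Evaluate pattern this row completes, assuming an LM has already been registered via dspy.settings.configure; the one-example devset and the exact_match metric are illustrative stand-ins for the HotPotQA setup in the prompt:

import dspy
from dspy.evaluate.evaluate import Evaluate

devset = [dspy.Example(question="What is the capital of France?", answer="Paris").with_inputs("question")]  # illustrative one-example devset

def exact_match(gold, pred, trace=None):
    # Metrics throughout these rows share the (gold, pred, trace=None) signature and return a score.
    return gold.answer.lower() == pred.answer.lower()

program = dspy.Predict("question -> answer")  # any dspy.Module with a matching input works
evaluate = Evaluate(metric=exact_match, devset=devset, num_threads=1, display_progress=True, display_table=5)
evaluate(program)  # runs the program over every example and aggregates the metric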
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install -e $repo_path') get_ipython().system('pip install transformers') import dspy from dspy.evaluate import Evaluate from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150) colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2, lm=llama) train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'), ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'), ('In what year was the star of To Hell and Back born?', '1925'), ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'), ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'), ('Which author is English: John Braine or Studs Terkel?', 'John Braine'), ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')] train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train] dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. 
Doctorow'), ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'), ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'), ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'), ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'), ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'), ('Kyle Moran was born in the town on what river?', 'Castletown River'), ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'), ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'), ('What year was the father of the Princes in the Tower born?', '1442'), ('What river is near the Crichton Collegiate Church?', 'the River Tyne'), ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'), ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')] dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev] predict = dspy.Predict('question -> answer') predict(question="What is the capital of Germany?") class CoT(dspy.Module): # let's define a new module def __init__(self): super().__init__() self.generate_answer = dspy.ChainOfThought('question -> answer') def forward(self, question): return self.generate_answer(question=question) # here we use the module metric_EM = dspy.evaluate.answer_exact_match teleprompter = BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2) cot_compiled = teleprompter.compile(CoT(), trainset=train) cot_compiled("What is the capital of Germany?") llama.inspect_history(n=1) NUM_THREADS = 32 evaluate_hotpot =
Evaluate(devset=dev, metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=15)
dspy.evaluate.Evaluate
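The prompt above compiles a module with a teleprompter before evaluating it; a condensed sketch of that BootstrapFewShot step, with a single toy training pair (taken from the row's own train list) standing in for the full set:

import dspy
from dspy.teleprompt import BootstrapFewShot

class CoT(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')

    def forward(self, question):
        return self.generate_answer(question=question)

train = [dspy.Example(question="In what year was the star of To Hell and Back born?", answer="1925").with_inputs('question')]
teleprompter = BootstrapFewShot(metric=dspy.evaluate.answer_exact_match, max_bootstrapped_demos=2)
cot_compiled = teleprompter.compile(CoT(), trainset=train)  # bootstraps demos that pass the metric
cot_compiled(question="What is the capital of Germany?")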
import dspy from dsp.utils import deduplicate from dspy.datasets import HotPotQA from dspy.predict.retry import Retry from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler import os import openai openai.api_key = os.getenv('OPENAI_API_KEY') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] def validate_query_distinction_local(previous_queries, query): """check if query is distinct from previous queries""" if previous_queries == []: return True if
dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8)
dspy.evaluate.answer_exact_match_str
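The completion above gates multi-hop query generation on near-duplicates. A small sketch of that check; frac=0.8 is taken from the row (the assumption, following the row's usage, is that frac below 1.0 relaxes exact match to a soft overlap threshold), and the sample queries are illustrative:

import dspy

prev_queries = ["director of the 2009 movie featuring Peter Outerbridge", "who directed Saw VI"]
new_query = "Director of the 2009 movie featuring Peter Outerbridge?"

# Near-duplicates count as matches at frac=0.8, so the new hop query gets rejected.
if dspy.evaluate.answer_exact_match_str(new_query, prev_queries, frac=0.8):
    print("rejected: query too similar to an earlier hop")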
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache') get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_LongFormQA_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import EM, normalize_text from dspy.primitives.assertions import assert_transform_module, backtrack_handler get_ipython().run_line_magic('cd', 'dspy/examples/longformqa') from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") print(f"Relevant Wikipedia Titles: {train_example.gold_titles}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") from dsp.utils import deduplicate class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() class GenerateCitedParagraph(dspy.Signature): """Generate a paragraph with citations.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() paragraph = dspy.OutputField(desc="includes citations") class LongFormQA(dspy.Module): def __init__(self, passages_per_hop=3, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph) self.max_hops = max_hops def forward(self, question): context = [] for hop in range(self.max_hops): query = self.generate_query[hop](context=context, question=question).query passages = self.retrieve(query).passages context 
= deduplicate(context + passages) pred = self.generate_cited_paragraph(context=context, question=question) pred = dspy.Prediction(context=context, paragraph=pred.paragraph) return pred class CheckCitationFaithfulness(dspy.Signature): """Verify that the text is based on the provided context.""" context =
dspy.InputField(desc="may contain relevant facts")
dspy.InputField
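This row completes the first field of the CheckCitationFaithfulness signature; assembled in full (exactly as the prompt defines it) and invoked once with illustrative inputs:

import dspy

class CheckCitationFaithfulness(dspy.Signature):
    """Verify that the text is based on the provided context."""
    context = dspy.InputField(desc="may contain relevant facts")
    text = dspy.InputField(desc="between 1 to 2 sentences")
    faithfulness = dspy.OutputField(desc="boolean indicating if text is faithful to context")

check = dspy.ChainOfThought(CheckCitationFaithfulness)
result = check(context="Kevin Greutert | Kevin Greutert directed Saw VI.", text="Kevin Greutert directed Saw VI.")
print(result.faithfulness)  # parsed downstream with .lower() == 'true', as in the rows above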
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_QuizGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question', 'answer') for x in dataset.train] devset = [x.with_inputs('question', 'answer') for x in dataset.dev] class GenerateAnswerChoices(dspy.Signature): """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question.""" question = dspy.InputField() correct_answer = dspy.InputField() number_of_choices = dspy.InputField() answer_choices = dspy.OutputField(desc='JSON key-value pairs') class QuizAnswerGenerator(dspy.Module): def __init__(self): super().__init__() self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices) def forward(self, question, answer): choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices return dspy.Prediction(choices = choices) number_of_choices = '4' quiz_generator = QuizAnswerGenerator() def format_checker(choice_string): try: choices = json.loads(choice_string) if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()): return True except json.JSONDecodeError: return False return False def is_correct_answer_included(correct_answer, generated_choices): try: choices_dict = json.loads(generated_choices) return correct_answer in choices_dict.values() except json.JSONDecodeError: return False def is_plausibility_yes(assessment_answer): """Check if the first word of the assessment answer is 'yes'.""" return assessment_answer.split()[0].lower() == 'yes' class AssessQuizChoices(dspy.Signature): """Assess the quality of quiz answer choices along specified dimensions.""" question = dspy.InputField() 
answer_choices = dspy.InputField() assessment_question =
dspy.InputField()
dspy.InputField
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_TweetGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import deduplicate from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question', 'answer') for x in dataset.train] devset = [x.with_inputs('question', 'answer') for x in dataset.dev] class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() class GenerateTweet(dspy.Signature): """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags.""" question = dspy.InputField() context = dspy.InputField(desc="may contain relevant facts") tweet = dspy.OutputField() class Tweeter(dspy.Module): def __init__(self): super().__init__() self.generate_tweet = dspy.ChainOfThought(GenerateTweet) def forward(self, question, answer): context = [] max_hops=2 passages_per_hop=3 generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] retrieve = dspy.Retrieve(k=passages_per_hop) for hop in range(max_hops): query = generate_query[hop](context=context, question=question).query passages = retrieve(query).passages context = deduplicate(context + passages) generated_tweet = self.generate_tweet(question=question, context=context).tweet return dspy.Prediction(generated_tweet=generated_tweet, context=context) tweeter = Tweeter() def has_no_hashtags(text): return len(re.findall(r"#\w+", text)) == 0 def is_within_length_limit(text, length_limit=280): return len(text) <= length_limit def is_assessment_yes(assessment_answer): """Check if the first word of the assessment answer is 'yes'.""" return 
assessment_answer.split()[0].lower() == 'yes' def has_correct_answer(text, answer): return answer in text class AssessTweet(dspy.Signature): """Assess the quality of a tweet along the specified dimension.""" context = dspy.InputField(desc='ignore if N/A') assessed_text = dspy.InputField() assessment_question = dspy.InputField() assessment_answer =
dspy.OutputField(desc="Yes or No")
dspy.OutputField
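The completion finishes AssessTweet's Yes/No output field. A sketch of how such an assessment signature is used, with the same first-word gating the row's is_assessment_yes helper applies; the sample tweet and question are illustrative:

import dspy

class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")

assessment = dspy.Predict(AssessTweet)(context='N/A', assessed_text="Butch Vig produced the re-recording of Lithium.", assessment_question="Is the assessed text self-contained and engaging?")
passed = assessment.assessment_answer.split()[0].lower() == 'yes'  # same check as is_assessment_yes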
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') import dspy turbo = dspy.OpenAI(model='gpt-3.5-turbo') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts) from dspy.datasets import HotPotQA dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] len(trainset), len(devset) train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}") print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}") class BasicQA(dspy.Signature): """Answer questions with short factoid answers.""" question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") generate_answer = dspy.Predict(BasicQA) pred = generate_answer(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Predicted Answer: {pred.answer}") turbo.inspect_history(n=1) generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA) pred = generate_answer_with_chain_of_thought(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}") print(f"Predicted Answer: {pred.answer}") retrieve = dspy.Retrieve(k=3) topK_passages = retrieve(dev_example.question).passages print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n') for idx, passage in enumerate(topK_passages): print(f'{idx+1}]', passage, '\n') retrieve("When was the first FIFA World Cup held?").passages[0] class GenerateAnswer(dspy.Signature): """Answer questions with short factoid answers.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") class RAG(dspy.Module): def __init__(self, num_passages=3): super().__init__() self.retrieve = dspy.Retrieve(k=num_passages) self.generate_answer = dspy.ChainOfThought(GenerateAnswer) def forward(self, question): context = self.retrieve(question).passages prediction = self.generate_answer(context=context, question=question) return dspy.Prediction(context=context, answer=prediction.answer) from dspy.teleprompt import BootstrapFewShot def validate_context_and_answer(example, pred, trace=None): 
answer_EM = dspy.evaluate.answer_exact_match(example, pred) answer_PM = dspy.evaluate.answer_passage_match(example, pred) return answer_EM and answer_PM teleprompter = BootstrapFewShot(metric=validate_context_and_answer) compiled_rag = teleprompter.compile(RAG(), trainset=trainset) my_question = "What castle did David Gregory inherit?" pred = compiled_rag(my_question) print(f"Question: {my_question}") print(f"Predicted Answer: {pred.answer}") print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in pred.context]}") turbo.inspect_history(n=1) for name, parameter in compiled_rag.named_predictors(): print(name) print(parameter.demos[0]) print() from dspy.evaluate.evaluate import Evaluate evaluate_on_hotpotqa = Evaluate(devset=devset, num_threads=1, display_progress=True, display_table=5) metric = dspy.evaluate.answer_exact_match evaluate_on_hotpotqa(compiled_rag, metric=metric) def gold_passages_retrieved(example, pred, trace=None): gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles'])) found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context])) return gold_titles.issubset(found_titles) compiled_rag_retrieval_score = evaluate_on_hotpotqa(compiled_rag, metric=gold_passages_retrieved) class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context =
dspy.InputField(desc="may contain relevant facts")
dspy.InputField
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('pip', 'install datasets') import datasets ds = datasets.load_dataset("openai_humaneval") ds['test'][0] import dspy, dotenv, os dotenv.load_dotenv(os.path.expanduser("~/.env")) # load OpenAI API key from .env file lm = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=4000) dspy.settings.configure(lm=lm) predictor = dspy.Predict("question -> answer") print(predictor(question="What is the capital of France?")) from dspy import InputField, OutputField, Signature from dspy.functional import TypedPredictor import pydantic class PythonCode(pydantic.BaseModel): code: str @pydantic.field_validator('code') def check_syntax(cls, v): try: compile(v, "<string>", "exec") except SyntaxError as e: raise ValueError(f"Code is not syntactically valid: {e}") return v class CodeSignature(Signature): prompt: str = InputField() test: PythonCode =
InputField()
dspy.InputField
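The HumanEval row above is cut off mid-signature. A hedged completion of that typed-signature pattern: the code output field, its name, and the retry-until-valid behavior of TypedPredictor are assumptions for this sketch, not part of the row:

import pydantic
from dspy import InputField, OutputField, Signature
from dspy.functional import TypedPredictor

class PythonCode(pydantic.BaseModel):
    code: str

    @pydantic.field_validator('code')
    def check_syntax(cls, v):
        compile(v, "<string>", "exec")  # raises on syntactically invalid Python
        return v

class CodeSignature(Signature):
    prompt: str = InputField()
    test: PythonCode = InputField()
    code: PythonCode = OutputField()  # assumed output field; the row ends before defining outputs

generator = TypedPredictor(CodeSignature)  # parses and validates outputs against the annotated types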
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache') get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_LongFormQA_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import EM, normalize_text from dspy.primitives.assertions import assert_transform_module, backtrack_handler get_ipython().run_line_magic('cd', 'dspy/examples/longformqa') from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") print(f"Relevant Wikipedia Titles: {train_example.gold_titles}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") from dsp.utils import deduplicate class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() class GenerateCitedParagraph(dspy.Signature): """Generate a paragraph with citations.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() paragraph = dspy.OutputField(desc="includes citations") class LongFormQA(dspy.Module): def __init__(self, passages_per_hop=3, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph) self.max_hops = max_hops def forward(self, question): context = [] for hop in range(self.max_hops): query = self.generate_query[hop](context=context, question=question).query passages = self.retrieve(query).passages context 
= deduplicate(context + passages) pred = self.generate_cited_paragraph(context=context, question=question) pred = dspy.Prediction(context=context, paragraph=pred.paragraph) return pred class CheckCitationFaithfulness(dspy.Signature): """Verify that the text is based on the provided context.""" context = dspy.InputField(desc="may contain relevant facts") text = dspy.InputField(desc="between 1 to 2 sentences") faithfulness = dspy.OutputField(desc="boolean indicating if text is faithful to context") def citation_faithfulness(example, pred, trace): paragraph, context = pred.paragraph, pred.context citation_dict = extract_text_by_citation(paragraph) if not citation_dict: return False, None context_dict = {str(i): context[i].split(' | ')[1] for i in range(len(context))} faithfulness_results = [] unfaithful_citations = [] check_citation_faithfulness = dspy.ChainOfThought(CheckCitationFaithfulness) for citation_num, texts in citation_dict.items(): if citation_num not in context_dict: continue current_context = context_dict[citation_num] for text in texts: try: result = check_citation_faithfulness(context=current_context, text=text) is_faithful = result.faithfulness.lower() == 'true' faithfulness_results.append(is_faithful) if not is_faithful: unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'context': current_context}) except ValueError as e: faithfulness_results.append(False) unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'error': str(e)}) final_faithfulness = all(faithfulness_results) if not faithfulness_results: return False, None return final_faithfulness, unfaithful_citations def extract_cited_titles_from_paragraph(paragraph, context): cited_indices = [int(m.group(1)) for m in re.finditer(r'\[(\d+)\]\.', paragraph)] cited_indices = [index - 1 for index in cited_indices if index <= len(context)] cited_titles = [context[index].split(' | ')[0] for index in cited_indices] return cited_titles def calculate_recall(example, pred, trace=None): gold_titles = set(example['gold_titles']) found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context)) intersection = gold_titles.intersection(found_cited_titles) recall = len(intersection) / len(gold_titles) if gold_titles else 0 return recall def calculate_precision(example, pred, trace=None): gold_titles = set(example['gold_titles']) found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context)) intersection = gold_titles.intersection(found_cited_titles) precision = len(intersection) / len(found_cited_titles) if found_cited_titles else 0 return precision def answer_correctness(example, pred, trace=None): assert hasattr(example, 'answer'), "Example does not have 'answer'." 
normalized_context = normalize_text(pred.paragraph) if isinstance(example.answer, str): gold_answers = [example.answer] elif isinstance(example.answer, list): gold_answers = example.answer else: raise ValueError("'example.answer' is not string or list.") return 1 if any(normalize_text(answer) in normalized_context for answer in gold_answers) else 0 def evaluate(module): correctness_values = [] recall_values = [] precision_values = [] citation_faithfulness_values = [] for i in range(len(devset)): example = devset[i] try: pred = module(question=example.question) correctness_values.append(answer_correctness(example, pred)) citation_faithfulness_score, _ = citation_faithfulness(None, pred, None) citation_faithfulness_values.append(citation_faithfulness_score) recall = calculate_recall(example, pred) precision = calculate_precision(example, pred) recall_values.append(recall) precision_values.append(precision) except Exception as e: print(f"Failed generation with error: {e}") average_correctness = sum(correctness_values) / len(devset) if correctness_values else 0 average_recall = sum(recall_values) / len(devset) if recall_values else 0 average_precision = sum(precision_values) / len(devset) if precision_values else 0 average_citation_faithfulness = sum(citation_faithfulness_values) / len(devset) if citation_faithfulness_values else 0 print(f"Average Correctness: {average_correctness}") print(f"Average Recall: {average_recall}") print(f"Average Precision: {average_precision}") print(f"Average Citation Faithfulness: {average_citation_faithfulness}") longformqa = LongFormQA() evaluate(longformqa) question = devset[6].question pred = longformqa(question) citation_faithfulness_score, _ = citation_faithfulness(None, pred, None) print(f"Question: {question}") print(f"Predicted Paragraph: {pred.paragraph}") print(f"Citation Faithfulness: {citation_faithfulness_score}") class LongFormQAWithAssertions(dspy.Module): def __init__(self, passages_per_hop=3, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_cited_paragraph =
dspy.ChainOfThought(GenerateCitedParagraph)
dspy.ChainOfThought
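ChainOfThought recurs across these rows. A minimal sketch of what it adds over plain Predict, using the inline string-signature form that also appears in later rows; the context passage and question are illustrative:

import dspy

cot = dspy.ChainOfThought('context, question -> answer')
pred = cot(context=["Castletown River | The town of Dundalk sits on the Castletown River."], question="Kyle Moran was born in the town on what river?")
print(pred.rationale)  # intermediate reasoning field injected by ChainOfThought
print(pred.answer)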
import dspy from dsp.utils import deduplicate from dspy.datasets import HotPotQA from dspy.predict.retry import Retry from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler import os import openai openai.api_key = os.getenv('OPENAI_API_KEY') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] def validate_query_distinction_local(previous_queries, query): """check if query is distinct from previous queries""" if previous_queries == []: return True if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8): return False return True def validate_context_and_answer_and_hops(example, pred, trace=None): if not dspy.evaluate.answer_exact_match(example, pred): return False if not dspy.evaluate.answer_passage_match(example, pred): return False return True def gold_passages_retrieved(example, pred, trace=None): gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles'])) found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context])) return gold_titles.issubset(found_titles) class GenerateAnswer(dspy.Signature): """Answer questions with short factoid answers.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() def all_queries_distinct(prev_queries): query_distinct = True for i, query in enumerate(prev_queries): if validate_query_distinction_local(prev_queries[:i], query) == False: query_distinct = False break return query_distinct class SimplifiedBaleen(dspy.Module): def __init__(self, passages_per_hop=2, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_answer = dspy.ChainOfThought(GenerateAnswer) self.max_hops = max_hops self.passed_suggestions = 0 def forward(self, question): context = [] prev_queries = [question] for hop in range(self.max_hops): query = self.generate_query[hop](context=context, question=question).query prev_queries.append(query) passages = self.retrieve(query).passages context = deduplicate(context + passages) if all_queries_distinct(prev_queries): self.passed_suggestions += 1 pred = self.generate_answer(context=context, question=question) pred = dspy.Prediction(context=context, answer=pred.answer) return pred class SimplifiedBaleenAssertions(dspy.Module): def __init__(self, passages_per_hop=2, max_hops=2): super().__init__() self.generate_query = [
dspy.ChainOfThought(GenerateSearchQuery)
dspy.ChainOfThought
import dspy from dsp.utils import deduplicate from dspy.datasets import HotPotQA from dspy.predict.retry import Retry from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler import os import openai openai.api_key = os.getenv('OPENAI_API_KEY') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] def validate_query_distinction_local(previous_queries, query): """check if query is distinct from previous queries""" if previous_queries == []: return True if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8): return False return True def validate_context_and_answer_and_hops(example, pred, trace=None): if not dspy.evaluate.answer_exact_match(example, pred): return False if not dspy.evaluate.answer_passage_match(example, pred): return False return True def gold_passages_retrieved(example, pred, trace=None): gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles'])) found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context])) return gold_titles.issubset(found_titles) class GenerateAnswer(dspy.Signature): """Answer questions with short factoid answers.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() def all_queries_distinct(prev_queries): query_distinct = True for i, query in enumerate(prev_queries): if validate_query_distinction_local(prev_queries[:i], query) == False: query_distinct = False break return query_distinct class SimplifiedBaleen(dspy.Module): def __init__(self, passages_per_hop=2, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_answer = dspy.ChainOfThought(GenerateAnswer) self.max_hops = max_hops self.passed_suggestions = 0 def forward(self, question): context = [] prev_queries = [question] for hop in range(self.max_hops): query = self.generate_query[hop](context=context, question=question).query prev_queries.append(query) passages = self.retrieve(query).passages context = deduplicate(context + passages) if all_queries_distinct(prev_queries): self.passed_suggestions += 1 pred = self.generate_answer(context=context, question=question) pred = dspy.Prediction(context=context, answer=pred.answer) return pred class SimplifiedBaleenAssertions(dspy.Module): def __init__(self, passages_per_hop=2, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_answer =
dspy.ChainOfThought(GenerateAnswer)
dspy.ChainOfThought
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_QuizGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo =
dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.OpenAI
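The configuration pattern this row completes, shared by most rows here: construct an LM and a retrieval model, then register both on dspy.settings (model, max_tokens, and the ColBERTv2 URL are taken verbatim from the row):

import dspy

turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts, trace=[], temperature=0.7)

dspy.Predict('question -> answer')(question="What is the capital of France?")
turbo.inspect_history(n=1)  # shows the last prompt/completion pair sent to the LM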
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import dspy from dspy.evaluate import Evaluate from dspy.datasets.hotpotqa import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune ports = [7140, 7141, 7142, 7143, 7144, 7145] llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150) colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2, lm=llamaChat) dataset = HotPotQA(train_seed=1, train_size=200, eval_seed=2023, dev_size=1000, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] testset = [x.with_inputs('question') for x in dataset.test] len(trainset), len(devset), len(testset) trainset[0] from dsp.utils.utils import deduplicate class BasicMH(dspy.Module): def __init__(self, passages_per_hop=3): super().__init__() self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_query = [dspy.ChainOfThought("context, question -> search_query") for _ in range(2)] self.generate_answer = dspy.ChainOfThought("context, question -> answer") def forward(self, question): context = [] for hop in range(2): search_query = self.generate_query[hop](context=context, question=question).search_query passages = self.retrieve(search_query).passages context = deduplicate(context + passages) return self.generate_answer(context=context, question=question).copy(context=context) RECOMPILE_INTO_LLAMA_FROM_SCRATCH = False NUM_THREADS = 24 metric_EM = dspy.evaluate.answer_exact_match if RECOMPILE_INTO_LLAMA_FROM_SCRATCH: tp = BootstrapFewShotWithRandomSearch(metric=metric_EM, max_bootstrapped_demos=2, num_threads=NUM_THREADS) basicmh_bs = tp.compile(BasicMH(), trainset=trainset[:50], valset=trainset[50:200]) ensemble = [prog for *_, prog in basicmh_bs.candidate_programs[:4]] for idx, prog in enumerate(ensemble): pass if not RECOMPILE_INTO_LLAMA_FROM_SCRATCH: ensemble = [] for idx in range(4): prog = BasicMH() prog.load(f'multihop_llama213b_{idx}.json') ensemble.append(prog) llama_program = ensemble[0] evaluate_hotpot =
Evaluate(devset=devset[:1000], metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=0)
dspy.evaluate.Evaluate
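The prompt above reloads previously compiled programs from JSON via prog.load(...). A small sketch of that persistence round-trip; save() as the counterpart of load() is an assumption here, and the filename mirrors the row's:

import dspy

class QAModule(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')

    def forward(self, question):
        return self.generate_answer(question=question)

prog = QAModule()  # typically a compiled program whose bootstrapped demos are worth keeping
prog.save('multihop_llama213b_0.json')   # assumed counterpart of prog.load(...) in the row above
restored = QAModule()
restored.load('multihop_llama213b_0.json')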
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install -e $repo_path') get_ipython().system('pip install transformers') import dspy from dspy.evaluate import Evaluate from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150) colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2, lm=llama) train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'), ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'), ('In what year was the star of To Hell and Back born?', '1925'), ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'), ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'), ('Which author is English: John Braine or Studs Terkel?', 'John Braine'), ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')] train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train] dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'), ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'), ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'), ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'), ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'), ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'), ('Kyle Moran was born in the town on what river?', 'Castletown River'), ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'), ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'), ('What year was the father of the Princes in the Tower born?', '1442'), ('What river is near the Crichton Collegiate Church?', 'the River Tyne'), ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'), ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')] dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev] predict =
dspy.Predict('question -> answer')
dspy.Predict
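The simplest entry point in these rows: dspy.Predict with an inline signature, exactly as the completion above uses it (the answer depends on the configured LM):

import dspy

predict = dspy.Predict('question -> answer')  # inline signature: input fields -> output fields
pred = predict(question="What is the capital of Germany?")
print(pred.answer)  # e.g. 'Berlin', given a configured LM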
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache') get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_LongFormQA_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import EM, normalize_text from dspy.primitives.assertions import assert_transform_module, backtrack_handler get_ipython().run_line_magic('cd', 'dspy/examples/longformqa') from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") print(f"Relevant Wikipedia Titles: {train_example.gold_titles}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") from dsp.utils import deduplicate class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() class GenerateCitedParagraph(dspy.Signature): """Generate a paragraph with citations.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() paragraph = dspy.OutputField(desc="includes citations") class LongFormQA(dspy.Module): def __init__(self, passages_per_hop=3, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph) self.max_hops = max_hops def forward(self, question): context = [] for hop in range(self.max_hops): query = self.generate_query[hop](context=context, question=question).query passages = self.retrieve(query).passages context 
= deduplicate(context + passages) pred = self.generate_cited_paragraph(context=context, question=question) pred = dspy.Prediction(context=context, paragraph=pred.paragraph) return pred class CheckCitationFaithfulness(dspy.Signature): """Verify that the text is based on the provided context.""" context = dspy.InputField(desc="may contain relevant facts") text = dspy.InputField(desc="between 1 to 2 sentences") faithfulness = dspy.OutputField(desc="boolean indicating if text is faithful to context") def citation_faithfulness(example, pred, trace): paragraph, context = pred.paragraph, pred.context citation_dict = extract_text_by_citation(paragraph) if not citation_dict: return False, None context_dict = {str(i): context[i].split(' | ')[1] for i in range(len(context))} faithfulness_results = [] unfaithful_citations = [] check_citation_faithfulness = dspy.ChainOfThought(CheckCitationFaithfulness) for citation_num, texts in citation_dict.items(): if citation_num not in context_dict: continue current_context = context_dict[citation_num] for text in texts: try: result = check_citation_faithfulness(context=current_context, text=text) is_faithful = result.faithfulness.lower() == 'true' faithfulness_results.append(is_faithful) if not is_faithful: unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'context': current_context}) except ValueError as e: faithfulness_results.append(False) unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'error': str(e)}) final_faithfulness = all(faithfulness_results) if not faithfulness_results: return False, None return final_faithfulness, unfaithful_citations def extract_cited_titles_from_paragraph(paragraph, context): cited_indices = [int(m.group(1)) for m in re.finditer(r'\[(\d+)\]\.', paragraph)] cited_indices = [index - 1 for index in cited_indices if index <= len(context)] cited_titles = [context[index].split(' | ')[0] for index in cited_indices] return cited_titles def calculate_recall(example, pred, trace=None): gold_titles = set(example['gold_titles']) found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context)) intersection = gold_titles.intersection(found_cited_titles) recall = len(intersection) / len(gold_titles) if gold_titles else 0 return recall def calculate_precision(example, pred, trace=None): gold_titles = set(example['gold_titles']) found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context)) intersection = gold_titles.intersection(found_cited_titles) precision = len(intersection) / len(found_cited_titles) if found_cited_titles else 0 return precision def answer_correctness(example, pred, trace=None): assert hasattr(example, 'answer'), "Example does not have 'answer'." 
normalized_context = normalize_text(pred.paragraph) if isinstance(example.answer, str): gold_answers = [example.answer] elif isinstance(example.answer, list): gold_answers = example.answer else: raise ValueError("'example.answer' is not string or list.") return 1 if any(normalize_text(answer) in normalized_context for answer in gold_answers) else 0 def evaluate(module): correctness_values = [] recall_values = [] precision_values = [] citation_faithfulness_values = [] for i in range(len(devset)): example = devset[i] try: pred = module(question=example.question) correctness_values.append(answer_correctness(example, pred)) citation_faithfulness_score, _ = citation_faithfulness(None, pred, None) citation_faithfulness_values.append(citation_faithfulness_score) recall = calculate_recall(example, pred) precision = calculate_precision(example, pred) recall_values.append(recall) precision_values.append(precision) except Exception as e: print(f"Failed generation with error: {e}") average_correctness = sum(correctness_values) / len(devset) if correctness_values else 0 average_recall = sum(recall_values) / len(devset) if recall_values else 0 average_precision = sum(precision_values) / len(devset) if precision_values else 0 average_citation_faithfulness = sum(citation_faithfulness_values) / len(devset) if citation_faithfulness_values else 0 print(f"Average Correctness: {average_correctness}") print(f"Average Recall: {average_recall}") print(f"Average Precision: {average_precision}") print(f"Average Citation Faithfulness: {average_citation_faithfulness}") longformqa = LongFormQA() evaluate(longformqa) question = devset[6].question pred = longformqa(question) citation_faithfulness_score, _ = citation_faithfulness(None, pred, None) print(f"Question: {question}") print(f"Predicted Paragraph: {pred.paragraph}") print(f"Citation Faithfulness: {citation_faithfulness_score}") class LongFormQAWithAssertions(dspy.Module): def __init__(self, passages_per_hop=3, max_hops=2): super().__init__() self.generate_query = [
dspy.ChainOfThought(GenerateSearchQuery)
dspy.ChainOfThought
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') import dspy turbo = dspy.OpenAI(model='gpt-3.5-turbo') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts) from dspy.datasets import HotPotQA dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] len(trainset), len(devset) train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}") print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}") class BasicQA(dspy.Signature): """Answer questions with short factoid answers.""" question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") generate_answer = dspy.Predict(BasicQA) pred = generate_answer(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Predicted Answer: {pred.answer}") turbo.inspect_history(n=1) generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA) pred = generate_answer_with_chain_of_thought(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}") print(f"Predicted Answer: {pred.answer}") retrieve = dspy.Retrieve(k=3) topK_passages = retrieve(dev_example.question).passages print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n') for idx, passage in enumerate(topK_passages): print(f'{idx+1}]', passage, '\n') retrieve("When was the first FIFA World Cup held?").passages[0] class GenerateAnswer(dspy.Signature): """Answer questions with short factoid answers.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") class RAG(dspy.Module): def __init__(self, num_passages=3): super().__init__() self.retrieve = dspy.Retrieve(k=num_passages) self.generate_answer = dspy.ChainOfThought(GenerateAnswer) def forward(self, question): context = self.retrieve(question).passages prediction = self.generate_answer(context=context, question=question) return dspy.Prediction(context=context, answer=prediction.answer) from dspy.teleprompt import BootstrapFewShot def validate_context_and_answer(example, pred, trace=None): 
answer_EM = dspy.evaluate.answer_exact_match(example, pred) answer_PM = dspy.evaluate.answer_passage_match(example, pred) return answer_EM and answer_PM teleprompter = BootstrapFewShot(metric=validate_context_and_answer) compiled_rag = teleprompter.compile(RAG(), trainset=trainset) my_question = "What castle did David Gregory inherit?" pred = compiled_rag(my_question) print(f"Question: {my_question}") print(f"Predicted Answer: {pred.answer}") print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in pred.context]}") turbo.inspect_history(n=1) for name, parameter in compiled_rag.named_predictors(): print(name) print(parameter.demos[0]) print() from dspy.evaluate.evaluate import Evaluate evaluate_on_hotpotqa = Evaluate(devset=devset, num_threads=1, display_progress=True, display_table=5) metric = dspy.evaluate.answer_exact_match evaluate_on_hotpotqa(compiled_rag, metric=metric) def gold_passages_retrieved(example, pred, trace=None): gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles'])) found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context])) return gold_titles.issubset(found_titles) compiled_rag_retrieval_score = evaluate_on_hotpotqa(compiled_rag, metric=gold_passages_retrieved) class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() from dsp.utils import deduplicate class SimplifiedBaleen(dspy.Module): def __init__(self, passages_per_hop=3, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve = dspy.Retrieve(k=passages_per_hop) self.generate_answer = dspy.ChainOfThought(GenerateAnswer) self.max_hops = max_hops def forward(self, question): context = [] for hop in range(self.max_hops): query = self.generate_query[hop](context=context, question=question).query passages = self.retrieve(query).passages context = deduplicate(context + passages) pred = self.generate_answer(context=context, question=question) return
dspy.Prediction(context=context, answer=pred.answer)
dspy.Prediction
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_TweetGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import deduplicate from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question', 'answer') for x in dataset.train] devset = [x.with_inputs('question', 'answer') for x in dataset.dev] class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() class GenerateTweet(dspy.Signature): """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags.""" question = dspy.InputField() context = dspy.InputField(desc="may contain relevant facts") tweet = dspy.OutputField() class Tweeter(dspy.Module): def __init__(self): super().__init__() self.generate_tweet = dspy.ChainOfThought(GenerateTweet) def forward(self, question, answer): context = [] max_hops=2 passages_per_hop=3 generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] retrieve = dspy.Retrieve(k=passages_per_hop) for hop in range(max_hops): query = generate_query[hop](context=context, question=question).query passages = retrieve(query).passages context = deduplicate(context + passages) generated_tweet = self.generate_tweet(question=question, context=context).tweet return dspy.Prediction(generated_tweet=generated_tweet, context=context) tweeter = Tweeter() def has_no_hashtags(text): return len(re.findall(r"#\w+", text)) == 0 def is_within_length_limit(text, length_limit=280): return len(text) <= length_limit def is_assessment_yes(assessment_answer): """Check if the first word of the assessment answer is 'yes'.""" return 
assessment_answer.split()[0].lower() == 'yes' def has_correct_answer(text, answer): return answer in text class AssessTweet(dspy.Signature): """Assess the quality of a tweet along the specified dimension.""" context = dspy.InputField(desc='ignore if N/A') assessed_text = dspy.InputField() assessment_question = dspy.InputField() assessment_answer = dspy.OutputField(desc="Yes or No") def no_hashtags_metric(gold, pred, trace=None): tweet = pred.generated_tweet no_hashtags = has_no_hashtags(tweet) score = no_hashtags return score def is_correct_metric(gold, pred, trace=None): answer, tweet = gold.answer, pred.generated_tweet correct = has_correct_answer(tweet, answer) score = correct return score def within_length_metric(gold, pred, trace=None): tweet = pred.generated_tweet within_length_limit = is_within_length_limit(tweet, 280) score = within_length_limit return score def engaging_metric(gold, pred, trace=None): tweet = pred.generated_tweet engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging." engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging) engaging = engaging.assessment_answer.split()[0].lower() == 'yes' score = engaging return score def faithful_metric(gold, pred, trace=None): context, tweet = pred.context, pred.generated_tweet faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context." faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful) faithful = faithful.assessment_answer.split()[0].lower() == 'yes' score = faithful return score def overall_metric(gold, pred, trace=None): answer, context, tweet = gold.answer, pred.context, pred.generated_tweet no_hashtags = has_no_hashtags(tweet) within_length_limit = is_within_length_limit(tweet, 280) correct = has_correct_answer(tweet, answer) engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging." faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context." faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful) engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging) engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]] score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0 return score / 5.0 metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric] for metric in metrics: evaluate =
Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
dspy.evaluate.evaluate.Evaluate
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_QuizGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question', 'answer') for x in dataset.train] devset = [x.with_inputs('question', 'answer') for x in dataset.dev] class GenerateAnswerChoices(dspy.Signature): """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question.""" question = dspy.InputField() correct_answer = dspy.InputField() number_of_choices = dspy.InputField() answer_choices = dspy.OutputField(desc='JSON key-value pairs') class QuizAnswerGenerator(dspy.Module): def __init__(self): super().__init__() self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices) def forward(self, question, answer): choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices return dspy.Prediction(choices = choices) number_of_choices = '4' quiz_generator = QuizAnswerGenerator() def format_checker(choice_string): try: choices = json.loads(choice_string) if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()): return True except json.JSONDecodeError: return False return False def is_correct_answer_included(correct_answer, generated_choices): try: choices_dict = json.loads(generated_choices) return correct_answer in choices_dict.values() except json.JSONDecodeError: return False def is_plausibility_yes(assessment_answer): """Check if the first word of the assessment answer is 'yes'.""" return assessment_answer.split()[0].lower() == 'yes' class AssessQuizChoices(dspy.Signature): """Assess the quality of quiz answer choices along specified dimensions.""" question = dspy.InputField() 
answer_choices = dspy.InputField() assessment_question = dspy.InputField() assessment_answer = dspy.OutputField(desc="Yes or No") def format_valid_metric(gold, pred, trace=None): generated_choices = pred.choices format_valid = format_checker(generated_choices) score = format_valid return score def is_correct_metric(gold, pred, trace=None): correct_answer, generated_choices = gold.answer, pred.choices correct_included = is_correct_answer_included(correct_answer, generated_choices) score = correct_included return score def plausibility_metric(gold, pred, trace=None): question, generated_choices = gold.question, pred.choices plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?" plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question) plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes' score = plausibility_result return score def overall_metric(gold, pred, trace=None): question, correct_answer, generated_choices = gold.question, gold.answer, pred.choices format_valid = format_checker(generated_choices) correct_included = is_correct_answer_included(correct_answer, generated_choices) plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?" plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question) plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes' score = (format_valid + correct_included + plausibility_result) / 3.0 if correct_included and format_valid else 0 return score metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric] for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5) evaluate(quiz_generator) example = devset[38] quiz_choices = quiz_generator(question=example.question, answer = example.answer) print(f'Generated Quiz Choices: ', quiz_choices.choices) for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=5) evaluate(quiz_generator) class QuizAnswerGeneratorWithAssertions(dspy.Module): def __init__(self): super().__init__() self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices) def forward(self, question, answer): choice_string = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices dspy.Suggest(format_checker(choice_string), "The format of the answer choices should be in JSON format. Please revise accordingly.", target_module=GenerateAnswerChoices) dspy.Suggest(is_correct_answer_included(answer, choice_string), "The answer choices do not include the correct answer to the question. Please revise accordingly.", target_module=GenerateAnswerChoices) plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?" plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=choice_string, assessment_question=plausibility_question) dspy.Suggest(is_plausibility_yes(plausibility_assessment.assessment_answer), "The answer choices are not plausible distractors or are too easily identifiable as incorrect. 
Please revise to provide more challenging and plausible distractors.", target_module=GenerateAnswerChoices) return dspy.Prediction(choices = choice_string) number_of_choices = '4' quiz_generator_with_assertions = assert_transform_module(QuizAnswerGeneratorWithAssertions().map_named_predictors(Retry), backtrack_handler) metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric] for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5) evaluate(quiz_generator_with_assertions) example = devset[38] quiz_choices = quiz_generator_with_assertions(question=example.question, answer = example.answer) print(f'Generated Quiz Choices: ', quiz_choices.choices) for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=30) evaluate(quiz_generator_with_assertions) teleprompter = BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6) compiled_quiz_generator = teleprompter.compile(student = quiz_generator, teacher = quiz_generator, trainset=trainset, valset=devset[:100]) for metric in metrics: evaluate =
Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
dspy.evaluate.evaluate.Evaluate
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') import dspy turbo = dspy.OpenAI(model='gpt-3.5-turbo') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts) from dspy.datasets import HotPotQA dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] len(trainset), len(devset) train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}") print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}") class BasicQA(dspy.Signature): """Answer questions with short factoid answers.""" question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") generate_answer = dspy.Predict(BasicQA) pred = generate_answer(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Predicted Answer: {pred.answer}") turbo.inspect_history(n=1) generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA) pred = generate_answer_with_chain_of_thought(question=dev_example.question) print(f"Question: {dev_example.question}") print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}") print(f"Predicted Answer: {pred.answer}") retrieve = dspy.Retrieve(k=3) topK_passages = retrieve(dev_example.question).passages print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n') for idx, passage in enumerate(topK_passages): print(f'{idx+1}]', passage, '\n') retrieve("When was the first FIFA World Cup held?").passages[0] class GenerateAnswer(dspy.Signature): """Answer questions with short factoid answers.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") class RAG(dspy.Module): def __init__(self, num_passages=3): super().__init__() self.retrieve = dspy.Retrieve(k=num_passages) self.generate_answer = dspy.ChainOfThought(GenerateAnswer) def forward(self, question): context = self.retrieve(question).passages prediction = self.generate_answer(context=context, question=question) return dspy.Prediction(context=context, answer=prediction.answer) from dspy.teleprompt import BootstrapFewShot def validate_context_and_answer(example, pred, trace=None): 
answer_EM = dspy.evaluate.answer_exact_match(example, pred) answer_PM = dspy.evaluate.answer_passage_match(example, pred) return answer_EM and answer_PM teleprompter = BootstrapFewShot(metric=validate_context_and_answer) compiled_rag = teleprompter.compile(RAG(), trainset=trainset) my_question = "What castle did David Gregory inherit?" pred = compiled_rag(my_question) print(f"Question: {my_question}") print(f"Predicted Answer: {pred.answer}") print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in pred.context]}") turbo.inspect_history(n=1) for name, parameter in compiled_rag.named_predictors(): print(name) print(parameter.demos[0]) print() from dspy.evaluate.evaluate import Evaluate evaluate_on_hotpotqa = Evaluate(devset=devset, num_threads=1, display_progress=True, display_table=5) metric = dspy.evaluate.answer_exact_match evaluate_on_hotpotqa(compiled_rag, metric=metric) def gold_passages_retrieved(example, pred, trace=None): gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles'])) found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context])) return gold_titles.issubset(found_titles) compiled_rag_retrieval_score = evaluate_on_hotpotqa(compiled_rag, metric=gold_passages_retrieved) class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() from dsp.utils import deduplicate class SimplifiedBaleen(dspy.Module): def __init__(self, passages_per_hop=3, max_hops=2): super().__init__() self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] self.retrieve =
dspy.Retrieve(k=passages_per_hop)
dspy.Retrieve
import dspy from dsp.utils import deduplicate from dspy.datasets import HotPotQA from dspy.predict.retry import Retry from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler import os import openai openai.api_key = os.getenv('OPENAI_API_KEY') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] def validate_query_distinction_local(previous_queries, query): """check if query is distinct from previous queries""" if previous_queries == []: return True if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8): return False return True def validate_context_and_answer_and_hops(example, pred, trace=None): if not dspy.evaluate.answer_exact_match(example, pred): return False if not
dspy.evaluate.answer_passage_match(example, pred)
dspy.evaluate.answer_passage_match
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_TweetGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import deduplicate from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question', 'answer') for x in dataset.train] devset = [x.with_inputs('question', 'answer') for x in dataset.dev] class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() class GenerateTweet(dspy.Signature): """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags.""" question = dspy.InputField() context = dspy.InputField(desc="may contain relevant facts") tweet = dspy.OutputField() class Tweeter(dspy.Module): def __init__(self): super().__init__() self.generate_tweet = dspy.ChainOfThought(GenerateTweet) def forward(self, question, answer): context = [] max_hops=2 passages_per_hop=3 generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] retrieve = dspy.Retrieve(k=passages_per_hop) for hop in range(max_hops): query = generate_query[hop](context=context, question=question).query passages = retrieve(query).passages context = deduplicate(context + passages) generated_tweet = self.generate_tweet(question=question, context=context).tweet return dspy.Prediction(generated_tweet=generated_tweet, context=context) tweeter = Tweeter() def has_no_hashtags(text): return len(re.findall(r"#\w+", text)) == 0 def is_within_length_limit(text, length_limit=280): return len(text) <= length_limit def is_assessment_yes(assessment_answer): """Check if the first word of the assessment answer is 'yes'.""" return 
assessment_answer.split()[0].lower() == 'yes' def has_correct_answer(text, answer): return answer in text class AssessTweet(dspy.Signature): """Assess the quality of a tweet along the specified dimension.""" context = dspy.InputField(desc='ignore if N/A') assessed_text = dspy.InputField() assessment_question = dspy.InputField() assessment_answer = dspy.OutputField(desc="Yes or No") def no_hashtags_metric(gold, pred, trace=None): tweet = pred.generated_tweet no_hashtags = has_no_hashtags(tweet) score = no_hashtags return score def is_correct_metric(gold, pred, trace=None): answer, tweet = gold.answer, pred.generated_tweet correct = has_correct_answer(tweet, answer) score = correct return score def within_length_metric(gold, pred, trace=None): tweet = pred.generated_tweet within_length_limit = is_within_length_limit(tweet, 280) score = within_length_limit return score def engaging_metric(gold, pred, trace=None): tweet = pred.generated_tweet engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging." engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging) engaging = engaging.assessment_answer.split()[0].lower() == 'yes' score = engaging return score def faithful_metric(gold, pred, trace=None): context, tweet = pred.context, pred.generated_tweet faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context." faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful) faithful = faithful.assessment_answer.split()[0].lower() == 'yes' score = faithful return score def overall_metric(gold, pred, trace=None): answer, context, tweet = gold.answer, pred.context, pred.generated_tweet no_hashtags = has_no_hashtags(tweet) within_length_limit = is_within_length_limit(tweet, 280) correct = has_correct_answer(tweet, answer) engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging." faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context." faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful) engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging) engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]] score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0 return score / 5.0 metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric] for metric in metrics: evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5) evaluate(tweeter) example = devset[10] tweet = tweeter(question=example.question, answer = example.answer) print(f'Generated Tweet: ', tweet.generated_tweet) tweet.context for metric in metrics: evaluate =
Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
dspy.evaluate.evaluate.Evaluate
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import pkg_resources try: # When on Colab, let's install pyserini, Pytorch, and Faiss import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') get_ipython().run_line_magic('cd', '$repo_path') get_ipython().system('pip install -e .') if not "pyserini" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install pyserini') if not "torch" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install torch') if not "faiss-cpu" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install faiss-cpu') except: repo_path = '.' if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') if repo_path not in sys.path: sys.path.append(repo_path) import dspy pys_ret_prebuilt = dspy.Pyserini(index='beir-v1.0.0-nfcorpus.contriever-msmarco', query_encoder='facebook/contriever-msmarco', id_field='_id', text_fields=['title', 'text']) dspy.settings.configure(rm=pys_ret_prebuilt) example_question = "How Curry Can Kill Cancer Cells" retrieve = dspy.Retrieve(k=3) topK_passages = retrieve(example_question).passages print(f"Top {retrieve.k} passages for question: {example_question} \n", '-' * 30, '\n') for idx, passage in enumerate(topK_passages): print(f'{idx+1}]', passage, '\n') get_ipython().system('wget https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/nfcorpus.zip -P collections') get_ipython().system('unzip collections/nfcorpus.zip -d collections') get_ipython().system('python -m pyserini.encode input --corpus collections/nfcorpus/corpus.jsonl --fields title text output --embeddings indexes/faiss.nfcorpus.contriever-msmarco --to-faiss encoder --encoder facebook/contriever-msmarco --device cuda:0 --pooling mean --fields title text') from datasets import load_dataset dataset = load_dataset(path='json', data_files='collections/nfcorpus/corpus.jsonl', split='train') pys_ret_local = dspy.Pyserini(index='indexes/faiss.nfcorpus.contriever-msmarco', query_encoder='facebook/contriever-msmarco', dataset=dataset, id_field='_id', text_fields=['title', 'text']) dspy.settings.configure(rm=pys_ret_local) dev_example = "How Curry Can Kill Cancer Cells" retrieve =
dspy.Retrieve(k=3)
dspy.Retrieve
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('pip', 'install datasets') import datasets ds = datasets.load_dataset("openai_humaneval") ds['test'][0] import dspy, dotenv, os dotenv.load_dotenv(os.path.expanduser("~/.env")) # load OpenAI API key from .env file lm =
dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=4000)
dspy.OpenAI
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache') get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_LongFormQA_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import EM, normalize_text from dspy.primitives.assertions import assert_transform_module, backtrack_handler get_ipython().run_line_magic('cd', 'dspy/examples/longformqa') from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question') for x in dataset.train] devset = [x.with_inputs('question') for x in dataset.dev] train_example = trainset[0] print(f"Question: {train_example.question}") print(f"Answer: {train_example.answer}") print(f"Relevant Wikipedia Titles: {train_example.gold_titles}") dev_example = devset[18] print(f"Question: {dev_example.question}") print(f"Answer: {dev_example.answer}") print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}") from dsp.utils import deduplicate class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question =
dspy.InputField()
dspy.InputField
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import pkg_resources try: # When on Colab, let's install pyserini, Pytorch, and Faiss import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') get_ipython().run_line_magic('cd', '$repo_path') get_ipython().system('pip install -e .') if not "pyserini" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install pyserini') if not "torch" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install torch') if not "faiss-cpu" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install faiss-cpu') except: repo_path = '.' if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') if repo_path not in sys.path: sys.path.append(repo_path) import dspy pys_ret_prebuilt = dspy.Pyserini(index='beir-v1.0.0-nfcorpus.contriever-msmarco', query_encoder='facebook/contriever-msmarco', id_field='_id', text_fields=['title', 'text']) dspy.settings.configure(rm=pys_ret_prebuilt) example_question = "How Curry Can Kill Cancer Cells" retrieve = dspy.Retrieve(k=3) topK_passages = retrieve(example_question).passages print(f"Top {retrieve.k} passages for question: {example_question} \n", '-' * 30, '\n') for idx, passage in enumerate(topK_passages): print(f'{idx+1}]', passage, '\n') get_ipython().system('wget https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/nfcorpus.zip -P collections') get_ipython().system('unzip collections/nfcorpus.zip -d collections') get_ipython().system('python -m pyserini.encode input --corpus collections/nfcorpus/corpus.jsonl --fields title text output --embeddings indexes/faiss.nfcorpus.contriever-msmarco --to-faiss encoder --encoder facebook/contriever-msmarco --device cuda:0 --pooling mean --fields title text') from datasets import load_dataset dataset = load_dataset(path='json', data_files='collections/nfcorpus/corpus.jsonl', split='train') pys_ret_local = dspy.Pyserini(index='indexes/faiss.nfcorpus.contriever-msmarco', query_encoder='facebook/contriever-msmarco', dataset=dataset, id_field='_id', text_fields=['title', 'text'])
dspy.settings.configure(rm=pys_ret_local)
dspy.settings.configure
get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache') import dspy get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy') import dspy from dspy.evaluate import Evaluate from dspy.datasets.hotpotqa import HotPotQA from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150) colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2, lm=llama) train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'), ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'), ('In what year was the star of To Hell and Back born?', '1925'), ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'), ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'), ('Which author is English: John Braine or Studs Terkel?', 'John Braine'), ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')] train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train] dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. 
Doctorow'), ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'), ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'), ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'), ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'), ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'), ('Kyle Moran was born in the town on what river?', 'Castletown River'), ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'), ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'), ('What year was the father of the Princes in the Tower born?', '1442'), ('What river is near the Crichton Collegiate Church?', 'the River Tyne'), ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'), ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')] dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev] predict = dspy.Predict('question -> answer') predict(question="What is the capital of Germany?") class CoT(dspy.Module): # let's define a new module def __init__(self): super().__init__() self.generate_answer = dspy.ChainOfThought('question -> answer') def forward(self, question): return self.generate_answer(question=question) # here we use the module metric_EM = dspy.evaluate.answer_exact_match teleprompter = BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2) cot_compiled = teleprompter.compile(CoT(), trainset=train) cot_compiled("What is the capital of Germany?") llama.inspect_history(n=1) NUM_THREADS = 32 evaluate_hotpot = Evaluate(devset=dev, metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=15) evaluate_hotpot(cot_compiled) class RAG(dspy.Module): def __init__(self, num_passages=3): super().__init__() self.retrieve = dspy.Retrieve(k=num_passages) self.generate_query = dspy.ChainOfThought("question -> search_query") self.generate_answer =
dspy.ChainOfThought("context, question -> answer")
dspy.ChainOfThought
get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache') get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/') get_ipython().system('git checkout master') get_ipython().run_line_magic('cd', '..') import os repo_clone_path = '/content/DSPy_TweetGen_Cache' if not os.access('/content', os.W_OK): repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache') os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') import sys import os import regex as re import json try: # When on google Colab, let's clone the notebook so we download the cache. import google.colab repo_path = 'dspy' get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path') except: repo_path = '.' if repo_path not in sys.path: sys.path.append(repo_path) import pkg_resources # Install the package if it's not installed if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}: get_ipython().system('pip install -U pip') get_ipython().system('pip install dspy-ai') get_ipython().system('pip install openai~=0.28.1') get_ipython().system('pip install -e $repo_path') import dspy from dspy.predict import Retry from dspy.datasets import HotPotQA from dspy.teleprompt import BootstrapFewShotWithRandomSearch from dsp.utils import deduplicate from dspy.evaluate.evaluate import Evaluate from dspy.primitives.assertions import assert_transform_module, backtrack_handler colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(rm=colbertv2_wiki17_abstracts) turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True) trainset = [x.with_inputs('question', 'answer') for x in dataset.train] devset = [x.with_inputs('question', 'answer') for x in dataset.dev] class GenerateSearchQuery(dspy.Signature): """Write a simple search query that will help answer a complex question.""" context = dspy.InputField(desc="may contain relevant facts") question = dspy.InputField() query = dspy.OutputField() class GenerateTweet(dspy.Signature): """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags.""" question = dspy.InputField() context = dspy.InputField(desc="may contain relevant facts") tweet = dspy.OutputField() class Tweeter(dspy.Module): def __init__(self): super().__init__() self.generate_tweet = dspy.ChainOfThought(GenerateTweet) def forward(self, question, answer): context = [] max_hops=2 passages_per_hop=3 generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] retrieve = dspy.Retrieve(k=passages_per_hop) for hop in range(max_hops): query = generate_query[hop](context=context, question=question).query passages = retrieve(query).passages context = deduplicate(context + passages) generated_tweet = self.generate_tweet(question=question, context=context).tweet return dspy.Prediction(generated_tweet=generated_tweet, context=context) tweeter = Tweeter() def has_no_hashtags(text): return len(re.findall(r"#\w+", text)) == 0 def is_within_length_limit(text, length_limit=280): return len(text) <= length_limit def is_assessment_yes(assessment_answer): """Check if the first word of the assessment answer is 'yes'.""" return 
assessment_answer.split()[0].lower() == 'yes' def has_correct_answer(text, answer): return answer in text class AssessTweet(dspy.Signature): """Assess the quality of a tweet along the specified dimension.""" context =
dspy.InputField(desc='ignore if N/A')
dspy.InputField