# Hugging Face Space: Illustrated Narrative Device
# (scraped page-header text "Spaces: / Runtime error" converted to comments)
import os

import numpy as np
import torch
import gradio as gr
import openai
from diffusers import StableDiffusionPipeline
from torch import autocast

# Load the Stable Diffusion pipeline. The Hugging Face auth token is read
# from the environment (set as a Space secret named 'hugging-token');
# .get() returns None if the secret is missing, letting from_pretrained
# fall back to anonymous access.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    use_auth_token=os.environ.get('hugging-token'),
)

# Inference runs on CPU (no GPU on the free Space tier).
device = "cpu"
pipe = pipe.to(device)

# OpenAI API key, also supplied as a Space secret. Indexing (not .get())
# is deliberate: the app is useless without it, so fail fast with KeyError.
openai.api_key = os.environ['openai-key']
def generateStory(theme1, theme2):
    """Generate the opening paragraph of a story weaving two themes together.

    Asks GPT-3 (text-davinci-002) for the paragraph, then runs OpenAI's
    content-filter model over the result. Label "2" means the filter
    flagged the text as sensitive/unsafe, in which case a retry message
    is returned instead of the story.

    Args:
        theme1: First theme to integrate into the story.
        theme2: Second theme to integrate into the story.

    Returns:
        The generated opening paragraph, or 'Please generate again' when
        the content filter flags the output.
    """
    prompt_text = "Write the first paragraph of a story integrates the themes \"{}\" and \"{}\" in a creative way in the style of Kurt Vonnegut.\n\nFirst paragraph of story:\n\n".format(theme1, theme2)
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt=prompt_text,
        temperature=0.7,
        max_tokens=250,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    story = response["choices"][0]["text"]

    # Classify the generated text with OpenAI's content filter model.
    filter_response = openai.Completion.create(
        model="content-filter-alpha",
        prompt="<|endoftext|>" + story + "\n--\nLabel:",
        temperature=0,
        max_tokens=1,
        top_p=0,
        logprobs=10,
    )
    output_label = filter_response["choices"][0]["text"]

    if output_label == "2":
        # Flagged as unsafe -- show a retry message instead of the text.
        story = 'Please generate again'

    # Completions typically start with a blank line; drop it for display.
    if story.startswith('\n\n'):
        story = story[2:]
    return story
def illustratedStory(story):
    """Create an illustration for *story* with Stable Diffusion.

    First asks GPT-3 to rewrite the story as an illustration caption
    (few-shot prompt containing one worked example), then feeds that
    caption to the diffusion pipeline. When the story is the
    content-filter placeholder text, skips both models and returns a
    plain white 100x100 image instead.

    Args:
        story: The story text to illustrate.

    Returns:
        A PIL image from the pipeline, or a white numpy uint8 array
        placeholder (Gradio accepts either).
    """
    if story != 'Please generate again':
        illustration_response = openai.Completion.create(
            model="text-davinci-002",
            prompt="Transform the following story into a caption of an accompanying illustration. Start with 'Beautiful digital illustration of':\n\nStory:\n\nI stand at the edge of the Blue Mountains and gaze out at the vastness before me. It's a beautiful day, and the sun is shining. I can see for miles and miles, and it feels like I'm standing at the edge of the world. I'm here with the person I love, and we're about to embark on a great adventure. I can't wait to explore every inch of this place with them.\n\nIllustration caption:\n\nBeautiful digital illustration of two people standing by the edge of a mountain holding hands looking out\n\nStory:\n\n{}\n\nIllustration caption:".format(story),
            temperature=0.7,
            max_tokens=256,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0,
        )
        image_prompt = illustration_response["choices"][0]["text"]
        # image is in PIL format (https://pillow.readthedocs.io/en/stable/).
        image = pipe(image_prompt).images[0]
    else:
        # Placeholder: solid white 100x100 RGB image.
        image = np.zeros([100, 100, 3], dtype=np.uint8)
        image.fill(255)
    return image
'''
demo = gr.Interface(
    fn=themes,
    inputs=["text", "text"],
    outputs=["text", "image"],
)
demo.launch()
'''
def continueStory(inputStory):
    """Extend an existing story with a GPT-3 continuation.

    The current story text is used directly as the completion prompt.
    The continuation is passed through OpenAI's content filter; if it is
    flagged (label "2") the continuation is replaced with a retry message.

    Args:
        inputStory: The story so far (user-editable textbox contents).

    Returns:
        The original story with the (filtered) continuation appended.
    """
    prompt_text = inputStory
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt=prompt_text,
        temperature=0.7,
        max_tokens=250,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    story = response["choices"][0]["text"]

    # Classify the generated continuation with OpenAI's content filter.
    filter_response = openai.Completion.create(
        model="content-filter-alpha",
        prompt="<|endoftext|>" + story + "\n--\nLabel:",
        temperature=0,
        max_tokens=1,
        top_p=0,
        logprobs=10,
    )
    output_label = filter_response["choices"][0]["text"]

    if output_label == "2":
        # Flagged as unsafe -- append a retry message instead.
        story = 'Please generate again'

    # Completions typically start with a blank line; drop it before joining.
    if story.startswith('\n\n'):
        story = story[2:]
    return inputStory + story
'''
demo = gr.Interface(
    fn=themes,
    inputs=["text", "text"],
    outputs=["text", "image"],
)
demo.launch()
'''
# ---------------------------------------------------------------------------
# Gradio UI: two theme inputs -> editable story -> continue / illustrate.
# ---------------------------------------------------------------------------
with gr.Blocks(css='''
.h1 {
font-family: HK Grotesk;
font-style: normal;
font-weight: bold;
font-size: 100px;
line-height: 105%;
margin: 0;
}
''') as demo:
    # Fixed the style attribute on the tagline <p>: the closing quote was
    # missing ("font-size: 70%;>), which broke the attribute in the browser.
    title = gr.HTML(
        """
        <div style="text-align: center; margin: 0;">
        <div style="
        display: inline-flex;
        align-items: center;
        gap: 0.8rem;
        font-size: 1.75rem;
        ">
        <h1 style="font-weight: 900; margin-bottom: 7px;">
        Illustrated Narrative Device
        </h1>
        </div>
        <p style="margin-bottom: 10px; font-size: 94%;">
        A playful AI co-writer!
        </p>
        <br>
        <p style="font-size: 70%;">Generate the beginning of a story by writing two themes, then edit, add to it, extend it and illustrate it! </p>
        </div>
        """)
    with gr.Row():
        theme1 = gr.Textbox(label='Theme 1', elem_id='theme')
        theme2 = gr.Textbox(label='Theme 2', elem_id='theme')
    b1 = gr.Button("Generate starting paragraph", elem_id="generate-btn")
    story_output = gr.Textbox(label='Story (pro tip: you can edit this!)')
    with gr.Row():
        b3 = gr.Button("Continue Story", elem_id="continue-btn")
        b2 = gr.Button("Illustrate Story", elem_id="illustrated-btn")
    gr.HTML('<p>Illustrations can take up to 10 minutes to generate. See it as an exercise in patience, amidst a sea of immediacy!</p>')
    with gr.Row():
        illustration = gr.Image(label='Illustration')
    # Fixed mis-nested <u><b>...</u></b> tag pairs in the two footers below.
    gr.HTML('<div style="text-align: center; max-width: 650px; margin: 0 auto;"><p style="margin-bottom: 10px; font-size: 94%;">Compute credits are expensive. Please help me keep this experiment running by buying me a coffee <a href="https://www.buymeacoffee.com/jrodolfoocG"> <u><b>here</b></u> :) </a></p></div><br>')
    gr.HTML('<div style="text-align: center; max-width: 650px; margin: 0 auto;"><p style="margin-bottom: 10px; font-size: 70%;">Built with GPT-3, Stable Diffusion, the Diffusers library and Gradio, by <a href="https://research.rodolfoocampo.com"><u><b>Rodolfo Ocampo</b></u></a></p></div>')

    # Wire the buttons to their handlers. continueStory reads AND writes the
    # story textbox so repeated clicks keep extending the same text.
    b1.click(generateStory, inputs=[theme1, theme2], outputs=[story_output])
    b2.click(illustratedStory, inputs=[story_output], outputs=[illustration])
    b3.click(continueStory, inputs=[story_output], outputs=[story_output])

demo.launch(debug=True)