# toaru-xl-model / app.py
import os
import torch
import random
import tempfile
import gradio as gr
import spaces
import httpimport
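# The `pipeline` module is not vendored in this repo: httpimport fetches it at
# runtime from whatever location the MODULE_URL environment variable points to.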
with httpimport.remote_repo(os.getenv("MODULE_URL")):
    import pipeline

pipe, pipe2 = pipeline.get_pipeline_initialize()
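# get_pipeline_initialize() returns the two model variants that the UI radio
# below selects between as "model-v1" and "model-v2".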
theme = gr.themes.Base(font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'])
device = "cuda"
pipe = pipe.to(device)
pipe2 = pipe2.to(device)
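# Quality tags appended to every prompt, and the default negative prompt
# applied on the CFG branch.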
PRESET_Q = "year_2022, best quality, high quality, very aesthetic"
NEGATIVE_PROMPT = "lowres, worst quality, displeasing, bad anatomy, text, error, extra digit, cropped, error, fewer, extra, missing, worst quality, jpeg artifacts, censored, ai-generated worst quality displeasing, bad quality"
@spaces.GPU
def run(prompt, radio="model-v2", preset=PRESET_Q, h=1216, w=832, negative_prompt=NEGATIVE_PROMPT, guidance_scale=4.0, randomize_seed=True, seed=42, progress=gr.Progress(track_tqdm=True)):
    # Unconditional generation: both prompts blank. Checked before the quality
    # preset is appended, which would otherwise make the prompt non-empty.
    unconditional = not prompt.strip() and not (negative_prompt and negative_prompt.strip())
    prompt = prompt.strip() + ", " + preset.strip()
    negative_prompt = negative_prompt.strip() if negative_prompt and negative_prompt.strip() else None
    print(f"Initial seed for prompt `{prompt}`", seed)
    if randomize_seed:
        # 9007199254740991 == 2**53 - 1, the largest integer the JavaScript
        # frontend can represent exactly.
        seed = random.randint(0, 9007199254740991)
    if unconditional:
        guidance_scale = 0.0
    generator = torch.Generator(device="cuda").manual_seed(seed)
    # Both pipelines share the same sampling settings; only the weights differ.
    if radio == "model-v1":
        image = pipe(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=25).images[0]
    else:
        image = pipe2(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=25).images[0]
    # Save to a temp file so gr.Image(type="filepath") can serve the result.
    with tempfile.NamedTemporaryFile(suffix=".webp", delete=False) as tmpfile:
        image.save(tmpfile, "webp", quality=95)
    return tmpfile.name, seed
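# Minimal smoke test bypassing the UI (assumes a CUDA device and MODULE_URL
# are available; not run by default):
#     path, used_seed = run("1girl, rurudo", radio="model-v2", randomize_seed=False)
#     print(path, used_seed)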
with gr.Blocks(theme=theme) as demo:
    gr.Markdown('''# SDXL Experiments
Just a simple demo for some SDXL model.''')
    with gr.Row():
        with gr.Column():
            with gr.Group():
                with gr.Row():
                    prompt = gr.Textbox(show_label=False, scale=5, value="1girl, rurudo", placeholder="Your prompt", info="Leave blank to test unconditional generation")
                    button = gr.Button("Generate", min_width=120)
                preset = gr.Textbox(show_label=False, scale=5, value=PRESET_Q, info="Quality presets")
            radio = gr.Radio(["model-v2", "model-v1"], value="model-v2", label="Choose the inference model")
            with gr.Row():
                height = gr.Slider(label="Height", value=1216, minimum=512, maximum=2560, step=64)
                width = gr.Slider(label="Width", value=832, minimum=512, maximum=2560, step=64)
            guidance_scale = gr.Number(label="CFG Guidance Scale", info="The guidance scale for CFG; ignored if no prompt is entered (unconditional generation)", value=4.0)
            negative_prompt = gr.Textbox(label="Negative prompt", value=NEGATIVE_PROMPT, info="Only applied to the CFG branch; leave blank for unconditional generation")
            seed = gr.Number(label="Seed", value=42, info="Seed for the random number generator")
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Column():
            output = gr.Image(type="filepath", interactive=False)
    gr.Examples(
        fn=run,
        examples=[
            r"mayano_top_gun_\(umamusume\), 1girl, rurudo",
            "sho (sho lwlw),[[[ohisashiburi]]],fukuro daizi,tianliang duohe fangdongye,[daidai ookami],year_2023, (wariza), depth of field, official_art",
        ],
        inputs=prompt,
        outputs=[output, seed],
        cache_examples="lazy",
    )
    # Trigger generation from both the button and Enter in the prompt box.
    gr.on(
        triggers=[button.click, prompt.submit],
        fn=run,
        inputs=[prompt, radio, preset, height, width, negative_prompt, guidance_scale, randomize_seed, seed],
        outputs=[output, seed],
    )
if __name__ == "__main__":
    demo.launch(share=True)