from diffusers import StableDiffusionPipeline
from diffusers import StableDiffusionImg2ImgPipeline
from diffusers import AutoencoderKL, UNet2DConditionModel
import gradio as gr
import torch
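
# Fine-tuned Stable Diffusion checkpoints offered in the demo.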
models = [
    "nitrosocke/Arcane-Diffusion",
    "nitrosocke/archer-diffusion",
    "nitrosocke/elden-ring-diffusion",
    "nitrosocke/spider-verse-diffusion",
    "nitrosocke/modern-disney-diffusion",
    "hakurei/waifu-diffusion",
    "lambdalabs/sd-pokemon-diffusers",
    "yuk/fuyuko-waifu-diffusion",
    "AstraliteHeart/pony-diffusion",
    "nousr/robo-diffusion",
    "DGSpitzer/Cyberpunk-Anime-Diffusion",
    "sd-dreambooth-library/herge-style"
]
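
# Style trigger prefix expected by each checkpoint; it is prepended to the user
# prompt. An empty string means no prefix is added for that model.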
prompt_prefixes = {
    models[0]: "arcane style ",
    models[1]: "archer style ",
    models[2]: "elden ring style ",
    models[3]: "spiderverse style ",
    models[4]: "modern disney style ",
    models[5]: "",
    models[6]: "",
    models[7]: "",
    models[8]: "",
    models[9]: "",
    models[10]: "dgs illustration style ",
    models[11]: "herge_style ",
}
current_model = models[0]
pipes = []
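
# One VAE (taken from the default model) is shared by every pipeline; each
# checkpoint gets its own UNet plus a text-to-image and an image-to-image pipeline.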
vae = AutoencoderKL.from_pretrained(current_model, subfolder="vae", torch_dtype=torch.float16)
for model in models:
    unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet", torch_dtype=torch.float16)
    pipe = StableDiffusionPipeline.from_pretrained(model, unet=unet, vae=vae, torch_dtype=torch.float16)
    pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model, unet=unet, vae=vae, torch_dtype=torch.float16)
    pipes.append({"name": model, "pipeline": pipe, "pipeline_i2i": pipe_i2i})
device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
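
# UI entry point: dispatches to image-to-image when an input image is provided,
# otherwise to text-to-image.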
def inference(model, img, strength, prompt, neg_prompt, guidance, steps, width, height, seed):
    # Seeded generator for reproducible results; build it on the CPU when no GPU is available.
    gen_device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(gen_device).manual_seed(seed) if seed != 0 else None
    if img is not None:
        return img_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
    else:
        return txt_to_img(model, prompt, neg_prompt, guidance, steps, width, height, generator)
def txt_to_img(model, prompt, neg_prompt, guidance, steps, width, height, generator=None):
    global current_model
    global pipe
    # Move the previously active pipeline off the GPU when switching models.
    if model != current_model:
        current_model = model
        pipe = pipe.to("cpu")
    # Always select the text-to-image pipeline belonging to the requested model.
    for pipe_dict in pipes:
        if pipe_dict["name"] == current_model:
            pipe = pipe_dict["pipeline"]
    if torch.cuda.is_available():
        pipe = pipe.to("cuda")

    prompt = prompt_prefixes[current_model] + prompt
    image = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator).images[0]
    return image
def img_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
    global current_model
    global pipe
    # Move the previously active pipeline off the GPU when switching models.
    if model != current_model:
        current_model = model
        pipe = pipe.to("cpu")
    # Always select the image-to-image pipeline belonging to the requested model.
    for pipe_dict in pipes:
        if pipe_dict["name"] == current_model:
            pipe = pipe_dict["pipeline_i2i"]
    if torch.cuda.is_available():
        pipe = pipe.to("cuda")

    prompt = prompt_prefixes[current_model] + prompt
    # Scale the input image to fit within the requested width/height, preserving aspect ratio.
    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)))
    image = pipe(
        prompt,
        negative_prompt=neg_prompt,
        init_image=img,
        num_inference_steps=int(steps),
        strength=strength,
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator).images[0]
    return image
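
# Plain CSS for the header card, injected into the page via gr.Blocks(css=...).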
css = """
.finetuned-diffusion-div {
text-align: center;
max-width: 700px;
margin: 0 auto;
}
.finetuned-diffusion-div div {
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
}
.finetuned-diffusion-div div h1 {
font-weight: 900;
margin-bottom: 7px;
}
.finetuned-diffusion-div p {
margin-bottom: 10px;
font-size: 94%;
}
.finetuned-diffusion-div p a {
text-decoration: underline;
}
"""
with gr.Blocks(css=css) as demo:
    gr.HTML(
        """
        <div class="finetuned-diffusion-div">
          <div>
            <h1>Finetuned Diffusion</h1>
          </div>
          <p>
            Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
            <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spiderverse</a>, <a href="https://huggingface.co/nitrosocke/modern-disney-diffusion">Modern Disney</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokemon</a>, <a href="https://huggingface.co/yuk/fuyuko-waifu-diffusion">Fuyuko Waifu</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony</a>, <a href="https://huggingface.co/sd-dreambooth-library/herge-style">Hergé (Tintin)</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a>
          </p>
        </div>
        """
    )
    with gr.Row():
        with gr.Column():
            model = gr.Dropdown(label="Model", choices=models, value=models[0])
            prompt = gr.Textbox(label="Prompt", placeholder="Style prefix is applied automatically")
            with gr.Tab("Options"):
                neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
                guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                steps = gr.Slider(label="Steps", value=50, maximum=100, minimum=2)
                width = gr.Slider(label="Width", value=512, maximum=1024, minimum=64)
                height = gr.Slider(label="Height", value=512, maximum=1024, minimum=64)
                seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
            with gr.Tab("Image to image"):
                image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
        with gr.Column():
            image_out = gr.Image(height=512)
            run = gr.Button(value="Run")
            gr.Markdown(f"Running on: {device}")

    inputs = [model, image, strength, prompt, neg_prompt, guidance, steps, width, height, seed]
    prompt.submit(inference, inputs=inputs, outputs=image_out)
    run.click(inference, inputs=inputs, outputs=image_out)

    gr.Examples([
        [models[0], "jason bateman disassembling the demon core", 7.5, 50],
        [models[3], "portrait of dwayne johnson", 7.0, 75],
        [models[4], "portrait of a beautiful alyx vance half life", 10, 50],
        [models[5], "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7, 45],
        [models[4], "fantasy portrait painting, digital art", 4, 30],
    ], [model, prompt, guidance, steps], image_out, txt_to_img, cache_examples=False)

    gr.Markdown('''
Models by [@nitrosocke](https://huggingface.co/nitrosocke), [@Helixngc7293](https://twitter.com/DGSpitzer) and others. ❤️<br>
Space by: [![Twitter Follow](https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social)](https://twitter.com/hahahahohohe)
![visitors](https://visitor-badge.glitch.me/badge?page_id=anzorq.finetuned_diffusion)
''')
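
# Queue incoming requests so long generations and concurrent users are handled in order, then launch.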
demo.queue()
demo.launch()