# AlPrompt / app.py (AlStable Hugging Face Space)
import gradio
import accelerate  # imported but not used directly in this script
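# Lightweight record describing one hosted diffusion model: its display name,
# its Hugging Face Hub path, and the prompt prefix that routes requests to it.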
class Model:
    def __init__(self, name, path="", prefix=""):
        self.name = name
        self.path = path
        self.prefix = prefix
models = [
    Model("Marvel", "models/ItsJayQz/Marvel_WhatIf_Diffusion", "whatif style"),
    Model("Cyberpunk Anime Diffusion", "models/DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style"),
    Model("Portrait plus", "models/wavymulder/portraitplus", "portrait+ style"),
    Model("classic Disney", "models/nitrosocke/classic-anim-diffusion", "classic disney style"),
    Model("vintedois", "models/22h/vintedois-diffusion-v0-1", "vintedois style"),
    Model("dreamlike", "models/dreamlike-art/dreamlike-diffusion-1.0", "dreamlike style"),
    Model("SD21", "models/stabilityai/stable-diffusion-2-1", "sd21 default style"),
]
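# Parallel lists built from `models` (same index order): model1 holds the loaded
# Gradio interfaces, model2 the prompt prefixes, and model3 the display names.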
model1 = []
model2 = []
model3 = []
for i in range(len(models)):
    model3.append(models[i].name)
    model2.append(models[i].prefix)
    model1.append(gradio.Interface.load(models[i].path))
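# Route a prompt to a model by scanning for each model's trigger prefix in the
# prompt text; if no prefix matches, fall back to the first model ("Marvel").
# For example, a prompt containing "portrait+ style" is sent to Portrait plus.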
def process1(prompt):
    modelSelected = model3[0]
    for i in range(len(models)):
        if prompt.find(models[i].prefix) != -1:
            modelSelected = models[i].name
    print(modelSelected)
    model_idx = model3.index(modelSelected)
    image_return = model1[model_idx](prompt)
    return image_return
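# Wire the routing function into a simple Gradio UI: one prompt textbox in,
# one generated image out, with request queuing enabled for concurrent users.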
sandbox = gradio.Interface(
    fn=process1,
    inputs=[gradio.Textbox(label="Enter Prompt:")],
    outputs=[gradio.Image(label="Produced Image")],
    title='AlStable Text to Image',
)
sandbox.queue(concurrency_count=20).launch()