# GenerativeApp.py — minimal Gradio front-end for a Stable Diffusion checkpoint.
import gradio as gr
import os
import torch
from PIL import Image
from diffusers import StableDiffusionPipeline
from transformers import AutoTokenizer

# Module-level cache so the multi-GB pipeline is loaded once per process,
# not re-loaded on every generation request.
_PIPE = None


def load_model():
    """Load (and cache) the Stable Diffusion pipeline.

    Returns:
        StableDiffusionPipeline: the pipeline, moved to CUDA when available,
        otherwise left on CPU.
    """
    global _PIPE
    if _PIPE is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model_path = "K00B404/tiny_image_gen"
        # float16 is only reliably supported on CUDA; use float32 on CPU
        # (half precision on CPU raises for most diffusion ops).
        dtype = torch.float16 if device == "cuda" else torch.float32
        pipe = StableDiffusionPipeline.from_pretrained(
            model_path, revision="main", torch_dtype=dtype
        )
        _PIPE = pipe.to(device)
    return _PIPE


def generate_image(prompt):
    """Generate a 512x512 image from a text prompt.

    Args:
        prompt: text description of the desired image.

    Returns:
        PIL.Image.Image: the generated image.
    """
    pipe = load_model()
    output = pipe(prompt, num_inference_steps=50, height=512, width=512)
    # Diffusers pipelines return ready-made PIL images under `.images`;
    # the original `output["samples"]` key does not exist, and the manual
    # tensor->PIL conversion referenced `np`, which was never imported.
    return output.images[0]


if __name__ == "__main__":
    gradio_app = gr.Interface(generate_image, inputs="text", outputs="image")
    gradio_app.launch()