import base64
import os
import socket
import time
from io import BytesIO
from multiprocessing import Process

import requests
import streamlit as st
from PIL import Image


def start_server():
    # Launch the FastAPI model server (defined in server.py) on port 8080.
    os.system("uvicorn server:app --port 8080 --host 0.0.0.0 --workers 2")


def load_models():
    if not is_port_in_use(8080):
        with st.spinner(text="Loading models, please wait..."):
            # Run the server in a daemonized background process so it exits
            # with the Streamlit app and does not block the UI.
            proc = Process(target=start_server, args=(), daemon=True)
            proc.start()
            # Poll until the server starts accepting connections.
            while not is_port_in_use(8080):
                time.sleep(1)
            st.success("Model server started.")
    else:
        st.success("Model server already running...")
    st.session_state["models_loaded"] = True


def is_port_in_use(port):
    # connect_ex returns 0 when something is already listening on the port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("0.0.0.0", port)) == 0


def generate(prompt):
    # Pass the prompt via `params` so requests URL-encodes spaces and
    # special characters instead of breaking the query string.
    response = requests.get("http://0.0.0.0:8080/correct", params={"prompt": prompt})
    # The server returns base64-encoded images; decode each into a PIL image.
    images = response.json()["images"]
    images = [Image.open(BytesIO(base64.b64decode(img))) for img in images]
    return images


if "models_loaded" not in st.session_state:
    st.session_state["models_loaded"] = False

st.header("minDALL-E")
st.subheader("Generate images from text")

if not st.session_state["models_loaded"]:
    load_models()

prompt = st.text_input("What do you want to see?")

DEBUG = False
if prompt != "":
    container = st.empty()
    container.markdown(
        "Predictions may take up to 40s under high load. Please stand by.",
        unsafe_allow_html=True,
    )
    print(f"Getting selections: {prompt}")
    selected = generate(prompt)
    margin = 0.1  # for better position of zoom in arrow
    n_columns = 3
    # Alternate image columns with narrow spacer columns for padding.
    cols = st.columns([1] + [margin, 1] * (n_columns - 1))
    for i, img in enumerate(selected):
        cols[(i % n_columns) * 2].image(img)
    # Replace the "please stand by" notice with the prompt once images render.
    container.markdown(f"**{prompt}**")
    st.button("Again!", key="again_button")
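# ----------------------------------------------------------------------------
# For reference, a minimal sketch of the `server.py` this app launches via
# `uvicorn server:app`. Only the contract is taken from the client code above:
# a GET /correct endpoint that accepts a `prompt` query parameter and returns
# {"images": [<base64-encoded image>, ...]}. The `generate_images` call is a
# hypothetical placeholder for the actual minDALL-E inference code, which is
# not shown here.
#
#   import base64
#   from io import BytesIO
#
#   from fastapi import FastAPI
#
#   app = FastAPI()
#
#   @app.get("/correct")
#   def correct(prompt: str):
#       pil_images = generate_images(prompt)  # hypothetical model call
#       encoded = []
#       for img in pil_images:
#           buffer = BytesIO()
#           img.save(buffer, format="PNG")
#           encoded.append(base64.b64encode(buffer.getvalue()).decode("utf-8"))
#       return {"images": encoded}
# ----------------------------------------------------------------------------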