KingNish committed on
Commit 11b1e15
1 Parent(s): 52d72c6

Update app.py

Files changed (1):
  1. app.py (+15 -5)
app.py CHANGED
@@ -5,6 +5,9 @@ import spaces
 import gradio as gr
 import numpy as np
 import torch
+import tempfile
+import os
+import uuid
 from PIL import Image
 from diffusers import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL, DPMSolverMultistepScheduler
 from huggingface_hub import hf_hub_download, InferenceClient
@@ -29,6 +32,11 @@ To optimize image results:
 - **Increase the number of steps** for enhanced edits.
 """
 
+def save_image(img):
+    unique_name = str(uuid.uuid4()) + ".png"
+    img.save(unique_name)
+    return unique_name
+
 def set_timesteps_patched(self, num_inference_steps: int, device = None):
     self.num_inference_steps = num_inference_steps
 
@@ -91,8 +99,9 @@ def king(type ,
             num_inference_steps=steps,
             image=output_image,
             generator=generator,
-            ).images[0]
-        return seed, refine
+            ).images
+        image_paths = [save_image(img) for img in refine][0]
+        return seed, image_paths
     else :
         if randomize_seed:
             seed = random.randint(0, 999999)
@@ -108,7 +117,7 @@ def king(type ,
             num_inference_steps = int(steps/2.5),
             width = width, height = height,
             generator = generator,
-            ).images[0]
+            ).images
         else:
             image = pipe_fast( prompt = instruction,
                 negative_prompt=negative_prompt,
@@ -123,8 +132,9 @@ def king(type ,
         guidance_scale = 7.5,
         num_inference_steps= steps,
         image=image, generator=generator,
-        ).images[0]
-    return seed, refine
+        ).images
+    image_paths = [save_image(img) for img in refine][0]
+    return seed, image_paths
 
 client = InferenceClient()
 # Prompt classifier
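The pattern this commit introduces — writing each generated PIL image to a uuid-named PNG via `save_image` and returning the file path instead of the in-memory image — can be exercised in isolation. The following is a minimal sketch, not the full app: it assumes only Pillow and Gradio, and uses a hypothetical `generate` function as a stand-in for the actual diffusers pipelines (e.g. `pipe_fast`) in app.py.

```python
import uuid

import gradio as gr
from PIL import Image


def save_image(img):
    # Same helper the commit adds: write the image to a uuid-named PNG
    # in the working directory and return its path.
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name


def generate(prompt):
    # Hypothetical stand-in for the diffusers pipelines in app.py:
    # it produces a single blank image so the wiring can be tested.
    images = [Image.new("RGB", (512, 512), "white")]
    # Mirrors `[save_image(img) for img in refine][0]` from the diff:
    # every image is written to disk, but only the first path is returned.
    return [save_image(img) for img in images][0]


# A Gradio Image output accepts a file path (as well as a PIL image or a
# NumPy array), which is why returning a path works where a PIL image
# was returned before.
demo = gr.Interface(fn=generate, inputs="text", outputs="image")

if __name__ == "__main__":
    demo.launch()
```

Note that, within the lines changed here, `save_image` relies only on `uuid`; the newly added `tempfile` and `os` imports are not referenced in any changed line. Also, `[save_image(img) for img in refine][0]` saves every returned image but keeps only the first path, so it behaves like `save_image(refine[0])` when the pipeline yields a single image.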