gaur3009 committed
Commit d1dbfbe
1 Parent(s): 49b91ef

Update app.py

Files changed (1): app.py (+36 −36)
app.py CHANGED
@@ -16,40 +16,34 @@ else:
     pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
-DEFAULT_IMAGE_SIZE = 512
 MAX_IMAGE_SIZE = 1024
 
-# Define the default parts of the prompt
-DEFAULT_PREFIX = "a single"
-DEFAULT_SUFFIX = "hanging on the grey wall"
-CATEGORIES = ["T-shirt", "Sweatshirt", "Shirt", "Hoodie"]
-EXAMPLES = [
-    ["T-shirt", "floral pattern"],
-    ["Sweatshirt", "abstract design"],
-    ["Shirt", "geometric shapes"],
-    ["Hoodie", "urban graffiti"],
-]
+def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
 
-def infer(category, design, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-    prompt = f"{DEFAULT_PREFIX} {category} with {design} {DEFAULT_SUFFIX}"
-
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
+
     generator = torch.Generator().manual_seed(seed)
 
     image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator
+        prompt = prompt,
+        negative_prompt = negative_prompt,
+        guidance_scale = guidance_scale,
+        num_inference_steps = num_inference_steps,
+        width = width,
+        height = height,
+        generator = generator
     ).images[0]
 
     return image
 
-css = """
+examples = [
+    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+    "An astronaut riding a green horse",
+    "A delicious ceviche cheesecake slice",
+]
+
+css="""
 #col-container {
     margin: 0 auto;
     max-width: 520px;
@@ -62,6 +56,7 @@ else:
 power_device = "CPU"
 
 with gr.Blocks(css=css) as demo:
+
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""
         # Text-to-Image Gradio Template
@@ -69,12 +64,12 @@ with gr.Blocks(css=css) as demo:
         """)
 
         with gr.Row():
-            category = gr.Dropdown(label="Category", choices=CATEGORIES, value=CATEGORIES[0])
-            design = gr.Text(
-                label="Design/Graphic",
-                show_label=True,
+
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
                 max_lines=1,
-                placeholder="Enter design or graphic",
+                placeholder="Enter your prompt",
                 container=False,
             )
 
@@ -83,6 +78,7 @@ with gr.Blocks(css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
 
         with gr.Accordion("Advanced Settings", open=False):
+
             negative_prompt = gr.Text(
                 label="Negative prompt",
                 max_lines=1,
@@ -101,12 +97,13 @@ with gr.Blocks(css=css) as demo:
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
             with gr.Row():
+
                 width = gr.Slider(
                     label="Width",
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=DEFAULT_IMAGE_SIZE,
+                    value=512,
                 )
 
                 height = gr.Slider(
@@ -114,16 +111,17 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=DEFAULT_IMAGE_SIZE,
+                    value=512,
                 )
 
             with gr.Row():
+
                 guidance_scale = gr.Slider(
                     label="Guidance scale",
                     minimum=0.0,
                     maximum=10.0,
                     step=0.1,
-                    value=7.5,
+                    value=0.0,
                 )
 
                 num_inference_steps = gr.Slider(
@@ -131,18 +129,20 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=12,
                     step=1,
-                    value=50,
+                    value=2,
                 )
 
         gr.Examples(
-            examples=EXAMPLES,
-            inputs=[category, design]
+            examples = examples,
+            inputs = [prompt]
         )
 
     run_button.click(
-        fn=infer,
-        inputs=[category, design, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs=[result]
+        fn = infer,
+        inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        outputs = [result]
     )
 
 demo.queue().launch()
+
+DO the prompt tuning for the above as i is taking a lot of time for grnrating
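
The new slider defaults (guidance_scale 0.0, num_inference_steps 2) only give usable images with a few-step checkpoint; with an ordinary Stable Diffusion model, two steps without classifier-free guidance would produce noise. Below is a minimal sketch of that fast-generation path outside Gradio. The model id is an assumption (the checkpoint loaded by app.py is not visible in this diff), chosen as a turbo-class example.

# Minimal sketch, not the repository's exact code: "stabilityai/sdxl-turbo" is
# an assumed few-step checkpoint; the actual model loaded by app.py is not
# shown in this diff hunk.
import torch
from diffusers import AutoPipelineForText2Image

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

generator = torch.Generator().manual_seed(42)
image = pipe(
    prompt="An astronaut riding a green horse",
    negative_prompt="",
    guidance_scale=0.0,       # turbo-style checkpoints are trained to run without CFG
    num_inference_steps=2,    # matches the new slider default in this commit
    width=512,
    height=512,
    generator=generator,
).images[0]
image.save("preview.png")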