Spaces: Running on Zero
Commit • 1f2f9cf
1 Parent(s): 19adf9c
Update app.py
Browse files
app.py CHANGED
@@ -11,6 +11,7 @@ dtype = torch.bfloat16
|
|
11 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
12 |
|
13 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
|
|
14 |
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
|
15 |
torch.cuda.empty_cache()
|
16 |
|
@@ -33,6 +34,7 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
|
|
33 |
height=height,
|
34 |
generator=generator,
|
35 |
output_type="pil",
|
|
|
36 |
):
|
37 |
yield img, seed
|
38 |
|
|
|
11 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
12 |
|
13 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
14 |
+
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
|
15 |
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
|
16 |
torch.cuda.empty_cache()
|
17 |
|
|
|
34 |
height=height,
|
35 |
generator=generator,
|
36 |
output_type="pil",
|
37 |
+
good_vae=good_vae,
|
38 |
):
|
39 |
yield img, seed
|
40 |
|