Commit abbed05 by dataautogpt3 (parent: 6cabc96): Update README.md

README.md, updated section:
Recommended settings:
- CFG: 3.5 to 7
- CLIP skip: -3
- Sampler: DPM++ 3M SDE
- Scheduler: Karras (see the scheduler sketch after the diffusers example below)
- Resolution: 1024x1024
Please also consider using these keywords to improve your prompts: best quality, HD, ~*~aesthetic~*~.
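For illustration, the keywords can simply be appended to the end of a prompt (the base prompt below is made up, not taken from the model card):

```python
# Hypothetical prompt with the suggested keywords appended at the end.
prompt = "portrait of a lighthouse keeper at dawn, best quality, HD, ~*~aesthetic~*~"
```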
Use it with 🧨 diffusers

```python
import torch
from diffusers import (
    StableDiffusionXLPipeline,
    KDPM2AncestralDiscreteScheduler,
    AutoencoderKL
)

# Load VAE component
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=torch.float16
)

# Configure the pipeline
pipe = StableDiffusionXLPipeline.from_pretrained(
    "Corcelio/mobius",
    vae=vae,
    torch_dtype=torch.float16
)
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to('cuda')

# Define prompts and generate image
prompt = "mystery"
negative_prompt = ""

image = pipe(
    prompt,
    negative_prompt=negative_prompt,
    width=1024,
    height=1024,
    guidance_scale=7,
    num_inference_steps=50
).images[0]
```
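The snippet above keeps the KDPM2AncestralDiscreteScheduler line from the original example. If you want to follow the recommended DPM++ 3M SDE / Karras combination instead, a rough diffusers equivalent is sketched below; the mapping onto DPMSolverMultistepScheduler options is an assumption rather than something stated in the model card, and `pipe` refers to the pipeline created above.

```python
from diffusers import DPMSolverMultistepScheduler

# Assumed mapping of "DPM++ 3M SDE" with a Karras schedule onto diffusers:
# the SDE variant of DPM-Solver++ with Karras sigmas. solver_order=3 targets the
# "3M" behavior; if your diffusers version does not support third-order SDE
# updates, fall back to solver_order=2 (DPM++ 2M SDE Karras).
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config,
    algorithm_type="sde-dpmsolver++",
    use_karras_sigmas=True,
    solver_order=3,
)
```

Generation then proceeds exactly as in the example above; note that guidance_scale=7 and the 1024x1024 output there already sit inside the recommended CFG range and at the recommended resolution.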