from diffusers import (
    StableDiffusionControlNetPipeline,
    StableDiffusionXLControlNetPipeline,
    ControlNetModel,
    AutoencoderKL,
    UniPCMultistepScheduler,
)
import torch
import spaces

device = "cuda"

def get_cn_pipeline():
    # Two ControlNets, applied in this order:
    #   1) a local lineart ControlNet (./controlnet/lineart)
    #   2) mattyamonaca/controlnet_line2line_xl from the Hub
    controlnets = [
        ControlNetModel.from_pretrained("./controlnet/lineart", torch_dtype=torch.float16, use_safetensors=True),
        ControlNetModel.from_pretrained("mattyamonaca/controlnet_line2line_xl", torch_dtype=torch.float16),
    ]

    # fp16-safe SDXL VAE to avoid artifacts when decoding in half precision
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
        "cagliostrolab/animagine-xl-3.1", controlnet=controlnets, vae=vae, torch_dtype=torch.float16
    )

    # Offload submodules to CPU between forward passes to reduce VRAM usage
    pipe.enable_model_cpu_offload()

    return pipe

def invert_image(img):
    # Convert to grayscale and invert pixel values
    # (black lines on white -> white lines on black)
    img = img.convert('L')
    inverted_img = img.point(lambda p: 255 - p)
    return inverted_img

def get_cn_detector(image):
    # Conditioning images in the same order as the ControlNets in the pipeline:
    # the inverted image for the lineart ControlNet, the original for line2line
    re_image = invert_image(image)
    detectors = [re_image, image]
    print(detectors)  # debug output
    return detectors

@spaces.GPU
def generate(pipe, detectors, prompt, negative_prompt):
    default_pos = "1girl, bestquality, 4K, ((white background)), no background"
    default_neg = "shadow, (worst quality, low quality:1.2), (lowres:1.2), (bad anatomy:1.2), (greyscale, monochrome:1.4)"
    # Prepend the default tags to the user-supplied prompts (", " keeps the tags separated)
    prompt = default_pos + ", " + prompt
    negative_prompt = default_neg + ", " + negative_prompt
    print(type(pipe))  # debug output
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=detectors,
        num_inference_steps=50,
        # Weights follow the ControlNet order: lineart at 1.0, line2line at 0.2
        controlnet_conditioning_scale=[1.0, 0.2],
    ).images[0]
    return image
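

# Minimal usage sketch (an assumption, not part of the original app): shows how the
# helpers above fit together when the script is run directly. The input file
# "lineart.png", the prompts, and the output path are placeholders; in the Space
# itself these functions are presumably driven by the UI code instead.
if __name__ == "__main__":
    from PIL import Image

    pipe = get_cn_pipeline()
    line_image = Image.open("lineart.png").convert("RGB")  # hypothetical line-art input
    detectors = get_cn_detector(line_image)
    result = generate(pipe, detectors, "1boy, upper body", "blurry")
    result.save("colored.png")  # hypothetical output path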