Fix imports for PR 8623 (#5)
my_pipeline.py  CHANGED  (+4 −13)
@@ -27,12 +27,7 @@ from diffusers.loaders import (
 from diffusers.models import AutoencoderKL
 from .scheduler.my_scheduler import MyScheduler
 from .unet.my_unet_model import MyUNetModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
-    XFormersAttnProcessor,
-)
+from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.utils import (
     USE_PEFT_BACKEND,
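Note: the dropped names (`LoRAAttnProcessor2_0`, `LoRAXFormersAttnProcessor`) are no longer exported by recent diffusers releases, which is why the flat two-class import above is used. If the pipeline also has to import cleanly on older diffusers versions that still ship those classes, a guarded import is one option. This is only a sketch and not part of this PR; the `LORA_ATTENTION_PROCESSORS` / `ATTENTION_PROCESSORS` names are made up for illustration.

# Sketch only (not part of this PR): stay importable on both old and new
# diffusers releases by treating the removed LoRA processors as optional.
from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor

try:
    # Older diffusers releases still export the LoRA-specific attention processors.
    from diffusers.models.attention_processor import (
        LoRAAttnProcessor2_0,
        LoRAXFormersAttnProcessor,
    )
    LORA_ATTENTION_PROCESSORS = (LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor)
except ImportError:
    # Newer releases removed them; an empty tuple keeps isinstance() checks working.
    LORA_ATTENTION_PROCESSORS = ()

ATTENTION_PROCESSORS = (AttnProcessor2_0, XFormersAttnProcessor) + LORA_ATTENTION_PROCESSORS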
@@ -136,6 +131,7 @@ class MyPipeline(
             watermark output images. If not defined, it will default to True if the package is installed, otherwise no
             watermarker will be used.
     """
+
     model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
     _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"]
 
@@ -572,12 +568,7 @@ class MyPipeline(
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory
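For context, `use_torch_2_0_or_xformers` is the usual guard in the VAE-upcasting step: when the decoder's attention runs through SDPA (`AttnProcessor2_0`) or xformers, the attention-adjacent modules can be moved back to half precision after the rest of the VAE is upcast to float32. A minimal sketch of that pattern is below, assuming the stock SDXL-style `upcast_vae` method; the actual body of `MyPipeline` is not shown in this diff and may differ.

import torch
from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor

def upcast_vae(self):
    # Sketch of the surrounding method, mirroring the upcast_vae pattern used by
    # the stock SDXL pipelines; MyPipeline's real implementation may differ.
    dtype = self.vae.dtype
    self.vae.to(dtype=torch.float32)
    use_torch_2_0_or_xformers = isinstance(
        self.vae.decoder.mid_block.attentions[0].processor,
        (AttnProcessor2_0, XFormersAttnProcessor),
    )
    # if xformers or torch_2_0 is used attention block does not need
    # to be in float32 which can save lots of memory
    if use_torch_2_0_or_xformers:
        self.vae.post_quant_conv.to(dtype)
        self.vae.decoder.conv_in.to(dtype)
        self.vae.decoder.mid_block.to(dtype)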
@@ -972,4 +963,4 @@ class MyPipeline(
         # Offload all models
         self.maybe_free_model_hooks()
 
-        return (image,)
+        return (image,)
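With the imports fixed, the pipeline loads again through the usual custom-pipeline path. A usage sketch under assumptions: the repository id is a placeholder, and the `prompt` argument is assumed since `MyPipeline.__call__` is not shown in this diff.

import torch
from diffusers import DiffusionPipeline

# Hypothetical usage sketch: the repository id is a placeholder for wherever
# my_pipeline.py and its custom scheduler/unet modules are published.
pipeline = DiffusionPipeline.from_pretrained(
    "your-namespace/your-custom-pipeline-repo",
    custom_pipeline="your-namespace/your-custom-pipeline-repo",
    trust_remote_code=True,  # the pipeline ships custom Python modules
    torch_dtype=torch.float16,
)

# Optional: model CPU offloading moves components to the GPU on demand in the
# declared model_cpu_offload_seq order (text_encoder -> text_encoder_2 -> unet -> vae).
pipeline.enable_model_cpu_offload()

# `prompt` is assumed here for illustration. Per the last hunk, the pipeline
# returns a plain one-element tuple.
(image,) = pipeline(prompt="an astronaut riding a horse")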