22 | 22 | from diffusers.image_processor import PipelineImageInput, VaeImageProcessor |
23 | 23 | from diffusers.loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin |
24 | 24 | from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel |
25 | | -from diffusers.models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor |
26 | 25 | from diffusers.pipelines.kolors.pipeline_output import KolorsPipelineOutput |
27 | 26 | from diffusers.pipelines.kolors.text_encoder import ChatGLMModel |
28 | 27 | from diffusers.pipelines.kolors.tokenizer import ChatGLMTokenizer |
29 | 28 | from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin |
30 | 29 | from diffusers.schedulers import KarrasDiffusionSchedulers |
31 | | -from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring |
| 30 | +from diffusers.utils import deprecate, is_torch_xla_available, logging, replace_example_docstring |
32 | 31 | from diffusers.utils.torch_utils import randn_tensor |
33 | 32 |
|
34 | 33 |
|
@@ -709,24 +708,9 @@ def _get_add_time_ids( |
709 | 708 | add_time_ids = torch.tensor([add_time_ids], dtype=dtype) |
710 | 709 | return add_time_ids |
711 | 710 |
|
712 | | - # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae |
713 | 711 | def upcast_vae(self): |
714 | | - dtype = self.vae.dtype |
| 712 | + deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`") |
715 | 713 | self.vae.to(dtype=torch.float32) |
716 | | - use_torch_2_0_or_xformers = isinstance( |
717 | | - self.vae.decoder.mid_block.attentions[0].processor, |
718 | | - ( |
719 | | - AttnProcessor2_0, |
720 | | - XFormersAttnProcessor, |
721 | | - FusedAttnProcessor2_0, |
722 | | - ), |
723 | | - ) |
724 | | - # if xformers or torch_2_0 is used attention block does not need |
725 | | - # to be in float32 which can save lots of memory |
726 | | - if use_torch_2_0_or_xformers: |
727 | | - self.vae.post_quant_conv.to(dtype) |
728 | | - self.vae.decoder.conv_in.to(dtype) |
729 | | - self.vae.decoder.mid_block.to(dtype) |
730 | 714 |
|
731 | 715 | # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding |
732 | 716 | def get_guidance_scale_embedding( |
|
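Migration note: after this change, `upcast_vae` only raises a deprecation warning (slated for removal in 1.0.0) before casting the VAE to float32, so callers should perform the cast themselves. Below is a minimal sketch of the replacement usage, not taken from this PR; the checkpoint id `Kwai-Kolors/Kolors-diffusers` and the prompt are illustrative, and `KolorsPipeline` is assumed to be the pipeline this file defines.

```python
import torch
from diffusers import KolorsPipeline

# Illustrative checkpoint id and dtype; substitute your own.
pipe = KolorsPipeline.from_pretrained(
    "Kwai-Kolors/Kolors-diffusers", torch_dtype=torch.float16
).to("cuda")

# Before this change (still works, but now warns and will be removed):
# pipe.upcast_vae()

# After: cast the VAE to float32 directly, as the deprecation
# message suggests.
pipe.vae.to(torch.float32)

image = pipe("a photo of an astronaut riding a horse").images[0]
```

One trade-off worth noting: the removed helper also kept the VAE's attention blocks (and `post_quant_conv` / `decoder.conv_in`) in the working dtype when an SDPA or xFormers attention processor was detected, as a memory optimization. With that special-casing gone, a plain `pipe.vae.to(torch.float32)` runs the entire VAE in float32, which is simpler but may use somewhat more memory during decoding.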