import inspect
import math
import warnings
from typing import Any, Callable, Dict, List, Optional, Union

import PIL
import torch
import torchvision.transforms.functional as TF
from diffusers.configuration_utils import ConfigMixin, FrozenDict, register_to_config
from diffusers.image_processor import VaeImageProcessor
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.modeling_utils import ModelMixin
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import deprecate, is_accelerate_available, logging
from diffusers.utils.torch_utils import randn_tensor
from packaging import version
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

logger = logging.get_logger(__name__)

class CLIPCameraProjection(ModelMixin, ConfigMixin):
    """
    A projection layer that maps the concatenation of a CLIP image embedding and a
    camera embedding back to the CLIP embedding dimension.

    Parameters:
        embedding_dim (`int`, *optional*, defaults to 768): The dimension of the input CLIP embedding `clip_embed`.
        additional_embeddings (`int`, *optional*, defaults to 4): The number of additional camera dimensions
            concatenated to the CLIP embedding, so the projection input has dimension `embedding_dim +
            additional_embeddings`.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768, additional_embeddings: int = 4):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.additional_embeddings = additional_embeddings

        self.input_dim = self.embedding_dim + self.additional_embeddings
        self.output_dim = self.embedding_dim

        self.proj = torch.nn.Linear(self.input_dim, self.output_dim)

    def forward(
        self,
        embedding: torch.FloatTensor,
    ):
        """
        The [`CLIPCameraProjection`] forward method.

        Args:
            embedding (`torch.FloatTensor` of shape `(..., input_dim)`):
                The concatenated CLIP image embedding and camera embedding.

        Returns:
            The projected embedding (`torch.FloatTensor` of shape `(..., output_dim)`).
        """
        proj_embedding = self.proj(embedding)
        return proj_embedding
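
# A minimal sketch of how this projection is used by `_encode_image` below (the
# shapes follow from that method, not from any external API): a (B, 1, 768) CLIP
# image embedding is concatenated with a (B, 1, 4) camera embedding and projected
# back to the CLIP dimension:
#
#   proj = CLIPCameraProjection()                            # input_dim = 768 + 4
#   tokens = torch.cat([clip_embed, camera_embed], dim=-1)   # (B, 1, 772)
#   cond = proj(tokens)                                      # (B, 1, 768)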


class Zero123Pipeline(DiffusionPipeline):
    r"""
    Pipeline to generate novel views of an input image, conditioned on a relative camera pose, using Stable
    Diffusion (Zero-1-to-3).

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        image_encoder ([`CLIPVisionModelWithProjection`]):
            Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection),
            specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
        clip_camera_projection ([`CLIPCameraProjection`]):
            Projection layer that fuses the CLIP image embedding with the relative camera embedding.
    """

    _optional_components = ["safety_checker"]

    def __init__(
        self,
        vae: AutoencoderKL,
        image_encoder: CLIPVisionModelWithProjection,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        clip_camera_projection: CLIPCameraProjection,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead."
            )

        is_unet_version_less_0_9_0 = hasattr(
            unet.config, "_diffusers_version"
        ) and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = (
            hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        )
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly, as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file."
            )
            deprecate(
                "sample_size<64", "1.0.0", deprecation_message, standard_warn=False
            )
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            image_encoder=image_encoder,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            clip_camera_projection=clip_camera_projection,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the unet,
        image_encoder, vae and safety checker have their state dicts saved to CPU and are then moved to a
        `torch.device('meta')`, getting loaded to GPU only when their specific submodule has its `forward` method
        called.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        for cpu_offloaded_model in [
            self.unet,
            self.image_encoder,
            self.vae,
            self.safety_checker,
        ]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's
        module hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(
        self,
        image,
        elevation,
        azimuth,
        distance,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        clip_image_embeddings=None,
        image_camera_embeddings=None,
    ):
        dtype = next(self.image_encoder.parameters()).dtype

        if image_camera_embeddings is None:
            if image is None:
                assert clip_image_embeddings is not None
                image_embeddings = clip_image_embeddings.to(device=device, dtype=dtype)
            else:
                if not isinstance(image, torch.Tensor):
                    image = self.feature_extractor(
                        images=image, return_tensors="pt"
                    ).pixel_values

                image = image.to(device=device, dtype=dtype)
                image_embeddings = self.image_encoder(image).image_embeds
                image_embeddings = image_embeddings.unsqueeze(1)  # (B, 1, embedding_dim)

            bs_embed, seq_len, _ = image_embeddings.shape

            if isinstance(elevation, float):
                elevation = torch.as_tensor(
                    [elevation] * bs_embed, dtype=dtype, device=device
                )
            if isinstance(azimuth, float):
                azimuth = torch.as_tensor(
                    [azimuth] * bs_embed, dtype=dtype, device=device
                )
            if isinstance(distance, float):
                distance = torch.as_tensor(
                    [distance] * bs_embed, dtype=dtype, device=device
                )

            camera_embeddings = torch.stack(
                [
                    torch.deg2rad(elevation),
                    torch.sin(torch.deg2rad(azimuth)),
                    torch.cos(torch.deg2rad(azimuth)),
                    distance,
                ],
                dim=-1,
            )[:, None, :]
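
            # Note on the parameterization (derived from the stack above, as in
            # Zero-1-to-3 conditioning): the relative camera pose is encoded as four
            # scalars per sample: elevation in radians, azimuth as (sin, cos) so the
            # angle has no wrap-around discontinuity, and the raw relative distance,
            # giving a (B, 1, 4) tensor that is concatenated to the CLIP embedding.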

            image_embeddings = torch.cat([image_embeddings, camera_embeddings], dim=-1)

            # project the concatenated (image, camera) embedding back to the CLIP
            # embedding dimension expected by the UNet's cross-attention
            image_embeddings = self.clip_camera_projection(image_embeddings)
        else:
            image_embeddings = image_camera_embeddings.to(device=device, dtype=dtype)
            bs_embed, seq_len, _ = image_embeddings.shape

        # duplicate the image embeddings for each generation per prompt
        image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
        image_embeddings = image_embeddings.view(
            bs_embed * num_images_per_prompt, seq_len, -1
        )

        if do_classifier_free_guidance:
            negative_prompt_embeds = torch.zeros_like(image_embeddings)

            # For classifier-free guidance we need two forward passes. Here the
            # unconditional (all-zero) and conditional embeddings are concatenated
            # into a single batch to avoid doing two forward passes.
            image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])

        return image_embeddings

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(
                    image, output_type="pil"
                )
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(
                feature_extractor_input, return_tensors="pt"
            ).to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        warnings.warn(
            "The decode_latents method is deprecated and will be removed in a future version. Please"
            " use VaeImageProcessor instead.",
            FutureWarning,
        )
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # always cast to float32, as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image
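
    # The non-deprecated pattern, as used in `__call__` below (a sketch, not an
    # additional API of this class):
    #   image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
    #   image = self.image_processor.postprocess(image, output_type="np")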

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper (https://arxiv.org/abs/2010.02502)
        # and should be in [0, 1]

        accepts_eta = "eta" in set(
            inspect.signature(self.scheduler.step).parameters.keys()
        )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts a generator
        accepts_generator = "generator" in set(
            inspect.signature(self.scheduler.step).parameters.keys()
        )
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(self, image, height, width, callback_steps):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(
                f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
            )

        if callback_steps is None or not isinstance(callback_steps, int) or callback_steps <= 0:
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
    ):
        # latent resolution is the pixel resolution divided by the VAE scale factor (8 for SD VAEs)
        shape = (
            batch_size,
            num_channels_latents,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(
                shape, generator=generator, device=device, dtype=dtype
            )
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def _get_latent_model_input(
        self,
        latents: torch.FloatTensor,
        image: Optional[
            Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor]
        ],
        num_images_per_prompt: int,
        do_classifier_free_guidance: bool,
        image_latents: Optional[torch.FloatTensor] = None,
    ):
        if isinstance(image, PIL.Image.Image):
            image_pt = TF.to_tensor(image).unsqueeze(0).to(latents)
        elif isinstance(image, list):
            image_pt = torch.stack([TF.to_tensor(img) for img in image], dim=0).to(
                latents
            )
        elif isinstance(image, torch.Tensor):
            image_pt = image
        else:
            image_pt = None

        if image_pt is None:
            assert image_latents is not None
            image_pt = image_latents.repeat_interleave(num_images_per_prompt, dim=0)
        else:
            image_pt = image_pt * 2.0 - 1.0  # scale from [0, 1] to [-1, 1]
            # note: unlike the noise latents, the conditioning latents are not scaled
            # by `vae.config.scaling_factor`; the posterior mode is used as-is
            image_pt = self.vae.encode(image_pt).latent_dist.mode()
            image_pt = image_pt.repeat_interleave(num_images_per_prompt, dim=0)
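
        # Channel layout fed to the UNet (inferred from the concatenation below):
        # the first 4 channels are the noisy target latents, the last 4 the clean
        # conditioning-image latents. Under classifier-free guidance the batch is
        # doubled, with the image latents zeroed in the unconditional half.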
        if do_classifier_free_guidance:
            latent_model_input = torch.cat(
                [
                    torch.cat([latents, latents], dim=0),
                    torch.cat([torch.zeros_like(image_pt), image_pt], dim=0),
                ],
                dim=1,
            )
        else:
            latent_model_input = torch.cat([latents, image_pt], dim=1)

        return latent_model_input

    @torch.no_grad()
    def __call__(
        self,
        image: Optional[
            Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor]
        ] = None,
        elevation: Optional[Union[float, torch.FloatTensor]] = None,
        azimuth: Optional[Union[float, torch.FloatTensor]] = None,
        distance: Optional[Union[float, torch.FloatTensor]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 3.0,
        num_images_per_prompt: int = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        clip_image_embeddings: Optional[torch.FloatTensor] = None,
        image_camera_embeddings: Optional[torch.FloatTensor] = None,
        image_latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):
                The image or images to guide the image generation. If you provide a tensor, it needs to comply with
                the configuration of
                [this](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json)
                `CLIPImageProcessor`.
            elevation (`float` or `torch.FloatTensor`, *optional*):
                Relative elevation angle(s) of the target view, in degrees.
            azimuth (`float` or `torch.FloatTensor`, *optional*):
                Relative azimuth angle(s) of the target view, in degrees.
            distance (`float` or `torch.FloatTensor`, *optional*):
                Relative distance(s) of the target camera.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 3.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the conditioning, usually
                at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
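
        Examples:
            A minimal usage sketch. The checkpoint path below is a placeholder for a
            Zero123 checkpoint converted to the diffusers layout (including the
            `clip_camera_projection` module); it is not an official model id.

            ```py
            >>> import torch
            >>> from diffusers.utils import load_image

            >>> pipe = Zero123Pipeline.from_pretrained(
            ...     "path/to/zero123-diffusers-checkpoint", torch_dtype=torch.float16
            ... ).to("cuda")
            >>> cond = load_image("https://example.com/object.png").resize((256, 256))
            >>> # same elevation and distance, camera rotated 30 degrees to the right
            >>> images = pipe(cond, elevation=0.0, azimuth=30.0, distance=0.0).images
            ```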

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(image, height, width, callback_steps)

        # 2. Define call parameters
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, list):
            batch_size = len(image)
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            assert image_latents is not None
            assert (
                clip_image_embeddings is not None or image_camera_embeddings is not None
            )
            batch_size = image_latents.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analogously to the guidance weight `w` of the
        # Imagen paper (https://arxiv.org/pdf/2205.11487.pdf): `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input image
        if isinstance(image, PIL.Image.Image) or isinstance(image, list):
            pil_image = image
        elif isinstance(image, torch.Tensor):
            pil_image = [TF.to_pil_image(image[i]) for i in range(image.shape[0])]
        else:
            pil_image = None
        image_embeddings = self._encode_image(
            pil_image,
            elevation,
            azimuth,
            distance,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            clip_image_embeddings,
            image_camera_embeddings,
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        # hard-coded: only the 4 noise channels are sampled here; the other 4 channels
        # of the UNet input are the conditioning-image latents (see _get_latent_model_input)
        num_channels_latents = 4
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            image_embeddings.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs for the scheduler
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier-free guidance
                latent_model_input = self._get_latent_model_input(
                    latents,
                    image,
                    num_images_per_prompt,
                    do_classifier_free_guidance,
                    image_latents,
                )
                latent_model_input = self.scheduler.scale_model_input(
                    latent_model_input, t
                )

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=image_embeddings,
                    cross_attention_kwargs=cross_attention_kwargs,
                ).sample

                # perform classifier-free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (
                        noise_pred_cond - noise_pred_uncond
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(
                    noise_pred, t, latents, **extra_step_kwargs
                ).prev_sample

                # update the progress bar and call the callback, if provided
                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # 8. Decode latents, run the safety checker and post-process
        if not output_type == "latent":
            image = self.vae.decode(
                latents / self.vae.config.scaling_factor, return_dict=False
            )[0]
            image, has_nsfw_concept = self.run_safety_checker(
                image, device, image_embeddings.dtype
            )
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(
            image, output_type=output_type, do_denormalize=do_denormalize
        )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(
            images=image, nsfw_content_detected=has_nsfw_concept
        )