import gradio as gr | |
import spaces | |
import argparse | |
import inspect | |
import os | |
from typing import Any, Callable, Dict, List, Optional, Tuple, Union | |
import matplotlib.pyplot as plt | |
from PIL import Image | |
import torch | |
import torch.nn.functional as F | |
import numpy as np | |
import random | |
import warnings | |
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer | |
from utils import * | |
import hashlib | |
from diffusers.image_processor import VaeImageProcessor | |
from diffusers.loaders import ( | |
FromSingleFileMixin, | |
LoraLoaderMixin, | |
TextualInversionLoaderMixin, | |
) | |
from diffusers.models import AutoencoderKL, UNet2DConditionModel | |
from diffusers.models.attention_processor import ( | |
AttnProcessor2_0, | |
LoRAAttnProcessor2_0, | |
LoRAXFormersAttnProcessor, | |
XFormersAttnProcessor, | |
) | |
from diffusers.models.lora import adjust_lora_scale_text_encoder | |
from diffusers.schedulers import KarrasDiffusionSchedulers | |
from diffusers.utils import ( | |
is_accelerate_available, | |
is_accelerate_version, | |
is_invisible_watermark_available, | |
logging, | |
replace_example_docstring, | |
) | |
from diffusers.utils.torch_utils import randn_tensor | |
from diffusers.pipelines.pipeline_utils import DiffusionPipeline | |
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput | |
from accelerate.utils import set_seed | |
from tqdm import tqdm | |
if is_invisible_watermark_available(): | |
from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
logger = logging.get_logger(__name__) # pylint: disable=invalid-name | |
EXAMPLE_DOC_STRING = """ | |
Examples: | |
```py | |
>>> import torch | |
>>> from diffusers import StableDiffusionXLPipeline | |
>>> pipe = StableDiffusionXLPipeline.from_pretrained( | |
... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 | |
... ) | |
>>> pipe = pipe.to("cuda") | |
>>> prompt = "a photo of an astronaut riding a horse on mars" | |
>>> image = pipe(prompt).images[0] | |
``` | |
""" | |
def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3): | |
x_coord = torch.arange(kernel_size) | |
gaussian_1d = torch.exp(-(x_coord - (kernel_size - 1) / 2) ** 2 / (2 * sigma ** 2)) | |
gaussian_1d = gaussian_1d / gaussian_1d.sum() | |
gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :] | |
kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1) | |
return kernel | |
def gaussian_filter(latents, kernel_size=3, sigma=1.0): | |
channels = latents.shape[1] | |
kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype) | |
blurred_latents = F.conv2d(latents, kernel, padding=kernel_size//2, groups=channels) | |
return blurred_latents | |
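# Illustrative usage (shapes assumed for an SDXL latent; kept as comments so nothing runs at import time):
#   >>> latents = torch.randn(1, 4, 128, 128)   # a 1024x1024 image encoded to a 128x128 latent
#   >>> blurred = gaussian_filter(latents, kernel_size=3, sigma=1.0)
#   >>> blurred.shape
#   torch.Size([1, 4, 128, 128])
# The depthwise convolution (groups=channels) blurs each latent channel independently.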
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg | |
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): | |
""" | |
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and | |
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 | |
""" | |
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) | |
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) | |
noise_pred_rescaled = noise_cfg * (std_text / std_cfg) | |
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg | |
return noise_cfg | |
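# Worked form of the rescale above (guidance_rescale plays the role of phi in the paper):
#   noise_rescaled = noise_cfg * std(noise_pred_text) / std(noise_cfg)   (per-sample std)
#   output         = phi * noise_rescaled + (1 - phi) * noise_cfg
# i.e. the CFG output's standard deviation is pulled back toward that of the text branch.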
class AccDiffusionSDXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin): | |
""" | |
Pipeline for text-to-image generation using Stable Diffusion XL. | |
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the | |
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) | |
In addition the pipeline inherits the following loading methods: | |
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`] | |
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] | |
as well as the following saving methods: | |
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`] | |
Args: | |
vae ([`AutoencoderKL`]): | |
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. | |
text_encoder ([`CLIPTextModel`]): | |
Frozen text-encoder. | |
text_encoder_2 ([`CLIPTextModelWithProjection`]): | |
Second frozen text-encoder. | |
tokenizer (`CLIPTokenizer`): | |
Tokenizer. | |
tokenizer_2 (`CLIPTokenizer`): | |
Second Tokenizer. | |
unet ([`UNet2DConditionModel`]): | |
Conditional U-Net architecture. | |
scheduler ([`SchedulerMixin`]): | |
A scheduler to be used in combination with `unet`. | |
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): | |
Whether the negative prompt embeddings shall be forced to always be set to 0. | |
add_watermarker (`bool`, *optional*): | |
Whether to use the invisible watermark library. | |
""" | |
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" | |
def __init__( | |
self, | |
vae: AutoencoderKL, | |
text_encoder: CLIPTextModel, | |
text_encoder_2: CLIPTextModelWithProjection, | |
tokenizer: CLIPTokenizer, | |
tokenizer_2: CLIPTokenizer, | |
unet: UNet2DConditionModel, | |
scheduler: KarrasDiffusionSchedulers, | |
force_zeros_for_empty_prompt: bool = True, | |
add_watermarker: Optional[bool] = None, | |
): | |
super().__init__() | |
self.register_modules( | |
vae=vae, | |
text_encoder=text_encoder, | |
text_encoder_2=text_encoder_2, | |
tokenizer=tokenizer, | |
tokenizer_2=tokenizer_2, | |
unet=unet, | |
scheduler=scheduler, | |
) | |
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) | |
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) | |
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) | |
self.default_sample_size = self.unet.config.sample_size | |
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() | |
if add_watermarker: | |
self.watermark = StableDiffusionXLWatermarker() | |
else: | |
self.watermark = None | |
def enable_vae_slicing(self): | |
self.vae.enable_slicing() | |
def disable_vae_slicing(self): | |
self.vae.disable_slicing() | |
def enable_vae_tiling(self): | |
self.vae.enable_tiling() | |
def disable_vae_tiling(self): | |
self.vae.disable_tiling() | |
def encode_prompt( | |
self, | |
prompt: str, | |
prompt_2: Optional[str] = None, | |
device: Optional[torch.device] = None, | |
num_images_per_prompt: int = 1, | |
do_classifier_free_guidance: bool = True, | |
negative_prompt: Optional[str] = None, | |
negative_prompt_2: Optional[str] = None, | |
prompt_embeds: Optional[torch.FloatTensor] = None, | |
negative_prompt_embeds: Optional[torch.FloatTensor] = None, | |
pooled_prompt_embeds: Optional[torch.FloatTensor] = None, | |
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, | |
lora_scale: Optional[float] = None, | |
): | |
device = device or self._execution_device | |
if lora_scale is not None and isinstance(self, LoraLoaderMixin): | |
self._lora_scale = lora_scale | |
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) | |
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) | |
if prompt is not None and isinstance(prompt, str): | |
batch_size = 1 | |
elif prompt is not None and isinstance(prompt, list): | |
batch_size = len(prompt) | |
else: | |
batch_size = prompt_embeds.shape[0] | |
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] | |
text_encoders = ( | |
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] | |
) | |
if prompt_embeds is None: | |
prompt_2 = prompt_2 or prompt | |
prompt_embeds_list = [] | |
prompts = [prompt, prompt_2] | |
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): | |
if isinstance(self, TextualInversionLoaderMixin): | |
prompt = self.maybe_convert_prompt(prompt, tokenizer) | |
text_inputs = tokenizer( | |
prompt, | |
padding="max_length", | |
max_length=tokenizer.model_max_length, | |
truncation=True, | |
return_tensors="pt", | |
) | |
text_input_ids = text_inputs.input_ids | |
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids | |
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( | |
text_input_ids, untruncated_ids | |
): | |
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) | |
logger.warning( | |
"The following part of your input was truncated because CLIP can only handle sequences up to" | |
f" {tokenizer.model_max_length} tokens: {removed_text}" | |
) | |
prompt_embeds = text_encoder( | |
text_input_ids.to(device), | |
output_hidden_states=True, | |
) | |
pooled_prompt_embeds = prompt_embeds[0] | |
prompt_embeds = prompt_embeds.hidden_states[-2] | |
prompt_embeds_list.append(prompt_embeds) | |
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) | |
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt | |
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: | |
negative_prompt_embeds = torch.zeros_like(prompt_embeds) | |
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) | |
elif do_classifier_free_guidance and negative_prompt_embeds is None: | |
negative_prompt = negative_prompt or "" | |
negative_prompt_2 = negative_prompt_2 or negative_prompt | |
if prompt is not None and type(prompt) is not type(negative_prompt): | |
raise TypeError(
f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str): | |
uncond_tokens = [negative_prompt, negative_prompt_2] | |
elif batch_size != len(negative_prompt): | |
raise ValueError( | |
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" | |
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" | |
" the batch size of `prompt`." | |
) | |
else: | |
uncond_tokens = [negative_prompt, negative_prompt_2] | |
negative_prompt_embeds_list = [] | |
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): | |
if isinstance(self, TextualInversionLoaderMixin): | |
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) | |
max_length = prompt_embeds.shape[1] | |
uncond_input = tokenizer( | |
negative_prompt, | |
padding="max_length", | |
max_length=max_length, | |
truncation=True, | |
return_tensors="pt", | |
) | |
negative_prompt_embeds = text_encoder( | |
uncond_input.input_ids.to(device), | |
output_hidden_states=True, | |
) | |
negative_pooled_prompt_embeds = negative_prompt_embeds[0] | |
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] | |
negative_prompt_embeds_list.append(negative_prompt_embeds) | |
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) | |
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) | |
bs_embed, seq_len, _ = prompt_embeds.shape | |
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) | |
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) | |
if do_classifier_free_guidance: | |
seq_len = negative_prompt_embeds.shape[1] | |
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) | |
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) | |
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) | |
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( | |
bs_embed * num_images_per_prompt, -1 | |
) | |
if do_classifier_free_guidance: | |
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( | |
bs_embed * num_images_per_prompt, -1 | |
) | |
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds | |
def prepare_extra_step_kwargs(self, generator, eta): | |
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) | |
extra_step_kwargs = {} | |
if accepts_eta: | |
extra_step_kwargs["eta"] = eta | |
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) | |
if accepts_generator: | |
extra_step_kwargs["generator"] = generator | |
return extra_step_kwargs | |
def check_inputs( | |
self, | |
prompt, | |
prompt_2, | |
height, | |
width, | |
callback_steps, | |
negative_prompt=None, | |
negative_prompt_2=None, | |
prompt_embeds=None, | |
negative_prompt_embeds=None, | |
pooled_prompt_embeds=None, | |
negative_pooled_prompt_embeds=None, | |
num_images_per_prompt=None, | |
): | |
if height % 8 != 0 or width % 8 != 0: | |
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") | |
if (callback_steps is None) or ( | |
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) | |
): | |
raise ValueError( | |
f"`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}." | |
) | |
if prompt is not None and prompt_embeds is not None: | |
raise ValueError( | |
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two." | |
) | |
elif prompt_2 is not None and prompt_embeds is not None: | |
raise ValueError( | |
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two." | |
) | |
elif prompt is None and prompt_embeds is None: | |
raise ValueError( | |
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." | |
) | |
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): | |
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") | |
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): | |
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") | |
if negative_prompt is not None and negative_prompt_embeds is not None: | |
raise ValueError( | |
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two." | |
) | |
elif negative_prompt_2 is not None and negative_prompt_embeds is not None: | |
raise ValueError( | |
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two." | |
) | |
if prompt_embeds is not None and negative_prompt_embeds is not None: | |
if prompt_embeds.shape != negative_prompt_embeds.shape: | |
raise ValueError(
f"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}."
)
if prompt_embeds is not None and pooled_prompt_embeds is None: | |
raise ValueError( | |
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." | |
) | |
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: | |
raise ValueError( | |
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." | |
) | |
if max(height, width) % 1024 != 0: | |
raise ValueError(f"The larger of `height` and `width` must be divisible by 1024, but got {height} and {width}.")
if num_images_per_prompt != 1: | |
warnings.warn("num_images_per_prompt != 1 is not supported by AccDiffusion and will be ignored.") | |
num_images_per_prompt = 1 | |
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): | |
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) | |
if isinstance(generator, list) and len(generator) != batch_size: | |
raise ValueError( | |
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators." | |
) | |
if latents is None: | |
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) | |
else: | |
latents = latents.to(device) | |
latents = latents * self.scheduler.init_noise_sigma | |
return latents | |
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): | |
add_time_ids = list(original_size + crops_coords_top_left + target_size) | |
passed_add_embed_dim = ( | |
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim | |
) | |
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features | |
if expected_add_embed_dim != passed_add_embed_dim: | |
raise ValueError( | |
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. \ | |
The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." | |
) | |
add_time_ids = torch.tensor([add_time_ids], dtype=dtype) | |
return add_time_ids | |
def get_views(self, height, width, window_size=128, stride=64, random_jitter=False): | |
height //= self.vae_scale_factor | |
width //= self.vae_scale_factor | |
num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1 | |
num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1 | |
total_num_blocks = int(num_blocks_height * num_blocks_width) | |
views = [] | |
for i in range(total_num_blocks): | |
h_start = int((i // num_blocks_width) * stride) | |
h_end = h_start + window_size | |
w_start = int((i % num_blocks_width) * stride) | |
w_end = w_start + window_size | |
if h_end > height: | |
h_start = int(h_start + height - h_end) | |
h_end = int(height) | |
if w_end > width: | |
w_start = int(w_start + width - w_end) | |
w_end = int(width) | |
if h_start < 0: | |
h_end = int(h_end - h_start) | |
h_start = 0 | |
if w_start < 0: | |
w_end = int(w_end - w_start) | |
w_start = 0 | |
if random_jitter: | |
jitter_range = (window_size - stride) // 4 | |
w_jitter = 0 | |
h_jitter = 0 | |
if (w_start != 0) and (w_end != width): | |
w_jitter = random.randint(-jitter_range, jitter_range) | |
elif (w_start == 0) and (w_end != width): | |
w_jitter = random.randint(-jitter_range, 0) | |
elif (w_start != 0) and (w_end == width): | |
w_jitter = random.randint(0, jitter_range) | |
if (h_start != 0) and (h_end != height): | |
h_jitter = random.randint(-jitter_range, jitter_range) | |
elif (h_start == 0) and (h_end != height): | |
h_jitter = random.randint(-jitter_range, 0) | |
elif (h_start != 0) and (h_end == height): | |
h_jitter = random.randint(0, jitter_range) | |
h_start = h_start + h_jitter + jitter_range | |
h_end = h_end + h_jitter + jitter_range | |
w_start = w_start + w_jitter + jitter_range | |
w_end = w_end + w_jitter + jitter_range | |
views.append((h_start, h_end, w_start, w_end)) | |
return views | |
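# Illustrative output of get_views (window arithmetic only, random_jitter=False):
# with vae_scale_factor=8, height=width=2048 maps to a 256x256 latent grid, and
# window_size=128, stride=64 produce 3x3 = 9 overlapping views:
#   (0, 128, 0, 128), (0, 128, 64, 192), (0, 128, 128, 256), ..., (128, 256, 128, 256)
# With random_jitter=True each view is additionally shifted by up to (window_size - stride) // 4
# and offset by jitter_range, so the caller pads the latent by jitter_range (see __call__).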
def upcast_vae(self): | |
dtype = self.vae.dtype | |
self.vae.to(dtype=torch.float32) | |
use_torch_2_0_or_xformers = isinstance( | |
self.vae.decoder.mid_block.attentions[0].processor, | |
( | |
AttnProcessor2_0, | |
XFormersAttnProcessor, | |
LoRAXFormersAttnProcessor, | |
LoRAAttnProcessor2_0, | |
), | |
) | |
if use_torch_2_0_or_xformers: | |
self.vae.post_quant_conv.to(dtype) | |
self.vae.decoder.conv_in.to(dtype) | |
self.vae.decoder.mid_block.to(dtype) | |
def register_attention_control(self, controller): | |
attn_procs = {} | |
cross_att_count = 0 | |
ori_attn_processors = self.unet.attn_processors | |
for name in self.unet.attn_processors.keys(): | |
if name.startswith("mid_block"): | |
place_in_unet = "mid" | |
elif name.startswith("up_blocks"): | |
place_in_unet = "up" | |
elif name.startswith("down_blocks"): | |
place_in_unet = "down" | |
else: | |
continue | |
cross_att_count += 1 | |
attn_procs[name] = P2PCrossAttnProcessor(controller=controller, place_in_unet=place_in_unet) | |
self.unet.set_attn_processor(attn_procs) | |
controller.num_att_layers = cross_att_count | |
return ori_attn_processors | |
def recover_attention_control(self, ori_attn_processors): | |
self.unet.set_attn_processor(ori_attn_processors) | |
def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): | |
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): | |
from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module | |
else: | |
raise ImportError("Offloading requires `accelerate v0.17.0` or higher.") | |
is_model_cpu_offload = False | |
is_sequential_cpu_offload = False | |
recursive = False | |
for _, component in self.components.items(): | |
if isinstance(component, torch.nn.Module): | |
if hasattr(component, "_hf_hook"): | |
is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload) | |
is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook) | |
logger.info( | |
"Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again." | |
) | |
recursive = is_sequential_cpu_offload | |
remove_hook_from_module(component, recurse=recursive) | |
state_dict, network_alphas = self.lora_state_dict( | |
pretrained_model_name_or_path_or_dict, | |
unet_config=self.unet.config, | |
**kwargs, | |
) | |
self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet) | |
text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k} | |
if len(text_encoder_state_dict) > 0: | |
self.load_lora_into_text_encoder( | |
text_encoder_state_dict, | |
network_alphas=network_alphas, | |
text_encoder=self.text_encoder, | |
prefix="text_encoder", | |
lora_scale=self.lora_scale, | |
) | |
text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k} | |
if len(text_encoder_2_state_dict) > 0: | |
self.load_lora_into_text_encoder( | |
text_encoder_2_state_dict, | |
network_alphas=network_alphas, | |
text_encoder=self.text_encoder_2, | |
prefix="text_encoder_2", | |
lora_scale=self.lora_scale, | |
) | |
if is_model_cpu_offload: | |
self.enable_model_cpu_offload() | |
elif is_sequential_cpu_offload: | |
self.enable_sequential_cpu_offload() | |
def save_lora_weights( | |
self, | |
save_directory: Union[str, os.PathLike], | |
unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, | |
text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, | |
text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, | |
is_main_process: bool = True, | |
weight_name: str = None, | |
save_function: Callable = None, | |
safe_serialization: bool = True, | |
): | |
state_dict = {} | |
def pack_weights(layers, prefix): | |
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers | |
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()} | |
return layers_state_dict | |
if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers): | |
raise ValueError( | |
"You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`." | |
) | |
if unet_lora_layers: | |
state_dict.update(pack_weights(unet_lora_layers, "unet")) | |
if text_encoder_lora_layers and text_encoder_2_lora_layers: | |
state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder")) | |
state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2")) | |
self.write_lora_layers( | |
state_dict=state_dict, | |
save_directory=save_directory, | |
is_main_process=is_main_process, | |
weight_name=weight_name, | |
save_function=save_function, | |
safe_serialization=safe_serialization, | |
) | |
def _remove_text_encoder_monkey_patch(self): | |
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder) | |
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2) | |
def __call__( | |
self, | |
prompt: Union[str, List[str]] = None, | |
prompt_2: Optional[Union[str, List[str]]] = None, | |
height: Optional[int] = None, | |
width: Optional[int] = None, | |
num_inference_steps: int = 50, | |
denoising_end: Optional[float] = None, | |
guidance_scale: float = 5.0, | |
negative_prompt: Optional[Union[str, List[str]]] = None, | |
negative_prompt_2: Optional[Union[str, List[str]]] = None, | |
num_images_per_prompt: Optional[int] = 1, | |
eta: float = 0.0, | |
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, | |
latents: Optional[torch.FloatTensor] = None, | |
prompt_embeds: Optional[torch.FloatTensor] = None, | |
negative_prompt_embeds: Optional[torch.FloatTensor] = None, | |
pooled_prompt_embeds: Optional[torch.FloatTensor] = None, | |
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, | |
output_type: Optional[str] = "pil", | |
return_dict: bool = False, | |
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, | |
callback_steps: int = 1, | |
cross_attention_kwargs: Optional[Dict[str, Any]] = None, | |
guidance_rescale: float = 0.0, | |
original_size: Optional[Tuple[int, int]] = None, | |
crops_coords_top_left: Tuple[int, int] = (0, 0), | |
target_size: Optional[Tuple[int, int]] = None, | |
negative_original_size: Optional[Tuple[int, int]] = None, | |
negative_crops_coords_top_left: Tuple[int, int] = (0, 0), | |
negative_target_size: Optional[Tuple[int, int]] = None, | |
################### AccDiffusion specific parameters #################### | |
image_lr: Optional[torch.FloatTensor] = None, | |
view_batch_size: int = 16, | |
multi_decoder: bool = True, | |
stride: Optional[int] = 64, | |
cosine_scale_1: Optional[float] = 3., | |
cosine_scale_2: Optional[float] = 1., | |
cosine_scale_3: Optional[float] = 1., | |
sigma: Optional[float] = 1.0, | |
lowvram: bool = False, | |
multi_guidance_scale: Optional[float] = 7.5, | |
use_guassian: bool = True, | |
upscale_mode: Union[str, List[str]] = 'bicubic_latent', | |
use_multidiffusion: bool = True, | |
use_dilated_sampling : bool = True, | |
use_skip_residual: bool = True, | |
use_progressive_upscaling: bool = True, | |
shuffle: bool = False, | |
result_path: str = './outputs/AccDiffusion', | |
debug: bool = False, | |
use_md_prompt: bool = False, | |
attn_res=None, | |
save_attention_map: bool = False, | |
seed: Optional[int] = None, | |
c : Optional[float] = 0.3, | |
): | |
""" | |
Generate images with the AccDiffusion pipeline built on Stable Diffusion XL.
This function encodes the given prompt, then runs denoising and progressive upscaling to produce the final image(s).
See the project documentation for detailed usage.
Examples: | |
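```py
>>> # Illustrative call; the argument values are assumptions, not prescribed defaults.
>>> images = pipe(
...     prompt="a photo of an astronaut riding a horse on mars",
...     height=2048, width=2048, num_inference_steps=50,
...     use_multidiffusion=True, use_dilated_sampling=True, use_skip_residual=True,
...     cross_attention_kwargs={"edit_type": "visualize", "n_self_replace": 0.4,
...                             "n_cross_replace": {"default_": 1.0, "confetti": 0.8}},
...     result_path="./outputs/AccDiffusion",
... )
>>> images[-1]  # the highest-resolution PIL image in the returned list
```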
""" | |
if debug: | |
num_inference_steps = 1 | |
height = height or self.default_sample_size * self.vae_scale_factor | |
width = width or self.default_sample_size * self.vae_scale_factor | |
x1_size = self.default_sample_size * self.vae_scale_factor | |
height_scale = height / x1_size | |
width_scale = width / x1_size | |
scale_num = int(max(height_scale, width_scale)) | |
aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale) | |
original_size = original_size or (height, width) | |
target_size = target_size or (height, width) | |
if attn_res is None: | |
attn_res = (int(np.ceil(self.default_sample_size * self.vae_scale_factor / 32)), int(np.ceil(self.default_sample_size * self.vae_scale_factor / 32))) | |
self.attn_res = attn_res | |
if lowvram: | |
attention_map_device = torch.device("cpu") | |
else: | |
attention_map_device = self.device | |
self.controller = create_controller( | |
prompt, cross_attention_kwargs, num_inference_steps, tokenizer=self.tokenizer, device=attention_map_device, attn_res=self.attn_res | |
) | |
if save_attention_map or use_md_prompt: | |
ori_attn_processors = self.register_attention_control(self.controller) | |
self.check_inputs( | |
prompt, | |
prompt_2, | |
height, | |
width, | |
callback_steps, | |
negative_prompt, | |
negative_prompt_2, | |
prompt_embeds, | |
negative_prompt_embeds, | |
pooled_prompt_embeds, | |
negative_pooled_prompt_embeds, | |
num_images_per_prompt, | |
) | |
if prompt is not None and isinstance(prompt, str): | |
batch_size = 1 | |
elif prompt is not None and isinstance(prompt, list): | |
batch_size = len(prompt) | |
else: | |
batch_size = prompt_embeds.shape[0] | |
device = self._execution_device | |
self.lowvram = lowvram | |
if self.lowvram: | |
self.vae.cpu() | |
self.unet.cpu() | |
self.text_encoder.to(device) | |
self.text_encoder_2.to(device) | |
do_classifier_free_guidance = guidance_scale > 1.0 | |
text_encoder_lora_scale = (cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None) | |
( | |
prompt_embeds, | |
negative_prompt_embeds, | |
pooled_prompt_embeds, | |
negative_pooled_prompt_embeds, | |
) = self.encode_prompt( | |
prompt=prompt, | |
prompt_2=prompt_2, | |
device=device, | |
num_images_per_prompt=num_images_per_prompt, | |
do_classifier_free_guidance=do_classifier_free_guidance, | |
negative_prompt=negative_prompt, | |
negative_prompt_2=negative_prompt_2, | |
prompt_embeds=prompt_embeds, | |
negative_prompt_embeds=negative_prompt_embeds, | |
pooled_prompt_embeds=pooled_prompt_embeds, | |
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, | |
lora_scale=text_encoder_lora_scale, | |
) | |
self.scheduler.set_timesteps(num_inference_steps, device=device) | |
timesteps = self.scheduler.timesteps | |
num_channels_latents = self.unet.config.in_channels | |
latents = self.prepare_latents( | |
batch_size * num_images_per_prompt, | |
num_channels_latents, | |
height // scale_num, | |
width // scale_num, | |
prompt_embeds.dtype, | |
device, | |
generator, | |
latents, | |
) | |
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) | |
add_text_embeds = pooled_prompt_embeds | |
add_time_ids = self._get_add_time_ids( | |
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype | |
) | |
if negative_original_size is not None and negative_target_size is not None: | |
negative_add_time_ids = self._get_add_time_ids( | |
negative_original_size, | |
negative_crops_coords_top_left, | |
negative_target_size, | |
dtype=prompt_embeds.dtype, | |
) | |
else: | |
negative_add_time_ids = add_time_ids | |
if do_classifier_free_guidance: | |
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0).to(device) | |
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0).to(device) | |
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0).to(device).repeat(batch_size * num_images_per_prompt, 1) | |
del negative_prompt_embeds, negative_pooled_prompt_embeds, negative_add_time_ids | |
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) | |
if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: | |
discrete_timestep_cutoff = int( | |
round( | |
self.scheduler.config.num_train_timesteps - (denoising_end * self.scheduler.config.num_train_timesteps) | |
) | |
) | |
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) | |
timesteps = timesteps[:num_inference_steps] | |
output_images = [] | |
###################################################### Phase Initialization ######################################################## | |
if self.lowvram: | |
self.text_encoder.cpu() | |
self.text_encoder_2.cpu() | |
if image_lr is None:
print("### Phase 1 Denoising ###") | |
with self.progress_bar(total=num_inference_steps) as progress_bar: | |
for i, t in enumerate(timesteps): | |
if self.lowvram: | |
self.vae.cpu() | |
self.unet.to(device) | |
latents_for_view = latents | |
latent_model_input = ( | |
latents.repeat_interleave(2, dim=0) | |
if do_classifier_free_guidance | |
else latents | |
) | |
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) | |
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} | |
noise_pred = self.unet( | |
latent_model_input, | |
t, | |
encoder_hidden_states=prompt_embeds, | |
added_cond_kwargs=added_cond_kwargs, | |
return_dict=False, | |
)[0] | |
if do_classifier_free_guidance: | |
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2] | |
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) | |
if do_classifier_free_guidance and guidance_rescale > 0.0: | |
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) | |
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] | |
if t == 1 and use_md_prompt: | |
md_prompts, views_attention = get_multidiffusion_prompts(tokenizer=self.tokenizer, prompts=[prompt], threthod=c, attention_store=self.controller, height=height//scale_num, width=width//scale_num, from_where=["up","down"], random_jitter=True, scale_num=scale_num) | |
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): | |
progress_bar.update() | |
if callback is not None and i % callback_steps == 0: | |
step_idx = i // getattr(self.scheduler, "order", 1) | |
callback(step_idx, t, latents) | |
del latents_for_view, latent_model_input, noise_pred, noise_pred_text, noise_pred_uncond | |
if use_md_prompt or save_attention_map: | |
self.recover_attention_control(ori_attn_processors=ori_attn_processors) | |
del self.controller | |
torch.cuda.empty_cache()
# Anchor statistics of the phase-1 latent; used later to re-normalize latents after each upscaling phase.
anchor_mean = latents.mean()
anchor_std = latents.std()
else: | |
print("### Encoding Real Image ###") | |
latents = self.vae.encode(image_lr) | |
latents = latents.latent_dist.sample() * self.vae.config.scaling_factor | |
anchor_mean = latents.mean() | |
anchor_std = latents.std() | |
if self.lowvram: | |
latents = latents.cpu() | |
torch.cuda.empty_cache() | |
if not output_type == "latent": | |
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast | |
if self.lowvram: | |
needs_upcasting = False | |
self.unet.cpu() | |
self.vae.to(device) | |
if needs_upcasting: | |
self.upcast_vae() | |
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) | |
if self.lowvram and multi_decoder: | |
current_width_height = self.unet.config.sample_size * self.vae_scale_factor | |
image = self.tiled_decode(latents, current_width_height, current_width_height) | |
else: | |
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] | |
if needs_upcasting: | |
self.vae.to(dtype=torch.float16) | |
image = self.image_processor.postprocess(image, output_type=output_type) | |
if not os.path.exists(f'{result_path}'): | |
os.makedirs(f'{result_path}') | |
image_lr_save_path = f'{result_path}/{image[0].size[0]}_{image[0].size[1]}.png' | |
image[0].save(image_lr_save_path) | |
output_images.append(image[0]) | |
####################################################### Phase Upscaling ##################################################### | |
if use_progressive_upscaling: | |
if image_lr is None:
starting_scale = 2 | |
else: | |
starting_scale = 1 | |
else: | |
starting_scale = scale_num | |
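# Progressive upscaling runs every intermediate scale from starting_scale up to scale_num,
# bicubically upscaling the latent and re-denoising it at each phase; otherwise only the
# final scale_num phase is run.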
for current_scale_num in range(starting_scale, scale_num + 1): | |
if self.lowvram: | |
latents = latents.to(device) | |
self.unet.to(device) | |
torch.cuda.empty_cache() | |
current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num | |
current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num | |
if height > width: | |
current_width = int(current_width * aspect_ratio) | |
else: | |
current_height = int(current_height * aspect_ratio) | |
if upscale_mode == "bicubic_latent" or debug: | |
latents = F.interpolate(latents.to(device), size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)), mode='bicubic') | |
else: | |
raise NotImplementedError | |
print("### Phase {} Denoising ###".format(current_scale_num)) | |
noise_latents = [] | |
noise = torch.randn_like(latents) | |
for timestep in timesteps: | |
noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0)) | |
noise_latents.append(noise_latent) | |
latents = noise_latents[0] | |
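# Pre-compute re-noised copies of the upscaled latent for every timestep; these serve as the
# "skip residual" reference that each denoising step can blend back toward (weight c1 below).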
with self.progress_bar(total=num_inference_steps) as progress_bar: | |
for i, t in enumerate(timesteps): | |
count = torch.zeros_like(latents) | |
value = torch.zeros_like(latents) | |
cosine_factor = 0.5 * (1 + torch.cos(torch.pi * (self.scheduler.config.num_train_timesteps - t) / self.scheduler.config.num_train_timesteps)).cpu() | |
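# cosine_factor decays from 1 at the start of denoising to 0 at the end; raising it to the
# cosine_scale_* powers yields the time-dependent weights c1/c2/c3 used for the skip residual,
# the multidiffusion vs. dilated-sampling fusion, and the Gaussian sigma.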
if use_skip_residual: | |
c1 = cosine_factor ** cosine_scale_1 | |
latents = latents * (1 - c1) + noise_latents[i] * c1 | |
if use_multidiffusion: | |
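# MultiDiffusion branch: denoise overlapping local windows of the latent and fuse the
# per-window results by weighted averaging (value / count); with use_md_prompt each window
# is conditioned on its own window-specific prompt embedding computed below.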
if use_md_prompt: | |
md_prompt_embeds_list = [] | |
md_add_text_embeds_list = [] | |
for md_prompt in md_prompts[current_scale_num]: | |
( | |
md_prompt_embeds, | |
md_negative_prompt_embeds, | |
md_pooled_prompt_embeds, | |
md_negative_pooled_prompt_embeds, | |
) = self.encode_prompt( | |
prompt=md_prompt, | |
prompt_2=prompt_2, | |
device=device, | |
num_images_per_prompt=num_images_per_prompt, | |
do_classifier_free_guidance=do_classifier_free_guidance, | |
negative_prompt=negative_prompt, | |
negative_prompt_2=negative_prompt_2, | |
prompt_embeds=None, | |
negative_prompt_embeds=None, | |
pooled_prompt_embeds=None, | |
negative_pooled_prompt_embeds=None, | |
lora_scale=text_encoder_lora_scale, | |
) | |
md_prompt_embeds_list.append(torch.cat([md_negative_prompt_embeds, md_prompt_embeds], dim=0).to(device)) | |
md_add_text_embeds_list.append(torch.cat([md_negative_pooled_prompt_embeds, md_pooled_prompt_embeds], dim=0).to(device)) | |
del md_negative_prompt_embeds, md_negative_pooled_prompt_embeds | |
if use_md_prompt: | |
random_jitter = True | |
views = [(h_start*4, h_end*4, w_start*4, w_end*4) for h_start, h_end, w_start, w_end in views_attention[current_scale_num]] | |
else: | |
random_jitter = True | |
views = self.get_views(current_height, current_width, stride=stride, window_size=self.unet.config.sample_size, random_jitter=random_jitter) | |
views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)] | |
if use_md_prompt: | |
views_prompt_embeds_input = [md_prompt_embeds_list[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)] | |
views_add_text_embeds_input = [md_add_text_embeds_list[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)] | |
if random_jitter: | |
jitter_range = int((self.unet.config.sample_size - stride) // 4) | |
latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), 'constant', 0) | |
else: | |
latents_ = latents | |
count_local = torch.zeros_like(latents_) | |
value_local = torch.zeros_like(latents_) | |
for j, batch_view in enumerate(views_batch): | |
vb_size = len(batch_view) | |
latents_for_view = torch.cat( | |
[latents_[:, :, h_start:h_end, w_start:w_end] for h_start, h_end, w_start, w_end in batch_view] | |
) | |
latent_model_input = latents_for_view | |
latent_model_input = (latent_model_input.repeat_interleave(2, dim=0) | |
if do_classifier_free_guidance | |
else latent_model_input) | |
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) | |
add_time_ids_input = [] | |
for h_start, h_end, w_start, w_end in batch_view: | |
add_time_ids_ = add_time_ids.clone() | |
add_time_ids_[:, 2] = h_start * self.vae_scale_factor | |
add_time_ids_[:, 3] = w_start * self.vae_scale_factor | |
add_time_ids_input.append(add_time_ids_) | |
add_time_ids_input = torch.cat(add_time_ids_input) | |
if not use_md_prompt: | |
prompt_embeds_input = torch.cat([prompt_embeds] * vb_size) | |
add_text_embeds_input = torch.cat([add_text_embeds] * vb_size) | |
added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input} | |
noise_pred = self.unet( | |
latent_model_input, | |
t, | |
encoder_hidden_states=prompt_embeds_input, | |
added_cond_kwargs=added_cond_kwargs, | |
return_dict=False, | |
)[0] | |
else: | |
md_prompt_embeds_input = torch.cat(views_prompt_embeds_input[j]) | |
md_add_text_embeds_input = torch.cat(views_add_text_embeds_input[j]) | |
md_added_cond_kwargs = {"text_embeds": md_add_text_embeds_input, "time_ids": add_time_ids_input} | |
noise_pred = self.unet( | |
latent_model_input, | |
t, | |
encoder_hidden_states=md_prompt_embeds_input, | |
added_cond_kwargs=md_added_cond_kwargs, | |
return_dict=False, | |
)[0] | |
if do_classifier_free_guidance: | |
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2] | |
noise_pred = noise_pred_uncond + multi_guidance_scale * (noise_pred_text - noise_pred_uncond) | |
if do_classifier_free_guidance and guidance_rescale > 0.0: | |
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) | |
self.scheduler._init_step_index(t) | |
latents_denoised_batch = self.scheduler.step(noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0] | |
for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(latents_denoised_batch.chunk(vb_size), batch_view): | |
value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised | |
count_local[:, :, h_start:h_end, w_start:w_end] += 1 | |
if random_jitter: | |
value_local = value_local[:, :, jitter_range:jitter_range + current_height // self.vae_scale_factor, jitter_range:jitter_range + current_width // self.vae_scale_factor] | |
count_local = count_local[:, :, jitter_range:jitter_range + current_height // self.vae_scale_factor, jitter_range:jitter_range + current_width // self.vae_scale_factor] | |
noise_index = i + 1 if i != (len(timesteps) - 1) else i | |
value_local = torch.where(count_local == 0, noise_latents[noise_index], value_local) | |
count_local = torch.where(count_local == 0, torch.ones_like(count_local), count_local) | |
if use_dilated_sampling: | |
c2 = cosine_factor ** cosine_scale_2 | |
value += value_local / count_local * (1 - c2) | |
count += torch.ones_like(value_local) * (1 - c2) | |
else: | |
value += value_local / count_local | |
count += torch.ones_like(value_local) | |
if use_dilated_sampling: | |
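# Dilated sampling branch: split the (padded) latent into current_scale_num**2 interleaved
# sub-grids, one per (h, w) offset, denoise each sub-grid as a base-resolution latent, and
# scatter the results back; an optional Gaussian blur on the model input (use_guassian)
# smooths the seams between interleaved positions.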
views = [[h, w] for h in range(current_scale_num) for w in range(current_scale_num)] | |
views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)] | |
h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num | |
w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num | |
latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), 'constant', 0) | |
count_global = torch.zeros_like(latents_) | |
value_global = torch.zeros_like(latents_) | |
if use_guassian: | |
c3 = 0.99 * cosine_factor ** cosine_scale_3 + 1e-2 | |
std_, mean_ = latents_.std(), latents_.mean() | |
latents_gaussian = gaussian_filter(latents_, kernel_size=(2*current_scale_num-1), sigma=sigma*c3) | |
latents_gaussian = (latents_gaussian - latents_gaussian.mean()) / latents_gaussian.std() * std_ + mean_ | |
else: | |
latents_gaussian = latents_ | |
for j, batch_view in enumerate(views_batch): | |
latents_for_view = torch.cat( | |
[latents_[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view] | |
) | |
latents_for_view_gaussian = torch.cat( | |
[latents_gaussian[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view] | |
) | |
if shuffle: | |
shape = latents_for_view.shape | |
# Fix: added the missing parentheses so the repeat count is passed to range(...)
shuffle_index = torch.stack([torch.randperm(shape[0]) for _ in range(latents_for_view.reshape(-1).shape[0]//shape[0])]) | |
shuffle_index = shuffle_index.view(shape[1], shape[2], shape[3], shape[0]) | |
original_index = torch.zeros_like(shuffle_index).scatter_(3, shuffle_index, torch.arange(shape[0]).repeat(shape[1], shape[2], shape[3], 1)) | |
shuffle_index = shuffle_index.permute(3, 0, 1, 2).to(device) | |
original_index = original_index.permute(3, 0, 1, 2).to(device) | |
latents_for_view_gaussian = latents_for_view_gaussian.gather(0, shuffle_index) | |
vb_size = latents_for_view.size(0) | |
latent_model_input = latents_for_view_gaussian | |
latent_model_input = (latent_model_input.repeat_interleave(2, dim=0) | |
if do_classifier_free_guidance | |
else latent_model_input) | |
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) | |
prompt_embeds_input = torch.cat([prompt_embeds] * vb_size) | |
add_text_embeds_input = torch.cat([add_text_embeds] * vb_size) | |
add_time_ids_input = torch.cat([add_time_ids] * vb_size) | |
added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input} | |
noise_pred = self.unet( | |
latent_model_input, | |
t, | |
encoder_hidden_states=prompt_embeds_input, | |
added_cond_kwargs=added_cond_kwargs, | |
return_dict=False, | |
)[0] | |
if do_classifier_free_guidance: | |
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2] | |
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) | |
if do_classifier_free_guidance and guidance_rescale > 0.0: | |
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) | |
if shuffle: | |
noise_pred = noise_pred.gather(0, original_index) | |
self.scheduler._init_step_index(t) | |
latents_denoised_batch = self.scheduler.step(noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False)[0] | |
for latents_view_denoised, (h, w) in zip(latents_denoised_batch.chunk(vb_size), batch_view): | |
value_global[:, :, h::current_scale_num, w::current_scale_num] += latents_view_denoised | |
count_global[:, :, h::current_scale_num, w::current_scale_num] += 1 | |
value_global = value_global[:, :, h_pad:, w_pad:] | |
if use_multidiffusion: | |
c2 = cosine_factor ** cosine_scale_2 | |
value += value_global * c2 | |
count += torch.ones_like(value_global) * c2 | |
else: | |
value += value_global | |
count += torch.ones_like(value_global) | |
latents = torch.where(count > 0, value / count, value) | |
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): | |
progress_bar.update() | |
if callback is not None and i % callback_steps == 0: | |
step_idx = i // getattr(self.scheduler, "order", 1) | |
callback(step_idx, t, latents) | |
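# Re-normalize the fused latent to the anchor mean/std (from phase 1 or the encoded
# low-resolution image) to counteract statistics drift introduced by the fusion above.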
latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean | |
if self.lowvram: | |
latents = latents.cpu() | |
torch.cuda.empty_cache() | |
if not output_type == "latent": | |
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast | |
if self.lowvram: | |
needs_upcasting = False | |
self.unet.cpu() | |
self.vae.to(device) | |
if needs_upcasting: | |
self.upcast_vae() | |
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) | |
print("### Phase {} Decoding ###".format(current_scale_num)) | |
if current_height > 2048 or current_width > 2048: | |
self.enable_vae_tiling() | |
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] | |
else: | |
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] | |
image = self.image_processor.postprocess(image, output_type=output_type) | |
image[0].save(f'{result_path}/AccDiffusion_{current_scale_num}.png') | |
output_images.append(image[0]) | |
if needs_upcasting: | |
self.vae.to(dtype=torch.float16) | |
else: | |
image = latents | |
self.maybe_free_model_hooks() | |
return output_images | |
if __name__ == "__main__": | |
parser = argparse.ArgumentParser() | |
### AccDiffusion PARAMETERS ### | |
parser.add_argument('--model_ckpt', default='stabilityai/stable-diffusion-xl-base-1.0') | |
parser.add_argument('--seed', type=int, default=42) | |
parser.add_argument('--prompt', default="Astronaut on Mars During sunset.") | |
parser.add_argument('--negative_prompt', default="blurry, ugly, duplicate, poorly drawn, deformed, mosaic") | |
parser.add_argument('--cosine_scale_1', default=3, type=float, help="cosine scale 1") | |
parser.add_argument('--cosine_scale_2', default=1, type=float, help="cosine scale 2") | |
parser.add_argument('--cosine_scale_3', default=1, type=float, help="cosine scale 3") | |
parser.add_argument('--sigma', default=0.8, type=float, help="sigma") | |
parser.add_argument('--multi_decoder', default=True, type=bool, help="multi decoder or not") | |
parser.add_argument('--num_inference_steps', default=50, type=int, help="num inference steps") | |
parser.add_argument('--resolution', default='1024,1024', help="target resolution") | |
parser.add_argument('--use_multidiffusion', default=False, action='store_true', help="use multidiffusion or not") | |
parser.add_argument('--use_guassian', default=False, action='store_true', help="use Gaussian filtering or not")
parser.add_argument('--use_dilated_sampling', default=True, action='store_true', help="use dilated sampling or not") | |
parser.add_argument('--use_progressive_upscaling', default=False, action='store_true', help="use progressive upscaling or not") | |
parser.add_argument('--shuffle', default=False, action='store_true', help="shuffle or not") | |
parser.add_argument('--use_skip_residual', default=False, action='store_true', help="use skip_residual or not") | |
parser.add_argument('--save_attention_map', default=False, action='store_true', help="save attention map or not") | |
parser.add_argument('--multi_guidance_scale', default=7.5, type=float, help="multi guidance scale") | |
parser.add_argument('--upscale_mode', default="bicubic_latent", help="bicubic_image or bicubic_latent ") | |
parser.add_argument('--use_md_prompt', default=False, action='store_true', help="use md prompt or not") | |
parser.add_argument('--view_batch_size', default=16, type=int, help="view_batch_size") | |
parser.add_argument('--stride', default=64, type=int, help="stride") | |
parser.add_argument('--c', default=0.3, type=float, help="threshold") | |
## others ## | |
parser.add_argument('--debug', default=False, action='store_true') | |
parser.add_argument('--experiment_name', default="AccDiffusion") | |
args = parser.parse_args() | |
pipe = AccDiffusionSDXLPipeline.from_pretrained(args.model_ckpt, torch_dtype=torch.float16).to("cuda") | |
# ZeroGPU Space: `spaces` is imported above but the GPU-allocating decorator was missing;
# without it the call cannot be scheduled on the ZeroGPU device.
@spaces.GPU
def infer(prompt, resolution, num_inference_steps, guidance_scale, seed, use_multidiffusion, use_skip_residual, use_dilated_sampling, use_progressive_upscaling, shuffle, use_md_prompt, progress=gr.Progress(track_tqdm=True)):
set_seed(seed) | |
width, height = list(map(int, resolution.split(','))) | |
cross_attention_kwargs = {"edit_type": "visualize", | |
"n_self_replace": 0.4, | |
"n_cross_replace": {"default_": 1.0, "confetti": 0.8}, | |
} | |
generator = torch.Generator(device='cuda').manual_seed(seed) | |
print(f"Prompt: {prompt}") | |
md5_hash = hashlib.md5(prompt.encode()).hexdigest() | |
result_path = f"./output/{args.experiment_name}/{md5_hash}/{width}_{height}_{seed}/" | |
images = pipe( | |
prompt, | |
negative_prompt=args.negative_prompt, | |
generator=generator, | |
width=width, | |
height=height, | |
view_batch_size=args.view_batch_size, | |
stride=args.stride, | |
cross_attention_kwargs=cross_attention_kwargs, | |
num_inference_steps=num_inference_steps, | |
guidance_scale=guidance_scale, | |
multi_guidance_scale=args.multi_guidance_scale, | |
cosine_scale_1=args.cosine_scale_1, | |
cosine_scale_2=args.cosine_scale_2, | |
cosine_scale_3=args.cosine_scale_3, | |
sigma=args.sigma, use_guassian=args.use_guassian, | |
multi_decoder=args.multi_decoder, | |
upscale_mode=args.upscale_mode, | |
use_multidiffusion=use_multidiffusion, | |
use_skip_residual=use_skip_residual, | |
use_progressive_upscaling=use_progressive_upscaling, | |
use_dilated_sampling=use_dilated_sampling, | |
shuffle=shuffle, | |
result_path=result_path, | |
debug=args.debug, save_attention_map=args.save_attention_map, use_md_prompt=use_md_prompt, c=args.c | |
) | |
print(images) | |
return images | |
MAX_SEED = np.iinfo(np.int32).max | |
css = """ | |
body { | |
background: linear-gradient(135deg, #2c3e50, #4ca1af); | |
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; | |
color: #ffffff; | |
} | |
#col-container { | |
margin: 20px auto; | |
padding: 20px; | |
max-width: 900px; | |
background-color: rgba(0, 0, 0, 0.5); | |
border-radius: 12px; | |
box-shadow: 0 4px 12px rgba(0,0,0,0.5); | |
} | |
h1, h2 { | |
text-align: center; | |
margin-bottom: 10px; | |
} | |
footer { | |
visibility: hidden; | |
} | |
""" | |
with gr.Blocks(css=css) as demo: | |
with gr.Column(elem_id="col-container"): | |
gr.Markdown("<h1>AccDiffusion: Advanced AI Art Generator</h1>") | |
gr.Markdown(
"Enter a creative prompt for the image you want to generate. This model applies the AccDiffusion technique to produce artwork across a range of styles and resolutions."
)
with gr.Row(): | |
prompt = gr.Textbox(label="Prompt", placeholder="e.g., A surreal landscape with floating islands and vibrant colors.", lines=2, scale=4)
submit_btn = gr.Button("Generate", scale=1) | |
with gr.Accordion("Advanced Settings", open=False): | |
with gr.Row(): | |
resolution = gr.Radio( | |
label="Resolution", | |
choices = [ | |
"1024,1024", "2048,2048", "2048,1024", "1536,3072", "3072,3072", "4096,4096", "4096,2048" | |
], | |
value = "1024,1024", | |
interactive=True | |
) | |
with gr.Column(): | |
num_inference_steps = gr.Slider(label="Inference Steps", minimum=2, maximum=50, step=1, value=30, info="Number of denoising steps") | |
guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=50, step=0.1, value=7.5, info="Higher values increase adherence to the prompt")
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, info="Set a seed for reproducibility") | |
with gr.Row(): | |
use_multidiffusion = gr.Checkbox(label="Use MultiDiffusion", value=True) | |
use_skip_residual = gr.Checkbox(label="Use Skip Residual", value=True) | |
use_dilated_sampling = gr.Checkbox(label="Use Dilated Sampling", value=True) | |
with gr.Row(): | |
use_progressive_upscaling = gr.Checkbox(label="Use Progressive Upscaling", value=False) | |
shuffle = gr.Checkbox(label="Shuffle", value=False) | |
use_md_prompt = gr.Checkbox(label="Use MD Prompt", value=False) | |
output_images = gr.Gallery(label="Output Images", format="png") | |
gr.Markdown("### Example Prompts") | |
gr.Examples( | |
examples = [ | |
["A surreal landscape with floating islands and vibrant colors."], | |
["Cyberpunk cityscape at night with neon lights and futuristic architecture."], | |
["A majestic dragon soaring over a medieval castle amidst stormy skies."], | |
["Futuristic robot exploring an alien planet with mysterious flora."], | |
["Abstract geometric patterns in vivid, pulsating colors."], | |
["A mystical forest illuminated by bioluminescent plants under a starry sky."] | |
], | |
inputs = [prompt], | |
label="Click an example to populate the prompt box." | |
) | |
submit_btn.click( | |
fn = infer, | |
inputs = [prompt, resolution, num_inference_steps, guidance_scale, seed, | |
use_multidiffusion, use_skip_residual, use_dilated_sampling, use_progressive_upscaling, shuffle, use_md_prompt], | |
outputs = [output_images], | |
show_api=False | |
) | |
demo.launch(show_api=False, show_error=True) | |