Upload folder using huggingface_hub
- main/README.md +1 -1
- main/dps_pipeline.py +2 -2
- main/hd_painter.py +6 -6
- main/img2img_inpainting.py +1 -1
- main/latent_consistency_img2img.py +2 -2
- main/magic_mix.py +1 -1
- main/mixture_tiling.py +3 -3
- main/pipeline_controlnet_xl_kolors.py +1 -1
- main/pipeline_controlnet_xl_kolors_img2img.py +1 -1
- main/pipeline_controlnet_xl_kolors_inpaint.py +1 -1
- main/pipeline_fabric.py +1 -1
- main/pipeline_faithdiff_stable_diffusion_xl.py +1 -1
- main/pipeline_stable_diffusion_boxdiff.py +1 -1
- main/pipeline_stable_diffusion_xl_attentive_eraser.py +2 -2
- main/pipeline_stable_diffusion_xl_controlnet_adapter.py +1 -1
- main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +1 -1
- main/regional_prompting_stable_diffusion.py +3 -3
- main/sde_drag.py +1 -1
- main/unclip_image_interpolation.py +1 -1
main/README.md
CHANGED
@@ -4865,7 +4865,7 @@ python -m pip install intel_extension_for_pytorch
 ```
 python -m pip install intel_extension_for_pytorch==<version_name> -f https://developer.intel.com/ipex-whl-stable-cpu
 ```
-2. After pipeline initialization, `prepare_for_ipex()` should be called to enable IPEX
+2. After pipeline initialization, `prepare_for_ipex()` should be called to enable IPEX acceleration. Supported inference datatypes are Float32 and BFloat16.

 ```python
 pipe = AnimateDiffPipelineIpex.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
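For context, the documented step can be sketched end to end as below. This is a minimal sketch, not the README's full example: the checkpoint names, the prompt, and the argument list passed to `prepare_for_ipex()` are assumptions for illustration; only the call order (initialize, then `prepare_for_ipex()`) and the Float32/BFloat16 support come from the README.

```python
# Minimal sketch of the documented flow. Assumptions: checkpoint names, prompt,
# and the prepare_for_ipex() argument list are illustrative; consult the
# community pipeline's docstring for the exact signature.
import torch
from diffusers import MotionAdapter

base = "emilianJR/epiCRealism"  # assumed base checkpoint
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")  # assumed adapter
dtype = torch.bfloat16  # Float32 is also supported
device = "cpu"

# AnimateDiffPipelineIpex is the community pipeline class defined in main/; importing/loading it is assumed here.
pipe = AnimateDiffPipelineIpex.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
# Call prepare_for_ipex() after initialization to enable IPEX acceleration.
pipe.prepare_for_ipex(dtype, prompt="A girl smiling", height=512, width=512)  # assumed argument list
```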
main/dps_pipeline.py
CHANGED
@@ -336,13 +336,13 @@ if __name__ == "__main__":
 expanded_kernel_width = np.ceil(kernel_width) + 2

 # Determine a set of field_of_view for each each output position, these are the pixels in the input image
-# that the pixel in the output image 'sees'. We get a matrix
+# that the pixel in the output image 'sees'. We get a matrix whose horizontal dim is the output pixels (big) and the
 # vertical dim is the pixels it 'sees' (kernel_size + 2)
 field_of_view = np.squeeze(
     np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1)
 )

-# Assign weight to each pixel in the field of view. A matrix
+# Assign weight to each pixel in the field of view. A matrix whose horizontal dim is the output pixels and the
 # vertical dim is a list of weights matching to the pixel in the field of view (that are specified in
 # 'field_of_view')
 weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)
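To make the completed comment concrete, here is a small self-contained NumPy sketch of the same field-of-view construction. The `left_boundary` values and the kernel width are made-up illustrative inputs; only the indexing expression mirrors the code in the hunk above.

```python
import numpy as np

# Illustrative inputs (assumed): left boundary of each output pixel's support,
# and an expanded kernel width of kernel_size + 2.
left_boundary = np.array([0, 2, 4, 6])  # one entry per output pixel
expanded_kernel_width = 4

# Same expression as in dps_pipeline.py: row i lists the input pixels that
# output pixel i 'sees'.
field_of_view = np.squeeze(
    np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1)
)
print(field_of_view)
# [[-1  0  1  2]
#  [ 1  2  3  4]
#  [ 3  4  5  6]
#  [ 5  6  7  8]]
```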
main/hd_painter.py
CHANGED
@@ -201,16 +201,16 @@ class PAIntAAttnProcessor:
 # ================================================== #
 # We use a hack by running the code from the BasicTransformerBlock that is between Self and Cross attentions here
 # The other option would've been modifying the BasicTransformerBlock and adding this functionality here.
-# I assumed that changing the BasicTransformerBlock would have been a bigger deal and decided to use this hack
+# I assumed that changing the BasicTransformerBlock would have been a bigger deal and decided to use this hack instead.

-# The SelfAttention block
+# The SelfAttention block receives the normalized latents from the BasicTransformerBlock,
 # But the residual of the output is the non-normalized version.
 # Therefore we unnormalize the input hidden state here
 unnormalized_input_hidden_states = (
     input_hidden_states + self.transformer_block.norm1.bias
 ) * self.transformer_block.norm1.weight

-# TODO: return if
+# TODO: return if necessary
 # if self.use_ada_layer_norm_zero:
 # attn_output = gate_msa.unsqueeze(1) * attn_output
 # elif self.use_ada_layer_norm_single:
@@ -220,7 +220,7 @@ class PAIntAAttnProcessor:
 if transformer_hidden_states.ndim == 4:
     transformer_hidden_states = transformer_hidden_states.squeeze(1)

-# TODO: return if
+# TODO: return if necessary
 # 2.5 GLIGEN Control
 # if gligen_kwargs is not None:
 # transformer_hidden_states = self.fuser(transformer_hidden_states, gligen_kwargs["objs"])
@@ -266,7 +266,7 @@ class PAIntAAttnProcessor:
 ) = cross_attention_input_hidden_states.chunk(2)

 # Same split for the encoder_hidden_states i.e. the tokens
-# Since the SelfAttention processors don't get the encoder states as input, we inject them into the processor in the
+# Since the SelfAttention processors don't get the encoder states as input, we inject them into the processor in the beginning.
 _encoder_hidden_states_unconditional, encoder_hidden_states_conditional = self.encoder_hidden_states.chunk(
     2
 )
@@ -896,7 +896,7 @@ class StableDiffusionHDPainterPipeline(StableDiffusionInpaintPipeline):
 class GaussianSmoothing(nn.Module):
     """
     Apply gaussian smoothing on a
-    1d, 2d or 3d tensor. Filtering is performed
+    1d, 2d or 3d tensor. Filtering is performed separately for each channel
     in the input using a depthwise convolution.

     Args:
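The GaussianSmoothing docstring above describes per-channel filtering with a depthwise convolution. A minimal 2D sketch of that idea is shown below; it is not the class from the diff, and the function name, kernel size, and sigma defaults are assumptions.

```python
import torch
import torch.nn.functional as F

def gaussian_smooth_2d(x: torch.Tensor, kernel_size: int = 3, sigma: float = 0.5) -> torch.Tensor:
    """Blur each channel of x (shape [N, C, H, W]) independently with a depthwise convolution."""
    coords = torch.arange(kernel_size, dtype=x.dtype, device=x.device) - (kernel_size - 1) / 2
    g = torch.exp(-(coords**2) / (2 * sigma**2))
    g = g / g.sum()
    kernel_2d = torch.outer(g, g)  # separable 1D Gaussian -> 2D kernel
    channels = x.shape[1]
    weight = kernel_2d.expand(channels, 1, kernel_size, kernel_size).contiguous()
    # groups == channels makes the convolution depthwise, i.e. per channel.
    return F.conv2d(x, weight, padding=kernel_size // 2, groups=channels)

smoothed = gaussian_smooth_2d(torch.rand(1, 4, 64, 64))
```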
main/img2img_inpainting.py
CHANGED
@@ -161,7 +161,7 @@ class ImageToImageInpaintingPipeline(DiffusionPipeline):
     `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
     be masked out with `mask_image` and repainted according to `prompt`.
 inner_image (`torch.Tensor` or `PIL.Image.Image`):
-    `Image`, or tensor representing an image batch which will be
+    `Image`, or tensor representing an image batch which will be overlaid onto `image`. Non-transparent
     regions of `inner_image` must fit inside white pixels in `mask_image`. Expects four channels, with
     the last channel representing the alpha channel, which will be used to blend `inner_image` with
     `image`. If not provided, it will be forcibly cast to RGBA.
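The `inner_image` description above relies on alpha blending of an RGBA overlay. Outside the pipeline, that blending idea can be sketched with PIL as below; the file names and the resize step are assumptions for illustration.

```python
from PIL import Image

image = Image.open("background.png").convert("RGBA")  # assumed file name
inner_image = Image.open("inner.png")                 # assumed file name
if inner_image.mode != "RGBA":
    inner_image = inner_image.convert("RGBA")         # the docstring says it is forcibly cast to RGBA

# The last (alpha) channel decides how inner_image is blended over image.
blended = Image.alpha_composite(image, inner_image.resize(image.size))
blended.save("blended.png")
```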
main/latent_consistency_img2img.py
CHANGED
@@ -647,7 +647,7 @@ class LCMSchedulerWithTimestamp(SchedulerMixin, ConfigMixin):
 return sample

 def set_timesteps(
-    self,
+    self, strength, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None
 ):
 """
 Sets the discrete timesteps used for the diffusion chain (to be run before inference).
@@ -668,7 +668,7 @@ class LCMSchedulerWithTimestamp(SchedulerMixin, ConfigMixin):
 # LCM Timesteps Setting: # Linear Spacing
 c = self.config.num_train_timesteps // lcm_origin_steps
 lcm_origin_timesteps = (
-    np.asarray(list(range(1, int(lcm_origin_steps *
+    np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1
 ) # LCM Training Steps Schedule
 skipping_step = len(lcm_origin_timesteps) // num_inference_steps
 timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
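To illustrate the schedule that the new `set_timesteps` arguments produce, here is a standalone NumPy sketch with made-up numbers (1000 training steps, 50 origin steps, strength 0.6, and 4 inference steps are assumptions; only the formulas mirror the hunk above).

```python
import numpy as np

# Illustrative numbers (assumed); only the formulas come from the diff.
num_train_timesteps, lcm_origin_steps, strength, num_inference_steps = 1000, 50, 0.6, 4

c = num_train_timesteps // lcm_origin_steps  # 20
lcm_origin_timesteps = np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1
# -> [19, 39, ..., 599]: 30 entries, the LCM training-steps schedule scaled by strength
skipping_step = len(lcm_origin_timesteps) // num_inference_steps  # 30 // 4 = 7
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]
print(timesteps)  # [599 459 319 179]
```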
main/magic_mix.py
CHANGED
@@ -129,7 +129,7 @@ class MagicMixPipeline(DiffusionPipeline):

 input = (
     (mix_factor * latents) + (1 - mix_factor) * orig_latents
-) # interpolating between layout noise and conditionally generated noise to preserve layout
+) # interpolating between layout noise and conditionally generated noise to preserve layout semantics
 input = torch.cat([input] * 2)

 else: # content generation phase
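The hunk above linearly interpolates two latent batches and then duplicates the result for classifier-free guidance. A minimal standalone sketch of those two operations, with made-up shapes and mix factor:

```python
import torch

# Made-up shapes and mix factor, matching the two operations in the hunk above.
latents = torch.randn(1, 4, 64, 64)       # conditionally generated noise
orig_latents = torch.randn(1, 4, 64, 64)  # layout noise
mix_factor = 0.5

# Interpolate between the two noise sources, then duplicate so the denoiser can
# run the unconditional and conditional passes in one batch.
mixed = mix_factor * latents + (1 - mix_factor) * orig_latents
model_input = torch.cat([mixed] * 2)  # shape: (2, 4, 64, 64)
```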
main/mixture_tiling.py
CHANGED
@@ -196,9 +196,9 @@ class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixi
 guidance_scale_tiles: specific weights for classifier-free guidance in each tile.
 guidance_scale_tiles: specific weights for classifier-free guidance in each tile. If None, the value provided in guidance_scale will be used.
 seed_tiles: specific seeds for the initialization latents in each tile. These will override the latents generated for the whole canvas using the standard seed parameter.
-seed_tiles_mode: either "full" "exclusive". If "full", all the latents affected by the tile be
+seed_tiles_mode: either "full" "exclusive". If "full", all the latents affected by the tile be overridden. If "exclusive", only the latents that are affected exclusively by this tile (and no other tiles) will be overridden.
-seed_reroll_regions: a list of tuples in the form (start row, end row, start column, end column, seed) defining regions in pixel space for which the latents will be
+seed_reroll_regions: a list of tuples in the form (start row, end row, start column, end column, seed) defining regions in pixel space for which the latents will be overridden using the given seed. Takes priority over seed_tiles.
-cpu_vae: the decoder from latent space to pixel space can require too
+cpu_vae: the decoder from latent space to pixel space can require too much GPU RAM for large images. If you find out of memory errors at the end of the generation process, try setting this parameter to True to run the decoder in CPU. Slower, but should run without memory issues.

 Examples:

main/pipeline_controlnet_xl_kolors.py
CHANGED
@@ -1258,7 +1258,7 @@ class KolorsControlNetPipeline(
 )

 if guess_mode and self.do_classifier_free_guidance:
-    #
+    # Inferred ControlNet only for the conditional batch.
     # To apply the output of ControlNet to both the unconditional and conditional batches,
     # add 0 to the unconditional batch to keep it unchanged.
     down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
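As a standalone illustration of the comment above (zeros for the unconditional half, the ControlNet residual for the conditional half), with made-up tensor shapes:

```python
import torch

# Made-up ControlNet residuals for a single conditional sample.
down_block_res_samples = [torch.randn(1, 320, 64, 64), torch.randn(1, 640, 32, 32)]

# Same trick as in the hunk above: prepend zeros so the unconditional half of the
# classifier-free-guidance batch is left unchanged by the ControlNet output.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
print([d.shape for d in down_block_res_samples])  # each residual's batch dimension doubles from 1 to 2
```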
main/pipeline_controlnet_xl_kolors_img2img.py
CHANGED
@@ -1462,7 +1462,7 @@ class KolorsControlNetImg2ImgPipeline(
 )

 if guess_mode and self.do_classifier_free_guidance:
-    #
+    # Inferred ControlNet only for the conditional batch.
     # To apply the output of ControlNet to both the unconditional and conditional batches,
     # add 0 to the unconditional batch to keep it unchanged.
     down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
main/pipeline_controlnet_xl_kolors_inpaint.py
CHANGED
@@ -1782,7 +1782,7 @@ class KolorsControlNetInpaintPipeline(
 )

 if guess_mode and self.do_classifier_free_guidance:
-    #
+    # Inferred ControlNet only for the conditional batch.
     # To apply the output of ControlNet to both the unconditional and conditional batches,
     # add 0 to the unconditional batch to keep it unchanged.
     down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
main/pipeline_fabric.py
CHANGED
@@ -559,7 +559,7 @@ class FabricPipeline(DiffusionPipeline):
     End point for providing feedback (between 0 and 1).
 min_weight (`float`, *optional*, defaults to `.05`):
     Minimum weight for feedback.
-max_weight (`float`, *optional*,
+max_weight (`float`, *optional*, defaults tp `1.0`):
     Maximum weight for feedback.
 neg_scale (`float`, *optional*, defaults to `.5`):
     Scale factor for negative feedback.
main/pipeline_faithdiff_stable_diffusion_xl.py
CHANGED
@@ -118,7 +118,7 @@ EXAMPLE_DOC_STRING = """
 >>> # Here we need use pipeline internal unet model
 >>> pipe.unet = pipe.unet_model.from_pretrained(model_id, subfolder="unet", variant="fp16", use_safetensors=True)
 >>>
->>> # Load
+>>> # Load additional layers to the model
 >>> pipe.unet.load_additional_layers(weight_path="proc_data/faithdiff/FaithDiff.bin", dtype=dtype)
 >>>
 >>> # Enable vae tiling
main/pipeline_stable_diffusion_boxdiff.py
CHANGED
@@ -72,7 +72,7 @@ class GaussianSmoothing(nn.Module):
 """
 Copied from official repo: https://github.com/showlab/BoxDiff/blob/master/utils/gaussian_smoothing.py
 Apply gaussian smoothing on a
-1d, 2d or 3d tensor. Filtering is performed
+1d, 2d or 3d tensor. Filtering is performed separately for each channel
 in the input using a depthwise convolution.
 Arguments:
 channels (int, sequence): Number of channels of the input tensors. Output will
main/pipeline_stable_diffusion_xl_attentive_eraser.py
CHANGED
@@ -1509,7 +1509,7 @@ class StableDiffusionXL_AE_Pipeline(

 add_time_ids = add_time_ids.repeat(batch_size, 1).to(DEVICE)

-#
+# interactive sampling
 self.scheduler.set_timesteps(num_inference_steps)
 latents_list = [latents]
 pred_x0_list = []
@@ -1548,7 +1548,7 @@ class StableDiffusionXL_AE_Pipeline(
 x: torch.FloatTensor,
 ):
 """
-predict the
+predict the sample the next step in the denoise process.
 """
 ref_noise = model_output[:1, :, :, :].expand(model_output.shape)
 alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
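The docstring above describes predicting the sample at the next denoising step from the model output and `alphas_cumprod`. For context, a generic deterministic DDIM-style update is sketched below; this is the textbook formula, not the pipeline's exact implementation.

```python
import torch

def ddim_step(x_t, noise_pred, alpha_prod_t, alpha_prod_t_prev):
    # Recover the predicted clean sample from the noise prediction,
    # then re-noise it to the previous (less noisy) timestep (eta = 0).
    pred_x0 = (x_t - (1 - alpha_prod_t) ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
    x_prev = alpha_prod_t_prev ** 0.5 * pred_x0 + (1 - alpha_prod_t_prev) ** 0.5 * noise_pred
    return x_prev, pred_x0
```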
main/pipeline_stable_diffusion_xl_controlnet_adapter.py
CHANGED
@@ -132,7 +132,7 @@ def _preprocess_adapter_image(image, height, width):
     image = torch.cat(image, dim=0)
 else:
     raise ValueError(
-        f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but
+        f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but receive: {image[0].ndim}"
     )
 return image

main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
CHANGED
@@ -150,7 +150,7 @@ def _preprocess_adapter_image(image, height, width):
     image = torch.cat(image, dim=0)
 else:
     raise ValueError(
-        f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but
+        f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but receive: {image[0].ndim}"
     )
 return image

main/regional_prompting_stable_diffusion.py
CHANGED
@@ -220,7 +220,7 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
 revers = True

 def pcallback(s_self, step: int, timestep: int, latents: torch.Tensor, selfs=None):
-    if "PRO" in mode: # in Prompt mode, make masks from sum of
+    if "PRO" in mode: # in Prompt mode, make masks from sum of attention maps
         self.step = step

         if len(self.attnmaps_sizes) > 3:
@@ -552,9 +552,9 @@ def get_attn_maps(self, attn):

 def reset_attnmaps(self): # init parameters in every batch
     self.step = 0
-    self.attnmaps = {} #
+    self.attnmaps = {} # made from attention maps
     self.attnmaps_sizes = [] # height,width set of u-net blocks
-    self.attnmasks = {} #
+    self.attnmasks = {} # made from attnmaps for regions
     self.maskready = False
     self.history = {}

main/sde_drag.py
CHANGED
@@ -97,7 +97,7 @@ class SdeDragPipeline(DiffusionPipeline):
 steps (`int`, *optional*, defaults to 200):
     The number of sampling iterations.
 step_size (`int`, *optional*, defaults to 2):
-    The drag
+    The drag distance of each drag step.
 image_scale (`float`, *optional*, defaults to 0.3):
     To avoid duplicating the content, use image_scale to perturbs the source.
 adapt_radius (`int`, *optional*, defaults to 5):
main/unclip_image_interpolation.py
CHANGED
@@ -284,7 +284,7 @@ class UnCLIPImageInterpolationPipeline(DiffusionPipeline):
 )
 else:
     raise AssertionError(
-        f"Expected 'image' or 'image_embeddings' to be not None with types List[PIL.Image] or torch.Tensor respectively. Received {type(image)} and {type(image_embeddings)}
+        f"Expected 'image' or 'image_embeddings' to be not None with types List[PIL.Image] or torch.Tensor respectively. Received {type(image)} and {type(image_embeddings)} respectively"
     )

 original_image_embeddings = self._encode_image(