JunhaoZhuang committed on
Commit
4313555
·
verified ·
1 Parent(s): 2227ec5

Update diffusers/src/diffusers/pipelines/cobra/pipeline_cobra_pixart.py

Browse files
diffusers/src/diffusers/pipelines/cobra/pipeline_cobra_pixart.py CHANGED
@@ -593,9 +593,9 @@ class CobraPixArtAlphaPipeline(DiffusionPipeline):
593
 
594
  device = self._execution_device
595
 
596
- prompt_embeds = torch.load('/mnt/workspace/zhuangjunhao/PixArt_RAG/causaldit_train/prompt_tensor/prompt_embeds.pt').unsqueeze(0).repeat(batch_size * num_images_per_prompt, 1, 1)
597
  prompt_embeds = prompt_embeds.to(dtype=self.transformer.dtype, device=device)
598
- prompt_attention_mask = torch.load('/mnt/workspace/zhuangjunhao/PixArt_RAG/causaldit_train/prompt_tensor/prompt_attention_mask.pt').unsqueeze(0).repeat(batch_size * num_images_per_prompt,1)
599
  prompt_attention_mask = prompt_attention_mask.to(dtype=self.transformer.dtype, device=device)
600
 
601
  # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
 
593
 
594
  device = self._execution_device
595
 
596
+ prompt_embeds = torch.load('./prompt_tensor/prompt_embeds.pt').unsqueeze(0).repeat(batch_size * num_images_per_prompt, 1, 1)
597
  prompt_embeds = prompt_embeds.to(dtype=self.transformer.dtype, device=device)
598
+ prompt_attention_mask = torch.load('./prompt_tensor/prompt_attention_mask.pt').unsqueeze(0).repeat(batch_size * num_images_per_prompt,1)
599
  prompt_attention_mask = prompt_attention_mask.to(dtype=self.transformer.dtype, device=device)
600
 
601
  # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)