diff --git a/src/refiners/foundationals/latent_diffusion/image_prompt.py b/src/refiners/foundationals/latent_diffusion/image_prompt.py
index 8d6378b..bd1c8d4 100644
--- a/src/refiners/foundationals/latent_diffusion/image_prompt.py
+++ b/src/refiners/foundationals/latent_diffusion/image_prompt.py
@@ -413,7 +413,6 @@ class IPAdapter(Generic[T], fl.Chain, Adapter[T]):
     def set_clip_image_embedding(self, image_embedding: Tensor) -> None:
         self.set_context("ip_adapter", {"clip_image_embedding": image_embedding})
 
-    # These should be concatenated to the CLIP text embedding before setting the UNet context
     def compute_clip_image_embedding(self, image_prompt: Tensor) -> Tensor:
         image_encoder = self.clip_image_encoder if not self.fine_grained else self.grid_image_encoder
         clip_embedding = image_encoder(image_prompt)
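
For context, a minimal sketch of how the two methods touched by this hunk fit together at inference time. The helper name `apply_image_prompt` and the pre-initialized `ip_adapter` object are assumptions for illustration, not part of this diff:

    from torch import Tensor

    def apply_image_prompt(ip_adapter, image_prompt: Tensor) -> None:
        # Hypothetical helper: `ip_adapter` is assumed to be an already-initialized
        # IPAdapter wrapping a UNet, and `image_prompt` a preprocessed image tensor
        # in the shape the CLIP image encoder expects.
        embedding = ip_adapter.compute_clip_image_embedding(image_prompt)
        # Store the embedding in the "ip_adapter" context so it is available
        # to the adapter's cross-attention layers during the UNet forward pass.
        ip_adapter.set_clip_image_embedding(embedding)
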