diff --git a/src/refiners/foundationals/latent_diffusion/reference_only_control.py b/src/refiners/foundationals/latent_diffusion/reference_only_control.py
index 5ef99de..bf17bc7 100644
--- a/src/refiners/foundationals/latent_diffusion/reference_only_control.py
+++ b/src/refiners/foundationals/latent_diffusion/reference_only_control.py
@@ -12,8 +12,8 @@ from refiners.fluxion.layers import (
     SetContext,
     UseContext,
 )
-from refiners.foundationals.latent_diffusion import SD1UNet
 from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock
+from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
 
 
 class SaveLayerNormAdapter(Chain, Adapter[SelfAttention]):
diff --git a/src/refiners/foundationals/latent_diffusion/stable_diffusion_1/image_prompt.py b/src/refiners/foundationals/latent_diffusion/stable_diffusion_1/image_prompt.py
index aa78cce..2ba0dc7 100644
--- a/src/refiners/foundationals/latent_diffusion/stable_diffusion_1/image_prompt.py
+++ b/src/refiners/foundationals/latent_diffusion/stable_diffusion_1/image_prompt.py
@@ -3,7 +3,7 @@ from torch import Tensor
 from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
 from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
 from refiners.foundationals.latent_diffusion.image_prompt import ImageProjection, IPAdapter, PerceiverResampler
-from refiners.foundationals.latent_diffusion.stable_diffusion_1 import SD1UNet
+from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
 
 
 class SD1IPAdapter(IPAdapter[SD1UNet]):
diff --git a/src/refiners/foundationals/latent_diffusion/stable_diffusion_xl/image_prompt.py b/src/refiners/foundationals/latent_diffusion/stable_diffusion_xl/image_prompt.py
index 74d1372..934ad29 100644
--- a/src/refiners/foundationals/latent_diffusion/stable_diffusion_xl/image_prompt.py
+++ b/src/refiners/foundationals/latent_diffusion/stable_diffusion_xl/image_prompt.py
@@ -3,7 +3,7 @@ from torch import Tensor
 from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
 from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
 from refiners.foundationals.latent_diffusion.image_prompt import ImageProjection, IPAdapter, PerceiverResampler
-from refiners.foundationals.latent_diffusion.stable_diffusion_xl import SDXLUNet
+from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
 
 
 class SDXLIPAdapter(IPAdapter[SDXLUNet]):
diff --git a/src/refiners/foundationals/latent_diffusion/stable_diffusion_xl/t2i_adapter.py b/src/refiners/foundationals/latent_diffusion/stable_diffusion_xl/t2i_adapter.py
index e955422..3e6d8a3 100644
--- a/src/refiners/foundationals/latent_diffusion/stable_diffusion_xl/t2i_adapter.py
+++ b/src/refiners/foundationals/latent_diffusion/stable_diffusion_xl/t2i_adapter.py
@@ -2,7 +2,7 @@ from torch import Tensor
 
 import refiners.fluxion.layers as fl
 from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import ResidualAccumulator
-from refiners.foundationals.latent_diffusion.stable_diffusion_xl import SDXLUNet
+from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
 from refiners.foundationals.latent_diffusion.t2i_adapter import ConditionEncoderXL, T2IAdapter, T2IFeatures
 
 