Mirror of https://github.com/finegrain-ai/refiners.git (synced 2024-11-24 23:28:45 +00:00)

commit d389d11a06 (parent 31785f2059)

    make basic adapters a part of Fluxion
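In short: this commit moves the adapter modules under the `fluxion` package, so every `refiners.adapters.*` import becomes `refiners.fluxion.adapters.*`. For downstream code the migration is a one-line change per import; the paths below are taken verbatim from the hunks that follow:

```python
# Before this commit:
from refiners.adapters.adapter import Adapter
from refiners.adapters.lora import Lora, LoraAdapter, SingleLoraAdapter

# After this commit:
from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.adapters.lora import Lora, LoraAdapter, SingleLoraAdapter
```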
@@ -179,7 +179,7 @@ The `Adapter` API lets you **easily patch models** by injecting parameters in ta
 E.g. to inject LoRA layers in all attention's linear layers:
 
 ```python
-from refiners.adapters.lora import SingleLoraAdapter
+from refiners.fluxion.adapters.lora import SingleLoraAdapter
 
 for layer in vit.layers(fl.Attention):
     for linear, parent in layer.walk(fl.Linear):
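The README example above is cut off by the hunk boundary. A plausible completion is sketched below; everything past the inner `for` is an assumption not shown in this diff (the `SingleLoraAdapter(target=..., rank=...)` arguments and the `.inject(parent)` call are inferred from the surrounding "inject LoRA layers" prose), and `vit` stands for a model defined earlier in the README, outside this hunk:

```python
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.lora import SingleLoraAdapter

# Walk every attention block and wrap each of its linear layers with a LoRA
# adapter. ASSUMPTION: the constructor arguments (target, rank) and the
# .inject(parent) call are inferred; the diff truncates the example here.
for layer in vit.layers(fl.Attention):
    for linear, parent in layer.walk(fl.Linear):
        SingleLoraAdapter(target=linear, rank=64).inject(parent)
```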
@@ -12,7 +12,7 @@ from diffusers import DiffusionPipeline # type: ignore
 import refiners.fluxion.layers as fl
 from refiners.fluxion.model_converter import ModelConverter
 from refiners.fluxion.utils import save_to_safetensors
-from refiners.adapters.lora import Lora, LoraAdapter
+from refiners.fluxion.adapters.lora import Lora, LoraAdapter
 from refiners.foundationals.latent_diffusion import SD1UNet
 from refiners.foundationals.latent_diffusion.lora import LoraTarget, lora_targets
 
@@ -1,7 +1,7 @@
 from typing import Iterable, Generic, TypeVar, Any
 
 import refiners.fluxion.layers as fl
-from refiners.adapters.adapter import Adapter
+from refiners.fluxion.adapters.adapter import Adapter
 
 from torch import Tensor, device as Device, dtype as DType
 from torch.nn import Parameter as TorchParameter
@@ -1,4 +1,4 @@
-from refiners.adapters.adapter import Adapter
+from refiners.fluxion.adapters.adapter import Adapter
 from refiners.foundationals.clip.text_encoder import CLIPTextEncoder, TokenEncoder
 from refiners.foundationals.clip.tokenizer import CLIPTokenizer
 import refiners.fluxion.layers as fl
@@ -7,8 +7,8 @@ from torch import Tensor
 import refiners.fluxion.layers as fl
 from refiners.fluxion.utils import load_from_safetensors, load_metadata_from_safetensors
 
-from refiners.adapters.adapter import Adapter
-from refiners.adapters.lora import SingleLoraAdapter, LoraAdapter
+from refiners.fluxion.adapters.adapter import Adapter
+from refiners.fluxion.adapters.lora import SingleLoraAdapter, LoraAdapter
 
 from refiners.foundationals.clip.text_encoder import FeedForward, TransformerLayer
 from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
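This file pairs the adapter imports with `load_from_safetensors` / `load_metadata_from_safetensors`. As a hypothetical usage sketch only: the diff shows these imports but not the helpers' signatures, so the return types and the placeholder filename below are assumptions:

```python
from refiners.fluxion.utils import load_from_safetensors, load_metadata_from_safetensors

# ASSUMPTION: load_from_safetensors returns a {name: Tensor} mapping and
# load_metadata_from_safetensors returns the file's metadata dict (or None);
# "lora_weights.safetensors" is a placeholder path, not from the diff.
tensors = load_from_safetensors("lora_weights.safetensors")
metadata = load_metadata_from_safetensors("lora_weights.safetensors")
print(f"{len(tensors)} tensors, metadata: {metadata}")
```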
@@ -2,7 +2,7 @@ import math
 from torch import Tensor, arange, float32, exp, sin, cat, cos, device as Device, dtype as DType
 from jaxtyping import Float, Int
 
-from refiners.adapters.adapter import Adapter
+from refiners.fluxion.adapters.adapter import Adapter
 import refiners.fluxion.layers as fl
 
 
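The imports above (`arange`, `exp`, `sin`, `cos`, `cat`) point at a sinusoidal range/timestep encoder. Here is a generic, self-contained sketch of that computation; it is not refiners' actual `RangeEncoder`, whose body this diff does not show, and `dim=320` is an arbitrary choice:

```python
import math
import torch
from torch import Tensor

def sinusoidal_embedding(x: Tensor, dim: int = 320) -> Tensor:
    # Generic sinusoidal encoder sketch, not refiners' RangeEncoder.
    half = dim // 2
    # Frequencies decay geometrically from 1 down to ~1/10000.
    freqs = torch.exp(-math.log(10_000) * torch.arange(half, dtype=torch.float32) / half)
    args = x.float().unsqueeze(-1) * freqs                         # (batch, dim // 2)
    return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)   # (batch, dim)

emb = sinusoidal_embedding(torch.tensor([0, 250, 999]))  # e.g. diffusion timesteps
print(emb.shape)  # torch.Size([3, 320])
```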
@@ -9,7 +9,7 @@ from refiners.fluxion.layers import (
     Identity,
     Parallel,
 )
-from refiners.adapters.adapter import Adapter
+from refiners.fluxion.adapters.adapter import Adapter
 from refiners.foundationals.latent_diffusion import SD1UNet
 from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock
 from torch import Tensor
@@ -7,7 +7,7 @@ from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import (
     ResidualBlock,
     TimestepEncoder,
 )
-from refiners.adapters.adapter import Adapter
+from refiners.fluxion.adapters.adapter import Adapter
 from refiners.foundationals.latent_diffusion.range_adapter import RangeAdapter2d
 from typing import cast, Iterable
 from torch import Tensor, device as Device, dtype as DType
@@ -1,6 +1,6 @@
 from typing import cast
 from torch import device as Device, dtype as DType, Tensor, cat
-from refiners.adapters.adapter import Adapter
+from refiners.fluxion.adapters.adapter import Adapter
 from refiners.fluxion.context import Contexts
 import refiners.fluxion.layers as fl
 from refiners.foundationals.clip.text_encoder import CLIPTextEncoderG, CLIPTextEncoderL
@@ -5,7 +5,7 @@ from torch.nn import Dropout as TorchDropout
 
 import refiners.fluxion.layers as fl
 from refiners.training_utils.callback import Callback
-from refiners.adapters.adapter import Adapter
+from refiners.fluxion.adapters.adapter import Adapter
 
 if TYPE_CHECKING:
     from refiners.training_utils.config import BaseConfig
@@ -1,5 +1,5 @@
 import pytest
-from refiners.adapters.adapter import Adapter
+from refiners.fluxion.adapters.adapter import Adapter
 from refiners.fluxion.layers import Chain, Linear
 
 
@@ -1,4 +1,4 @@
-from refiners.adapters.lora import Lora, SingleLoraAdapter, LoraAdapter
+from refiners.fluxion.adapters.lora import Lora, SingleLoraAdapter, LoraAdapter
 from torch import randn, allclose
 import refiners.fluxion.layers as fl
 
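The test file above imports `randn` and `allclose`, which fits the standard LoRA sanity check: a freshly initialized low-rank update should leave the base layer's output unchanged, because the "up" projection starts at zero. Below is a self-contained sketch of that property in plain torch; it illustrates the LoRA math, not the refiners test itself:

```python
import torch
from torch import allclose, randn

# LoRA augments a base weight W with a low-rank update: W + up @ down.
# With `up` initialized to zero, the adapted layer matches the base layer.
in_dim, out_dim, rank = 16, 32, 4
base = torch.nn.Linear(in_dim, out_dim)
down = randn(rank, in_dim) * 0.01  # "down" projection, small random init
up = torch.zeros(out_dim, rank)    # "up" projection, zero init

x = randn(2, in_dim)
adapted = base(x) + x @ down.t() @ up.t()
assert allclose(adapted, base(x))  # zero-initialized LoRA is a no-op
```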
@@ -1,5 +1,5 @@
 import torch
-from refiners.adapters.adapter import Adapter
+from refiners.fluxion.adapters.adapter import Adapter
 from refiners.foundationals.latent_diffusion.range_adapter import RangeEncoder
 from refiners.fluxion.layers import Chain, Linear
 