run lint rules using latest isort settings

Cédric Deltheil 2023-12-11 11:46:38 +01:00 committed by Cédric Deltheil
parent b44d6122c4
commit 792a0fc3d9
109 changed files with 502 additions and 441 deletions
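For context, a minimal sketch of what this lint pass does to an unsorted import block, using isort's Python API. The input block is hypothetical, and treating "refiners" as first-party is an assumption; the repository's actual isort configuration is not shown in this diff.

import isort

# A hypothetical unsorted block, in the style this commit cleans up:
messy = (
    "from torch import nn\n"
    "import torch\n"
    "import refiners.fluxion.layers as fl\n"
    "from refiners.fluxion.adapters.adapter import Adapter\n"
)

# known_first_party (assumed setting) puts refiners imports in their own
# trailing group, after standard library and third-party imports.
print(isort.code(messy, known_first_party=["refiners"]))
# Expected output, roughly:
#   import torch
#   from torch import nn
#
#   import refiners.fluxion.layers as fl
#   from refiners.fluxion.adapters.adapter import Adapter

The same rules explain every hunk below: plain `import X` statements before `from X import ...` within a group, groups ordered standard library / third-party / first-party, modules and imported names alphabetized within each group.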


@@ -1,10 +1,12 @@
 import argparse
 from pathlib import Path
 import torch
-from torch import nn
 from diffusers import AutoencoderKL # type: ignore
-from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
+from torch import nn
 from refiners.fluxion.model_converter import ModelConverter
+from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
 class Args(argparse.Namespace):
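Written out, the new import block of this first file has three groups. The blank lines separating them, which would account for the hunk growing from 10 to 12 lines, are assumed: the flattened diff does not show them.

import argparse
from pathlib import Path

import torch
from diffusers import AutoencoderKL # type: ignore
from torch import nn

from refiners.fluxion.model_converter import ModelConverter
from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder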


@@ -1,15 +1,17 @@
 # pyright: reportPrivateUsage=false
 import argparse
 from pathlib import Path
 import torch
-from torch import nn
 from diffusers import ControlNetModel # type: ignore
-from refiners.fluxion.utils import save_to_safetensors
+from torch import nn
 from refiners.fluxion.model_converter import ModelConverter
+from refiners.fluxion.utils import save_to_safetensors
 from refiners.foundationals.latent_diffusion import (
-    SD1UNet,
-    SD1ControlnetAdapter,
     DPMSolver,
+    SD1ControlnetAdapter,
+    SD1UNet,
 )


@@ -1,11 +1,11 @@
+import argparse
 from pathlib import Path
 from typing import Any
-import argparse
 import torch
-from refiners.foundationals.latent_diffusion import SD1UNet, SD1IPAdapter, SDXLUNet, SDXLIPAdapter
 from refiners.fluxion.utils import save_to_safetensors
+from refiners.foundationals.latent_diffusion import SD1IPAdapter, SD1UNet, SDXLIPAdapter, SDXLUNet
 # Running:
 #


@@ -3,16 +3,15 @@ from pathlib import Path
 from typing import cast
 import torch
-from torch import Tensor
-from torch.nn.init import zeros_
-from torch.nn import Parameter as TorchParameter
 from diffusers import DiffusionPipeline # type: ignore
+from torch import Tensor
+from torch.nn import Parameter as TorchParameter
+from torch.nn.init import zeros_
 import refiners.fluxion.layers as fl
+from refiners.fluxion.adapters.lora import Lora, LoraAdapter
 from refiners.fluxion.model_converter import ModelConverter
 from refiners.fluxion.utils import save_to_safetensors
-from refiners.fluxion.adapters.lora import Lora, LoraAdapter
 from refiners.foundationals.latent_diffusion import SD1UNet
 from refiners.foundationals.latent_diffusion.lora import LoraTarget, lora_targets


@@ -1,10 +1,12 @@
 import argparse
 from pathlib import Path
 import torch
-from torch import nn
 from diffusers import T2IAdapter # type: ignore
-from refiners.foundationals.latent_diffusion.t2i_adapter import ConditionEncoder, ConditionEncoderXL
+from torch import nn
 from refiners.fluxion.model_converter import ModelConverter
+from refiners.foundationals.latent_diffusion.t2i_adapter import ConditionEncoder, ConditionEncoderXL
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Convert a pretrained diffusers T2I-Adapter model to refiners")


@@ -1,9 +1,11 @@
 import argparse
 from pathlib import Path
 import torch
-from torch import nn
-from refiners.fluxion.model_converter import ModelConverter
 from diffusers import UNet2DConditionModel # type: ignore
+from torch import nn
+from refiners.fluxion.model_converter import ModelConverter
 from refiners.foundationals.latent_diffusion import SD1UNet, SDXLUNet


@@ -1,7 +1,9 @@
 import argparse
 from typing import TYPE_CHECKING, cast
 import torch
 from torch import nn
 from refiners.fluxion.model_converter import ModelConverter
 from refiners.foundationals.latent_diffusion.preprocessors.informative_drawings import InformativeDrawings


@@ -1,20 +1,22 @@
 import argparse
 from functools import partial
+from convert_diffusers_unet import Args as UnetConversionArgs, setup_converter as convert_unet
+from convert_transformers_clip_text_model import (
+    Args as TextEncoderConversionArgs,
+    setup_converter as convert_text_encoder,
+)
 from torch import Tensor
+import refiners.fluxion.layers as fl
 from refiners.fluxion.utils import (
     load_from_safetensors,
     load_metadata_from_safetensors,
     save_to_safetensors,
 )
-from convert_diffusers_unet import setup_converter as convert_unet, Args as UnetConversionArgs
-from convert_transformers_clip_text_model import (
-    setup_converter as convert_text_encoder,
-    Args as TextEncoderConversionArgs,
-)
 from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
 from refiners.foundationals.latent_diffusion import SD1UNet
 from refiners.foundationals.latent_diffusion.lora import LoraTarget
-import refiners.fluxion.layers as fl
 def get_unet_mapping(source_path: str) -> dict[str, str]:


@@ -1,20 +1,19 @@
 import argparse
 import types
 from typing import Any, Callable, cast
 import torch
 import torch.nn as nn
+from segment_anything import build_sam_vit_h # type: ignore
+from segment_anything.modeling.common import LayerNorm2d # type: ignore
 from torch import Tensor
 import refiners.fluxion.layers as fl
 from refiners.fluxion.model_converter import ModelConverter
 from refiners.fluxion.utils import manual_seed, save_to_safetensors
 from refiners.foundationals.segment_anything.image_encoder import SAMViTH
-from refiners.foundationals.segment_anything.prompt_encoder import PointEncoder, MaskEncoder
-from segment_anything import build_sam_vit_h # type: ignore
-from segment_anything.modeling.common import LayerNorm2d # type: ignore
 from refiners.foundationals.segment_anything.mask_decoder import MaskDecoder
+from refiners.foundationals.segment_anything.prompt_encoder import MaskEncoder, PointEncoder
 class FacebookSAM(nn.Module):
@@ -134,9 +133,10 @@ def convert_mask_decoder(mask_decoder: nn.Module) -> dict[str, Tensor]:
     point_embedding = torch.randn(1, 3, 256)
     mask_embedding = torch.randn(1, 256, 64, 64)
-    import refiners.fluxion.layers as fl
     from segment_anything.modeling.common import LayerNorm2d # type: ignore
+    import refiners.fluxion.layers as fl
     assert issubclass(LayerNorm2d, nn.Module)
     custom_layers = {LayerNorm2d: fl.LayerNorm2d}


@@ -1,12 +1,14 @@
 import argparse
 from pathlib import Path
-from torch import nn
-from refiners.fluxion.model_converter import ModelConverter
-from transformers import CLIPVisionModelWithProjection # type: ignore
-from refiners.foundationals.clip.image_encoder import CLIPImageEncoder
-from refiners.fluxion.utils import save_to_safetensors
 import torch
+from torch import nn
+from transformers import CLIPVisionModelWithProjection # type: ignore
 import refiners.fluxion.layers as fl
+from refiners.fluxion.model_converter import ModelConverter
+from refiners.fluxion.utils import save_to_safetensors
+from refiners.foundationals.clip.image_encoder import CLIPImageEncoder
 class Args(argparse.Namespace):


@@ -1,14 +1,16 @@
 import argparse
 from pathlib import Path
 from typing import cast
 from torch import nn
-from refiners.fluxion.model_converter import ModelConverter
 from transformers import CLIPTextModelWithProjection # type: ignore
-from refiners.foundationals.clip.text_encoder import CLIPTextEncoder, CLIPTextEncoderL, CLIPTextEncoderG
+import refiners.fluxion.layers as fl
+from refiners.fluxion.model_converter import ModelConverter
+from refiners.fluxion.utils import save_to_safetensors
+from refiners.foundationals.clip.text_encoder import CLIPTextEncoder, CLIPTextEncoderG, CLIPTextEncoderL
 from refiners.foundationals.clip.tokenizer import CLIPTokenizer
 from refiners.foundationals.latent_diffusion.stable_diffusion_xl.text_encoder import DoubleTextEncoder
-from refiners.fluxion.utils import save_to_safetensors
-import refiners.fluxion.layers as fl
 class Args(argparse.Namespace):


@@ -1,20 +1,21 @@
 import random
 from typing import Any
-from pydantic import BaseModel
 from loguru import logger
-from refiners.fluxion.utils import save_to_safetensors
-from refiners.foundationals.latent_diffusion.lora import LoraTarget, LoraAdapter, MODELS, lora_targets
-import refiners.fluxion.layers as fl
+from pydantic import BaseModel
 from torch import Tensor
 from torch.utils.data import Dataset
+import refiners.fluxion.layers as fl
+from refiners.fluxion.utils import save_to_safetensors
+from refiners.foundationals.latent_diffusion.lora import MODELS, LoraAdapter, LoraTarget, lora_targets
 from refiners.training_utils.callback import Callback
 from refiners.training_utils.latent_diffusion import (
     FinetuneLatentDiffusionConfig,
+    LatentDiffusionConfig,
+    LatentDiffusionTrainer,
     TextEmbeddingLatentsBatch,
     TextEmbeddingLatentsDataset,
-    LatentDiffusionTrainer,
-    LatentDiffusionConfig,
 )


@@ -1,24 +1,24 @@
-from typing import Any
-from pydantic import BaseModel
-from loguru import logger
-from torch.utils.data import Dataset
-from torch import randn, Tensor
 import random
+from typing import Any
+from loguru import logger
+from pydantic import BaseModel
+from torch import Tensor, randn
+from torch.utils.data import Dataset
+from refiners.fluxion.utils import save_to_safetensors
 from refiners.foundationals.clip.concepts import ConceptExtender, EmbeddingExtender
 from refiners.foundationals.clip.text_encoder import CLIPTextEncoder, TokenEncoder
 from refiners.foundationals.clip.tokenizer import CLIPTokenizer
-from refiners.fluxion.utils import save_to_safetensors
 from refiners.training_utils.callback import Callback
 from refiners.training_utils.latent_diffusion import (
     FinetuneLatentDiffusionConfig,
-    TextEmbeddingLatentsBatch,
-    LatentDiffusionTrainer,
     LatentDiffusionConfig,
+    LatentDiffusionTrainer,
+    TextEmbeddingLatentsBatch,
     TextEmbeddingLatentsDataset,
 )
 IMAGENET_TEMPLATES_SMALL = [
     "a photo of a {}",
     "a rendering of a {}",


@@ -1,3 +1,3 @@
-from refiners.fluxion.utils import save_to_safetensors, load_from_safetensors, norm, manual_seed, pad
+from refiners.fluxion.utils import load_from_safetensors, manual_seed, norm, pad, save_to_safetensors
 __all__ = ["norm", "manual_seed", "save_to_safetensors", "load_from_safetensors", "pad"]


@@ -1,7 +1,7 @@
 import contextlib
-import refiners.fluxion.layers as fl
-from typing import Any, Generic, TypeVar, Iterator
+from typing import Any, Generic, Iterator, TypeVar
+import refiners.fluxion.layers as fl
 T = TypeVar("T", bound=fl.Module)
 TAdapter = TypeVar("TAdapter", bound="Adapter[Any]") # Self (see PEP 673)


@@ -1,11 +1,11 @@
-from typing import Iterable, Generic, TypeVar, Any
-import refiners.fluxion.layers as fl
-from refiners.fluxion.adapters.adapter import Adapter
+from typing import Any, Generic, Iterable, TypeVar
 from torch import Tensor, device as Device, dtype as DType
 from torch.nn import Parameter as TorchParameter
-from torch.nn.init import zeros_, normal_
+from torch.nn.init import normal_, zeros_
+import refiners.fluxion.layers as fl
+from refiners.fluxion.adapters.adapter import Adapter
 T = TypeVar("T", bound=fl.Chain)
 TLoraAdapter = TypeVar("TLoraAdapter", bound="LoraAdapter[Any]") # Self (see PEP 673)


@@ -1,4 +1,5 @@
 from typing import Any
 from torch import Tensor
 Context = dict[str, Any]


@@ -1,50 +1,50 @@
-from refiners.fluxion.layers.activations import GLU, SiLU, ReLU, ApproximateGeLU, GeLU, Sigmoid
-from refiners.fluxion.layers.norm import LayerNorm, GroupNorm, LayerNorm2d, InstanceNorm2d
+from refiners.fluxion.layers.activations import GLU, ApproximateGeLU, GeLU, ReLU, Sigmoid, SiLU
 from refiners.fluxion.layers.attentions import Attention, SelfAttention, SelfAttention2d
 from refiners.fluxion.layers.basics import (
-    Identity,
-    View,
+    Buffer,
+    Chunk,
+    Cos,
     Flatten,
-    Unflatten,
-    Transpose,
     GetArg,
+    Identity,
+    Multiply,
+    Parameter,
     Permute,
     Reshape,
-    Squeeze,
-    Unsqueeze,
-    Slicing,
     Sin,
-    Cos,
-    Chunk,
-    Multiply,
+    Slicing,
+    Squeeze,
+    Transpose,
     Unbind,
-    Parameter,
-    Buffer,
+    Unflatten,
+    Unsqueeze,
+    View,
 )
 from refiners.fluxion.layers.chain import (
+    Breakpoint,
+    Chain,
+    Concatenate,
+    Distribute,
     Lambda,
-    Sum,
+    Matmul,
+    Parallel,
+    Passthrough,
     Residual,
     Return,
-    Chain,
-    UseContext,
     SetContext,
-    Parallel,
-    Distribute,
-    Passthrough,
-    Breakpoint,
-    Concatenate,
-    Matmul,
+    Sum,
+    UseContext,
 )
 from refiners.fluxion.layers.conv import Conv2d, ConvTranspose2d
+from refiners.fluxion.layers.converter import Converter
+from refiners.fluxion.layers.embedding import Embedding
 from refiners.fluxion.layers.linear import Linear, MultiLinear
-from refiners.fluxion.layers.module import Module, WeightedModule, ContextModule
+from refiners.fluxion.layers.maxpool import MaxPool1d, MaxPool2d
+from refiners.fluxion.layers.module import ContextModule, Module, WeightedModule
+from refiners.fluxion.layers.norm import GroupNorm, InstanceNorm2d, LayerNorm, LayerNorm2d
 from refiners.fluxion.layers.padding import ReflectionPad2d
 from refiners.fluxion.layers.pixelshuffle import PixelUnshuffle
-from refiners.fluxion.layers.sampling import Downsample, Upsample, Interpolate
-from refiners.fluxion.layers.embedding import Embedding
-from refiners.fluxion.layers.converter import Converter
-from refiners.fluxion.layers.maxpool import MaxPool1d, MaxPool2d
+from refiners.fluxion.layers.sampling import Downsample, Interpolate, Upsample
 __all__ = [
     "Embedding",


@@ -1,7 +1,10 @@
-from refiners.fluxion.layers.module import Module
-from torch.nn.functional import silu
 from torch import Tensor, sigmoid
-from torch.nn.functional import gelu # type: ignore
+from torch.nn.functional import (
+    gelu, # type: ignore
+    silu,
+)
+from refiners.fluxion.layers.module import Module
 class Activation(Module):


@@ -2,14 +2,14 @@ import math
 import torch
 from jaxtyping import Float
-from torch.nn.functional import scaled_dot_product_attention as _scaled_dot_product_attention # type: ignore
 from torch import Tensor, device as Device, dtype as DType
+from torch.nn.functional import scaled_dot_product_attention as _scaled_dot_product_attention # type: ignore
+from refiners.fluxion.context import Contexts
+from refiners.fluxion.layers.basics import Identity
+from refiners.fluxion.layers.chain import Chain, Distribute, Lambda, Parallel
 from refiners.fluxion.layers.linear import Linear
 from refiners.fluxion.layers.module import Module
-from refiners.fluxion.layers.chain import Chain, Distribute, Parallel, Lambda
-from refiners.fluxion.layers.basics import Identity
-from refiners.fluxion.context import Contexts
 def scaled_dot_product_attention(


@@ -1,8 +1,9 @@
-from refiners.fluxion.layers.module import Module, WeightedModule
 import torch
-from torch import randn, Tensor, Size, device as Device, dtype as DType
+from torch import Size, Tensor, device as Device, dtype as DType, randn
 from torch.nn import Parameter as TorchParameter
+from refiners.fluxion.layers.module import Module, WeightedModule
 class Identity(Module):
     def __init__(self) -> None:


@@ -1,15 +1,16 @@
-from collections import defaultdict
 import inspect
 import re
 import sys
 import traceback
+from collections import defaultdict
 from typing import Any, Callable, Iterable, Iterator, TypeVar, cast, overload
 import torch
 from torch import Tensor, cat, device as Device, dtype as DType
-from refiners.fluxion.layers.module import Module, ContextModule, ModuleTree, WeightedModule
-from refiners.fluxion.context import Contexts, ContextProvider
-from refiners.fluxion.utils import summarize_tensor
+from refiners.fluxion.context import ContextProvider, Contexts
+from refiners.fluxion.layers.module import ContextModule, Module, ModuleTree, WeightedModule
+from refiners.fluxion.utils import summarize_tensor
 T = TypeVar("T", bound=Module)
 TChain = TypeVar("TChain", bound="Chain") # because Self (PEP 673) is not in 3.10


@@ -1,4 +1,5 @@
-from torch import nn, device as Device, dtype as DType
+from torch import device as Device, dtype as DType, nn
 from refiners.fluxion.layers.module import WeightedModule


@@ -1,6 +1,7 @@
-from refiners.fluxion.layers.module import ContextModule
 from torch import Tensor
+from refiners.fluxion.layers.module import ContextModule
 class Converter(ContextModule):
     """


@@ -1,8 +1,8 @@
-from refiners.fluxion.layers.module import WeightedModule
-from torch.nn import Embedding as _Embedding
-from torch import Tensor, device as Device, dtype as DType
 from jaxtyping import Float, Int
+from torch import Tensor, device as Device, dtype as DType
+from torch.nn import Embedding as _Embedding
+from refiners.fluxion.layers.module import WeightedModule
 class Embedding(_Embedding, WeightedModule): # type: ignore


@@ -1,11 +1,10 @@
-from torch import device as Device, dtype as DType
+from jaxtyping import Float
+from torch import Tensor, device as Device, dtype as DType
 from torch.nn import Linear as _Linear
-from torch import Tensor
-from refiners.fluxion.layers.module import Module, WeightedModule
 from refiners.fluxion.layers.activations import ReLU
 from refiners.fluxion.layers.chain import Chain
-from jaxtyping import Float
+from refiners.fluxion.layers.module import Module, WeightedModule
 class Linear(_Linear, WeightedModule):


@@ -1,4 +1,5 @@
 from torch import nn
 from refiners.fluxion.layers.module import Module


@@ -1,17 +1,15 @@
-from collections import defaultdict
-from inspect import signature, Parameter
 import sys
+from collections import defaultdict
+from inspect import Parameter, signature
 from pathlib import Path
 from types import ModuleType
-from typing import Any, DefaultDict, Generator, TypeVar, TypedDict, cast
+from typing import TYPE_CHECKING, Any, DefaultDict, Generator, Sequence, TypedDict, TypeVar, cast
 from torch import device as Device, dtype as DType
 from torch.nn.modules.module import Module as TorchModule
-from refiners.fluxion.utils import load_from_safetensors
 from refiners.fluxion.context import Context, ContextProvider
-from typing import TYPE_CHECKING, Sequence
+from refiners.fluxion.utils import load_from_safetensors
 if TYPE_CHECKING:
     from refiners.fluxion.layers.chain import Chain


@@ -1,5 +1,6 @@
-from torch import nn, ones, zeros, Tensor, sqrt, device as Device, dtype as DType
 from jaxtyping import Float
+from torch import Tensor, device as Device, dtype as DType, nn, ones, sqrt, zeros
 from refiners.fluxion.layers.module import Module, WeightedModule


@@ -1,4 +1,5 @@
 from torch import nn
 from refiners.fluxion.layers.module import Module


@@ -1,6 +1,7 @@
-from refiners.fluxion.layers.module import Module
 from torch.nn import PixelUnshuffle as _PixelUnshuffle
+from refiners.fluxion.layers.module import Module
 class PixelUnshuffle(_PixelUnshuffle, Module):
     def __init__(self, downscale_factor: int):


@@ -1,11 +1,11 @@
-from refiners.fluxion.layers.chain import Chain, UseContext, SetContext
-from refiners.fluxion.layers.conv import Conv2d
+from torch import Size, Tensor, device as Device, dtype as DType
+from torch.nn.functional import pad
 from refiners.fluxion.layers.basics import Identity
-from refiners.fluxion.layers.chain import Parallel, Lambda
+from refiners.fluxion.layers.chain import Chain, Lambda, Parallel, SetContext, UseContext
+from refiners.fluxion.layers.conv import Conv2d
 from refiners.fluxion.layers.module import Module
 from refiners.fluxion.utils import interpolate
-from torch.nn.functional import pad
-from torch import Tensor, Size, device as Device, dtype as DType
 class Downsample(Chain):


@@ -1,10 +1,11 @@
 from collections import defaultdict
 from enum import Enum, auto
 from pathlib import Path
+from typing import Any, DefaultDict, TypedDict
+import torch
 from torch import Tensor, nn
 from torch.utils.hooks import RemovableHandle
-import torch
-from typing import Any, DefaultDict, TypedDict
 from refiners.fluxion.utils import norm, save_to_safetensors


@@ -1,15 +1,14 @@
-from typing import Iterable, Literal, TypeVar
-from PIL import Image
-from numpy import array, float32
 from pathlib import Path
+from typing import Iterable, Literal, TypeVar
+import torch
+from jaxtyping import Float
+from numpy import array, float32
+from PIL import Image
 from safetensors import safe_open as _safe_open # type: ignore
 from safetensors.torch import save_file as _save_file # type: ignore
-from torch import norm as _norm, manual_seed as _manual_seed # type: ignore
-import torch
-from torch.nn.functional import pad as _pad, interpolate as _interpolate, conv2d # type: ignore
-from torch import Tensor, device as Device, dtype as DType
-from jaxtyping import Float
+from torch import Tensor, device as Device, dtype as DType, manual_seed as _manual_seed, norm as _norm # type: ignore
+from torch.nn.functional import conv2d, interpolate as _interpolate, pad as _pad # type: ignore
 T = TypeVar("T")
 E = TypeVar("E")


@@ -1,4 +1,5 @@
 from torch import Tensor, arange, device as Device, dtype as DType
 import refiners.fluxion.layers as fl


@@ -1,13 +1,14 @@
+import re
+from typing import cast
+import torch.nn.functional as F
+from torch import Tensor, cat, zeros
+from torch.nn import Parameter
+import refiners.fluxion.layers as fl
 from refiners.fluxion.adapters.adapter import Adapter
 from refiners.foundationals.clip.text_encoder import CLIPTextEncoder, TokenEncoder
 from refiners.foundationals.clip.tokenizer import CLIPTokenizer
-import refiners.fluxion.layers as fl
-from typing import cast
-from torch import Tensor, cat, zeros
-import torch.nn.functional as F
-from torch.nn import Parameter
-import re
 class EmbeddingExtender(fl.Chain, Adapter[TokenEncoder]):


@@ -1,6 +1,7 @@
 from torch import device as Device, dtype as DType
 import refiners.fluxion.layers as fl
-from refiners.foundationals.clip.common import PositionalEncoder, FeedForward
+from refiners.foundationals.clip.common import FeedForward, PositionalEncoder
 class ClassToken(fl.Chain):


@@ -1,6 +1,7 @@
 from torch import device as Device, dtype as DType
 import refiners.fluxion.layers as fl
-from refiners.foundationals.clip.common import PositionalEncoder, FeedForward
+from refiners.foundationals.clip.common import FeedForward, PositionalEncoder
 from refiners.foundationals.clip.tokenizer import CLIPTokenizer


@@ -1,11 +1,13 @@
 import gzip
-from pathlib import Path
+import re
 from functools import lru_cache
 from itertools import islice
-import re
+from pathlib import Path
 from torch import Tensor, tensor
-from refiners.fluxion import pad
 import refiners.fluxion.layers as fl
+from refiners.fluxion import pad
 class CLIPTokenizer(fl.Module):


@@ -1,27 +1,26 @@
-from refiners.foundationals.latent_diffusion.auto_encoder import (
-    LatentDiffusionAutoencoder,
-)
 from refiners.foundationals.clip.text_encoder import (
     CLIPTextEncoderL,
 )
+from refiners.foundationals.latent_diffusion.auto_encoder import (
+    LatentDiffusionAutoencoder,
+)
 from refiners.foundationals.latent_diffusion.freeu import SDFreeUAdapter
-from refiners.foundationals.latent_diffusion.schedulers import Scheduler, DPMSolver
+from refiners.foundationals.latent_diffusion.schedulers import DPMSolver, Scheduler
 from refiners.foundationals.latent_diffusion.stable_diffusion_1 import (
-    StableDiffusion_1,
-    StableDiffusion_1_Inpainting,
-    SD1UNet,
     SD1ControlnetAdapter,
     SD1IPAdapter,
     SD1T2IAdapter,
+    SD1UNet,
+    StableDiffusion_1,
+    StableDiffusion_1_Inpainting,
 )
 from refiners.foundationals.latent_diffusion.stable_diffusion_xl import (
-    SDXLUNet,
     DoubleTextEncoder,
     SDXLIPAdapter,
     SDXLT2IAdapter,
+    SDXLUNet,
 )
 __all__ = [
     "StableDiffusion_1",
     "StableDiffusion_1_Inpainting",


@@ -1,20 +1,21 @@
+from PIL import Image
+from torch import Tensor, device as Device, dtype as DType
 from refiners.fluxion.context import Contexts
 from refiners.fluxion.layers import (
     Chain,
     Conv2d,
+    Downsample,
     GroupNorm,
     Identity,
-    SiLU,
-    Downsample,
-    Upsample,
-    Sum,
-    SelfAttention2d,
-    Slicing,
     Residual,
+    SelfAttention2d,
+    SiLU,
+    Slicing,
+    Sum,
+    Upsample,
 )
 from refiners.fluxion.utils import image_to_tensor, tensor_to_image
-from torch import Tensor, device as Device, dtype as DType
-from PIL import Image
 class Resnet(Sum):


@@ -1,24 +1,24 @@
-from torch import Tensor, Size, device as Device, dtype as DType
+from torch import Size, Tensor, device as Device, dtype as DType
 from refiners.fluxion.context import Contexts
 from refiners.fluxion.layers import (
-    Identity,
-    Flatten,
-    Unflatten,
-    Transpose,
-    Chain,
-    Parallel,
-    LayerNorm,
-    Attention,
-    UseContext,
-    Linear,
     GLU,
+    Attention,
+    Chain,
+    Conv2d,
+    Flatten,
     GeLU,
     GroupNorm,
-    Conv2d,
+    Identity,
+    LayerNorm,
+    Linear,
+    Parallel,
+    Residual,
     SelfAttention,
     SetContext,
-    Residual,
+    Transpose,
+    Unflatten,
+    UseContext,
 )


@@ -1,13 +1,14 @@
 import math
 from typing import Any, Generic, TypeVar
-import refiners.fluxion.layers as fl
 import torch
+from torch import Tensor
+from torch.fft import fftn, fftshift, ifftn, ifftshift # type: ignore
+import refiners.fluxion.layers as fl
 from refiners.fluxion.adapters.adapter import Adapter
 from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import ResidualConcatenator, SD1UNet
 from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
-from torch import Tensor
-from torch.fft import fftn, fftshift, ifftn, ifftshift # type: ignore
 T = TypeVar("T", bound="SD1UNet | SDXLUNet")
 TSDFreeUAdapter = TypeVar("TSDFreeUAdapter", bound="SDFreeUAdapter[Any]") # Self (see PEP 673)


@@ -1,19 +1,19 @@
+import math
 from enum import IntEnum
 from functools import partial
-from typing import Generic, TypeVar, Any, Callable, TYPE_CHECKING
-import math
+from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar
 from jaxtyping import Float
-from torch import Tensor, cat, softmax, zeros_like, device as Device, dtype as DType
 from PIL import Image
+from torch import Tensor, cat, device as Device, dtype as DType, softmax, zeros_like
+import refiners.fluxion.layers as fl
 from refiners.fluxion.adapters.adapter import Adapter
 from refiners.fluxion.adapters.lora import Lora
-from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
 from refiners.fluxion.context import Contexts
 from refiners.fluxion.layers.attentions import ScaledDotProductAttention
 from refiners.fluxion.utils import image_to_tensor, normalize
-import refiners.fluxion.layers as fl
+from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
 if TYPE_CHECKING:
     from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet


@@ -1,23 +1,21 @@
 from enum import Enum
 from pathlib import Path
-from typing import Iterator, Callable
+from typing import Callable, Iterator
 from torch import Tensor
 import refiners.fluxion.layers as fl
-from refiners.fluxion.utils import load_from_safetensors, load_metadata_from_safetensors
 from refiners.fluxion.adapters.adapter import Adapter
-from refiners.fluxion.adapters.lora import LoraAdapter, Lora
+from refiners.fluxion.adapters.lora import Lora, LoraAdapter
+from refiners.fluxion.utils import load_from_safetensors, load_metadata_from_safetensors
 from refiners.foundationals.clip.text_encoder import FeedForward, TransformerLayer
-from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
 from refiners.foundationals.latent_diffusion import (
-    StableDiffusion_1,
-    SD1UNet,
     CLIPTextEncoderL,
     LatentDiffusionAutoencoder,
+    SD1UNet,
+    StableDiffusion_1,
 )
+from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
 from refiners.foundationals.latent_diffusion.stable_diffusion_1.controlnet import Controlnet
 MODELS = ["unet", "text_encoder", "lda"]


@@ -1,8 +1,10 @@
 from abc import ABC, abstractmethod
 from typing import TypeVar
-from torch import Tensor, device as Device, dtype as DType
-from PIL import Image
 import torch
+from PIL import Image
+from torch import Tensor, device as Device, dtype as DType
 import refiners.fluxion.layers as fl
 from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
 from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler


@@ -8,7 +8,6 @@ from torch import Tensor, device as Device, dtype as DType
 from refiners.foundationals.latent_diffusion.model import LatentDiffusionModel
 MAX_STEPS = 1000


@@ -1,6 +1,7 @@
 # Adapted from https://github.com/carolineec/informative-drawings, MIT License
 from torch import device as Device, dtype as DType
 import refiners.fluxion.layers as fl


@@ -1,9 +1,10 @@
 import math
-from torch import Tensor, arange, float32, exp, sin, cat, cos, device as Device, dtype as DType
-from jaxtyping import Float, Int
-from refiners.fluxion.adapters.adapter import Adapter
+from jaxtyping import Float, Int
+from torch import Tensor, arange, cat, cos, device as Device, dtype as DType, exp, float32, sin
 import refiners.fluxion.layers as fl
+from refiners.fluxion.adapters.adapter import Adapter
 def compute_sinusoidal_embedding(


@@ -1,18 +1,19 @@
+from torch import Tensor
+from refiners.fluxion.adapters.adapter import Adapter
 from refiners.fluxion.layers import (
-    Passthrough,
-    Lambda,
     Chain,
     Concatenate,
-    UseContext,
+    Identity,
+    Lambda,
+    Parallel,
+    Passthrough,
     SelfAttention,
     SetContext,
-    Identity,
-    Parallel,
+    UseContext,
 )
-from refiners.fluxion.adapters.adapter import Adapter
 from refiners.foundationals.latent_diffusion import SD1UNet
 from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock
-from torch import Tensor
 class SaveLayerNormAdapter(Chain, Adapter[SelfAttention]):


@@ -1,7 +1,7 @@
-from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
-from refiners.foundationals.latent_diffusion.schedulers.dpm_solver import DPMSolver
-from refiners.foundationals.latent_diffusion.schedulers.ddpm import DDPM
 from refiners.foundationals.latent_diffusion.schedulers.ddim import DDIM
+from refiners.foundationals.latent_diffusion.schedulers.ddpm import DDPM
+from refiners.foundationals.latent_diffusion.schedulers.dpm_solver import DPMSolver
+from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
 __all__ = [
     "Scheduler",


@@ -1,4 +1,5 @@
-from torch import Tensor, device as Device, dtype as Dtype, arange, sqrt, float32, tensor
+from torch import Tensor, arange, device as Device, dtype as Dtype, float32, sqrt, tensor
 from refiners.foundationals.latent_diffusion.schedulers.scheduler import NoiseSchedule, Scheduler


@@ -1,4 +1,5 @@
-from torch import Tensor, device as Device, randn, arange, Generator, tensor
+from torch import Generator, Tensor, arange, device as Device, randn, tensor
 from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler


@@ -1,8 +1,10 @@
-from refiners.foundationals.latent_diffusion.schedulers.scheduler import NoiseSchedule, Scheduler
-import numpy as np
-from torch import Tensor, device as Device, tensor, exp, float32, dtype as Dtype
 from collections import deque
+import numpy as np
+from torch import Tensor, device as Device, dtype as Dtype, exp, float32, tensor
+from refiners.foundationals.latent_diffusion.schedulers.scheduler import NoiseSchedule, Scheduler
 class DPMSolver(Scheduler):
     """Implements DPM-Solver++ from https://arxiv.org/abs/2211.01095


@@ -1,8 +1,9 @@
 from abc import ABC, abstractmethod
 from enum import Enum
-from torch import Tensor, device as Device, dtype as DType, linspace, float32, sqrt, log
 from typing import TypeVar
+from torch import Tensor, device as Device, dtype as DType, float32, linspace, log, sqrt
 T = TypeVar("T", bound="Scheduler")


@@ -1,15 +1,15 @@
-from typing import Any, Generic, TypeVar, TYPE_CHECKING
 import math
+from typing import TYPE_CHECKING, Any, Generic, TypeVar
-from torch import Tensor, Size
-from jaxtyping import Float
 import torch
+from jaxtyping import Float
+from torch import Size, Tensor
-from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
+import refiners.fluxion.layers as fl
 from refiners.fluxion.adapters.adapter import Adapter
 from refiners.fluxion.context import Contexts
-from refiners.fluxion.utils import interpolate, gaussian_blur
-import refiners.fluxion.layers as fl
+from refiners.fluxion.utils import gaussian_blur, interpolate
+from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
 if TYPE_CHECKING:
     from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet


@@ -1,11 +1,11 @@
-from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
+from refiners.foundationals.latent_diffusion.stable_diffusion_1.controlnet import SD1ControlnetAdapter
+from refiners.foundationals.latent_diffusion.stable_diffusion_1.image_prompt import SD1IPAdapter
 from refiners.foundationals.latent_diffusion.stable_diffusion_1.model import (
     StableDiffusion_1,
     StableDiffusion_1_Inpainting,
 )
-from refiners.foundationals.latent_diffusion.stable_diffusion_1.controlnet import SD1ControlnetAdapter
-from refiners.foundationals.latent_diffusion.stable_diffusion_1.image_prompt import SD1IPAdapter
 from refiners.foundationals.latent_diffusion.stable_diffusion_1.t2i_adapter import SD1T2IAdapter
+from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
 __all__ = [
     "StableDiffusion_1",


@@ -1,16 +1,18 @@
+from typing import Iterable, cast
+from torch import Tensor, device as Device, dtype as DType
+from refiners.fluxion.adapters.adapter import Adapter
 from refiners.fluxion.context import Contexts
-from refiners.fluxion.layers import Chain, Conv2d, SiLU, Lambda, Passthrough, UseContext, Slicing, Residual
+from refiners.fluxion.layers import Chain, Conv2d, Lambda, Passthrough, Residual, SiLU, Slicing, UseContext
+from refiners.foundationals.latent_diffusion.range_adapter import RangeAdapter2d
 from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import (
-    SD1UNet,
     DownBlocks,
     MiddleBlock,
     ResidualBlock,
+    SD1UNet,
     TimestepEncoder,
 )
-from refiners.fluxion.adapters.adapter import Adapter
-from refiners.foundationals.latent_diffusion.range_adapter import RangeAdapter2d
-from typing import cast, Iterable
-from torch import Tensor, device as Device, dtype as DType
 class ConditionEncoder(Chain):


@@ -2,7 +2,7 @@ from torch import Tensor
 from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
 from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
-from refiners.foundationals.latent_diffusion.image_prompt import IPAdapter, ImageProjection, PerceiverResampler
+from refiners.foundationals.latent_diffusion.image_prompt import ImageProjection, IPAdapter, PerceiverResampler
 from refiners.foundationals.latent_diffusion.stable_diffusion_1 import SD1UNet


@@ -1,15 +1,16 @@
+import numpy as np
 import torch
+from PIL import Image
+from torch import Tensor, device as Device, dtype as DType
 from refiners.fluxion.utils import image_to_tensor, interpolate
 from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
 from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
 from refiners.foundationals.latent_diffusion.model import LatentDiffusionModel
 from refiners.foundationals.latent_diffusion.schedulers.dpm_solver import DPMSolver
 from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
-from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
 from refiners.foundationals.latent_diffusion.stable_diffusion_1.self_attention_guidance import SD1SAGAdapter
-from PIL import Image
-import numpy as np
-from torch import device as Device, dtype as DType, Tensor
+from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
 class SD1Autoencoder(LatentDiffusionAutoencoder):


@@ -1,6 +1,7 @@
-from dataclasses import field, dataclass
-from torch import Tensor
+from dataclasses import dataclass, field
 from PIL import Image
+from torch import Tensor
 from refiners.foundationals.latent_diffusion.multi_diffusion import DiffusionTarget, MultiDiffusion
 from refiners.foundationals.latent_diffusion.stable_diffusion_1.model import (


@@ -1,11 +1,11 @@
+import refiners.fluxion.layers as fl
+from refiners.fluxion.layers.attentions import ScaledDotProductAttention
 from refiners.foundationals.latent_diffusion.self_attention_guidance import (
     SAGAdapter,
-    SelfAttentionShape,
     SelfAttentionMap,
+    SelfAttentionShape,
 )
-from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet, MiddleBlock, ResidualBlock
+from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import MiddleBlock, ResidualBlock, SD1UNet
-from refiners.fluxion.layers.attentions import ScaledDotProductAttention
-import refiners.fluxion.layers as fl
 class SD1SAGAdapter(SAGAdapter[SD1UNet]):


@@ -1,8 +1,8 @@
 from torch import Tensor
-from refiners.foundationals.latent_diffusion.t2i_adapter import T2IAdapter, T2IFeatures, ConditionEncoder
-from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet, ResidualAccumulator
 import refiners.fluxion.layers as fl
+from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import ResidualAccumulator, SD1UNet
+from refiners.foundationals.latent_diffusion.t2i_adapter import ConditionEncoder, T2IAdapter, T2IFeatures
 class SD1T2IAdapter(T2IAdapter[SD1UNet]):


@@ -1,12 +1,11 @@
-from typing import cast, Iterable
+from typing import Iterable, cast
 from torch import Tensor, device as Device, dtype as DType
-from refiners.fluxion.context import Contexts
 import refiners.fluxion.layers as fl
+from refiners.fluxion.context import Contexts
 from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
-from refiners.foundationals.latent_diffusion.range_adapter import RangeEncoder, RangeAdapter2d
+from refiners.foundationals.latent_diffusion.range_adapter import RangeAdapter2d, RangeEncoder
 class TimestepEncoder(fl.Passthrough):


@@ -1,9 +1,8 @@
-from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
-from refiners.foundationals.latent_diffusion.stable_diffusion_xl.text_encoder import DoubleTextEncoder
-from refiners.foundationals.latent_diffusion.stable_diffusion_xl.model import StableDiffusion_XL
 from refiners.foundationals.latent_diffusion.stable_diffusion_xl.image_prompt import SDXLIPAdapter
+from refiners.foundationals.latent_diffusion.stable_diffusion_xl.model import StableDiffusion_XL
 from refiners.foundationals.latent_diffusion.stable_diffusion_xl.t2i_adapter import SDXLT2IAdapter
+from refiners.foundationals.latent_diffusion.stable_diffusion_xl.text_encoder import DoubleTextEncoder
+from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
 __all__ = [
     "SDXLUNet",


@@ -2,7 +2,7 @@ from torch import Tensor
 from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
 from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
-from refiners.foundationals.latent_diffusion.image_prompt import IPAdapter, ImageProjection, PerceiverResampler
+from refiners.foundationals.latent_diffusion.image_prompt import ImageProjection, IPAdapter, PerceiverResampler
 from refiners.foundationals.latent_diffusion.stable_diffusion_xl import SDXLUNet


@@ -1,12 +1,13 @@
 import torch
+from torch import Tensor, device as Device, dtype as DType
 from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
 from refiners.foundationals.latent_diffusion.model import LatentDiffusionModel
 from refiners.foundationals.latent_diffusion.schedulers.ddim import DDIM
 from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
-from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
 from refiners.foundationals.latent_diffusion.stable_diffusion_xl.self_attention_guidance import SDXLSAGAdapter
 from refiners.foundationals.latent_diffusion.stable_diffusion_xl.text_encoder import DoubleTextEncoder
-from torch import device as Device, dtype as DType, Tensor
+from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
 class SDXLAutoencoder(LatentDiffusionAutoencoder):


@@ -1,11 +1,11 @@
+import refiners.fluxion.layers as fl
+from refiners.fluxion.layers.attentions import ScaledDotProductAttention
 from refiners.foundationals.latent_diffusion.self_attention_guidance import (
     SAGAdapter,
-    SelfAttentionShape,
     SelfAttentionMap,
+    SelfAttentionShape,
 )
-from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet, MiddleBlock, ResidualBlock
+from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import MiddleBlock, ResidualBlock, SDXLUNet
-from refiners.fluxion.layers.attentions import ScaledDotProductAttention
-import refiners.fluxion.layers as fl
 class SDXLSAGAdapter(SAGAdapter[SDXLUNet]):


@@ -1,9 +1,9 @@
 from torch import Tensor
-from refiners.foundationals.latent_diffusion.t2i_adapter import T2IAdapter, T2IFeatures, ConditionEncoderXL
-from refiners.foundationals.latent_diffusion.stable_diffusion_xl import SDXLUNet
-from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import ResidualAccumulator
 import refiners.fluxion.layers as fl
+from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import ResidualAccumulator
+from refiners.foundationals.latent_diffusion.stable_diffusion_xl import SDXLUNet
+from refiners.foundationals.latent_diffusion.t2i_adapter import ConditionEncoderXL, T2IAdapter, T2IFeatures
 class SDXLT2IAdapter(T2IAdapter[SDXLUNet]):


@@ -1,11 +1,12 @@
 from typing import cast
-from torch import device as Device, dtype as DType, Tensor, cat
+from jaxtyping import Float
+from torch import Tensor, cat, device as Device, dtype as DType
+import refiners.fluxion.layers as fl
 from refiners.fluxion.adapters.adapter import Adapter
 from refiners.fluxion.context import Contexts
-import refiners.fluxion.layers as fl
 from refiners.foundationals.clip.text_encoder import CLIPTextEncoderG, CLIPTextEncoderL
-from jaxtyping import Float
 from refiners.foundationals.clip.tokenizer import CLIPTokenizer

View file

@ -1,18 +1,20 @@
from typing import cast from typing import cast
from torch import Tensor, device as Device, dtype as DType from torch import Tensor, device as Device, dtype as DType
from refiners.fluxion.context import Contexts
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts
from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import (
ResidualAccumulator,
ResidualBlock,
ResidualConcatenator,
)
from refiners.foundationals.latent_diffusion.range_adapter import ( from refiners.foundationals.latent_diffusion.range_adapter import (
RangeAdapter2d, RangeAdapter2d,
RangeEncoder, RangeEncoder,
compute_sinusoidal_embedding, compute_sinusoidal_embedding,
) )
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import (
ResidualAccumulator,
ResidualBlock,
ResidualConcatenator,
)
class TextTimeEmbedding(fl.Chain): class TextTimeEmbedding(fl.Chain):

View file

@ -1,12 +1,12 @@
from typing import Generic, TypeVar, Any, TYPE_CHECKING from typing import TYPE_CHECKING, Any, Generic, TypeVar
from torch import Tensor, device as Device, dtype as DType from torch import Tensor, device as Device, dtype as DType
from torch.nn import AvgPool2d as _AvgPool2d from torch.nn import AvgPool2d as _AvgPool2d
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.context import Contexts from refiners.fluxion.context import Contexts
from refiners.fluxion.layers.module import Module from refiners.fluxion.layers.module import Module
import refiners.fluxion.layers as fl
if TYPE_CHECKING: if TYPE_CHECKING:
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet

View file

@ -1,9 +1,9 @@
from torch import device as Device, dtype as DType, Tensor
from refiners.fluxion.context import Contexts
import refiners.fluxion.layers as fl
from refiners.fluxion.utils import pad
from torch import nn
import torch import torch
from torch import Tensor, device as Device, dtype as DType, nn
import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts
from refiners.fluxion.utils import pad
class PatchEncoder(fl.Chain): class PatchEncoder(fl.Chain):

View file

@ -1,12 +1,12 @@
import refiners.fluxion.layers as fl
from torch import device as Device, dtype as DType, Tensor, nn
import torch import torch
from torch import Tensor, device as Device, dtype as DType, nn
import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts
from refiners.foundationals.segment_anything.transformer import ( from refiners.foundationals.segment_anything.transformer import (
SparseCrossDenseAttention, SparseCrossDenseAttention,
TwoWayTranformerLayer, TwoWayTranformerLayer,
) )
from refiners.fluxion.context import Contexts
class EmbeddingsAggregator(fl.ContextModule): class EmbeddingsAggregator(fl.ContextModule):

View file

@ -1,11 +1,13 @@
from dataclasses import dataclass from dataclasses import dataclass
from typing import Sequence from typing import Sequence
from PIL import Image
from torch import device as Device, dtype as DType, Tensor
import numpy as np import numpy as np
import torch import torch
from PIL import Image
from torch import Tensor, device as Device, dtype as DType
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl
from refiners.fluxion.utils import image_to_tensor, normalize, pad, interpolate from refiners.fluxion.utils import image_to_tensor, interpolate, normalize, pad
from refiners.foundationals.segment_anything.image_encoder import SAMViT, SAMViTH from refiners.foundationals.segment_anything.image_encoder import SAMViT, SAMViTH
from refiners.foundationals.segment_anything.mask_decoder import MaskDecoder from refiners.foundationals.segment_anything.mask_decoder import MaskDecoder
from refiners.foundationals.segment_anything.prompt_encoder import MaskEncoder, PointEncoder from refiners.foundationals.segment_anything.prompt_encoder import MaskEncoder, PointEncoder

View file

@ -1,8 +1,10 @@
from enum import Enum, auto
from collections.abc import Sequence from collections.abc import Sequence
from torch import device as Device, dtype as DType, Tensor, nn from enum import Enum, auto
import torch import torch
from jaxtyping import Float, Int from jaxtyping import Float, Int
from torch import Tensor, device as Device, dtype as DType, nn
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts from refiners.fluxion.context import Contexts

View file

@ -1,4 +1,5 @@
from torch import dtype as DType, device as Device from torch import device as Device, dtype as DType
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl

View file

@ -1,7 +1,8 @@
import sys
from importlib import import_module from importlib import import_module
from importlib.metadata import requires from importlib.metadata import requires
from packaging.requirements import Requirement from packaging.requirements import Requirement
import sys
refiners_requires = requires("refiners") refiners_requires = requires("refiners")
assert refiners_requires is not None assert refiners_requires is not None

View file

@ -1,7 +1,8 @@
from typing import TYPE_CHECKING, Generic, Iterable, Any, TypeVar from typing import TYPE_CHECKING, Any, Generic, Iterable, TypeVar
from loguru import logger
from torch import tensor from torch import tensor
from torch.nn import Parameter from torch.nn import Parameter
from loguru import logger
if TYPE_CHECKING: if TYPE_CHECKING:
from refiners.training_utils.config import BaseConfig from refiners.training_utils.config import BaseConfig

View file

@ -1,17 +1,18 @@
from enum import Enum
from logging import warn from logging import warn
from pathlib import Path from pathlib import Path
from typing import Any, Callable, Iterable, Literal, Type, TypeVar from typing import Any, Callable, Iterable, Literal, Type, TypeVar
from typing_extensions import TypedDict # https://errors.pydantic.dev/2.0b3/u/typed-dict-version
from torch.optim import AdamW, SGD, Optimizer, Adam
from torch.nn import Parameter
from enum import Enum
from bitsandbytes.optim import AdamW8bit, Lion8bit # type: ignore
from pydantic import BaseModel, validator
import tomli
import refiners.fluxion.layers as fl
from prodigyopt import Prodigy # type: ignore
from refiners.training_utils.dropout import apply_dropout, apply_gyro_dropout
import tomli
from bitsandbytes.optim import AdamW8bit, Lion8bit # type: ignore
from prodigyopt import Prodigy # type: ignore
from pydantic import BaseModel, validator
from torch.nn import Parameter
from torch.optim import SGD, Adam, AdamW, Optimizer
from typing_extensions import TypedDict # https://errors.pydantic.dev/2.0b3/u/typed-dict-version
import refiners.fluxion.layers as fl
from refiners.training_utils.dropout import apply_dropout, apply_gyro_dropout
__all__ = [ __all__ = [
"parse_number_unit_field", "parse_number_unit_field",

View file

@ -1,11 +1,11 @@
from typing import TYPE_CHECKING, Any, TypeVar from typing import TYPE_CHECKING, Any, TypeVar
from torch import Tensor, randint, cat, rand from torch import Tensor, cat, rand, randint
from torch.nn import Dropout as TorchDropout from torch.nn import Dropout as TorchDropout
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl
from refiners.training_utils.callback import Callback
from refiners.fluxion.adapters.adapter import Adapter from refiners.fluxion.adapters.adapter import Adapter
from refiners.training_utils.callback import Callback
if TYPE_CHECKING: if TYPE_CHECKING:
from refiners.training_utils.config import BaseConfig from refiners.training_utils.config import BaseConfig

View file

@ -1,6 +1,7 @@
from datasets import load_dataset as _load_dataset, VerificationMode # type: ignore
from typing import Any, Generic, Protocol, TypeVar, cast from typing import Any, Generic, Protocol, TypeVar, cast
from datasets import VerificationMode, load_dataset as _load_dataset # type: ignore
__all__ = ["load_hf_dataset", "HuggingfaceDataset"] __all__ = ["load_hf_dataset", "HuggingfaceDataset"]

View file

@ -1,29 +1,31 @@
import random
from dataclasses import dataclass from dataclasses import dataclass
from typing import Any, TypeVar, TypedDict, Callable
from pydantic import BaseModel
from torch import device as Device, Tensor, randn, dtype as DType, Generator, cat
from loguru import logger
from torch.utils.data import Dataset
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from torchvision.transforms import Compose, RandomCrop, RandomHorizontalFlip # type: ignore
import refiners.fluxion.layers as fl
from PIL import Image
from functools import cached_property from functools import cached_property
from refiners.foundationals.latent_diffusion.stable_diffusion_1.model import SD1Autoencoder from typing import Any, Callable, TypedDict, TypeVar
from refiners.training_utils.config import BaseConfig
from loguru import logger
from PIL import Image
from pydantic import BaseModel
from torch import Generator, Tensor, cat, device as Device, dtype as DType, randn
from torch.nn import Module
from torch.nn.functional import mse_loss
from torch.utils.data import Dataset
from torchvision.transforms import Compose, RandomCrop, RandomHorizontalFlip # type: ignore
import refiners.fluxion.layers as fl
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from refiners.foundationals.latent_diffusion import ( from refiners.foundationals.latent_diffusion import (
StableDiffusion_1,
DPMSolver, DPMSolver,
SD1UNet, SD1UNet,
StableDiffusion_1,
) )
from refiners.foundationals.latent_diffusion.schedulers import DDPM from refiners.foundationals.latent_diffusion.schedulers import DDPM
from torch.nn.functional import mse_loss from refiners.foundationals.latent_diffusion.stable_diffusion_1.model import SD1Autoencoder
import random
from refiners.training_utils.wandb import WandbLoggable
from refiners.training_utils.trainer import Trainer
from refiners.training_utils.callback import Callback from refiners.training_utils.callback import Callback
from refiners.training_utils.huggingface_datasets import load_hf_dataset, HuggingfaceDataset from refiners.training_utils.config import BaseConfig
from torch.nn import Module from refiners.training_utils.huggingface_datasets import HuggingfaceDataset, load_hf_dataset
from refiners.training_utils.trainer import Trainer
from refiners.training_utils.wandb import WandbLoggable
class LatentDiffusionConfig(BaseModel): class LatentDiffusionConfig(BaseModel):

View file

@ -1,41 +1,43 @@
from functools import cached_property, wraps
from pathlib import Path
import random import random
import time import time
from functools import cached_property, wraps
from pathlib import Path
from typing import Any, Callable, Generic, Iterable, TypeVar, cast
import numpy as np import numpy as np
from torch import device as Device, Tensor, get_rng_state, no_grad, set_rng_state, cuda, stack from loguru import logger
from torch import Tensor, cuda, device as Device, get_rng_state, no_grad, set_rng_state, stack
from torch.autograd import backward
from torch.nn import Parameter from torch.nn import Parameter
from torch.optim import Optimizer from torch.optim import Optimizer
from torch.optim.lr_scheduler import (
CosineAnnealingLR,
CosineAnnealingWarmRestarts,
CyclicLR,
ExponentialLR,
LambdaLR,
LRScheduler,
MultiplicativeLR,
MultiStepLR,
OneCycleLR,
ReduceLROnPlateau,
StepLR,
)
from torch.utils.data import DataLoader, Dataset from torch.utils.data import DataLoader, Dataset
from torch.autograd import backward
from typing import Any, Callable, Generic, Iterable, TypeVar, cast
from loguru import logger
from refiners.fluxion import layers as fl from refiners.fluxion import layers as fl
from refiners.fluxion.utils import manual_seed from refiners.fluxion.utils import manual_seed
from refiners.training_utils.wandb import WandbLogger, WandbLoggable
from refiners.training_utils.config import BaseConfig, TimeUnit, TimeValue, SchedulerType
from refiners.training_utils.dropout import DropoutCallback
from refiners.training_utils.callback import ( from refiners.training_utils.callback import (
Callback, Callback,
ClockCallback, ClockCallback,
GradientNormClipping, GradientNormClipping,
GradientValueClipping,
GradientNormLogging, GradientNormLogging,
GradientValueClipping,
MonitorLoss, MonitorLoss,
) )
from torch.optim.lr_scheduler import ( from refiners.training_utils.config import BaseConfig, SchedulerType, TimeUnit, TimeValue
StepLR, from refiners.training_utils.dropout import DropoutCallback
ExponentialLR, from refiners.training_utils.wandb import WandbLoggable, WandbLogger
ReduceLROnPlateau,
CosineAnnealingLR,
LambdaLR,
OneCycleLR,
LRScheduler,
MultiplicativeLR,
CosineAnnealingWarmRestarts,
CyclicLR,
MultiStepLR,
)
__all__ = ["seed_everything", "scoped_seed", "Trainer"] __all__ = ["seed_everything", "scoped_seed", "Trainer"]

View file

@ -1,4 +1,5 @@
from typing import Any from typing import Any
import wandb import wandb
from PIL import Image from PIL import Image

View file

@ -1,4 +1,5 @@
import pytest import pytest
from refiners.fluxion.adapters.adapter import Adapter from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.layers import Chain, Linear from refiners.fluxion.layers import Chain, Linear

View file

@ -1,6 +1,7 @@
from refiners.fluxion.adapters.lora import Lora, SingleLoraAdapter, LoraAdapter from torch import allclose, randn
from torch import randn, allclose
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.lora import Lora, LoraAdapter, SingleLoraAdapter
def test_single_lora_adapter() -> None: def test_single_lora_adapter() -> None:

View file

@ -1,7 +1,8 @@
import torch import torch
from refiners.fluxion.adapters.adapter import Adapter from refiners.fluxion.adapters.adapter import Adapter
from refiners.foundationals.latent_diffusion.range_adapter import RangeEncoder
from refiners.fluxion.layers import Chain, Linear from refiners.fluxion.layers import Chain, Linear
from refiners.foundationals.latent_diffusion.range_adapter import RangeEncoder
class DummyLinearAdapter(Chain, Adapter[Linear]): class DummyLinearAdapter(Chain, Adapter[Linear]):

View file

@ -1,6 +1,7 @@
import os import os
import torch
from pathlib import Path from pathlib import Path
import torch
from pytest import fixture from pytest import fixture
PARENT_PATH = Path(__file__).parent PARENT_PATH = Path(__file__).parent

View file

@ -1,34 +1,32 @@
import torch
import pytest
from typing import Iterator
from warnings import warn
from PIL import Image
from pathlib import Path from pathlib import Path
from typing import Iterator
from warnings import warn
from refiners.fluxion.utils import load_from_safetensors, image_to_tensor, manual_seed import pytest
import torch
from PIL import Image
from refiners.fluxion.utils import image_to_tensor, load_from_safetensors, manual_seed
from refiners.foundationals.clip.concepts import ConceptExtender
from refiners.foundationals.latent_diffusion import ( from refiners.foundationals.latent_diffusion import (
StableDiffusion_1,
StableDiffusion_1_Inpainting,
SD1UNet,
SD1ControlnetAdapter, SD1ControlnetAdapter,
SD1IPAdapter, SD1IPAdapter,
SD1T2IAdapter, SD1T2IAdapter,
SD1UNet,
SDFreeUAdapter,
SDXLIPAdapter, SDXLIPAdapter,
SDXLT2IAdapter, SDXLT2IAdapter,
SDFreeUAdapter, StableDiffusion_1,
StableDiffusion_1_Inpainting,
) )
from refiners.foundationals.latent_diffusion.lora import SD1LoraAdapter from refiners.foundationals.latent_diffusion.lora import SD1LoraAdapter
from refiners.foundationals.latent_diffusion.multi_diffusion import DiffusionTarget from refiners.foundationals.latent_diffusion.multi_diffusion import DiffusionTarget
from refiners.foundationals.latent_diffusion.reference_only_control import ReferenceOnlyControlAdapter
from refiners.foundationals.latent_diffusion.restart import Restart from refiners.foundationals.latent_diffusion.restart import Restart
from refiners.foundationals.latent_diffusion.schedulers import DDIM from refiners.foundationals.latent_diffusion.schedulers import DDIM
from refiners.foundationals.latent_diffusion.reference_only_control import ReferenceOnlyControlAdapter
from refiners.foundationals.clip.concepts import ConceptExtender
from refiners.foundationals.latent_diffusion.schedulers.scheduler import NoiseSchedule from refiners.foundationals.latent_diffusion.schedulers.scheduler import NoiseSchedule
from refiners.foundationals.latent_diffusion.stable_diffusion_1.multi_diffusion import SD1MultiDiffusion from refiners.foundationals.latent_diffusion.stable_diffusion_1.multi_diffusion import SD1MultiDiffusion
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.model import StableDiffusion_XL from refiners.foundationals.latent_diffusion.stable_diffusion_xl.model import StableDiffusion_XL
from tests.utils import ensure_similar_images from tests.utils import ensure_similar_images

View file

@ -1,13 +1,12 @@
import torch
import pytest
from warnings import warn
from PIL import Image
from pathlib import Path from pathlib import Path
from warnings import warn
import pytest
import torch
from PIL import Image
from refiners.fluxion.utils import image_to_tensor, tensor_to_image from refiners.fluxion.utils import image_to_tensor, tensor_to_image
from refiners.foundationals.latent_diffusion.preprocessors.informative_drawings import InformativeDrawings from refiners.foundationals.latent_diffusion.preprocessors.informative_drawings import InformativeDrawings
from tests.utils import ensure_similar_images from tests.utils import ensure_similar_images

View file

@ -1,5 +1,6 @@
import pytest import pytest
import torch import torch
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts from refiners.fluxion.context import Contexts

View file

@ -1,7 +1,8 @@
import torch
import pytest
from warnings import warn from warnings import warn
import pytest
import torch
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl
from refiners.fluxion.layers.chain import ChainError, Distribute from refiners.fluxion.layers.chain import ChainError, Distribute

View file

@ -1,10 +1,11 @@
# pyright: reportPrivateUsage=false # pyright: reportPrivateUsage=false
import pytest import pytest
import torch import torch
from torch import nn, Tensor from torch import Tensor, nn
from refiners.fluxion.utils import manual_seed
from refiners.fluxion.model_converter import ModelConverter, ConversionStage
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl
from refiners.fluxion.model_converter import ConversionStage, ModelConverter
from refiners.fluxion.utils import manual_seed
class CustomBasicLayer1(fl.Module): class CustomBasicLayer1(fl.Module):

View file

@ -1,11 +1,11 @@
from dataclasses import dataclass from dataclasses import dataclass
from warnings import warn from warnings import warn
from torchvision.transforms.functional import gaussian_blur as torch_gaussian_blur # type: ignore
from torch import device as Device, dtype as DType
from PIL import Image
import pytest import pytest
import torch import torch
from PIL import Image
from torch import device as Device, dtype as DType
from torchvision.transforms.functional import gaussian_blur as torch_gaussian_blur # type: ignore
from refiners.fluxion.utils import gaussian_blur, image_to_tensor, manual_seed, tensor_to_image from refiners.fluxion.utils import gaussian_blur, image_to_tensor, manual_seed, tensor_to_image

View file

@ -1,18 +1,16 @@
import torch
import pytest
from warnings import warn
from pathlib import Path from pathlib import Path
from warnings import warn
import pytest
import torch
import transformers # type: ignore
from diffusers import StableDiffusionPipeline # type: ignore
import refiners.fluxion.layers as fl
from refiners.fluxion.utils import load_from_safetensors
from refiners.foundationals.clip.concepts import ConceptExtender, TokenExtender from refiners.foundationals.clip.concepts import ConceptExtender, TokenExtender
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from refiners.foundationals.clip.tokenizer import CLIPTokenizer from refiners.foundationals.clip.tokenizer import CLIPTokenizer
from refiners.fluxion.utils import load_from_safetensors
import refiners.fluxion.layers as fl
from diffusers import StableDiffusionPipeline # type: ignore
import transformers # type: ignore
PROMPTS = [ PROMPTS = [
"a cute cat", # a simple prompt "a cute cat", # a simple prompt

View file

@ -1,13 +1,12 @@
import torch
import pytest
from warnings import warn
from pathlib import Path from pathlib import Path
from warnings import warn
import pytest
import torch
from transformers import CLIPVisionModelWithProjection # type: ignore from transformers import CLIPVisionModelWithProjection # type: ignore
from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
from refiners.fluxion.utils import load_from_safetensors from refiners.fluxion.utils import load_from_safetensors
from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
@pytest.fixture(scope="module") @pytest.fixture(scope="module")

View file

@ -1,15 +1,13 @@
import torch
import pytest
from warnings import warn
from pathlib import Path from pathlib import Path
from warnings import warn
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL import pytest
from refiners.fluxion.utils import load_from_safetensors import torch
import transformers # type: ignore import transformers # type: ignore
from refiners.foundationals.clip.tokenizer import CLIPTokenizer
from refiners.fluxion.utils import load_from_safetensors
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from refiners.foundationals.clip.tokenizer import CLIPTokenizer
long_prompt = """ long_prompt = """
Above these apparent hieroglyphics was a figure of evidently pictorial intent, Above these apparent hieroglyphics was a figure of evidently pictorial intent,

View file

@ -1,15 +1,14 @@
import torch
import pytest
from warnings import warn
from PIL import Image
from pathlib import Path from pathlib import Path
from warnings import warn
import pytest
import torch
from PIL import Image
from tests.utils import ensure_similar_images
from refiners.fluxion.utils import load_from_safetensors from refiners.fluxion.utils import load_from_safetensors
from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
from tests.utils import ensure_similar_images
@pytest.fixture(scope="module") @pytest.fixture(scope="module")
def ref_path() -> Path: def ref_path() -> Path:

View file

@ -1,11 +1,11 @@
from typing import Iterator from typing import Iterator
import torch
import pytest import pytest
import torch
import refiners.fluxion.layers as fl import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import lookup_top_adapter from refiners.fluxion.adapters.adapter import lookup_top_adapter
from refiners.foundationals.latent_diffusion import SD1UNet, SD1ControlnetAdapter from refiners.foundationals.latent_diffusion import SD1ControlnetAdapter, SD1UNet
from refiners.foundationals.latent_diffusion.stable_diffusion_1.controlnet import Controlnet from refiners.foundationals.latent_diffusion.stable_diffusion_1.controlnet import Controlnet

Some files were not shown because too many files have changed in this diff.
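Note: every hunk in this commit is a pure import reordering produced by isort; no runtime behaviour changes. For reference, the sketch below reconstructs the ordering convention visible throughout the diff. It is an illustrative example, not a copy of any file above, and the rules it annotates are an assumption inferred from the diff output rather than from the project's isort configuration, which is not part of this commit:

# reconstructed ordering example (assumption: inferred from the diff output,
# not taken from the project's actual isort settings)
import random                    # stdlib section: plain imports first
from pathlib import Path        # then stdlib "from" imports, alphabetized

import torch                     # third-party section: plain imports first
from loguru import logger       # then third-party "from" imports, alphabetized
from torch import Tensor, device as Device, dtype as DType  # upper-case (class-like) names sort before lower-case ones, isort's default order_by_type behaviour

import refiners.fluxion.layers as fl           # first-party section ("refiners") last
from refiners.fluxion.context import Contexts

Assuming isort is available in the development environment, this ordering can be reproduced with `isort .` from the repository root and enforced in CI with `isort --check-only .` (both are standard isort invocations; the exact command used for this commit is not shown here).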