run lint rules using latest isort settings

Cédric Deltheil 2023-12-11 11:46:38 +01:00 committed by Cédric Deltheil
parent b44d6122c4
commit 792a0fc3d9
109 changed files with 502 additions and 441 deletions
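
All changes below are mechanical import re-orderings produced by isort. The convention visible in every hunk: imports are grouped into standard-library, third-party, and first-party (refiners) sections; straight "import x" statements come before "from x import ..." statements within a section; and each group is alphabetized case-insensitively. A minimal sketch of the effect, assuming isort 5 is installed and that refiners is declared first-party (the project's actual settings live in its pyproject.toml, which is not part of this diff):

import isort

# Import block roughly as it appears before this commit (first file below).
before = (
    "import argparse\n"
    "from pathlib import Path\n"
    "import torch\n"
    "from torch import nn\n"
    "from diffusers import AutoencoderKL\n"
    "from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder\n"
    "from refiners.fluxion.model_converter import ModelConverter\n"
)

# isort.code() sorts a source string in memory; known_first_party is an
# assumption here, standing in for the project's real configuration.
print(isort.code(before, known_first_party=["refiners"]))
# Prints stdlib imports (argparse, pathlib), then third-party imports
# (import torch first, then the from-imports for diffusers and torch),
# then the refiners imports, with a blank line between groups -- the
# same ordering seen in the hunks below.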

@@ -1,10 +1,12 @@
import argparse
from pathlib import Path
import torch
from torch import nn
from diffusers import AutoencoderKL # type: ignore
from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
from torch import nn
from refiners.fluxion.model_converter import ModelConverter
from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
class Args(argparse.Namespace):

@@ -1,15 +1,17 @@
# pyright: reportPrivateUsage=false
import argparse
from pathlib import Path
import torch
from torch import nn
from diffusers import ControlNetModel # type: ignore
from refiners.fluxion.utils import save_to_safetensors
from torch import nn
from refiners.fluxion.model_converter import ModelConverter
from refiners.fluxion.utils import save_to_safetensors
from refiners.foundationals.latent_diffusion import (
SD1UNet,
SD1ControlnetAdapter,
DPMSolver,
SD1ControlnetAdapter,
SD1UNet,
)

@@ -1,11 +1,11 @@
import argparse
from pathlib import Path
from typing import Any
import argparse
import torch
from refiners.foundationals.latent_diffusion import SD1UNet, SD1IPAdapter, SDXLUNet, SDXLIPAdapter
from refiners.fluxion.utils import save_to_safetensors
from refiners.foundationals.latent_diffusion import SD1IPAdapter, SD1UNet, SDXLIPAdapter, SDXLUNet
# Running:
#

@@ -3,16 +3,15 @@ from pathlib import Path
from typing import cast
import torch
from torch import Tensor
from torch.nn.init import zeros_
from torch.nn import Parameter as TorchParameter
from diffusers import DiffusionPipeline # type: ignore
from torch import Tensor
from torch.nn import Parameter as TorchParameter
from torch.nn.init import zeros_
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.lora import Lora, LoraAdapter
from refiners.fluxion.model_converter import ModelConverter
from refiners.fluxion.utils import save_to_safetensors
from refiners.fluxion.adapters.lora import Lora, LoraAdapter
from refiners.foundationals.latent_diffusion import SD1UNet
from refiners.foundationals.latent_diffusion.lora import LoraTarget, lora_targets

@@ -1,10 +1,12 @@
import argparse
from pathlib import Path
import torch
from torch import nn
from diffusers import T2IAdapter # type: ignore
from refiners.foundationals.latent_diffusion.t2i_adapter import ConditionEncoder, ConditionEncoderXL
from torch import nn
from refiners.fluxion.model_converter import ModelConverter
from refiners.foundationals.latent_diffusion.t2i_adapter import ConditionEncoder, ConditionEncoderXL
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert a pretrained diffusers T2I-Adapter model to refiners")

@@ -1,9 +1,11 @@
import argparse
from pathlib import Path
import torch
from torch import nn
from refiners.fluxion.model_converter import ModelConverter
from diffusers import UNet2DConditionModel # type: ignore
from torch import nn
from refiners.fluxion.model_converter import ModelConverter
from refiners.foundationals.latent_diffusion import SD1UNet, SDXLUNet

@@ -1,7 +1,9 @@
import argparse
from typing import TYPE_CHECKING, cast
import torch
from torch import nn
from refiners.fluxion.model_converter import ModelConverter
from refiners.foundationals.latent_diffusion.preprocessors.informative_drawings import InformativeDrawings

@@ -1,20 +1,22 @@
import argparse
from functools import partial
from convert_diffusers_unet import Args as UnetConversionArgs, setup_converter as convert_unet
from convert_transformers_clip_text_model import (
Args as TextEncoderConversionArgs,
setup_converter as convert_text_encoder,
)
from torch import Tensor
import refiners.fluxion.layers as fl
from refiners.fluxion.utils import (
load_from_safetensors,
load_metadata_from_safetensors,
save_to_safetensors,
)
from convert_diffusers_unet import setup_converter as convert_unet, Args as UnetConversionArgs
from convert_transformers_clip_text_model import (
setup_converter as convert_text_encoder,
Args as TextEncoderConversionArgs,
)
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from refiners.foundationals.latent_diffusion import SD1UNet
from refiners.foundationals.latent_diffusion.lora import LoraTarget
import refiners.fluxion.layers as fl
def get_unet_mapping(source_path: str) -> dict[str, str]:

@@ -1,20 +1,19 @@
import argparse
import types
from typing import Any, Callable, cast
import torch
import torch.nn as nn
from segment_anything import build_sam_vit_h # type: ignore
from segment_anything.modeling.common import LayerNorm2d # type: ignore
from torch import Tensor
import refiners.fluxion.layers as fl
from refiners.fluxion.model_converter import ModelConverter
from refiners.fluxion.utils import manual_seed, save_to_safetensors
from refiners.foundationals.segment_anything.image_encoder import SAMViTH
from refiners.foundationals.segment_anything.prompt_encoder import PointEncoder, MaskEncoder
from segment_anything import build_sam_vit_h # type: ignore
from segment_anything.modeling.common import LayerNorm2d # type: ignore
from refiners.foundationals.segment_anything.mask_decoder import MaskDecoder
from refiners.foundationals.segment_anything.prompt_encoder import MaskEncoder, PointEncoder
class FacebookSAM(nn.Module):
@@ -134,9 +133,10 @@ def convert_mask_decoder(mask_decoder: nn.Module) -> dict[str, Tensor]:
point_embedding = torch.randn(1, 3, 256)
mask_embedding = torch.randn(1, 256, 64, 64)
import refiners.fluxion.layers as fl
from segment_anything.modeling.common import LayerNorm2d # type: ignore
import refiners.fluxion.layers as fl
assert issubclass(LayerNorm2d, nn.Module)
custom_layers = {LayerNorm2d: fl.LayerNorm2d}

@@ -1,12 +1,14 @@
import argparse
from pathlib import Path
from torch import nn
from refiners.fluxion.model_converter import ModelConverter
from transformers import CLIPVisionModelWithProjection # type: ignore
from refiners.foundationals.clip.image_encoder import CLIPImageEncoder
from refiners.fluxion.utils import save_to_safetensors
import torch
from torch import nn
from transformers import CLIPVisionModelWithProjection # type: ignore
import refiners.fluxion.layers as fl
from refiners.fluxion.model_converter import ModelConverter
from refiners.fluxion.utils import save_to_safetensors
from refiners.foundationals.clip.image_encoder import CLIPImageEncoder
class Args(argparse.Namespace):

@@ -1,14 +1,16 @@
import argparse
from pathlib import Path
from typing import cast
from torch import nn
from refiners.fluxion.model_converter import ModelConverter
from transformers import CLIPTextModelWithProjection # type: ignore
from refiners.foundationals.clip.text_encoder import CLIPTextEncoder, CLIPTextEncoderL, CLIPTextEncoderG
import refiners.fluxion.layers as fl
from refiners.fluxion.model_converter import ModelConverter
from refiners.fluxion.utils import save_to_safetensors
from refiners.foundationals.clip.text_encoder import CLIPTextEncoder, CLIPTextEncoderG, CLIPTextEncoderL
from refiners.foundationals.clip.tokenizer import CLIPTokenizer
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.text_encoder import DoubleTextEncoder
from refiners.fluxion.utils import save_to_safetensors
import refiners.fluxion.layers as fl
class Args(argparse.Namespace):

@@ -1,20 +1,21 @@
import random
from typing import Any
from pydantic import BaseModel
from loguru import logger
from refiners.fluxion.utils import save_to_safetensors
from refiners.foundationals.latent_diffusion.lora import LoraTarget, LoraAdapter, MODELS, lora_targets
import refiners.fluxion.layers as fl
from pydantic import BaseModel
from torch import Tensor
from torch.utils.data import Dataset
import refiners.fluxion.layers as fl
from refiners.fluxion.utils import save_to_safetensors
from refiners.foundationals.latent_diffusion.lora import MODELS, LoraAdapter, LoraTarget, lora_targets
from refiners.training_utils.callback import Callback
from refiners.training_utils.latent_diffusion import (
FinetuneLatentDiffusionConfig,
LatentDiffusionConfig,
LatentDiffusionTrainer,
TextEmbeddingLatentsBatch,
TextEmbeddingLatentsDataset,
LatentDiffusionTrainer,
LatentDiffusionConfig,
)

@@ -1,24 +1,24 @@
from typing import Any
from pydantic import BaseModel
from loguru import logger
from torch.utils.data import Dataset
from torch import randn, Tensor
import random
from typing import Any
from loguru import logger
from pydantic import BaseModel
from torch import Tensor, randn
from torch.utils.data import Dataset
from refiners.fluxion.utils import save_to_safetensors
from refiners.foundationals.clip.concepts import ConceptExtender, EmbeddingExtender
from refiners.foundationals.clip.text_encoder import CLIPTextEncoder, TokenEncoder
from refiners.foundationals.clip.tokenizer import CLIPTokenizer
from refiners.fluxion.utils import save_to_safetensors
from refiners.training_utils.callback import Callback
from refiners.training_utils.latent_diffusion import (
FinetuneLatentDiffusionConfig,
TextEmbeddingLatentsBatch,
LatentDiffusionTrainer,
LatentDiffusionConfig,
LatentDiffusionTrainer,
TextEmbeddingLatentsBatch,
TextEmbeddingLatentsDataset,
)
IMAGENET_TEMPLATES_SMALL = [
"a photo of a {}",
"a rendering of a {}",

@@ -1,3 +1,3 @@
from refiners.fluxion.utils import save_to_safetensors, load_from_safetensors, norm, manual_seed, pad
from refiners.fluxion.utils import load_from_safetensors, manual_seed, norm, pad, save_to_safetensors
__all__ = ["norm", "manual_seed", "save_to_safetensors", "load_from_safetensors", "pad"]

@@ -1,7 +1,7 @@
import contextlib
import refiners.fluxion.layers as fl
from typing import Any, Generic, TypeVar, Iterator
from typing import Any, Generic, Iterator, TypeVar
import refiners.fluxion.layers as fl
T = TypeVar("T", bound=fl.Module)
TAdapter = TypeVar("TAdapter", bound="Adapter[Any]") # Self (see PEP 673)

@@ -1,11 +1,11 @@
from typing import Iterable, Generic, TypeVar, Any
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter
from typing import Any, Generic, Iterable, TypeVar
from torch import Tensor, device as Device, dtype as DType
from torch.nn import Parameter as TorchParameter
from torch.nn.init import zeros_, normal_
from torch.nn.init import normal_, zeros_
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter
T = TypeVar("T", bound=fl.Chain)
TLoraAdapter = TypeVar("TLoraAdapter", bound="LoraAdapter[Any]") # Self (see PEP 673)

@@ -1,4 +1,5 @@
from typing import Any
from torch import Tensor
Context = dict[str, Any]

@@ -1,50 +1,50 @@
from refiners.fluxion.layers.activations import GLU, SiLU, ReLU, ApproximateGeLU, GeLU, Sigmoid
from refiners.fluxion.layers.norm import LayerNorm, GroupNorm, LayerNorm2d, InstanceNorm2d
from refiners.fluxion.layers.activations import GLU, ApproximateGeLU, GeLU, ReLU, Sigmoid, SiLU
from refiners.fluxion.layers.attentions import Attention, SelfAttention, SelfAttention2d
from refiners.fluxion.layers.basics import (
Identity,
View,
Buffer,
Chunk,
Cos,
Flatten,
Unflatten,
Transpose,
GetArg,
Identity,
Multiply,
Parameter,
Permute,
Reshape,
Squeeze,
Unsqueeze,
Slicing,
Sin,
Cos,
Chunk,
Multiply,
Slicing,
Squeeze,
Transpose,
Unbind,
Parameter,
Buffer,
Unflatten,
Unsqueeze,
View,
)
from refiners.fluxion.layers.chain import (
Breakpoint,
Chain,
Concatenate,
Distribute,
Lambda,
Sum,
Matmul,
Parallel,
Passthrough,
Residual,
Return,
Chain,
UseContext,
SetContext,
Parallel,
Distribute,
Passthrough,
Breakpoint,
Concatenate,
Matmul,
Sum,
UseContext,
)
from refiners.fluxion.layers.conv import Conv2d, ConvTranspose2d
from refiners.fluxion.layers.converter import Converter
from refiners.fluxion.layers.embedding import Embedding
from refiners.fluxion.layers.linear import Linear, MultiLinear
from refiners.fluxion.layers.module import Module, WeightedModule, ContextModule
from refiners.fluxion.layers.maxpool import MaxPool1d, MaxPool2d
from refiners.fluxion.layers.module import ContextModule, Module, WeightedModule
from refiners.fluxion.layers.norm import GroupNorm, InstanceNorm2d, LayerNorm, LayerNorm2d
from refiners.fluxion.layers.padding import ReflectionPad2d
from refiners.fluxion.layers.pixelshuffle import PixelUnshuffle
from refiners.fluxion.layers.sampling import Downsample, Upsample, Interpolate
from refiners.fluxion.layers.embedding import Embedding
from refiners.fluxion.layers.converter import Converter
from refiners.fluxion.layers.maxpool import MaxPool1d, MaxPool2d
from refiners.fluxion.layers.sampling import Downsample, Interpolate, Upsample
__all__ = [
"Embedding",

@@ -1,7 +1,10 @@
from refiners.fluxion.layers.module import Module
from torch.nn.functional import silu
from torch import Tensor, sigmoid
from torch.nn.functional import gelu # type: ignore
from torch.nn.functional import (
gelu, # type: ignore
silu,
)
from refiners.fluxion.layers.module import Module
class Activation(Module):

@@ -2,14 +2,14 @@ import math
import torch
from jaxtyping import Float
from torch.nn.functional import scaled_dot_product_attention as _scaled_dot_product_attention # type: ignore
from torch import Tensor, device as Device, dtype as DType
from torch.nn.functional import scaled_dot_product_attention as _scaled_dot_product_attention # type: ignore
from refiners.fluxion.context import Contexts
from refiners.fluxion.layers.basics import Identity
from refiners.fluxion.layers.chain import Chain, Distribute, Lambda, Parallel
from refiners.fluxion.layers.linear import Linear
from refiners.fluxion.layers.module import Module
from refiners.fluxion.layers.chain import Chain, Distribute, Parallel, Lambda
from refiners.fluxion.layers.basics import Identity
from refiners.fluxion.context import Contexts
def scaled_dot_product_attention(

@@ -1,8 +1,9 @@
from refiners.fluxion.layers.module import Module, WeightedModule
import torch
from torch import randn, Tensor, Size, device as Device, dtype as DType
from torch import Size, Tensor, device as Device, dtype as DType, randn
from torch.nn import Parameter as TorchParameter
from refiners.fluxion.layers.module import Module, WeightedModule
class Identity(Module):
def __init__(self) -> None:

@@ -1,15 +1,16 @@
from collections import defaultdict
import inspect
import re
import sys
import traceback
from collections import defaultdict
from typing import Any, Callable, Iterable, Iterator, TypeVar, cast, overload
import torch
from torch import Tensor, cat, device as Device, dtype as DType
from refiners.fluxion.layers.module import Module, ContextModule, ModuleTree, WeightedModule
from refiners.fluxion.context import Contexts, ContextProvider
from refiners.fluxion.utils import summarize_tensor
from refiners.fluxion.context import ContextProvider, Contexts
from refiners.fluxion.layers.module import ContextModule, Module, ModuleTree, WeightedModule
from refiners.fluxion.utils import summarize_tensor
T = TypeVar("T", bound=Module)
TChain = TypeVar("TChain", bound="Chain") # because Self (PEP 673) is not in 3.10

@@ -1,4 +1,5 @@
from torch import nn, device as Device, dtype as DType
from torch import device as Device, dtype as DType, nn
from refiners.fluxion.layers.module import WeightedModule

@@ -1,6 +1,7 @@
from refiners.fluxion.layers.module import ContextModule
from torch import Tensor
from refiners.fluxion.layers.module import ContextModule
class Converter(ContextModule):
"""

@@ -1,8 +1,8 @@
from refiners.fluxion.layers.module import WeightedModule
from torch.nn import Embedding as _Embedding
from torch import Tensor, device as Device, dtype as DType
from jaxtyping import Float, Int
from torch import Tensor, device as Device, dtype as DType
from torch.nn import Embedding as _Embedding
from refiners.fluxion.layers.module import WeightedModule
class Embedding(_Embedding, WeightedModule): # type: ignore

@@ -1,11 +1,10 @@
from torch import device as Device, dtype as DType
from jaxtyping import Float
from torch import Tensor, device as Device, dtype as DType
from torch.nn import Linear as _Linear
from torch import Tensor
from refiners.fluxion.layers.module import Module, WeightedModule
from refiners.fluxion.layers.activations import ReLU
from refiners.fluxion.layers.chain import Chain
from jaxtyping import Float
from refiners.fluxion.layers.module import Module, WeightedModule
class Linear(_Linear, WeightedModule):

@@ -1,4 +1,5 @@
from torch import nn
from refiners.fluxion.layers.module import Module

@@ -1,17 +1,15 @@
from collections import defaultdict
from inspect import signature, Parameter
import sys
from collections import defaultdict
from inspect import Parameter, signature
from pathlib import Path
from types import ModuleType
from typing import Any, DefaultDict, Generator, TypeVar, TypedDict, cast
from typing import TYPE_CHECKING, Any, DefaultDict, Generator, Sequence, TypedDict, TypeVar, cast
from torch import device as Device, dtype as DType
from torch.nn.modules.module import Module as TorchModule
from refiners.fluxion.utils import load_from_safetensors
from refiners.fluxion.context import Context, ContextProvider
from typing import TYPE_CHECKING, Sequence
from refiners.fluxion.utils import load_from_safetensors
if TYPE_CHECKING:
from refiners.fluxion.layers.chain import Chain

@@ -1,5 +1,6 @@
from torch import nn, ones, zeros, Tensor, sqrt, device as Device, dtype as DType
from jaxtyping import Float
from torch import Tensor, device as Device, dtype as DType, nn, ones, sqrt, zeros
from refiners.fluxion.layers.module import Module, WeightedModule

@@ -1,4 +1,5 @@
from torch import nn
from refiners.fluxion.layers.module import Module

@@ -1,6 +1,7 @@
from refiners.fluxion.layers.module import Module
from torch.nn import PixelUnshuffle as _PixelUnshuffle
from refiners.fluxion.layers.module import Module
class PixelUnshuffle(_PixelUnshuffle, Module):
def __init__(self, downscale_factor: int):

@@ -1,11 +1,11 @@
from refiners.fluxion.layers.chain import Chain, UseContext, SetContext
from refiners.fluxion.layers.conv import Conv2d
from torch import Size, Tensor, device as Device, dtype as DType
from torch.nn.functional import pad
from refiners.fluxion.layers.basics import Identity
from refiners.fluxion.layers.chain import Parallel, Lambda
from refiners.fluxion.layers.chain import Chain, Lambda, Parallel, SetContext, UseContext
from refiners.fluxion.layers.conv import Conv2d
from refiners.fluxion.layers.module import Module
from refiners.fluxion.utils import interpolate
from torch.nn.functional import pad
from torch import Tensor, Size, device as Device, dtype as DType
class Downsample(Chain):

@@ -1,10 +1,11 @@
from collections import defaultdict
from enum import Enum, auto
from pathlib import Path
from typing import Any, DefaultDict, TypedDict
import torch
from torch import Tensor, nn
from torch.utils.hooks import RemovableHandle
import torch
from typing import Any, DefaultDict, TypedDict
from refiners.fluxion.utils import norm, save_to_safetensors

@@ -1,15 +1,14 @@
from typing import Iterable, Literal, TypeVar
from PIL import Image
from numpy import array, float32
from pathlib import Path
from typing import Iterable, Literal, TypeVar
import torch
from jaxtyping import Float
from numpy import array, float32
from PIL import Image
from safetensors import safe_open as _safe_open # type: ignore
from safetensors.torch import save_file as _save_file # type: ignore
from torch import norm as _norm, manual_seed as _manual_seed # type: ignore
import torch
from torch.nn.functional import pad as _pad, interpolate as _interpolate, conv2d # type: ignore
from torch import Tensor, device as Device, dtype as DType
from jaxtyping import Float
from torch import Tensor, device as Device, dtype as DType, manual_seed as _manual_seed, norm as _norm # type: ignore
from torch.nn.functional import conv2d, interpolate as _interpolate, pad as _pad # type: ignore
T = TypeVar("T")
E = TypeVar("E")

@@ -1,4 +1,5 @@
from torch import Tensor, arange, device as Device, dtype as DType
import refiners.fluxion.layers as fl

@@ -1,13 +1,14 @@
import re
from typing import cast
import torch.nn.functional as F
from torch import Tensor, cat, zeros
from torch.nn import Parameter
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter
from refiners.foundationals.clip.text_encoder import CLIPTextEncoder, TokenEncoder
from refiners.foundationals.clip.tokenizer import CLIPTokenizer
import refiners.fluxion.layers as fl
from typing import cast
from torch import Tensor, cat, zeros
import torch.nn.functional as F
from torch.nn import Parameter
import re
class EmbeddingExtender(fl.Chain, Adapter[TokenEncoder]):

@@ -1,6 +1,7 @@
from torch import device as Device, dtype as DType
import refiners.fluxion.layers as fl
from refiners.foundationals.clip.common import PositionalEncoder, FeedForward
from refiners.foundationals.clip.common import FeedForward, PositionalEncoder
class ClassToken(fl.Chain):

@@ -1,6 +1,7 @@
from torch import device as Device, dtype as DType
import refiners.fluxion.layers as fl
from refiners.foundationals.clip.common import PositionalEncoder, FeedForward
from refiners.foundationals.clip.common import FeedForward, PositionalEncoder
from refiners.foundationals.clip.tokenizer import CLIPTokenizer

@@ -1,11 +1,13 @@
import gzip
from pathlib import Path
import re
from functools import lru_cache
from itertools import islice
import re
from pathlib import Path
from torch import Tensor, tensor
from refiners.fluxion import pad
import refiners.fluxion.layers as fl
from refiners.fluxion import pad
class CLIPTokenizer(fl.Module):

@@ -1,27 +1,26 @@
from refiners.foundationals.latent_diffusion.auto_encoder import (
LatentDiffusionAutoencoder,
)
from refiners.foundationals.clip.text_encoder import (
CLIPTextEncoderL,
)
from refiners.foundationals.latent_diffusion.auto_encoder import (
LatentDiffusionAutoencoder,
)
from refiners.foundationals.latent_diffusion.freeu import SDFreeUAdapter
from refiners.foundationals.latent_diffusion.schedulers import Scheduler, DPMSolver
from refiners.foundationals.latent_diffusion.schedulers import DPMSolver, Scheduler
from refiners.foundationals.latent_diffusion.stable_diffusion_1 import (
StableDiffusion_1,
StableDiffusion_1_Inpainting,
SD1UNet,
SD1ControlnetAdapter,
SD1IPAdapter,
SD1T2IAdapter,
SD1UNet,
StableDiffusion_1,
StableDiffusion_1_Inpainting,
)
from refiners.foundationals.latent_diffusion.stable_diffusion_xl import (
SDXLUNet,
DoubleTextEncoder,
SDXLIPAdapter,
SDXLT2IAdapter,
SDXLUNet,
)
__all__ = [
"StableDiffusion_1",
"StableDiffusion_1_Inpainting",

@@ -1,20 +1,21 @@
from PIL import Image
from torch import Tensor, device as Device, dtype as DType
from refiners.fluxion.context import Contexts
from refiners.fluxion.layers import (
Chain,
Conv2d,
Downsample,
GroupNorm,
Identity,
SiLU,
Downsample,
Upsample,
Sum,
SelfAttention2d,
Slicing,
Residual,
SelfAttention2d,
SiLU,
Slicing,
Sum,
Upsample,
)
from refiners.fluxion.utils import image_to_tensor, tensor_to_image
from torch import Tensor, device as Device, dtype as DType
from PIL import Image
class Resnet(Sum):

@@ -1,24 +1,24 @@
from torch import Tensor, Size, device as Device, dtype as DType
from torch import Size, Tensor, device as Device, dtype as DType
from refiners.fluxion.context import Contexts
from refiners.fluxion.layers import (
Identity,
Flatten,
Unflatten,
Transpose,
Chain,
Parallel,
LayerNorm,
Attention,
UseContext,
Linear,
GLU,
Attention,
Chain,
Conv2d,
Flatten,
GeLU,
GroupNorm,
Conv2d,
Identity,
LayerNorm,
Linear,
Parallel,
Residual,
SelfAttention,
SetContext,
Residual,
Transpose,
Unflatten,
UseContext,
)

@@ -1,13 +1,14 @@
import math
from typing import Any, Generic, TypeVar
import refiners.fluxion.layers as fl
import torch
from torch import Tensor
from torch.fft import fftn, fftshift, ifftn, ifftshift # type: ignore
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import ResidualConcatenator, SD1UNet
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
from torch import Tensor
from torch.fft import fftn, fftshift, ifftn, ifftshift # type: ignore
T = TypeVar("T", bound="SD1UNet | SDXLUNet")
TSDFreeUAdapter = TypeVar("TSDFreeUAdapter", bound="SDFreeUAdapter[Any]") # Self (see PEP 673)

@@ -1,19 +1,19 @@
import math
from enum import IntEnum
from functools import partial
from typing import Generic, TypeVar, Any, Callable, TYPE_CHECKING
import math
from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar
from jaxtyping import Float
from torch import Tensor, cat, softmax, zeros_like, device as Device, dtype as DType
from PIL import Image
from torch import Tensor, cat, device as Device, dtype as DType, softmax, zeros_like
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.adapters.lora import Lora
from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
from refiners.fluxion.context import Contexts
from refiners.fluxion.layers.attentions import ScaledDotProductAttention
from refiners.fluxion.utils import image_to_tensor, normalize
import refiners.fluxion.layers as fl
from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
if TYPE_CHECKING:
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet

@@ -1,23 +1,21 @@
from enum import Enum
from pathlib import Path
from typing import Iterator, Callable
from typing import Callable, Iterator
from torch import Tensor
import refiners.fluxion.layers as fl
from refiners.fluxion.utils import load_from_safetensors, load_metadata_from_safetensors
from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.adapters.lora import LoraAdapter, Lora
from refiners.fluxion.adapters.lora import Lora, LoraAdapter
from refiners.fluxion.utils import load_from_safetensors, load_metadata_from_safetensors
from refiners.foundationals.clip.text_encoder import FeedForward, TransformerLayer
from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
from refiners.foundationals.latent_diffusion import (
StableDiffusion_1,
SD1UNet,
CLIPTextEncoderL,
LatentDiffusionAutoencoder,
SD1UNet,
StableDiffusion_1,
)
from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
from refiners.foundationals.latent_diffusion.stable_diffusion_1.controlnet import Controlnet
MODELS = ["unet", "text_encoder", "lda"]

@@ -1,8 +1,10 @@
from abc import ABC, abstractmethod
from typing import TypeVar
from torch import Tensor, device as Device, dtype as DType
from PIL import Image
import torch
from PIL import Image
from torch import Tensor, device as Device, dtype as DType
import refiners.fluxion.layers as fl
from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler

@@ -8,7 +8,6 @@ from torch import Tensor, device as Device, dtype as DType
from refiners.foundationals.latent_diffusion.model import LatentDiffusionModel
MAX_STEPS = 1000

@@ -1,6 +1,7 @@
# Adapted from https://github.com/carolineec/informative-drawings, MIT License
from torch import device as Device, dtype as DType
import refiners.fluxion.layers as fl

@@ -1,9 +1,10 @@
import math
from torch import Tensor, arange, float32, exp, sin, cat, cos, device as Device, dtype as DType
from jaxtyping import Float, Int
from refiners.fluxion.adapters.adapter import Adapter
from jaxtyping import Float, Int
from torch import Tensor, arange, cat, cos, device as Device, dtype as DType, exp, float32, sin
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter
def compute_sinusoidal_embedding(

@@ -1,18 +1,19 @@
from torch import Tensor
from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.layers import (
Passthrough,
Lambda,
Chain,
Concatenate,
UseContext,
Identity,
Lambda,
Parallel,
Passthrough,
SelfAttention,
SetContext,
Identity,
Parallel,
UseContext,
)
from refiners.fluxion.adapters.adapter import Adapter
from refiners.foundationals.latent_diffusion import SD1UNet
from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock
from torch import Tensor
class SaveLayerNormAdapter(Chain, Adapter[SelfAttention]):

@@ -1,7 +1,7 @@
from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
from refiners.foundationals.latent_diffusion.schedulers.dpm_solver import DPMSolver
from refiners.foundationals.latent_diffusion.schedulers.ddpm import DDPM
from refiners.foundationals.latent_diffusion.schedulers.ddim import DDIM
from refiners.foundationals.latent_diffusion.schedulers.ddpm import DDPM
from refiners.foundationals.latent_diffusion.schedulers.dpm_solver import DPMSolver
from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
__all__ = [
"Scheduler",

@@ -1,4 +1,5 @@
from torch import Tensor, device as Device, dtype as Dtype, arange, sqrt, float32, tensor
from torch import Tensor, arange, device as Device, dtype as Dtype, float32, sqrt, tensor
from refiners.foundationals.latent_diffusion.schedulers.scheduler import NoiseSchedule, Scheduler

@@ -1,4 +1,5 @@
from torch import Tensor, device as Device, randn, arange, Generator, tensor
from torch import Generator, Tensor, arange, device as Device, randn, tensor
from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler

@@ -1,8 +1,10 @@
from refiners.foundationals.latent_diffusion.schedulers.scheduler import NoiseSchedule, Scheduler
import numpy as np
from torch import Tensor, device as Device, tensor, exp, float32, dtype as Dtype
from collections import deque
import numpy as np
from torch import Tensor, device as Device, dtype as Dtype, exp, float32, tensor
from refiners.foundationals.latent_diffusion.schedulers.scheduler import NoiseSchedule, Scheduler
class DPMSolver(Scheduler):
"""Implements DPM-Solver++ from https://arxiv.org/abs/2211.01095

@@ -1,8 +1,9 @@
from abc import ABC, abstractmethod
from enum import Enum
from torch import Tensor, device as Device, dtype as DType, linspace, float32, sqrt, log
from typing import TypeVar
from torch import Tensor, device as Device, dtype as DType, float32, linspace, log, sqrt
T = TypeVar("T", bound="Scheduler")

@@ -1,15 +1,15 @@
from typing import Any, Generic, TypeVar, TYPE_CHECKING
import math
from typing import TYPE_CHECKING, Any, Generic, TypeVar
from torch import Tensor, Size
from jaxtyping import Float
import torch
from jaxtyping import Float
from torch import Size, Tensor
from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.context import Contexts
from refiners.fluxion.utils import interpolate, gaussian_blur
import refiners.fluxion.layers as fl
from refiners.fluxion.utils import gaussian_blur, interpolate
from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
if TYPE_CHECKING:
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet

@@ -1,11 +1,11 @@
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
from refiners.foundationals.latent_diffusion.stable_diffusion_1.controlnet import SD1ControlnetAdapter
from refiners.foundationals.latent_diffusion.stable_diffusion_1.image_prompt import SD1IPAdapter
from refiners.foundationals.latent_diffusion.stable_diffusion_1.model import (
StableDiffusion_1,
StableDiffusion_1_Inpainting,
)
from refiners.foundationals.latent_diffusion.stable_diffusion_1.controlnet import SD1ControlnetAdapter
from refiners.foundationals.latent_diffusion.stable_diffusion_1.image_prompt import SD1IPAdapter
from refiners.foundationals.latent_diffusion.stable_diffusion_1.t2i_adapter import SD1T2IAdapter
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
__all__ = [
"StableDiffusion_1",

@@ -1,16 +1,18 @@
from typing import Iterable, cast
from torch import Tensor, device as Device, dtype as DType
from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.context import Contexts
from refiners.fluxion.layers import Chain, Conv2d, SiLU, Lambda, Passthrough, UseContext, Slicing, Residual
from refiners.fluxion.layers import Chain, Conv2d, Lambda, Passthrough, Residual, SiLU, Slicing, UseContext
from refiners.foundationals.latent_diffusion.range_adapter import RangeAdapter2d
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import (
SD1UNet,
DownBlocks,
MiddleBlock,
ResidualBlock,
SD1UNet,
TimestepEncoder,
)
from refiners.fluxion.adapters.adapter import Adapter
from refiners.foundationals.latent_diffusion.range_adapter import RangeAdapter2d
from typing import cast, Iterable
from torch import Tensor, device as Device, dtype as DType
class ConditionEncoder(Chain):

@@ -2,7 +2,7 @@ from torch import Tensor
from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
from refiners.foundationals.latent_diffusion.image_prompt import IPAdapter, ImageProjection, PerceiverResampler
from refiners.foundationals.latent_diffusion.image_prompt import ImageProjection, IPAdapter, PerceiverResampler
from refiners.foundationals.latent_diffusion.stable_diffusion_1 import SD1UNet

@@ -1,15 +1,16 @@
import numpy as np
import torch
from PIL import Image
from torch import Tensor, device as Device, dtype as DType
from refiners.fluxion.utils import image_to_tensor, interpolate
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
from refiners.foundationals.latent_diffusion.model import LatentDiffusionModel
from refiners.foundationals.latent_diffusion.schedulers.dpm_solver import DPMSolver
from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
from refiners.foundationals.latent_diffusion.stable_diffusion_1.self_attention_guidance import SD1SAGAdapter
from PIL import Image
import numpy as np
from torch import device as Device, dtype as DType, Tensor
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet
class SD1Autoencoder(LatentDiffusionAutoencoder):

@@ -1,6 +1,7 @@
from dataclasses import field, dataclass
from torch import Tensor
from dataclasses import dataclass, field
from PIL import Image
from torch import Tensor
from refiners.foundationals.latent_diffusion.multi_diffusion import DiffusionTarget, MultiDiffusion
from refiners.foundationals.latent_diffusion.stable_diffusion_1.model import (

@@ -1,11 +1,11 @@
import refiners.fluxion.layers as fl
from refiners.fluxion.layers.attentions import ScaledDotProductAttention
from refiners.foundationals.latent_diffusion.self_attention_guidance import (
SAGAdapter,
SelfAttentionShape,
SelfAttentionMap,
SelfAttentionShape,
)
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet, MiddleBlock, ResidualBlock
from refiners.fluxion.layers.attentions import ScaledDotProductAttention
import refiners.fluxion.layers as fl
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import MiddleBlock, ResidualBlock, SD1UNet
class SD1SAGAdapter(SAGAdapter[SD1UNet]):

@@ -1,8 +1,8 @@
from torch import Tensor
from refiners.foundationals.latent_diffusion.t2i_adapter import T2IAdapter, T2IFeatures, ConditionEncoder
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet, ResidualAccumulator
import refiners.fluxion.layers as fl
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import ResidualAccumulator, SD1UNet
from refiners.foundationals.latent_diffusion.t2i_adapter import ConditionEncoder, T2IAdapter, T2IFeatures
class SD1T2IAdapter(T2IAdapter[SD1UNet]):

@@ -1,12 +1,11 @@
from typing import cast, Iterable
from typing import Iterable, cast
from torch import Tensor, device as Device, dtype as DType
from refiners.fluxion.context import Contexts
import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts
from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
from refiners.foundationals.latent_diffusion.range_adapter import RangeEncoder, RangeAdapter2d
from refiners.foundationals.latent_diffusion.range_adapter import RangeAdapter2d, RangeEncoder
class TimestepEncoder(fl.Passthrough):

@@ -1,9 +1,8 @@
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.text_encoder import DoubleTextEncoder
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.model import StableDiffusion_XL
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.image_prompt import SDXLIPAdapter
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.model import StableDiffusion_XL
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.t2i_adapter import SDXLT2IAdapter
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.text_encoder import DoubleTextEncoder
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
__all__ = [
"SDXLUNet",

@@ -2,7 +2,7 @@ from torch import Tensor
from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
from refiners.foundationals.latent_diffusion.image_prompt import IPAdapter, ImageProjection, PerceiverResampler
from refiners.foundationals.latent_diffusion.image_prompt import ImageProjection, IPAdapter, PerceiverResampler
from refiners.foundationals.latent_diffusion.stable_diffusion_xl import SDXLUNet

@@ -1,12 +1,13 @@
import torch
from torch import Tensor, device as Device, dtype as DType
from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
from refiners.foundationals.latent_diffusion.model import LatentDiffusionModel
from refiners.foundationals.latent_diffusion.schedulers.ddim import DDIM
from refiners.foundationals.latent_diffusion.schedulers.scheduler import Scheduler
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.self_attention_guidance import SDXLSAGAdapter
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.text_encoder import DoubleTextEncoder
from torch import device as Device, dtype as DType, Tensor
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet
class SDXLAutoencoder(LatentDiffusionAutoencoder):

@@ -1,11 +1,11 @@
import refiners.fluxion.layers as fl
from refiners.fluxion.layers.attentions import ScaledDotProductAttention
from refiners.foundationals.latent_diffusion.self_attention_guidance import (
SAGAdapter,
SelfAttentionShape,
SelfAttentionMap,
SelfAttentionShape,
)
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import SDXLUNet, MiddleBlock, ResidualBlock
from refiners.fluxion.layers.attentions import ScaledDotProductAttention
import refiners.fluxion.layers as fl
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.unet import MiddleBlock, ResidualBlock, SDXLUNet
class SDXLSAGAdapter(SAGAdapter[SDXLUNet]):

@@ -1,9 +1,9 @@
from torch import Tensor
from refiners.foundationals.latent_diffusion.t2i_adapter import T2IAdapter, T2IFeatures, ConditionEncoderXL
from refiners.foundationals.latent_diffusion.stable_diffusion_xl import SDXLUNet
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import ResidualAccumulator
import refiners.fluxion.layers as fl
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import ResidualAccumulator
from refiners.foundationals.latent_diffusion.stable_diffusion_xl import SDXLUNet
from refiners.foundationals.latent_diffusion.t2i_adapter import ConditionEncoderXL, T2IAdapter, T2IFeatures
class SDXLT2IAdapter(T2IAdapter[SDXLUNet]):

@@ -1,11 +1,12 @@
from typing import cast
from torch import device as Device, dtype as DType, Tensor, cat
from jaxtyping import Float
from torch import Tensor, cat, device as Device, dtype as DType
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.context import Contexts
import refiners.fluxion.layers as fl
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderG, CLIPTextEncoderL
from jaxtyping import Float
from refiners.foundationals.clip.tokenizer import CLIPTokenizer

@@ -1,18 +1,20 @@
from typing import cast
from torch import Tensor, device as Device, dtype as DType
from refiners.fluxion.context import Contexts
import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts
from refiners.foundationals.latent_diffusion.cross_attention import CrossAttentionBlock2d
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import (
ResidualAccumulator,
ResidualBlock,
ResidualConcatenator,
)
from refiners.foundationals.latent_diffusion.range_adapter import (
RangeAdapter2d,
RangeEncoder,
compute_sinusoidal_embedding,
)
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import (
ResidualAccumulator,
ResidualBlock,
ResidualConcatenator,
)
class TextTimeEmbedding(fl.Chain):

@@ -1,12 +1,12 @@
from typing import Generic, TypeVar, Any, TYPE_CHECKING
from typing import TYPE_CHECKING, Any, Generic, TypeVar
from torch import Tensor, device as Device, dtype as DType
from torch.nn import AvgPool2d as _AvgPool2d
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.context import Contexts
from refiners.fluxion.layers.module import Module
import refiners.fluxion.layers as fl
if TYPE_CHECKING:
from refiners.foundationals.latent_diffusion.stable_diffusion_1.unet import SD1UNet

@@ -1,9 +1,9 @@
from torch import device as Device, dtype as DType, Tensor
from refiners.fluxion.context import Contexts
import refiners.fluxion.layers as fl
from refiners.fluxion.utils import pad
from torch import nn
import torch
from torch import Tensor, device as Device, dtype as DType, nn
import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts
from refiners.fluxion.utils import pad
class PatchEncoder(fl.Chain):

@@ -1,12 +1,12 @@
import refiners.fluxion.layers as fl
from torch import device as Device, dtype as DType, Tensor, nn
import torch
from torch import Tensor, device as Device, dtype as DType, nn
import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts
from refiners.foundationals.segment_anything.transformer import (
SparseCrossDenseAttention,
TwoWayTranformerLayer,
)
from refiners.fluxion.context import Contexts
class EmbeddingsAggregator(fl.ContextModule):

@@ -1,11 +1,13 @@
from dataclasses import dataclass
from typing import Sequence
from PIL import Image
from torch import device as Device, dtype as DType, Tensor
import numpy as np
import torch
from PIL import Image
from torch import Tensor, device as Device, dtype as DType
import refiners.fluxion.layers as fl
from refiners.fluxion.utils import image_to_tensor, normalize, pad, interpolate
from refiners.fluxion.utils import image_to_tensor, interpolate, normalize, pad
from refiners.foundationals.segment_anything.image_encoder import SAMViT, SAMViTH
from refiners.foundationals.segment_anything.mask_decoder import MaskDecoder
from refiners.foundationals.segment_anything.prompt_encoder import MaskEncoder, PointEncoder

@@ -1,8 +1,10 @@
from enum import Enum, auto
from collections.abc import Sequence
from torch import device as Device, dtype as DType, Tensor, nn
from enum import Enum, auto
import torch
from jaxtyping import Float, Int
from torch import Tensor, device as Device, dtype as DType, nn
import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts

@@ -1,4 +1,5 @@
from torch import dtype as DType, device as Device
from torch import device as Device, dtype as DType
import refiners.fluxion.layers as fl

@@ -1,7 +1,8 @@
import sys
from importlib import import_module
from importlib.metadata import requires
from packaging.requirements import Requirement
import sys
refiners_requires = requires("refiners")
assert refiners_requires is not None

@@ -1,7 +1,8 @@
from typing import TYPE_CHECKING, Generic, Iterable, Any, TypeVar
from typing import TYPE_CHECKING, Any, Generic, Iterable, TypeVar
from loguru import logger
from torch import tensor
from torch.nn import Parameter
from loguru import logger
if TYPE_CHECKING:
from refiners.training_utils.config import BaseConfig

@@ -1,17 +1,18 @@
from enum import Enum
from logging import warn
from pathlib import Path
from typing import Any, Callable, Iterable, Literal, Type, TypeVar
from typing_extensions import TypedDict # https://errors.pydantic.dev/2.0b3/u/typed-dict-version
from torch.optim import AdamW, SGD, Optimizer, Adam
from torch.nn import Parameter
from enum import Enum
from bitsandbytes.optim import AdamW8bit, Lion8bit # type: ignore
from pydantic import BaseModel, validator
import tomli
import refiners.fluxion.layers as fl
from prodigyopt import Prodigy # type: ignore
from refiners.training_utils.dropout import apply_dropout, apply_gyro_dropout
import tomli
from bitsandbytes.optim import AdamW8bit, Lion8bit # type: ignore
from prodigyopt import Prodigy # type: ignore
from pydantic import BaseModel, validator
from torch.nn import Parameter
from torch.optim import SGD, Adam, AdamW, Optimizer
from typing_extensions import TypedDict # https://errors.pydantic.dev/2.0b3/u/typed-dict-version
import refiners.fluxion.layers as fl
from refiners.training_utils.dropout import apply_dropout, apply_gyro_dropout
__all__ = [
"parse_number_unit_field",

@@ -1,11 +1,11 @@
from typing import TYPE_CHECKING, Any, TypeVar
from torch import Tensor, randint, cat, rand
from torch import Tensor, cat, rand, randint
from torch.nn import Dropout as TorchDropout
import refiners.fluxion.layers as fl
from refiners.training_utils.callback import Callback
from refiners.fluxion.adapters.adapter import Adapter
from refiners.training_utils.callback import Callback
if TYPE_CHECKING:
from refiners.training_utils.config import BaseConfig

@@ -1,6 +1,7 @@
from datasets import load_dataset as _load_dataset, VerificationMode # type: ignore
from typing import Any, Generic, Protocol, TypeVar, cast
from datasets import VerificationMode, load_dataset as _load_dataset # type: ignore
__all__ = ["load_hf_dataset", "HuggingfaceDataset"]

@@ -1,29 +1,31 @@
import random
from dataclasses import dataclass
from typing import Any, TypeVar, TypedDict, Callable
from pydantic import BaseModel
from torch import device as Device, Tensor, randn, dtype as DType, Generator, cat
from loguru import logger
from torch.utils.data import Dataset
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from torchvision.transforms import Compose, RandomCrop, RandomHorizontalFlip # type: ignore
import refiners.fluxion.layers as fl
from PIL import Image
from functools import cached_property
from refiners.foundationals.latent_diffusion.stable_diffusion_1.model import SD1Autoencoder
from refiners.training_utils.config import BaseConfig
from typing import Any, Callable, TypedDict, TypeVar
from loguru import logger
from PIL import Image
from pydantic import BaseModel
from torch import Generator, Tensor, cat, device as Device, dtype as DType, randn
from torch.nn import Module
from torch.nn.functional import mse_loss
from torch.utils.data import Dataset
from torchvision.transforms import Compose, RandomCrop, RandomHorizontalFlip # type: ignore
import refiners.fluxion.layers as fl
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from refiners.foundationals.latent_diffusion import (
StableDiffusion_1,
DPMSolver,
SD1UNet,
StableDiffusion_1,
)
from refiners.foundationals.latent_diffusion.schedulers import DDPM
from torch.nn.functional import mse_loss
import random
from refiners.training_utils.wandb import WandbLoggable
from refiners.training_utils.trainer import Trainer
from refiners.foundationals.latent_diffusion.stable_diffusion_1.model import SD1Autoencoder
from refiners.training_utils.callback import Callback
from refiners.training_utils.huggingface_datasets import load_hf_dataset, HuggingfaceDataset
from torch.nn import Module
from refiners.training_utils.config import BaseConfig
from refiners.training_utils.huggingface_datasets import HuggingfaceDataset, load_hf_dataset
from refiners.training_utils.trainer import Trainer
from refiners.training_utils.wandb import WandbLoggable
class LatentDiffusionConfig(BaseModel):

@@ -1,41 +1,43 @@
from functools import cached_property, wraps
from pathlib import Path
import random
import time
from functools import cached_property, wraps
from pathlib import Path
from typing import Any, Callable, Generic, Iterable, TypeVar, cast
import numpy as np
from torch import device as Device, Tensor, get_rng_state, no_grad, set_rng_state, cuda, stack
from loguru import logger
from torch import Tensor, cuda, device as Device, get_rng_state, no_grad, set_rng_state, stack
from torch.autograd import backward
from torch.nn import Parameter
from torch.optim import Optimizer
from torch.optim.lr_scheduler import (
CosineAnnealingLR,
CosineAnnealingWarmRestarts,
CyclicLR,
ExponentialLR,
LambdaLR,
LRScheduler,
MultiplicativeLR,
MultiStepLR,
OneCycleLR,
ReduceLROnPlateau,
StepLR,
)
from torch.utils.data import DataLoader, Dataset
from torch.autograd import backward
from typing import Any, Callable, Generic, Iterable, TypeVar, cast
from loguru import logger
from refiners.fluxion import layers as fl
from refiners.fluxion.utils import manual_seed
from refiners.training_utils.wandb import WandbLogger, WandbLoggable
from refiners.training_utils.config import BaseConfig, TimeUnit, TimeValue, SchedulerType
from refiners.training_utils.dropout import DropoutCallback
from refiners.training_utils.callback import (
Callback,
ClockCallback,
GradientNormClipping,
GradientValueClipping,
GradientNormLogging,
GradientValueClipping,
MonitorLoss,
)
from torch.optim.lr_scheduler import (
StepLR,
ExponentialLR,
ReduceLROnPlateau,
CosineAnnealingLR,
LambdaLR,
OneCycleLR,
LRScheduler,
MultiplicativeLR,
CosineAnnealingWarmRestarts,
CyclicLR,
MultiStepLR,
)
from refiners.training_utils.config import BaseConfig, SchedulerType, TimeUnit, TimeValue
from refiners.training_utils.dropout import DropoutCallback
from refiners.training_utils.wandb import WandbLoggable, WandbLogger
__all__ = ["seed_everything", "scoped_seed", "Trainer"]

@@ -1,4 +1,5 @@
from typing import Any
import wandb
from PIL import Image

@@ -1,4 +1,5 @@
import pytest
from refiners.fluxion.adapters.adapter import Adapter
from refiners.fluxion.layers import Chain, Linear

@@ -1,6 +1,7 @@
from refiners.fluxion.adapters.lora import Lora, SingleLoraAdapter, LoraAdapter
from torch import randn, allclose
from torch import allclose, randn
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.lora import Lora, LoraAdapter, SingleLoraAdapter
def test_single_lora_adapter() -> None:

@@ -1,7 +1,8 @@
import torch
from refiners.fluxion.adapters.adapter import Adapter
from refiners.foundationals.latent_diffusion.range_adapter import RangeEncoder
from refiners.fluxion.layers import Chain, Linear
from refiners.foundationals.latent_diffusion.range_adapter import RangeEncoder
class DummyLinearAdapter(Chain, Adapter[Linear]):

@@ -1,6 +1,7 @@
import os
import torch
from pathlib import Path
import torch
from pytest import fixture
PARENT_PATH = Path(__file__).parent

@@ -1,34 +1,32 @@
import torch
import pytest
from typing import Iterator
from warnings import warn
from PIL import Image
from pathlib import Path
from typing import Iterator
from warnings import warn
from refiners.fluxion.utils import load_from_safetensors, image_to_tensor, manual_seed
import pytest
import torch
from PIL import Image
from refiners.fluxion.utils import image_to_tensor, load_from_safetensors, manual_seed
from refiners.foundationals.clip.concepts import ConceptExtender
from refiners.foundationals.latent_diffusion import (
StableDiffusion_1,
StableDiffusion_1_Inpainting,
SD1UNet,
SD1ControlnetAdapter,
SD1IPAdapter,
SD1T2IAdapter,
SD1UNet,
SDFreeUAdapter,
SDXLIPAdapter,
SDXLT2IAdapter,
SDFreeUAdapter,
StableDiffusion_1,
StableDiffusion_1_Inpainting,
)
from refiners.foundationals.latent_diffusion.lora import SD1LoraAdapter
from refiners.foundationals.latent_diffusion.multi_diffusion import DiffusionTarget
from refiners.foundationals.latent_diffusion.reference_only_control import ReferenceOnlyControlAdapter
from refiners.foundationals.latent_diffusion.restart import Restart
from refiners.foundationals.latent_diffusion.schedulers import DDIM
from refiners.foundationals.latent_diffusion.reference_only_control import ReferenceOnlyControlAdapter
from refiners.foundationals.clip.concepts import ConceptExtender
from refiners.foundationals.latent_diffusion.schedulers.scheduler import NoiseSchedule
from refiners.foundationals.latent_diffusion.stable_diffusion_1.multi_diffusion import SD1MultiDiffusion
from refiners.foundationals.latent_diffusion.stable_diffusion_xl.model import StableDiffusion_XL
from tests.utils import ensure_similar_images

@@ -1,13 +1,12 @@
import torch
import pytest
from warnings import warn
from PIL import Image
from pathlib import Path
from warnings import warn
import pytest
import torch
from PIL import Image
from refiners.fluxion.utils import image_to_tensor, tensor_to_image
from refiners.foundationals.latent_diffusion.preprocessors.informative_drawings import InformativeDrawings
from tests.utils import ensure_similar_images

@@ -1,5 +1,6 @@
import pytest
import torch
import refiners.fluxion.layers as fl
from refiners.fluxion.context import Contexts

@@ -1,7 +1,8 @@
import torch
import pytest
from warnings import warn
import pytest
import torch
import refiners.fluxion.layers as fl
from refiners.fluxion.layers.chain import ChainError, Distribute

@@ -1,10 +1,11 @@
# pyright: reportPrivateUsage=false
import pytest
import torch
from torch import nn, Tensor
from refiners.fluxion.utils import manual_seed
from refiners.fluxion.model_converter import ModelConverter, ConversionStage
from torch import Tensor, nn
import refiners.fluxion.layers as fl
from refiners.fluxion.model_converter import ConversionStage, ModelConverter
from refiners.fluxion.utils import manual_seed
class CustomBasicLayer1(fl.Module):

@@ -1,11 +1,11 @@
from dataclasses import dataclass
from warnings import warn
from torchvision.transforms.functional import gaussian_blur as torch_gaussian_blur # type: ignore
from torch import device as Device, dtype as DType
from PIL import Image
import pytest
import torch
from PIL import Image
from torch import device as Device, dtype as DType
from torchvision.transforms.functional import gaussian_blur as torch_gaussian_blur # type: ignore
from refiners.fluxion.utils import gaussian_blur, image_to_tensor, manual_seed, tensor_to_image

@@ -1,18 +1,16 @@
import torch
import pytest
from warnings import warn
from pathlib import Path
from warnings import warn
import pytest
import torch
import transformers # type: ignore
from diffusers import StableDiffusionPipeline # type: ignore
import refiners.fluxion.layers as fl
from refiners.fluxion.utils import load_from_safetensors
from refiners.foundationals.clip.concepts import ConceptExtender, TokenExtender
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from refiners.foundationals.clip.tokenizer import CLIPTokenizer
from refiners.fluxion.utils import load_from_safetensors
import refiners.fluxion.layers as fl
from diffusers import StableDiffusionPipeline # type: ignore
import transformers # type: ignore
PROMPTS = [
"a cute cat", # a simple prompt

@@ -1,13 +1,12 @@
import torch
import pytest
from warnings import warn
from pathlib import Path
from warnings import warn
import pytest
import torch
from transformers import CLIPVisionModelWithProjection # type: ignore
from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
from refiners.fluxion.utils import load_from_safetensors
from refiners.foundationals.clip.image_encoder import CLIPImageEncoderH
@pytest.fixture(scope="module")

@@ -1,15 +1,13 @@
import torch
import pytest
from warnings import warn
from pathlib import Path
from warnings import warn
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from refiners.fluxion.utils import load_from_safetensors
import pytest
import torch
import transformers # type: ignore
from refiners.foundationals.clip.tokenizer import CLIPTokenizer
from refiners.fluxion.utils import load_from_safetensors
from refiners.foundationals.clip.text_encoder import CLIPTextEncoderL
from refiners.foundationals.clip.tokenizer import CLIPTokenizer
long_prompt = """
Above these apparent hieroglyphics was a figure of evidently pictorial intent,

@@ -1,15 +1,14 @@
import torch
import pytest
from warnings import warn
from PIL import Image
from pathlib import Path
from warnings import warn
import pytest
import torch
from PIL import Image
from tests.utils import ensure_similar_images
from refiners.fluxion.utils import load_from_safetensors
from refiners.foundationals.latent_diffusion.auto_encoder import LatentDiffusionAutoencoder
from tests.utils import ensure_similar_images
@pytest.fixture(scope="module")
def ref_path() -> Path:

@@ -1,11 +1,11 @@
from typing import Iterator
import torch
import pytest
import torch
import refiners.fluxion.layers as fl
from refiners.fluxion.adapters.adapter import lookup_top_adapter
from refiners.foundationals.latent_diffusion import SD1UNet, SD1ControlnetAdapter
from refiners.foundationals.latent_diffusion import SD1ControlnetAdapter, SD1UNet
from refiners.foundationals.latent_diffusion.stable_diffusion_1.controlnet import Controlnet

Some files were not shown because too many files have changed in this diff.