diff --git a/src/refiners/fluxion/layers/__init__.py b/src/refiners/fluxion/layers/__init__.py
index 1446244..bb1889f 100644
--- a/src/refiners/fluxion/layers/__init__.py
+++ b/src/refiners/fluxion/layers/__init__.py
@@ -2,7 +2,6 @@ from refiners.fluxion.layers.activations import GLU, ApproximateGeLU, GeLU, ReLU
 from refiners.fluxion.layers.attentions import Attention, SelfAttention, SelfAttention2d
 from refiners.fluxion.layers.basics import (
     Buffer,
-    Chunk,
     Cos,
     Flatten,
     GetArg,
@@ -15,7 +14,6 @@ from refiners.fluxion.layers.basics import (
     Slicing,
     Squeeze,
     Transpose,
-    Unbind,
     Unflatten,
     Unsqueeze,
     View,
@@ -75,9 +73,7 @@ __all__ = [
     "Parameter",
     "Sin",
     "Cos",
-    "Chunk",
     "Multiply",
-    "Unbind",
     "Matmul",
     "Buffer",
     "Lambda",
diff --git a/src/refiners/fluxion/layers/basics.py b/src/refiners/fluxion/layers/basics.py
index aa76694..e6b6c94 100644
--- a/src/refiners/fluxion/layers/basics.py
+++ b/src/refiners/fluxion/layers/basics.py
@@ -130,25 +130,6 @@ class Unsqueeze(Module):
         return x.unsqueeze(self.dim)
 
 
-class Unbind(Module):
-    def __init__(self, dim: int = 0) -> None:
-        self.dim = dim
-        super().__init__()
-
-    def forward(self, x: Tensor) -> tuple[Tensor, ...]:
-        return x.unbind(dim=self.dim)  # type: ignore
-
-
-class Chunk(Module):
-    def __init__(self, chunks: int, dim: int = 0) -> None:
-        self.chunks = chunks
-        self.dim = dim
-        super().__init__()
-
-    def forward(self, x: Tensor) -> tuple[Tensor, ...]:
-        return x.chunk(chunks=self.chunks, dim=self.dim)  # type: ignore
-
-
 class Sin(Module):
     def forward(self, x: Tensor) -> Tensor:
         return torch.sin(input=x)
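Both removed modules were thin, stateless wrappers over built-in tensor methods, so call sites that relied on them can use the underlying PyTorch ops directly (or wrap them in the exported `Lambda` layer). A minimal sketch of the equivalent plain-PyTorch calls; the tensor shape below is an illustrative assumption, not taken from this diff:

```python
import torch

x = torch.randn(2, 4, 8)  # illustrative shape, not from the diff

# Equivalent of the removed Chunk(chunks=2, dim=1) layer:
halves = x.chunk(chunks=2, dim=1)  # tuple of 2 tensors, each of shape (2, 2, 8)

# Equivalent of the removed Unbind(dim=0) layer:
parts = x.unbind(dim=0)  # tuple of 2 tensors, each of shape (4, 8)
```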