diff --git a/src/refiners/foundationals/segment_anything/model.py b/src/refiners/foundationals/segment_anything/model.py
index ea4b44a..286d291 100644
--- a/src/refiners/foundationals/segment_anything/model.py
+++ b/src/refiners/foundationals/segment_anything/model.py
@@ -80,7 +80,7 @@ class SegmentAnything(fl.Chain):
 
     @no_grad()
     def compute_image_embedding(self, image: Image.Image) -> ImageEmbedding:
-        """Compute the emmbedding of an image.
+        """Compute the embedding of an image.
 
         Args:
             image: The image to compute the embedding of.
diff --git a/src/refiners/foundationals/swin/swin_transformer.py b/src/refiners/foundationals/swin/swin_transformer.py
index 488819e..b1aedc8 100644
--- a/src/refiners/foundationals/swin/swin_transformer.py
+++ b/src/refiners/foundationals/swin/swin_transformer.py
@@ -205,7 +205,7 @@ class WindowSDPA(fl.Module):
 
 
 class WindowAttention(fl.Chain):
     """
-    Window-based Multi-head Self-Attenion (W-MSA), optionally shifted (SW-MSA).
+    Window-based Multi-head Self-Attention (W-MSA), optionally shifted (SW-MSA).
 
     It has a trainable relative position bias (RelativePositionBias).