diff --git a/README.md b/README.md
index 7b0ba89..c14db4a 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@
 
 Finegrain Refiners Library
 
-**The simplest way to train and run adapters on top of foundational models** ([dive in!](https://blog.finegrain.ai/posts/simplifying-ai-code/))
+**The simplest way to train and run adapters on top of foundation models** ([dive in!](https://blog.finegrain.ai/posts/simplifying-ai-code/))
 
 ______________________________________________________________________
 
@@ -27,8 +27,8 @@ ______________________________________________________________________
 - Added [T2I-Adapter](https://github.com/TencentARC/T2I-Adapter) for extra guidance ([example](https://github.com/TencentARC/T2I-Adapter/discussions/93))
 - Added [MultiDiffusion](https://github.com/omerbt/MultiDiffusion) for e.g. panorama images
 - Added [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter), aka image prompt ([example](https://github.com/tencent-ailab/IP-Adapter/issues/92))
-- Added [Segment Anything](https://github.com/facebookresearch/segment-anything) to foundational models
-- Added [SDXL 1.0](https://github.com/Stability-AI/generative-models) to foundational models
+- Added [Segment Anything](https://github.com/facebookresearch/segment-anything) to foundation models
+- Added [SDXL 1.0](https://github.com/Stability-AI/generative-models) to foundation models
 - Made possible to add new concepts to the CLIP text encoder, e.g. via [Textual Inversion](https://arxiv.org/abs/2208.01618)
 
 ## Getting Started
@@ -175,7 +175,7 @@ At [Finegrain](https://finegrain.ai), we're on a mission to automate product pho
 
 That's why we're building Refiners.
 
-It's a framework to easily bridge the last mile quality gap of foundational models like Stable Diffusion or Segment Anything Model (SAM), by adapting them to specific tasks with lightweight trainable and composable patches.
+It's a framework to easily bridge the last mile quality gap of foundation models like Stable Diffusion or Segment Anything Model (SAM), by adapting them to specific tasks with lightweight trainable and composable patches.
 
 We decided to build Refiners in the open.
 
@@ -349,7 +349,7 @@ We took inspiration from these great projects:
 ```bibtex
 @misc{the-finegrain-team-2023-refiners,
   author = {Benjamin Trom and Pierre Chapuis and Cédric Deltheil},
-  title = {Refiners: The simplest way to train and run adapters on top of foundational models},
+  title = {Refiners: The simplest way to train and run adapters on top of foundation models},
   year = {2023},
   publisher = {GitHub},
   journal = {GitHub repository},
diff --git a/docs/reference/SUMMARY.md b/docs/reference/SUMMARY.md
index ce56b88..8338a24 100644
--- a/docs/reference/SUMMARY.md
+++ b/docs/reference/SUMMARY.md
@@ -4,7 +4,7 @@
 * [ Layers](fluxion/layers.md)
 * [ Model Converter](fluxion/model_converter.md)
 * [ Utils](fluxion/utils.md)
-* Foundational Models
+* Foundation Models
 * [ CLIP](foundationals/clip.md)
 * [ DINOv2](foundationals/dinov2.md)
 * [ Latent Diffusion](foundationals/latent_diffusion.md)
diff --git a/pyproject.toml b/pyproject.toml
index 49e7a18..659fdca 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "refiners"
 version = "0.2.0"
-description = "The simplest way to train and run adapters on top of foundational models"
+description = "The simplest way to train and run adapters on top of foundation models"
 authors = [{ name = "The Finegrain Team", email = "bonjour@lagon.tech" }]
 license = "MIT"
 dependencies = [