From 547a73e67aad63613e26e569f2ec00a79162f33e Mon Sep 17 00:00:00 2001
From: Pierre Chapuis
Date: Tue, 5 Sep 2023 14:50:27 +0200
Subject: [PATCH] clarify the "adapting when a LoRA is injected" issue in tests

---
 tests/adapters/test_lora.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/tests/adapters/test_lora.py b/tests/adapters/test_lora.py
index 1784bd9..8d25736 100644
--- a/tests/adapters/test_lora.py
+++ b/tests/adapters/test_lora.py
@@ -45,6 +45,15 @@ def test_lora_adapter() -> None:
     a2 = LoraAdapter[fl.Chain](chain, sub_targets=chain.walk(fl.Linear), rank=1, scale=1.0).inject()
     assert len(list(chain.layers(Lora))) == 6
 
+    # If we init a LoRA when another LoRA is already injected, the Linear
+    # layers of the first LoRA will be adapted too, which is typically not
+    # what we want.
+    # This issue can be avoided either by making the predicate for
+    # `walk` raise StopIteration when it encounters a LoRA (see the SD LoRA)
+    # or by creating all the LoRA Adapters first, before injecting them
+    # (see below).
+    assert len(list(chain.layers(Lora, recurse=True))) == 12
+
     # ejection in forward order
 
     a1.eject()
@@ -60,6 +69,10 @@ def test_lora_adapter() -> None:
     a2.inject()
     assert len(list(chain.layers(Lora))) == 6
 
+    # If we inject after init, we do not have the target selection problem:
+    # the LoRA layers are not adapted.
+    assert len(list(chain.layers(Lora, recurse=True))) == 6
+
     # ejection in reverse order
 
     a2.eject()
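
A rough sketch of the first workaround mentioned in the comment above: a `walk` predicate that raises StopIteration when it reaches a LoRA, in the spirit of the SD LoRA code. This is only an illustrative sketch, not part of the patch; it assumes `Chain.walk` accepts a `(module, parent)` predicate and stops descending into a subtree when that predicate raises StopIteration, and the import paths and the helper names `lora_free_linears` / `add_lora` are hypothetical.

import refiners.fluxion.layers as fl
from refiners.adapters.lora import Lora, LoraAdapter


def lora_free_linears(module: fl.Module, parent: fl.Chain) -> bool:
    # Prune already injected LoRAs: their internal Linear layers must not be adapted.
    if isinstance(module, Lora):
        raise StopIteration
    return isinstance(module, fl.Linear)


def add_lora(chain: fl.Chain) -> LoraAdapter[fl.Chain]:
    # Only plain Linear layers are targeted, even if another LoRA is already injected.
    return LoraAdapter[fl.Chain](
        chain, sub_targets=chain.walk(lora_free_linears), rank=1, scale=1.0
    ).inject()

The alternative workaround (creating all the LoRA adapters before injecting any of them) is exactly what the second hunk exercises, so it needs no extra code here.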