clarify the "adapting when a LoRA is injected" issue in tests

Pierre Chapuis 2023-09-05 14:50:27 +02:00
parent 864937a776
commit 547a73e67a


@@ -45,6 +45,15 @@ def test_lora_adapter() -> None:
a2 = LoraAdapter[fl.Chain](chain, sub_targets=chain.walk(fl.Linear), rank=1, scale=1.0).inject()
assert len(list(chain.layers(Lora))) == 6
# If we init a LoRA when another LoRA is already injected, the Linear
# layers of the first LoRA will be adapted too, which is typically not
# what we want.
# This issue can be avoided either by making the predicate for
# `walk` raise StopIteration when it encounters a LoRA (see the SD LoRA)
# or by creating all the LoRA Adapters first, before injecting them
# (see below).
assert len(list(chain.layers(Lora, recurse=True))) == 12
# ejection in forward order
a1.eject()
@@ -60,6 +69,10 @@ def test_lora_adapter() -> None:
a2.inject()
assert len(list(chain.layers(Lora))) == 6
# If we inject after init, we do not have the target selection problem:
# the LoRA layers are not adapted.
assert len(list(chain.layers(Lora, recurse=True))) == 6
# ejection in reverse order
a2.eject()
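
Note: below is a minimal sketch of the first workaround mentioned in the comment, pruning the `walk` with a predicate that raises StopIteration at any Lora, as the SD LoRA code referenced above does. The import path, the `(module, parent)` predicate signature, and the small 3-Linear chain are assumptions made for illustration; only the StopIteration pruning mechanism and the LoraAdapter call come from the test itself.

import refiners.fluxion.layers as fl
from refiners.adapters.lora import Lora, LoraAdapter  # import path is an assumption

def linear_targets(module: fl.Module, parent: fl.Chain) -> bool:
    # Prune the walk at any existing Lora so its internal Linear
    # layers are never selected as adaptation targets.
    if isinstance(module, Lora):
        raise StopIteration
    return isinstance(module, fl.Linear)

# Illustrative chain with 3 Linear layers, mirroring the counts in the test above.
chain = fl.Chain(fl.Linear(4, 4), fl.Chain(fl.Linear(4, 4), fl.Linear(4, 4)))

a1 = LoraAdapter[fl.Chain](chain, sub_targets=chain.walk(fl.Linear), rank=1, scale=1.0).inject()
# Even though a1 is already injected, the filtered walk only selects the
# original Linear layers, not the ones inside a1's LoRAs.
a2 = LoraAdapter[fl.Chain](chain, sub_targets=chain.walk(linear_targets), rank=1, scale=1.0).inject()
assert len(list(chain.layers(Lora, recurse=True))) == 6  # not 12

The second workaround, creating all the LoRA adapters before injecting any of them, is what the second hunk exercises: a2 is initialized while a1 is not yet injected, so chain.walk(fl.Linear) only sees the original Linear layers.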