Mirror of https://github.com/finegrain-ai/refiners.git, synced 2024-11-24 15:18:46 +00:00
clarify the "adapting when a LoRA is injected" issue in tests
This commit is contained in: parent 864937a776, commit 547a73e67a
@@ -45,6 +45,15 @@ def test_lora_adapter() -> None:
     a2 = LoraAdapter[fl.Chain](chain, sub_targets=chain.walk(fl.Linear), rank=1, scale=1.0).inject()
     assert len(list(chain.layers(Lora))) == 6

+    # If we init a LoRA when another LoRA is already injected, the Linear
+    # layers of the first LoRA will be adapted too, which is typically not
+    # what we want.
+    # This issue can be avoided either by making the predicate for
+    # `walk` raise StopIteration when it encounters a LoRA (see the SD LoRA)
+    # or by creating all the LoRA Adapters first, before injecting them
+    # (see below).
+    assert len(list(chain.layers(Lora, recurse=True))) == 12
+
     # ejection in forward order

     a1.eject()
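The first workaround mentioned in the new comment ("see the SD LoRA") can be sketched roughly as follows. This is an illustration only, not part of the diff: the `chain` variable stands for the test fixture, and the import path and the exact predicate signature accepted by `walk` are assumptions about the refiners API at the time of this commit.

    import refiners.fluxion.layers as fl
    from refiners.adapters.lora import Lora, LoraAdapter  # assumed import path

    def linear_targets(module: fl.Module, parent: fl.Chain) -> bool:
        # Assumed predicate contract: return True to select a target;
        # raising StopIteration tells `walk` not to recurse into this module.
        if isinstance(module, Lora):
            raise StopIteration  # skip the Linears inside an already-injected LoRA
        return isinstance(module, fl.Linear)

    # With a1 already injected, the predicate keeps a1's internal Linear layers
    # out of a2's sub_targets, so no Lora ends up nested inside another Lora.
    a2 = LoraAdapter[fl.Chain](chain, sub_targets=chain.walk(linear_targets), rank=1, scale=1.0).inject()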
@@ -60,6 +69,10 @@ def test_lora_adapter() -> None:
     a2.inject()
     assert len(list(chain.layers(Lora))) == 6

+    # If we inject after init we do not have the target selection problem,
+    # the LoRA layers are not adapted.
+    assert len(list(chain.layers(Lora, recurse=True))) == 6
+
     # ejection in reverse order

     a2.eject()
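The second workaround ("creating all the LoRA Adapters first, before injecting them") is the one this part of the test exercises. A minimal sketch of that ordering, reusing the same assumed imports and `chain` as above:

    # Both adapters pick their sub_targets while no Lora is present in `chain`,
    # so neither adapter ends up wrapping the other's internal Linear layers.
    a1 = LoraAdapter[fl.Chain](chain, sub_targets=chain.walk(fl.Linear), rank=1, scale=1.0)
    a2 = LoraAdapter[fl.Chain](chain, sub_targets=chain.walk(fl.Linear), rank=1, scale=1.0)
    a1.inject()
    a2.inject()
    # No Lora is nested inside another Lora, which is why the recursive and
    # non-recursive Lora counts agree in the assertion above.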