From 2ee094c18ce39764681f2baf9f182f6b6bf5e05a Mon Sep 17 00:00:00 2001
From: Benjamin Trom <82658997+limiteinductive@users.noreply.github.com>
Date: Tue, 15 Aug 2023 18:06:42 +0200
Subject: [PATCH] Update scripts/convert-lora-weights.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Cédric Deltheil <355031+deltheil@users.noreply.github.com>
---
 scripts/convert-lora-weights.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/convert-lora-weights.py b/scripts/convert-lora-weights.py
index 8d45cb0..cdd15b5 100644
--- a/scripts/convert-lora-weights.py
+++ b/scripts/convert-lora-weights.py
@@ -57,7 +57,7 @@ def process(source: str, base_model: str, output_file: str) -> None:
     diffusers_to_refiners = create_state_dict_mapping(
         source_model=refiners_model, target_model=diffusers_model, source_args=refiners_args, target_args=diffusers_args
     )
-    assert diffusers_to_refiners is not None
+    assert diffusers_to_refiners is not None, "Model conversion failed"
     apply_loras_to_target(module=refiners_model, target=LoraTarget(target), rank=rank, scale=1.0)
     for layer in refiners_model.layers(layer_type=Lora):
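
Note: as a minimal sketch of what this one-line change buys (the `mapping` value below is hypothetical and not part of the patch), the two-argument assert form carries the failure reason into the traceback instead of raising a bare AssertionError:

    # Sketch: bare assert vs. assert with a message, assuming the mapping
    # helper returned None to signal failure (hypothetical value).
    mapping = None

    try:
        assert mapping is not None
    except AssertionError as e:
        print(repr(e))  # AssertionError() -- no context for the user

    try:
        assert mapping is not None, "Model conversion failed"
    except AssertionError as e:
        print(repr(e))  # AssertionError('Model conversion failed')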