From 01e81a317c5b5b1a5324061c2235351a0477d76d Mon Sep 17 00:00:00 2001
From: fofr
Date: Fri, 27 Sep 2024 11:00:35 +0000
Subject: [PATCH] Fix linting errors in lora_loading_patch.py

---
 lora_loading_patch.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/lora_loading_patch.py b/lora_loading_patch.py
index 97a4796..394fdf5 100644
--- a/lora_loading_patch.py
+++ b/lora_loading_patch.py
@@ -49,7 +49,7 @@ def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, ada
 
         if network_alphas is not None and len(network_alphas) >= 1:
             prefix = cls.transformer_name
-            alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix]
+            alpha_keys = [k for k in network_alphas if k.startswith(prefix) and k.split(".")[0] == prefix]
             network_alphas = {k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys}
 
         lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=network_alphas, peft_state_dict=state_dict)
@@ -58,8 +58,7 @@ def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, ada
                 raise ValueError(
                     "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`."
                 )
-            else:
-                lora_config_kwargs.pop("use_dora")
+            lora_config_kwargs.pop("use_dora")
         lora_config = LoraConfig(**lora_config_kwargs)
 
         # adapter_name
@@ -87,4 +86,4 @@ def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, ada
             _pipeline.enable_model_cpu_offload()
         elif is_sequential_cpu_offload:
             _pipeline.enable_sequential_cpu_offload()
-        # Unsafe code />
\ No newline at end of file
+        # Unsafe code />