Commit
Fix linting errors in lora_loading_patch.py
fofr committed Sep 27, 2024
1 parent e2bb939 commit 01e81a3
Showing 1 changed file with 3 additions and 4 deletions.
7 changes: 3 additions & 4 deletions lora_loading_patch.py
@@ -49,7 +49,7 @@ def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, ada
 
         if network_alphas is not None and len(network_alphas) >= 1:
             prefix = cls.transformer_name
-            alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix]
+            alpha_keys = [k for k in network_alphas if k.startswith(prefix) and k.split(".")[0] == prefix]
             network_alphas = {k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys}
 
         lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=network_alphas, peft_state_dict=state_dict)
@@ -58,8 +58,7 @@ def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, ada
                 raise ValueError(
                     "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`."
                 )
-            else:
-                lora_config_kwargs.pop("use_dora")
+            lora_config_kwargs.pop("use_dora")
         lora_config = LoraConfig(**lora_config_kwargs)
 
         # adapter_name
@@ -87,4 +86,4 @@ def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, ada
         _pipeline.enable_model_cpu_offload()
     elif is_sequential_cpu_offload:
         _pipeline.enable_sequential_cpu_offload()
-    # Unsafe code />
\ No newline at end of file
+    # Unsafe code />
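
The two substantive changes are common lint fixes: iterating the `network_alphas` mapping directly instead of calling `.keys()`, and dropping the redundant `else` after a branch that always raises; the last hunk only restores the missing newline at end of file. Below is a minimal standalone sketch of the two patterns, using hypothetical helper names and a simplified version flag (the actual file gates on the installed `peft` version), not code taken from this repository:

def filter_network_alphas(network_alphas: dict, prefix: str) -> dict:
    # Pattern 1: iterating a dict already yields its keys, so `.keys()` is
    # redundant (pylint: consider-iterating-dictionary).
    alpha_keys = [k for k in network_alphas if k.startswith(prefix)]
    return {k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys}


def strip_dora_flag(lora_config_kwargs: dict, peft_supports_dora: bool) -> dict:
    # Pattern 2: an `else` after a branch that always raises is redundant
    # (pylint: no-else-raise), so the pop is de-indented out of the `else`.
    if lora_config_kwargs.get("use_dora") and not peft_supports_dora:
        raise ValueError("You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs.")
    lora_config_kwargs.pop("use_dora", None)
    return lora_config_kwargs


if __name__ == "__main__":
    alphas = {"transformer.layer_0.alpha": 8.0, "text_encoder.alpha": 4.0}
    print(filter_network_alphas(alphas, "transformer"))                            # {'layer_0.alpha': 8.0}
    print(strip_dora_flag({"use_dora": False, "r": 16}, peft_supports_dora=True))  # {'r': 16}

Both patterns are pure style changes: the linted form behaves identically, it just avoids the redundant `.keys()` call and the dead `else` branch.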
