Revert "add deepseek v2 support (casper-hansen#508)"
This reverts commit 6b45c95.
TechxGenus authored Jun 27, 2024
1 parent c53cc7e commit fe521ca
Showing 5 changed files with 1 addition and 140 deletions.
1 change: 0 additions & 1 deletion awq/models/__init__.py
@@ -19,5 +19,4 @@
 from .starcoder2 import Starcoder2AWQForCausalLM
 from .phi3 import Phi3AWQForCausalLM
 from .cohere import CohereAWQForCausalLM
-from .deepseek_v2 import DeepseekV2AWQForCausalLM
 from .minicpm import MiniCPMAWQForCausalLM
1 change: 0 additions & 1 deletion awq/models/auto.py
@@ -28,7 +28,6 @@
     "starcoder2": Starcoder2AWQForCausalLM,
     "phi3": Phi3AWQForCausalLM,
     "cohere": CohereAWQForCausalLM,
-    "deepseek_v2": DeepseekV2AWQForCausalLM,
     "minicpm": MiniCPMAWQForCausalLM,
 }

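For context, the mapping above is what routes a checkpoint's `model_type` to its AWQ wrapper class, so after this revert `"deepseek_v2"` no longer resolves through it. A minimal loading sketch using the public AutoAWQ API (the model path below is a placeholder, not a real repository):

```python
from awq import AutoAWQForCausalLM

# Loading dispatches on the checkpoint's model_type via the mapping above;
# "deepseek_v2" is no longer a valid key after this revert.
model = AutoAWQForCausalLM.from_quantized(
    "path/to/awq-quantized-model",  # placeholder path
    fuse_layers=True,
)
```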
5 changes: 1 addition & 4 deletions awq/models/base.py
@@ -80,8 +80,7 @@
     "starcoder2": "AutoModelForCausalLM",
     "phi3": "AutoModelForCausalLM",
     "cohere": "AutoModelForCausalLM",
-    "deepseek_v2": "AutoModelForCausalLM",
-    "minicpm":"AutoModelForCausalLM",
+    "minicpm":"AutoModelForCausalLM"
 }


@@ -507,8 +506,6 @@ def from_quantized(
                 max_batch_size=int(os.getenv("AWQ_BATCH_SIZE", 1)),
             )
 
-        model.eval()
-
         return self(
             model,
             model_type,
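The same file also loses the `model.eval()` call inside `from_quantized`, so callers who depended on the model being put in inference mode can set it themselves after loading. A short sketch, assuming the wrapper exposes the underlying transformers module as `.model`:

```python
from awq import AutoAWQForCausalLM

model = AutoAWQForCausalLM.from_quantized("path/to/awq-quantized-model")  # placeholder path

# With model.eval() no longer called inside from_quantized, switch the wrapped
# transformers module (assumed to be exposed as `.model`) to inference mode here
# to disable dropout and other training-only behavior.
model.model.eval()
```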
128 changes: 0 additions & 128 deletions awq/models/deepseek_v2.py

This file was deleted.

6 changes: 0 additions & 6 deletions awq/quantize/quantizer.py
@@ -522,12 +522,6 @@ def cache_input_hook(m, x, y, name, feat_dict):
                 "block_sparse_moe": layer.block_sparse_moe,
             }
 
-        if self.awq_model.model_type == "deepseek_v2":
-            named_linears = {
-                **named_linears,
-                "mlp": layer.mlp,
-            }
-
         for name in named_linears:
             handles.append(
                 named_linears[name].register_forward_hook(
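The block removed here piggybacked on the quantizer's existing input-caching mechanism: every module listed in `named_linears` gets a forward hook that records its inputs for calibration. A generic, self-contained PyTorch sketch of that pattern (names such as `cache_input_hook`, `named_linears`, and `input_feat` mirror the surrounding code; the toy module is illustrative only, not the library's implementation):

```python
import functools
from collections import defaultdict

import torch
import torch.nn as nn

# Toy stand-in for a transformer block's submodules (illustrative only).
layer = nn.ModuleDict({"q_proj": nn.Linear(16, 16), "mlp": nn.Linear(16, 16)})
named_linears = {name: module for name, module in layer.items()}

input_feat = defaultdict(list)

def cache_input_hook(module, inputs, output, name, feat_dict):
    # Forward hooks receive the positional inputs as a tuple; keep the first one.
    feat_dict[name].append(inputs[0].detach())

handles = [
    module.register_forward_hook(
        functools.partial(cache_input_hook, name=name, feat_dict=input_feat)
    )
    for name, module in named_linears.items()
]

# Run calibration-style forward passes; the hooks record each module's inputs.
x = torch.randn(4, 16)
for name, module in named_linears.items():
    module(x)

# Remove the hooks once the inputs have been captured.
for handle in handles:
    handle.remove()

print({name: feats[0].shape for name, feats in input_feat.items()})
```

Adding `layer.mlp` to `named_linears`, as the reverted code did for DeepSeek-V2, simply meant the whole MoE block's inputs were cached alongside the individual linear layers.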
