clean up comments
Signed-off-by: NickLucche <[email protected]>
NickLucche committed Feb 17, 2025
1 parent be71771 · commit 9205c72
Showing 1 changed file with 0 additions and 24 deletions.
vllm/model_executor/models/bart.py
@@ -300,21 +300,6 @@ def __init__(
                              f" and `num_heads`: {num_heads}).")
         self.scaling = self.head_dim**-0.5
 
-        # self.q_proj = ColumnParallelLinear(
-        #     input_size=self.embed_dim,
-        #     output_size=self.embed_dim,
-        #     bias=bias,
-        #     quant_config=quant_config,
-        # )
-
-        # self.kv_proj = QKVParallelLinear(
-        #     hidden_size=self.d_model,
-        #     head_size=self.d_model // self.total_num_heads,
-        #     total_num_heads=0,
-        #     total_num_kv_heads=self.total_num_kv_heads,
-        #     bias=bias,
-        #     quant_config=quant_config,
-        # )
         self.qkv_proj = QKVCrossParallelLinear(self.d_model,
                                                self.d_model //
                                                self.total_num_heads,
Expand Down Expand Up @@ -364,15 +349,6 @@ def forward(
) -> torch.Tensor:
"""Input shape: Batch x Time x Channel"""

# q, _ = self.q_proj(decoder_hidden_states)

# if encoder_hidden_states is None:
# k = None
# v = None
# else:
# # Prefill, cache encoder KV.
# kv_enc, _ = self.kv_proj(encoder_hidden_states)
# k, v = kv_enc.split(self.kv_size, dim=-1)
q, k, v = self.qkv_proj(decoder_hidden_states, encoder_hidden_states)

attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
Expand Down
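Context for the deletion: the removed lines were commented-out remnants of the earlier design, which used a separate ColumnParallelLinear for the decoder-side Q projection and a QKVParallelLinear for the encoder-side K/V projections. The surviving call sites show that QKVCrossParallelLinear now covers both in a single layer. For readers unfamiliar with the pattern, below is a minimal sketch in plain PyTorch of what such a fused cross-attention projection does. This is an illustration only, not vLLM's tensor-parallel implementation; the ToyQKVCrossProjection name, shapes, and bias default are assumptions made for clarity.

import torch
import torch.nn as nn
from typing import Optional, Tuple


class ToyQKVCrossProjection(nn.Module):
    """Project decoder states to Q and encoder states to K/V in one module.

    Illustrative sketch only; vLLM's QKVCrossParallelLinear additionally
    handles tensor parallelism and quantization.
    """

    def __init__(self, d_model: int, kv_size: int, bias: bool = False):
        super().__init__()
        # Q always comes from the decoder side.
        self.q_proj = nn.Linear(d_model, d_model, bias=bias)
        # K and V are fused into one projection and split at call time.
        self.kv_proj = nn.Linear(d_model, 2 * kv_size, bias=bias)
        self.kv_size = kv_size

    def forward(
        self,
        decoder_hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
        q = self.q_proj(decoder_hidden_states)
        if encoder_hidden_states is None:
            # Decode steps: encoder K/V were cached during prefill.
            return q, None, None
        # Prefill: compute encoder K/V once, to be cached by the caller.
        kv = self.kv_proj(encoder_hidden_states)
        k, v = kv.split(self.kv_size, dim=-1)
        return q, k, v

As the removed comments themselves noted ("Prefill, cache encoder KV"), K/V are only produced when encoder hidden states are available; on later decode steps the attention backend reads them from the KV cache, which is why the fused forward returns None for k and v in that case.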
