Commit f8fc141

fix llama_vision chat_template and decode (#498)
1 parent 50ed3ce commit f8fc141

File tree

1 file changed: +2 −2 lines changed

lmms_eval/models/llama_vision.py

Lines changed: 2 additions & 2 deletions
@@ -187,7 +187,7 @@ def generate_until(self, requests: List[Instance]) -> List[str]:
                 messages[-1]["content"].append({"type": "image"})
             messages[-1]["content"].append({"type": "text", "text": contexts})
             prompt = self.processor.apply_chat_template(messages, add_generation_prompt=True)
-            inputs = self.processor(images, prompt, return_tensors="pt").to(self.model.device)
+            inputs = self.processor(images, prompt, add_special_tokens=False, return_tensors="pt").to(self.model.device)
 
             if "max_new_tokens" not in gen_kwargs:
                 gen_kwargs["max_new_tokens"] = 1024
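The first hunk passes `add_special_tokens=False` to the processor call. `apply_chat_template` already renders the chat control tokens, including the leading `<|begin_of_text|>` (BOS), into the prompt string, so re-tokenizing that string with the tokenizer default prepends a duplicate BOS. A minimal sketch of the effect, assuming the Hugging Face `MllamaProcessor`; the checkpoint id and message below are illustrative, not taken from lmms-eval's config:

```python
# Sketch of the double-BOS problem this hunk fixes (checkpoint id and
# message are illustrative assumptions, not from the repo).
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe the image."},
        ],
    }
]

# apply_chat_template renders the special tokens into the prompt string,
# including the leading <|begin_of_text|> (BOS).
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)

# With the tokenizer default (add_special_tokens=True) a second BOS is
# prepended on top of the one already present in the rendered prompt.
with_dup = processor.tokenizer(prompt).input_ids
without = processor.tokenizer(prompt, add_special_tokens=False).input_ids
print(with_dup[:2])  # two copies of the BOS id, e.g. [128000, 128000]
print(without[:1])   # single BOS, e.g. [128000]
```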
@@ -208,7 +208,7 @@ def generate_until(self, requests: List[Instance]) -> List[str]:
                 do_sample=gen_kwargs["do_sample"],
             )
             output = output[:, inputs["input_ids"].shape[-1] :]
-            res.append(self.processor.decode(output[0]))
+            res.append(self.processor.decode(output[0], skip_special_tokens=True))
 
             pbar.update(1)
         pbar.close()
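The second hunk adds `skip_special_tokens=True` to the decode call, so control tokens such as `<|eot_id|>` are stripped from the generated answer instead of leaking into the string that gets scored. A minimal sketch, again assuming the `MllamaProcessor`; the sample ids here are constructed by hand for illustration rather than taken from a real generation:

```python
# Sketch of the decode-side fix (processor as in the previous sketch;
# sample_ids are hand-built for illustration, not a real model output).
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")

eot_id = processor.tokenizer.convert_tokens_to_ids("<|eot_id|>")
sample_ids = processor.tokenizer("A cat on a mat.", add_special_tokens=False).input_ids + [eot_id]

print(processor.decode(sample_ids))
# -> "A cat on a mat.<|eot_id|>"   (control token leaks into the answer)
print(processor.decode(sample_ids, skip_special_tokens=True))
# -> "A cat on a mat."             (clean string for scoring)
```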
