diff --git a/lmms_eval/tasks/olympiadbench/utils.py b/lmms_eval/tasks/olympiadbench/cn_utils.py
similarity index 50%
rename from lmms_eval/tasks/olympiadbench/utils.py
rename to lmms_eval/tasks/olympiadbench/cn_utils.py
index e289f55e3..34e5ce4d4 100644
--- a/lmms_eval/tasks/olympiadbench/utils.py
+++ b/lmms_eval/tasks/olympiadbench/cn_utils.py
@@ -16,7 +16,6 @@ def olympiadbench_doc_to_visual(doc):
 def olympiadbench_doc_to_text(doc):
     question = doc["question"]
     subject = doc["subfield"]
-    language = "en" if "English" in doc["classification"] else "zh"
     mul_ans = doc["is_multiple_answer"]
     if mul_ans is None:
         mul_ans = False
@@ -24,50 +23,34 @@ def olympiadbench_doc_to_text(doc):
     if ans_type == "Need_human_evaluate":
         ans_type = "proof based"
 
-    pre_prompt = ""
-    if language == "en":
-        pre_prompt += f"The following is a question from an International {subject} competition.\n"
-    else:
-        pre_prompt += f"以下是中国{subject}竞赛中的解答题。\n"
+    pre_prompt = f"以下是中国{subject}竞赛中的解答题。\n"
 
     post_prompt = ""
-    if language == "en":
-        if not mul_ans:
-            post_prompt += f"The answer of the question should be {ans_type}.\n"
-        else:
-            post_prompt += f"The question has multiple answers, each of them should be {ans_type}.\n"
-        post_prompt += "Please calculate the answer according to the given requirements and the information provided. Please use LaTeX format to represent the variables and formulas used in the solution process and results. Please end your solution with "
-        if not mul_ans:
-            post_prompt += '"So the final answer is \\boxed{answer}."\n'
-        else:
-            post_prompt += 'So the final answer is \\boxed{multiple answers connected with commas}.\n'
+    if not mul_ans:
+        post_prompt += f"答案类型为{ans_type}。\n"
+    else:
+        post_prompt += f"题目有多个答案,答案类型均为{ans_type}。\n"
+    post_prompt += "请根据题目的要求和所提供的信息计算得出答案。解答过程和结果中使用的变量和公式请使用LaTeX格式表示。请在最后以"
+    if not mul_ans:
+        post_prompt += '"所以最终答案是\\boxed{答案}。"\n'
     else:
-        if not mul_ans:
-            post_prompt += f"答案类型为{ans_type}。\n"
-        else:
-            post_prompt += f"题目有多个答案,答案类型均为{ans_type}。\n"
-        post_prompt += "请根据题目的要求和所提供的信息计算得出答案。解答过程和结果中使用的变量和公式请使用LaTeX格式表示。请在最后以"
-        if not mul_ans:
-            post_prompt += '"所以最终答案是\\boxed{答案}。"\n'
-        else:
-            post_prompt += '"所以最终答案是\\boxed{用英文逗号连接的多个答案}。"\n'
+        post_prompt += '"所以最终答案是\\boxed{用英文逗号连接的多个答案}。"\n'
 
     final_question = pre_prompt + question + '\n' + post_prompt
     return final_question
 
 def olympiadbench_process_results(doc, results):
     precision = doc["error"]
-    answer_type = doc["answer_type"]
+    is_proving = "TP" in doc["source"]
     if precision is None:
         precision = 0
     prediction = results[0].strip()
 
-    if answer_type == "Need_human_evaluate":
+    if is_proving:
         return {
             "submission": prediction
         }
     else:
-        prediction = prediction.split("final answer is")[-1]
         prediction = prediction.split("所以最终答案是")[-1]
         prediction = prediction.replace('"', "").replace("\n", "").replace(" ", "").strip(".").strip("。")
         accuracy = olympiadbench_evaluator.judge(prediction, doc["final_answer"][0], precision)
@@ -78,7 +61,7 @@ def olympiadbench_process_results(doc, results):
 
 def olympiadbench_aggregate_results(results, args):
     now_date_time = datetime.datetime.now().strftime("%Y-%m%d-%H%M-%S")
-    submission_file_name = f"olympiadbench-test-submission-{now_date_time}.json"
+    submission_file_name = f"olympiadbench-test-cn-submission-{now_date_time}.json"
     path = generate_submission_file(submission_file_name, args)
     with open(path, "w") as f:
         json.dump(results, f, ensure_ascii=False)
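Note on the rename above: besides dropping the English branch, olympiadbench_process_results now detects proof-style questions by checking whether "TP" appears in doc["source"] instead of comparing answer_type against "Need_human_evaluate", and answer extraction searches only for the Chinese marker. The standalone sketch below traces that extraction path; the source tag and the model output are invented for illustration.

    # Sketch of cn_utils' routing and answer extraction; not part of the patch.
    def extract_cn_answer(prediction):
        # Keep what follows the Chinese marker, then strip quotes,
        # whitespace, and trailing punctuation, as cn_utils does.
        prediction = prediction.split("所以最终答案是")[-1]
        return prediction.replace('"', "").replace("\n", "").replace(" ", "").strip(".").strip("。")

    doc = {"source": "OE_TO_maths_zh_CEE"}  # hypothetical tag; no "TP" means a scored item
    is_proving = "TP" in doc["source"]      # proof questions are submitted verbatim instead
    output = '化简可得。所以最终答案是"\\boxed{12}"。'
    if not is_proving:
        print(extract_cn_answer(output))    # -> \boxed{12}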
diff --git a/lmms_eval/tasks/olympiadbench/en_utils.py b/lmms_eval/tasks/olympiadbench/en_utils.py
new file mode 100644
index 000000000..a21ee159f
--- /dev/null
+++ b/lmms_eval/tasks/olympiadbench/en_utils.py
@@ -0,0 +1,69 @@
+import os
+import json
+import datetime
+from lmms_eval.tasks.olympiadbench.olympiadbench_evals import OlympiadBenchEvaluator
+from lmms_eval.tasks._task_utils.file_utils import generate_submission_file
+
+import logging
+eval_logger = logging.getLogger("lmms-eval")
+dir_name = os.path.dirname(os.path.abspath(__file__))
+
+olympiadbench_evaluator = OlympiadBenchEvaluator()
+
+def olympiadbench_doc_to_visual(doc):
+    return [image.convert("RGB") for image in doc["images"]]
+
+def olympiadbench_doc_to_text(doc):
+    question = doc["question"]
+    subject = doc["subfield"]
+    mul_ans = doc["is_multiple_answer"]
+    if mul_ans is None:
+        mul_ans = False
+    ans_type = doc["answer_type"]
+    if ans_type == "Need_human_evaluate":
+        ans_type = "proof based"
+
+    pre_prompt = f"The following is a question from an International {subject} competition.\n"
+
+    post_prompt = ""
+    if not mul_ans:
+        post_prompt += f"The answer of the question should be {ans_type}.\n"
+    else:
+        post_prompt += f"The question has multiple answers, each of them should be {ans_type}.\n"
+    post_prompt += "Please calculate the answer according to the given requirements and the information provided. Please use LaTeX format to represent the variables and formulas used in the solution process and results. Please end your solution with "
+    if not mul_ans:
+        post_prompt += '"So the final answer is \\boxed{answer}."\n'
+    else:
+        post_prompt += 'So the final answer is \\boxed{multiple answers connected with commas}.\n'
+
+    final_question = pre_prompt + question + '\n' + post_prompt
+    return final_question
+
+def olympiadbench_process_results(doc, results):
+    precision = doc["error"]
+    is_proving = "TP" in doc["source"]
+    if precision is None:
+        precision = 0
+    prediction = results[0].strip()
+
+    if is_proving:
+        return {
+            "submission": prediction
+        }
+    else:
+        prediction = prediction.split("final answer is")[-1]
+        prediction = prediction.replace('"', "").replace("\n", "").replace(" ", "").strip(".").strip("。")
+        accuracy = olympiadbench_evaluator.judge(prediction, doc["final_answer"][0], precision)
+        accuracy = int(accuracy)
+        return {
+            "exact_match": accuracy
+        }
+
+def olympiadbench_aggregate_results(results, args):
+    now_date_time = datetime.datetime.now().strftime("%Y-%m%d-%H%M-%S")
+    submission_file_name = f"olympiadbench-test-en-submission-{now_date_time}.json"
+    path = generate_submission_file(submission_file_name, args)
+    with open(path, "w") as f:
+        json.dump(results, f, ensure_ascii=False)
+    print(f"Submission file saved to {path}")
+
\ No newline at end of file
diff --git a/lmms_eval/tasks/olympiadbench/olympiadbench.yaml b/lmms_eval/tasks/olympiadbench/olympiadbench.yaml
index 992686303..1580b158b 100644
--- a/lmms_eval/tasks/olympiadbench/olympiadbench.yaml
+++ b/lmms_eval/tasks/olympiadbench/olympiadbench.yaml
@@ -1,9 +1,6 @@
 group: olympiadbench
 task:
-# - olympiadbench_test_math_en_comp
-# - olympiadbench_test_math_zh_comp
-- olympiadbench_test_math_zh_cee
-# - olympiadbench_test_physics_en_comp
-# - olympiadbench_test_physics_zh_cee
+- olympiadbench_test_en
+- olympiadbench_test_cn
 metadata:
   - version: 0.0
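The new en_utils.py mirrors cn_utils.py except for the English prompts, the "final answer is" marker, and the submission filename, and the group config now points at the two consolidated tasks. A quick way to eyeball the assembled English prompt is to call the builder on a stub document, assuming a checkout where lmms_eval is importable; every field value below is hypothetical.

    # Stub document for inspecting the English prompt; values are made up.
    from lmms_eval.tasks.olympiadbench.en_utils import olympiadbench_doc_to_text

    doc = {
        "question": "Determine all positive integers n such that n + 1 divides n^2 + 1.",
        "subfield": "Math",
        "is_multiple_answer": False,
        "answer_type": "Numerical",
    }
    # Prints the competition pre-prompt, the question, then the answer-type
    # and \boxed{...} instructions appended as post_prompt.
    print(olympiadbench_doc_to_text(doc))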
diff --git a/lmms_eval/tasks/olympiadbench/olympiadbench_test_math_zh_cee.yaml b/lmms_eval/tasks/olympiadbench/olympiadbench_test_cn.yaml
similarity index 55%
rename from lmms_eval/tasks/olympiadbench/olympiadbench_test_math_zh_cee.yaml
rename to lmms_eval/tasks/olympiadbench/olympiadbench_test_cn.yaml
index b0f0e8849..574d0c194 100644
--- a/lmms_eval/tasks/olympiadbench/olympiadbench_test_math_zh_cee.yaml
+++ b/lmms_eval/tasks/olympiadbench/olympiadbench_test_cn.yaml
@@ -1,11 +1,11 @@
 dataset_path: lmms-lab/OlympiadBench
 dataset_kwargs:
   token: True
-task : "olympiadbench_test_math_zh_cee"
-test_split: test_math_zh_cee
+task : "olympiadbench_test_cn"
+test_split: test_cn
 output_type: generate_until
-doc_to_visual: !function utils.olympiadbench_doc_to_visual
-doc_to_text: !function utils.olympiadbench_doc_to_text
+doc_to_visual: !function cn_utils.olympiadbench_doc_to_visual
+doc_to_text: !function cn_utils.olympiadbench_doc_to_text
 doc_to_target: "answer"
 generation_kwargs:
   until:
@@ -15,10 +15,10 @@ generation_kwargs:
   top_p: 0
   num_beams: 1
   do_sample: false
-process_results: !function utils.olympiadbench_process_results
+process_results: !function cn_utils.olympiadbench_process_results
 metric_list:
   - metric: submission
-    aggregation: !function utils.olympiadbench_aggregate_results
+    aggregation: !function cn_utils.olympiadbench_aggregate_results
     higher_is_better: true
   - metric: exact_match
     aggregation: mean
diff --git a/lmms_eval/tasks/olympiadbench/olympiadbench_test_math_en_comp.yaml b/lmms_eval/tasks/olympiadbench/olympiadbench_test_en.yaml
similarity index 55%
rename from lmms_eval/tasks/olympiadbench/olympiadbench_test_math_en_comp.yaml
rename to lmms_eval/tasks/olympiadbench/olympiadbench_test_en.yaml
index a3938e4e6..6d293fb7a 100644
--- a/lmms_eval/tasks/olympiadbench/olympiadbench_test_math_en_comp.yaml
+++ b/lmms_eval/tasks/olympiadbench/olympiadbench_test_en.yaml
@@ -1,11 +1,11 @@
 dataset_path: lmms-lab/OlympiadBench
 dataset_kwargs:
   token: True
-task : "olympiadbench_test_math_en_comp"
-test_split: test_math_en_comp
+task : "olympiadbench_test_en"
+test_split: test_en
 output_type: generate_until
-doc_to_visual: !function utils.olympiadbench_doc_to_visual
-doc_to_text: !function utils.olympiadbench_doc_to_text
+doc_to_visual: !function en_utils.olympiadbench_doc_to_visual
+doc_to_text: !function en_utils.olympiadbench_doc_to_text
 doc_to_target: "answer"
 generation_kwargs:
   until:
@@ -15,10 +15,10 @@ generation_kwargs:
   top_p: 0
   num_beams: 1
   do_sample: false
-process_results: !function utils.olympiadbench_process_results
+process_results: !function en_utils.olympiadbench_process_results
 metric_list:
   - metric: submission
-    aggregation: !function utils.olympiadbench_aggregate_results
+    aggregation: !function en_utils.olympiadbench_aggregate_results
     higher_is_better: true
   - metric: exact_match
     aggregation: mean
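Both renamed configs bind doc_to_visual, doc_to_text, process_results, and the submission aggregation to the language-specific modules through the harness's custom !function tag. Plain yaml.safe_load rejects unknown tags, so checking these files outside lmms-eval needs a stub constructor. The sketch below keeps each tag's payload as a string (the real harness resolves it to a callable) and verifies that the hooks reference the right module; paths assume the repository root as the working directory.

    import yaml

    # Stub constructor so PyYAML tolerates the harness's !function tag;
    # the payload stays a plain "module.function" string here.
    def _function_tag(loader, node):
        return loader.construct_scalar(node)

    yaml.SafeLoader.add_constructor("!function", _function_tag)

    for path, module in [
        ("lmms_eval/tasks/olympiadbench/olympiadbench_test_cn.yaml", "cn_utils"),
        ("lmms_eval/tasks/olympiadbench/olympiadbench_test_en.yaml", "en_utils"),
    ]:
        with open(path) as f:
            cfg = yaml.safe_load(f)
        # Each hook should reference the matching language-specific module.
        for key in ("doc_to_visual", "doc_to_text", "process_results"):
            assert cfg[key].startswith(module + "."), (path, key, cfg[key])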
diff --git a/lmms_eval/tasks/olympiadbench/olympiadbench_test_math_zh_comp.yaml b/lmms_eval/tasks/olympiadbench/olympiadbench_test_math_zh_comp.yaml
deleted file mode 100644
index d92ab2a15..000000000
--- a/lmms_eval/tasks/olympiadbench/olympiadbench_test_math_zh_comp.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-dataset_path: lmms-lab/OlympiadBench
-dataset_kwargs:
-  token: True
-task : "olympiadbench_test_math_zh_comp"
-test_split: test_math_zh_comp
-output_type: generate_until
-doc_to_visual: !function utils.olympiadbench_doc_to_visual
-doc_to_text: !function utils.olympiadbench_doc_to_text
-doc_to_target: "answer"
-generation_kwargs:
-  until:
-    - "ASSISTANT:"
-  max_new_tokens: 1024
-  temperature: 0
-  top_p: 0
-  num_beams: 1
-  do_sample: false
-process_results: !function utils.olympiadbench_process_results
-metric_list:
-  - metric: submission
-    aggregation: !function utils.olympiadbench_aggregate_results
-    higher_is_better: true
-  - metric: exact_match
-    aggregation: mean
-    higher_is_better: true
\ No newline at end of file
diff --git a/lmms_eval/tasks/olympiadbench/olympiadbench_test_physics_en_comp.yaml b/lmms_eval/tasks/olympiadbench/olympiadbench_test_physics_en_comp.yaml
deleted file mode 100644
index 1aed85fdc..000000000
--- a/lmms_eval/tasks/olympiadbench/olympiadbench_test_physics_en_comp.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-dataset_path: lmms-lab/OlympiadBench
-dataset_kwargs:
-  token: True
-task : "olympiadbench_test_physics_en_comp"
-test_split: test_physics_en_comp
-output_type: generate_until
-doc_to_visual: !function utils.olympiadbench_doc_to_visual
-doc_to_text: !function utils.olympiadbench_doc_to_text
-doc_to_target: "answer"
-generation_kwargs:
-  until:
-    - "ASSISTANT:"
-  max_new_tokens: 1024
-  temperature: 0
-  top_p: 0
-  num_beams: 1
-  do_sample: false
-process_results: !function utils.olympiadbench_process_results
-metric_list:
-  - metric: submission
-    aggregation: !function utils.olympiadbench_aggregate_results
-    higher_is_better: true
-  - metric: exact_match
-    aggregation: mean
-    higher_is_better: true
\ No newline at end of file
diff --git a/lmms_eval/tasks/olympiadbench/olympiadbench_test_physics_zh_cee.yaml b/lmms_eval/tasks/olympiadbench/olympiadbench_test_physics_zh_cee.yaml
deleted file mode 100644
index c04bcabaa..000000000
--- a/lmms_eval/tasks/olympiadbench/olympiadbench_test_physics_zh_cee.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-dataset_path: lmms-lab/OlympiadBench
-dataset_kwargs:
-  token: True
-task : "olympiadbench_test_physics_zh_cee"
-test_split: test_physics_zh_cee
-output_type: generate_until
-doc_to_visual: !function utils.olympiadbench_doc_to_visual
-doc_to_text: !function utils.olympiadbench_doc_to_text
-doc_to_target: "answer"
-generation_kwargs:
-  until:
-    - "ASSISTANT:"
-  max_new_tokens: 1024
-  temperature: 0
-  top_p: 0
-  num_beams: 1
-  do_sample: false
-process_results: !function utils.olympiadbench_process_results
-metric_list:
-  - metric: submission
-    aggregation: !function utils.olympiadbench_aggregate_results
-    higher_is_better: true
-  - metric: exact_match
-    aggregation: mean
-    higher_is_better: true
\ No newline at end of file
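The three per-subject configs deleted above are superseded by the consolidated olympiadbench_test_en and olympiadbench_test_cn tasks. For reference, both aggregate functions now write language-tagged submission files; this standalone sketch reproduces the naming and serialization, with generate_submission_file's path handling left out and a placeholder results list.

    import datetime
    import json

    # Sketch of the submission artifact; results and the output location are
    # placeholders (the harness routes the name through generate_submission_file).
    results = ["<model prediction>"]
    now = datetime.datetime.now().strftime("%Y-%m%d-%H%M-%S")  # e.g. 2024-0315-1230-45
    for lang in ("en", "cn"):
        name = f"olympiadbench-test-{lang}-submission-{now}.json"
        with open(name, "w") as f:
            json.dump(results, f, ensure_ascii=False)  # keep Chinese answers readable
        print(f"Submission file saved to {name}")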