From a62b8a8576e74942868dcb3d2dda599b09dd95b3 Mon Sep 17 00:00:00 2001
From: Wendong-Fan <133094783+Wendong-Fan@users.noreply.github.com>
Date: Sun, 19 Jan 2025 20:30:59 +0800
Subject: [PATCH] fix: Gemini tool calling support (#1469)

---
 camel/messages/func_message.py     |  4 ++--
 camel/models/gemini_model.py       | 11 ++++++++++-
 test/messages/test_func_message.py |  4 ++--
 3 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/camel/messages/func_message.py b/camel/messages/func_message.py
index 2e10f25d41..3c1d2575c7 100644
--- a/camel/messages/func_message.py
+++ b/camel/messages/func_message.py
@@ -129,7 +129,7 @@ def to_openai_assistant_message(self) -> OpenAIAssistantMessage:
             "content": self.content or "",
             "tool_calls": [
                 {
-                    "id": self.tool_call_id or "",
+                    "id": self.tool_call_id or "null",
                     "type": "function",
                     "function": {
                         "name": self.func_name,
@@ -159,5 +159,5 @@ def to_openai_tool_message(self) -> OpenAIToolMessageParam:
         return {
             "role": "tool",
             "content": result_content,
-            "tool_call_id": self.tool_call_id or "",
+            "tool_call_id": self.tool_call_id or "null",
         }
diff --git a/camel/models/gemini_model.py b/camel/models/gemini_model.py
index 1e5b6b670a..2da873998b 100644
--- a/camel/models/gemini_model.py
+++ b/camel/models/gemini_model.py
@@ -97,8 +97,17 @@ def run(
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        # Process messages to ensure no empty content, as it's not accepted
+        # by Gemini
+        processed_messages = []
+        for msg in messages:
+            msg_copy = msg.copy()
+            if 'content' in msg_copy and msg_copy['content'] == '':
+                msg_copy['content'] = 'null'
+            processed_messages.append(msg_copy)
+
         response = self._client.chat.completions.create(
-            messages=messages,
+            messages=processed_messages,
             model=self.model_type,
             **self.model_config_dict,
         )
diff --git a/test/messages/test_func_message.py b/test/messages/test_func_message.py
index fb6b17b0b3..c6e92bfaff 100644
--- a/test/messages/test_func_message.py
+++ b/test/messages/test_func_message.py
@@ -92,7 +92,7 @@ def test_function_func_message(
     msg_dict: Dict[str, str] = {
         "role": "tool",
         "content": json.dumps(3),
-        "tool_call_id": "",
+        "tool_call_id": "null",
     }
 
     assert function_result_message.to_openai_tool_message() == msg_dict
@@ -103,7 +103,7 @@ def test_assistant_func_message_to_openai_tool_message(
     expected_msg_dict: Dict[str, str] = {
         "role": "tool",
         "content": json.dumps(None),
-        "tool_call_id": "",
+        "tool_call_id": "null",
     }
 
     assert (
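For reference, the preprocessing this patch adds can be exercised in isolation. The sketch below is illustrative only: `ensure_nonempty_content` is not a helper in the CAMEL codebase, it simply mirrors the loop added to `GeminiModel.run`, so the substitution of empty `content` with `"null"` can be checked on sample messages without a Gemini key.

```python
from typing import Dict, List


def ensure_nonempty_content(
    messages: List[Dict[str, str]],
) -> List[Dict[str, str]]:
    """Replace empty message content with 'null', which Gemini rejects."""
    processed = []
    for msg in messages:
        msg_copy = msg.copy()
        # Same condition as the patched GeminiModel.run: only rewrite
        # messages whose 'content' key is present and empty.
        if msg_copy.get("content") == "":
            msg_copy["content"] = "null"
        processed.append(msg_copy)
    return processed


if __name__ == "__main__":
    sample = [
        {"role": "assistant", "content": "", "tool_call_id": "null"},
        {"role": "user", "content": "What is 1 + 2?"},
    ]
    # The assistant message's empty content becomes 'null'; the user
    # message is passed through unchanged.
    print(ensure_nonempty_content(sample))
```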