From d8d04d06f912260707568735e442850b3ead6a39 Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 17:31:39 -0700 Subject: [PATCH 01/11] renamed v2 to data interpreter --- vision_agent/agent/__init__.py | 2 +- .../agent/{vision_agent_v2.py => data_interpreter.py} | 8 ++++---- ...on_agent_v2_prompts.py => data_interpreter_prompts.py} | 0 3 files changed, 5 insertions(+), 5 deletions(-) rename vision_agent/agent/{vision_agent_v2.py => data_interpreter.py} (98%) rename vision_agent/agent/{vision_agent_v2_prompts.py => data_interpreter_prompts.py} (100%) diff --git a/vision_agent/agent/__init__.py b/vision_agent/agent/__init__.py index 2f62dbf1..8f62f770 100644 --- a/vision_agent/agent/__init__.py +++ b/vision_agent/agent/__init__.py @@ -1,7 +1,7 @@ from .agent import Agent from .agent_coder import AgentCoder +from .data_interpreter import DataInterpreter from .easytool import EasyTool from .reflexion import Reflexion from .vision_agent import VisionAgent -from .vision_agent_v2 import VisionAgentV2 from .vision_agent_v3 import VisionAgentV3 diff --git a/vision_agent/agent/vision_agent_v2.py b/vision_agent/agent/data_interpreter.py similarity index 98% rename from vision_agent/agent/vision_agent_v2.py rename to vision_agent/agent/data_interpreter.py index d7bf1372..139795d7 100644 --- a/vision_agent/agent/vision_agent_v2.py +++ b/vision_agent/agent/data_interpreter.py @@ -10,7 +10,7 @@ from tabulate import tabulate from vision_agent.agent import Agent -from vision_agent.agent.vision_agent_v2_prompts import ( +from vision_agent.agent.data_interpreter_prompts import ( CODE, CODE_SYS_MSG, DEBUG, @@ -331,9 +331,9 @@ def run_plan( return current_code, current_test, plan, working_memory -class VisionAgentV2(Agent): - """Vision Agent is an AI agentic framework geared towards outputting Python code to - solve vision tasks. It is inspired by MetaGPT's Data Interpreter +class DataInterpreter(Agent): + """Data Interpreter is an AI agentic framework geared towards outputting Python + code to solve vision tasks. It is inspired by MetaGPT's Data Interpreter https://arxiv.org/abs/2402.18679. 
Vision Agent has several key features to help it generate code: diff --git a/vision_agent/agent/vision_agent_v2_prompts.py b/vision_agent/agent/data_interpreter_prompts.py similarity index 100% rename from vision_agent/agent/vision_agent_v2_prompts.py rename to vision_agent/agent/data_interpreter_prompts.py From 07c9b913f8c63a4514909f4b2369b011996aefb9 Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 18:29:59 -0700 Subject: [PATCH 02/11] renamed vision agent to easytool v2 --- vision_agent/agent/__init__.py | 2 +- vision_agent/agent/data_interpreter.py | 8 ++++---- ...gent_prompts.py => easy_tool_v2_prompts.py} | 0 .../agent/{vision_agent.py => easytool_v2.py} | 18 +++++++++--------- 4 files changed, 14 insertions(+), 14 deletions(-) rename vision_agent/agent/{vision_agent_prompts.py => easy_tool_v2_prompts.py} (100%) rename vision_agent/agent/{vision_agent.py => easytool_v2.py} (98%) diff --git a/vision_agent/agent/__init__.py b/vision_agent/agent/__init__.py index 8f62f770..17661091 100644 --- a/vision_agent/agent/__init__.py +++ b/vision_agent/agent/__init__.py @@ -2,6 +2,6 @@ from .agent_coder import AgentCoder from .data_interpreter import DataInterpreter from .easytool import EasyTool +from .easytool_v2 import EasyToolV2 from .reflexion import Reflexion -from .vision_agent import VisionAgent from .vision_agent_v3 import VisionAgentV3 diff --git a/vision_agent/agent/data_interpreter.py b/vision_agent/agent/data_interpreter.py index 139795d7..2c1711ab 100644 --- a/vision_agent/agent/data_interpreter.py +++ b/vision_agent/agent/data_interpreter.py @@ -332,10 +332,10 @@ def run_plan( class DataInterpreter(Agent): - """Data Interpreter is an AI agentic framework geared towards outputting Python - code to solve vision tasks. It is inspired by MetaGPT's Data Interpreter - https://arxiv.org/abs/2402.18679. Vision Agent has several key features to help it - generate code: + """This version of Data Interpreter is an AI agentic framework geared towards + outputting Python code to solve vision tasks. It is inspired by MetaGPT's Data + Interpreter https://arxiv.org/abs/2402.18679. This version of Data Interpreter has + several key features to help it generate code: - A planner to generate a plan of tasks to solve a user requirement. The planner can output code tasks or test tasks, where test tasks are used to verify the code. diff --git a/vision_agent/agent/vision_agent_prompts.py b/vision_agent/agent/easy_tool_v2_prompts.py similarity index 100% rename from vision_agent/agent/vision_agent_prompts.py rename to vision_agent/agent/easy_tool_v2_prompts.py diff --git a/vision_agent/agent/vision_agent.py b/vision_agent/agent/easytool_v2.py similarity index 98% rename from vision_agent/agent/vision_agent.py rename to vision_agent/agent/easytool_v2.py index 2db933d9..acdfa337 100644 --- a/vision_agent/agent/vision_agent.py +++ b/vision_agent/agent/easytool_v2.py @@ -17,7 +17,7 @@ TASK_DECOMPOSE, TASK_TOPOLOGY, ) -from vision_agent.agent.vision_agent_prompts import ( +from vision_agent.agent.easytool_v2_prompts import ( ANSWER_GENERATE_DEPENDS, ANSWER_SUMMARIZE_DEPENDS, CHOOSE_PARAMETER_DEPENDS, @@ -427,9 +427,9 @@ def visualize_result(all_tool_results: List[Dict]) -> Sequence[Union[str, Path]] return visualized_images -class VisionAgent(Agent): - r"""Vision Agent is an agent framework that utilizes tools as well as self - reflection to accomplish tasks, in particular vision tasks. 
Vision Agent is based +class EasyToolV2(Agent): + r"""EasyToolV2 is an agent framework that utilizes tools as well as self + reflection to accomplish tasks, in particular vision tasks. EasyToolV2 is based off of EasyTool https://arxiv.org/abs/2401.06201 and Reflexion https://arxiv.org/abs/2303.11366 where it will attempt to complete a task and then reflect on whether or not it was able to accomplish the task based off of the plan @@ -437,8 +437,8 @@ class VisionAgent(Agent): Example ------- - >>> from vision_agent.agent import VisionAgent - >>> agent = VisionAgent() + >>> from vision_agent.agent import EasyToolV2 + >>> agent = EasyToolV2() >>> resp = agent("If red tomatoes cost $5 each and yellow tomatoes cost $2.50 each, what is the total cost of all the tomatoes in the image?", image="tomatoes.jpg") >>> print(resp) "The total cost is $57.50." @@ -453,7 +453,7 @@ def __init__( verbose: bool = False, report_progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None, ): - """VisionAgent constructor. + """EasyToolV2 constructor. Parameters: task_model: the model to use for task decomposition. @@ -461,7 +461,7 @@ def __init__( reflect_model: the model to use for self reflection. max_retries: maximum number of retries to attempt to complete the task. verbose: whether to print more logs. - report_progress_callback: a callback to report the progress of the agent. This is useful for streaming logs in a web application where multiple VisionAgent instances are running in parallel. This callback ensures that the progress are not mixed up. + report_progress_callback: a callback to report the progress of the agent. This is useful for streaming logs in a web application where multiple EasyToolV2 instances are running in parallel. This callback ensures that the progress are not mixed up. """ self.task_model = ( OpenAILLM(model_name="gpt-4-turbo", json_mode=True, temperature=0.0) @@ -658,7 +658,7 @@ def chat_with_workflow( # '' is a symbol to indicate the end of the chat, which is useful for streaming logs. self.log_progress( { - "log": f"The Vision Agent has concluded this chat. {final_answer}" + "log": f"EasyToolV2 has concluded this chat. 
{final_answer}" } ) From a4e69a95f6a0dc679655e23d12f69cedc9d42f85 Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 18:31:41 -0700 Subject: [PATCH 03/11] renamed vision agent v3 to vision agent --- vision_agent/agent/__init__.py | 2 +- vision_agent/agent/{vision_agent_v3.py => vision_agent.py} | 5 ++--- .../{vision_agent_v3_prompts.py => vision_agent_prompts.py} | 0 3 files changed, 3 insertions(+), 4 deletions(-) rename vision_agent/agent/{vision_agent_v3.py => vision_agent.py} (99%) rename vision_agent/agent/{vision_agent_v3_prompts.py => vision_agent_prompts.py} (100%) diff --git a/vision_agent/agent/__init__.py b/vision_agent/agent/__init__.py index 17661091..3d989b34 100644 --- a/vision_agent/agent/__init__.py +++ b/vision_agent/agent/__init__.py @@ -4,4 +4,4 @@ from .easytool import EasyTool from .easytool_v2 import EasyToolV2 from .reflexion import Reflexion -from .vision_agent_v3 import VisionAgentV3 +from .vision_agent import VisionAgent diff --git a/vision_agent/agent/vision_agent_v3.py b/vision_agent/agent/vision_agent.py similarity index 99% rename from vision_agent/agent/vision_agent_v3.py rename to vision_agent/agent/vision_agent.py index d9fb8821..fdd5f7b8 100644 --- a/vision_agent/agent/vision_agent_v3.py +++ b/vision_agent/agent/vision_agent.py @@ -10,7 +10,7 @@ from tabulate import tabulate from vision_agent.agent import Agent -from vision_agent.agent.vision_agent_v3_prompts import ( +from vision_agent.agent.vision_agent_prompts import ( CODE, FEEDBACK, FIX_BUG, @@ -244,10 +244,9 @@ def retrieve_tools( return "\n\n".join(tool_info_set) -class VisionAgentV3(Agent): +class VisionAgent(Agent): def __init__( self, - timeout: int = 600, planner: Optional[LLM] = None, coder: Optional[LLM] = None, tester: Optional[LLM] = None, diff --git a/vision_agent/agent/vision_agent_v3_prompts.py b/vision_agent/agent/vision_agent_prompts.py similarity index 100% rename from vision_agent/agent/vision_agent_v3_prompts.py rename to vision_agent/agent/vision_agent_prompts.py From 7d1e80ec56e2e4aafb6284a889066e8220532344 Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 18:59:14 -0700 Subject: [PATCH 04/11] renamed image to media --- tests/test_vision_agent.py | 2 +- vision_agent/agent/agent.py | 2 +- vision_agent/agent/agent_coder.py | 10 +++++----- vision_agent/agent/data_interpreter.py | 10 +++++----- vision_agent/agent/easytool.py | 14 +++++++------- vision_agent/agent/easytool_v2.py | 18 +++++++++--------- ...ol_v2_prompts.py => easytool_v2_prompts.py} | 0 vision_agent/agent/reflexion.py | 16 ++++++++-------- vision_agent/agent/vision_agent.py | 12 ++++++------ 9 files changed, 42 insertions(+), 42 deletions(-) rename vision_agent/agent/{easy_tool_v2_prompts.py => easytool_v2_prompts.py} (100%) diff --git a/tests/test_vision_agent.py b/tests/test_vision_agent.py index 98df1f3f..e05b4914 100644 --- a/tests/test_vision_agent.py +++ b/tests/test_vision_agent.py @@ -1,4 +1,4 @@ -from vision_agent.agent.vision_agent import sample_n_evenly_spaced +from vision_agent.agent.easytool_v2 import sample_n_evenly_spaced def test_sample_n_evenly_spaced_side_cases(): diff --git a/vision_agent/agent/agent.py b/vision_agent/agent/agent.py index 135319be..ec47ff86 100644 --- a/vision_agent/agent/agent.py +++ b/vision_agent/agent/agent.py @@ -8,7 +8,7 @@ class Agent(ABC): def __call__( self, input: Union[List[Dict[str, str]], str], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, ) -> str: pass diff --git 
a/vision_agent/agent/agent_coder.py b/vision_agent/agent/agent_coder.py index 0e4129ed..36c49bfa 100644 --- a/vision_agent/agent/agent_coder.py +++ b/vision_agent/agent/agent_coder.py @@ -150,20 +150,20 @@ def __init__( def __call__( self, input: Union[List[Dict[str, str]], str], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, ) -> str: if isinstance(input, str): input = [{"role": "user", "content": input}] - return self.chat(input, image) + return self.chat(input, media) def chat( self, input: List[Dict[str, str]], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, ) -> str: question = input[0]["content"] - if image: - question += f" Input file path: {os.path.abspath(image)}" + if media: + question += f" Input file path: {os.path.abspath(media)}" code = "" feedback = "" diff --git a/vision_agent/agent/data_interpreter.py b/vision_agent/agent/data_interpreter.py index 2c1711ab..d03e4baa 100644 --- a/vision_agent/agent/data_interpreter.py +++ b/vision_agent/agent/data_interpreter.py @@ -379,29 +379,29 @@ def __init__( def __call__( self, input: Union[List[Dict[str, str]], str], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, plan: Optional[List[Dict[str, Any]]] = None, ) -> str: if isinstance(input, str): input = [{"role": "user", "content": input}] - results = self.chat_with_workflow(input, image, plan) + results = self.chat_with_workflow(input, media, plan) return results["code"] # type: ignore @traceable def chat_with_workflow( self, chat: List[Dict[str, str]], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, plan: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: if len(chat) == 0: raise ValueError("Input cannot be empty.") - if image is not None: + if media is not None: # append file names to all user messages for chat_i in chat: if chat_i["role"] == "user": - chat_i["content"] += f" Image name {image}" + chat_i["content"] += f" Image name {media}" working_code = "" if plan is not None: diff --git a/vision_agent/agent/easytool.py b/vision_agent/agent/easytool.py index 72a6fd75..9357ac12 100644 --- a/vision_agent/agent/easytool.py +++ b/vision_agent/agent/easytool.py @@ -272,7 +272,7 @@ def __init__( def __call__( self, input: Union[List[Dict[str, str]], str], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, ) -> str: """Invoke the vision agent. 
@@ -285,14 +285,14 @@ def __call__( """ if isinstance(input, str): input = [{"role": "user", "content": input}] - return self.chat(input, image=image) + return self.chat(input, media=media) def chat_with_workflow( - self, chat: List[Dict[str, str]], image: Optional[Union[str, Path]] = None + self, chat: List[Dict[str, str]], media: Optional[Union[str, Path]] = None ) -> Tuple[str, List[Dict]]: question = chat[0]["content"] - if image: - question += f" Image name: {image}" + if media: + question += f" Image name: {media}" tasks = task_decompose( self.task_model, question, @@ -340,7 +340,7 @@ def chat_with_workflow( return answer_summarize(self.answer_model, question, answers), all_tool_results def chat( - self, chat: List[Dict[str, str]], image: Optional[Union[str, Path]] = None + self, chat: List[Dict[str, str]], media: Optional[Union[str, Path]] = None ) -> str: - answer, _ = self.chat_with_workflow(chat, image=image) + answer, _ = self.chat_with_workflow(chat, media=media) return answer diff --git a/vision_agent/agent/easytool_v2.py b/vision_agent/agent/easytool_v2.py index acdfa337..83565c0d 100644 --- a/vision_agent/agent/easytool_v2.py +++ b/vision_agent/agent/easytool_v2.py @@ -487,7 +487,7 @@ def __init__( def __call__( self, input: Union[List[Dict[str, str]], str], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, reference_data: Optional[Dict[str, str]] = None, visualize_output: Optional[bool] = False, self_reflection: Optional[bool] = True, @@ -512,7 +512,7 @@ def __call__( input = [{"role": "user", "content": input}] return self.chat( input, - image=image, + media=media, visualize_output=visualize_output, reference_data=reference_data, self_reflection=self_reflection, @@ -539,7 +539,7 @@ def _report_visualization_via_callback( def chat_with_workflow( self, chat: List[Dict[str, str]], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, reference_data: Optional[Dict[str, str]] = None, visualize_output: Optional[bool] = False, self_reflection: Optional[bool] = True, @@ -566,8 +566,8 @@ def chat_with_workflow( raise ValueError("Input cannot be empty.") question = chat[0]["content"] - if image: - question += f" Image name: {image}" + if media: + question += f" Image name: {media}" if reference_data: question += ( f" Reference image: {reference_data['image']}" @@ -630,8 +630,8 @@ def chat_with_workflow( all_tool_results.append({"visualized_output": visualized_output}) if len(visualized_output) > 0: reflection_images = sample_n_evenly_spaced(visualized_output, 3) - elif image is not None: - reflection_images = [image] + elif media is not None: + reflection_images = [media] else: reflection_images = None @@ -675,14 +675,14 @@ def chat_with_workflow( def chat( self, chat: List[Dict[str, str]], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, reference_data: Optional[Dict[str, str]] = None, visualize_output: Optional[bool] = False, self_reflection: Optional[bool] = True, ) -> str: answer, _ = self.chat_with_workflow( chat, - image=image, + media=media, visualize_output=visualize_output, reference_data=reference_data, self_reflection=self_reflection, diff --git a/vision_agent/agent/easy_tool_v2_prompts.py b/vision_agent/agent/easytool_v2_prompts.py similarity index 100% rename from vision_agent/agent/easy_tool_v2_prompts.py rename to vision_agent/agent/easytool_v2_prompts.py diff --git a/vision_agent/agent/reflexion.py b/vision_agent/agent/reflexion.py index 61dded6d..d3b479b2 
100644 --- a/vision_agent/agent/reflexion.py +++ b/vision_agent/agent/reflexion.py @@ -138,7 +138,7 @@ def __init__( def __call__( self, input: Union[str, List[Dict[str, str]]], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, ) -> str: """Invoke the vision agent. @@ -151,24 +151,24 @@ def __call__( """ if isinstance(input, str): input = [{"role": "user", "content": input}] - return self.chat(input, image) + return self.chat(input, media) def chat( - self, chat: List[Dict[str, str]], image: Optional[Union[str, Path]] = None + self, chat: List[Dict[str, str]], media: Optional[Union[str, Path]] = None ) -> str: if len(chat) == 0 or chat[0]["role"] != "user": raise ValueError( f"Invalid chat. Should start with user and alternate between user" f"and assistant and contain at least one entry {chat}" ) - if image is not None and isinstance(self.action_agent, LLM): + if media is not None and isinstance(self.action_agent, LLM): raise ValueError( "If image is provided, then action_agent must be an agent or LMM." ) question = chat[0]["content"] if len(chat) == 1: - results = self._step(question, image=image) + results = self._step(question, image=media) self.last_scratchpad = results["scratchpad"] return results["action_arg"] @@ -183,10 +183,10 @@ def chat( self.last_scratchpad += "Answer is INCORRECT" chat_context = "The previous conversation was:\n" + chat_str reflections = self.reflect( - question, chat_context, self.last_scratchpad, image + question, chat_context, self.last_scratchpad, media ) _LOGGER.info(f" {reflections}") - results = self._step(question, reflections, image=image) + results = self._step(question, reflections, image=media) self.last_scratchpad = results["scratchpad"] return results["action_arg"] @@ -249,7 +249,7 @@ def prompt_agent( return format_step( self.action_agent( self._build_agent_prompt(question, reflections, scratchpad), - image=image, + media=image, ) ) diff --git a/vision_agent/agent/vision_agent.py b/vision_agent/agent/vision_agent.py index fdd5f7b8..bf83fbc4 100644 --- a/vision_agent/agent/vision_agent.py +++ b/vision_agent/agent/vision_agent.py @@ -277,27 +277,27 @@ def __init__( def __call__( self, input: Union[List[Dict[str, str]], str], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, ) -> Dict[str, Any]: if isinstance(input, str): input = [{"role": "user", "content": input}] - results = self.chat_with_workflow(input, image) + results = self.chat_with_workflow(input, media) results.pop("working_memory") return results def chat_with_workflow( self, chat: List[Dict[str, str]], - image: Optional[Union[str, Path]] = None, + media: Optional[Union[str, Path]] = None, self_reflection: bool = False, ) -> Dict[str, Any]: if len(chat) == 0: raise ValueError("Chat cannot be empty.") - if image is not None: + if media is not None: for chat_i in chat: if chat_i["role"] == "user": - chat_i["content"] += f" Image name {image}" + chat_i["content"] += f" Image name {media}" code = "" test = "" @@ -341,7 +341,7 @@ def chat_with_workflow( self.debugger, self.log_progress, verbosity=self.verbosity, - input_media=image, + input_media=media, ) success = cast(bool, results["success"]) code = cast(str, results["code"]) From 0a7fceb94fdcb4886fbb76025b3d22c4ade179cc Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 20:59:46 -0700 Subject: [PATCH 05/11] moved tools_v2 to tools --- tests/fixtures.py | 2 +- tests/test_tools.py | 2 +- tests/tools/test_tools.py | 4 +- 
vision_agent/agent/agent_coder.py | 4 +- vision_agent/agent/data_interpreter.py | 2 +- .../agent/data_interpreter_prompts.py | 6 +- vision_agent/agent/easytool.py | 2 +- vision_agent/agent/easytool_v2.py | 2 +- vision_agent/agent/vision_agent.py | 2 +- vision_agent/agent/vision_agent_prompts.py | 8 +- vision_agent/llm/llm.py | 7 +- vision_agent/lmm/lmm.py | 12 +- vision_agent/tools/__init__.py | 43 +- vision_agent/tools/easytool_tools.py | 1242 +++++++++++++ vision_agent/tools/tools.py | 1623 ++++++----------- vision_agent/tools/tools_v2.py | 685 ------- 16 files changed, 1822 insertions(+), 1824 deletions(-) create mode 100644 vision_agent/tools/easytool_tools.py delete mode 100644 vision_agent/tools/tools_v2.py diff --git a/tests/fixtures.py b/tests/fixtures.py index 036ed9d6..4479e66f 100644 --- a/tests/fixtures.py +++ b/tests/fixtures.py @@ -2,7 +2,7 @@ import pytest -from vision_agent.tools import CLIP, GroundingDINO, GroundingSAM +from vision_agent.tools.easytool_tools import CLIP, GroundingDINO, GroundingSAM @pytest.fixture diff --git a/tests/test_tools.py b/tests/test_tools.py index 56ca2e02..e5ebe4f3 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -1,6 +1,6 @@ import skimage as ski -from vision_agent.tools.tools_v2 import ( +from vision_agent.tools import ( clip, grounding_dino, grounding_sam, diff --git a/tests/tools/test_tools.py b/tests/tools/test_tools.py index 648dbea9..5ac5a8c9 100644 --- a/tests/tools/test_tools.py +++ b/tests/tools/test_tools.py @@ -5,8 +5,8 @@ import pytest from PIL import Image -from vision_agent.tools import TOOLS, Tool, register_tool -from vision_agent.tools.tools import BboxIoU, BoxDistance, MaskDistance, SegArea, SegIoU +from vision_agent.tools.easytool_tools import TOOLS, Tool, register_tool +from vision_agent.tools.easytool_tools import BboxIoU, BoxDistance, MaskDistance, SegArea, SegIoU def test_bbox_iou(): diff --git a/vision_agent/agent/agent_coder.py b/vision_agent/agent/agent_coder.py index 36c49bfa..bba539f2 100644 --- a/vision_agent/agent/agent_coder.py +++ b/vision_agent/agent/agent_coder.py @@ -18,7 +18,7 @@ ) from vision_agent.llm import LLM, OpenAILLM from vision_agent.lmm import LMM, OpenAILMM -from vision_agent.tools.tools_v2 import TOOL_DOCSTRING, UTILITIES_DOCSTRING +from vision_agent.tools import TOOL_DOCSTRING, UTILITIES_DOCSTRING from vision_agent.utils import Execute IMPORT_HELPER = """ @@ -38,7 +38,7 @@ import string from typing import * from collections import * -from vision_agent.tools.tools_v2 import * +from vision_agent.tools import * """ logging.basicConfig(stream=sys.stdout) _LOGGER = logging.getLogger(__name__) diff --git a/vision_agent/agent/data_interpreter.py b/vision_agent/agent/data_interpreter.py index d03e4baa..cabf0240 100644 --- a/vision_agent/agent/data_interpreter.py +++ b/vision_agent/agent/data_interpreter.py @@ -25,7 +25,7 @@ USER_REQ_SUBTASK_WM_CONTEXT, ) from vision_agent.llm import LLM, OpenAILLM -from vision_agent.tools.tools_v2 import TOOL_DESCRIPTIONS, TOOLS_DF +from vision_agent.tools import TOOL_DESCRIPTIONS, TOOLS_DF from vision_agent.utils import Execute, Sim logging.basicConfig(level=logging.INFO) diff --git a/vision_agent/agent/data_interpreter_prompts.py b/vision_agent/agent/data_interpreter_prompts.py index 87895da0..998ccf97 100644 --- a/vision_agent/agent/data_interpreter_prompts.py +++ b/vision_agent/agent/data_interpreter_prompts.py @@ -74,15 +74,15 @@ # Constraints - Write a function that accomplishes the 'Current Subtask'. 
You are supplied code from a previous task under 'Previous Code', do not delete or change previous code unless it contains a bug or it is necessary to complete the 'Current Subtask'. -- Always prioritize using pre-defined tools or code for the same functionality from 'Tool Info' when working on 'Current Subtask'. You have access to all these tools through the `from vision_agent.tools.tools_v2 import *` import. +- Always prioritize using pre-defined tools or code for the same functionality from 'Tool Info' when working on 'Current Subtask'. You have access to all these tools through the `from vision_agent.tools import *` import. - You may recieve previous trials and errors under 'Previous Task', this is code, output and reflections from previous tasks. You can use these to avoid running in to the same issues when writing your code. -- Use the `save_json` function from `vision_agent.tools.tools_v2` to save your output as a json file. +- Use the `save_json` function from `vision_agent.tools` to save your output as a json file. - Write clean, readable, and well-documented code. # Output While some concise thoughts are helpful, code is absolutely required. If possible, execute your defined functions in the code output. Output code in the following format: ```python -from vision_agent.tools.tools_v2 imoprt * +from vision_agent.tools imoprt * # your code goes here ``` diff --git a/vision_agent/agent/easytool.py b/vision_agent/agent/easytool.py index 9357ac12..4d05838e 100644 --- a/vision_agent/agent/easytool.py +++ b/vision_agent/agent/easytool.py @@ -6,7 +6,7 @@ from vision_agent.llm import LLM, OpenAILLM from vision_agent.lmm import LMM -from vision_agent.tools import TOOLS +from vision_agent.tools.easytool_tools import TOOLS from .agent import Agent from .easytool_prompts import ( diff --git a/vision_agent/agent/easytool_v2.py b/vision_agent/agent/easytool_v2.py index 83565c0d..035dc391 100644 --- a/vision_agent/agent/easytool_v2.py +++ b/vision_agent/agent/easytool_v2.py @@ -27,7 +27,7 @@ ) from vision_agent.llm import LLM, OpenAILLM from vision_agent.lmm import LMM, OpenAILMM -from vision_agent.tools import TOOLS +from vision_agent.tools.easytool_tools import TOOLS from vision_agent.utils.image_utils import ( convert_to_b64, overlay_bboxes, diff --git a/vision_agent/agent/vision_agent.py b/vision_agent/agent/vision_agent.py index bf83fbc4..08aa8917 100644 --- a/vision_agent/agent/vision_agent.py +++ b/vision_agent/agent/vision_agent.py @@ -21,7 +21,7 @@ USER_REQ, ) from vision_agent.llm import LLM, OpenAILLM -from vision_agent.tools.tools_v2 import TOOL_DESCRIPTIONS, TOOLS_DF, UTILITIES_DOCSTRING +from vision_agent.tools import TOOL_DESCRIPTIONS, TOOLS_DF, UTILITIES_DOCSTRING from vision_agent.utils import Execute from vision_agent.utils.sim import Sim diff --git a/vision_agent/agent/vision_agent_prompts.py b/vision_agent/agent/vision_agent_prompts.py index d1e6077b..6041bfc3 100644 --- a/vision_agent/agent/vision_agent_prompts.py +++ b/vision_agent/agent/vision_agent_prompts.py @@ -49,7 +49,7 @@ **Task**: As a programmer, you are required to complete the function. Use a Chain-of-Thought approach to break down the problem, create pseudocode, and then write the code in Python language. Ensure that your code is efficient, readable, and well-commented. Return the requested information from the function you create. Do not call your code, a test will be run after the code is submitted. **Documentation**: -This is the documentation for the functions you have access to. 
You may call any of these functions to help you complete the task. They are available through importing `from vision_agent.tools.tools_v2 import *`. +This is the documentation for the functions you have access to. You may call any of these functions to help you complete the task. They are available through importing `from vision_agent.tools import *`. {docstring} @@ -69,14 +69,14 @@ 2. **Algorithm/Method Selection**: Decide on the most efficient way. 3. **Pseudocode Creation**: Write down the steps you will follow in pseudocode. 4. **Code Generation**: Translate your pseudocode into executable Python code. -5. **Logging**: Log the output of the custom functions that were provided to you from `from vision_agent.tools.tools_v2 import *`. Use a debug flag in the function parameters to toggle logging on and off. +5. **Logging**: Log the output of the custom functions that were provided to you from `from vision_agent.tools import *`. Use a debug flag in the function parameters to toggle logging on and off. """ TEST = """ **Role**: As a tester, your task is to create comprehensive test cases for the provided code. These test cases should encompass Basic and Edge case scenarios to ensure the code's robustness and reliability if possible. **Documentation**: -This is the documentation for the functions you have access to. You may call any of these functions to help you complete the task. They are available through importing `from vision_agent.tools.tools_v2 import *`. You do not need to test these functions. Test only the code provided by the user. +This is the documentation for the functions you have access to. You may call any of these functions to help you complete the task. They are available through importing `from vision_agent.tools import *`. You do not need to test these functions. Test only the code provided by the user. {docstring} @@ -149,7 +149,7 @@ def find_text(image_path: str, text: str) -> str: **Role**: As a tester, your task is to create a simple test case for the provided code. This test case should verify the fundamental functionality under normal conditions. **Documentation**: -This is the documentation for the functions you have access to. You may call any of these functions to help you complete the task. They are available through importing `from vision_agent.tools.tools_v2 import *`. You do not need to test these functions, only the code provided by the user. +This is the documentation for the functions you have access to. You may call any of these functions to help you complete the task. They are available through importing `from vision_agent.tools import *`. You do not need to test these functions, only the code provided by the user. 
{docstring} diff --git a/vision_agent/llm/llm.py b/vision_agent/llm/llm.py index a0035b29..7904cea0 100644 --- a/vision_agent/llm/llm.py +++ b/vision_agent/llm/llm.py @@ -6,14 +6,13 @@ from langsmith.wrappers import wrap_openai from openai import AzureOpenAI, OpenAI -from vision_agent.tools import ( - CHOOSE_PARAMS, +from vision_agent.tools.easytool_tools import ( CLIP, - SYSTEM_PROMPT, GroundingDINO, GroundingSAM, ZeroShotCounting, ) +from vision_agent.tools.prompts import CHOOSE_PARAMS, SYSTEM_PROMPT class LLM(ABC): @@ -141,7 +140,7 @@ def generate_zero_shot_counter(self, question: str) -> Callable: return lambda x: ZeroShotCounting()(**{"image": x}) def generate_image_qa_tool(self, question: str) -> Callable: - from vision_agent.tools import ImageQuestionAnswering + from vision_agent.tools.easytool_tools import ImageQuestionAnswering return lambda x: ImageQuestionAnswering()(**{"prompt": question, "image": x}) diff --git a/vision_agent/lmm/lmm.py b/vision_agent/lmm/lmm.py index cc8861bd..a8fa8312 100644 --- a/vision_agent/lmm/lmm.py +++ b/vision_agent/lmm/lmm.py @@ -9,7 +9,7 @@ import requests from openai import AzureOpenAI, OpenAI -from vision_agent.tools import CHOOSE_PARAMS, SYSTEM_PROMPT +from vision_agent.tools.prompts import CHOOSE_PARAMS, SYSTEM_PROMPT _LOGGER = logging.getLogger(__name__) @@ -198,7 +198,7 @@ def generate( return cast(str, response.choices[0].message.content) def generate_classifier(self, question: str) -> Callable: - from vision_agent.tools import CLIP + from vision_agent.tools.easytool_tools import CLIP api_doc = CLIP.description + "\n" + str(CLIP.usage) prompt = CHOOSE_PARAMS.format(api_doc=api_doc, question=question) @@ -223,7 +223,7 @@ def generate_classifier(self, question: str) -> Callable: return lambda x: CLIP()(**{"prompt": params["prompt"], "image": x}) def generate_detector(self, question: str) -> Callable: - from vision_agent.tools import GroundingDINO + from vision_agent.tools.easytool_tools import GroundingDINO api_doc = GroundingDINO.description + "\n" + str(GroundingDINO.usage) prompt = CHOOSE_PARAMS.format(api_doc=api_doc, question=question) @@ -248,7 +248,7 @@ def generate_detector(self, question: str) -> Callable: return lambda x: GroundingDINO()(**{"prompt": params["prompt"], "image": x}) def generate_segmentor(self, question: str) -> Callable: - from vision_agent.tools import GroundingSAM + from vision_agent.tools.easytool_tools import GroundingSAM api_doc = GroundingSAM.description + "\n" + str(GroundingSAM.usage) prompt = CHOOSE_PARAMS.format(api_doc=api_doc, question=question) @@ -273,12 +273,12 @@ def generate_segmentor(self, question: str) -> Callable: return lambda x: GroundingSAM()(**{"prompt": params["prompt"], "image": x}) def generate_zero_shot_counter(self, question: str) -> Callable: - from vision_agent.tools import ZeroShotCounting + from vision_agent.tools.easytool_tools import ZeroShotCounting return lambda x: ZeroShotCounting()(**{"image": x}) def generate_image_qa_tool(self, question: str) -> Callable: - from vision_agent.tools import ImageQuestionAnswering + from vision_agent.tools.easytool_tools import ImageQuestionAnswering return lambda x: ImageQuestionAnswering()(**{"prompt": question, "image": x}) diff --git a/vision_agent/tools/__init__.py b/vision_agent/tools/__init__.py index 08a96d81..a2ab06b3 100644 --- a/vision_agent/tools/__init__.py +++ b/vision_agent/tools/__init__.py @@ -1,25 +1,24 @@ from .prompts import CHOOSE_PARAMS, SYSTEM_PROMPT -from .tools import ( # Counter, - CLIP, - OCR, +from .tools import ( + 
TOOL_DESCRIPTIONS, + TOOL_DOCSTRING, TOOLS, - BboxIoU, - BboxStats, - BoxDistance, - Crop, - DINOv, - ExtractFrames, - GroundingDINO, - GroundingSAM, - ImageCaption, - ImageQuestionAnswering, - MaskDistance, - ObjectDistance, - SegArea, - SegIoU, - Tool, - VisualPromptCounting, - VisualQuestionAnswering, - ZeroShotCounting, - register_tool, + TOOLS_DF, + UTILITIES_DOCSTRING, + clip, + closest_box_distance, + closest_mask_distance, + extract_frames, + grounding_dino, + grounding_sam, + image_caption, + image_question_answering, + load_image, + ocr, + overlay_bounding_boxes, + overlay_segmentation_masks, + save_image, + save_json, + visual_prompt_counting, + zero_shot_counting, ) diff --git a/vision_agent/tools/easytool_tools.py b/vision_agent/tools/easytool_tools.py new file mode 100644 index 00000000..fdbc1fe2 --- /dev/null +++ b/vision_agent/tools/easytool_tools.py @@ -0,0 +1,1242 @@ +import io +import logging +import tempfile +from abc import ABC +from pathlib import Path +from typing import Any, Dict, List, Tuple, Type, Union, cast + +import numpy as np +import requests +from PIL import Image +from PIL.Image import Image as ImageType +from scipy.spatial import distance # type: ignore + +from vision_agent.lmm import OpenAILMM +from vision_agent.tools.tool_utils import _send_inference_request +from vision_agent.utils import extract_frames_from_video +from vision_agent.utils.image_utils import ( + b64_to_pil, + convert_to_b64, + denormalize_bbox, + get_image_size, + normalize_bbox, + rle_decode, +) + +_LOGGER = logging.getLogger(__name__) + + +class Tool(ABC): + name: str + description: str + usage: Dict + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + raise NotImplementedError + + +class NoOp(Tool): + name = "noop_" + description = "'noop_' is a no-op tool that does nothing if you do not want answer the question directly and not use a tool." + usage = { + "required_parameters": [], + "examples": [ + { + "scenario": "If you do not want to use a tool.", + "parameters": {}, + } + ], + } + + def __call__(self) -> None: + return None + + +class CLIP(Tool): + r"""CLIP is a tool that can classify or tag any image given a set of input classes + or tags. + + Example + ------- + >>> import vision_agent as va + >>> clip = va.tools.CLIP() + >>> clip("red line, yellow dot", "ct_scan1.jpg")) + [{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}] + """ + + name = "clip_" + description = "'clip_' is a tool that can classify any image given a set of input names or tags. It returns a list of the input names along with their probability scores." + usage = { + "required_parameters": [ + {"name": "prompt", "type": "str"}, + {"name": "image", "type": "str"}, + ], + "examples": [ + { + "scenario": "Can you classify this image as a cat? Image name: cat.jpg", + "parameters": {"prompt": "cat", "image": "cat.jpg"}, + }, + { + "scenario": "Can you tag this photograph with cat or dog? Image name: cat_dog.jpg", + "parameters": {"prompt": "cat, dog", "image": "cat_dog.jpg"}, + }, + { + "scenario": "Can you build me a classifier that classifies red shirts, green shirts and other? Image name: shirts.jpg", + "parameters": { + "prompt": "red shirt, green shirt, other", + "image": "shirts.jpg", + }, + }, + ], + } + + # TODO: Add support for input multiple images, which aligns with the output type. + def __call__(self, prompt: str, image: Union[str, ImageType]) -> Dict: + """Invoke the CLIP model. + + Parameters: + prompt: a string includes a list of classes or tags to classify the image. 
+ image: the input image to classify. + + Returns: + A list of dictionaries containing the labels and scores. Each dictionary contains the classification result for an image. E.g. [{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}] + """ + image_b64 = convert_to_b64(image) + data = { + "prompt": prompt, + "image": image_b64, + "tool": "closed_set_image_classification", + } + resp_data = _send_inference_request(data, "tools") + resp_data["scores"] = [round(prob, 4) for prob in resp_data["scores"]] + return resp_data + + +class ImageCaption(Tool): + r"""ImageCaption is a tool that can caption an image based on its contents or tags. + + Example + ------- + >>> import vision_agent as va + >>> caption = va.tools.ImageCaption() + >>> caption("image1.jpg") + {'text': ['a box of orange and white socks']} + """ + + name = "image_caption_" + description = "'image_caption_' is a tool that can caption an image based on its contents or tags. It returns a text describing the image." + usage = { + "required_parameters": [ + {"name": "image", "type": "str"}, + ], + "examples": [ + { + "scenario": "Can you describe this image? Image name: cat.jpg", + "parameters": {"image": "cat.jpg"}, + }, + { + "scenario": "Can you caption this image with their main contents? Image name: cat_dog.jpg", + "parameters": {"image": "cat_dog.jpg"}, + }, + ], + } + + # TODO: Add support for input multiple images, which aligns with the output type. + def __call__(self, image: Union[str, ImageType]) -> Dict: + """Invoke the Image captioning model. + + Parameters: + image: the input image to caption. + + Returns: + A list of dictionaries containing the labels and scores. Each dictionary contains the classification result for an image. E.g. [{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}] + """ + image_b64 = convert_to_b64(image) + data = { + "image": image_b64, + "tool": "image_captioning", + } + return _send_inference_request(data, "tools") + + +class GroundingDINO(Tool): + r"""Grounding DINO is a tool that can detect arbitrary objects with inputs such as + category names or referring expressions. + + Example + ------- + >>> import vision_agent as va + >>> t = va.tools.GroundingDINO() + >>> t("red line. yellow dot", "ct_scan1.jpg") + [{'labels': ['red line', 'yellow dot'], + 'bboxes': [[0.38, 0.15, 0.59, 0.7], [0.48, 0.25, 0.69, 0.71]], + 'scores': [0.98, 0.02]}] + """ + + name = "grounding_dino_" + description = "'grounding_dino_' is a tool that can detect and count multiple objects given a text prompt such as category names or referring expressions. It returns a list and count of bounding boxes, label names and associated probability scores." + usage = { + "required_parameters": [ + {"name": "prompt", "type": "str"}, + {"name": "image", "type": "str"}, + ], + "optional_parameters": [ + {"name": "box_threshold", "type": "float", "min": 0.1, "max": 0.5}, + {"name": "iou_threshold", "type": "float", "min": 0.01, "max": 0.99}, + ], + "examples": [ + { + "scenario": "Can you detect and count the giraffes and zebras in this image? Image name: animal.jpg", + "parameters": { + "prompt": "giraffe. zebra", + "image": "person.jpg", + }, + }, + { + "scenario": "Can you build me a car detector?", + "parameters": {"prompt": "car", "image": ""}, + }, + { + "scenario": "Can you detect the person on the left and right? Image name: person.jpg", + "parameters": { + "prompt": "left person. right person", + "image": "person.jpg", + }, + }, + { + "scenario": "Detect the red shirts and green shirt. 
Image name: shirts.jpg", + "parameters": { + "prompt": "red shirt. green shirt", + "image": "shirts.jpg", + "box_threshold": 0.20, + "iou_threshold": 0.20, + }, + }, + ], + } + + # TODO: Add support for input multiple images, which aligns with the output type. + def __call__( + self, + prompt: str, + image: Union[str, Path, ImageType], + box_threshold: float = 0.20, + iou_threshold: float = 0.20, + ) -> Dict: + """Invoke the Grounding DINO model. + + Parameters: + prompt: one or multiple class names to detect. The classes should be separated by a period if there are multiple classes. E.g. "big dog . small cat" + image: the input image to run against. + box_threshold: the threshold to filter out the bounding boxes with low scores. + iou_threshold: the threshold for intersection over union used in nms algorithm. It will suppress the boxes which have iou greater than this threshold. + + Returns: + A dictionary containing the labels, scores, and bboxes, which is the detection result for the input image. + """ + image_size = get_image_size(image) + image_b64 = convert_to_b64(image) + request_data = { + "prompt": prompt, + "image": image_b64, + "tool": "visual_grounding", + "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold}, + } + data: Dict[str, Any] = _send_inference_request(request_data, "tools") + if "bboxes" in data: + data["bboxes"] = [normalize_bbox(box, image_size) for box in data["bboxes"]] + if "scores" in data: + data["scores"] = [round(score, 2) for score in data["scores"]] + if "labels" in data: + data["labels"] = list(data["labels"]) + data["image_size"] = image_size + return data + + +class GroundingSAM(Tool): + r"""Grounding SAM is a tool that can detect and segment arbitrary objects with + inputs such as category names or referring expressions. + + Example + ------- + >>> import vision_agent as va + >>> t = va.tools.GroundingSAM() + >>> t("red line, yellow dot", "ct_scan1.jpg"]) + [{'labels': ['yellow dot', 'red line'], + 'bboxes': [[0.38, 0.15, 0.59, 0.7], [0.48, 0.25, 0.69, 0.71]], + 'masks': [array([[0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0], + ..., + [0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0]], dtype=uint8)}, + array([[0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0], + ..., + [1, 1, 1, ..., 1, 1, 1], + [1, 1, 1, ..., 1, 1, 1]], dtype=uint8)]}] + """ + + name = "grounding_sam_" + description = "'grounding_sam_' is a tool that can detect and segment multiple objects given a text prompt such as category names or referring expressions. It returns a list of bounding boxes, label names and masks file names and associated probability scores." + usage = { + "required_parameters": [ + {"name": "prompt", "type": "str"}, + {"name": "image", "type": "str"}, + ], + "optional_parameters": [ + {"name": "box_threshold", "type": "float", "min": 0.1, "max": 0.5}, + {"name": "iou_threshold", "type": "float", "min": 0.01, "max": 0.99}, + ], + "examples": [ + { + "scenario": "Can you segment the apples and grapes in this image? Image name: fruits.jpg", + "parameters": { + "prompt": "apple. grape", + "image": "fruits.jpg", + }, + }, + { + "scenario": "Can you build me a car segmentor?", + "parameters": {"prompt": "car", "image": ""}, + }, + { + "scenario": "Can you segment the person on the left and right? Image name: person.jpg", + "parameters": { + "prompt": "left person. right person", + "image": "person.jpg", + }, + }, + { + "scenario": "Can you build me a tool that segments red shirts and green shirts? 
Image name: shirts.jpg", + "parameters": { + "prompt": "red shirt, green shirt", + "image": "shirts.jpg", + "box_threshold": 0.20, + "iou_threshold": 0.20, + }, + }, + ], + } + + # TODO: Add support for input multiple images, which aligns with the output type. + def __call__( + self, + prompt: str, + image: Union[str, ImageType], + box_threshold: float = 0.2, + iou_threshold: float = 0.2, + ) -> Dict: + """Invoke the Grounding SAM model. + + Parameters: + prompt: a list of classes to segment. + image: the input image to segment. + box_threshold: the threshold to filter out the bounding boxes with low scores. + iou_threshold: the threshold for intersection over union used in nms algorithm. It will suppress the boxes which have iou greater than this threshold. + + Returns: + A dictionary containing the labels, scores, bboxes and masks for the input image. + """ + image_size = get_image_size(image) + image_b64 = convert_to_b64(image) + request_data = { + "prompt": prompt, + "image": image_b64, + "tool": "visual_grounding_segment", + "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold}, + } + data: Dict[str, Any] = _send_inference_request(request_data, "tools") + if "bboxes" in data: + data["bboxes"] = [normalize_bbox(box, image_size) for box in data["bboxes"]] + if "masks" in data: + data["masks"] = [ + rle_decode(mask_rle=mask, shape=data["mask_shape"]) + for mask in data["masks"] + ] + data["image_size"] = image_size + data.pop("mask_shape", None) + return data + + +class DINOv(Tool): + r"""DINOv is a tool that can detect and segment similar objects with the given input masks. + + Example + ------- + >>> import vision_agent as va + >>> t = va.tools.DINOv() + >>> t(prompt=[{"mask":"balloon_mask.jpg", "image": "balloon.jpg"}], image="balloon.jpg"]) + [{'scores': [0.512, 0.212], + 'masks': [array([[0, 0, 0, ..., 0, 0, 0], + ..., + [0, 0, 0, ..., 0, 0, 0]], dtype=uint8)}, + array([[0, 0, 0, ..., 0, 0, 0], + ..., + [1, 1, 1, ..., 1, 1, 1]], dtype=uint8)]}] + """ + + name = "dinov_" + description = "'dinov_' is a tool that can detect and segment similar objects given a reference segmentation mask." + usage = { + "required_parameters": [ + {"name": "prompt", "type": "List[Dict[str, str]]"}, + {"name": "image", "type": "str"}, + ], + "examples": [ + { + "scenario": "Can you find all the balloons in this image that is similar to the provided masked area? Image name: input.jpg Reference image: balloon.jpg Reference mask: balloon_mask.jpg", + "parameters": { + "prompt": [ + {"mask": "balloon_mask.jpg", "image": "balloon.jpg"}, + ], + "image": "input.jpg", + }, + }, + { + "scenario": "Detect all the objects in this image that are similar to the provided mask. Image name: original.jpg Reference image: mask.png Reference mask: background.png", + "parameters": { + "prompt": [ + {"mask": "mask.png", "image": "background.png"}, + ], + "image": "original.jpg", + }, + }, + ], + } + + def __call__( + self, prompt: List[Dict[str, str]], image: Union[str, ImageType] + ) -> Dict: + """Invoke the DINOv model. + + Parameters: + prompt: a list of visual prompts in the form of {'mask': 'MASK_FILE_PATH', 'image': 'IMAGE_FILE_PATH'}. + image: the input image to segment. + + Returns: + A dictionary of the below keys: 'scores', 'masks' and 'mask_shape', which stores a list of detected segmentation masks and its scores. 
+ """ + image_b64 = convert_to_b64(image) + for p in prompt: + p["mask"] = convert_to_b64(p["mask"]) + p["image"] = convert_to_b64(p["image"]) + request_data = { + "prompt": prompt, + "image": image_b64, + } + data: Dict[str, Any] = _send_inference_request(request_data, "dinov") + if "bboxes" in data: + data["bboxes"] = [ + normalize_bbox(box, data["mask_shape"]) for box in data["bboxes"] + ] + if "masks" in data: + data["masks"] = [ + rle_decode(mask_rle=mask, shape=data["mask_shape"]) + for mask in data["masks"] + ] + data["labels"] = ["visual prompt" for _ in range(len(data["masks"]))] + mask_shape = data.pop("mask_shape", None) + data["image_size"] = (mask_shape[0], mask_shape[1]) if mask_shape else None + return data + + +class AgentDINOv(DINOv): + def __call__( + self, + prompt: List[Dict[str, str]], + image: Union[str, ImageType], + ) -> Dict: + rets = super().__call__(prompt, image) + mask_files = [] + for mask in rets["masks"]: + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: + file_name = Path(tmp.name).with_suffix(".mask.png") + Image.fromarray(mask * 255).save(file_name) + mask_files.append(str(file_name)) + rets["masks"] = mask_files + return rets + + +class AgentGroundingSAM(GroundingSAM): + r"""AgentGroundingSAM is the same as GroundingSAM but it saves the masks as files + returns the file name. This makes it easier for agents to use. + """ + + def __call__( + self, + prompt: str, + image: Union[str, ImageType], + box_threshold: float = 0.2, + iou_threshold: float = 0.75, + ) -> Dict: + rets = super().__call__(prompt, image, box_threshold, iou_threshold) + mask_files = [] + for mask in rets["masks"]: + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: + file_name = Path(tmp.name).with_suffix(".mask.png") + Image.fromarray(mask * 255).save(file_name) + mask_files.append(str(file_name)) + rets["masks"] = mask_files + return rets + + +class ZeroShotCounting(Tool): + r"""ZeroShotCounting is a tool that can count total number of instances of an object + present in an image belonging to same class without a text or visual prompt. + + Example + ------- + >>> import vision_agent as va + >>> zshot_count = va.tools.ZeroShotCounting() + >>> zshot_count("image1.jpg") + {'count': 45} + """ + + name = "zero_shot_counting_" + description = "'zero_shot_counting_' is a tool that counts foreground items given only an image and no other information. It returns only the count of the objects in the image" + + usage = { + "required_parameters": [ + {"name": "image", "type": "str"}, + ], + "examples": [ + { + "scenario": "Can you count the items in the image? Image name: lids.jpg", + "parameters": {"image": "lids.jpg"}, + }, + { + "scenario": "Can you count the total number of objects in this image? Image name: tray.jpg", + "parameters": {"image": "tray.jpg"}, + }, + { + "scenario": "Can you build me an object counting tool? Image name: shirts.jpg", + "parameters": { + "image": "shirts.jpg", + }, + }, + ], + } + + # TODO: Add support for input multiple images, which aligns with the output type. + def __call__(self, image: Union[str, ImageType]) -> Dict: + """Invoke the Zero shot counting model. + + Parameters: + image: the input image. + + Returns: + A dictionary containing the key 'count' and the count as value. E.g. 
{count: 12} + """ + image_b64 = convert_to_b64(image) + data = { + "image": image_b64, + "tool": "zero_shot_counting", + } + resp_data = _send_inference_request(data, "tools") + resp_data["heat_map"] = np.array(b64_to_pil(resp_data["heat_map"][0])) + return resp_data + + +class VisualPromptCounting(Tool): + r"""VisualPromptCounting is a tool that can count total number of instances of an object + present in an image belonging to same class with help of an visual prompt which is a bounding box. + + Example + ------- + >>> import vision_agent as va + >>> prompt_count = va.tools.VisualPromptCounting() + >>> prompt_count(image="image1.jpg", prompt={"bbox": [0.1, 0.1, 0.4, 0.42]}) + {'count': 23} + """ + + name = "visual_prompt_counting_" + description = "'visual_prompt_counting_' is a tool that counts foreground items in an image given a visual prompt which is a bounding box describing the object. It returns only the count of the objects in the image." + + usage = { + "required_parameters": [ + {"name": "image", "type": "str"}, + {"name": "prompt", "type": "Dict[str, List[float]"}, + ], + "examples": [ + { + "scenario": "Here is an example of a lid '0.1, 0.1, 0.14, 0.2', Can you count the items in the image ? Image name: lids.jpg", + "parameters": { + "image": "lids.jpg", + "prompt": {"bbox": [0.1, 0.1, 0.14, 0.2]}, + }, + }, + { + "scenario": "Can you count the total number of objects in this image ? Image name: tray.jpg, reference_data: {'bbox': [0.1, 0.1, 0.2, 0.25]}", + "parameters": { + "image": "tray.jpg", + "prompt": {"bbox": [0.1, 0.1, 0.2, 0.25]}, + }, + }, + { + "scenario": "Can you count this item based on an example, reference_data: {'bbox': [100, 115, 200, 200]} ? Image name: shirts.jpg", + "parameters": { + "image": "shirts.jpg", + "prompt": {"bbox": [100, 115, 200, 200]}, + }, + }, + { + "scenario": "Can you build me a counting tool based on an example prompt ? Image name: shoes.jpg, reference_data: {'bbox': [0.1, 0.1, 0.6, 0.65]}", + "parameters": { + "image": "shoes.jpg", + "prompt": {"bbox": [0.1, 0.1, 0.6, 0.65]}, + }, + }, + ], + } + + def __call__( + self, image: Union[str, ImageType], prompt: Dict[str, List[float]] + ) -> Dict: + """Invoke the few shot counting model. + + Parameters: + image: the input image. + prompt: the visual prompt which is a bounding box describing the object. + + Returns: + A dictionary containing the key 'count' and the count as value. E.g. {count: 12} + """ + image_size = get_image_size(image) + bbox = prompt["bbox"] + bbox_str = ", ".join(map(str, denormalize_bbox(bbox, image_size))) + image_b64 = convert_to_b64(image) + + data = { + "image": image_b64, + "prompt": bbox_str, + "tool": "few_shot_counting", + } + resp_data = _send_inference_request(data, "tools") + resp_data["heat_map"] = np.array(b64_to_pil(resp_data["heat_map"][0])) + return resp_data + + +class VisualQuestionAnswering(Tool): + r"""VisualQuestionAnswering is a tool that can explain contents of an image and answer questions about the same + + Example + ------- + >>> import vision_agent as va + >>> vqa_tool = va.tools.VisualQuestionAnswering() + >>> vqa_tool(image="image1.jpg", prompt="describe this image in detail") + {'text': "The image contains a cat sitting on a table with a bowl of milk."} + """ + + name = "visual_question_answering_" + description = "'visual_question_answering_' is a tool that can answer basic questions about the image given a question and an image. 
It returns a text describing the image and the answer to the question" + + usage = { + "required_parameters": [ + {"name": "image", "type": "str"}, + {"name": "prompt", "type": "str"}, + ], + "examples": [ + { + "scenario": "Describe this image in detail. Image name: cat.jpg", + "parameters": { + "image": "cats.jpg", + "prompt": "Describe this image in detail", + }, + }, + { + "scenario": "Can you help me with this street sign in this image ? What does it say ? Image name: sign.jpg", + "parameters": { + "image": "sign.jpg", + "prompt": "Can you help me with this street sign ? What does it say ?", + }, + }, + { + "scenario": "Describe the weather in the image for me ? Image name: weather.jpg", + "parameters": { + "image": "weather.jpg", + "prompt": "Describe the weather in the image for me ", + }, + }, + { + "scenario": "Which 2 are the least frequent bins in this histogram ? Image name: chart.jpg", + "parameters": { + "image": "chart.jpg", + "prompt": "Which 2 are the least frequent bins in this histogram", + }, + }, + ], + } + + def __call__(self, image: str, prompt: str) -> Dict: + """Invoke the visual question answering model. + + Parameters: + image: the input image. + + Returns: + A dictionary containing the key 'text' and the answer to the prompt. E.g. {'text': 'This image contains a cat sitting on a table with a bowl of milk.'} + """ + + gpt = OpenAILMM() + return {"text": gpt(input=prompt, images=[image])} + + +class ImageQuestionAnswering(Tool): + r"""ImageQuestionAnswering is a tool that can explain contents of an image and answer questions about the same + It is same as VisualQuestionAnswering but this tool is not used by agents. It is used when user requests a tool for VQA using generate_image_qa_tool function. + It is also useful if the user wants the data to be not exposed to OpenAI endpoints + + Example + ------- + >>> import vision_agent as va + >>> vqa_tool = va.tools.ImageQuestionAnswering() + >>> vqa_tool(image="image1.jpg", prompt="describe this image in detail") + {'text': "The image contains a cat sitting on a table with a bowl of milk."} + """ + + name = "image_question_answering_" + description = "'image_question_answering_' is a tool that can answer basic questions about the image given a question and an image. It returns a text describing the image and the answer to the question" + + usage = { + "required_parameters": [ + {"name": "image", "type": "str"}, + {"name": "prompt", "type": "str"}, + ], + "examples": [ + { + "scenario": "Describe this image in detail. Image name: cat.jpg", + "parameters": { + "image": "cats.jpg", + "prompt": "Describe this image in detail", + }, + }, + { + "scenario": "Can you help me with this street sign in this image ? What does it say ? Image name: sign.jpg", + "parameters": { + "image": "sign.jpg", + "prompt": "Can you help me with this street sign ? What does it say ?", + }, + }, + { + "scenario": "Describe the weather in the image for me ? Image name: weather.jpg", + "parameters": { + "image": "weather.jpg", + "prompt": "Describe the weather in the image for me ", + }, + }, + { + "scenario": "Can you generate an image question answering tool ? Image name: chart.jpg, prompt: Which 2 are the least frequent bins in this histogram", + "parameters": { + "image": "chart.jpg", + "prompt": "Which 2 are the least frequent bins in this histogram", + }, + }, + ], + } + + def __call__(self, image: Union[str, ImageType], prompt: str) -> Dict: + """Invoke the visual question answering model. + + Parameters: + image: the input image. 
+ + Returns: + A dictionary containing the key 'text' and the answer to the prompt. E.g. {'text': 'This image contains a cat sitting on a table with a bowl of milk.'} + """ + + image_b64 = convert_to_b64(image) + data = { + "image": image_b64, + "prompt": prompt, + "tool": "image_question_answering", + } + + return _send_inference_request(data, "tools") + + +class Crop(Tool): + r"""Crop crops an image given a bounding box and returns a file name of the cropped image.""" + + name = "crop_" + description = "'crop_' crops an image given a bounding box and returns a file name of the cropped image. It returns a file with the cropped image." + usage = { + "required_parameters": [ + {"name": "bbox", "type": "List[float]"}, + {"name": "image", "type": "str"}, + ], + "examples": [ + { + "scenario": "Can you crop the image to the bounding box [0.1, 0.1, 0.9, 0.9]? Image name: image.jpg", + "parameters": {"bbox": [0.1, 0.1, 0.9, 0.9], "image": "image.jpg"}, + }, + { + "scenario": "Cut out the image to the bounding box [0.2, 0.2, 0.8, 0.8]. Image name: car.jpg", + "parameters": {"bbox": [0.2, 0.2, 0.8, 0.8], "image": "car.jpg"}, + }, + ], + } + + def __call__(self, bbox: List[float], image: Union[str, Path]) -> Dict: + pil_image = Image.open(image) + width, height = pil_image.size + bbox = [ + int(bbox[0] * width), + int(bbox[1] * height), + int(bbox[2] * width), + int(bbox[3] * height), + ] + cropped_image = pil_image.crop(bbox) # type: ignore + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: + cropped_image.save(tmp.name) + + return {"image": tmp.name} + + +class BboxStats(Tool): + r"""BboxStats returns the height, width and area of the bounding box in pixels to 2 decimal places.""" + + name = "bbox_stats_" + description = "'bbox_stats_' returns the height, width and area of the given bounding box in pixels to 2 decimal places." + usage = { + "required_parameters": [ + {"name": "bboxes", "type": "List[int]"}, + {"name": "image_size", "type": "Tuple[int]"}, + ], + "examples": [ + { + "scenario": "Calculate the width and height of the bounding box [0.2, 0.21, 0.34, 0.42]", + "parameters": { + "bboxes": [[0.2, 0.21, 0.34, 0.42]], + "image_size": (500, 1200), + }, + }, + { + "scenario": "Calculate the area of the bounding box [0.2, 0.21, 0.34, 0.42]", + "parameters": { + "bboxes": [[0.2, 0.21, 0.34, 0.42]], + "image_size": (640, 480), + }, + }, + ], + } + + def __call__( + self, bboxes: List[List[int]], image_size: Tuple[int, int] + ) -> List[Dict]: + areas = [] + height, width = image_size + for bbox in bboxes: + x1, y1, x2, y2 = bbox + areas.append( + { + "width": round((x2 - x1) * width, 2), + "height": round((y2 - y1) * height, 2), + "area": round((x2 - x1) * (y2 - y1) * width * height, 2), + } + ) + + return areas + + +class SegArea(Tool): + r"""SegArea returns the area of the segmentation mask in pixels normalized to 2 decimal places.""" + + name = "seg_area_" + description = "'seg_area_' returns the area of the given segmentation mask in pixels normalized to 2 decimal places." 
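# A minimal standalone sketch of the area computation described by 'seg_area_' above:
# load a segmentation mask, clip it to a binary {0, 1} array, and sum the pixels.
# "mask_file.png" is a placeholder file name used only for illustration.
import numpy as np
from PIL import Image

mask = np.clip(np.array(Image.open("mask_file.png")), 0, 1)
area_in_pixels = round(float(np.sum(mask)), 2)  # number of mask pixels, i.e. the segmented area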
+ usage = { + "required_parameters": [{"name": "masks", "type": "str"}], + "examples": [ + { + "scenario": "If you want to calculate the area of the segmentation mask, pass the masks file name.", + "parameters": {"masks": "mask_file.jpg"}, + }, + ], + } + + def __call__(self, masks: Union[str, Path]) -> float: + pil_mask = Image.open(str(masks)) + np_mask = np.array(pil_mask) + np_mask = np.clip(np_mask, 0, 1) + return cast(float, round(np.sum(np_mask), 2)) + + +class BboxIoU(Tool): + name = "bbox_iou_" + description = "'bbox_iou_' returns the intersection over union of two bounding boxes. This is a good tool for determining if two objects are overlapping." + usage = { + "required_parameters": [ + {"name": "bbox1", "type": "List[int]"}, + {"name": "bbox2", "type": "List[int]"}, + ], + "examples": [ + { + "scenario": "If you want to calculate the intersection over union of the bounding boxes [0.2, 0.21, 0.34, 0.42] and [0.3, 0.31, 0.44, 0.52]", + "parameters": { + "bbox1": [0.2, 0.21, 0.34, 0.42], + "bbox2": [0.3, 0.31, 0.44, 0.52], + }, + } + ], + } + + def __call__(self, bbox1: List[int], bbox2: List[int]) -> float: + x1, y1, x2, y2 = bbox1 + x3, y3, x4, y4 = bbox2 + xA = max(x1, x3) + yA = max(y1, y3) + xB = min(x2, x4) + yB = min(y2, y4) + inter_area = max(0, xB - xA) * max(0, yB - yA) + boxa_area = (x2 - x1) * (y2 - y1) + boxb_area = (x4 - x3) * (y4 - y3) + iou = inter_area / float(boxa_area + boxb_area - inter_area) + return round(iou, 2) + + +class SegIoU(Tool): + name = "seg_iou_" + description = "'seg_iou_' returns the intersection over union of two segmentation masks given their segmentation mask files." + usage = { + "required_parameters": [ + {"name": "mask1", "type": "str"}, + {"name": "mask2", "type": "str"}, + ], + "examples": [ + { + "scenario": "Calculate the intersection over union of the segmentation masks for mask_file1.jpg and mask_file2.jpg", + "parameters": {"mask1": "mask_file1.png", "mask2": "mask_file2.png"}, + } + ], + } + + def __call__(self, mask1: Union[str, Path], mask2: Union[str, Path]) -> float: + pil_mask1 = Image.open(str(mask1)) + pil_mask2 = Image.open(str(mask2)) + np_mask1 = np.clip(np.array(pil_mask1), 0, 1) + np_mask2 = np.clip(np.array(pil_mask2), 0, 1) + intersection = np.logical_and(np_mask1, np_mask2) + union = np.logical_or(np_mask1, np_mask2) + iou = np.sum(intersection) / np.sum(union) + return cast(float, round(iou, 2)) + + +class BboxContains(Tool): + name = "bbox_contains_" + description = "Given two bounding boxes, a target bounding box and a region bounding box, 'bbox_contains_' returns the intersection of the two bounding boxes which is the percentage area of the target bounding box overlaps with the region bounding box. This is a good tool for determining if the region object contains the target object." + usage = { + "required_parameters": [ + {"name": "target", "type": "List[int]"}, + {"name": "target_class", "type": "str"}, + {"name": "region", "type": "List[int]"}, + {"name": "region_class", "type": "str"}, + ], + "examples": [ + { + "scenario": "Determine if the dog on the couch, bounding box of the dog: [0.2, 0.21, 0.34, 0.42], bounding box of the couch: [0.3, 0.31, 0.44, 0.52]", + "parameters": { + "target": [0.2, 0.21, 0.34, 0.42], + "target_class": "dog", + "region": [0.3, 0.31, 0.44, 0.52], + "region_class": "couch", + }, + }, + { + "scenario": "Check if the kid is in the pool? 
bounding box of the kid: [0.2, 0.21, 0.34, 0.42], bounding box of the pool: [0.3, 0.31, 0.44, 0.52]", + "parameters": { + "target": [0.2, 0.21, 0.34, 0.42], + "target_class": "kid", + "region": [0.3, 0.31, 0.44, 0.52], + "region_class": "pool", + }, + }, + ], + } + + def __call__( + self, target: List[int], target_class: str, region: List[int], region_class: str + ) -> Dict[str, Union[str, float]]: + x1, y1, x2, y2 = target + x3, y3, x4, y4 = region + xA = max(x1, x3) + yA = max(y1, y3) + xB = min(x2, x4) + yB = min(y2, y4) + inter_area = max(0, xB - xA) * max(0, yB - yA) + boxa_area = (x2 - x1) * (y2 - y1) + iou = inter_area / float(boxa_area) + area = round(iou, 2) + return { + "target_class": target_class, + "region_class": region_class, + "intersection": area, + } + + +class ObjectDistance(Tool): + name = "object_distance_" + description = "'object_distance_' calculates the distance between two objects in an image. It returns the minimum distance between the two objects." + usage = { + "required_parameters": [ + {"name": "object1", "type": "Dict[str, Any]"}, + {"name": "object2", "type": "Dict[str, Any]"}, + ], + "examples": [ + { + "scenario": "Calculate the distance between these two objects {bboxes: [0.2, 0.21, 0.34, 0.42], masks: 'mask_file1.png'}, {bboxes: [0.3, 0.31, 0.44, 0.52], masks: 'mask_file2.png'}", + "parameters": { + "object1": { + "bboxes": [0.2, 0.21, 0.34, 0.42], + "scores": 0.54, + "masks": "mask_file1.png", + }, + "object2": { + "bboxes": [0.3, 0.31, 0.44, 0.52], + "scores": 0.66, + "masks": "mask_file2.png", + }, + }, + } + ], + } + + def __call__(self, object1: Dict[str, Any], object2: Dict[str, Any]) -> float: + if "masks" in object1 and "masks" in object2: + mask1 = object1["masks"] + mask2 = object2["masks"] + return MaskDistance()(mask1, mask2) + elif "bboxes" in object1 and "bboxes" in object2: + bbox1 = object1["bboxes"] + bbox2 = object2["bboxes"] + return BoxDistance()(bbox1, bbox2) + else: + raise ValueError("Either of the objects should have masks or bboxes") + + +class BoxDistance(Tool): + name = "box_distance_" + description = "'box_distance_' calculates distance between two bounding boxes. It returns the minumum distance between the given bounding boxes" + usage = { + "required_parameters": [ + {"name": "bbox1", "type": "List[int]"}, + {"name": "bbox2", "type": "List[int]"}, + ], + "examples": [ + { + "scenario": "Calculate the distance between these two bounding boxes [0.2, 0.21, 0.34, 0.42] and [0.3, 0.31, 0.44, 0.52]", + "parameters": { + "bbox1": [0.2, 0.21, 0.34, 0.42], + "bbox2": [0.3, 0.31, 0.44, 0.52], + }, + } + ], + } + + def __call__(self, bbox1: List[int], bbox2: List[int]) -> float: + x11, y11, x12, y12 = bbox1 + x21, y21, x22, y22 = bbox2 + + horizontal_dist = np.max([0, x21 - x12, x11 - x22]) + vertical_dist = np.max([0, y21 - y12, y11 - y22]) + + return cast(float, round(np.sqrt(horizontal_dist**2 + vertical_dist**2), 2)) + + +class MaskDistance(Tool): + name = "mask_distance_" + description = "'mask_distance_' calculates distance between two masks. It is helpful in checking proximity of two objects. 
It returns the minumum distance between the given masks" + usage = { + "required_parameters": [ + {"name": "mask1", "type": "str"}, + {"name": "mask2", "type": "str"}, + ], + "examples": [ + { + "scenario": "Calculate the distance between the segmentation masks for mask_file1.jpg and mask_file2.jpg", + "parameters": {"mask1": "mask_file1.png", "mask2": "mask_file2.png"}, + } + ], + } + + def __call__(self, mask1: Union[str, Path], mask2: Union[str, Path]) -> float: + pil_mask1 = Image.open(str(mask1)) + pil_mask2 = Image.open(str(mask2)) + np_mask1 = np.clip(np.array(pil_mask1), 0, 1) + np_mask2 = np.clip(np.array(pil_mask2), 0, 1) + + mask1_points = np.transpose(np.nonzero(np_mask1)) + mask2_points = np.transpose(np.nonzero(np_mask2)) + dist_matrix = distance.cdist(mask1_points, mask2_points, "euclidean") + return cast(float, np.round(np.min(dist_matrix), 2)) + + +class ExtractFrames(Tool): + r"""Extract frames from a video.""" + + name = "extract_frames_" + description = "'extract_frames_' extracts frames from a video every 2 seconds, returns a list of tuples (frame, timestamp), where timestamp is the relative time in seconds where the frame was captured. The frame is a local image file path." + usage = { + "required_parameters": [{"name": "video_uri", "type": "str"}], + "optional_parameters": [{"name": "frames_every", "type": "float"}], + "examples": [ + { + "scenario": "Can you extract the frames from this video? Video: www.foobar.com/video?name=test.mp4", + "parameters": {"video_uri": "www.foobar.com/video?name=test.mp4"}, + }, + { + "scenario": "Can you extract the images from this video file at every 2 seconds ? Video path: tests/data/test.mp4", + "parameters": {"video_uri": "tests/data/test.mp4", "frames_every": 2}, + }, + ], + } + + def __call__( + self, video_uri: str, frames_every: float = 2 + ) -> List[Tuple[str, float]]: + """Extract frames from a video. + + + Parameters: + video_uri: the path to the video file or a url points to the video data + + Returns: + a list of tuples containing the extracted frame and the timestamp in seconds. E.g. [(path_to_frame1, 0.0), (path_to_frame2, 0.5), ...]. The timestamp is the time in seconds from the start of the video. E.g. 12.125 means 12.125 seconds from the start of the video. The frames are sorted by the timestamp in ascending order. + """ + frames = extract_frames_from_video(video_uri, fps=round(1 / frames_every, 2)) + result = [] + _LOGGER.info( + f"Extracted {len(frames)} frames from video {video_uri}. Temporarily saving them as images to disk for downstream tasks." + ) + for frame, ts in frames: + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: + file_name = Path(tmp.name).with_suffix(".frame.png") + Image.fromarray(frame).save(file_name) + result.append((str(file_name), ts)) + return result + + +class OCR(Tool): + name = "ocr_" + description = "'ocr_' extracts text from an image. It returns a list of detected text, bounding boxes, and confidence scores." + usage = { + "required_parameters": [ + {"name": "image", "type": "str"}, + ], + "examples": [ + { + "scenario": "Can you extract the text from this image? 
Image name: image.png", + "parameters": {"image": "image.png"}, + }, + ], + } + _API_KEY = "land_sk_WVYwP00xA3iXely2vuar6YUDZ3MJT9yLX6oW5noUkwICzYLiDV" + _URL = "https://app.landing.ai/ocr/v1/detect-text" + + def __call__(self, image: str) -> dict: + pil_image = Image.open(image).convert("RGB") + image_size = pil_image.size[::-1] + image_buffer = io.BytesIO() + pil_image.save(image_buffer, format="PNG") + buffer_bytes = image_buffer.getvalue() + image_buffer.close() + + res = requests.post( + self._URL, + files={"images": buffer_bytes}, + data={"language": "en"}, + headers={"contentType": "multipart/form-data", "apikey": self._API_KEY}, + ) + if res.status_code != 200: + _LOGGER.error(f"Request failed: {res.text}") + raise ValueError(f"Request failed: {res.text}") + + data = res.json() + output: Dict[str, List] = {"labels": [], "bboxes": [], "scores": []} + for det in data[0]: + output["labels"].append(det["text"]) + box = [ + det["location"][0]["x"], + det["location"][0]["y"], + det["location"][2]["x"], + det["location"][2]["y"], + ] + box = normalize_bbox(box, image_size) + output["bboxes"].append(box) + output["scores"].append(round(det["score"], 2)) + return output + + +class Calculator(Tool): + r"""Calculator is a tool that can perform basic arithmetic operations.""" + + name = "calculator_" + description = ( + "'calculator_' is a tool that can perform basic arithmetic operations." + ) + usage = { + "required_parameters": [{"name": "equation", "type": "str"}], + "examples": [ + { + "scenario": "If you want to calculate (2 * 3) + 4", + "parameters": {"equation": "2 + 4"}, + }, + { + "scenario": "If you want to calculate (4 + 2.5) / 2.1", + "parameters": {"equation": "(4 + 2.5) / 2.1"}, + }, + ], + } + + def __call__(self, equation: str) -> float: + return cast(float, round(eval(equation), 2)) + + +TOOLS = { + i: {"name": c.name, "description": c.description, "usage": c.usage, "class": c} + for i, c in enumerate( + [ + NoOp, + CLIP, + GroundingDINO, + AgentGroundingSAM, + ZeroShotCounting, + VisualPromptCounting, + VisualQuestionAnswering, + AgentDINOv, + ExtractFrames, + Crop, + BboxStats, + SegArea, + ObjectDistance, + BboxContains, + SegIoU, + OCR, + Calculator, + ] + ) + if (hasattr(c, "name") and hasattr(c, "description") and hasattr(c, "usage")) +} + + +def register_tool(tool: Type[Tool]) -> Type[Tool]: + r"""Add a tool to the list of available tools. + + Parameters: + tool: The tool to add. + """ + + if ( + not hasattr(tool, "name") + or not hasattr(tool, "description") + or not hasattr(tool, "usage") + ): + raise ValueError( + "The tool must have 'name', 'description' and 'usage' attributes." 
+ ) + + TOOLS[len(TOOLS)] = { + "name": tool.name, + "description": tool.description, + "usage": tool.usage, + "class": tool, + } + return tool diff --git a/vision_agent/tools/tools.py b/vision_agent/tools/tools.py index fdbc1fe2..8e202856 100644 --- a/vision_agent/tools/tools.py +++ b/vision_agent/tools/tools.py @@ -1,17 +1,18 @@ +import inspect import io +import json import logging import tempfile -from abc import ABC +from importlib import resources from pathlib import Path -from typing import Any, Dict, List, Tuple, Type, Union, cast +from typing import Any, Callable, Dict, List, Tuple, Union, cast import numpy as np +import pandas as pd import requests -from PIL import Image -from PIL.Image import Image as ImageType +from PIL import Image, ImageDraw, ImageFont from scipy.spatial import distance # type: ignore -from vision_agent.lmm import OpenAILMM from vision_agent.tools.tool_utils import _send_inference_request from vision_agent.utils import extract_frames_from_video from vision_agent.utils.image_utils import ( @@ -23,1220 +24,662 @@ rle_decode, ) +COLORS = [ + (158, 218, 229), + (219, 219, 141), + (23, 190, 207), + (188, 189, 34), + (199, 199, 199), + (247, 182, 210), + (127, 127, 127), + (227, 119, 194), + (196, 156, 148), + (197, 176, 213), + (140, 86, 75), + (148, 103, 189), + (255, 152, 150), + (152, 223, 138), + (214, 39, 40), + (44, 160, 44), + (255, 187, 120), + (174, 199, 232), + (255, 127, 14), + (31, 119, 180), +] +_API_KEY = "land_sk_WVYwP00xA3iXely2vuar6YUDZ3MJT9yLX6oW5noUkwICzYLiDV" +_OCR_URL = "https://app.landing.ai/ocr/v1/detect-text" +logging.basicConfig(level=logging.INFO) _LOGGER = logging.getLogger(__name__) -class Tool(ABC): - name: str - description: str - usage: Dict - - def __call__(self, *args: Any, **kwargs: Any) -> Any: - raise NotImplementedError +def grounding_dino( + prompt: str, + image: np.ndarray, + box_threshold: float = 0.20, + iou_threshold: float = 0.20, +) -> List[Dict[str, Any]]: + """'grounding_dino' is a tool that can detect and count objects given a text prompt + such as category names or referring expressions. It returns a list and count of + bounding boxes, label names and associated probability scores. + Parameters: + prompt (str): The prompt to ground to the image. + image (np.ndarray): The image to ground the prompt to. + box_threshold (float, optional): The threshold for the box detection. Defaults + to 0.20. + iou_threshold (float, optional): The threshold for the Intersection over Union + (IoU). Defaults to 0.20. + + Returns: + List[Dict[str, Any]]: A list of dictionaries containing the score, label, and + bounding box of the detected objects with normalized coordinates + (xmin, ymin, xmax, ymax). xmin and ymin are the coordinates of the top-left and + xmax and ymax are the coordinates of the bottom-right of the bounding box. -class NoOp(Tool): - name = "noop_" - description = "'noop_' is a no-op tool that does nothing if you do not want answer the question directly and not use a tool." - usage = { - "required_parameters": [], - "examples": [ - { - "scenario": "If you do not want to use a tool.", - "parameters": {}, - } - ], + Example + ------- + >>> grounding_dino("car. 
dinosaur", image) + [ + {'score': 0.99, 'label': 'dinosaur', 'bbox': [0.1, 0.11, 0.35, 0.4]}, + {'score': 0.98, 'label': 'car', 'bbox': [0.2, 0.21, 0.45, 0.5}, + ] + """ + image_size = image.shape[:2] + image_b64 = convert_to_b64(image) + request_data = { + "prompt": prompt, + "image": image_b64, + "tool": "visual_grounding", + "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold}, } + data: Dict[str, Any] = _send_inference_request(request_data, "tools") + return_data = [] + for i in range(len(data["bboxes"])): + return_data.append( + { + "score": round(data["scores"][i], 2), + "label": data["labels"][i], + "bbox": normalize_bbox(data["bboxes"][i], image_size), + } + ) + return return_data - def __call__(self) -> None: - return None +def grounding_sam( + prompt: str, + image: np.ndarray, + box_threshold: float = 0.20, + iou_threshold: float = 0.20, +) -> List[Dict[str, Any]]: + """'grounding_sam' is a tool that can detect and segment objects given a text + prompt such as category names or referring expressions. It returns a list of + bounding boxes, label names and masks file names and associated probability scores. -class CLIP(Tool): - r"""CLIP is a tool that can classify or tag any image given a set of input classes - or tags. + Parameters: + prompt (str): The prompt to ground to the image. + image (np.ndarray): The image to ground the prompt to. + box_threshold (float, optional): The threshold for the box detection. Defaults + to 0.20. + iou_threshold (float, optional): The threshold for the Intersection over Union + (IoU). Defaults to 0.20. + + Returns: + List[Dict[str, Any]]: A list of dictionaries containing the score, label, + bounding box, and mask of the detected objects with normalized coordinates + (xmin, ymin, xmax, ymax). xmin and ymin are the coordinates of the top-left and + xmax and ymax are the coordinates of the bottom-right of the bounding box. + The mask is binary 2D numpy array where 1 indicates the object and 0 indicates + the background. Example ------- - >>> import vision_agent as va - >>> clip = va.tools.CLIP() - >>> clip("red line, yellow dot", "ct_scan1.jpg")) - [{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}] + >>> grounding_sam("car. dinosaur", image) + [ + { + 'score': 0.99, + 'label': 'dinosaur', + 'bbox': [0.1, 0.11, 0.35, 0.4], + 'mask': array([[0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0], + ..., + [0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0]], dtype=uint8), + }, + ] """ - - name = "clip_" - description = "'clip_' is a tool that can classify any image given a set of input names or tags. It returns a list of the input names along with their probability scores." - usage = { - "required_parameters": [ - {"name": "prompt", "type": "str"}, - {"name": "image", "type": "str"}, - ], - "examples": [ - { - "scenario": "Can you classify this image as a cat? Image name: cat.jpg", - "parameters": {"prompt": "cat", "image": "cat.jpg"}, - }, - { - "scenario": "Can you tag this photograph with cat or dog? Image name: cat_dog.jpg", - "parameters": {"prompt": "cat, dog", "image": "cat_dog.jpg"}, - }, - { - "scenario": "Can you build me a classifier that classifies red shirts, green shirts and other? 
Image name: shirts.jpg", - "parameters": { - "prompt": "red shirt, green shirt, other", - "image": "shirts.jpg", - }, - }, - ], + image_size = image.shape[:2] + image_b64 = convert_to_b64(image) + request_data = { + "prompt": prompt, + "image": image_b64, + "tool": "visual_grounding_segment", + "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold}, } + data: Dict[str, Any] = _send_inference_request(request_data, "tools") + return_data = [] + for i in range(len(data["bboxes"])): + return_data.append( + { + "score": round(data["scores"][i], 2), + "label": data["labels"][i], + "bbox": normalize_bbox(data["bboxes"][i], image_size), + "mask": rle_decode(mask_rle=data["masks"][i], shape=data["mask_shape"]), + } + ) + return return_data - # TODO: Add support for input multiple images, which aligns with the output type. - def __call__(self, prompt: str, image: Union[str, ImageType]) -> Dict: - """Invoke the CLIP model. - - Parameters: - prompt: a string includes a list of classes or tags to classify the image. - image: the input image to classify. - Returns: - A list of dictionaries containing the labels and scores. Each dictionary contains the classification result for an image. E.g. [{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}] - """ - image_b64 = convert_to_b64(image) - data = { - "prompt": prompt, - "image": image_b64, - "tool": "closed_set_image_classification", - } - resp_data = _send_inference_request(data, "tools") - resp_data["scores"] = [round(prob, 4) for prob in resp_data["scores"]] - return resp_data +def extract_frames( + video_uri: Union[str, Path], fps: float = 0.5 +) -> List[Tuple[np.ndarray, float]]: + """'extract_frames' extracts frames from a video, returns a list of tuples (frame, + timestamp), where timestamp is the relative time in seconds where the frame was + captured. The frame is a local image file path. + Parameters: + video_uri (Union[str, Path]): The path to the video file. + fps (float, optional): The frame rate per second to extract the frames. Defaults + to 0.5. -class ImageCaption(Tool): - r"""ImageCaption is a tool that can caption an image based on its contents or tags. + Returns: + List[Tuple[np.ndarray, float]]: A list of tuples containing the extracted frame + and the timestamp in seconds. Example ------- - >>> import vision_agent as va - >>> caption = va.tools.ImageCaption() - >>> caption("image1.jpg") - {'text': ['a box of orange and white socks']} + >>> extract_frames("path/to/video.mp4") + [(frame1, 0.0), (frame2, 0.5), ...] """ - name = "image_caption_" - description = "'image_caption_' is a tool that can caption an image based on its contents or tags. It returns a text describing the image." - usage = { - "required_parameters": [ - {"name": "image", "type": "str"}, - ], - "examples": [ - { - "scenario": "Can you describe this image? Image name: cat.jpg", - "parameters": {"image": "cat.jpg"}, - }, - { - "scenario": "Can you caption this image with their main contents? Image name: cat_dog.jpg", - "parameters": {"image": "cat_dog.jpg"}, - }, - ], - } - - # TODO: Add support for input multiple images, which aligns with the output type. - def __call__(self, image: Union[str, ImageType]) -> Dict: - """Invoke the Image captioning model. + return extract_frames_from_video(str(video_uri), fps) - Parameters: - image: the input image to caption. - Returns: - A list of dictionaries containing the labels and scores. Each dictionary contains the classification result for an image. E.g. 
[{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}] - """ - image_b64 = convert_to_b64(image) - data = { - "image": image_b64, - "tool": "image_captioning", - } - return _send_inference_request(data, "tools") +def ocr(image: np.ndarray) -> List[Dict[str, Any]]: + """'ocr' extracts text from an image. It returns a list of detected text, bounding + boxes, and confidence scores. + Parameters: + image (np.ndarray): The image to extract text from. -class GroundingDINO(Tool): - r"""Grounding DINO is a tool that can detect arbitrary objects with inputs such as - category names or referring expressions. + Returns: + List[Dict[str, Any]]: A list of dictionaries containing the detected text, bbox, + and confidence score. Example ------- - >>> import vision_agent as va - >>> t = va.tools.GroundingDINO() - >>> t("red line. yellow dot", "ct_scan1.jpg") - [{'labels': ['red line', 'yellow dot'], - 'bboxes': [[0.38, 0.15, 0.59, 0.7], [0.48, 0.25, 0.69, 0.71]], - 'scores': [0.98, 0.02]}] + >>> ocr(image) + [ + {'label': 'some text', 'bbox': [0.1, 0.11, 0.35, 0.4], 'score': 0.99}, + ] """ - name = "grounding_dino_" - description = "'grounding_dino_' is a tool that can detect and count multiple objects given a text prompt such as category names or referring expressions. It returns a list and count of bounding boxes, label names and associated probability scores." - usage = { - "required_parameters": [ - {"name": "prompt", "type": "str"}, - {"name": "image", "type": "str"}, - ], - "optional_parameters": [ - {"name": "box_threshold", "type": "float", "min": 0.1, "max": 0.5}, - {"name": "iou_threshold", "type": "float", "min": 0.01, "max": 0.99}, - ], - "examples": [ - { - "scenario": "Can you detect and count the giraffes and zebras in this image? Image name: animal.jpg", - "parameters": { - "prompt": "giraffe. zebra", - "image": "person.jpg", - }, - }, - { - "scenario": "Can you build me a car detector?", - "parameters": {"prompt": "car", "image": ""}, - }, - { - "scenario": "Can you detect the person on the left and right? Image name: person.jpg", - "parameters": { - "prompt": "left person. right person", - "image": "person.jpg", - }, - }, - { - "scenario": "Detect the red shirts and green shirt. Image name: shirts.jpg", - "parameters": { - "prompt": "red shirt. green shirt", - "image": "shirts.jpg", - "box_threshold": 0.20, - "iou_threshold": 0.20, - }, - }, - ], - } + pil_image = Image.fromarray(image).convert("RGB") + image_size = pil_image.size[::-1] + image_buffer = io.BytesIO() + pil_image.save(image_buffer, format="PNG") + buffer_bytes = image_buffer.getvalue() + image_buffer.close() + + res = requests.post( + _OCR_URL, + files={"images": buffer_bytes}, + data={"language": "en"}, + headers={"contentType": "multipart/form-data", "apikey": _API_KEY}, + ) - # TODO: Add support for input multiple images, which aligns with the output type. - def __call__( - self, - prompt: str, - image: Union[str, Path, ImageType], - box_threshold: float = 0.20, - iou_threshold: float = 0.20, - ) -> Dict: - """Invoke the Grounding DINO model. - - Parameters: - prompt: one or multiple class names to detect. The classes should be separated by a period if there are multiple classes. E.g. "big dog . small cat" - image: the input image to run against. - box_threshold: the threshold to filter out the bounding boxes with low scores. - iou_threshold: the threshold for intersection over union used in nms algorithm. It will suppress the boxes which have iou greater than this threshold. 
- - Returns: - A dictionary containing the labels, scores, and bboxes, which is the detection result for the input image. - """ - image_size = get_image_size(image) - image_b64 = convert_to_b64(image) - request_data = { - "prompt": prompt, - "image": image_b64, - "tool": "visual_grounding", - "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold}, - } - data: Dict[str, Any] = _send_inference_request(request_data, "tools") - if "bboxes" in data: - data["bboxes"] = [normalize_bbox(box, image_size) for box in data["bboxes"]] - if "scores" in data: - data["scores"] = [round(score, 2) for score in data["scores"]] - if "labels" in data: - data["labels"] = list(data["labels"]) - data["image_size"] = image_size - return data - - -class GroundingSAM(Tool): - r"""Grounding SAM is a tool that can detect and segment arbitrary objects with - inputs such as category names or referring expressions. + if res.status_code != 200: + raise ValueError(f"OCR request failed with status code {res.status_code}") + + data = res.json() + output = [] + for det in data[0]: + label = det["text"] + box = [ + det["location"][0]["x"], + det["location"][0]["y"], + det["location"][2]["x"], + det["location"][2]["y"], + ] + box = normalize_bbox(box, image_size) + output.append({"label": label, "bbox": box, "score": round(det["score"], 2)}) + + return output + + +def zero_shot_counting(image: np.ndarray) -> Dict[str, Any]: + """'zero_shot_counting' is a tool that counts the dominant foreground object given an image and no other information about the content. + It returns only the count of the objects in the image. + + Parameters: + image (np.ndarray): The image that contains lot of instances of a single object + + Returns: + Dict[str, Any]: A dictionary containing the key 'count' and the count as a value. E.g. {count: 12}. Example ------- - >>> import vision_agent as va - >>> t = va.tools.GroundingSAM() - >>> t("red line, yellow dot", "ct_scan1.jpg"]) - [{'labels': ['yellow dot', 'red line'], - 'bboxes': [[0.38, 0.15, 0.59, 0.7], [0.48, 0.25, 0.69, 0.71]], - 'masks': [array([[0, 0, 0, ..., 0, 0, 0], - [0, 0, 0, ..., 0, 0, 0], - ..., - [0, 0, 0, ..., 0, 0, 0], - [0, 0, 0, ..., 0, 0, 0]], dtype=uint8)}, - array([[0, 0, 0, ..., 0, 0, 0], - [0, 0, 0, ..., 0, 0, 0], - ..., - [1, 1, 1, ..., 1, 1, 1], - [1, 1, 1, ..., 1, 1, 1]], dtype=uint8)]}] + >>> zero_shot_counting(image) + {'count': 45}, + """ - name = "grounding_sam_" - description = "'grounding_sam_' is a tool that can detect and segment multiple objects given a text prompt such as category names or referring expressions. It returns a list of bounding boxes, label names and masks file names and associated probability scores." - usage = { - "required_parameters": [ - {"name": "prompt", "type": "str"}, - {"name": "image", "type": "str"}, - ], - "optional_parameters": [ - {"name": "box_threshold", "type": "float", "min": 0.1, "max": 0.5}, - {"name": "iou_threshold", "type": "float", "min": 0.01, "max": 0.99}, - ], - "examples": [ - { - "scenario": "Can you segment the apples and grapes in this image? Image name: fruits.jpg", - "parameters": { - "prompt": "apple. grape", - "image": "fruits.jpg", - }, - }, - { - "scenario": "Can you build me a car segmentor?", - "parameters": {"prompt": "car", "image": ""}, - }, - { - "scenario": "Can you segment the person on the left and right? Image name: person.jpg", - "parameters": { - "prompt": "left person. 
right person", - "image": "person.jpg", - }, - }, - { - "scenario": "Can you build me a tool that segments red shirts and green shirts? Image name: shirts.jpg", - "parameters": { - "prompt": "red shirt, green shirt", - "image": "shirts.jpg", - "box_threshold": 0.20, - "iou_threshold": 0.20, - }, - }, - ], + image_b64 = convert_to_b64(image) + data = { + "image": image_b64, + "tool": "zero_shot_counting", } + resp_data = _send_inference_request(data, "tools") + resp_data["heat_map"] = np.array(b64_to_pil(resp_data["heat_map"][0])) + return resp_data + + +def visual_prompt_counting( + image: np.ndarray, visual_prompt: Dict[str, List[float]] +) -> Dict[str, Any]: + """'visual_prompt_counting' is a tool that counts the dominant foreground object given an image and a visual prompt which is a bounding box describing the object. + It returns only the count of the objects in the image. + + Parameters: + image (np.ndarray): The image that contains lot of instances of a single object - # TODO: Add support for input multiple images, which aligns with the output type. - def __call__( - self, - prompt: str, - image: Union[str, ImageType], - box_threshold: float = 0.2, - iou_threshold: float = 0.2, - ) -> Dict: - """Invoke the Grounding SAM model. - - Parameters: - prompt: a list of classes to segment. - image: the input image to segment. - box_threshold: the threshold to filter out the bounding boxes with low scores. - iou_threshold: the threshold for intersection over union used in nms algorithm. It will suppress the boxes which have iou greater than this threshold. - - Returns: - A dictionary containing the labels, scores, bboxes and masks for the input image. - """ - image_size = get_image_size(image) - image_b64 = convert_to_b64(image) - request_data = { - "prompt": prompt, - "image": image_b64, - "tool": "visual_grounding_segment", - "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold}, - } - data: Dict[str, Any] = _send_inference_request(request_data, "tools") - if "bboxes" in data: - data["bboxes"] = [normalize_bbox(box, image_size) for box in data["bboxes"]] - if "masks" in data: - data["masks"] = [ - rle_decode(mask_rle=mask, shape=data["mask_shape"]) - for mask in data["masks"] - ] - data["image_size"] = image_size - data.pop("mask_shape", None) - return data - - -class DINOv(Tool): - r"""DINOv is a tool that can detect and segment similar objects with the given input masks. + Returns: + Dict[str, Any]: A dictionary containing the key 'count' and the count as a value. E.g. {count: 12}. Example ------- - >>> import vision_agent as va - >>> t = va.tools.DINOv() - >>> t(prompt=[{"mask":"balloon_mask.jpg", "image": "balloon.jpg"}], image="balloon.jpg"]) - [{'scores': [0.512, 0.212], - 'masks': [array([[0, 0, 0, ..., 0, 0, 0], - ..., - [0, 0, 0, ..., 0, 0, 0]], dtype=uint8)}, - array([[0, 0, 0, ..., 0, 0, 0], - ..., - [1, 1, 1, ..., 1, 1, 1]], dtype=uint8)]}] + >>> visual_prompt_counting(image, {"bbox": [0.1, 0.1, 0.4, 0.42]}) + {'count': 45}, + """ - name = "dinov_" - description = "'dinov_' is a tool that can detect and segment similar objects given a reference segmentation mask." - usage = { - "required_parameters": [ - {"name": "prompt", "type": "List[Dict[str, str]]"}, - {"name": "image", "type": "str"}, - ], - "examples": [ - { - "scenario": "Can you find all the balloons in this image that is similar to the provided masked area? 
Image name: input.jpg Reference image: balloon.jpg Reference mask: balloon_mask.jpg", - "parameters": { - "prompt": [ - {"mask": "balloon_mask.jpg", "image": "balloon.jpg"}, - ], - "image": "input.jpg", - }, - }, - { - "scenario": "Detect all the objects in this image that are similar to the provided mask. Image name: original.jpg Reference image: mask.png Reference mask: background.png", - "parameters": { - "prompt": [ - {"mask": "mask.png", "image": "background.png"}, - ], - "image": "original.jpg", - }, - }, - ], + image_size = get_image_size(image) + bbox = visual_prompt["bbox"] + bbox_str = ", ".join(map(str, denormalize_bbox(bbox, image_size))) + image_b64 = convert_to_b64(image) + + data = { + "image": image_b64, + "prompt": bbox_str, + "tool": "few_shot_counting", } + resp_data = _send_inference_request(data, "tools") + resp_data["heat_map"] = np.array(b64_to_pil(resp_data["heat_map"][0])) + return resp_data - def __call__( - self, prompt: List[Dict[str, str]], image: Union[str, ImageType] - ) -> Dict: - """Invoke the DINOv model. - - Parameters: - prompt: a list of visual prompts in the form of {'mask': 'MASK_FILE_PATH', 'image': 'IMAGE_FILE_PATH'}. - image: the input image to segment. - - Returns: - A dictionary of the below keys: 'scores', 'masks' and 'mask_shape', which stores a list of detected segmentation masks and its scores. - """ - image_b64 = convert_to_b64(image) - for p in prompt: - p["mask"] = convert_to_b64(p["mask"]) - p["image"] = convert_to_b64(p["image"]) - request_data = { - "prompt": prompt, - "image": image_b64, - } - data: Dict[str, Any] = _send_inference_request(request_data, "dinov") - if "bboxes" in data: - data["bboxes"] = [ - normalize_bbox(box, data["mask_shape"]) for box in data["bboxes"] - ] - if "masks" in data: - data["masks"] = [ - rle_decode(mask_rle=mask, shape=data["mask_shape"]) - for mask in data["masks"] - ] - data["labels"] = ["visual prompt" for _ in range(len(data["masks"]))] - mask_shape = data.pop("mask_shape", None) - data["image_size"] = (mask_shape[0], mask_shape[1]) if mask_shape else None - return data - - -class AgentDINOv(DINOv): - def __call__( - self, - prompt: List[Dict[str, str]], - image: Union[str, ImageType], - ) -> Dict: - rets = super().__call__(prompt, image) - mask_files = [] - for mask in rets["masks"]: - with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: - file_name = Path(tmp.name).with_suffix(".mask.png") - Image.fromarray(mask * 255).save(file_name) - mask_files.append(str(file_name)) - rets["masks"] = mask_files - return rets - - -class AgentGroundingSAM(GroundingSAM): - r"""AgentGroundingSAM is the same as GroundingSAM but it saves the masks as files - returns the file name. This makes it easier for agents to use. - """ - def __call__( - self, - prompt: str, - image: Union[str, ImageType], - box_threshold: float = 0.2, - iou_threshold: float = 0.75, - ) -> Dict: - rets = super().__call__(prompt, image, box_threshold, iou_threshold) - mask_files = [] - for mask in rets["masks"]: - with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: - file_name = Path(tmp.name).with_suffix(".mask.png") - Image.fromarray(mask * 255).save(file_name) - mask_files.append(str(file_name)) - rets["masks"] = mask_files - return rets - - -class ZeroShotCounting(Tool): - r"""ZeroShotCounting is a tool that can count total number of instances of an object - present in an image belonging to same class without a text or visual prompt. 
+def image_question_answering(image: np.ndarray, prompt: str) -> str:
+    """'image_question_answering' is a tool that can answer questions about the visual contents of an image given a question and an image.
+    It returns an answer to the question.
+
+    Parameters:
+        image (np.ndarray): The reference image used for the question
+        prompt (str): The question about the image
+
+    Returns:
+        str: A string which is the answer to the given prompt. E.g. 'This image contains a cat sitting on a table with a bowl of milk.'
+
+    Example
+    -------
+    >>> image_question_answering(image, 'What is the cat doing ?')
+    'drinking milk'
+
+    """
+
+    image_b64 = convert_to_b64(image)
+    data = {
+        "image": image_b64,
+        "prompt": prompt,
+        "tool": "image_question_answering",
+    }
+
+    answer = _send_inference_request(data, "tools")
+    return answer["text"][0]  # type: ignore
+
+
+def clip(image: np.ndarray, classes: List[str]) -> Dict[str, Any]:
+    """'clip' is a tool that can classify an image given a list of input classes or tags.
+    It returns the same list of the input classes along with their probability scores based on image content.
+
+    Parameters:
+        image (np.ndarray): The image to classify or tag
+        classes (List[str]): The list of classes or tags that are associated with the image
+
+    Returns:
+        Dict[str, Any]: A dictionary containing the labels and scores. The dictionary contains a list of the given labels and a corresponding list of scores.
Example ------- - >>> import vision_agent as va - >>> prompt_count = va.tools.VisualPromptCounting() - >>> prompt_count(image="image1.jpg", prompt={"bbox": [0.1, 0.1, 0.4, 0.42]}) - {'count': 23} - """ + >>> clip(image, ['dog', 'cat', 'bird']) + {"labels": ["dog", "cat", "bird"], "scores": [0.68, 0.30, 0.02]}, - name = "visual_prompt_counting_" - description = "'visual_prompt_counting_' is a tool that counts foreground items in an image given a visual prompt which is a bounding box describing the object. It returns only the count of the objects in the image." + """ - usage = { - "required_parameters": [ - {"name": "image", "type": "str"}, - {"name": "prompt", "type": "Dict[str, List[float]"}, - ], - "examples": [ - { - "scenario": "Here is an example of a lid '0.1, 0.1, 0.14, 0.2', Can you count the items in the image ? Image name: lids.jpg", - "parameters": { - "image": "lids.jpg", - "prompt": {"bbox": [0.1, 0.1, 0.14, 0.2]}, - }, - }, - { - "scenario": "Can you count the total number of objects in this image ? Image name: tray.jpg, reference_data: {'bbox': [0.1, 0.1, 0.2, 0.25]}", - "parameters": { - "image": "tray.jpg", - "prompt": {"bbox": [0.1, 0.1, 0.2, 0.25]}, - }, - }, - { - "scenario": "Can you count this item based on an example, reference_data: {'bbox': [100, 115, 200, 200]} ? Image name: shirts.jpg", - "parameters": { - "image": "shirts.jpg", - "prompt": {"bbox": [100, 115, 200, 200]}, - }, - }, - { - "scenario": "Can you build me a counting tool based on an example prompt ? Image name: shoes.jpg, reference_data: {'bbox': [0.1, 0.1, 0.6, 0.65]}", - "parameters": { - "image": "shoes.jpg", - "prompt": {"bbox": [0.1, 0.1, 0.6, 0.65]}, - }, - }, - ], + image_b64 = convert_to_b64(image) + data = { + "prompt": ",".join(classes), + "image": image_b64, + "tool": "closed_set_image_classification", } + resp_data = _send_inference_request(data, "tools") + resp_data["scores"] = [round(prob, 4) for prob in resp_data["scores"]] + return resp_data - def __call__( - self, image: Union[str, ImageType], prompt: Dict[str, List[float]] - ) -> Dict: - """Invoke the few shot counting model. - - Parameters: - image: the input image. - prompt: the visual prompt which is a bounding box describing the object. - - Returns: - A dictionary containing the key 'count' and the count as value. E.g. {count: 12} - """ - image_size = get_image_size(image) - bbox = prompt["bbox"] - bbox_str = ", ".join(map(str, denormalize_bbox(bbox, image_size))) - image_b64 = convert_to_b64(image) - data = { - "image": image_b64, - "prompt": bbox_str, - "tool": "few_shot_counting", - } - resp_data = _send_inference_request(data, "tools") - resp_data["heat_map"] = np.array(b64_to_pil(resp_data["heat_map"][0])) - return resp_data +def image_caption(image: np.ndarray) -> str: + """'image_caption' is a tool that can caption an image based on its contents. + It returns a text describing the image. + Parameters: + image (np.ndarray): The image to caption -class VisualQuestionAnswering(Tool): - r"""VisualQuestionAnswering is a tool that can explain contents of an image and answer questions about the same + Returns: + str: A string which is the caption for the given image. Example ------- - >>> import vision_agent as va - >>> vqa_tool = va.tools.VisualQuestionAnswering() - >>> vqa_tool(image="image1.jpg", prompt="describe this image in detail") - {'text': "The image contains a cat sitting on a table with a bowl of milk."} - """ + >>> image_caption(image) + 'This image contains a cat sitting on a table with a bowl of milk.' 
- name = "visual_question_answering_" - description = "'visual_question_answering_' is a tool that can answer basic questions about the image given a question and an image. It returns a text describing the image and the answer to the question" + """ - usage = { - "required_parameters": [ - {"name": "image", "type": "str"}, - {"name": "prompt", "type": "str"}, - ], - "examples": [ - { - "scenario": "Describe this image in detail. Image name: cat.jpg", - "parameters": { - "image": "cats.jpg", - "prompt": "Describe this image in detail", - }, - }, - { - "scenario": "Can you help me with this street sign in this image ? What does it say ? Image name: sign.jpg", - "parameters": { - "image": "sign.jpg", - "prompt": "Can you help me with this street sign ? What does it say ?", - }, - }, - { - "scenario": "Describe the weather in the image for me ? Image name: weather.jpg", - "parameters": { - "image": "weather.jpg", - "prompt": "Describe the weather in the image for me ", - }, - }, - { - "scenario": "Which 2 are the least frequent bins in this histogram ? Image name: chart.jpg", - "parameters": { - "image": "chart.jpg", - "prompt": "Which 2 are the least frequent bins in this histogram", - }, - }, - ], + image_b64 = convert_to_b64(image) + data = { + "image": image_b64, + "tool": "image_captioning", } - def __call__(self, image: str, prompt: str) -> Dict: - """Invoke the visual question answering model. + answer = _send_inference_request(data, "tools") + return answer["text"][0] # type: ignore - Parameters: - image: the input image. - Returns: - A dictionary containing the key 'text' and the answer to the prompt. E.g. {'text': 'This image contains a cat sitting on a table with a bowl of milk.'} - """ - - gpt = OpenAILMM() - return {"text": gpt(input=prompt, images=[image])} +def closest_mask_distance(mask1: np.ndarray, mask2: np.ndarray) -> float: + """'closest_mask_distance' calculates the closest distance between two masks. + Parameters: + mask1 (np.ndarray): The first mask. + mask2 (np.ndarray): The second mask. -class ImageQuestionAnswering(Tool): - r"""ImageQuestionAnswering is a tool that can explain contents of an image and answer questions about the same - It is same as VisualQuestionAnswering but this tool is not used by agents. It is used when user requests a tool for VQA using generate_image_qa_tool function. - It is also useful if the user wants the data to be not exposed to OpenAI endpoints + Returns: + float: The closest distance between the two masks. Example ------- - >>> import vision_agent as va - >>> vqa_tool = va.tools.ImageQuestionAnswering() - >>> vqa_tool(image="image1.jpg", prompt="describe this image in detail") - {'text': "The image contains a cat sitting on a table with a bowl of milk."} + >>> closest_mask_distance(mask1, mask2) + 0.5 """ - name = "image_question_answering_" - description = "'image_question_answering_' is a tool that can answer basic questions about the image given a question and an image. It returns a text describing the image and the answer to the question" - - usage = { - "required_parameters": [ - {"name": "image", "type": "str"}, - {"name": "prompt", "type": "str"}, - ], - "examples": [ - { - "scenario": "Describe this image in detail. Image name: cat.jpg", - "parameters": { - "image": "cats.jpg", - "prompt": "Describe this image in detail", - }, - }, - { - "scenario": "Can you help me with this street sign in this image ? What does it say ? 
Image name: sign.jpg", - "parameters": { - "image": "sign.jpg", - "prompt": "Can you help me with this street sign ? What does it say ?", - }, - }, - { - "scenario": "Describe the weather in the image for me ? Image name: weather.jpg", - "parameters": { - "image": "weather.jpg", - "prompt": "Describe the weather in the image for me ", - }, - }, - { - "scenario": "Can you generate an image question answering tool ? Image name: chart.jpg, prompt: Which 2 are the least frequent bins in this histogram", - "parameters": { - "image": "chart.jpg", - "prompt": "Which 2 are the least frequent bins in this histogram", - }, - }, - ], - } + mask1 = np.clip(mask1, 0, 1) + mask2 = np.clip(mask2, 0, 1) + mask1_points = np.transpose(np.nonzero(mask1)) + mask2_points = np.transpose(np.nonzero(mask2)) + dist_matrix = distance.cdist(mask1_points, mask2_points, "euclidean") + return cast(float, np.min(dist_matrix)) - def __call__(self, image: Union[str, ImageType], prompt: str) -> Dict: - """Invoke the visual question answering model. - Parameters: - image: the input image. +def closest_box_distance( + box1: List[float], box2: List[float], image_size: Tuple[int, int] +) -> float: + """'closest_box_distance' calculates the closest distance between two bounding boxes. - Returns: - A dictionary containing the key 'text' and the answer to the prompt. E.g. {'text': 'This image contains a cat sitting on a table with a bowl of milk.'} - """ + Parameters: + box1 (List[float]): The first bounding box. + box2 (List[float]): The second bounding box. + image_size (Tuple[int, int]): The size of the image given as (height, width). - image_b64 = convert_to_b64(image) - data = { - "image": image_b64, - "prompt": prompt, - "tool": "image_question_answering", - } + Returns: + float: The closest distance between the two bounding boxes. - return _send_inference_request(data, "tools") + Example + ------- + >>> closest_box_distance([100, 100, 200, 200], [300, 300, 400, 400]) + 141.42 + """ + x11, y11, x12, y12 = denormalize_bbox(box1, image_size) + x21, y21, x22, y22 = denormalize_bbox(box2, image_size) -class Crop(Tool): - r"""Crop crops an image given a bounding box and returns a file name of the cropped image.""" + horizontal_distance = np.max([0, x21 - x12, x11 - x22]) + vertical_distance = np.max([0, y21 - y12, y11 - y22]) + return cast(float, np.sqrt(horizontal_distance**2 + vertical_distance**2)) - name = "crop_" - description = "'crop_' crops an image given a bounding box and returns a file name of the cropped image. It returns a file with the cropped image." - usage = { - "required_parameters": [ - {"name": "bbox", "type": "List[float]"}, - {"name": "image", "type": "str"}, - ], - "examples": [ - { - "scenario": "Can you crop the image to the bounding box [0.1, 0.1, 0.9, 0.9]? Image name: image.jpg", - "parameters": {"bbox": [0.1, 0.1, 0.9, 0.9], "image": "image.jpg"}, - }, - { - "scenario": "Cut out the image to the bounding box [0.2, 0.2, 0.8, 0.8]. 
Image name: car.jpg", - "parameters": {"bbox": [0.2, 0.2, 0.8, 0.8], "image": "car.jpg"}, - }, - ], - } - def __call__(self, bbox: List[float], image: Union[str, Path]) -> Dict: - pil_image = Image.open(image) - width, height = pil_image.size - bbox = [ - int(bbox[0] * width), - int(bbox[1] * height), - int(bbox[2] * width), - int(bbox[3] * height), - ] - cropped_image = pil_image.crop(bbox) # type: ignore - with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: - cropped_image.save(tmp.name) +# Utility and visualization functions - return {"image": tmp.name} +def save_json(data: Any, file_path: str) -> None: + """'save_json' is a utility function that saves data as a JSON file. It is helpful + for saving data that contains NumPy arrays which are not JSON serializable. -class BboxStats(Tool): - r"""BboxStats returns the height, width and area of the bounding box in pixels to 2 decimal places.""" + Parameters: + data (Any): The data to save. + file_path (str): The path to save the JSON file. - name = "bbox_stats_" - description = "'bbox_stats_' returns the height, width and area of the given bounding box in pixels to 2 decimal places." - usage = { - "required_parameters": [ - {"name": "bboxes", "type": "List[int]"}, - {"name": "image_size", "type": "Tuple[int]"}, - ], - "examples": [ - { - "scenario": "Calculate the width and height of the bounding box [0.2, 0.21, 0.34, 0.42]", - "parameters": { - "bboxes": [[0.2, 0.21, 0.34, 0.42]], - "image_size": (500, 1200), - }, - }, - { - "scenario": "Calculate the area of the bounding box [0.2, 0.21, 0.34, 0.42]", - "parameters": { - "bboxes": [[0.2, 0.21, 0.34, 0.42]], - "image_size": (640, 480), - }, - }, - ], - } + Example + ------- + >>> save_json(data, "path/to/file.json") + """ - def __call__( - self, bboxes: List[List[int]], image_size: Tuple[int, int] - ) -> List[Dict]: - areas = [] - height, width = image_size - for bbox in bboxes: - x1, y1, x2, y2 = bbox - areas.append( - { - "width": round((x2 - x1) * width, 2), - "height": round((y2 - y1) * height, 2), - "area": round((x2 - x1) * (y2 - y1) * width * height, 2), - } - ) - - return areas - - -class SegArea(Tool): - r"""SegArea returns the area of the segmentation mask in pixels normalized to 2 decimal places.""" - - name = "seg_area_" - description = "'seg_area_' returns the area of the given segmentation mask in pixels normalized to 2 decimal places." - usage = { - "required_parameters": [{"name": "masks", "type": "str"}], - "examples": [ - { - "scenario": "If you want to calculate the area of the segmentation mask, pass the masks file name.", - "parameters": {"masks": "mask_file.jpg"}, - }, - ], - } + class NumpyEncoder(json.JSONEncoder): + def default(self, obj: Any): # type: ignore + if isinstance(obj, np.ndarray): + return obj.tolist() + elif isinstance(obj, np.bool_): + return bool(obj) + return json.JSONEncoder.default(self, obj) - def __call__(self, masks: Union[str, Path]) -> float: - pil_mask = Image.open(str(masks)) - np_mask = np.array(pil_mask) - np_mask = np.clip(np_mask, 0, 1) - return cast(float, round(np.sum(np_mask), 2)) - - -class BboxIoU(Tool): - name = "bbox_iou_" - description = "'bbox_iou_' returns the intersection over union of two bounding boxes. This is a good tool for determining if two objects are overlapping." 
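# A minimal standalone sketch of the intersection-over-union computation described by
# 'bbox_iou_' above, for two normalized [x1, y1, x2, y2] boxes; the sample boxes below
# are illustrative values only.
def bbox_iou(bbox1, bbox2):
    x1, y1, x2, y2 = bbox1
    x3, y3, x4, y4 = bbox2
    inter_w = max(0.0, min(x2, x4) - max(x1, x3))
    inter_h = max(0.0, min(y2, y4) - max(y1, y3))
    inter_area = inter_w * inter_h
    union_area = (x2 - x1) * (y2 - y1) + (x4 - x3) * (y4 - y3) - inter_area
    return round(inter_area / union_area, 2) if union_area > 0 else 0.0

print(bbox_iou([0.2, 0.21, 0.34, 0.42], [0.3, 0.31, 0.44, 0.52]))  # prints roughly 0.08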
- usage = { - "required_parameters": [ - {"name": "bbox1", "type": "List[int]"}, - {"name": "bbox2", "type": "List[int]"}, - ], - "examples": [ - { - "scenario": "If you want to calculate the intersection over union of the bounding boxes [0.2, 0.21, 0.34, 0.42] and [0.3, 0.31, 0.44, 0.52]", - "parameters": { - "bbox1": [0.2, 0.21, 0.34, 0.42], - "bbox2": [0.3, 0.31, 0.44, 0.52], - }, - } - ], - } + with open(file_path, "w") as f: + json.dump(data, f, cls=NumpyEncoder) - def __call__(self, bbox1: List[int], bbox2: List[int]) -> float: - x1, y1, x2, y2 = bbox1 - x3, y3, x4, y4 = bbox2 - xA = max(x1, x3) - yA = max(y1, y3) - xB = min(x2, x4) - yB = min(y2, y4) - inter_area = max(0, xB - xA) * max(0, yB - yA) - boxa_area = (x2 - x1) * (y2 - y1) - boxb_area = (x4 - x3) * (y4 - y3) - iou = inter_area / float(boxa_area + boxb_area - inter_area) - return round(iou, 2) - - -class SegIoU(Tool): - name = "seg_iou_" - description = "'seg_iou_' returns the intersection over union of two segmentation masks given their segmentation mask files." - usage = { - "required_parameters": [ - {"name": "mask1", "type": "str"}, - {"name": "mask2", "type": "str"}, - ], - "examples": [ - { - "scenario": "Calculate the intersection over union of the segmentation masks for mask_file1.jpg and mask_file2.jpg", - "parameters": {"mask1": "mask_file1.png", "mask2": "mask_file2.png"}, - } - ], - } - def __call__(self, mask1: Union[str, Path], mask2: Union[str, Path]) -> float: - pil_mask1 = Image.open(str(mask1)) - pil_mask2 = Image.open(str(mask2)) - np_mask1 = np.clip(np.array(pil_mask1), 0, 1) - np_mask2 = np.clip(np.array(pil_mask2), 0, 1) - intersection = np.logical_and(np_mask1, np_mask2) - union = np.logical_or(np_mask1, np_mask2) - iou = np.sum(intersection) / np.sum(union) - return cast(float, round(iou, 2)) - - -class BboxContains(Tool): - name = "bbox_contains_" - description = "Given two bounding boxes, a target bounding box and a region bounding box, 'bbox_contains_' returns the intersection of the two bounding boxes which is the percentage area of the target bounding box overlaps with the region bounding box. This is a good tool for determining if the region object contains the target object." - usage = { - "required_parameters": [ - {"name": "target", "type": "List[int]"}, - {"name": "target_class", "type": "str"}, - {"name": "region", "type": "List[int]"}, - {"name": "region_class", "type": "str"}, - ], - "examples": [ - { - "scenario": "Determine if the dog on the couch, bounding box of the dog: [0.2, 0.21, 0.34, 0.42], bounding box of the couch: [0.3, 0.31, 0.44, 0.52]", - "parameters": { - "target": [0.2, 0.21, 0.34, 0.42], - "target_class": "dog", - "region": [0.3, 0.31, 0.44, 0.52], - "region_class": "couch", - }, - }, - { - "scenario": "Check if the kid is in the pool? bounding box of the kid: [0.2, 0.21, 0.34, 0.42], bounding box of the pool: [0.3, 0.31, 0.44, 0.52]", - "parameters": { - "target": [0.2, 0.21, 0.34, 0.42], - "target_class": "kid", - "region": [0.3, 0.31, 0.44, 0.52], - "region_class": "pool", - }, - }, - ], - } +def load_image(image_path: str) -> np.ndarray: + """'load_image' is a utility function that loads an image from the given path. 
- def __call__( - self, target: List[int], target_class: str, region: List[int], region_class: str - ) -> Dict[str, Union[str, float]]: - x1, y1, x2, y2 = target - x3, y3, x4, y4 = region - xA = max(x1, x3) - yA = max(y1, y3) - xB = min(x2, x4) - yB = min(y2, y4) - inter_area = max(0, xB - xA) * max(0, yB - yA) - boxa_area = (x2 - x1) * (y2 - y1) - iou = inter_area / float(boxa_area) - area = round(iou, 2) - return { - "target_class": target_class, - "region_class": region_class, - "intersection": area, - } - - -class ObjectDistance(Tool): - name = "object_distance_" - description = "'object_distance_' calculates the distance between two objects in an image. It returns the minimum distance between the two objects." - usage = { - "required_parameters": [ - {"name": "object1", "type": "Dict[str, Any]"}, - {"name": "object2", "type": "Dict[str, Any]"}, - ], - "examples": [ - { - "scenario": "Calculate the distance between these two objects {bboxes: [0.2, 0.21, 0.34, 0.42], masks: 'mask_file1.png'}, {bboxes: [0.3, 0.31, 0.44, 0.52], masks: 'mask_file2.png'}", - "parameters": { - "object1": { - "bboxes": [0.2, 0.21, 0.34, 0.42], - "scores": 0.54, - "masks": "mask_file1.png", - }, - "object2": { - "bboxes": [0.3, 0.31, 0.44, 0.52], - "scores": 0.66, - "masks": "mask_file2.png", - }, - }, - } - ], - } + Parameters: + image_path (str): The path to the image. - def __call__(self, object1: Dict[str, Any], object2: Dict[str, Any]) -> float: - if "masks" in object1 and "masks" in object2: - mask1 = object1["masks"] - mask2 = object2["masks"] - return MaskDistance()(mask1, mask2) - elif "bboxes" in object1 and "bboxes" in object2: - bbox1 = object1["bboxes"] - bbox2 = object2["bboxes"] - return BoxDistance()(bbox1, bbox2) - else: - raise ValueError("Either of the objects should have masks or bboxes") - - -class BoxDistance(Tool): - name = "box_distance_" - description = "'box_distance_' calculates distance between two bounding boxes. It returns the minumum distance between the given bounding boxes" - usage = { - "required_parameters": [ - {"name": "bbox1", "type": "List[int]"}, - {"name": "bbox2", "type": "List[int]"}, - ], - "examples": [ - { - "scenario": "Calculate the distance between these two bounding boxes [0.2, 0.21, 0.34, 0.42] and [0.3, 0.31, 0.44, 0.52]", - "parameters": { - "bbox1": [0.2, 0.21, 0.34, 0.42], - "bbox2": [0.3, 0.31, 0.44, 0.52], - }, - } - ], - } + Returns: + np.ndarray: The image as a NumPy array. - def __call__(self, bbox1: List[int], bbox2: List[int]) -> float: - x11, y11, x12, y12 = bbox1 - x21, y21, x22, y22 = bbox2 + Example + ------- + >>> load_image("path/to/image.jpg") + """ - horizontal_dist = np.max([0, x21 - x12, x11 - x22]) - vertical_dist = np.max([0, y21 - y12, y11 - y22]) + image = Image.open(image_path).convert("RGB") + return np.array(image) - return cast(float, round(np.sqrt(horizontal_dist**2 + vertical_dist**2), 2)) +def save_image(image: np.ndarray) -> str: + """'save_image' is a utility function that saves an image as a temporary file. -class MaskDistance(Tool): - name = "mask_distance_" - description = "'mask_distance_' calculates distance between two masks. It is helpful in checking proximity of two objects. 
It returns the minumum distance between the given masks" - usage = { - "required_parameters": [ - {"name": "mask1", "type": "str"}, - {"name": "mask2", "type": "str"}, - ], - "examples": [ - { - "scenario": "Calculate the distance between the segmentation masks for mask_file1.jpg and mask_file2.jpg", - "parameters": {"mask1": "mask_file1.png", "mask2": "mask_file2.png"}, - } - ], - } + Parameters: + image (np.ndarray): The image to save. - def __call__(self, mask1: Union[str, Path], mask2: Union[str, Path]) -> float: - pil_mask1 = Image.open(str(mask1)) - pil_mask2 = Image.open(str(mask2)) - np_mask1 = np.clip(np.array(pil_mask1), 0, 1) - np_mask2 = np.clip(np.array(pil_mask2), 0, 1) + Returns: + str: The path to the saved image. - mask1_points = np.transpose(np.nonzero(np_mask1)) - mask2_points = np.transpose(np.nonzero(np_mask2)) - dist_matrix = distance.cdist(mask1_points, mask2_points, "euclidean") - return cast(float, np.round(np.min(dist_matrix), 2)) + Example + ------- + >>> save_image(image) + "/tmp/tmpabc123.png" + """ + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f: + pil_image = Image.fromarray(image.astype(np.uint8)) + pil_image.save(f, "PNG") + return f.name -class ExtractFrames(Tool): - r"""Extract frames from a video.""" - name = "extract_frames_" - description = "'extract_frames_' extracts frames from a video every 2 seconds, returns a list of tuples (frame, timestamp), where timestamp is the relative time in seconds where the frame was captured. The frame is a local image file path." - usage = { - "required_parameters": [{"name": "video_uri", "type": "str"}], - "optional_parameters": [{"name": "frames_every", "type": "float"}], - "examples": [ - { - "scenario": "Can you extract the frames from this video? Video: www.foobar.com/video?name=test.mp4", - "parameters": {"video_uri": "www.foobar.com/video?name=test.mp4"}, - }, - { - "scenario": "Can you extract the images from this video file at every 2 seconds ? Video path: tests/data/test.mp4", - "parameters": {"video_uri": "tests/data/test.mp4", "frames_every": 2}, - }, - ], - } +def overlay_bounding_boxes( + image: np.ndarray, bboxes: List[Dict[str, Any]] +) -> np.ndarray: + """'display_bounding_boxes' is a utility function that displays bounding boxes on + an image. - def __call__( - self, video_uri: str, frames_every: float = 2 - ) -> List[Tuple[str, float]]: - """Extract frames from a video. + Parameters: + image (np.ndarray): The image to display the bounding boxes on. + bboxes (List[Dict[str, Any]]): A list of dictionaries containing the bounding + boxes. + Returns: + np.ndarray: The image with the bounding boxes, labels and scores displayed. - Parameters: - video_uri: the path to the video file or a url points to the video data + Example + ------- + >>> image_with_bboxes = display_bounding_boxes( + image, [{'score': 0.99, 'label': 'dinosaur', 'bbox': [0.1, 0.11, 0.35, 0.4]}], + ) + """ + pil_image = Image.fromarray(image.astype(np.uint8)) - Returns: - a list of tuples containing the extracted frame and the timestamp in seconds. E.g. [(path_to_frame1, 0.0), (path_to_frame2, 0.5), ...]. The timestamp is the time in seconds from the start of the video. E.g. 12.125 means 12.125 seconds from the start of the video. The frames are sorted by the timestamp in ascending order. - """ - frames = extract_frames_from_video(video_uri, fps=round(1 / frames_every, 2)) - result = [] - _LOGGER.info( - f"Extracted {len(frames)} frames from video {video_uri}. 
Temporarily saving them as images to disk for downstream tasks." + if len(set([box["label"] for box in bboxes])) > len(COLORS): + _LOGGER.warning( + "Number of unique labels exceeds the number of available colors. Some labels may have the same color." ) - for frame, ts in frames: - with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: - file_name = Path(tmp.name).with_suffix(".frame.png") - Image.fromarray(frame).save(file_name) - result.append((str(file_name), ts)) - return result - - -class OCR(Tool): - name = "ocr_" - description = "'ocr_' extracts text from an image. It returns a list of detected text, bounding boxes, and confidence scores." - usage = { - "required_parameters": [ - {"name": "image", "type": "str"}, - ], - "examples": [ - { - "scenario": "Can you extract the text from this image? Image name: image.png", - "parameters": {"image": "image.png"}, - }, - ], + + color = { + label: COLORS[i % len(COLORS)] + for i, label in enumerate(set([box["label"] for box in bboxes])) } - _API_KEY = "land_sk_WVYwP00xA3iXely2vuar6YUDZ3MJT9yLX6oW5noUkwICzYLiDV" - _URL = "https://app.landing.ai/ocr/v1/detect-text" - - def __call__(self, image: str) -> dict: - pil_image = Image.open(image).convert("RGB") - image_size = pil_image.size[::-1] - image_buffer = io.BytesIO() - pil_image.save(image_buffer, format="PNG") - buffer_bytes = image_buffer.getvalue() - image_buffer.close() - - res = requests.post( - self._URL, - files={"images": buffer_bytes}, - data={"language": "en"}, - headers={"contentType": "multipart/form-data", "apikey": self._API_KEY}, - ) - if res.status_code != 200: - _LOGGER.error(f"Request failed: {res.text}") - raise ValueError(f"Request failed: {res.text}") - - data = res.json() - output: Dict[str, List] = {"labels": [], "bboxes": [], "scores": []} - for det in data[0]: - output["labels"].append(det["text"]) - box = [ - det["location"][0]["x"], - det["location"][0]["y"], - det["location"][2]["x"], - det["location"][2]["y"], - ] - box = normalize_bbox(box, image_size) - output["bboxes"].append(box) - output["scores"].append(round(det["score"], 2)) - return output - - -class Calculator(Tool): - r"""Calculator is a tool that can perform basic arithmetic operations.""" - - name = "calculator_" - description = ( - "'calculator_' is a tool that can perform basic arithmetic operations." 
+ + width, height = pil_image.size + fontsize = max(12, int(min(width, height) / 40)) + draw = ImageDraw.Draw(pil_image) + font = ImageFont.truetype( + str(resources.files("vision_agent.fonts").joinpath("default_font_ch_en.ttf")), + fontsize, ) - usage = { - "required_parameters": [{"name": "equation", "type": "str"}], - "examples": [ - { - "scenario": "If you want to calculate (2 * 3) + 4", - "parameters": {"equation": "2 + 4"}, - }, - { - "scenario": "If you want to calculate (4 + 2.5) / 2.1", - "parameters": {"equation": "(4 + 2.5) / 2.1"}, - }, - ], - } - def __call__(self, equation: str) -> float: - return cast(float, round(eval(equation), 2)) - - -TOOLS = { - i: {"name": c.name, "description": c.description, "usage": c.usage, "class": c} - for i, c in enumerate( - [ - NoOp, - CLIP, - GroundingDINO, - AgentGroundingSAM, - ZeroShotCounting, - VisualPromptCounting, - VisualQuestionAnswering, - AgentDINOv, - ExtractFrames, - Crop, - BboxStats, - SegArea, - ObjectDistance, - BboxContains, - SegIoU, - OCR, - Calculator, + for elt in bboxes: + label = elt["label"] + box = elt["bbox"] + scores = elt["score"] + + box = [ + int(box[0] * width), + int(box[1] * height), + int(box[2] * width), + int(box[3] * height), ] - ) - if (hasattr(c, "name") and hasattr(c, "description") and hasattr(c, "usage")) -} + draw.rectangle(box, outline=color[label], width=4) + text = f"{label}: {scores:.2f}" + text_box = draw.textbbox((box[0], box[1]), text=text, font=font) + draw.rectangle((box[0], box[1], text_box[2], text_box[3]), fill=color[label]) + draw.text((box[0], box[1]), text, fill="black", font=font) + return np.array(pil_image.convert("RGB")) -def register_tool(tool: Type[Tool]) -> Type[Tool]: - r"""Add a tool to the list of available tools. +def overlay_segmentation_masks( + image: np.ndarray, masks: List[Dict[str, Any]] +) -> np.ndarray: + """'display_segmentation_masks' is a utility function that displays segmentation + masks. Parameters: - tool: The tool to add. + image (np.ndarray): The image to display the masks on. + masks (List[Dict[str, Any]]): A list of dictionaries containing the masks. + + Returns: + np.ndarray: The image with the masks displayed. + + Example + ------- + >>> image_with_masks = display_segmentation_masks( + image, + [{ + 'score': 0.99, + 'label': 'dinosaur', + 'mask': array([[0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0], + ..., + [0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0]], dtype=uint8), + }], + ) """ + pil_image = Image.fromarray(image.astype(np.uint8)).convert("RGBA") - if ( - not hasattr(tool, "name") - or not hasattr(tool, "description") - or not hasattr(tool, "usage") - ): - raise ValueError( - "The tool must have 'name', 'description' and 'usage' attributes." + if len(set([mask["label"] for mask in masks])) > len(COLORS): + _LOGGER.warning( + "Number of unique labels exceeds the number of available colors. Some labels may have the same color." 
) - TOOLS[len(TOOLS)] = { - "name": tool.name, - "description": tool.description, - "usage": tool.usage, - "class": tool, + color = { + label: COLORS[i % len(COLORS)] + for i, label in enumerate(set([mask["label"] for mask in masks])) } - return tool + + for elt in masks: + mask = elt["mask"] + label = elt["label"] + np_mask = np.zeros((pil_image.size[1], pil_image.size[0], 4)) + np_mask[mask > 0, :] = color[label] + (255 * 0.5,) + mask_img = Image.fromarray(np_mask.astype(np.uint8)) + pil_image = Image.alpha_composite(pil_image, mask_img) + return np.array(pil_image.convert("RGB")) + + +def get_tool_documentation(funcs: List[Callable[..., Any]]) -> str: + docstrings = "" + for func in funcs: + docstrings += f"{func.__name__}{inspect.signature(func)}:\n{func.__doc__}\n\n" + + return docstrings + + +def get_tool_descriptions(funcs: List[Callable[..., Any]]) -> str: + descriptions = "" + for func in funcs: + description = func.__doc__ + if description is None: + description = "" + + description = ( + description[: description.find("Parameters:")].replace("\n", " ").strip() + ) + description = " ".join(description.split()) + descriptions += f"- {func.__name__}{inspect.signature(func)}: {description}\n" + return descriptions + + +def get_tools_df(funcs: List[Callable[..., Any]]) -> pd.DataFrame: + data: Dict[str, List[str]] = {"desc": [], "doc": []} + + for func in funcs: + desc = func.__doc__ + if desc is None: + desc = "" + desc = desc[: desc.find("Parameters:")].replace("\n", " ").strip() + desc = " ".join(desc.split()) + + doc = f"{func.__name__}{inspect.signature(func)}:\n{func.__doc__}" + data["desc"].append(desc) + data["doc"].append(doc) + + return pd.DataFrame(data) # type: ignore + + +TOOLS = [ + grounding_dino, + grounding_sam, + extract_frames, + ocr, + clip, + zero_shot_counting, + visual_prompt_counting, + image_question_answering, + image_caption, + closest_mask_distance, + closest_box_distance, + save_json, + load_image, + save_image, + overlay_bounding_boxes, + overlay_segmentation_masks, +] +TOOLS_DF = get_tools_df(TOOLS) # type: ignore +TOOL_DESCRIPTIONS = get_tool_descriptions(TOOLS) # type: ignore +TOOL_DOCSTRING = get_tool_documentation(TOOLS) # type: ignore +UTILITIES_DOCSTRING = get_tool_documentation( + [save_json, load_image, save_image, overlay_bounding_boxes] +) diff --git a/vision_agent/tools/tools_v2.py b/vision_agent/tools/tools_v2.py deleted file mode 100644 index 8e202856..00000000 --- a/vision_agent/tools/tools_v2.py +++ /dev/null @@ -1,685 +0,0 @@ -import inspect -import io -import json -import logging -import tempfile -from importlib import resources -from pathlib import Path -from typing import Any, Callable, Dict, List, Tuple, Union, cast - -import numpy as np -import pandas as pd -import requests -from PIL import Image, ImageDraw, ImageFont -from scipy.spatial import distance # type: ignore - -from vision_agent.tools.tool_utils import _send_inference_request -from vision_agent.utils import extract_frames_from_video -from vision_agent.utils.image_utils import ( - b64_to_pil, - convert_to_b64, - denormalize_bbox, - get_image_size, - normalize_bbox, - rle_decode, -) - -COLORS = [ - (158, 218, 229), - (219, 219, 141), - (23, 190, 207), - (188, 189, 34), - (199, 199, 199), - (247, 182, 210), - (127, 127, 127), - (227, 119, 194), - (196, 156, 148), - (197, 176, 213), - (140, 86, 75), - (148, 103, 189), - (255, 152, 150), - (152, 223, 138), - (214, 39, 40), - (44, 160, 44), - (255, 187, 120), - (174, 199, 232), - (255, 127, 14), - (31, 119, 180), -] -_API_KEY = 
"land_sk_WVYwP00xA3iXely2vuar6YUDZ3MJT9yLX6oW5noUkwICzYLiDV" -_OCR_URL = "https://app.landing.ai/ocr/v1/detect-text" -logging.basicConfig(level=logging.INFO) -_LOGGER = logging.getLogger(__name__) - - -def grounding_dino( - prompt: str, - image: np.ndarray, - box_threshold: float = 0.20, - iou_threshold: float = 0.20, -) -> List[Dict[str, Any]]: - """'grounding_dino' is a tool that can detect and count objects given a text prompt - such as category names or referring expressions. It returns a list and count of - bounding boxes, label names and associated probability scores. - - Parameters: - prompt (str): The prompt to ground to the image. - image (np.ndarray): The image to ground the prompt to. - box_threshold (float, optional): The threshold for the box detection. Defaults - to 0.20. - iou_threshold (float, optional): The threshold for the Intersection over Union - (IoU). Defaults to 0.20. - - Returns: - List[Dict[str, Any]]: A list of dictionaries containing the score, label, and - bounding box of the detected objects with normalized coordinates - (xmin, ymin, xmax, ymax). xmin and ymin are the coordinates of the top-left and - xmax and ymax are the coordinates of the bottom-right of the bounding box. - - Example - ------- - >>> grounding_dino("car. dinosaur", image) - [ - {'score': 0.99, 'label': 'dinosaur', 'bbox': [0.1, 0.11, 0.35, 0.4]}, - {'score': 0.98, 'label': 'car', 'bbox': [0.2, 0.21, 0.45, 0.5}, - ] - """ - image_size = image.shape[:2] - image_b64 = convert_to_b64(image) - request_data = { - "prompt": prompt, - "image": image_b64, - "tool": "visual_grounding", - "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold}, - } - data: Dict[str, Any] = _send_inference_request(request_data, "tools") - return_data = [] - for i in range(len(data["bboxes"])): - return_data.append( - { - "score": round(data["scores"][i], 2), - "label": data["labels"][i], - "bbox": normalize_bbox(data["bboxes"][i], image_size), - } - ) - return return_data - - -def grounding_sam( - prompt: str, - image: np.ndarray, - box_threshold: float = 0.20, - iou_threshold: float = 0.20, -) -> List[Dict[str, Any]]: - """'grounding_sam' is a tool that can detect and segment objects given a text - prompt such as category names or referring expressions. It returns a list of - bounding boxes, label names and masks file names and associated probability scores. - - Parameters: - prompt (str): The prompt to ground to the image. - image (np.ndarray): The image to ground the prompt to. - box_threshold (float, optional): The threshold for the box detection. Defaults - to 0.20. - iou_threshold (float, optional): The threshold for the Intersection over Union - (IoU). Defaults to 0.20. - - Returns: - List[Dict[str, Any]]: A list of dictionaries containing the score, label, - bounding box, and mask of the detected objects with normalized coordinates - (xmin, ymin, xmax, ymax). xmin and ymin are the coordinates of the top-left and - xmax and ymax are the coordinates of the bottom-right of the bounding box. - The mask is binary 2D numpy array where 1 indicates the object and 0 indicates - the background. - - Example - ------- - >>> grounding_sam("car. 
dinosaur", image) - [ - { - 'score': 0.99, - 'label': 'dinosaur', - 'bbox': [0.1, 0.11, 0.35, 0.4], - 'mask': array([[0, 0, 0, ..., 0, 0, 0], - [0, 0, 0, ..., 0, 0, 0], - ..., - [0, 0, 0, ..., 0, 0, 0], - [0, 0, 0, ..., 0, 0, 0]], dtype=uint8), - }, - ] - """ - image_size = image.shape[:2] - image_b64 = convert_to_b64(image) - request_data = { - "prompt": prompt, - "image": image_b64, - "tool": "visual_grounding_segment", - "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold}, - } - data: Dict[str, Any] = _send_inference_request(request_data, "tools") - return_data = [] - for i in range(len(data["bboxes"])): - return_data.append( - { - "score": round(data["scores"][i], 2), - "label": data["labels"][i], - "bbox": normalize_bbox(data["bboxes"][i], image_size), - "mask": rle_decode(mask_rle=data["masks"][i], shape=data["mask_shape"]), - } - ) - return return_data - - -def extract_frames( - video_uri: Union[str, Path], fps: float = 0.5 -) -> List[Tuple[np.ndarray, float]]: - """'extract_frames' extracts frames from a video, returns a list of tuples (frame, - timestamp), where timestamp is the relative time in seconds where the frame was - captured. The frame is a local image file path. - - Parameters: - video_uri (Union[str, Path]): The path to the video file. - fps (float, optional): The frame rate per second to extract the frames. Defaults - to 0.5. - - Returns: - List[Tuple[np.ndarray, float]]: A list of tuples containing the extracted frame - and the timestamp in seconds. - - Example - ------- - >>> extract_frames("path/to/video.mp4") - [(frame1, 0.0), (frame2, 0.5), ...] - """ - - return extract_frames_from_video(str(video_uri), fps) - - -def ocr(image: np.ndarray) -> List[Dict[str, Any]]: - """'ocr' extracts text from an image. It returns a list of detected text, bounding - boxes, and confidence scores. - - Parameters: - image (np.ndarray): The image to extract text from. - - Returns: - List[Dict[str, Any]]: A list of dictionaries containing the detected text, bbox, - and confidence score. - - Example - ------- - >>> ocr(image) - [ - {'label': 'some text', 'bbox': [0.1, 0.11, 0.35, 0.4], 'score': 0.99}, - ] - """ - - pil_image = Image.fromarray(image).convert("RGB") - image_size = pil_image.size[::-1] - image_buffer = io.BytesIO() - pil_image.save(image_buffer, format="PNG") - buffer_bytes = image_buffer.getvalue() - image_buffer.close() - - res = requests.post( - _OCR_URL, - files={"images": buffer_bytes}, - data={"language": "en"}, - headers={"contentType": "multipart/form-data", "apikey": _API_KEY}, - ) - - if res.status_code != 200: - raise ValueError(f"OCR request failed with status code {res.status_code}") - - data = res.json() - output = [] - for det in data[0]: - label = det["text"] - box = [ - det["location"][0]["x"], - det["location"][0]["y"], - det["location"][2]["x"], - det["location"][2]["y"], - ] - box = normalize_bbox(box, image_size) - output.append({"label": label, "bbox": box, "score": round(det["score"], 2)}) - - return output - - -def zero_shot_counting(image: np.ndarray) -> Dict[str, Any]: - """'zero_shot_counting' is a tool that counts the dominant foreground object given an image and no other information about the content. - It returns only the count of the objects in the image. - - Parameters: - image (np.ndarray): The image that contains lot of instances of a single object - - Returns: - Dict[str, Any]: A dictionary containing the key 'count' and the count as a value. E.g. {count: 12}. 
- - Example - ------- - >>> zero_shot_counting(image) - {'count': 45}, - - """ - - image_b64 = convert_to_b64(image) - data = { - "image": image_b64, - "tool": "zero_shot_counting", - } - resp_data = _send_inference_request(data, "tools") - resp_data["heat_map"] = np.array(b64_to_pil(resp_data["heat_map"][0])) - return resp_data - - -def visual_prompt_counting( - image: np.ndarray, visual_prompt: Dict[str, List[float]] -) -> Dict[str, Any]: - """'visual_prompt_counting' is a tool that counts the dominant foreground object given an image and a visual prompt which is a bounding box describing the object. - It returns only the count of the objects in the image. - - Parameters: - image (np.ndarray): The image that contains lot of instances of a single object - - Returns: - Dict[str, Any]: A dictionary containing the key 'count' and the count as a value. E.g. {count: 12}. - - Example - ------- - >>> visual_prompt_counting(image, {"bbox": [0.1, 0.1, 0.4, 0.42]}) - {'count': 45}, - - """ - - image_size = get_image_size(image) - bbox = visual_prompt["bbox"] - bbox_str = ", ".join(map(str, denormalize_bbox(bbox, image_size))) - image_b64 = convert_to_b64(image) - - data = { - "image": image_b64, - "prompt": bbox_str, - "tool": "few_shot_counting", - } - resp_data = _send_inference_request(data, "tools") - resp_data["heat_map"] = np.array(b64_to_pil(resp_data["heat_map"][0])) - return resp_data - - -def image_question_answering(image: np.ndarray, prompt: str) -> str: - """'image_question_answering_' is a tool that can answer questions about the visual contents of an image given a question and an image. - It returns an answer to the question - - Parameters: - image (np.ndarray): The reference image used for the question - prompt (str): The question about the image - - Returns: - str: A string which is the answer to the given prompt. E.g. {'text': 'This image contains a cat sitting on a table with a bowl of milk.'}. - - Example - ------- - >>> image_question_answering(image, 'What is the cat doing ?') - 'drinking milk' - - """ - - image_b64 = convert_to_b64(image) - data = { - "image": image_b64, - "prompt": prompt, - "tool": "image_question_answering", - } - - answer = _send_inference_request(data, "tools") - return answer["text"][0] # type: ignore - - -def clip(image: np.ndarray, classes: List[str]) -> Dict[str, Any]: - """'clip' is a tool that can classify an image given a list of input classes or tags. - It returns the same list of the input classes along with their probability scores based on image content. - - Parameters: - image (np.ndarray): The image to classify or tag - classes (List[str]): The list of classes or tags that is associated with the image - - Returns: - Dict[str, Any]: A dictionary containing the labels and scores. One dictionary contains a list of given labels and other a list of scores. - - Example - ------- - >>> clip(image, ['dog', 'cat', 'bird']) - {"labels": ["dog", "cat", "bird"], "scores": [0.68, 0.30, 0.02]}, - - """ - - image_b64 = convert_to_b64(image) - data = { - "prompt": ",".join(classes), - "image": image_b64, - "tool": "closed_set_image_classification", - } - resp_data = _send_inference_request(data, "tools") - resp_data["scores"] = [round(prob, 4) for prob in resp_data["scores"]] - return resp_data - - -def image_caption(image: np.ndarray) -> str: - """'image_caption' is a tool that can caption an image based on its contents. - It returns a text describing the image. 
- - Parameters: - image (np.ndarray): The image to caption - - Returns: - str: A string which is the caption for the given image. - - Example - ------- - >>> image_caption(image) - 'This image contains a cat sitting on a table with a bowl of milk.' - - """ - - image_b64 = convert_to_b64(image) - data = { - "image": image_b64, - "tool": "image_captioning", - } - - answer = _send_inference_request(data, "tools") - return answer["text"][0] # type: ignore - - -def closest_mask_distance(mask1: np.ndarray, mask2: np.ndarray) -> float: - """'closest_mask_distance' calculates the closest distance between two masks. - - Parameters: - mask1 (np.ndarray): The first mask. - mask2 (np.ndarray): The second mask. - - Returns: - float: The closest distance between the two masks. - - Example - ------- - >>> closest_mask_distance(mask1, mask2) - 0.5 - """ - - mask1 = np.clip(mask1, 0, 1) - mask2 = np.clip(mask2, 0, 1) - mask1_points = np.transpose(np.nonzero(mask1)) - mask2_points = np.transpose(np.nonzero(mask2)) - dist_matrix = distance.cdist(mask1_points, mask2_points, "euclidean") - return cast(float, np.min(dist_matrix)) - - -def closest_box_distance( - box1: List[float], box2: List[float], image_size: Tuple[int, int] -) -> float: - """'closest_box_distance' calculates the closest distance between two bounding boxes. - - Parameters: - box1 (List[float]): The first bounding box. - box2 (List[float]): The second bounding box. - image_size (Tuple[int, int]): The size of the image given as (height, width). - - Returns: - float: The closest distance between the two bounding boxes. - - Example - ------- - >>> closest_box_distance([100, 100, 200, 200], [300, 300, 400, 400]) - 141.42 - """ - - x11, y11, x12, y12 = denormalize_bbox(box1, image_size) - x21, y21, x22, y22 = denormalize_bbox(box2, image_size) - - horizontal_distance = np.max([0, x21 - x12, x11 - x22]) - vertical_distance = np.max([0, y21 - y12, y11 - y22]) - return cast(float, np.sqrt(horizontal_distance**2 + vertical_distance**2)) - - -# Utility and visualization functions - - -def save_json(data: Any, file_path: str) -> None: - """'save_json' is a utility function that saves data as a JSON file. It is helpful - for saving data that contains NumPy arrays which are not JSON serializable. - - Parameters: - data (Any): The data to save. - file_path (str): The path to save the JSON file. - - Example - ------- - >>> save_json(data, "path/to/file.json") - """ - - class NumpyEncoder(json.JSONEncoder): - def default(self, obj: Any): # type: ignore - if isinstance(obj, np.ndarray): - return obj.tolist() - elif isinstance(obj, np.bool_): - return bool(obj) - return json.JSONEncoder.default(self, obj) - - with open(file_path, "w") as f: - json.dump(data, f, cls=NumpyEncoder) - - -def load_image(image_path: str) -> np.ndarray: - """'load_image' is a utility function that loads an image from the given path. - - Parameters: - image_path (str): The path to the image. - - Returns: - np.ndarray: The image as a NumPy array. - - Example - ------- - >>> load_image("path/to/image.jpg") - """ - - image = Image.open(image_path).convert("RGB") - return np.array(image) - - -def save_image(image: np.ndarray) -> str: - """'save_image' is a utility function that saves an image as a temporary file. - - Parameters: - image (np.ndarray): The image to save. - - Returns: - str: The path to the saved image. 
- - Example - ------- - >>> save_image(image) - "/tmp/tmpabc123.png" - """ - - with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f: - pil_image = Image.fromarray(image.astype(np.uint8)) - pil_image.save(f, "PNG") - return f.name - - -def overlay_bounding_boxes( - image: np.ndarray, bboxes: List[Dict[str, Any]] -) -> np.ndarray: - """'display_bounding_boxes' is a utility function that displays bounding boxes on - an image. - - Parameters: - image (np.ndarray): The image to display the bounding boxes on. - bboxes (List[Dict[str, Any]]): A list of dictionaries containing the bounding - boxes. - - Returns: - np.ndarray: The image with the bounding boxes, labels and scores displayed. - - Example - ------- - >>> image_with_bboxes = display_bounding_boxes( - image, [{'score': 0.99, 'label': 'dinosaur', 'bbox': [0.1, 0.11, 0.35, 0.4]}], - ) - """ - pil_image = Image.fromarray(image.astype(np.uint8)) - - if len(set([box["label"] for box in bboxes])) > len(COLORS): - _LOGGER.warning( - "Number of unique labels exceeds the number of available colors. Some labels may have the same color." - ) - - color = { - label: COLORS[i % len(COLORS)] - for i, label in enumerate(set([box["label"] for box in bboxes])) - } - - width, height = pil_image.size - fontsize = max(12, int(min(width, height) / 40)) - draw = ImageDraw.Draw(pil_image) - font = ImageFont.truetype( - str(resources.files("vision_agent.fonts").joinpath("default_font_ch_en.ttf")), - fontsize, - ) - - for elt in bboxes: - label = elt["label"] - box = elt["bbox"] - scores = elt["score"] - - box = [ - int(box[0] * width), - int(box[1] * height), - int(box[2] * width), - int(box[3] * height), - ] - draw.rectangle(box, outline=color[label], width=4) - text = f"{label}: {scores:.2f}" - text_box = draw.textbbox((box[0], box[1]), text=text, font=font) - draw.rectangle((box[0], box[1], text_box[2], text_box[3]), fill=color[label]) - draw.text((box[0], box[1]), text, fill="black", font=font) - return np.array(pil_image.convert("RGB")) - - -def overlay_segmentation_masks( - image: np.ndarray, masks: List[Dict[str, Any]] -) -> np.ndarray: - """'display_segmentation_masks' is a utility function that displays segmentation - masks. - - Parameters: - image (np.ndarray): The image to display the masks on. - masks (List[Dict[str, Any]]): A list of dictionaries containing the masks. - - Returns: - np.ndarray: The image with the masks displayed. - - Example - ------- - >>> image_with_masks = display_segmentation_masks( - image, - [{ - 'score': 0.99, - 'label': 'dinosaur', - 'mask': array([[0, 0, 0, ..., 0, 0, 0], - [0, 0, 0, ..., 0, 0, 0], - ..., - [0, 0, 0, ..., 0, 0, 0], - [0, 0, 0, ..., 0, 0, 0]], dtype=uint8), - }], - ) - """ - pil_image = Image.fromarray(image.astype(np.uint8)).convert("RGBA") - - if len(set([mask["label"] for mask in masks])) > len(COLORS): - _LOGGER.warning( - "Number of unique labels exceeds the number of available colors. Some labels may have the same color." 
- ) - - color = { - label: COLORS[i % len(COLORS)] - for i, label in enumerate(set([mask["label"] for mask in masks])) - } - - for elt in masks: - mask = elt["mask"] - label = elt["label"] - np_mask = np.zeros((pil_image.size[1], pil_image.size[0], 4)) - np_mask[mask > 0, :] = color[label] + (255 * 0.5,) - mask_img = Image.fromarray(np_mask.astype(np.uint8)) - pil_image = Image.alpha_composite(pil_image, mask_img) - return np.array(pil_image.convert("RGB")) - - -def get_tool_documentation(funcs: List[Callable[..., Any]]) -> str: - docstrings = "" - for func in funcs: - docstrings += f"{func.__name__}{inspect.signature(func)}:\n{func.__doc__}\n\n" - - return docstrings - - -def get_tool_descriptions(funcs: List[Callable[..., Any]]) -> str: - descriptions = "" - for func in funcs: - description = func.__doc__ - if description is None: - description = "" - - description = ( - description[: description.find("Parameters:")].replace("\n", " ").strip() - ) - description = " ".join(description.split()) - descriptions += f"- {func.__name__}{inspect.signature(func)}: {description}\n" - return descriptions - - -def get_tools_df(funcs: List[Callable[..., Any]]) -> pd.DataFrame: - data: Dict[str, List[str]] = {"desc": [], "doc": []} - - for func in funcs: - desc = func.__doc__ - if desc is None: - desc = "" - desc = desc[: desc.find("Parameters:")].replace("\n", " ").strip() - desc = " ".join(desc.split()) - - doc = f"{func.__name__}{inspect.signature(func)}:\n{func.__doc__}" - data["desc"].append(desc) - data["doc"].append(doc) - - return pd.DataFrame(data) # type: ignore - - -TOOLS = [ - grounding_dino, - grounding_sam, - extract_frames, - ocr, - clip, - zero_shot_counting, - visual_prompt_counting, - image_question_answering, - image_caption, - closest_mask_distance, - closest_box_distance, - save_json, - load_image, - save_image, - overlay_bounding_boxes, - overlay_segmentation_masks, -] -TOOLS_DF = get_tools_df(TOOLS) # type: ignore -TOOL_DESCRIPTIONS = get_tool_descriptions(TOOLS) # type: ignore -TOOL_DOCSTRING = get_tool_documentation(TOOLS) # type: ignore -UTILITIES_DOCSTRING = get_tool_documentation( - [save_json, load_image, save_image, overlay_bounding_boxes] -) From a7968a98fbd01d77c31d190ba977eb4bc545cbe7 Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 21:26:58 -0700 Subject: [PATCH 06/11] return type of __call__ is str --- vision_agent/agent/vision_agent.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/vision_agent/agent/vision_agent.py b/vision_agent/agent/vision_agent.py index 08aa8917..f25ff67b 100644 --- a/vision_agent/agent/vision_agent.py +++ b/vision_agent/agent/vision_agent.py @@ -273,17 +273,16 @@ def __init__( self.max_retries = 2 self.report_progress_callback = report_progress_callback - @no_type_check def __call__( self, input: Union[List[Dict[str, str]], str], media: Optional[Union[str, Path]] = None, - ) -> Dict[str, Any]: + ) -> str: if isinstance(input, str): input = [{"role": "user", "content": input}] results = self.chat_with_workflow(input, media) results.pop("working_memory") - return results + return results["code"] # type: ignore def chat_with_workflow( self, From 1217c0c06a1b74447b75f49b4a1ef972ab7a1f43 Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 21:27:21 -0700 Subject: [PATCH 07/11] updated docs --- README.md | 159 +++++++++++++------------------------ docs/easy_tool_v2.md | 183 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 237 insertions(+), 105 deletions(-) create mode 100644 
docs/easy_tool_v2.md diff --git a/README.md b/README.md index c4c16e1b..1926f027 100644 --- a/README.md +++ b/README.md @@ -9,13 +9,12 @@ ![version](https://img.shields.io/pypi/pyversions/vision-agent) -Vision Agent is a library that helps you utilize agent frameworks for your vision tasks. -Many current vision problems can easily take hours or days to solve, you need to find the -right model, figure out how to use it, possibly write programming logic around it to -accomplish the task you want or even more expensive, train your own model. Vision Agent -aims to provide an in-seconds experience by allowing users to describe their problem in -text and utilizing agent frameworks to solve the task for them. Check out our discord -for updates and roadmaps! +Vision Agent is a library that helps you utilize agent frameworks to generate code to +solve your vision task. Many current vision problems can easily take hours or days to +solve, you need to find the right model, figure out how to use it and program it to +accomplish the task you want. Vision Agent aims to provide an in-seconds experience by +allowing users to describe their problem in text and have the agent framework generate +code to solve the task for them. Check out our discord for updates and roadmaps! ## Documentation @@ -37,70 +36,71 @@ using Azure OpenAI please see the Azure setup section): export OPENAI_API_KEY="your-api-key" ``` -### Vision Agents -You can interact with the agents as you would with any LLM or LMM model: +### Vision Agent +You can interact with the agent as you would with any LLM or LMM model: ```python >>> from vision_agent.agent import VisionAgent >>> agent = VisionAgent() ->>> agent("What percentage of the area of this jar is filled with coffee beans?", image="jar.jpg") -"The percentage of area of the jar filled with coffee beans is 25%." 
+>>> code = agent("What percentage of the area of the jar is filled with coffee beans?", media="jar.jpg") ``` -To better understand how the model came up with it's answer, you can also run it in -debug mode by passing in the verbose argument: - +Which produces the following code: ```python ->>> agent = VisionAgent(verbose=True) +from vision_agent.tools import load_image, grounding_sam + +def calculate_filled_percentage(image_path: str) -> float: + # Step 1: Load the image + image = load_image(image_path) + + # Step 2: Segment the jar + jar_segments = grounding_sam(prompt="jar", image=image) + + # Step 3: Segment the coffee beans + coffee_beans_segments = grounding_sam(prompt="coffee beans", image=image) + + # Step 4: Calculate the area of the segmented jar + jar_area = 0 + for segment in jar_segments: + jar_area += segment['mask'].sum() + + # Step 5: Calculate the area of the segmented coffee beans + coffee_beans_area = 0 + for segment in coffee_beans_segments: + coffee_beans_area += segment['mask'].sum() + + # Step 6: Compute the percentage of the jar area that is filled with coffee beans + if jar_area == 0: + return 0.0 # To avoid division by zero + filled_percentage = (coffee_beans_area / jar_area) * 100 + + # Step 7: Return the computed percentage + return filled_percentage ``` -You can also have it return the workflow it used to complete the task along with all -the individual steps and tools to get the answer: +To better understand how the model came up with it's answer, you can run it in debug +mode by passing in the verbose argument: ```python ->>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "What percentage of the area of this jar is filled with coffee beans?"}], image="jar.jpg") ->>> print(workflow) -[{"task": "Segment the jar using 'grounding_sam_'.", - "tool": "grounding_sam_", - "parameters": {"prompt": "jar", "image": "jar.jpg"}, - "call_results": [[ - { - "labels": ["jar"], - "scores": [0.99], - "bboxes": [ - [0.58, 0.2, 0.72, 0.45], - ], - "masks": "mask.png" - } - ]], - "answer": "The jar is located at [0.58, 0.2, 0.72, 0.45].", -}, -{"visualize_output": "final_output.png"}] +>>> agent = VisionAgent(verbose=2) ``` -You can also provide reference data for the model to utilize. For example, if you want -to utilize VisualPromptCounting: +You can also have it return more information by calling `chat_with_workflow`: ```python -agent( - "How many apples are in this image?", - image="apples.jpg", - reference_data={"bbox": [0.1, 0.11, 0.24, 0.25]}, -) +>>> results = agent.chat_with_workflow([{"role": "user", "content": "What percentage of the area of the jar is filled with coffee beans?"}], media="jar.jpg") +>>> print(results) +{ + "code": "from vision_agent.tools import ..." + "test": "calculate_filled_percentage('jar.jpg')", + "test_result": "...", + "plan": [{"code": "...", "test": "...", "plan": "..."}, ...], + "working_memory": ..., +} ``` -Where `[0.1, 0.11, 0.24, 0.25]` is the normalized bounding box coordinates of an apple. -Similarly for DINOv you can provide a reference image and mask: -```python -agent( - "Can you detect all of the objects similar to the mask I've provided?", - image="image.jpg", - reference_data={"mask": "reference_mask.png", "image": "reference_image.png"}, -) -``` -Here, `reference_mask.png` and `reference_image.png` in `reference_data` could be any -image with it's corresponding mask that is the object you want to detect in `image.jpg`. -You can find a demo app to generate masks for DINOv [here](examples/mask_app/). 
+With this you can examine more detailed information such as the etesting code, testing +results, plan or working memory it used to complete the task. ### Tools There are a variety of tools for the model or the user to use. Some are executed locally @@ -120,57 +120,6 @@ you. For example: }] ``` -#### Custom Tools -You can also add your own custom tools for your vision agent to use: - -```python -from vision_agent.tools import Tool, register_tool -@register_tool -class NumItems(Tool): - name = "num_items_" - description = "Returns the number of items in a list." - usage = { - "required_parameters": [{"name": "prompt", "type": "list"}], - "examples": [ - { - "scenario": "How many items are in this list? ['a', 'b', 'c']", - "parameters": {"prompt": "['a', 'b', 'c']"}, - } - ], - } - def __call__(self, prompt: list[str]) -> int: - return len(prompt) -``` -This will register it with the list of tools Vision Agent has access to. It will be able -to pick it based on the tool description and use it based on the usage provided. You can -find an example that creates a custom tool for template matching [here](examples/custom_tools/). - -#### Tool List -| Tool | Description | -| --- | --- | -| CLIP | CLIP is a tool that can classify or tag any image given a set of input classes or tags. | -| ImageCaption| ImageCaption is a tool that can generate a caption for an image. | -| GroundingDINO | GroundingDINO is a tool that can detect arbitrary objects with inputs such as category names or referring expressions. | -| GroundingSAM | GroundingSAM is a tool that can detect and segment arbitrary objects with inputs such as category names or referring expressions. | -| DINOv | DINOv is a tool that can detect arbitrary objects with using a referring mask. | -| Crop | Crop crops an image given a bounding box and returns a file name of the cropped image. | -| BboxArea | BboxArea returns the area of the bounding box in pixels normalized to 2 decimal places. | -| SegArea | SegArea returns the area of the segmentation mask in pixels normalized to 2 decimal places. | -| BboxIoU | BboxIoU returns the intersection over union of two bounding boxes normalized to 2 decimal places. | -| SegIoU | SegIoU returns the intersection over union of two segmentation masks normalized to 2 decimal places. | -| BoxDistance | BoxDistance returns the minimum distance between two bounding boxes normalized to 2 decimal places. | -| MaskDistance | MaskDistance returns the minimum distance between two segmentation masks in pixel units | -| BboxContains | BboxContains returns the intersection of two boxes over the target box area. It is good for check if one box is contained within another box. | -| ExtractFrames | ExtractFrames extracts frames with motion from a video. | -| ZeroShotCounting | ZeroShotCounting returns the total number of objects belonging to a single class in a given image. | -| VisualPromptCounting | VisualPromptCounting returns the total number of objects belonging to a single class given an image and visual prompt. | -| VisualQuestionAnswering | VisualQuestionAnswering is a tool that can explain the contents of an image and answer questions about the image. | -| ImageQuestionAnswering | ImageQuestionAnswering is similar to VisualQuestionAnswering but does not rely on OpenAI and instead uses a dedicated model for the task. | -| OCR | OCR returns the text detected in an image along with the location. | - - -It also has a basic set of calculate tools such as add, subtract, multiply and divide. 
- ### Azure Setup If you want to use Azure OpenAI models, you can set the environment variable: diff --git a/docs/easy_tool_v2.md b/docs/easy_tool_v2.md new file mode 100644 index 00000000..4e4cca7f --- /dev/null +++ b/docs/easy_tool_v2.md @@ -0,0 +1,183 @@ +# 🔍🤖 Easy Tool V2 + +Easy Tool V2 is a library that helps you utilize agent frameworks for your vision tasks. +Many current vision problems can easily take hours or days to solve, you need to find the +right model, figure out how to use it, possibly write programming logic around it to +accomplish the task you want or even more expensive, train your own model. Easy Tool V2 +aims to provide an in-seconds experience by allowing users to describe their problem in +text and utilizing agent frameworks to solve the task for them. Check out our discord +for updates and roadmaps! + +## Documentation + +- [Easy Tool V2 Library Docs](https://landing-ai.github.io/vision-agent/) + + +## Getting Started +### Installation +To get started, you can install the library using pip: + +```bash +pip install vision-agent +``` + +Ensure you have an OpenAI API key and set it as an environment variable (if you are +using Azure OpenAI please see the Azure setup section): + +```bash +export OPENAI_API_KEY="your-api-key" +``` + +### Easy Tool V2 +You can interact with the agents as you would with any LLM or LMM model: + +```python +>>> from vision_agent.agent import EasyToolV2 +>>> agent = EasyToolV2() +>>> agent("What percentage of the area of this jar is filled with coffee beans?", image="jar.jpg") +"The percentage of area of the jar filled with coffee beans is 25%." +``` + +To better understand how the model came up with it's answer, you can also run it in +debug mode by passing in the verbose argument: + +```python +>>> agent = EasyToolV2(verbose=True) +``` + +You can also have it return the workflow it used to complete the task along with all +the individual steps and tools to get the answer: + +```python +>>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "What percentage of the area of this jar is filled with coffee beans?"}], image="jar.jpg") +>>> print(workflow) +[{"task": "Segment the jar using 'grounding_sam_'.", + "tool": "grounding_sam_", + "parameters": {"prompt": "jar", "image": "jar.jpg"}, + "call_results": [[ + { + "labels": ["jar"], + "scores": [0.99], + "bboxes": [ + [0.58, 0.2, 0.72, 0.45], + ], + "masks": "mask.png" + } + ]], + "answer": "The jar is located at [0.58, 0.2, 0.72, 0.45].", +}, +{"visualize_output": "final_output.png"}] +``` + +You can also provide reference data for the model to utilize. For example, if you want +to utilize VisualPromptCounting: + +```python +agent( + "How many apples are in this image?", + image="apples.jpg", + reference_data={"bbox": [0.1, 0.11, 0.24, 0.25]}, +) +``` +Where `[0.1, 0.11, 0.24, 0.25]` is the normalized bounding box coordinates of an apple. +Similarly for DINOv you can provide a reference image and mask: + +```python +agent( + "Can you detect all of the objects similar to the mask I've provided?", + image="image.jpg", + reference_data={"mask": "reference_mask.png", "image": "reference_image.png"}, +) +``` +Here, `reference_mask.png` and `reference_image.png` in `reference_data` could be any +image with it's corresponding mask that is the object you want to detect in `image.jpg`. +You can find a demo app to generate masks for DINOv [here](examples/mask_app/). + +### Tools +There are a variety of tools for the model or the user to use. 
Some are executed locally +while others are hosted for you. You can also ask an LLM directly to build a tool for +you. For example: + +```python +>>> import vision_agent as va +>>> llm = va.llm.OpenAILLM() +>>> detector = llm.generate_detector("Can you build a jar detector for me?") +>>> detector("jar.jpg") +[{"labels": ["jar",], + "scores": [0.99], + "bboxes": [ + [0.58, 0.2, 0.72, 0.45], + ] +}] +``` + +#### Custom Tools +You can also add your own custom tools for your vision agent to use: + +```python +from vision_agent.tools import Tool, register_tool +@register_tool +class NumItems(Tool): + name = "num_items_" + description = "Returns the number of items in a list." + usage = { + "required_parameters": [{"name": "prompt", "type": "list"}], + "examples": [ + { + "scenario": "How many items are in this list? ['a', 'b', 'c']", + "parameters": {"prompt": "['a', 'b', 'c']"}, + } + ], + } + def __call__(self, prompt: list[str]) -> int: + return len(prompt) +``` +This will register it with the list of tools Easy Tool V2 has access to. It will be able +to pick it based on the tool description and use it based on the usage provided. You can +find an example that creates a custom tool for template matching [here](examples/custom_tools/). + +#### Tool List +| Tool | Description | +| --- | --- | +| CLIP | CLIP is a tool that can classify or tag any image given a set of input classes or tags. | +| ImageCaption| ImageCaption is a tool that can generate a caption for an image. | +| GroundingDINO | GroundingDINO is a tool that can detect arbitrary objects with inputs such as category names or referring expressions. | +| GroundingSAM | GroundingSAM is a tool that can detect and segment arbitrary objects with inputs such as category names or referring expressions. | +| DINOv | DINOv is a tool that can detect arbitrary objects with using a referring mask. | +| Crop | Crop crops an image given a bounding box and returns a file name of the cropped image. | +| BboxArea | BboxArea returns the area of the bounding box in pixels normalized to 2 decimal places. | +| SegArea | SegArea returns the area of the segmentation mask in pixels normalized to 2 decimal places. | +| BboxIoU | BboxIoU returns the intersection over union of two bounding boxes normalized to 2 decimal places. | +| SegIoU | SegIoU returns the intersection over union of two segmentation masks normalized to 2 decimal places. | +| BoxDistance | BoxDistance returns the minimum distance between two bounding boxes normalized to 2 decimal places. | +| MaskDistance | MaskDistance returns the minimum distance between two segmentation masks in pixel units | +| BboxContains | BboxContains returns the intersection of two boxes over the target box area. It is good for check if one box is contained within another box. | +| ExtractFrames | ExtractFrames extracts frames with motion from a video. | +| ZeroShotCounting | ZeroShotCounting returns the total number of objects belonging to a single class in a given image. | +| VisualPromptCounting | VisualPromptCounting returns the total number of objects belonging to a single class given an image and visual prompt. | +| VisualQuestionAnswering | VisualQuestionAnswering is a tool that can explain the contents of an image and answer questions about the image. | +| ImageQuestionAnswering | ImageQuestionAnswering is similar to VisualQuestionAnswering but does not rely on OpenAI and instead uses a dedicated model for the task. | +| OCR | OCR returns the text detected in an image along with the location. 
| + + +It also has a basic set of calculate tools such as add, subtract, multiply and divide. + +### Azure Setup +If you want to use Azure OpenAI models, you can set the environment variable: + +```bash +export AZURE_OPENAI_API_KEY="your-api-key" +export AZURE_OPENAI_ENDPOINT="your-endpoint" +``` + +You can then run Easy Tool V2 using the Azure OpenAI models: + +```python +>>> import vision_agent as va +>>> agent = va.agent.EasyToolV2( +>>> task_model=va.llm.AzureOpenAILLM(), +>>> answer_model=va.lmm.AzureOpenAILMM(), +>>> reflection_model=va.lmm.AzureOpenAILMM(), +>>> ) +``` + From b91849ac38206881dfb92d03c0cacb0ecf3f6aa4 Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 21:29:27 -0700 Subject: [PATCH 08/11] remove unused import --- vision_agent/agent/vision_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vision_agent/agent/vision_agent.py b/vision_agent/agent/vision_agent.py index f25ff67b..3d13bedd 100644 --- a/vision_agent/agent/vision_agent.py +++ b/vision_agent/agent/vision_agent.py @@ -3,7 +3,7 @@ import logging import sys from pathlib import Path -from typing import Any, Callable, Dict, List, Optional, Union, cast, no_type_check +from typing import Any, Callable, Dict, List, Optional, Union, cast from rich.console import Console from rich.syntax import Syntax From 6c60ed6766bf4d65e58e1c68b5b58f35c420bf22 Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 21:31:19 -0700 Subject: [PATCH 09/11] black --- tests/tools/test_tools.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/tools/test_tools.py b/tests/tools/test_tools.py index 5ac5a8c9..3e7c0a0d 100644 --- a/tests/tools/test_tools.py +++ b/tests/tools/test_tools.py @@ -6,7 +6,13 @@ from PIL import Image from vision_agent.tools.easytool_tools import TOOLS, Tool, register_tool -from vision_agent.tools.easytool_tools import BboxIoU, BoxDistance, MaskDistance, SegArea, SegIoU +from vision_agent.tools.easytool_tools import ( + BboxIoU, + BoxDistance, + MaskDistance, + SegArea, + SegIoU, +) def test_bbox_iou(): From eb33d08aa955bfba78f351ee653b501d9dbafc23 Mon Sep 17 00:00:00 2001 From: Dillon Laird Date: Tue, 28 May 2024 21:40:04 -0700 Subject: [PATCH 10/11] added documentation for chat with workflow --- vision_agent/agent/easytool_v2.py | 2 +- vision_agent/agent/vision_agent.py | 53 +++++++++++++++++++++++++++++- 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/vision_agent/agent/easytool_v2.py b/vision_agent/agent/easytool_v2.py index 035dc391..1ef382e7 100644 --- a/vision_agent/agent/easytool_v2.py +++ b/vision_agent/agent/easytool_v2.py @@ -544,7 +544,7 @@ def chat_with_workflow( visualize_output: Optional[bool] = False, self_reflection: Optional[bool] = True, ) -> Tuple[str, List[Dict]]: - """Chat with the vision agent and return the final answer and all tool results. + """Chat with EasyToolV2 and return the final answer and all tool results. Parameters: chat: A conversation in the format of diff --git a/vision_agent/agent/vision_agent.py b/vision_agent/agent/vision_agent.py index 3d13bedd..ac4a2453 100644 --- a/vision_agent/agent/vision_agent.py +++ b/vision_agent/agent/vision_agent.py @@ -245,6 +245,18 @@ def retrieve_tools( class VisionAgent(Agent): + """Vision Agent is an agentic framework that can output code based on a user + request. It can plan tasks, retrieve relevant tools, write code, write tests and + reflect on failed test cases to debug code. 
It is inspired by AgentCoder
+    https://arxiv.org/abs/2312.13010 and Data Interpreter
+    https://arxiv.org/abs/2402.18679
+
+    Example
+    -------
+        >>> from vision_agent import VisionAgent
+        >>> agent = VisionAgent()
+        >>> code = agent("What percentage of the area of the jar is filled with coffee beans?", media="jar.jpg")
+    """
     def __init__(
         self,
         planner: Optional[LLM] = None,
@@ -255,6 +267,22 @@ def __init__(
         verbosity: int = 0,
         report_progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
     ) -> None:
+        """Initialize the Vision Agent.
+
+        Parameters:
+            planner (Optional[LLM]): The planner model to use. Defaults to OpenAILLM.
+            coder (Optional[LLM]): The coder model to use. Defaults to OpenAILLM.
+            tester (Optional[LLM]): The tester model to use. Defaults to OpenAILLM.
+            debugger (Optional[LLM]): The debugger model to use.
+            tool_recommender (Optional[Sim]): The tool recommender model to use.
+            verbosity (int): The verbosity level of the agent. Defaults to 0. 2 is the
+                highest verbosity level which will output all intermediate debugging
+                code.
+            report_progress_callback: a callback to report the progress of the agent.
+                This is useful for streaming logs in a web application where multiple
+                VisionAgent instances are running in parallel. This callback ensures
+                that progress updates are not mixed up.
+        """
         self.planner = (
             OpenAILLM(temperature=0.0, json_mode=True) if planner is None else planner
         )
@@ -278,6 +306,17 @@ def __call__(
         input: Union[List[Dict[str, str]], str],
         media: Optional[Union[str, Path]] = None,
     ) -> str:
+        """Chat with Vision Agent and return the generated code to solve the task.
+
+        Parameters:
+            input (Union[List[Dict[str, str]], str]): A conversation in the format of
+                [{"role": "user", "content": "describe your task here..."}] or a
+                string containing just the task description.
+            media (Optional[Union[str, Path]]): The media file to be used in the task.
+
+        Returns:
+            str: The code output by the Vision Agent.
+        """
         if isinstance(input, str):
             input = [{"role": "user", "content": input}]
         results = self.chat_with_workflow(input, media)
@@ -290,6 +329,18 @@ def chat_with_workflow(
         media: Optional[Union[str, Path]] = None,
         self_reflection: bool = False,
     ) -> Dict[str, Any]:
+        """Chat with Vision Agent and return intermediate information regarding the task.
+
+        Parameters:
+            chat (List[Dict[str, str]]): A conversation in the format of
+                [{"role": "user", "content": "describe your task here..."}].
+            media (Optional[Union[str, Path]]): The media file to be used in the task.
+            self_reflection (bool): Whether to reflect on the task and debug the code.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing the code, test, test result, plan,
+                and working memory of the agent.
+        """
         if len(chat) == 0:
             raise ValueError("Chat cannot be empty.")

@@ -373,7 +424,7 @@ def chat_with_workflow(

         self.log_progress(
             {
-                "log": f"The Vision Agent V3 has concluded this chat.\nSuccess: {success}",
+                "log": f"Vision Agent has concluded this chat.\nSuccess: {success}",
                 "finished": True,
             }
         )

From 71bcb895fd5e08985afa6787c7aa8784810e2fe7 Mon Sep 17 00:00:00 2001
From: Dillon Laird
Date: Tue, 28 May 2024 21:46:22 -0700
Subject: [PATCH 11/11] formatting fix

---
 vision_agent/agent/vision_agent.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/vision_agent/agent/vision_agent.py b/vision_agent/agent/vision_agent.py
index ac4a2453..f45ca7b4 100644
--- a/vision_agent/agent/vision_agent.py
+++ b/vision_agent/agent/vision_agent.py
@@ -257,6 +257,7 @@ class VisionAgent(Agent):
         >>> agent = VisionAgent()
         >>> code = agent("What percentage of the area of the jar is filled with coffee beans?", media="jar.jpg")
     """
+
     def __init__(
         self,
         planner: Optional[LLM] = None,
@@ -283,6 +284,7 @@ def __init__(
                 VisionAgent instances are running in parallel. This callback ensures
                 that progress updates are not mixed up.
         """
+
         self.planner = (
             OpenAILLM(temperature=0.0, json_mode=True) if planner is None else planner
         )
@@ -317,6 +319,7 @@ def __call__(
         Returns:
             str: The code output by the Vision Agent.
         """
+
         if isinstance(input, str):
             input = [{"role": "user", "content": input}]
         results = self.chat_with_workflow(input, media)
@@ -341,6 +344,7 @@ def chat_with_workflow(
             Dict[str, Any]: A dictionary containing the code, test, test result, plan,
                 and working memory of the agent.
         """
+
         if len(chat) == 0:
             raise ValueError("Chat cannot be empty.")
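For quick reference, a minimal usage sketch of the public API as these patches leave it. This is illustrative only: the import path `vision_agent.agent`, the class names, the `media`/`image` keyword arguments, the result keys of `chat_with_workflow`, and the `jar.jpg` example file are all taken from the README, docs, and docstrings above, and a configured OpenAI API key is assumed.

```python
from vision_agent.agent import EasyToolV2, VisionAgent

# VisionAgent.__call__ now returns the generated Python code as a string
# (see PATCH 06), so the result can be inspected or saved directly.
agent = VisionAgent()
code = agent(
    "What percentage of the area of the jar is filled with coffee beans?",
    media="jar.jpg",
)
print(code)

# chat_with_workflow exposes the intermediate artifacts documented in PATCH 10:
# code, test, test_result, plan, and working_memory.
results = agent.chat_with_workflow(
    [{"role": "user", "content": "What percentage of the area of the jar is filled with coffee beans?"}],
    media="jar.jpg",
)
print(results["code"], results["test"], results["plan"])

# The previous tool-calling agent remains available under its new name and
# keeps its text-answer behaviour (see docs/easy_tool_v2.md).
easy_agent = EasyToolV2()
answer = easy_agent(
    "What percentage of the area of this jar is filled with coffee beans?",
    image="jar.jpg",
)
print(answer)
```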