From 54ae6620d5fb6d924caca3f01d5690121a20d7d3 Mon Sep 17 00:00:00 2001
From: Dillon Laird
Date: Mon, 20 May 2024 17:45:54 -0700
Subject: [PATCH] Vision Agent v3 (#89)

* renamed prompt to prompts

* added vision agent v3

* fixed execute issue

* fixed type issue

* fixed flake8

* black and isort

* switch to simple test case

* fixed issue with chat not resetting

* removed unused import

* prompt updates

* update prompts

* remove debug

---------

Co-authored-by: shankar-landing-ai
---
 tests/test_tools.py                           |   8 +-
 vision_agent/agent/__init__.py                |   1 +
 vision_agent/agent/vision_agent_v2.py         |   2 +-
 ...2_prompt.py => vision_agent_v2_prompts.py} |   0
 vision_agent/agent/vision_agent_v3.py         | 305 ++++++++++++++++++
 vision_agent/agent/vision_agent_v3_prompts.py | 221 +++++++++++++
 vision_agent/tools/tools_v2.py                |   6 +-
 vision_agent/utils/execute.py                 |   3 +
 8 files changed, 538 insertions(+), 8 deletions(-)
 rename vision_agent/agent/{vision_agent_v2_prompt.py => vision_agent_v2_prompts.py} (100%)
 create mode 100644 vision_agent/agent/vision_agent_v3.py
 create mode 100644 vision_agent/agent/vision_agent_v3_prompts.py

diff --git a/tests/test_tools.py b/tests/test_tools.py
index 2a848a02..56ca2e02 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -2,13 +2,13 @@
 from vision_agent.tools.tools_v2 import (
     clip,
-    zero_shot_counting,
-    visual_prompt_counting,
-    image_question_answering,
-    ocr,
     grounding_dino,
     grounding_sam,
     image_caption,
+    image_question_answering,
+    ocr,
+    visual_prompt_counting,
+    zero_shot_counting,
 )
diff --git a/vision_agent/agent/__init__.py b/vision_agent/agent/__init__.py
index b358d3b0..2f62dbf1 100644
--- a/vision_agent/agent/__init__.py
+++ b/vision_agent/agent/__init__.py
@@ -4,3 +4,4 @@
 from .reflexion import Reflexion
 from .vision_agent import VisionAgent
 from .vision_agent_v2 import VisionAgentV2
+from .vision_agent_v3 import VisionAgentV3
diff --git a/vision_agent/agent/vision_agent_v2.py b/vision_agent/agent/vision_agent_v2.py
index a20afcee..1c7083a8 100644
--- a/vision_agent/agent/vision_agent_v2.py
+++ b/vision_agent/agent/vision_agent_v2.py
@@ -10,7 +10,7 @@
 from tabulate import tabulate
 
 from vision_agent.agent import Agent
-from vision_agent.agent.vision_agent_v2_prompt import (
+from vision_agent.agent.vision_agent_v2_prompts import (
     CODE,
     CODE_SYS_MSG,
     DEBUG,
diff --git a/vision_agent/agent/vision_agent_v2_prompt.py b/vision_agent/agent/vision_agent_v2_prompts.py
similarity index 100%
rename from vision_agent/agent/vision_agent_v2_prompt.py
rename to vision_agent/agent/vision_agent_v2_prompts.py
diff --git a/vision_agent/agent/vision_agent_v3.py b/vision_agent/agent/vision_agent_v3.py
new file mode 100644
index 00000000..d8de28c6
--- /dev/null
+++ b/vision_agent/agent/vision_agent_v3.py
@@ -0,0 +1,305 @@
+import copy
+import json
+import logging
+import sys
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Union, cast
+
+from rich.console import Console
+from rich.syntax import Syntax
+from tabulate import tabulate
+
+from vision_agent.agent import Agent
+from vision_agent.agent.vision_agent_v3_prompts import (
+    CODE,
+    FEEDBACK,
+    FIX_BUG,
+    PLAN,
+    REFLECT,
+    SIMPLE_TEST,
+    USER_REQ,
+)
+from vision_agent.llm import LLM, OpenAILLM
+from vision_agent.tools.tools_v2 import TOOL_DESCRIPTIONS, TOOLS_DF, UTILITIES_DOCSTRING
+from vision_agent.utils import Execute
+from vision_agent.utils.sim import Sim
+
+logging.basicConfig(stream=sys.stdout)
+_LOGGER = logging.getLogger(__name__)
+_MAX_TABULATE_COL_WIDTH = 80
+_EXECUTE = Execute(600)
+_CONSOLE = Console()
+
+
+def format_memory(memory: List[Dict[str, str]]) -> str:
+    return FEEDBACK.format(
+        feedback="\n".join(
+            [
+                f"### Feedback {i}:\nCode: ```python\n{m['code']}\n```\nFeedback: {m['feedback']}\n"
+                for i, m in enumerate(memory)
+            ]
+        )
+    )
+
+
+def extract_code(code: str) -> str:
+    if "\n```python" in code:
+        start = "\n```python"
+    elif "```python" in code:
+        start = "```python"
+    else:
+        return code
+
+    code = code[code.find(start) + len(start) :]
+    code = code[: code.find("```")]
+    if code.startswith("python\n"):
+        code = code[len("python\n") :]
+    return code
+
+
+def extract_json(json_str: str) -> Dict[str, Any]:
+    try:
+        json_dict = json.loads(json_str)
+    except json.JSONDecodeError:
+        if "```json" in json_str:
+            json_str = json_str[json_str.find("```json") + len("```json") :]
+            json_str = json_str[: json_str.find("```")]
+        elif "```" in json_str:
+            json_str = json_str[json_str.find("```") + len("```") :]
+            # slice up to and including the } of the last }``` so the JSON
+            # object stays intact and intermediate ``` strings are skipped
+            json_str = json_str[: json_str.rfind("}```") + 1]
+        json_dict = json.loads(json_str)
+    return json_dict  # type: ignore
+
+
+def write_plan(
+    chat: List[Dict[str, str]],
+    tool_desc: str,
+    working_memory: str,
+    model: LLM,
+) -> List[Dict[str, str]]:
+    chat = copy.deepcopy(chat)
+    if chat[-1]["role"] != "user":
+        raise ValueError("Last chat message must be from the user.")
+
+    user_request = chat[-1]["content"]
+    context = USER_REQ.format(user_request=user_request)
+    prompt = PLAN.format(context=context, tool_desc=tool_desc, feedback=working_memory)
+    chat[-1]["content"] = prompt
+    return extract_json(model.chat(chat))["plan"]  # type: ignore
+
+
+def reflect(
+    chat: List[Dict[str, str]],
+    plan: str,
+    code: str,
+    model: LLM,
+) -> Dict[str, Union[str, bool]]:
+    chat = copy.deepcopy(chat)
+    if chat[-1]["role"] != "user":
+        raise ValueError("Last chat message must be from the user.")
+
+    user_request = chat[-1]["content"]
+    context = USER_REQ.format(user_request=user_request)
+    prompt = REFLECT.format(context=context, plan=plan, code=code)
+    chat[-1]["content"] = prompt
+    return extract_json(model.chat(chat))
+
+
+def write_and_test_code(
+    task: str,
+    tool_info: str,
+    tool_utils: str,
+    working_memory: str,
+    coder: LLM,
+    tester: LLM,
+    debugger: LLM,
+    verbosity: int = 0,
+    max_retries: int = 3,
+) -> Dict[str, Any]:
+    code = extract_code(
+        coder(CODE.format(docstring=tool_info, question=task, feedback=working_memory))
+    )
+    test = extract_code(
+        tester(
+            SIMPLE_TEST.format(
+                docstring=tool_utils, question=task, code=code, feedback=working_memory
+            )
+        )
+    )
+
+    success, result = _EXECUTE.run_isolation(f"{code}\n{test}")
+    if verbosity == 2:
+        _LOGGER.info("First code and tests:")
+        _CONSOLE.print(
+            Syntax(f"{code}\n{test}", "python", theme="gruvbox-dark", line_numbers=True)
+        )
+        _LOGGER.info(f"First result: {result}")
+
+    count = 0
+    new_working_memory = []
+    while not success and count < max_retries:
+        fixed_code_and_test = extract_json(
+            debugger(
+                FIX_BUG.format(
+                    code=code, tests=test, result=result, feedback=working_memory
+                )
+            )
+        )
+        if fixed_code_and_test["code"].strip() != "":
+            code = extract_code(fixed_code_and_test["code"])
+        if fixed_code_and_test["test"].strip() != "":
+            test = extract_code(fixed_code_and_test["test"])
+        new_working_memory.append(
+            {"code": f"{code}\n{test}", "feedback": fixed_code_and_test["reflections"]}
+        )
+
+        success, result = _EXECUTE.run_isolation(f"{code}\n{test}")
+        if verbosity == 2:
+            _LOGGER.info(
+                f"Debug attempt {count + 1}, reflection: {fixed_code_and_test['reflections']}"
+            )
+            _CONSOLE.print(
+                Syntax(
+                    f"{code}\n{test}", "python", theme="gruvbox-dark", line_numbers=True
+                )
+            )
+            _LOGGER.info(f"Debug result: {result}")
+        count += 1
+
+    if verbosity == 1:
+        _CONSOLE.print(
+            Syntax(f"{code}\n{test}", "python", theme="gruvbox-dark", line_numbers=True)
+        )
+        _LOGGER.info(f"Result: {result}")
+
+    return {
+        "code": code,
+        "test": test,
+        "success": success,
+        "working_memory": new_working_memory,
+    }
+
+
+def retrieve_tools(
+    plan: List[Dict[str, str]], tool_recommender: Sim, verbosity: int = 0
+) -> str:
+    tool_info = []
+    tool_desc = []
+    for task in plan:
+        tools = tool_recommender.top_k(task["instructions"], k=2, thresh=0.3)
+        tool_info.extend([e["doc"] for e in tools])
+        tool_desc.extend([e["desc"] for e in tools])
+    if verbosity == 2:
+        _LOGGER.info(f"Tools: {tool_desc}")
+    tool_info_set = set(tool_info)
+    return "\n\n".join(tool_info_set)
+
+
+class VisionAgentV3(Agent):
+    def __init__(
+        self,
+        timeout: int = 600,
+        planner: Optional[LLM] = None,
+        coder: Optional[LLM] = None,
+        tester: Optional[LLM] = None,
+        debugger: Optional[LLM] = None,
+        tool_recommender: Optional[Sim] = None,
+        verbosity: int = 0,
+    ) -> None:
+        self.planner = (
+            OpenAILLM(temperature=0.0, json_mode=True) if planner is None else planner
+        )
+        self.coder = OpenAILLM(temperature=0.0) if coder is None else coder
+        self.tester = OpenAILLM(temperature=0.0) if tester is None else tester
+        self.debugger = (
+            OpenAILLM(temperature=0.0, json_mode=True) if debugger is None else debugger
+        )
+
+        self.tool_recommender = (
+            Sim(TOOLS_DF, sim_key="desc")
+            if tool_recommender is None
+            else tool_recommender
+        )
+        self.verbosity = verbosity
+        self.max_retries = 3
+
+    def __call__(
+        self,
+        input: Union[List[Dict[str, str]], str],
+        image: Optional[Union[str, Path]] = None,
+    ) -> str:
+        if isinstance(input, str):
+            input = [{"role": "user", "content": input}]
+        results = self.chat_with_workflow(input, image)
+        return results["code"]  # type: ignore
+
+    def chat_with_workflow(
+        self,
+        chat: List[Dict[str, str]],
+        image: Optional[Union[str, Path]] = None,
+    ) -> Dict[str, Any]:
+        if len(chat) == 0:
+            raise ValueError("Chat cannot be empty.")
+
+        if image is not None:
+            for chat_i in chat:
+                if chat_i["role"] == "user":
+                    chat_i["content"] += f" Image name {image}"
+
+        code = ""
+        test = ""
+        working_memory: List[Dict[str, str]] = []
+        results = {"code": "", "test": "", "plan": []}
+        plan = []
+        success = False
+        retries = 0
+
+        while not success and retries < self.max_retries:
+            plan_i = write_plan(
+                chat, TOOL_DESCRIPTIONS, format_memory(working_memory), self.planner
+            )
+            plan_i_str = "\n-".join([e["instructions"] for e in plan_i])
+            if self.verbosity == 1 or self.verbosity == 2:
+                _LOGGER.info(
+                    f"""
+{tabulate(tabular_data=plan_i, headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
+                )
+
+            tool_info = retrieve_tools(
+                plan_i,
+                self.tool_recommender,
+                self.verbosity,
+            )
+            results = write_and_test_code(
+                plan_i_str,
+                tool_info,
+                UTILITIES_DOCSTRING,
+                format_memory(working_memory),
+                self.coder,
+                self.tester,
+                self.debugger,
+                verbosity=self.verbosity,
+            )
+            success = cast(bool, results["success"])
+            code = cast(str, results["code"])
+            test = cast(str, results["test"])
+            working_memory.extend(results["working_memory"])  # type: ignore
+            plan.append({"code": code, "test": test, "plan": plan_i})
+
+            reflection = reflect(chat, plan_i_str, code, self.planner)
+            if self.verbosity > 0:
_LOGGER.info(f"Reflection: {reflection}") + feedback = cast(str, reflection["feedback"]) + success = cast(bool, reflection["success"]) + working_memory.append({"code": f"{code}\n{test}", "feedback": feedback}) + + return { + "code": code, + "test": test, + "plan": plan, + "working_memory": working_memory, + } + + def log_progress(self, description: str) -> None: + pass diff --git a/vision_agent/agent/vision_agent_v3_prompts.py b/vision_agent/agent/vision_agent_v3_prompts.py new file mode 100644 index 00000000..3e0813af --- /dev/null +++ b/vision_agent/agent/vision_agent_v3_prompts.py @@ -0,0 +1,221 @@ +USER_REQ = """ +## User Request +{user_request} +""" + +FEEDBACK = """ +## This contains code and feedback from previous runs and is used for providing context so you do not make the same mistake again. + +{feedback} +""" + + +PLAN = """ +**Context** +{context} + +**Tools Available**: +{tool_desc} + +**Previous Feedback**: +{feedback} + +**Instructions**: +Based on the context and tools you have available, write a plan of subtasks to achieve the user request utilizing given tools when necessary. Output a list of jsons in the following format: + +```json +{{ + "plan": + [ + {{ + "instructions": str # what you should do in this task, one short phrase or sentence + }} + ] +}} +``` +""" + +CODE = """ +**Role**: You are a software programmer. + +**Task**: As a programmer, you are required to complete the function. Use a Chain-of-Thought approach to break down the problem, create pseudocode, and then write the code in Python language. Ensure that your code is efficient, readable, and well-commented. Return the requested information from the function you create. Do not call your code, a test will be run after the code is submitted. + +**Documentation**: +This is the documentation for the functions you have access to. You may call any of these functions to help you complete the task. They are available through importing `from vision_agent.tools.tools_v2 import *`. + +{docstring} + +**Input Code Snippet**: +```python +# Your code here +``` + +**User Instructions**: +{question} + +**Previous Feedback**: +{feedback} + +**Instructions**: +1. **Understand and Clarify**: Make sure you understand the task. +2. **Algorithm/Method Selection**: Decide on the most efficient way. +3. **Pseudocode Creation**: Write down the steps you will follow in pseudocode. +4. **Code Generation**: Translate your pseudocode into executable Python code. +""" + +TEST = """ +**Role**: As a tester, your task is to create comprehensive test cases for the provided code. These test cases should encompass Basic and Edge case scenarios to ensure the code's robustness and reliability if possible. + +**Documentation**: +This is the documentation for the functions you have access to. You may call any of these functions to help you complete the task. They are available through importing `from vision_agent.tools.tools_v2 import *`. You do not need to test these functions. Test only the code provided by the user. + +{docstring} + +**User Instructions**: +{question} + +**Input Code Snippet**: +```python +### Please decided how would you want to generate test cases. Based on incomplete code or completed version. +{code} +``` + +**Instructions**: +1. Verify the fundamental functionality under normal conditions. +2. Ensure each test case is well-documented with comments explaining the scenario it covers. +3. 
+3. DO NOT use any files that are not provided in the user's instructions; your test will be run and will crash if it tries to load a non-existent file.
+4. DO NOT mock any functions; you must test their functionality as is.
+
+You should format your test cases at the end of your response, wrapped in ```python ``` tags, as in the following example:
+```python
+# You can run assertions to ensure the function is working as expected
+assert function(input) == expected_output, "Test case description"
+
+# You can simply call the function to ensure it runs
+function(input)
+
+# Or you can visualize the output
+output = function(input)
+visualize(output)
+```
+
+**Examples**:
+## Prompt 1:
+```python
+def detect_cats_and_dogs(image_path: str) -> Dict[str, List[List[float]]]:
+    \""" Detects cats and dogs in an image. Returns a dictionary with
+    {{
+        "cats": [[x1, y1, x2, y2], ...], "dogs": [[x1, y1, x2, y2], ...]
+    }}
+    \"""
+```
+
+## Completion 1:
+```python
+# We can test to ensure the output has the correct structure but we cannot test the
+# content of the output without knowing the image. We can test on "image.jpg" because
+# it is provided by the user so we know it exists.
+output = detect_cats_and_dogs("image.jpg")
+assert "cats" in output, "The output should contain 'cats'"
+assert "dogs" in output, "The output should contain 'dogs'"
+```
+
+## Prompt 2:
+```python
+def find_text(image_path: str, text: str) -> str:
+    \""" Finds the text in the image and returns the text. \"""
+```
+
+## Completion 2:
+```python
+# Because we do not know ahead of time what text is in the image, we can only run the
+# code and print the results. We can test on "image.jpg" because it is provided by the
+# user so we know it exists.
+found_text = find_text("image.jpg", "Hello World")
+print(found_text)
+```
+"""
+
+
+SIMPLE_TEST = """
+**Role**: As a tester, your task is to create a simple test case for the provided code. This test case should verify the fundamental functionality under normal conditions.
+
+**Documentation**:
+This is the documentation for the functions you have access to. You may call any of these functions to help you complete the task. They are available through importing `from vision_agent.tools.tools_v2 import *`. You do not need to test these functions, only the code provided by the user.
+
+{docstring}
+
+**User Instructions**:
+{question}
+
+**Input Code Snippet**:
+```python
+### Please decide how you want to generate test cases, based on the incomplete code or the completed version.
+{code}
+```
+
+**Previous Feedback**:
+{feedback}
+
+**Instructions**:
+1. Verify the fundamental functionality under normal conditions.
+2. Ensure each test case is well-documented with comments explaining the scenario it covers.
+3. DO NOT use any files that are not provided in the user's instructions; your test will be run and will crash if it tries to load a non-existent file.
+4. DO NOT mock any functions; you must test their functionality as is.
+"""
+
+
+FIX_BUG = """
+**Role**: As a coder, your job is to find the error in the code and fix it. You are running in a notebook setting, so feel free to run !pip install to install missing packages.
+
+**Instructions**:
+Please re-complete the code to fix the error. Here is the previous version:
+```python
+{code}
+```
+
+When we run this test code:
+```python
+{tests}
+```
+
+It raises this error:
+```python
+{result}
+```
+
+This is previous feedback provided on the code:
+{feedback}
+
+Please fix the bug by following the error information and return a JSON object with the following format:
+{{
+    "reflections": str,  # any thoughts you have about the bug and how you fixed it
+    "code": str,  # the fixed code if any, else an empty string
+    "test": str  # the fixed test code if any, else an empty string
+}}
+"""
+
+
+REFLECT = """
+**Role**: You are a reflection agent. Your job is to look at the original user request and the code produced and determine whether the code satisfies the user's request. If it does not, you must provide feedback on how to improve the code. You are concerned only with whether the code meets the user's request, not with whether the code is good or bad.
+
+**Context**:
+{context}
+
+**Plan**:
+{plan}
+
+**Code**:
+{code}
+
+**Instructions**:
+1. **Understand the User Request**: Read the user request and understand what the user is asking for.
+2. **Review the Plan**: Check the plan to see if it is a viable approach to solving the user request.
+3. **Review the Code**: Check the code to see if it solves the user request.
+4. DO NOT add any reflections for test cases; these are taken care of.
+
+Respond in JSON format with the following structure:
+{{
+    "feedback": str,  # the feedback you would give to the coder and tester
+    "success": bool  # whether the code and tests meet the user request
+}}
+"""
diff --git a/vision_agent/tools/tools_v2.py b/vision_agent/tools/tools_v2.py
index 37e76a28..04f4dedf 100644
--- a/vision_agent/tools/tools_v2.py
+++ b/vision_agent/tools/tools_v2.py
@@ -16,12 +16,12 @@
 from vision_agent.tools.tool_utils import _send_inference_request
 from vision_agent.utils import extract_frames_from_video
 from vision_agent.utils.image_utils import (
+    b64_to_pil,
     convert_to_b64,
+    denormalize_bbox,
+    get_image_size,
     normalize_bbox,
     rle_decode,
-    b64_to_pil,
-    get_image_size,
-    denormalize_bbox,
 )
 
 COLORS = [
diff --git a/vision_agent/utils/execute.py b/vision_agent/utils/execute.py
index aa882728..b1c3417b 100644
--- a/vision_agent/utils/execute.py
+++ b/vision_agent/utils/execute.py
@@ -4,6 +4,7 @@
 import base64 as b64
 import io
 import re
+from time import sleep
 from typing import Dict, List, Tuple
 
 import nbformat
@@ -75,6 +76,7 @@ def reset(self) -> None:
         self.terminate()
         self.nb = nbformat.v4.new_notebook()
         self.nb_client = NotebookClient(self.nb, timeout=self.timeout)
+        sleep(1)
         self.build()
 
     def run_cell(self, cell: NotebookNode, cell_index: int) -> Tuple[bool, str]:
@@ -83,6 +85,7 @@
             return parse_outputs(self.nb.cells[-1].outputs)
         except CellTimeoutError:
             run_sync(self.nb_client.km.interrupt_kernel)()  # type: ignore
+            sleep(1)
             return False, "Cell execution timed out."
         except DeadKernelError:
             self.reset()
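For reviewers, a minimal usage sketch of the `VisionAgentV3` workflow added above. The request text and `cats.jpg` are placeholder inputs, and working OpenAI credentials are assumed for the default `OpenAILLM` models:

```python
from vision_agent.agent import VisionAgentV3

# Default construction builds OpenAILLM instances for the planner, coder,
# tester and debugger, plus a Sim tool recommender over TOOLS_DF.
agent = VisionAgentV3(verbosity=1)

# The agent appends the image name to the user message, writes a plan,
# generates and tests code, and reflects until the request succeeds or
# max_retries is exhausted; the final generated code is returned.
code = agent("Count the number of cats in the image.", image="cats.jpg")
print(code)
```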
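The `extract_code`/`extract_json` helpers are what make the loop tolerant of fenced LLM output; a small sketch of their behavior (the response strings here are made up for illustration):

```python
from vision_agent.agent.vision_agent_v3 import extract_code, extract_json

# extract_code strips a ```python fence and returns the enclosed code;
# responses without a ```python fence are returned unchanged.
response = "Here is the code:\n```python\nprint('hello')\n```"
assert extract_code(response).strip() == "print('hello')"

# extract_json first tries json.loads on the raw string and only then
# falls back to stripping a ```json fence before parsing.
plan = 'Reasoning...\n```json\n{"plan": [{"instructions": "load the image"}]}\n```'
assert extract_json(plan)["plan"][0]["instructions"] == "load the image"
```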
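And a sketch of how failed attempts feed back into later prompts via `format_memory` (the working-memory entry shown is hypothetical):

```python
from vision_agent.agent.vision_agent_v3 import format_memory

# Each working-memory entry pairs a code+test string with the feedback
# produced by the debugger's reflections or the REFLECT step.
memory = [
    {
        "code": "text = ocr('receipt.jpg')\nprint(text)",
        "feedback": "Print only the text field of each OCR result.",
    }
]

# format_memory renders the entries into the FEEDBACK block that is passed
# as {feedback} to the PLAN, CODE, SIMPLE_TEST and FIX_BUG prompts.
print(format_memory(memory))
```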