Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

feat: add progress logging for v2 and v3 #93

Merged
12 commits merged on
May 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions vision_agent/agent/agent.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Union
from typing import Dict, List, Optional, Union, Any


class Agent(ABC):
Expand All @@ -13,7 +13,7 @@ def __call__(
pass

@abstractmethod
def log_progress(self, description: str) -> None:
def log_progress(self, data: Dict[str, Any]) -> None:
"""Log the progress of the agent.
This is a hook that is intended for reporting the progress of the agent.
"""
Expand Down
6 changes: 3 additions & 3 deletions vision_agent/agent/agent_coder.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import os
import sys
from pathlib import Path
from typing import Dict, List, Optional, Union
from typing import Dict, List, Optional, Union, Any

from rich.console import Console
from rich.syntax import Syntax
Expand Down Expand Up @@ -206,5 +206,5 @@ def chat(

return f"{IMPORT_HELPER}\n{code}"

def log_progress(self, description: str) -> None:
_LOGGER.info(description)
def log_progress(self, data: Dict[str, Any]) -> None:
_LOGGER.info(data)
38 changes: 23 additions & 15 deletions vision_agent/agent/vision_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -451,7 +451,7 @@ def __init__(
reflect_model: Optional[Union[LLM, LMM]] = None,
max_retries: int = 2,
verbose: bool = False,
report_progress_callback: Optional[Callable[[str], None]] = None,
report_progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
):
"""VisionAgent constructor.

Expand Down Expand Up @@ -518,23 +518,23 @@ def __call__(
self_reflection=self_reflection,
)

def log_progress(self, description: str) -> None:
_LOGGER.info(description)
def log_progress(self, data: Dict[str, Any]) -> None:
_LOGGER.info(data)
if self.report_progress_callback:
self.report_progress_callback(description)
self.report_progress_callback(data)

def _report_visualization_via_callback(
self, images: Sequence[Union[str, Path]]
) -> None:
"""This is intended for streaming the visualization images via the callback to the client side."""
if self.report_progress_callback:
self.report_progress_callback("<VIZ>")
self.report_progress_callback({"log": "<VIZ>"})
if images:
for img in images:
self.report_progress_callback(
f"<IMG>base:64{convert_to_b64(img)}</IMG>"
{"log": f"<IMG>base:64{convert_to_b64(img)}</IMG>"}
)
self.report_progress_callback("</VIZ>")
self.report_progress_callback({"log": "</VIZ>"})

def chat_with_workflow(
self,
Expand Down Expand Up @@ -618,8 +618,8 @@ def chat_with_workflow(
tool_results["answer"] = answer
all_tool_results.append(tool_results)

self.log_progress(f"\tCall Result: {call_results}")
self.log_progress(f"\tAnswer: {answer}")
self.log_progress({"log": f"\tCall Result: {call_results}"})
self.log_progress({"log": f"\tAnswer: {answer}"})
answers.append({"task": task_str, "answer": answer})
task_depend[task["id"]]["answer"] = answer # type: ignore
task_depend[task["id"]]["call_result"] = call_results # type: ignore
Expand All @@ -644,18 +644,22 @@ def chat_with_workflow(
final_answer,
reflection_images,
)
self.log_progress(f"Reflection: {reflection}")
self.log_progress({"log": f"Reflection: {reflection}"})
parsed_reflection = parse_reflect(reflection)
if parsed_reflection["Finish"]:
break
else:
reflections += "\n" + parsed_reflection["Reflection"]
else:
self.log_progress("Self Reflection skipped based on user request.")
self.log_progress(
{"log": "Self Reflection skipped based on user request."}
)
break
# '<ANSWER>' is a symbol to indicate the end of the chat, which is useful for streaming logs.
self.log_progress(
f"The Vision Agent has concluded this chat. <ANSWER>{final_answer}</ANSWER>"
{
"log": f"The Vision Agent has concluded this chat. <ANSWER>{final_answer}</ANSWER>"
}
)

if visualize_output:
Expand Down Expand Up @@ -718,8 +722,10 @@ def retrieval(
}

self.log_progress(
f"""Going to run the following tool(s) in sequence:
{
"log": f"""Going to run the following tool(s) in sequence:
{tabulate(tabular_data=[tool_results], headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
}
)

def parse_tool_results(result: Dict[str, Union[Dict, List]]) -> Any:
Expand Down Expand Up @@ -764,7 +770,9 @@ def create_tasks(
else:
task_list = []
self.log_progress(
f"""Planned tasks:
{tabulate(task_list, headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
{
"log": "Planned tasks:",
"plan": task_list,
}
)
return task_list
72 changes: 57 additions & 15 deletions vision_agent/agent/vision_agent_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ def write_and_exec_code(
tool_info: str,
exec: Execute,
retrieved_ltm: str,
log_progress: Callable[..., str],
log_progress: Callable[[Dict[str, Any]], None],
max_retry: int = 3,
verbosity: int = 0,
) -> Tuple[bool, str, str, Dict[str, List[str]]]:
Expand All @@ -179,7 +179,23 @@ def write_and_exec_code(
success, result = exec.run_isolation(code)
if verbosity == 2:
_CONSOLE.print(Syntax(code, "python", theme="gruvbox-dark", line_numbers=True))
log_progress(f"\tCode success: {success}\n\tResult: {str(result)}", code)
log_progress(
{
"log": f"Code success: {success}",
}
)
log_progress(
{
"log": "Code:",
"code": code,
}
)
log_progress(
{
"log": "Result:",
"result": str(result),
}
)
_LOGGER.info(f"\tCode success: {success}, result: {str(result)}")
working_memory: Dict[str, List[str]] = {}
while not success and counter < max_retry:
Expand All @@ -206,7 +222,18 @@ def write_and_exec_code(
_CONSOLE.print(
Syntax(code, "python", theme="gruvbox-dark", line_numbers=True)
)
log_progress(f"\tDebugging reflection: {reflection}\n\tResult: {result}")
log_progress(
{
"log": "Debugging reflection:",
"reflection": reflection,
}
)
log_progress(
{
"log": "Result:",
"result": result,
}
)
_LOGGER.info(f"\tDebugging reflection: {reflection}, result: {result}")

if success:
Expand All @@ -227,7 +254,7 @@ def run_plan(
exec: Execute,
code: str,
tool_recommender: Sim,
log_progress: Callable[..., str],
log_progress: Callable[[Dict[str, Any]], None],
long_term_memory: Optional[Sim] = None,
verbosity: int = 0,
) -> Tuple[str, str, List[Dict[str, Any]], Dict[str, List[str]]]:
Expand All @@ -239,8 +266,7 @@ def run_plan(

for task in active_plan:
log_progress(
f"""Going to run the following task(s) in sequence:
{tabulate(tabular_data=[task], headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
{"log": "Going to run the following task(s) in sequence:", "task": task}
)
_LOGGER.info(
f"""
Expand All @@ -250,7 +276,7 @@ def run_plan(
tool_info = "\n".join([e["doc"] for e in tools])

if verbosity == 2:
log_progress(f"Tools retrieved: {[e['desc'] for e in tools]}")
log_progress({"log": f"Tools retrieved: {[e['desc'] for e in tools]}"})
_LOGGER.info(f"Tools retrieved: {[e['desc'] for e in tools]}")

if long_term_memory is not None:
Expand Down Expand Up @@ -282,7 +308,17 @@ def run_plan(
Syntax(code, "python", theme="gruvbox-dark", line_numbers=True)
)

log_progress(f"\tCode success: {success}\n\tResult: {str(result)}")
log_progress(
{
"log": f"Code success: {success}",
}
)
log_progress(
{
"log": "Result:",
"result": str(result),
}
)
_LOGGER.info(f"\tCode success: {success} result: {str(result)}")

task["success"] = success
Expand Down Expand Up @@ -320,7 +356,7 @@ def __init__(
tool_recommender: Optional[Sim] = None,
long_term_memory: Optional[Sim] = None,
verbosity: int = 0,
report_progress_callback: Optional[Callable[..., Any]] = None,
report_progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
) -> None:
self.planner = OpenAILLM(temperature=0.0, json_mode=True)
self.coder = OpenAILLM(temperature=0.0)
Expand Down Expand Up @@ -376,8 +412,10 @@ def chat_with_workflow(

user_req, plan = write_plan(chat, plan, TOOL_DESCRIPTIONS, self.planner)
self.log_progress(
f"""Plan:
{tabulate(tabular_data=plan, headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
{
"log": "Plans:",
"plan": plan,
}
)
_LOGGER.info(
f"""Plan:
Expand Down Expand Up @@ -412,8 +450,12 @@ def chat_with_workflow(

retries += 1

self.log_progress("The Vision Agent V2 has concluded this chat.")
self.log_progress(f"<ANSWER>Plan success: {success}</ANSWER>")
self.log_progress(
{
"log": f"The Vision Agent V2 has concluded this chat.\nSuccess: {success}",
"finished": True,
}
)

return {
"code": working_code,
Expand All @@ -423,7 +465,7 @@ def chat_with_workflow(
"plan": plan,
}

def log_progress(self, description: str, code: Optional[str] = "") -> None:
def log_progress(self, data: Dict[str, Any]) -> None:
if self.report_progress_callback is not None:
self.report_progress_callback(description, code)
self.report_progress_callback(data)
pass
Loading
Loading