feat: log progress for vision agent v2 #90

Merged (5 commits) on May 22, 2024
Changes from 3 commits
vision_agent/agent/vision_agent_v2.py (26 changes: 25 additions & 1 deletion)
@@ -167,6 +167,7 @@ def write_and_exec_code(
retrieved_ltm: str,
max_retry: int = 3,
verbosity: int = 0,
log_progress: Callable[..., str] = None,
) -> Tuple[bool, str, str, Dict[str, List[str]]]:
success = False
counter = 0
@@ -178,6 +179,7 @@ def write_and_exec_code(
success, result = exec.run_isolation(code)
if verbosity == 2:
_CONSOLE.print(Syntax(code, "python", theme="gruvbox-dark", line_numbers=True))
log_progress(f"\tCode success: {success}\n\tResult: {str(result)}", code)
_LOGGER.info(f"\tCode success: {success}, result: {str(result)}")
working_memory: Dict[str, List[str]] = {}
while not success and counter < max_retry:
@@ -204,6 +206,7 @@ def write_and_exec_code(
_CONSOLE.print(
Syntax(code, "python", theme="gruvbox-dark", line_numbers=True)
)
log_progress(f"\tDebugging reflection: {reflection}\n\tResult: {result}")
_LOGGER.info(f"\tDebugging reflection: {reflection}, result: {result}")

if success:
@@ -226,6 +229,7 @@ def run_plan(
tool_recommender: Sim,
long_term_memory: Optional[Sim] = None,
verbosity: int = 0,
log_progress: Callable[..., str] = None,
) -> Tuple[str, str, List[Dict[str, Any]], Dict[str, List[str]]]:
active_plan = [e for e in plan if "success" not in e or not e["success"]]
current_code = code
@@ -234,6 +238,10 @@ def run_plan(
working_memory: Dict[str, List[str]] = {}

for task in active_plan:
log_progress(
f"""Going to run the following task(s) in sequence:
{tabulate(tabular_data=[task], headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
)
_LOGGER.info(
f"""
{tabulate(tabular_data=[task], headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
@@ -242,6 +250,7 @@ def run_plan(
tool_info = "\n".join([e["doc"] for e in tools])

if verbosity == 2:
log_progress(f"Tools retrieved: {[e['desc'] for e in tools]}")
_LOGGER.info(f"Tools retrieved: {[e['desc'] for e in tools]}")

if long_term_memory is not None:
@@ -259,6 +268,7 @@ def run_plan(
exec,
retrieved_ltm,
verbosity=verbosity,
log_progress=log_progress,
)
if task["type"] == "code":
current_code = code
@@ -271,6 +281,8 @@ def run_plan(
_CONSOLE.print(
Syntax(code, "python", theme="gruvbox-dark", line_numbers=True)
)

log_progress(f"\tCode success: {success}\n\tResult: {str(result)}")
_LOGGER.info(f"\tCode success: {success} result: {str(result)}")

task["success"] = success
@@ -308,10 +320,12 @@ def __init__(
tool_recommender: Optional[Sim] = None,
long_term_memory: Optional[Sim] = None,
verbosity: int = 0,
report_progress_callback: Optional[Callable[..., Any]] = None,
) -> None:
self.planner = OpenAILLM(temperature=0.0, json_mode=True)
self.coder = OpenAILLM(temperature=0.0)
self.exec = Execute(timeout=timeout)
self.report_progress_callback = report_progress_callback
if tool_recommender is None:
self.tool_recommender = Sim(TOOLS_DF, sim_key="desc")
else:
@@ -361,6 +375,10 @@ def chat_with_workflow(
working_code = task["code"]

user_req, plan = write_plan(chat, plan, TOOL_DESCRIPTIONS, self.planner)
self.log_progress(
f"""Plan:
{tabulate(tabular_data=plan, headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
)
_LOGGER.info(
f"""Plan:
{tabulate(tabular_data=plan, headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
@@ -381,6 +399,7 @@ def chat_with_workflow(
self.tool_recommender,
self.long_term_memory,
self.verbosity,
self.log_progress,
)
success = all(
task["success"] if "success" in task else False for task in plan
@@ -393,6 +412,9 @@ def chat_with_workflow(

retries += 1

self.log_progress("The Vision Agent V2 has concluded this chat.")
self.log_progress(f"<ANSWER>Plan success: {success}</ANSWER>")

return {
"code": working_code,
"test": working_test,
@@ -401,5 +423,7 @@ def chat_with_workflow(
"plan": plan,
}

def log_progress(self, description: str) -> None:
def log_progress(self, description: str, code: Optional[str] = "") -> None:
if self.report_progress_callback is not None:
self.report_progress_callback(description, code)
pass
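
A minimal usage sketch of the new hook, for context (not part of this diff). It assumes the agent class defined in vision_agent_v2.py is named VisionAgentV2, that it is importable from vision_agent.agent.vision_agent_v2, and that chat_with_workflow accepts an OpenAI-style chat list plus an image path; none of these names are confirmed by the diff itself.

# Hypothetical wiring of report_progress_callback; class name, import path and
# chat_with_workflow arguments are assumptions, not confirmed by this diff.
from vision_agent.agent.vision_agent_v2 import VisionAgentV2


def print_progress(description: str, code: str = "") -> None:
    # log_progress forwards (description, code) to the registered callback;
    # code is only non-empty for steps that executed generated code.
    print(f"[progress] {description}")
    if code:
        print(f"[generated code]\n{code}")


# Other constructor arguments (e.g. timeout) are assumed to have defaults.
agent = VisionAgentV2(report_progress_callback=print_progress)
result = agent.chat_with_workflow(
    [{"role": "user", "content": "Count the cars in the image"}],  # assumed chat format
    image="cars.jpg",  # assumed parameter name
)
print(result["plan"])

Because report_progress_callback defaults to None and log_progress checks it before forwarding, existing callers are unaffected. The module-level helpers in this diff, however, call the passed-in log_progress unconditionally even though its default is None, so they rely on chat_with_workflow always forwarding self.log_progress.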