fixed linting
dillonalaird committed Aug 9, 2024
1 parent ec93b88 commit 81ef0a8
Showing 3 changed files with 9 additions and 9 deletions.
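Note on reading the hunks below: this plain-text rendering drops the whitespace detail of each change, so the removed and added lines in every pair look identical. Given the commit message, the changes appear to be whitespace-only comment-spacing fixes. As an assumption (not stated anywhere in the commit), the rule involved is likely the standard inline-comment spacing check such as flake8's E261, which requires at least two spaces between code and an inline "#" comment. A minimal, hypothetical sketch of that convention:

def do_work() -> int:
    return 1

# Flagged by E261 (only one space before the inline comment):
x = do_work() # type: ignore

# Accepted (two spaces before the inline comment):
y = do_work()  # type: ignore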
2 changes: 1 addition & 1 deletion vision_agent/agent/vision_agent.py
@@ -63,7 +63,7 @@ def run_conversation(orch: LMM, chat: List[Message]) -> Dict[str, Any]:
dir=WORKSPACE,
conversation=conversation,
)
-return extract_json(orch([{"role": "user", "content": prompt}], stream=False)) # type: ignore
+return extract_json(orch([{"role": "user", "content": prompt}], stream=False)) # type: ignore


def run_code_action(code: str, code_interpreter: CodeInterpreter) -> str:
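The single change in run_conversation touches the line that feeds the orchestrator model's reply through extract_json. That helper's implementation is not shown in this diff; the sketch below is only an assumed illustration of what such a helper typically does, namely pull a JSON object out of a reply that may be wrapped in a fenced block (extract_json_sketch and the sample keys are hypothetical):

import json
import re
from typing import Any, Dict

def extract_json_sketch(reply: str) -> Dict[str, Any]:
    # Prefer a fenced ```json block if the model produced one, else parse the whole reply.
    match = re.search(r"```(?:json)?\s*(.*?)```", reply, re.DOTALL)
    payload = match.group(1) if match else reply
    return json.loads(payload)

print(extract_json_sketch('```json\n{"thoughts": "plan", "response": "done"}\n```'))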
12 changes: 6 additions & 6 deletions vision_agent/agent/vision_agent_coder.py
@@ -160,7 +160,7 @@ def pick_plan(
docstring=tool_info, plans=plan_str, previous_attempts="", media=media
)

-code = extract_code(model(prompt, stream=False)) # type: ignore
+code = extract_code(model(prompt, stream=False)) # type: ignore
log_progress(
{
"type": "log",
@@ -211,7 +211,7 @@ def pick_plan(
"code": DefaultImports.prepend_imports(code),
}
)
-code = extract_code(model(prompt, stream=False)) # type: ignore
+code = extract_code(model(prompt, stream=False)) # type: ignore
tool_output = code_interpreter.exec_isolation(
DefaultImports.prepend_imports(code)
)
@@ -251,7 +251,7 @@ def pick_plan(
tool_output=tool_output_str[:20_000],
)
chat[-1]["content"] = prompt
-best_plan = extract_json(model(chat, stream=False)) # type: ignore
+best_plan = extract_json(model(chat, stream=False)) # type: ignore

if verbosity >= 1:
_LOGGER.info(f"Best plan:\n{best_plan}")
@@ -286,7 +286,7 @@ def write_code(
feedback=feedback,
)
chat[-1]["content"] = prompt
-return extract_code(coder(chat, stream=False)) # type: ignore
+return extract_code(coder(chat, stream=False)) # type: ignore


def write_test(
@@ -310,7 +310,7 @@ def write_test(
media=media,
)
chat[-1]["content"] = prompt
-return extract_code(tester(chat, stream=False)) # type: ignore
+return extract_code(tester(chat, stream=False)) # type: ignore


def write_and_test_code(
@@ -439,7 +439,7 @@ def debug_code(
while not success and count < 3:
try:
fixed_code_and_test = extract_json(
-debugger( # type: ignore
+debugger( # type: ignore
FIX_BUG.format(
code=code,
tests=test,
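The debug_code hunk sits inside a bounded retry loop (while not success and count < 3) that asks a debugger model for a fix via FIX_BUG.format(...) and parses the JSON it returns. A compact sketch of that retry pattern follows; call_debugger is a stub standing in for the real model call, and the field names are assumptions rather than the repository's actual schema:

import json
from typing import Any, Dict

def call_debugger(code: str, test: str) -> str:
    # Stub for the model call; the real code prompts an LMM with FIX_BUG.format(...).
    return json.dumps({"code": code, "test": test, "reflections": "no changes needed"})

def fix_with_retries(code: str, test: str, max_attempts: int = 3) -> Dict[str, Any]:
    success, count, result = False, 0, {}
    while not success and count < max_attempts:
        try:
            result = json.loads(call_debugger(code, test))
            success = True
        except json.JSONDecodeError:
            count += 1
    return result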
4 changes: 2 additions & 2 deletions vision_agent/lmm/lmm.py
@@ -152,7 +152,7 @@ def chat(
)
if "stream" in tmp_kwargs and tmp_kwargs["stream"]:
for chunk in response:
-chunk_message = chunk.choices[0].delta.content # type: ignore
+chunk_message = chunk.choices[0].delta.content # type: ignore
yield chunk_message
else:
return cast(str, response.choices[0].message.content)
@@ -191,7 +191,7 @@ def generate(
)
if "stream" in tmp_kwargs and tmp_kwargs["stream"]:
for chunk in response:
-chunk_message = chunk.choices[0].delta.content # type: ignore
+chunk_message = chunk.choices[0].delta.content # type: ignore
yield chunk_message
else:
return cast(str, response.choices[0].message.content)
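Both lmm.py changes land in the streaming branch of OpenAI-style chat and generate methods: when stream=True they yield chunk.choices[0].delta.content for each chunk, otherwise they return the complete message. A small, assumed sketch of how a caller might consume either shape (consume is hypothetical, not a function from the repository):

from typing import Iterator, Optional, Union

def consume(response: Union[str, Iterator[Optional[str]]]) -> str:
    # Non-streaming call: the full message is returned as one string.
    if isinstance(response, str):
        return response
    # Streaming call: chunks arrive incrementally; delta.content can be None
    # on the final chunk, so skip empty pieces before joining.
    parts = []
    for chunk in response:
        if chunk:
            print(chunk, end="", flush=True)
            parts.append(chunk)
    return "".join(parts)

full = consume(iter(["Hello", ", ", "world", None]))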
