Integrate langsmith for better observability (#86)
* Integrate langsmith for better observability

* Fix unit test

* Fix unit test

---------

Co-authored-by: Yazhou Cao <[email protected]>
AsiaCao and humpydonkey authored May 16, 2024
1 parent 70b5465 commit 3630983
Showing 6 changed files with 100 additions and 13 deletions.
81 changes: 76 additions & 5 deletions poetry.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion pyproject.toml
@@ -16,7 +16,7 @@ packages = [{include = "vision_agent"}]
 "documentation" = "https://github.com/landing-ai/vision-agent"

 [tool.poetry.dependencies] # main dependency group
-python = ">=3.9"
+python = ">=3.9,<4.0"
 numpy = ">=1.21.0,<2.0.0"
 pillow = "10.*"
 requests = "2.*"
@@ -32,6 +32,7 @@ scipy = "1.13.*"
 nbclient = "^0.10.0"
 nbformat = "^5.10.4"
 rich = "^13.7.1"
+langsmith = "^0.1.58"

 [tool.poetry.group.dev.dependencies]
 autoflake = "1.*"
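Adding langsmith as a runtime dependency only makes tracing available; LangSmith itself is typically switched on through environment variables rather than code. A minimal setup sketch, not part of this commit (the project name is a placeholder):

    # Hedged sketch: standard LangSmith environment toggles, set before the process starts.
    import os

    os.environ["LANGCHAIN_TRACING_V2"] = "true"         # enable tracing
    os.environ["LANGCHAIN_API_KEY"] = "<your-api-key>"  # LangSmith API key
    os.environ["LANGCHAIN_PROJECT"] = "vision-agent"    # placeholder project name

With these unset, the wrapped clients and decorators introduced below should behave like their plain counterparts, so tracing stays opt-in.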
7 changes: 7 additions & 0 deletions tests/fixtures.py
@@ -18,6 +18,13 @@ def openai_llm_mock(request):
     yield mock_instance


+@pytest.fixture
+def langsmith_wrap_openai_mock(request, openai_llm_mock):
+    with patch("vision_agent.llm.llm.wrap_openai") as mock:
+        mock.return_value = openai_llm_mock
+        yield mock
+
+
 @pytest.fixture
 def openai_lmm_mock(request):
     content = request.param
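An editorial aside, not part of the diff: the fixture patches wrap_openai at the point of use (vision_agent.llm.llm), not where it is defined (langsmith.wrappers), because unittest.mock must target the name the module under test actually looks up at call time. A minimal sketch of the same rule, with a hypothetical module name:

    # mymodule.py does "from langsmith.wrappers import wrap_openai", so a test
    # must patch mymodule.wrap_openai, the name that mymodule resolves when called.
    from unittest.mock import patch

    with patch("mymodule.wrap_openai", side_effect=lambda client: client):
        ...  # code under test now receives the bare client, untraced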
7 changes: 4 additions & 3 deletions tests/test_llm.py
@@ -6,14 +6,15 @@
     clip_mock,
     grounding_dino_mock,
     grounding_sam_mock,
+    langsmith_wrap_openai_mock,
     openai_llm_mock,
 )


 @pytest.mark.parametrize(
     "openai_llm_mock", ["mocked response"], indirect=["openai_llm_mock"]
 )
-def test_generate_with_mock(openai_llm_mock):  # noqa: F811
+def test_generate_with_mock(openai_llm_mock, langsmith_wrap_openai_mock):  # noqa: F811
     llm = OpenAILLM()
     response = llm.generate("test prompt")
     assert response == "mocked response"
@@ -26,7 +27,7 @@ def test_generate_with_mock(openai_llm_mock):  # noqa: F811
 @pytest.mark.parametrize(
     "openai_llm_mock", ["mocked response"], indirect=["openai_llm_mock"]
 )
-def test_chat_with_mock(openai_llm_mock):  # noqa: F811
+def test_chat_with_mock(openai_llm_mock, langsmith_wrap_openai_mock):  # noqa: F811
     llm = OpenAILLM()
     response = llm.chat([{"role": "user", "content": "test prompt"}])
     assert response == "mocked response"
@@ -52,7 +53,7 @@ def openai_llm_mock_turbo(openai_llm_mock_2):  # noqa: F811
 @pytest.mark.parametrize(
     "openai_llm_mock", ["mocked response"], indirect=["openai_llm_mock"]
 )
-def test_call_with_mock(openai_llm_mock):  # noqa: F811
+def test_call_with_mock(openai_llm_mock, langsmith_wrap_openai_mock):  # noqa: F811
     llm = OpenAILLM()
     response = llm("test prompt")
     assert response == "mocked response"
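Worth noting (editorial, not in the commit): each test requests langsmith_wrap_openai_mock purely for its side effect, the active patch, and never reads its value. An equivalent, slightly more explicit spelling uses pytest.mark.usefixtures; a sketch, with import paths assumed rather than taken from the repo:

    import pytest
    from vision_agent.llm import OpenAILLM  # assumed import path
    from tests.fixtures import langsmith_wrap_openai_mock, openai_llm_mock  # noqa: F401

    @pytest.mark.usefixtures("langsmith_wrap_openai_mock")
    @pytest.mark.parametrize(
        "openai_llm_mock", ["mocked response"], indirect=["openai_llm_mock"]
    )
    def test_generate_with_mock_alt(openai_llm_mock):  # noqa: F811
        llm = OpenAILLM()
        assert llm.generate("test prompt") == "mocked response"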
4 changes: 4 additions & 0 deletions vision_agent/agent/vision_agent_v2.py
@@ -4,6 +4,7 @@
 from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union

 import pandas as pd
+from langsmith import traceable
 from rich.console import Console
 from rich.syntax import Syntax
 from tabulate import tabulate
@@ -66,6 +67,7 @@ def extract_json(json_str: str) -> Dict[str, Any]:
     return json_dict  # type: ignore


+@traceable(name="planning")
 def write_plan(
     chat: List[Dict[str, str]],
     plan: Optional[List[Dict[str, Any]]],
@@ -214,6 +216,7 @@ def write_and_exec_code(
     return success, code, result, working_memory


+@traceable(name="plan execution")
 def run_plan(
     user_req: str,
     plan: List[Dict[str, Any]],
@@ -333,6 +336,7 @@ def __call__(
         results = self.chat_with_workflow(input, image, plan)
         return results["code"]  # type: ignore

+    @traceable
     def chat_with_workflow(
         self,
         chat: List[Dict[str, str]],
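For context (editorial, not part of the diff): langsmith.traceable records each call of the decorated function as a run, and a traceable function invoked inside another appears as a nested child run. That is how the "planning" and "plan execution" steps end up grouped under a single chat_with_workflow trace. A minimal sketch, assuming the tracing environment variables are set and with made-up function bodies:

    from langsmith import traceable

    @traceable(name="planning")
    def write_plan(chat: list) -> list:
        # the chat input and the returned plan are captured on this run
        return [{"instruction": "detect objects in the image"}]

    @traceable  # the run name defaults to the function name
    def chat_with_workflow(chat: list) -> list:
        return write_plan(chat)  # recorded as a child run of chat_with_workflow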
11 changes: 7 additions & 4 deletions vision_agent/llm/llm.py
@@ -3,6 +3,7 @@
 from abc import ABC, abstractmethod
 from typing import Any, Callable, Dict, List, Mapping, Optional, Union, cast

+from langsmith.wrappers import wrap_openai
 from openai import AzureOpenAI, OpenAI

 from vision_agent.tools import (
@@ -41,9 +42,9 @@ def __init__(
         **kwargs: Any
     ):
         if not api_key:
-            self.client = OpenAI()
+            self.client = wrap_openai(OpenAI())
         else:
-            self.client = OpenAI(api_key=api_key)
+            self.client = wrap_openai(OpenAI(api_key=api_key))

         self.model_name = model_name
         self.system_prompt = system_prompt
@@ -165,8 +166,10 @@ def __init__(
         if not azure_endpoint:
             raise ValueError("Azure OpenAI endpoint is required.")

-        self.client = AzureOpenAI(
-            api_key=api_key, api_version=api_version, azure_endpoint=azure_endpoint
+        self.client = wrap_openai(
+            AzureOpenAI(
+                api_key=api_key, api_version=api_version, azure_endpoint=azure_endpoint
+            )
         )
         self.model_name = model_name
         self.kwargs = kwargs
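The wrap_openai change is the least invasive part of the integration: the wrapper returns a client with the same interface, so existing call sites such as self.client.chat.completions.create(...) keep working unchanged while every completion is logged. A usage sketch (the model name and prompt are placeholders, not from this repo):

    from langsmith.wrappers import wrap_openai
    from openai import OpenAI

    client = wrap_openai(OpenAI())  # same API surface as the bare client
    resp = client.chat.completions.create(
        model="gpt-4-turbo",  # placeholder model name
        messages=[{"role": "user", "content": "test prompt"}],
    )
    print(resp.choices[0].message.content)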
