From 2cdaefb63081d6147d7a7bfec4ada8dabe2de695 Mon Sep 17 00:00:00 2001
From: Yazhou Cao
Date: Wed, 15 May 2024 18:02:09 -0700
Subject: [PATCH] Fix unit test

---
 tests/fixtures.py                     | 9 ++++++++-
 tests/test_llm.py                     | 7 ++++---
 vision_agent/agent/vision_agent_v2.py | 2 +-
 vision_agent/llm/llm.py               | 1 +
 4 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/tests/fixtures.py b/tests/fixtures.py
index 75013c9f..036ed9d6 100644
--- a/tests/fixtures.py
+++ b/tests/fixtures.py
@@ -9,7 +9,7 @@ def openai_llm_mock(request):
     content = request.param
 
     # Note the path here is adjusted to where OpenAI is used, not where it's defined
-    with patch("vision_agent.llm.llm.wrap_openai") as mock:
+    with patch("vision_agent.llm.llm.OpenAI") as mock:
         # Setup a mock response structure that matches what your code expects
         mock_instance = mock.return_value
         mock_instance.chat.completions.create.return_value = MagicMock(
@@ -18,6 +18,13 @@ def openai_llm_mock(request):
     yield mock_instance
 
 
+@pytest.fixture
+def langsmith_wrap_openai_mock(request, openai_llm_mock):
+    with patch("vision_agent.llm.llm.wrap_openai") as mock:
+        mock.return_value = openai_llm_mock
+        yield mock
+
+
 @pytest.fixture
 def openai_lmm_mock(request):
     content = request.param
diff --git a/tests/test_llm.py b/tests/test_llm.py
index 40c8f8bc..8d1eb1c8 100644
--- a/tests/test_llm.py
+++ b/tests/test_llm.py
@@ -6,6 +6,7 @@
     clip_mock,
     grounding_dino_mock,
     grounding_sam_mock,
+    langsmith_wrap_openai_mock,
     openai_llm_mock,
 )
 
@@ -13,7 +14,7 @@
 @pytest.mark.parametrize(
     "openai_llm_mock", ["mocked response"], indirect=["openai_llm_mock"]
 )
-def test_generate_with_mock(openai_llm_mock):  # noqa: F811
+def test_generate_with_mock(openai_llm_mock, langsmith_wrap_openai_mock):  # noqa: F811
     llm = OpenAILLM()
     response = llm.generate("test prompt")
     assert response == "mocked response"
@@ -26,7 +27,7 @@ def test_generate_with_mock(openai_llm_mock):  # noqa: F811
 @pytest.mark.parametrize(
     "openai_llm_mock", ["mocked response"], indirect=["openai_llm_mock"]
 )
-def test_chat_with_mock(openai_llm_mock):  # noqa: F811
+def test_chat_with_mock(openai_llm_mock, langsmith_wrap_openai_mock):  # noqa: F811
     llm = OpenAILLM()
     response = llm.chat([{"role": "user", "content": "test prompt"}])
     assert response == "mocked response"
@@ -52,7 +53,7 @@ def openai_llm_mock_turbo(openai_llm_mock_2):  # noqa: F811
 @pytest.mark.parametrize(
     "openai_llm_mock", ["mocked response"], indirect=["openai_llm_mock"]
 )
-def test_call_with_mock(openai_llm_mock):  # noqa: F811
+def test_call_with_mock(openai_llm_mock, langsmith_wrap_openai_mock):  # noqa: F811
     llm = OpenAILLM()
     response = llm("test prompt")
     assert response == "mocked response"
diff --git a/vision_agent/agent/vision_agent_v2.py b/vision_agent/agent/vision_agent_v2.py
index 5cc35567..6bb2125f 100644
--- a/vision_agent/agent/vision_agent_v2.py
+++ b/vision_agent/agent/vision_agent_v2.py
@@ -4,10 +4,10 @@
 from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
 
 import pandas as pd
+from langsmith import traceable
 from rich.console import Console
 from rich.syntax import Syntax
 from tabulate import tabulate
-from langsmith import traceable
 
 from vision_agent.agent import Agent
 from vision_agent.agent.vision_agent_v2_prompt import (
diff --git a/vision_agent/llm/llm.py b/vision_agent/llm/llm.py
index 361b195d..a0035b29 100644
--- a/vision_agent/llm/llm.py
+++ b/vision_agent/llm/llm.py
@@ -2,6 +2,7 @@
 import os
 from abc import ABC, abstractmethod
 from typing import Any, Callable, Dict, List, Mapping, Optional, Union, cast
+
 from langsmith.wrappers import wrap_openai
 from openai import AzureOpenAI, OpenAI
 
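
Why the extra fixture: with LangSmith tracing added, OpenAILLM presumably builds its client as wrap_openai(OpenAI(...)) inside vision_agent/llm/llm.py, so patching OpenAI alone no longer reaches the client the tests exercise; langsmith_wrap_openai_mock turns wrap_openai into a pass-through that returns the already-mocked client. A minimal sketch of the code path being mocked, under that assumption (the real constructor and generate method may differ in detail):

    # Illustrative sketch of the assumed client construction in
    # vision_agent/llm/llm.py; not the actual implementation.
    from langsmith.wrappers import wrap_openai
    from openai import OpenAI

    class OpenAILLM:
        def __init__(self, model_name: str = "gpt-4") -> None:
            self.model_name = model_name
            # wrap_openai attaches LangSmith tracing to the client, which is
            # why the tests patch both "OpenAI" and "wrap_openai" at the
            # point of use, vision_agent.llm.llm.
            self.client = wrap_openai(OpenAI())

        def generate(self, prompt: str) -> str:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": prompt}],
            )
            return response.choices[0].message.content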