Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add EasyTool #19

Merged
merged 12 commits into from
Mar 20, 2024
Merged
23 changes: 23 additions & 0 deletions tests/fixtures.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

import pytest

from vision_agent.tools import CLIP, GroundingDINO, GroundingSAM


@pytest.fixture
def openai_llm_mock(request):
Expand All @@ -27,3 +29,24 @@ def openai_lmm_mock(request):
choices=[MagicMock(message=MagicMock(content=content))]
)
yield mock_instance


@pytest.fixture
def clip_mock(request):
    """Stub out ``CLIP.__call__`` so tests exercise wiring, not the real model.

    Yields the patched mock (always returning ``"test"``) so callers can
    inspect its call args.
    """
    with patch.object(CLIP, "__call__", autospec=True) as call_stub:
        call_stub.return_value = "test"
        yield call_stub


@pytest.fixture
def grounding_dino_mock(request):
    """Stub out ``GroundingDINO.__call__`` so tests never run the real detector.

    Yields the patched mock (always returning ``"test"``) so callers can
    inspect its call args.
    """
    with patch.object(GroundingDINO, "__call__", autospec=True) as call_stub:
        call_stub.return_value = "test"
        yield call_stub


@pytest.fixture
def grounding_sam_mock(request):
    """Stub out ``GroundingSAM.__call__`` so tests never run the real segmentor.

    Yields the patched mock (always returning ``"test"``) so callers can
    inspect its call args.
    """
    with patch.object(GroundingSAM, "__call__", autospec=True) as call_stub:
        call_stub.return_value = "test"
        yield call_stub
26 changes: 15 additions & 11 deletions tests/test_llm.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
import pytest

from vision_agent.llm.llm import OpenAILLM
from vision_agent.tools import CLIP, GroundingDINO, GroundingSAM

from .fixtures import openai_llm_mock # noqa: F401
from .fixtures import ( # noqa: F401
clip_mock,
grounding_dino_mock,
grounding_sam_mock,
openai_llm_mock,
)


@pytest.mark.parametrize(
Expand Down Expand Up @@ -57,35 +61,35 @@ def test_call_with_mock(openai_llm_mock): # noqa: F811
['{"Parameters": {"prompt": "cat"}}'],
indirect=["openai_llm_mock"],
)
def test_generate_classifier(openai_llm_mock): # noqa: F811
def test_generate_classifier(openai_llm_mock, clip_mock):  # noqa: F811
    """generate_classifier should build a CLIP tool wired to the parsed prompt."""
    model = OpenAILLM()
    classifier = model.generate_classifier("Can you generate a cat classifier?")
    # The mocked LLM returns {"Parameters": {"prompt": "cat"}}, so the tool
    # must be a CLIP instance carrying that prompt.
    assert isinstance(classifier, CLIP)
    assert classifier.prompt == "cat"
    # Invoking the tool should forward both prompt and image as kwargs.
    classifier("image.png")
    assert clip_mock.call_args[1] == {"prompt": "cat", "image": "image.png"}


@pytest.mark.parametrize(
"openai_llm_mock",
['{"Parameters": {"prompt": "cat"}}'],
indirect=["openai_llm_mock"],
)
def test_generate_detector(openai_llm_mock): # noqa: F811
def test_generate_detector(openai_llm_mock, grounding_dino_mock):  # noqa: F811
    """generate_detector should build a GroundingDINO tool from the parsed prompt."""
    model = OpenAILLM()
    detector = model.generate_detector("Can you generate a cat detector?")
    # The mocked LLM returns {"Parameters": {"prompt": "cat"}}, so the tool
    # must be a GroundingDINO instance carrying that prompt.
    assert isinstance(detector, GroundingDINO)
    assert detector.prompt == "cat"
    # Invoking the tool should forward both prompt and image as kwargs.
    detector("image.png")
    assert grounding_dino_mock.call_args[1] == {"prompt": "cat", "image": "image.png"}


@pytest.mark.parametrize(
"openai_llm_mock",
['{"Parameters": {"prompt": "cat"}}'],
indirect=["openai_llm_mock"],
)
def test_generate_segmentor(openai_llm_mock): # noqa: F811
def test_generate_segmentor(openai_llm_mock, grounding_sam_mock):  # noqa: F811
    """generate_segmentor should build a GroundingSAM tool from the parsed prompt."""
    model = OpenAILLM()
    segmentor = model.generate_segmentor("Can you generate a cat segmentor?")
    # The mocked LLM returns {"Parameters": {"prompt": "cat"}}, so the tool
    # must be a GroundingSAM instance carrying that prompt.
    assert isinstance(segmentor, GroundingSAM)
    assert segmentor.prompt == "cat"
    # Invoking the tool should forward both prompt and image as kwargs.
    segmentor("image.png")
    assert grounding_sam_mock.call_args[1] == {"prompt": "cat", "image": "image.png"}
26 changes: 15 additions & 11 deletions tests/test_lmm.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,13 @@
from PIL import Image

from vision_agent.lmm.lmm import OpenAILMM
from vision_agent.tools import CLIP, GroundingDINO, GroundingSAM

from .fixtures import openai_lmm_mock # noqa: F401
from .fixtures import ( # noqa: F401
clip_mock,
grounding_dino_mock,
grounding_sam_mock,
openai_lmm_mock,
)


def create_temp_image(image_format="jpeg"):
Expand Down Expand Up @@ -77,35 +81,35 @@ def test_call_with_mock(openai_lmm_mock): # noqa: F811
['{"Parameters": {"prompt": "cat"}}'],
indirect=["openai_lmm_mock"],
)
def test_generate_classifier(openai_lmm_mock): # noqa: F811
def test_generate_classifier(openai_lmm_mock, clip_mock):  # noqa: F811
    """generate_classifier should build a CLIP tool wired to the parsed prompt."""
    model = OpenAILMM()
    classifier = model.generate_classifier("Can you generate a cat classifier?")
    # The mocked LMM returns {"Parameters": {"prompt": "cat"}}, so the tool
    # must be a CLIP instance carrying that prompt.
    assert isinstance(classifier, CLIP)
    assert classifier.prompt == "cat"
    # Invoking the tool should forward both prompt and image as kwargs.
    classifier("image.png")
    assert clip_mock.call_args[1] == {"prompt": "cat", "image": "image.png"}


@pytest.mark.parametrize(
"openai_lmm_mock",
['{"Parameters": {"prompt": "cat"}}'],
indirect=["openai_lmm_mock"],
)
def test_generate_detector(openai_lmm_mock): # noqa: F811
def test_generate_detector(openai_lmm_mock, grounding_dino_mock):  # noqa: F811
    """generate_detector should build a GroundingDINO tool from the parsed prompt.

    The ``openai_lmm_mock`` fixture makes the LMM return
    ``{"Parameters": {"prompt": "cat"}}``, so the resulting tool must carry
    that prompt and forward it (plus the image) when called.
    """
    lmm = OpenAILMM()
    # Fixed copy-paste: the prompt previously asked for a "classifier" in this
    # detector test; the parallel test in test_llm.py asks for a detector.
    prompt = "Can you generate a cat detector?"
    detector = lmm.generate_detector(prompt)
    assert isinstance(detector, GroundingDINO)
    assert detector.prompt == "cat"
    # Invoking the tool should forward both prompt and image as kwargs.
    detector("image.png")
    assert grounding_dino_mock.call_args[1] == {"prompt": "cat", "image": "image.png"}


@pytest.mark.parametrize(
"openai_lmm_mock",
['{"Parameters": {"prompt": "cat"}}'],
indirect=["openai_lmm_mock"],
)
def test_generate_segmentor(openai_lmm_mock): # noqa: F811
def test_generate_segmentor(openai_lmm_mock, grounding_sam_mock):  # noqa: F811
    """generate_segmentor should build a GroundingSAM tool from the parsed prompt.

    The ``openai_lmm_mock`` fixture makes the LMM return
    ``{"Parameters": {"prompt": "cat"}}``, so the resulting tool must carry
    that prompt and forward it (plus the image) when called.
    """
    lmm = OpenAILMM()
    # Fixed copy-paste: the prompt previously asked for a "classifier" in this
    # segmentor test; the parallel test in test_llm.py asks for a segmentor.
    prompt = "Can you generate a cat segmentor?"
    segmentor = lmm.generate_segmentor(prompt)
    assert isinstance(segmentor, GroundingSAM)
    assert segmentor.prompt == "cat"
    # Invoking the tool should forward both prompt and image as kwargs.
    segmentor("image.png")
    assert grounding_sam_mock.call_args[1] == {"prompt": "cat", "image": "image.png"}
1 change: 1 addition & 0 deletions vision_agent/agent/__init__.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
from .agent import Agent
from .reflexion import Reflexion
from .easytool import EasyTool
Loading
Loading