diff --git a/tests/test_llm.py b/tests/test_llm.py
index a8070f30..a28288a4 100644
--- a/tests/test_llm.py
+++ b/tests/test_llm.py
@@ -19,6 +19,39 @@ def test_generate_with_mock(openai_llm_mock):  # noqa: F811
     )
 
 
+@pytest.mark.parametrize(
+    "openai_llm_mock", ["mocked response"], indirect=["openai_llm_mock"]
+)
+def test_chat_with_mock(openai_llm_mock):  # noqa: F811
+    llm = OpenAILLM()
+    response = llm.chat([{"role": "user", "content": "test prompt"}])
+    assert response == "mocked response"
+    openai_llm_mock.chat.completions.create.assert_called_once_with(
+        model="gpt-4-turbo-preview",
+        messages=[{"role": "user", "content": "test prompt"}],
+    )
+
+
+@pytest.mark.parametrize(
+    "openai_llm_mock", ["mocked response"], indirect=["openai_llm_mock"]
+)
+def test_call_with_mock(openai_llm_mock):  # noqa: F811
+    llm = OpenAILLM()
+    response = llm("test prompt")
+    assert response == "mocked response"
+    openai_llm_mock.chat.completions.create.assert_called_once_with(
+        model="gpt-4-turbo-preview",
+        messages=[{"role": "user", "content": "test prompt"}],
+    )
+
+    response = llm([{"role": "user", "content": "test prompt"}])
+    assert response == "mocked response"
+    openai_llm_mock.chat.completions.create.assert_called_with(
+        model="gpt-4-turbo-preview",
+        messages=[{"role": "user", "content": "test prompt"}],
+    )
+
+
 @pytest.mark.parametrize(
     "openai_llm_mock", ['{"Parameters": {"prompt": "cat"}}'],
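
Note (not part of this diff): the new tests depend on an openai_llm_mock fixture supplied via indirect parametrization, which is defined elsewhere in the test suite. A minimal sketch of what such a fixture could look like, assuming it stubs the OpenAI chat-completions client with unittest.mock; the fixture name matches the tests, but the internals and patch target here are assumptions, not the repository's actual implementation:

# conftest.py (hypothetical sketch)
from unittest import mock

import pytest


@pytest.fixture
def openai_llm_mock(request):
    # With indirect parametrization, request.param is the text the fake
    # completion should return (e.g. "mocked response").
    mock_client = mock.MagicMock()
    mock_client.chat.completions.create.return_value = mock.MagicMock(
        choices=[mock.MagicMock(message=mock.MagicMock(content=request.param))]
    )
    # In the real fixture, this mock client would be patched in wherever
    # OpenAILLM constructs its OpenAI client, e.g.:
    # with mock.patch("<module under test>.OpenAI", return_value=mock_client): ...
    yield mock_client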