Commit

fixed test case
dillonalaird committed Apr 15, 2024
1 parent 7f2140f commit 9f8b7e2
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions tests/test_llm.py
@@ -18,7 +18,7 @@ def test_generate_with_mock(openai_llm_mock):  # noqa: F811
     response = llm.generate("test prompt")
     assert response == "mocked response"
     openai_llm_mock.chat.completions.create.assert_called_once_with(
-        model="gpt-4-turbo-preview",
+        model="gpt-4-turbo",
         messages=[{"role": "user", "content": "test prompt"}],
     )

@@ -31,7 +31,7 @@ def test_chat_with_mock(openai_llm_mock):  # noqa: F811
     response = llm.chat([{"role": "user", "content": "test prompt"}])
     assert response == "mocked response"
     openai_llm_mock.chat.completions.create.assert_called_once_with(
-        model="gpt-4-turbo-preview",
+        model="gpt-4-turbo",
         messages=[{"role": "user", "content": "test prompt"}],
     )

@@ -44,14 +44,14 @@ def test_call_with_mock(openai_llm_mock):  # noqa: F811
     response = llm("test prompt")
     assert response == "mocked response"
     openai_llm_mock.chat.completions.create.assert_called_once_with(
-        model="gpt-4-turbo-preview",
+        model="gpt-4-turbo",
         messages=[{"role": "user", "content": "test prompt"}],
     )

     response = llm([{"role": "user", "content": "test prompt"}])
     assert response == "mocked response"
     openai_llm_mock.chat.completions.create.assert_called_with(
-        model="gpt-4-turbo-preview",
+        model="gpt-4-turbo",
         messages=[{"role": "user", "content": "test prompt"}],
     )

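The tests above exercise an LLM wrapper (llm.generate, llm.chat, and a direct call) against a mocked OpenAI client and assert the model name passed to chat.completions.create, which is why only the model string changes in this commit. Below is a minimal sketch of the kind of pytest fixture such tests typically rely on, assuming pytest and unittest.mock; the fixture name mirrors the tests, but the patch target ("your_package.llm.OpenAI") and the response shape are illustrative assumptions, not taken from this repository.

import pytest
from unittest.mock import MagicMock, patch


@pytest.fixture
def openai_llm_mock():
    # Illustrative sketch only: the patch target below is a placeholder for
    # wherever the LLM wrapper imports the OpenAI client from.
    client = MagicMock()
    # Shape the return value so that response.choices[0].message.content
    # yields the string the tests compare against.
    client.chat.completions.create.return_value = MagicMock(
        choices=[MagicMock(message=MagicMock(content="mocked response"))]
    )
    with patch("your_package.llm.OpenAI", return_value=client):
        yield client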
