
Comparing changes

base repository: openradx/radis
base: main
head repository: openradx/radis
compare: custom-rag-output

Can't automatically merge, but the pull request can still be created.
  • 6 commits
  • 28 files changed
  • 1 contributor

Commits on Oct 27, 2024

  1. 94b19cc

Commits on Dec 2, 2024

  1. 4522498
  2. state 2.12.2024 (hummerichsander, adbbb4e)

Commits on Dec 3, 2024

  1. b8a3c9f
  2. 030efad
  3. add space (hummerichsander, 1998c42)
1 change: 1 addition & 0 deletions docker-compose.dev.yml
@@ -43,6 +43,7 @@ services:
./manage.py create_example_users &&
./manage.py create_example_groups &&
./manage.py populate_example_reports --lng ${EXAMPLE_REPORTS_LANGUAGE:-en} &&
./manage.py create_default_grammars &&
wait-for-it -s llamacpp.local:8080 -t 60 &&
./manage.py runserver 0.0.0.0:8000
"
3 changes: 2 additions & 1 deletion radis/chats/admin.py
@@ -1,5 +1,6 @@
from django.contrib import admin

from .models import ChatsSettings
from .models import ChatsSettings, Grammar

admin.site.register(ChatsSettings, admin.ModelAdmin)
admin.site.register(Grammar, admin.ModelAdmin)
20 changes: 20 additions & 0 deletions radis/chats/factories.py
@@ -0,0 +1,20 @@
import factory

from .models import Grammar


class BaseDjangoModelFactory[T](factory.django.DjangoModelFactory):
@classmethod
def create(cls, *args, **kwargs) -> T:
return super().create(*args, **kwargs)


class GrammarFactory(BaseDjangoModelFactory[Grammar]):
class Meta:
model = Grammar

name = factory.Faker("word")
human_readable_name = factory.Faker("word")
grammar = factory.Faker("sentence")
llm_instruction = factory.Faker("sentence")
is_default = False
24 changes: 24 additions & 0 deletions radis/chats/migrations/0003_grammar.py
@@ -0,0 +1,24 @@
# Generated by Django 5.1.3 on 2024-12-03 14:21

from django.db import migrations, models


class Migration(migrations.Migration):

dependencies = [
('chats', '0002_chat_report'),
]

operations = [
migrations.CreateModel(
name='Grammar',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('human_readable_name', models.CharField(max_length=100)),
('grammar', models.TextField()),
('llm_instruction', models.TextField()),
('is_default', models.BooleanField(default=False)),
],
),
]
12 changes: 12 additions & 0 deletions radis/chats/models.py
@@ -13,6 +13,18 @@ class Meta:
verbose_name_plural = "Chats settings"


class Grammar(models.Model):
name = models.CharField(max_length=100, unique=True)
human_readable_name = models.CharField(max_length=100)
grammar = models.TextField()
llm_instruction = models.TextField()

is_default = models.BooleanField(default=False)

def __str__(self):
return f"{self.human_readable_name}"


class Chat(models.Model):
title = models.CharField(max_length=255, default="New Chat")
owner = models.ForeignKey(
9 changes: 9 additions & 0 deletions radis/chats/tests/conftest.py
@@ -1,6 +1,9 @@
import pytest
from faker import Faker

from radis.chats.factories import GrammarFactory
from radis.chats.models import Grammar


@pytest.fixture
def report_body() -> str:
@@ -12,3 +15,9 @@ def report_body() -> str:
def question_body() -> str:
question_body = Faker().sentences(nb=1)
return " ".join(question_body)


@pytest.fixture
def grammar() -> Grammar:
grammar = GrammarFactory.create()
return grammar
32 changes: 24 additions & 8 deletions radis/chats/tests/utils/test_chat_client.py
@@ -1,34 +1,50 @@
from unittest.mock import patch

import pytest
from django.db import close_old_connections

from radis.chats.utils.chat_client import AsyncChatClient


@pytest.mark.asyncio
async def test_ask_question(report_body, question_body, openai_chat_completions_mock):
@pytest.mark.django_db(transaction=True)
async def test_ask_question(report_body, question_body, openai_chat_completions_mock, grammar):
openai_mock = openai_chat_completions_mock("Fake Answer")

with patch("openai.AsyncOpenAI", return_value=openai_mock):
answer = await AsyncChatClient().ask_report_question(report_body, question_body)
answer = await AsyncChatClient().ask_report_question(
context=report_body,
question=question_body,
grammar=grammar,
)

assert answer == "Fake Answer"
assert openai_mock.chat.completions.create.call_count == 1

close_old_connections()


@pytest.mark.asyncio
async def test_ask_yes_no_question(report_body, question_body, openai_chat_completions_mock):
@pytest.mark.django_db(transaction=True)
async def test_ask_yes_no_question(
report_body, question_body, openai_chat_completions_mock, grammar
):
openai_yes_mock = openai_chat_completions_mock("Yes")
openai_no_mock = openai_chat_completions_mock("No")

with patch("openai.AsyncOpenAI", return_value=openai_yes_mock):
answer = await AsyncChatClient().ask_report_yes_no_question(report_body, question_body)
answer = await AsyncChatClient().ask_report_question(
context=report_body, question=question_body, grammar=grammar
)

assert answer == "yes"
assert answer == "Yes"
assert openai_yes_mock.chat.completions.create.call_count == 1

with patch("openai.AsyncOpenAI", return_value=openai_no_mock):
answer = await AsyncChatClient().ask_report_yes_no_question(report_body, question_body)
answer = await AsyncChatClient().ask_report_question(
context=report_body, question=question_body, grammar=grammar
)

assert answer == "no"
assert answer == "No"
assert openai_no_mock.chat.completions.create.call_count == 1

close_old_connections()
73 changes: 45 additions & 28 deletions radis/chats/utils/chat_client.py
@@ -1,10 +1,16 @@
import logging
from string import Template
from typing import Iterable, Literal
from typing import Iterable

import openai
from django.conf import settings
from openai.types.chat import ChatCompletionMessageParam
from openai.types.chat import (
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)

from radis.chats.models import Grammar

logger = logging.getLogger(__name__)

@@ -18,62 +24,73 @@ def __init__(self):
async def send_messages(
self,
messages: Iterable[ChatCompletionMessageParam],
grammar: Grammar,
max_tokens: int | None = None,
yes_no_answer: bool = False,
) -> str:
logger.debug(f"Sending messages to LLM:\n{messages}")
messages = [
ChatCompletionSystemMessageParam(
role="system", content=settings.CHAT_GENERAL_SYSTEM_PROMPT
),
*messages,
]

grammar = ""
if yes_no_answer:
grammar = settings.CHAT_YES_NO_ANSWER_GRAMMAR
logger.debug(f"\nUsing grammar: {grammar}")
logger.debug(f"Sending messages to LLM:\n{messages}")
logger.debug(f"Using grammar: {grammar.human_readable_name}")

completion = await self._client.chat.completions.create(
model="option_for_local_llm_not_needed",
messages=messages,
max_tokens=max_tokens,
extra_body={"grammar": grammar},
extra_body={"grammar": grammar.grammar},
)

answer = completion.choices[0].message.content
assert answer is not None
logger.debug("Received from LLM: %s", answer)

return answer

async def ask_report_question(self, context: str, question: str) -> str:
system_prompt = Template(settings.CHAT_REPORT_QUESTION_SYSTEM_PROMPT).substitute(
{"report": context}
async def ask_question(
self,
question: str,
grammar: Grammar,
) -> str:
system_prompt_str = Template(settings.CHAT_QUESTION_SYSTEM_PROMPT).substitute(
{"grammar_instructions": grammar.llm_instruction}
)
user_prompt = Template(settings.CHAT_REPORT_QUESTION_USER_PROMPT).substitute(
user_prompt_str = Template(settings.CHAT_QUESTION_USER_PROMPT).substitute(
{"question": question}
)

return await self.send_messages(
answer = await self.send_messages(
[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
ChatCompletionSystemMessageParam(role="system", content=system_prompt_str),
ChatCompletionUserMessageParam(role="user", content=user_prompt_str),
],
grammar=grammar,
)

async def ask_report_yes_no_question(self, context: str, question: str) -> Literal["yes", "no"]:
system_prompt = Template(settings.CHAT_REPORT_YES_NO_QUESTION_SYSTEM_PROMPT).substitute(
{"report": context}
return answer

async def ask_report_question(
self,
context: str,
question: str,
grammar: Grammar,
) -> str:
system_prompt = Template(settings.CHAT_REPORT_QUESTION_SYSTEM_PROMPT).substitute(
grammar_instructions=grammar.llm_instruction, report=context
)
user_prompt = Template(settings.CHAT_REPORT_QUESTION_USER_PROMPT).substitute(
{"question": question}
)

answer = await self.send_messages(
[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
ChatCompletionSystemMessageParam(role="system", content=system_prompt),
ChatCompletionUserMessageParam(role="user", content=user_prompt),
],
yes_no_answer=True,
grammar=grammar,
)

if answer == "Yes":
return "yes"
elif answer == "No":
return "no"
else:
raise ValueError(f"Unexpected answer: {answer}")
return answer
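Note: with this refactor, callers choose the answer format by passing a Grammar row instead of the yes_no_answer flag, and the Literal["yes", "no"] mapping is gone. A rough usage sketch (report text, question, and the wrapper function are placeholders, not part of the branch):

    # Rough usage sketch, assuming the default grammars exist in the database.
    from radis.chats.models import Grammar
    from radis.chats.utils.chat_client import AsyncChatClient

    async def answer_yes_no(report_text: str, question: str) -> str:
        grammar = await Grammar.objects.aget(name="YES_NO")
        client = AsyncChatClient()
        # The grammar constrains the model output, so the raw "Yes"/"No"
        # string is returned unchanged instead of being normalized.
        return await client.ask_report_question(
            context=report_text, question=question, grammar=grammar
        )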
20 changes: 14 additions & 6 deletions radis/chats/views.py
@@ -19,7 +19,7 @@
from radis.chats.tables import ChatTable
from radis.reports.models import Report

from .models import Chat, ChatMessage, ChatRole
from .models import Chat, ChatMessage, ChatRole, Grammar
from .utils.chat_client import AsyncChatClient


@@ -64,26 +64,33 @@ async def chat_create_view(request: AuthenticatedHttpRequest) -> HttpResponse:

client = AsyncChatClient()

if request.POST.get("yes_no_answer"):
grammar = await Grammar.objects.aget(name="YES_NO")
else:
grammar = await Grammar.objects.aget(name="FREE_TEXT")

# Generate an answer for the user prompt
answer = await client.send_messages(
[
{"role": "system", "content": instructions_system_prompt},
{"role": "user", "content": user_prompt},
],
yes_no_answer=True if request.POST.get("yes_no_answer") else False,
grammar=grammar,
)

# Generate a title for the chat
title_system_prompt = Template(settings.CHAT_GENERATE_TITLE_SYSTEM_PROMPT).substitute(
{"num_words": 6}
)

free_text_grammar = await Grammar.objects.aget(name="FREE_TEXT")
title = await client.send_messages(
[
{"role": "system", "content": title_system_prompt},
{"role": "user", "content": user_prompt},
],
max_tokens=20,
grammar=free_text_grammar,
)
title = title.strip().rstrip(string.punctuation)[:100]

@@ -172,10 +179,11 @@ async def chat_update_view(request: AuthenticatedHttpRequest, pk: int) -> HttpRe
messages.append({"role": "user", "content": prompt})

client = AsyncChatClient()
response = await client.send_messages(
messages,
yes_no_answer=True if request.POST.get("yes_no_answer") else False,
)
if request.POST.get("yes_no_answer"):
grammar = await Grammar.objects.aget(name="YES_NO")
else:
grammar = await Grammar.objects.aget(name="FREE_TEXT")
response = await client.send_messages(messages, grammar=grammar)

await ChatMessage.objects.acreate(chat=chat, role=ChatRole.USER, content=prompt)
await ChatMessage.objects.acreate(chat=chat, role=ChatRole.ASSISTANT, content=response)
7 changes: 7 additions & 0 deletions radis/conftest.py
@@ -30,3 +30,10 @@ def _openai_chat_completions_mock(content: str) -> ContextManager:
return mock_openai

return _openai_chat_completions_mock


@pytest.fixture
def default_grammars() -> None:
from radis.core.management.commands.create_default_grammars import Command

Command().handle()
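Note: a hypothetical test built on this fixture (test name and assertions are illustrative, not taken from the branch; the grammar names follow the YES_NO and FREE_TEXT lookups in views.py):

    # Hypothetical test using the default_grammars fixture.
    import pytest
    from radis.chats.models import Grammar

    @pytest.mark.django_db
    def test_default_grammars_are_created(default_grammars):
        assert Grammar.objects.filter(name="YES_NO", is_default=True).exists()
        assert Grammar.objects.filter(name="FREE_TEXT", is_default=True).exists()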
29 changes: 29 additions & 0 deletions radis/core/management/commands/create_default_grammars.py
@@ -0,0 +1,29 @@
import time

from django.conf import settings
from django.core.management.base import BaseCommand, CommandParser
from faker import Faker

from radis.chats.models import Grammar

fake = Faker()


class Command(BaseCommand):
help = "Populates the database with default grammars."

def add_arguments(self, parser: CommandParser) -> None:
super().add_arguments(parser)

def handle(self, *args, **options):
self.stdout.write(
f"Adding {len(settings.CHAT_DEFAULT_GRAMMARS)} default grammars to the database...",
ending="",
)
self.stdout.flush()

start = time.time()
for default_grammar in settings.CHAT_DEFAULT_GRAMMARS:
default_grammar["is_default"] = True
Grammar.objects.update_or_create(name=default_grammar["name"], defaults=default_grammar)
self.stdout.write(f"Done (in {time.time() - start:.2f} seconds)")
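Note: the command expects settings.CHAT_DEFAULT_GRAMMARS to be a list of dicts whose keys match the Grammar model fields; keying update_or_create on name keeps repeated runs idempotent, and is_default is forced to True here. A hypothetical settings entry could look like the sketch below (the grammar and instruction strings are placeholders, not the project's shipped defaults):

    # settings.py, illustrative sketch only
    CHAT_DEFAULT_GRAMMARS = [
        {
            "name": "FREE_TEXT",
            "human_readable_name": "Free text answer",
            "grammar": "",  # placeholder; the real default is not shown in this diff
            "llm_instruction": "Answer the question in free text.",
        },
        {
            "name": "YES_NO",
            "human_readable_name": "Yes/No answer",
            "grammar": 'root ::= "Yes" | "No"',  # placeholder GBNF-style rule
            "llm_instruction": "Answer only with Yes or No.",
        },
    ]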