@@ -14,12 +13,18 @@
-
-
+
+
-
+
-
{{ option.title }}
+
+ {{ option.title }}
+ (coming soon..)
+
@@ -42,14 +47,13 @@
const emit = defineEmits(['typeChosen'])
const publishingOptions = [
-
-
- { key: "mock", title: 'Mock LLM', description: 'This will mock all the LLM features great for local development', current: true },
- { key: "openai", title: 'OpenAi', description: 'This will work with the OpenAi Api', current: false },
- { key: "mock", title: 'OpenAi Azure', description: 'This will work with the Azure OpenAi Api', current: false },
- { key: "ollama", title: 'Ollama', description: 'This will work with the Ollam API', current: false },
- { key: "mock", title: 'Gemini', description: 'This will work with the Gemini Api', current: false },
- { key: "mock", title: 'Claude', description: 'This will work with the Claude Api', current: false },
+ { active: true, key: "mock", title: 'Mock LLM', description: 'This will mock all the LLM features great for local development', current: true },
+ { active: true, key: "openai", title: 'OpenAi', description: 'This will work with the OpenAi Api', current: false },
+ { active: true, key: "claude", title: 'Claude', description: 'This will work with the Claude Api', current: false },
+ { active: false, key: "mock", title: 'OpenAi Azure', description: 'This will work with the Azure OpenAi Api', current: false },
+ { active: false, key: "ollama", title: 'Ollama', description: 'This will work with the Ollama API', current: false },
+ { active: false, key: "mock", title: 'Gemini', description: 'This will work with the Gemini Api', current: false },
+
]
const selected = ref(publishingOptions[0])
diff --git a/resources/js/Pages/Collection/Components/ResourceForm.vue b/resources/js/Pages/Collection/Components/ResourceForm.vue
index 08778479..fc87b515 100644
--- a/resources/js/Pages/Collection/Components/ResourceForm.vue
+++ b/resources/js/Pages/Collection/Components/ResourceForm.vue
@@ -11,9 +11,14 @@
-
+
+
Choose the system to Interact with the data
+
+
Choose the system to Embed the data
+
+
@@ -23,15 +28,18 @@ import InputLabel from '@/Components/InputLabel.vue';
import TextInput from '@/Components/TextInput.vue';
import TextArea from '@/Components/TextArea.vue';
import LlmType from './LlmType.vue';
+import EmbeddingType from './EmbeddingType.vue';
const props = defineProps({
modelValue: Object,
});
-
const typeChosen = (type) => {
- console.log("TYPE " + type)
props.modelValue.driver = type;
}
+const embeddingTypeChosen = (type) => {
+ props.modelValue.embedding_driver = type;
+}
+
\ No newline at end of file
diff --git a/resources/js/Pages/Collection/Create.vue b/resources/js/Pages/Collection/Create.vue
index 862b9024..2e71e41c 100644
--- a/resources/js/Pages/Collection/Create.vue
+++ b/resources/js/Pages/Collection/Create.vue
@@ -78,6 +78,7 @@ const closeSlideOut = () => {
const form = useForm({
name: "",
driver: "mock",
+ embedding_driver: "mock",
description: "Some details about your collection that will help give the ai system some context."
})
diff --git a/resources/js/Pages/Collection/Index.vue b/resources/js/Pages/Collection/Index.vue
index 5bb564e4..7bf153f7 100644
--- a/resources/js/Pages/Collection/Index.vue
+++ b/resources/js/Pages/Collection/Index.vue
@@ -64,6 +64,12 @@ const closeCreateCollectionSlideOut = () => {
{{ collectionItem.description }}
+
+ LLM Driver: {{ collectionItem.driver }}
+
+
+ Embedding Driver: {{ collectionItem.embedding_driver }}
+
Document Count: {{ collectionItem.documents_count }}
@@ -89,9 +95,6 @@ const closeCreateCollectionSlideOut = () => {
-
+
diff --git a/tests/Feature/ClaudeClientTest.php b/tests/Feature/ClaudeClientTest.php
index e7c15d6d..8f4e6330 100644
--- a/tests/Feature/ClaudeClientTest.php
+++ b/tests/Feature/ClaudeClientTest.php
@@ -3,10 +3,10 @@
namespace Tests\Feature;
use App\LlmDriver\ClaudeClient;
-use App\LlmDriver\MockClient;
use App\LlmDriver\Requests\MessageInDto;
use App\LlmDriver\Responses\CompletionResponse;
use App\LlmDriver\Responses\EmbeddingsResponseDto;
+use Illuminate\Support\Facades\Http;
use Tests\TestCase;
class ClaudeClientTest extends TestCase
@@ -17,6 +17,8 @@ class ClaudeClientTest extends TestCase
public function test_embeddings(): void
{
+ $this->markTestSkipped('@TODO: Requires another server');
+
$client = new ClaudeClient();
$results = $client->embedData('test');
@@ -27,7 +29,13 @@ public function test_embeddings(): void
public function test_completion(): void
{
- $client = new MockClient();
+ $client = new ClaudeClient();
+
+ $data = get_fixture('claude_completion.json');
+
+ Http::fake([
+ 'api.anthropic.com/*' => Http::response($data, 200),
+ ]);
$results = $client->completion('test');
@@ -35,11 +43,21 @@ public function test_completion(): void
}
- public function test_Chat(): void
+ public function test_chat(): void
{
- $client = new MockClient();
+ $client = new ClaudeClient();
+
+ $data = get_fixture('claude_completion.json');
+
+ Http::fake([
+ 'api.anthropic.com/*' => Http::response($data, 200),
+ ]);
$results = $client->chat([
+ MessageInDto::from([
+ 'content' => 'test',
+ 'role' => 'system',
+ ]),
MessageInDto::from([
'content' => 'test',
'role' => 'user',
@@ -48,5 +66,55 @@ public function test_Chat(): void
$this->assertInstanceOf(CompletionResponse::class, $results);
+ Http::assertSent(function ($request) {
+ $message1 = $request->data()['messages'][0]['role'];
+ $message2 = $request->data()['messages'][1]['role'];
+
+ return $message2 === 'assistant' &&
+ $message1 === 'user';
+ });
+
+ }
+
+ public function test_chat_with_multiple_assistant_messages(): void
+ {
+ $client = new ClaudeClient();
+
+ $data = get_fixture('claude_completion.json');
+
+ Http::fake([
+ 'api.anthropic.com/*' => Http::response($data, 200),
+ ]);
+
+ $results = $client->chat([
+ MessageInDto::from([
+ 'content' => 'test 3',
+ 'role' => 'assistant',
+ ]),
+ MessageInDto::from([
+ 'content' => 'test 2',
+ 'role' => 'assistant',
+ ]),
+ MessageInDto::from([
+ 'content' => 'test 1',
+ 'role' => 'assistant',
+ ]),
+ MessageInDto::from([
+ 'content' => 'test',
+ 'role' => 'user',
+ ]),
+ ]);
+
+ $this->assertInstanceOf(CompletionResponse::class, $results);
+
+ Http::assertSent(function ($request) {
+ $message1 = $request->data()['messages'][0]['role'];
+ $message2 = $request->data()['messages'][1]['role'];
+ $message3 = $request->data()['messages'][2]['role'];
+
+ return $message2 === 'assistant' &&
+ $message1 === 'user' && $message3 === 'assistant';
+ });
+
}
}
diff --git a/tests/Feature/Http/Controllers/CollectionControllerTest.php b/tests/Feature/Http/Controllers/CollectionControllerTest.php
index d7f9abce..c99ef50a 100644
--- a/tests/Feature/Http/Controllers/CollectionControllerTest.php
+++ b/tests/Feature/Http/Controllers/CollectionControllerTest.php
@@ -3,6 +3,7 @@
namespace Tests\Feature\Http\Controllers;
use App\Jobs\ProcessFileJob;
+use App\LlmDriver\DriversEnum;
use App\Models\Collection;
use App\Models\User;
use Illuminate\Http\UploadedFile;
@@ -16,7 +17,7 @@ class CollectionControllerTest extends TestCase
/**
* A basic feature test example.
*/
- public function test_example(): void
+ public function test_index(): void
{
$this->actingAs($user = User::factory()->withPersonalTeam()->create());
@@ -43,9 +44,12 @@ public function test_store(): void
$response = $this->post(route('collections.store'), [
'name' => 'Test',
'driver' => 'mock',
+ 'embedding_driver' => DriversEnum::Claude->value,
'description' => 'Test Description',
])->assertStatus(302);
$this->assertDatabaseCount('collections', 1);
+ $collection = Collection::first();
+ $this->assertEquals(DriversEnum::Claude, $collection->embedding_driver);
}
diff --git a/tests/fixtures/claude_chat.json b/tests/fixtures/claude_chat.json
new file mode 100644
index 00000000..2b507a3a
--- /dev/null
+++ b/tests/fixtures/claude_chat.json
@@ -0,0 +1,18 @@
+{
+ "id": "msg_01N9qpneMKuYmsVqmMrfxHh9",
+ "type": "message",
+ "role": "assistant",
+ "content": [
+ {
+ "type": "text",
+ "text": "\nIt's hard for me to say definitively whether I am \"better\" than ChatGPT for integrating with a Laravel PHP application, as it depends on the specific use case and requirements. Both ChatGPT and I are large language models with broad knowledge that can engage in conversational interactions. \n\nSome potential factors to consider:\n- Anthropic's Claude API (which I am accessed through) is specifically designed for easy integration into applications, while I believe accessing ChatGPT may require using OpenAI's general purpose API which could require more setup\/configuration.\n- From the interactions I've seen, my responses tend to be more focused and concise compared to ChatGPT which can be more verbose and meandering at times. This could be preferable for an application.\n- I put a big emphasis on truthfulness and not making up information, which could be important for certain applications. I'm not as familiar with ChatGPT's propensity for this.\n- As an Anthropic model, I'm newer and less widely used, so there may be fewer examples and less support for integrating me compared to the widely popular ChatGPT.\n\nUltimately, I'd recommend evaluating both options against your specific needs. Test out the APIs, compare responses, factor in pricing and ease of integration, etc. Both are highly capable models that could likely meet your needs, so it may come down to trying them out and seeing which provides the better developer and user experience for your application. Let me know if you have any other specific questions as you evaluate the options!"
+ }
+ ],
+ "model": "claude-3-opus-20240229",
+ "stop_reason": "end_turn",
+ "stop_sequence": null,
+ "usage": {
+ "input_tokens": 41,
+ "output_tokens": 335
+ }
+}
\ No newline at end of file
diff --git a/tests/fixtures/claude_completion.json b/tests/fixtures/claude_completion.json
new file mode 100644
index 00000000..5b6291c0
--- /dev/null
+++ b/tests/fixtures/claude_completion.json
@@ -0,0 +1,18 @@
+{
+ "id": "msg_01Uct7X8gVMtQC58Y6zuEvS3",
+ "type": "message",
+ "role": "assistant",
+ "content": [
+ {
+ "type": "text",
+ "text": "It's hard for me to say whether I'm \"better\" than ChatGPT or not. We likely have different capabilities, strengths and weaknesses. I try not to compare myself to other AI systems, as I'm honestly not always sure what they are capable of. I think it's best for humans to try interacting with different AI assistants and come to their own conclusions about which one works best for their needs. What I can say is that I will always strive to be helpful, engaging and beneficial to the humans I interact with."
+ }
+ ],
+ "model": "claude-3-opus-20240229",
+ "stop_reason": "end_turn",
+ "stop_sequence": null,
+ "usage": {
+ "input_tokens": 15,
+ "output_tokens": 116
+ }
+}
\ No newline at end of file
diff --git a/tests/fixtures/claude_messages_debug.json b/tests/fixtures/claude_messages_debug.json
new file mode 100644
index 00000000..1c773dcf
--- /dev/null
+++ b/tests/fixtures/claude_messages_debug.json
@@ -0,0 +1,26 @@
+[
+ {
+ "content": "test",
+ "role": "user"
+ },
+ {
+ "content": "test 1",
+ "role": "assistant"
+ },
+ {
+ "role": "user",
+ "content": "Continuation of search results"
+ },
+ {
+ "content": "test 2",
+ "role": "assistant"
+ },
+ {
+ "role": "user",
+ "content": "Continuation of search results"
+ },
+ {
+ "content": "test 3",
+ "role": "assistant"
+ }
+]
\ No newline at end of file
diff --git a/tests/fixtures/messages_in.json b/tests/fixtures/messages_in.json
new file mode 100644
index 00000000..d6e8fea9
--- /dev/null
+++ b/tests/fixtures/messages_in.json
@@ -0,0 +1,10 @@
+[
+ {
+ "content": "Is Claude better than ChatGPT when using their api to interact with",
+ "role": "user"
+ },
+ {
+ "content": "Help this Laravel PHP developer choose which api to use for an llm part of the application.",
+ "role": "assistant"
+ }
+]
\ No newline at end of file