Skip to content

Added basic Ollama support #11 #61

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,14 @@ UPSTASH_REDIS_REST_URL=****
# Get your Upstash Redis Token here: https://console.upstash.com/
UPSTASH_REDIS_REST_TOKEN=****

# If using Ollama locally (which provides an OpenAI-compatible API at port 11434),
# just point CUSTOM_OPENAI_URL at it and omit your official OPENAI_API_KEY:
# Visit https://ollama.ai/download for installation instructions
CUSTOM_OPENAI_URL="http://localhost:11434/v1"
CUSTOM_OPENAI_MODEL="qwen2.5:32b"
CUSTOM_OPENAI_REASONING_MODEL="qwq:32b"
# Unfortunately deepseek-r1:32b does not support tools

# Specify which model to use for reasoning (o1, o1-mini, o3-mini, etc..)
# REASONING_MODEL=deepseek-reasoner

Expand Down
2 changes: 1 addition & 1 deletion app/(chat)/actions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ export async function generateTitleFromUserMessage({
message: CoreUserMessage;
}) {
const { text: title } = await generateText({
model: customModel('gpt-4o'),
model: customModel(process.env.CUSTOM_OPENAI_MODEL || 'gpt-4o'),
system: `\n
- you will generate a short title based on the first message a user begins a conversation with
- ensure it is not more than 80 characters long
Expand Down
12 changes: 9 additions & 3 deletions app/(chat)/api/chat/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -144,8 +144,13 @@ export async function POST(request: Request) {
return new Response(`Too many requests`, { status: 429 });
}

const model = models.find((model) => model.id === modelId);
const reasoningModel = reasoningModels.find((model) => model.id === reasoningModelId);
const model = process.env.CUSTOM_OPENAI_URL
? { apiIdentifier: process.env.CUSTOM_OPENAI_MODEL || ''}
: models.find((model) => model.id === modelId);

const reasoningModel = process.env.CUSTOM_OPENAI_REASONING_MODEL
  ? { apiIdentifier: process.env.CUSTOM_OPENAI_REASONING_MODEL || '' }
  : reasoningModels.find((model) => model.id === reasoningModelId);

if (!model || !reasoningModel) {
return new Response('Model not found', { status: 404 });
Expand Down Expand Up @@ -416,7 +421,8 @@ export async function POST(request: Request) {
});

try {
const parsed = JSON.parse(result.text);
const textCleaned = result.text.replace(/<think>[\s\S]*?<\/think>/g, '');
const parsed = JSON.parse(textCleaned);
return parsed.analysis;
} catch (error) {
console.error('Failed to parse JSON response:', error);
Expand Down
18 changes: 17 additions & 1 deletion lib/ai/index.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { openai } from '@ai-sdk/openai';
import { createOpenAI, openai as defaultOpenai} from '@ai-sdk/openai';
import { experimental_wrapLanguageModel as wrapLanguageModel } from 'ai';
import { openrouter } from '@openrouter/ai-sdk-provider';
import { togetherai } from '@ai-sdk/togetherai';
Expand Down Expand Up @@ -27,6 +27,11 @@ const BYPASS_JSON_VALIDATION = process.env.BYPASS_JSON_VALIDATION === 'true';

// Helper to get the reasoning model based on user's selected model
function getReasoningModel(modelId: string) {
// If we are using a custom openai endpoint, allow any model
if (process.env.CUSTOM_OPENAI_REASONING_MODEL) {
return process.env.CUSTOM_OPENAI_REASONING_MODEL;
}

// If already using a valid reasoning model, keep using it
if (VALID_REASONING_MODELS.includes(modelId as ReasoningModel)) {
return modelId;
Expand All @@ -48,6 +53,17 @@ function getReasoningModel(modelId: string) {
return configuredModel;
}

// Create a custom OpenAI client if CUSTOM_OPENAI_URL is defined
const openai = process.env.CUSTOM_OPENAI_URL
? createOpenAI({
baseURL: process.env.CUSTOM_OPENAI_URL,
apiKey:
process.env.OPENAI_API_KEY && process.env.OPENAI_API_KEY !== '****' ? process.env.OPENAI_API_KEY : undefined,
name: 'custom-openai',
compatibility: 'compatible',
})
: defaultOpenai;

export const customModel = (apiIdentifier: string, forReasoning: boolean = false) => {
// Check which API key is available
const hasOpenRouterKey = process.env.OPENROUTER_API_KEY && process.env.OPENROUTER_API_KEY !== "****";
Expand Down