Token based and model conditional limits (#36)
* use tiktoken for api limit

* model conditional char limits on frontend

* adjust for completion tokens

---------

Co-authored-by: Alan Pogrebinschi <[email protected]>
mckaywrigley and alanpog committed Mar 21, 2023
1 parent 4c425eb commit 537957d
Showing 6 changed files with 79 additions and 20 deletions.
34 changes: 28 additions & 6 deletions components/Chat/Chat.tsx
@@ -18,7 +18,17 @@ interface Props {
   onModelChange: (conversation: Conversation, model: OpenAIModel) => void;
 }
 
-export const Chat: FC<Props> = ({ conversation, models, messageIsStreaming, modelError, messageError, loading, lightMode, onSend, onModelChange }) => {
+export const Chat: FC<Props> = ({
+  conversation,
+  models,
+  messageIsStreaming,
+  modelError,
+  messageError,
+  loading,
+  lightMode,
+  onSend,
+  onModelChange,
+}) => {
   const [currentMessage, setCurrentMessage] = useState<Message>();
 
   const messagesEndRef = useRef<HTMLDivElement>(null);
@@ -36,8 +46,13 @@ export const Chat: FC<Props> = ({ conversation, models, messageIsStreaming, mode
       {modelError ? (
         <div className="flex flex-col justify-center mx-auto h-full w-[300px] sm:w-[500px] space-y-6">
           <div className="text-center text-red-500">Error fetching models.</div>
-          <div className="text-center text-red-500">Make sure your OpenAI API key is set in the bottom left of the sidebar or in a .env.local file and refresh.</div>
-          <div className="text-center text-red-500">If you completed this step, OpenAI may be experiencing issues.</div>
+          <div className="text-center text-red-500">
+            Make sure your OpenAI API key is set in the bottom left of the
+            sidebar or in a .env.local file and refresh.
+          </div>
+          <div className="text-center text-red-500">
+            If you completed this step, OpenAI may be experiencing issues.
+          </div>
         </div>
       ) : (
         <>
@@ -48,15 +63,21 @@ export const Chat: FC<Props> = ({ conversation, models, messageIsStreaming, mode
                 <ModelSelect
                   model={conversation.model}
                   models={models}
-                  onModelChange={(model) => onModelChange(conversation, model)}
+                  onModelChange={(model) =>
+                    onModelChange(conversation, model)
+                  }
                 />
               </div>
 
-              <div className="text-4xl text-center text-neutral-600 dark:text-neutral-200 pt-[160px] sm:pt-[280px]">{models.length === 0 ? "Loading..." : "Chatbot UI"}</div>
+              <div className="text-4xl text-center text-neutral-600 dark:text-neutral-200 pt-[160px] sm:pt-[280px]">
+                {models.length === 0 ? "Loading..." : "Chatbot UI"}
+              </div>
             </>
           ) : (
             <>
-              <div className="flex justify-center py-2 text-neutral-500 bg-neutral-100 dark:bg-[#444654] dark:text-neutral-200 text-sm border border-b-neutral-300 dark:border-none">Model: {conversation.model.name}</div>
+              <div className="flex justify-center py-2 text-neutral-500 bg-neutral-100 dark:bg-[#444654] dark:text-neutral-200 text-sm border border-b-neutral-300 dark:border-none">
+                Model: {conversation.model.name}
+              </div>
 
               {conversation.messages.map((message, index) => (
                 <ChatMessage
@@ -91,6 +112,7 @@ export const Chat: FC<Props> = ({ conversation, models, messageIsStreaming, mode
                 setCurrentMessage(message);
                 onSend(message, false);
               }}
+              model={conversation.model}
             />
           )}
         </>
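Note: the new model={conversation.model} prop threads the active conversation's model into ChatInput, which is what lets the input component below replace the old flat 4000-character cap with a per-model limit.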
19 changes: 12 additions & 7 deletions components/Chat/ChatInput.tsx
@@ -1,22 +1,25 @@
-import { Message } from "@/types";
+import { Message, OpenAIModel, OpenAIModelID } from "@/types";
 import { IconSend } from "@tabler/icons-react";
 import { FC, KeyboardEvent, useEffect, useRef, useState } from "react";
 
 interface Props {
   messageIsStreaming: boolean;
   onSend: (message: Message) => void;
+  model: OpenAIModel;
 }
 
-export const ChatInput: FC<Props> = ({ onSend, messageIsStreaming }) => {
+export const ChatInput: FC<Props> = ({ onSend, messageIsStreaming, model }) => {
   const [content, setContent] = useState<string>();
   const [isTyping, setIsTyping] = useState<boolean>(false);
 
   const textareaRef = useRef<HTMLTextAreaElement>(null);
 
   const handleChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
     const value = e.target.value;
-    if (value.length > 4000) {
-      alert("Message limit is 4000 characters");
+    const maxLength = model.id === OpenAIModelID.GPT_3_5 ? 12000 : 24000;
+
+    if (value.length > maxLength) {
+      alert(`Message limit is ${maxLength} characters`);
       return;
     }
 
@@ -42,8 +45,10 @@ export const ChatInput: FC<Props> = ({ onSend, messageIsStreaming }) => {
   };
 
   const isMobile = () => {
-    const userAgent = typeof window.navigator === "undefined" ? "" : navigator.userAgent;
-    const mobileRegex = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini|Mobile|mobile|CriOS/i;
+    const userAgent =
+      typeof window.navigator === "undefined" ? "" : navigator.userAgent;
+    const mobileRegex =
+      /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini|Mobile|mobile|CriOS/i;
     return mobileRegex.test(userAgent);
   };
 
@@ -72,7 +77,7 @@ export const ChatInput: FC<Props> = ({ onSend, messageIsStreaming }) => {
           resize: "none",
           bottom: `${textareaRef?.current?.scrollHeight}px`,
           maxHeight: "400px",
-          overflow: "auto"
+          overflow: "auto",
         }}
         placeholder="Type a message..."
         value={content}
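The 12000/24000-character caps on the frontend roughly mirror the server-side budgets below (3000/6000 tokens) under the common ~4-characters-per-token rule of thumb for English text. A minimal sketch of the same check pulled into a standalone helper — getMaxInputLength and validateInput are hypothetical names, not part of this commit:

import { OpenAIModel, OpenAIModelID } from "@/types";

// Hypothetical helper mirroring the inline check in ChatInput.tsx.
// GPT-3.5 gets a 12,000-character cap; anything else (GPT-4) gets 24,000,
// which at ~4 chars/token lands just under the 3,000/6,000-token API budgets.
const getMaxInputLength = (model: OpenAIModel): number =>
  model.id === OpenAIModelID.GPT_3_5 ? 12000 : 24000;

// Returns true when the draft message fits the model's input cap.
const validateInput = (value: string, model: OpenAIModel): boolean => {
  const maxLength = getMaxInputLength(model);
  if (value.length > maxLength) {
    alert(`Message limit is ${maxLength} characters`);
    return false;
  }
  return true;
};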
13 changes: 11 additions & 2 deletions next.config.js
@@ -1,6 +1,15 @@
 /** @type {import('next').NextConfig} */
 const nextConfig = {
   reactStrictMode: true,
-}
 
-module.exports = nextConfig
+  webpack(config, { isServer, dev }) {
+    config.experiments = {
+      asyncWebAssembly: true,
+      layers: true,
+    };
+
+    return config;
+  },
+};
+
+module.exports = nextConfig;
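Why this change is needed (an inference, not stated in the commit): @dqbd/tiktoken/lite ships its encoder as a WebAssembly binary, and webpack's asyncWebAssembly experiment is what allows the .wasm?module import in pages/api/chat.ts below to be bundled for the edge runtime. The isServer and dev parameters are destructured here but unused.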
11 changes: 11 additions & 0 deletions package-lock.json

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions package.json
@@ -9,6 +9,7 @@
     "lint": "next lint"
   },
   "dependencies": {
+    "@dqbd/tiktoken": "^1.0.2",
     "@tabler/icons-react": "^2.9.0",
     "@types/node": "18.15.0",
     "@types/react": "18.0.28",
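@dqbd/tiktoken is a WebAssembly port of OpenAI's tiktoken tokenizer, which is what makes token counting possible from JavaScript, including in the edge runtime used by pages/api/chat.ts.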
21 changes: 16 additions & 5 deletions pages/api/chat.ts
@@ -1,5 +1,9 @@
-import { Message, OpenAIModel } from "@/types";
+import { Message, OpenAIModel, OpenAIModelID } from "@/types";
 import { OpenAIStream } from "@/utils/server";
+import tiktokenModel from "@dqbd/tiktoken/encoders/cl100k_base.json";
+import { init, Tiktoken } from "@dqbd/tiktoken/lite/init";
+// @ts-expect-error
+import wasm from "../../node_modules/@dqbd/tiktoken/lite/tiktoken_bg.wasm?module";
 
 export const config = {
   runtime: "edge"
@@ -13,19 +17,26 @@ const handler = async (req: Request): Promise<Response> => {
     key: string;
   };
 
-  const charLimit = 12000;
-  let charCount = 0;
+  await init((imports) => WebAssembly.instantiate(wasm, imports));
+  const encoding = new Tiktoken(tiktokenModel.bpe_ranks, tiktokenModel.special_tokens, tiktokenModel.pat_str);
+
+  const tokenLimit = model.id === OpenAIModelID.GPT_4 ? 6000 : 3000;
+  let tokenCount = 0;
   let messagesToSend: Message[] = [];
 
   for (let i = messages.length - 1; i >= 0; i--) {
     const message = messages[i];
-    if (charCount + message.content.length > charLimit) {
+    const tokens = encoding.encode(message.content);
+
+    if (tokenCount + tokens.length > tokenLimit) {
       break;
     }
-    charCount += message.content.length;
+    tokenCount += tokens.length;
     messagesToSend = [message, ...messagesToSend];
   }
 
+  encoding.free();
+
   const stream = await OpenAIStream(model, key, messagesToSend);
 
   return new Response(stream);
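Two details worth noting. First, the budgets sit deliberately below the models' context windows at the time (4096 tokens for gpt-3.5-turbo, 8192 for gpt-4), leaving roughly 1000 and 2000 tokens of headroom for the response — the "adjust for completion tokens" part of the commit message. Second, the loop counts only message content, not the per-message role overhead OpenAI adds. A sketch of the trimming loop as a reusable pure function, assuming the same @dqbd/tiktoken setup; trimToTokenBudget is a hypothetical name:

import { Message } from "@/types";
import { Tiktoken } from "@dqbd/tiktoken/lite/init";

// Hypothetical extraction of the loop in pages/api/chat.ts: walk the history
// newest-first, keep whole messages while they fit the budget, and return the
// kept messages in chronological order. Keep tokenLimit below the model's
// context window so the completion has tokens left to spend.
const trimToTokenBudget = (
  messages: Message[],
  encoding: Tiktoken,
  tokenLimit: number
): Message[] => {
  let tokenCount = 0;
  let messagesToSend: Message[] = [];

  for (let i = messages.length - 1; i >= 0; i--) {
    const tokens = encoding.encode(messages[i].content);
    // Stop at the first message that would blow the budget; it and all
    // older messages are dropped.
    if (tokenCount + tokens.length > tokenLimit) {
      break;
    }
    tokenCount += tokens.length;
    messagesToSend = [messages[i], ...messagesToSend];
  }

  return messagesToSend;
};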
