From 056e117280f3940655e583532901dbb3eb6084f4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 12 Jun 2024 00:24:09 +0000 Subject: [PATCH] chore: version package --- .changeset/brown-ads-suffer.md | 5 - bun.lockb | Bin 391560 -> 391560 bytes examples/next-13/src/app/docs.ts | 1466 +++++++++++++++--------------- src/CHANGELOG.md | 6 + src/errors/version.ts | 1 + src/package.json | 14 +- 6 files changed, 730 insertions(+), 762 deletions(-) delete mode 100644 .changeset/brown-ads-suffer.md create mode 100644 src/errors/version.ts diff --git a/.changeset/brown-ads-suffer.md b/.changeset/brown-ads-suffer.md deleted file mode 100644 index fe454c0..0000000 --- a/.changeset/brown-ads-suffer.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"hopfield": patch ---- - -**Fix:** updated the default model name to be more recent. diff --git a/bun.lockb b/bun.lockb index 47c4661fb22d7d0fd6574728679d1704e76b841a..12453c0ff1d219da0511aec6a1be0b1d876c131b 100755 GIT binary patch delta 57 zcmV-90LK4_^B0Ko7l4ETgaU*Ev;=Nn1Ti!&GKX?s1h;Zu1tyr2sL3doAmRczmw*8S P1egA710=T~_yuSIXPOkR delta 57 zcmeD9EZ*^1yrG4$g{g&k3rk)+i-n#cLwiv?%l4vpR<#+EHM}&YD?DPdnBKt1%rgC7 N4ztR3g&(XLi~vc+6M_H$ diff --git a/examples/next-13/src/app/docs.ts b/examples/next-13/src/app/docs.ts index 68d3150..12c2808 100644 --- a/examples/next-13/src/app/docs.ts +++ b/examples/next-13/src/app/docs.ts @@ -1,91 +1,178 @@ export const docs = `--- -description: "Quickly add Hopfield to your TypeScript project." -title: "Getting Started" +description: "A detailed guide on seamlessly fetching and streaming data directly into React components." +title: "Next.js App Router with Hopfield" --- -# Getting Started +# Next.js App Router -This section will help you start using Hopfield in your TypeScript project. +Hopfield empowers developers to seamlessly fetch and stream data directly into Next.js React Server Components. -## Install +## Overview -First, you will need to install Hopfield. +Hopfield streaming chat provides a readableStream() which can be used to build recursive React Server Components. -::: code-group +The readableStream() from Hopfield's streaming chat provider returns a [ReadableStream](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) (available in Node 18+, or it can be polyfilled with a library like [web-streams-polyfill](https://www.npmjs.com/package/web-streams-polyfill).). -bash [bun] -bun i hopfield +::: info Non-streaming +If you are not interested in using streaming, you can use the non-streaming chat provider easily with a simple RSC +that awaits the full response from chat.get(). This is not shown below, but is a much simpler integration that does not +include any custom code for streaming token by token. -bash [pnpm] -pnpm add hopfield +::: +### Backpressure -bash [npm] -npm i hopfield +The readable stream handles backpressure with a pull-based approach. See our [tests](https://github.com/EnjoinHQ/hopfield/blob/main/src/utils.test.ts) for how Hopfield handles backpressure. For a more detailed explanation on "backpressure" and how it factors into streaming LLM responses, please see the +[vercel/ai docs](https://sdk.vercel.ai/docs/concepts/backpressure-and-cancellation). +## Usage -::: +Here's how to use Hopfield with a recursive React Server Component using Suspense: -### OpenAI +tsx +import { Suspense } from "react"; +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; -You'll also need to set up a Hopfield Provider, which is used to interact with the API. 
-We currently only support OpenAI (but are working on adding other providers). +// Set up the OpenAI client +const openaiClient = new OpenAI({ apiKey: "OPENAI_API_KEY" }); +// Pass the OpenAI client into Hopfield +const hopfield = hop.client(openai).provider(openaiClient); +// Create a streaming chat provider +const chat = hopfield.chat("gpt-4o-2024-05-13").streaming(); -To use Hopfield, you will need to install the latest 4+ version of openai. +export type ChatResponseProps = { + prompt: string; +}; -::: code-group +export async function ChatResponse({ prompt }: ChatResponseProps) { + // construct messages with hop.inferMessageInput + const messages: hop.inferMessageInput[] = [ + { + role: "system", + content: "You are a helpful AI assistant.", + }, + { + role: "user", + content: prompt, + }, + ]; -bash [pnpm] -pnpm add openai@4 + const response = await chat.get( + { messages: messages }, + { + onChunk: async (value) => { + console.log(Received chunk type: \${value.choices[0].__type}); + // do something on the server with each individual chunk as it is + // streamed in + }, + onDone: async (chunks) => { + console.log(Total chunks received: \${chunks.length}); + // do something on the server when the chat completion is done + // this can be caching the response, storing in a database, etc. + // + // chunks is an array of all the streamed responses, so you + // can access the raw content and combine how you'd like + }, + // if you are using function calling, you can also add a onFunctionCall + // here with zod-parsed arguments + } + ); + // pass the readableStream to the RSC + return ; +} -bash [npm] -npm i openai@4 +type Props = { + /** + * A ReadableStream produced by Hopfield. + */ + stream: ReadableStream>; +}; +/** + * A React Server Component that recursively renders a stream of tokens. + */ +async function Tokens(props: Props) { + const { stream } = props; + const reader = stream.getReader(); -bash [yarn] -yarn add openai@4 + return ( + + + + ); +} +type RecursiveTokensProps = { + reader: ReadableStreamDefaultReader>; +}; -::: +async function RecursiveTokens({ reader }: RecursiveTokensProps) { + const { done, value } = await reader.read(); -## Create a Provider + if (done) { + return null; + } -We create a Hopfield provider, which stores the provider client and uses it for API requests under -the hood. + return ( + <> + {value.choices[0].__type === "content" ? ( + value.choices[0].delta.content + ) : ( + <> + )} + }> + + + + ); +} -ts -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; +// This can be any loading indicator you want, which gets appended to the end +// of the tokens while waiting for the next token to be streamed +const LoadingDots = () => ...; -// create an OpenAI client (must be prerelease version for latest features) -const openAIClient = new OpenAI({ apiKey: "{OPENAI_API_KEY}" }); // [!code focus] -// use the OpenAI client with Hopfield -// or, you can *not* pass a provider, and just use the runtime validations -const hopfield = hop.client(openai).provider(openAIClient); // [!code focus] +We create a recursive React Server Component which uses Suspense boundaries to await each token, +and show a fallback loading indicator where the next token will be rendered. +See our [Next 13 RSC example](https://next-13.hopfield.ai) for a real-world integration +using Vercel, similar to this quick example. -## Streaming Chat +### Dive Deeper -We can now create a Streaming Chat instance of Hopfield. 
We use the provider we created above, -and create a new chat instance. +To deepen your understanding of how Streaming works, and how it can be further utilized within your application, +refer to the [Streaming Chat](/chat/streaming) section. +' -ts -export const chat = hopfield.chat().streaming(); // [!code focus] +'--- +description: "Deep dive into how to get streaming chat responses with Hopfield." +title: "Chat - Non-streaming" +--- +# Streaming chat -We can now use this chat instance for every chat interaction, with simplified streaming -and other features. Below, we show how to use get to interact with the Chat Completions -API, and utility types inferMessageInput and inferResult -to get the typing of the inputs/outputs for the chat instance. +Hopfield provides a simple way to interact with streaming chat models. You can use various +API providers with type guarantees with Zod. -ts +## Usage + +Use streaming chat models from OpenAI with a few lines of code: + +ts twoslash +const takeAction = async (message: string) => {}; +// ---cut--- import hop from "hopfield"; -import { chat } from "./chat"; -// [!code focus:12] +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const chat = hopfield.chat().streaming(); + const messages: hop.inferMessageInput[] = [ { role: "user", @@ -93,556 +180,72 @@ const messages: hop.inferMessageInput[] = [ }, ]; -const response = await chat.get({ - messages, -}); - - -We can then stream the response from the chat instance and store the chunks -in an array, as well as take any action for the incoming chunk. +const response = await chat.get( + { + messages, + }, + { + onChunk: async (value) => { + console.log(Received chunk type: \${value.choices[0].__type}); + // do something on the server with each individual chunk as it is + // streamed in + }, + onDone: async (chunks) => { + console.log(Total chunks received: \${chunks.length}); + // do something on the server when the chat completion is done + // this can be caching the response, storing in a database, etc. + // + // chunks is an array of all the streamed responses, so you + // can access the raw content and combine how you'd like + }, + } +); -ts // store all of the streaming chat chunks const parts: hop.inferResult[] = []; for await (const part of response) { + parts.push(part); + // if the streaming delta contains new text content if (part.choices[0].__type === "content") { - // handle the new content + // ^? + // action based on the delta for the streaming message content + await takeAction(part.choices[0].delta.content); + // ^? } - - parts.push(part); } -As you can see, it's super easy to add streaming to your application with minimal dependencies -and a simple async iterator for easy streaming with Zod validation and strict typing. - -## What's next? +### Learn more -Now that you're all set up, you are ready to dive in to the docs further! +See how to use streaming results combined with type-driven prompt templates in the +[next section](/chat/templates). ' '--- -description: "Comparisons between Hopfield's features and features from similar libraries." -title: "Comparisons" +description: "Hopfield makes LLM function calling seamless." +title: "Chat - Functions" --- -# Comparisons - -No other library does what Hopfield does (inferring static LLM TypeScript types from Zod schemas), but there are some similarities with other libraries. This page compares Hopfield to other libraries. 
- -Comparisons strive to be as accurate and as unbiased as possible. If you use any of these libraries and feel the information could be improved, feel free to suggest changes. +# Functions -## vercel/ai +Hopfield lets you define validation-driven functions which can be passed to the LLM. +This lets you clearly build functions, which get transformed to JSON schema with +[zod-to-json-schema](https://github.com/StefanTerdell/zod-to-json-schema), so +the LLM can use these as tools. -[**ai**](https://github.com/vercel/ai) is a framework for AI-powered applications with React, Svelte, Vue, and Solid. They provide hooks to easily integrate -with a streaming text response (StreamingTextResponse) and allow a callback for function calling, as well as simple, drop-in components for React and other -frameworks. +## Usage -**Hopfield** provides a subset of these features, and focuses solely on the API interactions, and **not** on providing React components. +Use chat models from OpenAI: -Below is a comparison of the library features: +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; +import z from "zod"; -| | **ai** | **Hopfield** | -| ---------------------- | ----------------------------------------- | ---------------------------------------------- | -| **React Components** | Easy, inflexible UI components & hooks | No UI components or hooks | -| **Typed Functions** | Streaming function calls with loose types | Strict function call types with Zod validation | -| **Framework Examples** | Multiple | Multiple | -| **Chat Providers** | Multiple | OpenAI, with support for others coming | - -## Langchain.js - -[**Langchain.js**](https://github.com/hwchase17/langchainjs) is a framework for developing applications powered by language models -with Javascript. Developers usually use Langchain to develop apps which connect to internal tools (like internal knowledge bases, -LLM demos, and generally in trusted environments). - -**Hopfield** is a TypeScript library that provides a subset of Langchain's features, -prioritizing inferring static types from LLM input, alongside runtime response validation and static typing. - -Below is a comparison of the library features: - -| | **Langchain.js** | **Hopfield** | -| -------------------- | ------------------------------------------- | ------------------------------------------------------------------------ | -| **Prompt Templates** | Opaque | Use string template types for type inference | -| **Type Generation** | Loose types with some Typescript helpers | Static types with Zod validation | -| **Function Calling** | Starter templates, with some Zod validation | Validation-driven, composable functions | -| **Connectors/Tools** | Many, with various integrations | Only a select few, with examples (actively being developed) | -| **Dependencies** | Many, with non-optional peer dependencies | Few, with strict bundle splitting to avoid unnecessary peer dependencies | -' - -'--- -description: "An overview of working with embeddings in Hopfield." -title: "Overview of Embeddings" ---- - -# Embeddings - -Hopfield provides an easy way to get type-safe embeddings. You can use different API providers with type -guarantees with Zod, and composability across providers. - -::: info API Providers - -We currently only support OpenAI, but are -working on adding further providers. Reach out on -[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions) if you have any suggestions! 
- -::: - -## Usage - -Check out how we type responses: - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -const hopfield = hop.client(openai).provider(new OpenAI()); - -const embeddings = hopfield.embedding(); - -const response = await hopfield.embedding().get({ input: ["hello"] }); -const embedding = response.data[0].embedding; -// ^? - - -You can guarantee that your response is constructed correctly (with no optional accessors) -and the embedding and outer array uses [the tuple type](https://www.typescriptlang.org/docs/handbook/2/objects.html#tuple-types), -based on the inputs you requested. - -## Composability - -The big unlock is not only that types are guaranteed to be safe - we provide composability to -allow building complex apps with [RAG](https://www.promptingguide.ai/techniques/rag) and embedding-driven search. - -::: info - -We are actively working on building a RAG solution - please reach out if you are interested -in influencing the API for this! - -::: - -## Learn More - -Learn more about the intricacies embeddings in the [Embeddings](/embeddings/details) page. -' - -'--- -description: "Deep dive into how to work with embeddings in Hopfield." -title: "Embeddings Details" ---- - -# Embeddings - -Hopfield allows developers to easily query and validate responses from embeddings providers. -You can use different APIs with type guarantees with Zod and composability. - -## Usage - -Create and use embeddings from OpenAI, directly with an API client: - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -const hopfield = hop.client(openai).provider(new OpenAI()); - -const embeddings = hopfield.embedding("text-embedding-ada-002", 3); - -const response = await embeddings.get({ input: ["ready", "set", "hop"] }); - -const embeddingCount = response.data.length; -// ^? - -const embeddingLength = response.data[0].embedding.length; -// ^? - - -Or if you prefer, you can only use Hopfield's Zod validations, and use the OpenAI -SDK directly: - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -const openAIClient = new OpenAI(); - -const hopfield = hop.client(openai); - -const embeddings = hopfield.embedding("text-embedding-ada-002", 3); - -const response = await openAIClient.embeddings.create({ - model: embeddings.model, - input: ["ready", "set", "hop"], -}); - -const parsed = embeddings.returnType.parse(response); - -const embeddingCount = parsed.data.length; -// ^? - -const embeddingLength = parsed.data[0].embedding.length; -// ^? - - -## Parameters - -### Model Name - -The model name to use for the embedding. This parameter depends on the client specified. -The embedding length will change based on this parameter, since different text embeddings -can have varying lengths. - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -const hopfield = hop.client(openai).provider(new OpenAI()); -// ---cut--- -const embeddings = hopfield.embedding("text-embedding-ada-002"); - -const response = await embeddings.get({ input: ["hop"] }); - -const embedding = response.data[0].embedding; -// ^? - - -#### OpenAI - -The OpenAI model name defaults to the value shown below. This is currently the only supported model. - -ts twoslash -import type { DefaultOpenAIEmbeddingModelName } from "hopfield/openai"; -// ^? 
- - ---- - -### Embedding Count - -The count of text embeddings to be returned. For all providers, this defaults to 1. -This is capped at 20. - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -const hopfield = hop.client(openai).provider(new OpenAI()); -// ---cut--- -const embeddings = hopfield.embedding("text-embedding-ada-002", 3); - -const response = await embeddings.get({ input: ["ready", "set", "hop"] }); - -const embeddingLength = response.data.length; -// ^? - -const thirdEmbeddingLength = response.data[2].embedding.length; -// ^? - -' - -'--- -description: "An overview of working with chat models in Hopfield." -title: "Overview of Chat Models" ---- - -# Chat - -Hopfield also provides simple APIs for interacting with chat models. It has different API providers with type -guarantees with Zod. - -::: info API Providers - -We currently only support OpenAI, but are -working on adding further providers. Reach out on -[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions) if you have any suggestions! - -::: - -## Usage - -Check out how we type responses: - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -const hopfield = hop.client(openai).provider(new OpenAI()); - -const chat = hopfield.chat(); - -const parsed = await chat.get({ - messages: [ - { - content: "What's the best pizza restaurant in NYC?", - role: "user", - }, - ], -}); - -const choiceType = parsed.choices[0].__type; -// ^? - - -You can guarantee that your response is constructed correctly (with no optional accessors) -and the embedding and outer array uses [the tuple type](https://www.typescriptlang.org/docs/handbook/2/objects.html#tuple-types), -based on the inputs you requested. - -## Composability - -The big unlock is not only that types are guaranteed to be safe - we provide composability to -allow building complex apps with [RAG](https://www.promptingguide.ai/techniques/rag) and embedding-driven search. - -::: info - -We are actively working on building a RAG solution - please reach out if you are interested -in influencing the API for this! - -::: - -## Learn More - -Learn more about the intricacies embeddings in the [Embeddings](/embeddings/details) page. -' - -'--- -description: "Learn how Hopfield uses string literal types for prompt templates." -title: "Chat - Prompt Templates" ---- - -# Prompt Templates - -Hopfield always uses [string literal types](https://www.typescriptlang.org/docs/handbook/2/template-literal-types.html) -for prompt templates, so developers have visibility into the tranformations performed on their inputs. - -## Usage - -Check out how our types look when you use a template: - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; - -const template = hop.client(openai).template(); - -const description = template.enum("The category of the message."); -// ^? - - -You can see above that the description has type hints to tell you exactly what the -transformation was. In this case, the template appended -This must always be a possible value from the enum array. to the input string. - -This template is usually used with complex function calls. See the next section for -more information. - -## Composability - -We will be building on top of the Prompt Templating primitive with features which have more complex transformations. 
Specifically, we will be shipping best practices for few-shot prompting and
[RAG](https://www.promptingguide.ai/techniques/rag), so chat messages are strongly typed and adhere
to emerging industry standards.

::: info

We are actively working on building this feature further - please reach out if you are interested
in influencing this!

:::
'

'---
description: "Hopfield makes streaming with LLM function calling seamless."
title: "Chat - Functions with Streaming"
---

# Functions with Streaming

Hopfield makes it easy to use streaming with function calling.
You define validation-driven functions which get passed to the LLM.

## Usage

Use streaming function calling like this:

ts twoslash
const takeAction = async (
  name: string,
  args: {
    location: string;
    unit: "celsius" | "fahrenheit";
  }
) => {};
// ---cut---
import z from "zod";
import hop from "hopfield";
import openai from "hopfield/openai";
import OpenAI from "openai";

const hopfield = hop.client(openai).provider(new OpenAI());

const weatherFunction = hopfield.function({
  name: "getCurrentWeather",
  description: "Get the current weather in a given location",
  parameters: z.object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA"),
    unit: z
      .enum(["celsius", "fahrenheit"])
      .describe(hopfield.template().enum("The unit for the temperature.")),
  }),
});

const chat = hopfield.chat().streaming().functions([weatherFunction]);

const messages: hop.inferMessageInput<typeof chat>[] = [
  {
    role: "user",
    content: "What's the weather in San Jose?",
  },
];

const response = await chat.get(
  {
    messages,
  },
  {
    onChunk(chunk) {
      console.log(\`Received chunk type: \${chunk.choices[0].__type}\`);
      // do something on the server with each individual chunk as it is
      // streamed in
    },
    onDone(chunks) {
      console.log(\`Total chunks received: \${chunks.length}\`);
      // do something on the server when the chat completion is done
      // this can be caching the response, storing in a database, etc.
      //
      // chunks is an array of all the streamed responses, so you
      // can access the raw content and combine how you'd like
    },
    async onFunctionCall(fn) {
      // do something based on the function call result - this
      // is parsed by your function definition with zod, and
      // the arguments are coerced into the object shape you expect
      await takeAction(fn.name, fn.arguments);
      // ^?
    },
  }
);


::: info Feedback

To influence these features, reach out on
[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions).
We want your feedback!

:::
'

'---
description: "Deep dive into how to get streaming chat responses with Hopfield."
title: "Chat - Streaming"
---

# Streaming chat

Hopfield provides a simple way to interact with streaming chat models. You can use various
API providers with type guarantees with Zod.
- -## Usage - -Use streaming chat models from OpenAI with a few lines of code: - -ts twoslash -const takeAction = async (message: string) => {}; -// ---cut--- -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -const hopfield = hop.client(openai).provider(new OpenAI()); - -const chat = hopfield.chat().streaming(); - -const messages: hop.inferMessageInput[] = [ - { - role: "user", - content: "What's the coolest way to count to ten?", - }, -]; - -const response = await chat.get( - { - messages, - }, - { - onChunk: async (value) => { - console.log(Received chunk type: \${value.choices[0].__type}); - // do something on the server with each individual chunk as it is - // streamed in - }, - onDone: async (chunks) => { - console.log(Total chunks received: \${chunks.length}); - // do something on the server when the chat completion is done - // this can be caching the response, storing in a database, etc. - // - // chunks is an array of all the streamed responses, so you - // can access the raw content and combine how you'd like - }, - } -); - -// store all of the streaming chat chunks -const parts: hop.inferResult[] = []; - -for await (const part of response) { - parts.push(part); - - // if the streaming delta contains new text content - if (part.choices[0].__type === "content") { - // ^? - // action based on the delta for the streaming message content - await takeAction(part.choices[0].delta.content); - // ^? - } -} - - -### Learn more - -See how to use streaming results combined with type-driven prompt templates in the -[next section](/chat/templates). -' - -'--- -description: "Hopfield makes LLM function calling seamless." -title: "Chat - Functions" ---- - -# Functions - -Hopfield lets you define validation-driven functions which can be passed to the LLM. -This lets you clearly build functions, which get transformed to JSON schema with -[zod-to-json-schema](https://github.com/StefanTerdell/zod-to-json-schema), so -the LLM can use these as tools. - -## Usage - -Use chat models from OpenAI: - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; -import z from "zod"; - -const hopfield = hop.client(openai).provider(new OpenAI()); +const hopfield = hop.client(openai).provider(new OpenAI()); const weatherFunction = hopfield.function({ name: "getCurrentWeather", @@ -780,6 +383,94 @@ if (response.choices[0].__type === "function_call") { ' +'--- +description: "Hopfield makes streaming with LLM function calling seamless." +title: "Chat - Functions with Streaming" +--- + +# Functions with Streaming + +Hopfield makes it easy to use streaming with function calling. +You define validation-driven functions which get passed to the LLM. + +## Usage + +Use streaming function calling like: + +ts twoslash +const takeAction = async ( + name: string, + args: { + location: string; + unit: "celsius" | "fahrenheit"; + } +) => {}; +// ---cut--- +import z from "zod"; +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const weatherFunction = hopfield.function({ + name: "getCurrentWeather", + description: "Get the current weather in a given location", + parameters: z.object({ + location: z.string().describe("The city and state, e.g. 
San Francisco, CA"), + unit: z + .enum(["celsius", "fahrenheit"]) + .describe(hopfield.template().enum("The unit for the temperature.")), + }), +}); + +const chat = hopfield.chat().streaming().functions([weatherFunction]); + +const messages: hop.inferMessageInput[] = [ + { + role: "user", + content: "What's the weather in San Jose?", + }, +]; + +const response = await chat.get( + { + messages, + }, + { + onChunk(chunk) { + console.log(Received chunk type: \${chunk.choices[0].__type}); + // do something on the server with each individual chunk as it is + // streamed in + }, + onDone(chunks) { + console.log(Total chunks received: \${chunks.length}); + // do something on the server when the chat completion is done + // this can be caching the response, storing in a database, etc. + // + // chunks is an array of all the streamed responses, so you + // can access the raw content and combine how you'd like + }, + async onFunctionCall(fn) { + // do something based on the function call result - this + // is parsed by your function definition with zod, and + // the arguments are coerced into the object shape you expect + await takeAction(fn.name, fn.arguments); + // ^? + }, + } +); + + +::: info Feedback + +To influence these features, reach out on +[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions). +We want your feedback! + +::: +' + '--- description: "Deep dive into how to get non-streaming chat responses with Hopfield." title: "Chat - Non-streaming" @@ -865,31 +556,142 @@ const hopfield = hop.client(openai).provider(new OpenAI()); const chat = hopfield.chat("gpt-4-0613", 10); // [!code focus] -The response can then be safely used: +The response can then be safely used: + +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const chat = hopfield.chat("gpt-4-0613", 10); +// ---cut--- +const messages: hop.inferMessageInput[] = [ + { + role: "user", + content: "What's the best way to get a bunch of chat responses?", + }, +]; + +const response = await chat.get({ + messages, +}); + +const chatCount = response.choices.length; +// ^? + +' + +'--- +description: "An overview of working with chat models in Hopfield." +title: "Overview of Chat Models" +--- + +# Chat + +Hopfield also provides simple APIs for interacting with chat models. It has different API providers with type +guarantees with Zod. + +::: info API Providers + +We currently only support OpenAI, but are +working on adding further providers. Reach out on +[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions) if you have any suggestions! + +::: + +## Usage + +Check out how we type responses: + +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const chat = hopfield.chat(); + +const parsed = await chat.get({ + messages: [ + { + content: "What's the best pizza restaurant in NYC?", + role: "user", + }, + ], +}); + +const choiceType = parsed.choices[0].__type; +// ^? + + +You can guarantee that your response is constructed correctly (with no optional accessors) +and the embedding and outer array uses [the tuple type](https://www.typescriptlang.org/docs/handbook/2/objects.html#tuple-types), +based on the inputs you requested. 
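For example, since __type acts as a discriminant on each choice, a plain equality check narrows the union without optional chaining (a minimal sketch building on the parsed response above; it assumes the content variant exposes message.content, mirroring the OpenAI response shape):

ts
if (parsed.choices[0].__type === "content") {
  // narrowed: the message content is a guaranteed string here
  console.log(parsed.choices[0].message.content);
}
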
+ +## Composability + +The big unlock is not only that types are guaranteed to be safe - we provide composability to +allow building complex apps with [RAG](https://www.promptingguide.ai/techniques/rag) and embedding-driven search. + +::: info + +We are actively working on building a RAG solution - please reach out if you are interested +in influencing the API for this! + +::: + +## Learn More + +Learn more about the intricacies embeddings in the [Embeddings](/embeddings/details) page. +' + +'--- +description: "Learn how Hopfield uses string literal types for prompt templates." +title: "Chat - Prompt Templates" +--- + +# Prompt Templates + +Hopfield always uses [string literal types](https://www.typescriptlang.org/docs/handbook/2/template-literal-types.html) +for prompt templates, so developers have visibility into the tranformations performed on their inputs. + +## Usage + +Check out how our types look when you use a template: ts twoslash import hop from "hopfield"; import openai from "hopfield/openai"; -import OpenAI from "openai"; -const hopfield = hop.client(openai).provider(new OpenAI()); +const template = hop.client(openai).template(); -const chat = hopfield.chat("gpt-4-0613", 10); -// ---cut--- -const messages: hop.inferMessageInput[] = [ - { - role: "user", - content: "What's the best way to get a bunch of chat responses?", - }, -]; +const description = template.enum("The category of the message."); +// ^? -const response = await chat.get({ - messages, -}); -const chatCount = response.choices.length; -// ^? +You can see above that the description has type hints to tell you exactly what the +transformation was. In this case, the template appended +This must always be a possible value from the enum array. to the input string. + +This template is usually used with complex function calls. See the next section for +more information. + +## Composability + +We will be building on top of the Prompt Templating primitive with features which have more complex transformations. +Specifically, we will be shipping best practices for few-shot prompting and +[RAG](https://www.promptingguide.ai/techniques/rag), so chat messages are strongly typed and adhere +to emerging industry standards. + +::: info + +We are actively working on building this feature further - please reach out if you are interested +in influencing this! +::: ' '--- @@ -956,22 +758,8 @@ with best practices baked in. Add it to your project, along with any peer dependencies: -::: code-group - -bash [bun] -bun i hopfield - - -bash [pnpm] -pnpm add hopfield - - -bash [npm] npm i hopfield - -::: - ### ready, set, hop See how easy it is to add composable, type-safe LLM features with Hopfield: @@ -1128,207 +916,377 @@ const classifyMessage = hopfield.function({ }), }); -export const chat = hopfield.chat().functions([classifyMessage]); +export const chat = hopfield.chat().functions([classifyMessage]); + + +::: + +## TL;DR + +Hopfield might be a good fit for your project if: + +- πŸ—οΈ You build with Typescript/Javascript, and have your database schemas in these languages (e.g. [Prisma](https://www.prisma.io/) and/or [Next.js](https://nextjs.org/)). +- πŸͺ¨ You don't need a heavyweight LLM orchestration framework that ships with a ton of dependencies you'll never use. +- πŸ€™ You're using OpenAI function calling and/or custom tools, and want Typescript-native features for them (e.g. validations w/ [Zod](https://github.com/colinhacks/zod)). 
+- πŸ’¬ You're building complex LLM interactions which use memory & [RAG](https://www.promptingguide.ai/techniques/rag), evaluation, and orchestration (_coming soonβ„’_). +- πŸ“ You want best-practice, extensible templates, which use [string literal types](https://www.typescriptlang.org/docs/handbook/2/template-literal-types.html) + under the hood for transparency. + +Oh, and liking Typescript is a nice-to-have. + +## Guiding principles + +- πŸŒ€ We are Typescript-first, and only support TS (or JS) - with services like [Replicate](https://replicate.com/) or [OpenAI](https://platform.openai.com/docs/introduction), why do you need Python? +- 🀏 We provide a simple, ejectable interface with common LLM use-cases. This is aligned 1-1 with LLM provider abstractions, like OpenAI's. +- πŸͺ’ We explicitly _don't_ provide a ton of custom tools (please don't ask for too many πŸ˜…) outside of the building blocks and simple examples provided. Other frameworks provide these, but when you use them, you soon realize the tool you want is very use-case specific. +- πŸ§ͺ We (will) provide evaluation frameworks which let you simulate user scenarios and backend interactions with the LLM, including multi-turn conversations and function calling. +- 🐢 We support Node.js, Vercel Edge Functions, Cloudflare Workers, and more (oh and even web, if you like giving away API keys). + +## Community + +If you have questions or need help, reach out to the community in the [Hopfield GitHub Discussions](https://github.com/EnjoinHQ/hopfield/discussions). + +
+ +
+ +## Learn more + +Read the [Getting Started](/guide/getting-started) guide to learn more how to use Hopfield. + +### Inspiration + +Shoutout to these projects which inspired us: + +- [Zod](https://github.com/colinhacks/zod) +- [zod-to-json-schema](https://github.com/StefanTerdell/zod-to-json-schema) +- [Autochain](https://github.com/Forethought-Technologies/AutoChain) +- [Langchain.js](https://github.com/hwchase17/langchainjs) +- [simpleaichat](https://github.com/minimaxir/simpleaichat) +- [Auto-GPT](https://github.com/Significant-Gravitas/Auto-GPT) +- [abitype](https://github.com/wagmi-dev/abitype) + +If you like Hopfield, go star them on Github too. +' + +'--- +description: "Deep dive into how to work with embeddings in Hopfield." +title: "Embeddings Details" +--- + +# Embeddings + +Hopfield allows developers to easily query and validate responses from embeddings providers. +You can use different APIs with type guarantees with Zod and composability. + +## Usage + +Create and use embeddings from OpenAI, directly with an API client: + +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const embeddings = hopfield.embedding("text-embedding-ada-002", 3); + +const response = await embeddings.get({ input: ["ready", "set", "hop"] }); + +const embeddingCount = response.data.length; +// ^? + +const embeddingLength = response.data[0].embedding.length; +// ^? + + +Or if you prefer, you can only use Hopfield's Zod validations, and use the OpenAI +SDK directly: + +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const openAIClient = new OpenAI(); + +const hopfield = hop.client(openai); + +const embeddings = hopfield.embedding("text-embedding-ada-002", 3); + +const response = await openAIClient.embeddings.create({ + model: embeddings.model, + input: ["ready", "set", "hop"], +}); + +const parsed = embeddings.returnType.parse(response); + +const embeddingCount = parsed.data.length; +// ^? + +const embeddingLength = parsed.data[0].embedding.length; +// ^? + + +## Parameters + +### Model Name + +The model name to use for the embedding. This parameter depends on the client specified. +The embedding length will change based on this parameter, since different text embeddings +can have varying lengths. + +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); +// ---cut--- +const embeddings = hopfield.embedding("text-embedding-ada-002"); + +const response = await embeddings.get({ input: ["hop"] }); + +const embedding = response.data[0].embedding; +// ^? + + +#### OpenAI + +The OpenAI model name defaults to the value shown below. This is currently the only supported model. + +ts twoslash +import type { DefaultOpenAIEmbeddingModelName } from "hopfield/openai"; +// ^? + + +--- + +### Embedding Count + +The count of text embeddings to be returned. For all providers, this defaults to 1. +This is capped at 20. + +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); +// ---cut--- +const embeddings = hopfield.embedding("text-embedding-ada-002", 3); + +const response = await embeddings.get({ input: ["ready", "set", "hop"] }); + +const embeddingLength = response.data.length; +// ^? 
+ +const thirdEmbeddingLength = response.data[2].embedding.length; +// ^? + +' + +'--- +description: "An overview of working with embeddings in Hopfield." +title: "Overview of Embeddings" +--- + +# Embeddings + +Hopfield provides an easy way to get type-safe embeddings. You can use different API providers with type +guarantees with Zod, and composability across providers. + +::: info API Providers + +We currently only support OpenAI, but are +working on adding further providers. Reach out on +[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions) if you have any suggestions! + +::: + +## Usage + +Check out how we type responses: + +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const embeddings = hopfield.embedding(); + +const response = await hopfield.embedding().get({ input: ["hello"] }); +const embedding = response.data[0].embedding; +// ^? + + +You can guarantee that your response is constructed correctly (with no optional accessors) +and the embedding and outer array uses [the tuple type](https://www.typescriptlang.org/docs/handbook/2/objects.html#tuple-types), +based on the inputs you requested. + +## Composability + +The big unlock is not only that types are guaranteed to be safe - we provide composability to +allow building complex apps with [RAG](https://www.promptingguide.ai/techniques/rag) and embedding-driven search. + +::: info +We are actively working on building a RAG solution - please reach out if you are interested +in influencing the API for this! ::: -## TL;DR +## Learn More -Hopfield might be a good fit for your project if: +Learn more about the intricacies embeddings in the [Embeddings](/embeddings/details) page. +' -- πŸ—οΈ You build with Typescript/Javascript, and have your database schemas in these languages (e.g. [Prisma](https://www.prisma.io/) and/or [Next.js](https://nextjs.org/)). -- πŸͺ¨ You don't need a heavyweight LLM orchestration framework that ships with a ton of dependencies you'll never use. -- πŸ€™ You're using OpenAI function calling and/or custom tools, and want Typescript-native features for them (e.g. validations w/ [Zod](https://github.com/colinhacks/zod)). -- πŸ’¬ You're building complex LLM interactions which use memory & [RAG](https://www.promptingguide.ai/techniques/rag), evaluation, and orchestration (_coming soonβ„’_). -- πŸ“ You want best-practice, extensible templates, which use [string literal types](https://www.typescriptlang.org/docs/handbook/2/template-literal-types.html) - under the hood for transparency. +'--- +description: "Comparisons between Hopfield's features and features from similar libraries." +title: "Comparisons" +--- -Oh, and liking Typescript is a nice-to-have. +# Comparisons -## Guiding principles +No other library does what Hopfield does (inferring static LLM TypeScript types from Zod schemas), but there are some similarities with other libraries. This page compares Hopfield to other libraries. -- πŸŒ€ We are Typescript-first, and only support TS (or JS) - with services like [Replicate](https://replicate.com/) or [OpenAI](https://platform.openai.com/docs/introduction), why do you need Python? -- 🀏 We provide a simple, ejectable interface with common LLM use-cases. This is aligned 1-1 with LLM provider abstractions, like OpenAI's. 
-- πŸͺ’ We explicitly _don't_ provide a ton of custom tools (please don't ask for too many πŸ˜…) outside of the building blocks and simple examples provided. Other frameworks provide these, but when you use them, you soon realize the tool you want is very use-case specific. -- πŸ§ͺ We (will) provide evaluation frameworks which let you simulate user scenarios and backend interactions with the LLM, including multi-turn conversations and function calling. -- 🐢 We support Node.js, Vercel Edge Functions, Cloudflare Workers, and more (oh and even web, if you like giving away API keys). +Comparisons strive to be as accurate and as unbiased as possible. If you use any of these libraries and feel the information could be improved, feel free to suggest changes. -## Community +## vercel/ai -If you have questions or need help, reach out to the community in the [Hopfield GitHub Discussions](https://github.com/EnjoinHQ/hopfield/discussions). +[**ai**](https://github.com/vercel/ai) is a framework for AI-powered applications with React, Svelte, Vue, and Solid. They provide hooks to easily integrate +with a streaming text response (StreamingTextResponse) and allow a callback for function calling, as well as simple, drop-in components for React and other +frameworks. -
- -
+**Hopfield** provides a subset of these features, and focuses solely on the API interactions, and **not** on providing React components. -## Learn more +Below is a comparison of the library features: -Read the [Getting Started](/guide/getting-started) guide to learn more how to use Hopfield. +| | **ai** | **Hopfield** | +| ---------------------- | ----------------------------------------- | ---------------------------------------------- | +| **React Components** | Easy, inflexible UI components & hooks | No UI components or hooks | +| **Typed Functions** | Streaming function calls with loose types | Strict function call types with Zod validation | +| **Framework Examples** | Multiple | Multiple | +| **Chat Providers** | Multiple | OpenAI, with support for others coming | -### Inspiration +## Langchain.js -Shoutout to these projects which inspired us: +[**Langchain.js**](https://github.com/hwchase17/langchainjs) is a framework for developing applications powered by language models +with Javascript. Developers usually use Langchain to develop apps which connect to internal tools (like internal knowledge bases, +LLM demos, and generally in trusted environments). -- [Zod](https://github.com/colinhacks/zod) -- [zod-to-json-schema](https://github.com/StefanTerdell/zod-to-json-schema) -- [Autochain](https://github.com/Forethought-Technologies/AutoChain) -- [Langchain.js](https://github.com/hwchase17/langchainjs) -- [simpleaichat](https://github.com/minimaxir/simpleaichat) -- [Auto-GPT](https://github.com/Significant-Gravitas/Auto-GPT) -- [abitype](https://github.com/wagmi-dev/abitype) +**Hopfield** is a TypeScript library that provides a subset of Langchain's features, +prioritizing inferring static types from LLM input, alongside runtime response validation and static typing. -If you like Hopfield, go star them on Github too. +Below is a comparison of the library features: + +| | **Langchain.js** | **Hopfield** | +| -------------------- | ------------------------------------------- | ------------------------------------------------------------------------ | +| **Prompt Templates** | Opaque | Use string template types for type inference | +| **Type Generation** | Loose types with some Typescript helpers | Static types with Zod validation | +| **Function Calling** | Starter templates, with some Zod validation | Validation-driven, composable functions | +| **Connectors/Tools** | Many, with various integrations | Only a select few, with examples (actively being developed) | +| **Dependencies** | Many, with non-optional peer dependencies | Few, with strict bundle splitting to avoid unnecessary peer dependencies | ' '--- -description: "A detailed guide on seamlessly fetching and streaming data directly into React components." -title: "Next.js App Router with Hopfield" +description: "Quickly add Hopfield to your TypeScript project." +title: "Getting Started" --- -# Next.js App Router - -Hopfield empowers developers to seamlessly fetch and stream data directly into Next.js React Server Components. +# Getting Started -## Overview +This section will help you start using Hopfield in your TypeScript project. -Hopfield streaming chat provides a readableStream() which can be used to build recursive React Server Components. 
+## Install -The readableStream() from Hopfield's streaming chat provider returns a [ReadableStream](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) (available in Node 18+, or it can be polyfilled with a library like [web-streams-polyfill](https://www.npmjs.com/package/web-streams-polyfill).). +First, you will need to install Hopfield. -::: info Non-streaming +npm i hopfield -If you are not interested in using streaming, you can use the non-streaming chat provider easily with a simple RSC -that awaits the full response from chat.get(). This is not shown below, but is a much simpler integration that does not -include any custom code for streaming token by token. +### OpenAI -::: +You'll also need to set up a Hopfield Provider, which is used to interact with the API. +We currently only support OpenAI (but are working on adding other providers). -### Backpressure +To use Hopfield, you will need to install the latest 4+ version of openai. -The readable stream handles backpressure with a pull-based approach. See our [tests](https://github.com/EnjoinHQ/hopfield/blob/main/src/utils.test.ts) for how Hopfield handles backpressure. For a more detailed explanation on "backpressure" and how it factors into streaming LLM responses, please see the -[vercel/ai docs](https://sdk.vercel.ai/docs/concepts/backpressure-and-cancellation). +npm i openai@4 -## Usage +## Create a Provider -Here's how to use Hopfield with a recursive React Server Component using Suspense: +We create a Hopfield provider, which stores the provider client and uses it for API requests under +the hood. -tsx -import { Suspense } from "react"; +ts import hop from "hopfield"; import openai from "hopfield/openai"; import OpenAI from "openai"; -// Set up the OpenAI client -const openaiClient = new OpenAI({ apiKey: "OPENAI_API_KEY" }); -// Pass the OpenAI client into Hopfield -const hopfield = hop.client(openai).provider(openaiClient); -// Create a streaming chat provider -const chat = hopfield.chat("gpt-4o-2024-05-13").streaming(); +// create an OpenAI client (must be prerelease version for latest features) +const openAIClient = new OpenAI({ apiKey: "{OPENAI_API_KEY}" }); // [!code focus] -export type ChatResponseProps = { - prompt: string; -}; +// use the OpenAI client with Hopfield +// or, you can *not* pass a provider, and just use the runtime validations +const hopfield = hop.client(openai).provider(openAIClient); // [!code focus] -export async function ChatResponse({ prompt }: ChatResponseProps) { - // construct messages with hop.inferMessageInput - const messages: hop.inferMessageInput[] = [ - { - role: "system", - content: "You are a helpful AI assistant.", - }, - { - role: "user", - content: prompt, - }, - ]; - const response = await chat.get( - { messages: messages }, - { - onChunk: async (value) => { - console.log(Received chunk type: \${value.choices[0].__type}); - // do something on the server with each individual chunk as it is - // streamed in - }, - onDone: async (chunks) => { - console.log(Total chunks received: \${chunks.length}); - // do something on the server when the chat completion is done - // this can be caching the response, storing in a database, etc. 
- // - // chunks is an array of all the streamed responses, so you - // can access the raw content and combine how you'd like - }, - // if you are using function calling, you can also add a onFunctionCall - // here with zod-parsed arguments - } - ); +## Streaming Chat - // pass the readableStream to the RSC - return ; -} +We can now create a Streaming Chat instance of Hopfield. We use the provider we created above, +and create a new chat instance. -type Props = { - /** - * A ReadableStream produced by Hopfield. - */ - stream: ReadableStream>; -}; +ts +export const chat = hopfield.chat().streaming(); // [!code focus] -/** - * A React Server Component that recursively renders a stream of tokens. - */ -async function Tokens(props: Props) { - const { stream } = props; - const reader = stream.getReader(); - return ( - - - - ); -} +We can now use this chat instance for every chat interaction, with simplified streaming +and other features. Below, we show how to use get to interact with the Chat Completions +API, and utility types inferMessageInput and inferResult +to get the typing of the inputs/outputs for the chat instance. -type RecursiveTokensProps = { - reader: ReadableStreamDefaultReader>; -}; +ts +import hop from "hopfield"; +import { chat } from "./chat"; +// [!code focus:12] +const messages: hop.inferMessageInput[] = [ + { + role: "user", + content: "What's the coolest way to count to ten?", + }, +]; -async function RecursiveTokens({ reader }: RecursiveTokensProps) { - const { done, value } = await reader.read(); +const response = await chat.get({ + messages, +}); - if (done) { - return null; - } - return ( - <> - {value.choices[0].__type === "content" ? ( - value.choices[0].delta.content - ) : ( - <> - )} - }> - - - - ); -} +We can then stream the response from the chat instance and store the chunks +in an array, as well as take any action for the incoming chunk. -// This can be any loading indicator you want, which gets appended to the end -// of the tokens while waiting for the next token to be streamed -const LoadingDots = () => ...; +ts +// store all of the streaming chat chunks +const parts: hop.inferResult[] = []; + +for await (const part of response) { + // if the streaming delta contains new text content + if (part.choices[0].__type === "content") { + // handle the new content + } + parts.push(part); +} -We create a recursive React Server Component which uses Suspense boundaries to await each token, -and show a fallback loading indicator where the next token will be rendered. -See our [Next 13 RSC example](https://next-13.hopfield.ai) for a real-world integration -using Vercel, similar to this quick example. +As you can see, it's super easy to add streaming to your application with minimal dependencies +and a simple async iterator for easy streaming with Zod validation and strict typing. -### Dive Deeper +## What's next? -To deepen your understanding of how Streaming works, and how it can be further utilized within your application, -refer to the [Streaming Chat](/chat/streaming) section. +Now that you're all set up, you are ready to dive in to the docs further! 
' '`; diff --git a/src/CHANGELOG.md b/src/CHANGELOG.md index c7346a3..c7dfd4c 100644 --- a/src/CHANGELOG.md +++ b/src/CHANGELOG.md @@ -1,5 +1,11 @@ # hopfield +## 0.3.7 + +### Patch Changes + +- [#32](https://github.com/EnjoinHQ/hopfield/pull/32) [`40d5dc9f1e972cfc503718fc8749e0613ae4b7ed`](https://github.com/EnjoinHQ/hopfield/commit/40d5dc9f1e972cfc503718fc8749e0613ae4b7ed) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** updated the default model name to be more recent. + ## 0.3.6 ### Patch Changes diff --git a/src/errors/version.ts b/src/errors/version.ts new file mode 100644 index 0000000..fd11fb3 --- /dev/null +++ b/src/errors/version.ts @@ -0,0 +1 @@ +export const version = '0.3.7' diff --git a/src/package.json b/src/package.json index 949b562..383db74 100644 --- a/src/package.json +++ b/src/package.json @@ -1,6 +1,6 @@ { "name": "hopfield", - "version": "0.3.6", + "version": "0.3.7", "repository": "EnjoinHQ/hopfield", "main": "./_cjs/index.js", "module": "./_esm/index.js", @@ -32,7 +32,13 @@ "!**/*.bench.ts", "!tsconfig.build.json" ], - "keywords": ["ai", "llm", "openai", "gpt", "ai-tools"], + "keywords": [ + "ai", + "llm", + "openai", + "gpt", + "ai-tools" + ], "license": "MIT", "peerDependenciesMeta": { "openai": { @@ -47,7 +53,9 @@ "types": "./_types/index.d.ts", "typesVersions": { "*": { - "openai": ["./_types/openai/index.d.ts"] + "openai": [ + "./_types/openai/index.d.ts" + ] } }, "typings": "./_types/index.d.ts",