@@ -11,7 +11,7 @@ fluent-ai is a lightweight, type-safe AI toolkit that seamlessly integrates mult
1111## Installation
1212
1313``` sh
14- npm install fluent-ai zod
14+ npm install fluent-ai zod@next
1515```
1616
1717## AI Service provider support
@@ -48,11 +48,11 @@ Each request to AI providers is wrapped in a `Job`, which can also be serialized and
4848### Method chaining
4949
``` ts
51- import { openai, userPrompt } from "fluent-ai";
51+ import { openai, user } from "fluent-ai";
5252
5353const job = openai()
5454 .chat("gpt-4o-mini")
55-  .messages([userPrompt("Hi")])
55+  .messages([user("Hi")])
5656 .temperature(0.5)
5757 .maxTokens(1024);
5858```
@@ -101,11 +101,11 @@ Chat completion, such as ChatGPT, is the most common AI service. It generates re
101101### Text generation
102102
103103``` ts
104- import { openai, systemPrompt, userPrompt } from "fluent-ai";
104+ import { openai, system, user } from "fluent-ai";
105105
106106const job = openai()
107107 .chat("gpt-4o-mini")
108-  .messages([systemPrompt("You are a helpful assistant"), userPrompt("Hi")]);
108+  .messages([system("You are a helpful assistant"), user("Hi")]);
109109
110110const { text } = await job.run();
111111```
@@ -120,7 +120,7 @@ fluent-ai provides a consistent `jsonSchema()` function for all providers to gen
120120
121121``` ts
122122import { z } from "zod";
123- import { openai, userPrompt } from "fluent-ai";
123+ import { openai, user } from "fluent-ai";
124124
125125const personSchema = z.object({
126126 name: z.string(),
@@ -129,7 +129,7 @@ const personSchema = z.object({
129129
130130const job = openai()
131131 .chat("gpt-4o-mini")
132-  .messages([userPrompt("generate a person with name and age in json format")])
132+  .messages([user("generate a person with name and age in json format")])
133133 .jsonSchema(personSchema, "person");
134134
135135const { object } = await job.run();
@@ -161,7 +161,7 @@ To use the tool, add it to a chat job with a function-calling-enabled model, suc
161161const job = openai().chat("gpt-4o-mini").tool(weatherTool);
162162
163163const { toolCalls } = await job
164-  .messages([userPrompt("What is the weather in San Francisco?")])
164+  .messages([user("What is the weather in San Francisco?")])
165165 .run();
166166```
167167
@@ -172,7 +172,7 @@ Rather than waiting for the complete response, streaming enables the model to re
172172``` ts
173173const job = openai()
174174 .chat("gpt-4o-mini")
175-  .messages([systemPrompt("You are a helpful assistant"), userPrompt("Hi")])
175+  .messages([system("You are a helpful assistant"), user("Hi")])
176176 .stream();
177177
178178const { stream } = await job.run();
@@ -188,13 +188,11 @@ fluent-ai supports streaming text, object and tool calls on demand. For more det
188188You can leverage chat models with vision capabilities by including an image URL in your prompt.
189189
190190``` ts
191- import { openai, systemPrompt, userPrompt } from "fluent-ai";
191+ import { openai, system, user } from "fluent-ai";
192192
193193openai()
194194 .chat("gpt-4o-mini")
195-  .messages([
196-    userPrompt("Describe the image", { image: { url: "<image_url>" } }),
197-  ]);
195+  .messages([user("Describe the image", { image: { url: "<image_url>" } })]);
198196```
199197
200198## Embedding
0 commit comments