Skip to content

Commit 93c1d9b

Browse files
authored
feat: upgrade ai deps (#328)
1 parent 2e58acc commit 93c1d9b

File tree

9 files changed

+6120
-4782
lines changed

9 files changed

+6120
-4782
lines changed

app/[locale]/(home)/action.tsx

+6-6
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { ExperimentalMessage } from 'ai';
1+
import { CoreMessage } from 'ai';
22
import { createAI, createStreamableUI, createStreamableValue, getMutableAIState, StreamableValue } from 'ai/rsc';
33

44
import { AskFollowUpQuestion } from '@/components/layout/search/block/ask-follow-up-question';
@@ -13,9 +13,9 @@ import { SimpleModel } from '@/types/model';
1313
import { SearchEngineSetting, TChallengerAction } from '@/types/search';
1414
import { ProviderSetting } from '@/types/settings';
1515

16-
const allowProvider = ['OpenAI'] as Provider[];
16+
const AllowSearchProvider = ['OpenAI'] as Provider[];
1717

18-
const chat = async (model: SimpleModel, messages: ExperimentalMessage[]) => {
18+
const chat = async (model: SimpleModel, messages: CoreMessage[]) => {
1919
'use server';
2020
};
2121

@@ -29,7 +29,7 @@ const search = async (
2929
) => {
3030
'use server';
3131

32-
if (!allowProvider.includes(model?.provider)) {
32+
if (!AllowSearchProvider.includes(model?.provider)) {
3333
return {
3434
id: Date.now(),
3535
isGenerating: false,
@@ -60,7 +60,7 @@ const search = async (
6060
const uiStream = createStreamableUI();
6161
const isGenerating = createStreamableValue(true);
6262

63-
const messages: ExperimentalMessage[] = aiState.get() as any;
63+
const messages: CoreMessage[] = aiState.get() as any;
6464

6565
const question = formData?.get('input') as string;
6666

@@ -69,7 +69,7 @@ const search = async (
6969

7070
if (content) {
7171
const message = { role: 'user', content };
72-
messages.push(message as ExperimentalMessage);
72+
messages.push(message as CoreMessage);
7373
aiState.update([...(aiState.get() as any), message]);
7474
}
7575

app/api/search/google/route.ts

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import { createOpenAI } from '@ai-sdk/openai';
2-
import { experimental_streamText, ExperimentalMessage, StreamingTextResponse, ToolCallPart, ToolResultPart } from 'ai';
2+
import { CoreMessage, StreamingTextResponse, streamText as aiStreamText, ToolCallPart, ToolResultPart } from 'ai';
33
import { createStreamableUI, createStreamableValue } from 'ai/rsc';
44

55
import { searcherPrompt } from '@/lib/prompt';
@@ -17,7 +17,7 @@ export async function POST(req: Request) {
1717
config,
1818
stream,
1919
}: {
20-
messages: ExperimentalMessage[];
20+
messages: CoreMessage[];
2121
config: ApiConfig;
2222
stream: boolean;
2323
} = await req.json();
@@ -33,7 +33,7 @@ export async function POST(req: Request) {
3333
baseUrl: config.provider?.endpoint ?? process.env.OPENAI_API_ENDPOINT ?? 'https://api.openai.com/v1',
3434
});
3535

36-
const result = await experimental_streamText({
36+
const result = await aiStreamText({
3737
model: openai.chat('gpt-4'),
3838
system: searcherPrompt,
3939
messages,

components/layout/settings/provider/anthropic.tsx

+1-1
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ export const AnthropicProvider = ({ anthropic, setAnthropic }: { anthropic: Prov
88
<p className='px-1 text-sm'>Anthropic API Key</p>
99
<Input
1010
type='text'
11-
placeholder='sk-xxxx'
11+
placeholder='sk-ant-xxxx'
1212
value={anthropic?.apiKey}
1313
onChange={(e) => {
1414
setAnthropic({

lib/search/challenger.tsx

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import { createOpenAI } from '@ai-sdk/openai';
2-
import { experimental_generateObject, ExperimentalMessage } from 'ai';
2+
import { CoreMessage, generateObject } from 'ai';
33
import { z } from 'zod';
44

55
import { challengerPrompt } from '@/lib/prompt';
@@ -10,15 +10,15 @@ export const challengerSchema = z.object({
1010
next: z.enum(['proceed', 'challenge']),
1111
});
1212

13-
export const challenger = async (messages: ExperimentalMessage[], model: SimpleModel, currentProviderSettings: ProviderSetting | null) => {
13+
export const challenger = async (messages: CoreMessage[], model: SimpleModel, currentProviderSettings: ProviderSetting | null) => {
1414
'use server';
1515

1616
const openai = createOpenAI({
1717
apiKey: currentProviderSettings?.OpenAI?.apiKey ?? process.env.OPENAI_API_KEY ?? '',
1818
// baseURL: currentProviderSettings?.OpenAI?.endpoint ?? process.env.OPENAI_API_ENDPOINT ?? 'https://api.openai.com/v1',
1919
});
2020

21-
return await experimental_generateObject({
21+
return await generateObject({
2222
model: openai.chat(model.model_id ?? 'gpt-3.5-turbo'),
2323
system: challengerPrompt,
2424
messages,

lib/search/clarifier.tsx

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import { createOpenAI } from '@ai-sdk/openai';
2-
import { experimental_streamObject, ExperimentalMessage } from 'ai';
2+
import { CoreMessage, streamObject } from 'ai';
33
import { createStreamableUI, createStreamableValue } from 'ai/rsc';
44
import { z } from 'zod';
55

@@ -17,7 +17,7 @@ export const clarifierSchema = z.object({
1717
clarifyPlaceholder: z.string().optional().describe('The clarify placeholder for input'),
1818
});
1919

20-
export const clarifier = async (uiStream: ReturnType<typeof createStreamableUI>, messages: ExperimentalMessage[], model: SimpleModel, currentProviderSettings: ProviderSetting | null) => {
20+
export const clarifier = async (uiStream: ReturnType<typeof createStreamableUI>, messages: CoreMessage[], model: SimpleModel, currentProviderSettings: ProviderSetting | null) => {
2121
'use server';
2222

2323
const objectStream = createStreamableValue<TClarifier>();
@@ -31,7 +31,7 @@ export const clarifier = async (uiStream: ReturnType<typeof createStreamableUI>,
3131
// baseURL: currentProviderSettings?.OpenAI?.endpoint ?? process.env.OPENAI_API_ENDPOINT ?? 'https://api.openai.com/v1',
3232
});
3333

34-
await experimental_streamObject({
34+
await streamObject({
3535
model: openai.chat(model.model_id ?? 'gpt-4-turbo'),
3636
system: clarifierPrompt,
3737
messages,

lib/search/illustrator.tsx

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import { createOpenAI } from '@ai-sdk/openai';
2-
import { experimental_streamObject, ExperimentalMessage } from 'ai';
2+
import { CoreMessage, streamObject } from 'ai';
33
import { createStreamableUI, createStreamableValue } from 'ai/rsc';
44
import { z } from 'zod';
55

@@ -13,7 +13,7 @@ export const illustratorSchema = z.object({
1313
items: z.array(z.object({ query: z.string() })).length(3),
1414
});
1515

16-
export const illustrator = async (uiStream: ReturnType<typeof createStreamableUI>, messages: ExperimentalMessage[], model: SimpleModel, currentProviderSettings: ProviderSetting | null) => {
16+
export const illustrator = async (uiStream: ReturnType<typeof createStreamableUI>, messages: CoreMessage[], model: SimpleModel, currentProviderSettings: ProviderSetting | null) => {
1717
'use server';
1818

1919
const objectStream = createStreamableValue<TIllustrator>();
@@ -25,7 +25,7 @@ export const illustrator = async (uiStream: ReturnType<typeof createStreamableUI
2525
// baseURL: currentProviderSettings?.OpenAI?.endpoint ?? process.env.OPENAI_API_ENDPOINT ?? 'https://api.openai.com/v1',
2626
});
2727

28-
await experimental_streamObject({
28+
await streamObject({
2929
model: openai.chat(model.model_id ?? 'gpt-4-turbo'),
3030
system: illustratorPrompt,
3131
messages,

lib/search/searcher.tsx

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import { createOpenAI } from '@ai-sdk/openai';
2-
import { experimental_streamText, ExperimentalMessage, ToolCallPart, ToolResultPart } from 'ai';
2+
import { CoreMessage, streamText as aiStreamText, ToolCallPart, ToolResultPart } from 'ai';
33
import { createStreamableUI, createStreamableValue } from 'ai/rsc';
44
import { z } from 'zod';
55

@@ -22,7 +22,7 @@ export const searcherSchema = z.object({
2222
export const searcher = async (
2323
uiStream: ReturnType<typeof createStreamableUI>,
2424
streamText: ReturnType<typeof createStreamableValue<string>>,
25-
messages: ExperimentalMessage[],
25+
messages: CoreMessage[],
2626
isProSearch: boolean,
2727
model: SimpleModel,
2828
currentSearchEngineSettings: SearchEngineSetting | null,
@@ -37,7 +37,7 @@ export const searcher = async (
3737
// baseURL: currentProviderSettings?.OpenAI?.endpoint ?? process.env.OPENAI_API_ENDPOINT ?? 'https://api.openai.com/v1',
3838
});
3939

40-
const result = await experimental_streamText({
40+
const result = await aiStreamText({
4141
model: openai.chat(model.model_id ?? 'gpt-4-turbo'),
4242
maxTokens: 2500,
4343
system: searcherPrompt,

package.json

+7-8
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111
"eslint:fix": "eslint --ext .ts,.tsx --fix ."
1212
},
1313
"dependencies": {
14-
"@ai-sdk/anthropic": "^0.0.5",
15-
"@ai-sdk/google": "^0.0.5",
16-
"@ai-sdk/openai": "^0.0.4",
14+
"@ai-sdk/anthropic": "^0.0.16",
15+
"@ai-sdk/google": "^0.0.16",
16+
"@ai-sdk/openai": "^0.0.16",
1717
"@anthropic-ai/sdk": "^0.20.6",
1818
"@aws-sdk/client-bedrock-runtime": "^3.556.0",
1919
"@azure/openai": "^1.0.0-beta.12",
@@ -36,15 +36,15 @@
3636
"@types/file-saver": "^2.0.7",
3737
"@vercel/analytics": "^1.2.2",
3838
"@vercel/speed-insights": "^1.0.10",
39-
"ai": "^3.0.24",
39+
"ai": "^3.1.15",
4040
"class-variance-authority": "^0.7.0",
4141
"clsx": "^2.1.0",
4242
"cohere-ai": "^7.9.5",
4343
"file-saver": "^2.0.5",
4444
"groq-sdk": "^0.3.2",
4545
"jotai": "^2.8.0",
4646
"lucide-react": "^0.363.0",
47-
"next": "14.2.2",
47+
"next": "14.2.3",
4848
"next-intl": "^3.11.3",
4949
"next-themes": "^0.3.0",
5050
"openai": "^4.38.2",
@@ -80,6 +80,5 @@
8080
"tailwindcss": "^3.4.3",
8181
"typescript": "^5.4.5",
8282
"webpack": "^5.91.0"
83-
},
84-
"packageManager": "[email protected]"
85-
}
83+
}
84+
}

0 commit comments

Comments (0)