From 9affca3ed058b285156249fef0314506f6ff7c40 Mon Sep 17 00:00:00 2001
From: g97iulio1609
Date: Sat, 14 Feb 2026 21:23:40 +0100
Subject: [PATCH 1/2] fix(openai-compatible): inject JSON schema instruction
 when structuredOutputs disabled (#12491)

When the OpenAI-compatible provider has structuredOutputs disabled (the
default), it falls back from json_schema to json_object response format,
which silently drops the schema. The model is told to produce JSON but has
no knowledge of what schema to produce, leading to validation failures.

This fix injects the JSON schema as a system instruction into the prompt
messages (using injectJsonInstructionIntoMessages from provider-utils) when
the provider cannot enforce the schema via response_format. This gives the
model explicit guidance about the expected output structure, matching the
reliability of providers that support structured outputs natively.

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
 ...nai-compatible-chat-language-model.test.ts |  8 +++++++-
 .../openai-compatible-chat-language-model.ts  | 20 ++++++++++++++++----
 2 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/packages/openai-compatible/src/chat/openai-compatible-chat-language-model.test.ts b/packages/openai-compatible/src/chat/openai-compatible-chat-language-model.test.ts
index ee71b886e191..460327f6ed47 100644
--- a/packages/openai-compatible/src/chat/openai-compatible-chat-language-model.test.ts
+++ b/packages/openai-compatible/src/chat/openai-compatible-chat-language-model.test.ts
@@ -835,7 +835,7 @@ describe('doGenerate', () => {
     `);
   });
 
-  it('should forward json response format as "json_object" and omit schema when structuredOutputs are disabled', async () => {
+  it('should forward json response format as "json_object" and inject schema instruction when structuredOutputs are disabled', async () => {
     prepareJsonResponse({ content: '{"value":"Spark"}' });
 
     const model = new OpenAICompatibleChatLanguageModel('gpt-4o-2024-08-06', {
@@ -862,6 +862,12 @@
     expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
       {
         "messages": [
+          {
+            "content": "JSON schema:
+      {"type":"object","properties":{"value":{"type":"string"}},"required":["value"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}
+      You MUST answer with a JSON object that matches the JSON schema above.",
+            "role": "system",
+          },
           {
             "content": "Hello",
             "role": "user",
diff --git a/packages/openai-compatible/src/chat/openai-compatible-chat-language-model.ts b/packages/openai-compatible/src/chat/openai-compatible-chat-language-model.ts
index 9a803cb1173e..3cb7bfeaf35d 100644
--- a/packages/openai-compatible/src/chat/openai-compatible-chat-language-model.ts
+++ b/packages/openai-compatible/src/chat/openai-compatible-chat-language-model.ts
@@ -18,6 +18,7 @@ import {
   createJsonResponseHandler,
   FetchFunction,
   generateId,
+  injectJsonInstructionIntoMessages,
   isParsableJson,
   parseProviderOptions,
   ParseResult,
@@ -162,11 +163,15 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
       warnings.push({ type: 'unsupported', feature: 'topK' });
     }
 
-    if (
+    // When structured outputs are not supported but a schema is provided,
+    // inject the JSON schema as a system instruction so the model knows
+    // what structure to produce (since the schema is dropped from response_format).
+    const shouldInjectSchemaInstruction =
       responseFormat?.type === 'json' &&
       responseFormat.schema != null &&
-      !this.supportsStructuredOutputs
-    ) {
+      !this.supportsStructuredOutputs;
+
+    if (shouldInjectSchemaInstruction) {
       warnings.push({
         type: 'unsupported',
         feature: 'responseFormat',
@@ -175,6 +180,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
       });
     }
 
+    const messagesPrompt = shouldInjectSchemaInstruction
+      ? injectJsonInstructionIntoMessages({
+          messages: prompt,
+          schema: responseFormat.schema,
+        })
+      : prompt;
+
     const {
       tools: openaiTools,
       toolChoice: openaiToolChoice,
@@ -231,7 +243,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
       verbosity: compatibleOptions.textVerbosity,
 
       // messages:
-      messages: convertToOpenAICompatibleChatMessages(prompt),
+      messages: convertToOpenAICompatibleChatMessages(messagesPrompt),
 
       // tools:
       tools: openaiTools,

From 91897c3ecd7fafc40fe7f6b3353dbe8785f997bc Mon Sep 17 00:00:00 2001
From: g97iulio1609
Date: Sat, 14 Feb 2026 21:33:17 +0100
Subject: [PATCH 2/2] add changeset for #12491

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
 .changeset/fix-output-object-json-mode.md | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 .changeset/fix-output-object-json-mode.md

diff --git a/.changeset/fix-output-object-json-mode.md b/.changeset/fix-output-object-json-mode.md
new file mode 100644
index 000000000000..4950dc332875
--- /dev/null
+++ b/.changeset/fix-output-object-json-mode.md
@@ -0,0 +1,5 @@
+---
+'@ai-sdk/openai-compatible': patch
+---
+
+fix(openai-compatible): inject JSON schema instruction when structuredOutputs is disabled. Improves reliability of generateText + Output.object() with OpenAI-compatible providers that don't support json_schema response format.
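
For readers who do not know the provider-utils helper by name, the test snapshot above shows the effect the patch relies on: when the schema cannot be enforced through response_format, it is carried into the prompt as a leading system message. The TypeScript sketch below only illustrates that behavior as implied by the snapshot; the helper name prependJsonSchemaInstruction and the message type are hypothetical, and the real logic is injectJsonInstructionIntoMessages from @ai-sdk/provider-utils, which may differ in wording and placement.

// Illustrative sketch only: hypothetical helper name and message type.
// The behavior this patch actually uses comes from
// injectJsonInstructionIntoMessages in @ai-sdk/provider-utils.
type SketchMessage = {
  role: 'system' | 'user' | 'assistant';
  content: string;
};

function prependJsonSchemaInstruction(
  messages: SketchMessage[],
  schema: unknown,
): SketchMessage[] {
  // Mirrors the test snapshot: the serialized schema plus an explicit
  // "answer with a matching JSON object" instruction is prepended as a
  // system message, so a json_object-only backend still knows the shape.
  const instruction = [
    'JSON schema:',
    JSON.stringify(schema),
    'You MUST answer with a JSON object that matches the JSON schema above.',
  ].join('\n');

  return [{ role: 'system', content: instruction }, ...messages];
}

Pairing this instruction with response_format: { type: 'json_object' } is what lets providers without json_schema support return schema-shaped output, which is what the updated test asserts against the request body.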