fix(OpenAI Node): Don't send system prompt for O1 model #12200

Open · wants to merge 1 commit into base: master
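For context: when JSON output is enabled, the Message operation prepends a system message that instructs the model to return JSON. The o1 family doesn't support the system role, so this change skips that message for o1 models while leaving the rest of the request unchanged. A rough sketch of the resulting request body, with illustrative values only (not taken from the PR):

// Illustrative only: shape of the JSON-output request after this change.
const body = {
	model: 'o1-mini',
	response_format: { type: 'json_object' },
	messages: [
		// For non-o1 models a system message is still prepended here:
		// { role: 'system', content: 'You are a helpful assistant designed to output JSON.' },
		{ role: 'user', content: 'message' },
	],
};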
@@ -220,13 +220,17 @@ export async function execute(this: IExecuteFunctions, i: number): Promise<INode
 	let response_format;
 	if (jsonOutput) {
 		response_format = { type: 'json_object' };
-		messages = [
-			{
-				role: 'system',
-				content: 'You are a helpful assistant designed to output JSON.',
-			},
-			...messages,
-		];
+
+		// o1 family doesn't support system prompt
+		if (!model?.toString().toLocaleLowerCase().startsWith('o1')) {
+			messages = [
+				{
+					role: 'system',
+					content: 'You are a helpful assistant designed to output JSON.',
+				},
+				...messages,
+			];
+		}
 	}
 
 	const hideTools = this.getNodeParameter('hideTools', i, '') as string;
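The guard added above boils down to a case-insensitive prefix check on the model ID. A minimal sketch of that condition in isolation; the helper name is illustrative and not part of the PR:

// Sketch of the condition added in the diff above (helper name is hypothetical).
function shouldPrependJsonSystemPrompt(model: string | undefined): boolean {
	// o1 family doesn't support the system prompt, so skip it for any model ID
	// starting with 'o1'; an undefined model keeps the previous behaviour.
	return !model?.toString().toLocaleLowerCase().startsWith('o1');
}

// Examples:
//   shouldPrependJsonSystemPrompt('o1-mini')     -> false (system message skipped)
//   shouldPrependJsonSystemPrompt('O1-preview')  -> false (check is case-insensitive)
//   shouldPrependJsonSystemPrompt('gpt-4o-mini') -> true  (system message still prepended)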
@@ -1,11 +1,11 @@
-import type { IDataObject, IExecuteFunctions } from 'n8n-workflow';
 import get from 'lodash/get';
+import type { IDataObject, IExecuteFunctions } from 'n8n-workflow';
 
 import * as assistant from '../actions/assistant';
 import * as audio from '../actions/audio';
 import * as file from '../actions/file';
 import * as image from '../actions/image';
 import * as text from '../actions/text';
 
 import * as transport from '../transport';
 
 const createExecuteFunctionsMock = (parameters: IDataObject) => {
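The tests below cast transport.apiRequest to jest.Mock, which relies on the transport module being mocked elsewhere in this spec file. A minimal sketch of that setup, assuming a standard Jest module mock (not part of this diff):

// Assumed setup (not shown in this diff): replace the transport module so that
// apiRequest is a jest.fn() the tests can stub and assert on.
jest.mock('../transport', () => ({
	apiRequest: jest.fn(),
}));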
@@ -620,4 +620,72 @@ describe('OpenAi, Text resource', () => {
 			},
 		});
 	});
+
+	it('message => json output, o1 models should not receive system prompt', async () => {
+		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({
+			choices: [{ message: { tool_calls: undefined } }],
+		});
+
+		await text.message.execute.call(
+			createExecuteFunctionsMock({
+				modelId: 'o1-mini',
+				messages: {
+					values: [{ role: 'user', content: 'message' }],
+				},
+
+				jsonOutput: true,
+
+				options: {},
+			}),
+			0,
+		);
+
+		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/chat/completions', {
+			body: {
+				messages: [{ content: 'message', role: 'user' }],
+				model: 'o1-mini',
+				response_format: {
+					type: 'json_object',
+				},
+				tools: undefined,
+			},
+		});
+	});
+
+	it('message => json output, older models should receive system prompt', async () => {
+		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({
+			choices: [{ message: { tool_calls: undefined } }],
+		});
+
+		await text.message.execute.call(
+			createExecuteFunctionsMock({
+				modelId: 'gpt-model',
+				messages: {
+					values: [{ role: 'user', content: 'message' }],
+				},
+
+				jsonOutput: true,
+
+				options: {},
+			}),
+			0,
+		);
+
+		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/chat/completions', {
+			body: {
+				messages: [
+					{
+						role: 'system',
+						content: 'You are a helpful assistant designed to output JSON.',
+					},
+					{ content: 'message', role: 'user' },
+				],
+				model: 'gpt-model',
+				response_format: {
+					type: 'json_object',
+				},
+				tools: undefined,
+			},
+		});
+	});
 });