fix: 🐛 [#17555] fix openai.BadRequestError: Invalid value for 'content': expected a string, got null. #17556

Open · wants to merge 6 commits into base: main
@@ -2,7 +2,13 @@
import os
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union

+import openai
from deprecated import deprecated
+from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
+from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
+from openai.types.completion_choice import Logprobs
from tenacity import (
    before_sleep_log,
    retry,
@@ -14,20 +20,9 @@
)
from tenacity.stop import stop_base

-import openai
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
-from llama_index.core.base.llms.types import (
-    ChatMessage,
-    ImageBlock,
-    LogProb,
-    TextBlock,
-)
+from llama_index.core.base.llms.types import ChatMessage, ImageBlock, LogProb, TextBlock
from llama_index.core.bridge.pydantic import BaseModel
-from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall
-from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
-from openai.types.chat.chat_completion_message import ChatCompletionMessage
-from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
-from openai.types.completion_choice import Logprobs

DEFAULT_OPENAI_API_TYPE = "open_ai"
DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
@@ -292,20 +287,18 @@ def to_openai_message_dict(
            msg = f"Unsupported content block type: {type(block).__name__}"
            raise ValueError(msg)

-    # NOTE: Sending a blank string to openai will cause an error.
-    # This will commonly happen with tool calls.
-    content_txt = None if content_txt == "" else content_txt
-
    # NOTE: Despite what the openai docs say, if the role is ASSISTANT, SYSTEM
    # or TOOL, 'content' cannot be a list and must be string instead.
    # Furthermore, if all blocks are text blocks, we can use the content_txt
    # as the content. This will avoid breaking openai-like APIs.
    message_dict = {
        "role": message.role.value,
-        "content": content_txt
-        if message.role.value in ("assistant", "tool", "system")
-        or all(isinstance(block, TextBlock) for block in message.blocks)
-        else content,
+        "content": (
+            content_txt
+            if message.role.value in ("assistant", "tool", "system")
+            or all(isinstance(block, TextBlock) for block in message.blocks)
+            else content
+        ),
    }

    # TODO: O1 models do not support system prompts
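What the change above buys in practice: with the old `content_txt = None if content_txt == "" else content_txt` line gone, an assistant turn that carries only a tool/function call is serialized with `"content": ""` instead of being nulled out, which is the value that triggered the `BadRequestError` reported in #17555. A minimal sketch of the intended behavior follows — the `MessageRole` import and the `additional_kwargs` layout are assumptions for illustration, and the conversion call is left commented out because this diff does not show the util module's import path:

```python
from llama_index.core.base.llms.types import ChatMessage, MessageRole

# Assistant turn that carries a function call and no text content.
assistant_turn = ChatMessage(
    role=MessageRole.ASSISTANT,
    content="",
    additional_kwargs={
        "function_call": {
            "name": "get_current_weather",
            "arguments": '{ "location": "Boston, MA"}',
        }
    },
)

# Expected wire format after this PR (it matches the updated test fixture below):
expected = {
    "role": "assistant",
    "content": "",  # previously serialized as None -> "expected a string, got null"
    "function_call": {
        "name": "get_current_weather",
        "arguments": '{ "location": "Boston, MA"}',
    },
}

# from llama_index.llms.openai.utils import to_openai_message_dict  # path assumed, not shown in this diff
# assert to_openai_message_dict(assistant_turn) == expected
```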
@@ -59,15 +59,15 @@ def chat_messages_with_function_calling() -> List[ChatMessage]:


@pytest.fixture()
-def openi_message_dicts_with_function_calling() -> List[ChatCompletionMessageParam]:
+def openai_message_dicts_with_function_calling() -> List[ChatCompletionMessageParam]:
    return [
        {
            "role": "user",
            "content": "test question with functions",
        },
        {
            "role": "assistant",
-            "content": None,
+            "content": "",
            "function_call": {
                "name": "get_current_weather",
                "arguments": '{ "location": "Boston, MA"}',
@@ -158,19 +158,19 @@ def test_to_openai_message_dicts_basic_string() -> None:

def test_to_openai_message_dicts_function_calling(
    chat_messages_with_function_calling: List[ChatMessage],
-    openi_message_dicts_with_function_calling: List[ChatCompletionMessageParam],
+    openai_message_dicts_with_function_calling: List[ChatCompletionMessageParam],
) -> None:
    message_dicts = to_openai_message_dicts(
        chat_messages_with_function_calling,
    )
-    assert message_dicts == openi_message_dicts_with_function_calling
+    assert message_dicts == openai_message_dicts_with_function_calling


def test_from_openai_message_dicts_function_calling(
-    openi_message_dicts_with_function_calling: List[ChatCompletionMessageParam],
+    openai_message_dicts_with_function_calling: List[ChatCompletionMessageParam],
    chat_messages_with_function_calling: List[ChatMessage],
) -> None:
-    chat_messages = from_openai_message_dicts(openi_message_dicts_with_function_calling)  # type: ignore
+    chat_messages = from_openai_message_dicts(openai_message_dicts_with_function_calling)  # type: ignore

    # assert attributes match
    for chat_message, chat_message_with_function_calling in zip(
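The fixture rename (`openi_` → `openai_`) is purely cosmetic; the substantive test change is that the expected assistant entry now carries `"content": ""` rather than `None`. For anyone who wants to see the failure mode outside the test suite, a hedged reproduction sketch follows — the model name and conversation are placeholders, and whether a given OpenAI-compatible backend rejects `null` content for this exact shape varies, so treat it as illustrative of the report in #17555 rather than a guaranteed repro:

```python
import openai

client = openai.OpenAI()  # assumes OPENAI_API_KEY (or a compatible base_url) is configured

try:
    client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[
            {"role": "user", "content": "What's the weather in Boston?"},
            {
                "role": "assistant",
                "content": None,  # what the old serializer produced for blank content
                "function_call": {
                    "name": "get_current_weather",
                    "arguments": '{ "location": "Boston, MA"}',
                },
            },
            {"role": "function", "name": "get_current_weather", "content": "72F and sunny"},
        ],
    )
except openai.BadRequestError as err:
    # The report in #17555 surfaced as:
    #   Invalid value for 'content': expected a string, got null.
    print(err)
```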