diff --git a/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/utils.py b/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/utils.py index 6e48083d24960..220ad8d860ccb 100644 --- a/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/utils.py +++ b/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/utils.py @@ -2,7 +2,13 @@ import os from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union +import openai from deprecated import deprecated +from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall +from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall +from openai.types.chat.chat_completion_message import ChatCompletionMessage +from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob +from openai.types.completion_choice import Logprobs from tenacity import ( before_sleep_log, retry, @@ -14,20 +20,15 @@ ) from tenacity.stop import stop_base -import openai from llama_index.core.base.llms.generic_utils import get_from_param_or_env from llama_index.core.base.llms.types import ( ChatMessage, ImageBlock, LogProb, + MessageRole, TextBlock, ) from llama_index.core.bridge.pydantic import BaseModel -from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall -from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall -from openai.types.chat.chat_completion_message import ChatCompletionMessage -from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob -from openai.types.completion_choice import Logprobs DEFAULT_OPENAI_API_TYPE = "open_ai" DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1" @@ -292,9 +293,14 @@ def to_openai_message_dict( msg = f"Unsupported content block type: {type(block).__name__}" raise ValueError(msg) - # NOTE: Sending a blank string to openai will cause an error. 
- # This will commonly happen with tool calls. - content_txt = None if content_txt == "" else content_txt + # NOTE: Sending a null value (None) for Tool Message to OpenAI will cause an error + # It's only allowed to send None if it's an Assistant Message + # Reference: https://platform.openai.com/docs/api-reference/chat/create + content_txt = ( + None + if content_txt == "" and message.role == MessageRole.ASSISTANT + else content_txt + ) # NOTE: Despite what the openai docs say, if the role is ASSISTANT, SYSTEM # or TOOL, 'content' cannot be a list and must be string instead. @@ -302,10 +308,12 @@ # as the content. This will avoid breaking openai-like APIs. message_dict = { "role": message.role.value, - "content": content_txt - if message.role.value in ("assistant", "tool", "system") - or all(isinstance(block, TextBlock) for block in message.blocks) - else content, + "content": ( + content_txt + if message.role.value in ("assistant", "tool", "system") + or all(isinstance(block, TextBlock) for block in message.blocks) + else content + ), } # TODO: O1 models do not support system prompts diff --git a/llama-index-integrations/llms/llama-index-llms-openai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-openai/pyproject.toml index efd26805d9b51..e92a58e044b7d 100644 --- a/llama-index-integrations/llms/llama-index-llms-openai/pyproject.toml +++ b/llama-index-integrations/llms/llama-index-llms-openai/pyproject.toml @@ -29,7 +29,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-llms-openai" readme = "README.md" -version = "0.3.13" +version = "0.3.14" [tool.poetry.dependencies] python = ">=3.9,<4.0" diff --git a/llama-index-integrations/llms/llama-index-llms-openai/tests/test_openai_utils.py b/llama-index-integrations/llms/llama-index-llms-openai/tests/test_openai_utils.py index 9f7ec48c5e923..b829d30b1dcb5 100644 --- a/llama-index-integrations/llms/llama-index-llms-openai/tests/test_openai_utils.py +++ 
b/llama-index-integrations/llms/llama-index-llms-openai/tests/test_openai_utils.py @@ -59,7 +59,7 @@ def chat_messages_with_function_calling() -> List[ChatMessage]: @pytest.fixture() -def openi_message_dicts_with_function_calling() -> List[ChatCompletionMessageParam]: +def openai_message_dicts_with_function_calling() -> List[ChatCompletionMessageParam]: return [ { "role": "user", @@ -158,19 +158,19 @@ def test_to_openai_message_dicts_basic_string() -> None: def test_to_openai_message_dicts_function_calling( chat_messages_with_function_calling: List[ChatMessage], - openi_message_dicts_with_function_calling: List[ChatCompletionMessageParam], + openai_message_dicts_with_function_calling: List[ChatCompletionMessageParam], ) -> None: message_dicts = to_openai_message_dicts( chat_messages_with_function_calling, ) - assert message_dicts == openi_message_dicts_with_function_calling + assert message_dicts == openai_message_dicts_with_function_calling def test_from_openai_message_dicts_function_calling( - openi_message_dicts_with_function_calling: List[ChatCompletionMessageParam], + openai_message_dicts_with_function_calling: List[ChatCompletionMessageParam], chat_messages_with_function_calling: List[ChatMessage], ) -> None: - chat_messages = from_openai_message_dicts(openi_message_dicts_with_function_calling) # type: ignore + chat_messages = from_openai_message_dicts(openai_message_dicts_with_function_calling) # type: ignore # assert attributes match for chat_message, chat_message_with_function_calling in zip(