Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: Fix error message if there is empty string in Gemini message #164

Merged
Merged 2 commits into the base branch on Jan 3, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions aidial_adapter_vertexai/chat/gemini/inputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ async def _message_to_gemini_parts(
return await processors.process_message(message)

case Role.USER:
if content is None:
if not content:
adubovik marked this conversation as resolved.
Show resolved Hide resolved
raise ValidationError("User message content must be present")
return await processors.process_message(message)

Expand All @@ -99,7 +99,7 @@ async def _message_to_gemini_parts(
elif message.tool_calls is not None:
return [tool_call_to_part(call) for call in message.tool_calls]
else:
if content is None:
if not content:
raise ValidationError(
"Assistant message content must be present"
)
Expand Down
43 changes: 43 additions & 0 deletions tests/integration_tests/test_chat_completion.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
GET_WEATHER_FUNCTION,
GET_WEATHER_TOOL,
ChatCompletionResult,
ai,
ai_function,
ai_tools,
blue_pic,
Expand Down Expand Up @@ -149,6 +150,14 @@ def supports_text_input(deployment: ChatCompletionDeployment) -> bool:
return deployment != ChatCompletionDeployment.GEMINI_PRO_VISION_1


def supports_empty_content(deployment: ChatCompletionDeployment) -> bool:
    """Return True when *deployment* accepts chat messages whose content is an
    empty string (used by the empty-content test cases to decide between an
    expected success and an expected 422 ValidationError)."""
    bison_deployments = {
        ChatCompletionDeployment.CHAT_BISON_1,
        ChatCompletionDeployment.CHAT_BISON_2,
        ChatCompletionDeployment.CHAT_BISON_2_32K,
    }
    # Codechat models and the chat-bison family tolerate empty content;
    # everything else (e.g. Gemini) rejects it.
    return is_codechat(deployment) or deployment in bison_deployments


def is_vision_model(deployment: ChatCompletionDeployment) -> bool:
return deployment in [
ChatCompletionDeployment.GEMINI_PRO_VISION_1,
Expand Down Expand Up @@ -216,6 +225,40 @@ def test_case(
expected=for_all_choices(lambda s: "7" in s),
)

test_case(
name="empty assistant content",
messages=[
user("hi, what is your name?"),
ai(""),
user("please come again?"),
],
expected=(
expected_success
if supports_empty_content(deployment)
else ExpectedException(
type=UnprocessableEntityError,
message="Assistant message content must be present",
status_code=422,
)
),
)

test_case(
name="empty user content",
messages=[
user(""),
],
expected=(
expected_success
if supports_empty_content(deployment)
else ExpectedException(
type=UnprocessableEntityError,
message="User message content must be present",
status_code=422,
)
),
)

test_case(
name="max tokens 1",
max_tokens=1,
Expand Down
Loading