diff --git a/ddtrace/contrib/internal/openai/utils.py b/ddtrace/contrib/internal/openai/utils.py
index f5dfc10efe..0217b1e61d 100644
--- a/ddtrace/contrib/internal/openai/utils.py
+++ b/ddtrace/contrib/internal/openai/utils.py
@@ -89,7 +89,10 @@ def _extract_token_chunk(self, chunk):
         """Attempt to extract the token chunk (last chunk in the stream) from the streamed response."""
         if not self._dd_span._get_ctx_item("_dd.auto_extract_token_chunk"):
             return
-        choice = getattr(chunk, "choices", [None])[0]
+        choices = getattr(chunk, "choices", None)
+        if not choices:
+            return
+        choice = choices[0]
         if not getattr(choice, "finish_reason", None):
             # Only the second-last chunk in the stream with token usage enabled will have finish_reason set
             return
@@ -152,7 +155,10 @@ async def _extract_token_chunk(self, chunk):
         """Attempt to extract the token chunk (last chunk in the stream) from the streamed response."""
         if not self._dd_span._get_ctx_item("_dd.auto_extract_token_chunk"):
             return
-        choice = getattr(chunk, "choices", [None])[0]
+        choices = getattr(chunk, "choices", None)
+        if not choices:
+            return
+        choice = choices[0]
         if not getattr(choice, "finish_reason", None):
             return
         try:
diff --git a/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml b/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml
new file mode 100644
index 0000000000..cc8c1aa127
--- /dev/null
+++ b/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    LLM Observability: This fix resolves an issue where extracting token metadata from OpenAI streamed chat completion chunks raised an ``IndexError`` when a chunk's ``choices`` list was empty.
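
For context, a minimal sketch of why the old expression fails and what the guard changes. The ``[None]`` default in ``getattr(chunk, "choices", [None])[0]`` only protects against a *missing* attribute; a chunk whose ``choices`` list is present but empty (e.g. the usage-only final chunk OpenAI sends when ``stream_options={"include_usage": True}`` is set) slips past the default and indexing raises. The ``SimpleNamespace`` stand-in and the ``old_extract``/``new_extract`` helpers below are illustrative only, not part of ddtrace or the OpenAI SDK:

    from types import SimpleNamespace


    def old_extract(chunk):
        # Pre-fix logic: [None] only applies when the attribute is absent;
        # an empty choices list reaches [0] and raises IndexError.
        return getattr(chunk, "choices", [None])[0]


    def new_extract(chunk):
        # Post-fix logic: bail out on a missing, None, or empty choices
        # list before indexing into it.
        choices = getattr(chunk, "choices", None)
        if not choices:
            return None
        return choices[0]


    # A usage-only final chunk: token totals, but no choices.
    usage_chunk = SimpleNamespace(choices=[], usage=SimpleNamespace(total_tokens=42))

    try:
        old_extract(usage_chunk)
    except IndexError:
        print("old code: IndexError on empty choices")

    print("new code:", new_extract(usage_chunk))  # -> new code: None

Keeping the ``None`` default on ``getattr`` preserves the old expression's tolerance for chunks without a ``choices`` attribute, while the single truthiness check now also covers ``None`` and the empty list.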