From 713f2e7a77cc6f1b03abc064326062216e556143 Mon Sep 17 00:00:00 2001
From: lievan
Date: Fri, 24 Jan 2025 10:15:27 -0500
Subject: [PATCH 1/4] be more defensive on extracting stream

---
 ddtrace/contrib/internal/openai/utils.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/ddtrace/contrib/internal/openai/utils.py b/ddtrace/contrib/internal/openai/utils.py
index f5dfc10efef..d526f96b30a 100644
--- a/ddtrace/contrib/internal/openai/utils.py
+++ b/ddtrace/contrib/internal/openai/utils.py
@@ -89,7 +89,10 @@ def _extract_token_chunk(self, chunk):
         """Attempt to extract the token chunk (last chunk in the stream) from the streamed response."""
         if not self._dd_span._get_ctx_item("_dd.auto_extract_token_chunk"):
             return
-        choice = getattr(chunk, "choices", [None])[0]
+        choices = getattr(chunk, "choices", None)
+        if not choices:
+            return
+        choice = choices[0]
         if not getattr(choice, "finish_reason", None):
             # Only the second-last chunk in the stream with token usage enabled will have finish_reason set
             return
@@ -152,7 +155,10 @@ async def _extract_token_chunk(self, chunk):
         """Attempt to extract the token chunk (last chunk in the stream) from the streamed response."""
         if not self._dd_span._get_ctx_item("_dd.auto_extract_token_chunk"):
             return
-        choice = getattr(chunk, "choices", [None])[0]
+        choices = getattr(chunk, "choices", None)
+        if not choices:
+            return
+        choice = choices[0]
         if not getattr(choice, "finish_reason", None):
             return
         try:

From 3e6a79600cc07ed58b8b226da77c800c45944c39 Mon Sep 17 00:00:00 2001
From: lievan
Date: Fri, 24 Jan 2025 10:18:35 -0500
Subject: [PATCH 2/4] remove unneeded default

---
 ddtrace/contrib/internal/openai/utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ddtrace/contrib/internal/openai/utils.py b/ddtrace/contrib/internal/openai/utils.py
index d526f96b30a..0217b1e61d2 100644
--- a/ddtrace/contrib/internal/openai/utils.py
+++ b/ddtrace/contrib/internal/openai/utils.py
@@ -89,7 +89,7 @@ def _extract_token_chunk(self, chunk):
         """Attempt to extract the token chunk (last chunk in the stream) from the streamed response."""
         if not self._dd_span._get_ctx_item("_dd.auto_extract_token_chunk"):
             return
-        choices = getattr(chunk, "choices", None)
+        choices = getattr(chunk, "choices")
         if not choices:
             return
         choice = choices[0]
@@ -155,7 +155,7 @@ async def _extract_token_chunk(self, chunk):
         """Attempt to extract the token chunk (last chunk in the stream) from the streamed response."""
         if not self._dd_span._get_ctx_item("_dd.auto_extract_token_chunk"):
             return
-        choices = getattr(chunk, "choices", None)
+        choices = getattr(chunk, "choices")
         if not choices:
             return
         choice = choices[0]

From f1777764aec300afad2e4d8db0358ef830f6b768 Mon Sep 17 00:00:00 2001
From: lievan
Date: Fri, 24 Jan 2025 12:56:19 -0500
Subject: [PATCH 3/4] rel note

---
 releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml

diff --git a/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml b/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml
new file mode 100644
index 00000000000..557afb69fe8
--- /dev/null
+++ b/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    LLM Observability: This fix resolves an issue where extracting completion chunks from an openai streamed response caused an IndexError.
\ No newline at end of file

From 426d52aeacb5edf9425bf82ab60fdb4497fe6b5a Mon Sep 17 00:00:00 2001
From: lievan <42917263+lievan@users.noreply.github.com>
Date: Fri, 24 Jan 2025 16:44:11 -0500
Subject: [PATCH 4/4] Update
 releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml

Co-authored-by: Yun Kim <35776586+Yun-Kim@users.noreply.github.com>
---
 releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml b/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml
index 557afb69fe8..cc8c1aa127b 100644
--- a/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml
+++ b/releasenotes/notes/fix-token-extraction-0133808742374ef4.yaml
@@ -1,4 +1,4 @@
 ---
 fixes:
   - |
-    LLM Observability: This fix resolves an issue where extracting completion chunks from an openai streamed response caused an IndexError.
\ No newline at end of file
+    LLM Observability: This fix resolves an issue where extracting token metadata from openai streamed chat completion token chunks caused an IndexError.
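
A minimal, self-contained sketch of the failure mode this series fixes, under the assumption stated in the commits: when a chat completion is streamed with stream_options={"include_usage": True}, OpenAI sends a final usage-only chunk whose choices list is empty. The old getattr(chunk, "choices", [None])[0] only guarded a missing attribute; an empty list slipped past the default, and indexing [0] raised IndexError. ChatChunk, Choice, and Usage below are hypothetical stand-in types, not the real OpenAI response classes.

from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Usage:
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


@dataclass
class Choice:
    finish_reason: Optional[str] = None


@dataclass
class ChatChunk:
    choices: List[Choice] = field(default_factory=list)
    usage: Optional[Usage] = None


# With stream_options={"include_usage": True}, the last chunk of the stream
# carries the token counts in `usage` and has an empty `choices` list.
token_chunk = ChatChunk(choices=[], usage=Usage(10, 25, 35))

# Pre-patch extraction: the [None] default only covers a *missing* attribute,
# so the empty list is returned as-is and indexing [0] raises IndexError.
try:
    choice = getattr(token_chunk, "choices", [None])[0]
except IndexError:
    print("pre-patch: IndexError on the usage-only chunk")

# Post-patch extraction: bail out early when choices is missing or empty.
choices = getattr(token_chunk, "choices", None)
if not choices:
    print("post-patch: returns early instead of raising")

The explicit empty check is the real guard here; patch 2 then drops the getattr default on the grounds given in its subject ("remove unneeded default") that streamed chunks always expose a choices attribute.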