Skip to content

Commit

Permalink
Make LangChain metadata logging opt-in (#105)
Browse files Browse the repository at this point in the history
* Update version to 0.0.9
* Make metadata logging opt-in
  • Loading branch information
jamie256 authored Jul 24, 2023
1 parent 6f2e289 commit f13d6d0
Show file tree
Hide file tree
Showing 3 changed files with 9 additions and 7 deletions.
2 changes: 1 addition & 1 deletion .bumpversion.cfg
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.0.8
current_version = 0.0.9
tag = False
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\-(?P<release>[a-z]+)(?P<build>\d+))?
serialize =
Expand Down
12 changes: 7 additions & 5 deletions langkit/callback_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,9 +55,10 @@ def _generate_callback_wrapper(handler) -> Dict[str, partial]:


class LangKitCallback:
def __init__(self, logger: Logger):
def __init__(self, logger: Logger, log_metadata: bool = False):
"""Bind the configured logger for this LangKit callback handler.

:param logger: whylogs ``Logger`` used to record callback data.
:param log_metadata: opt-in flag; when ``True`` the handler also logs
    LLM metadata (invocation parameters, response latency, and token
    usage — see the ``self._log_metadata`` guards in ``on_llm_start``
    and ``on_llm_end``). Defaults to ``False``.
"""
self._logger = logger
# Stored once here; consulted by the on_llm_* callbacks to gate metadata logging.
self._log_metadata = log_metadata
diagnostic_logger.info(
f"Initialized LangKitCallback handler with configured whylogs Logger {logger}."
)
Expand All @@ -80,7 +81,7 @@ def on_llm_start(
invocation_params = kwargs.get("invocation_params")
run_id = kwargs.get("run_id", 0)
self.records[run_id] = {"prompts": prompts, "t0": time()}
if hasattr(self._logger, "_current_profile"):
if self._log_metadata and hasattr(self._logger, "_current_profile"):
profile = self._logger._current_profile
if invocation_params is not None:
profile.track(
Expand All @@ -96,8 +97,9 @@ def on_llm_end(self, response: Any, **kwargs: Any) -> None:
run_id = kwargs.get("run_id", 0)
llm_record = self.records.get(run_id)
if llm_record is not None:
response_latency_s = time() - llm_record["t0"]
self._logger.log({"response_latency_s": response_latency_s})
if self._log_metadata:
response_latency_s = time() - llm_record["t0"]
self._logger.log({"response_latency_s": response_latency_s})
index = 0
prompts = llm_record["prompts"]
for generations in response.generations:
Expand All @@ -107,7 +109,7 @@ def on_llm_end(self, response: Any, **kwargs: Any) -> None:
self._logger.log(response_record)
index = index + 1

if hasattr(response, "llm_output"):
if self._log_metadata and hasattr(response, "llm_output"):
llm_output = response.llm_output
token_usage = llm_output.get("token_usage")
if token_usage:
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langkit"
version = "0.0.8"
version = "0.0.9"
description = "A collection of text metric udfs for whylogs profiling and monitoring in WhyLabs"
authors = ["WhyLabs.ai <[email protected]>"]
license = "Apache-2.0"
Expand Down

0 comments on commit f13d6d0

Please sign in to comment.