Add TemplateNotFoundError #45

Open · wants to merge 4 commits into main
12 changes: 12 additions & 0 deletions .editorconfig
@@ -0,0 +1,12 @@
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

[*]
indent_style = space
indent_size = 4
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
103 changes: 52 additions & 51 deletions lunary/__init__.py
@@ -21,6 +21,7 @@
from .thread import Thread
from .utils import clean_nones, create_uuid_from_string
from .config import get_config, set_config
from .errors import TemplateNotFoundError

from .users import user_ctx, user_props_ctx, identify # DO NOT REMOVE `identify`` import
from .tags import tags_ctx, tags # DO NOT REMOVE `tags` import
@@ -35,7 +36,7 @@

event_queue_ctx = ContextVar("event_queue_ctx")
event_queue_ctx.set(EventQueue())
queue = event_queue_ctx.get()
queue = event_queue_ctx.get()

provider = TracerProvider()
trace.set_tracer_provider(provider)
@@ -84,14 +85,14 @@ def track_event(
):
try:
config = get_config()
app_id = app_id or config.app_id
app_id = app_id or config.app_id

if not app_id:
return warnings.warn("LUNARY_PUBLIC_KEY is not set, not sending events")

run_ctx.set(run_id) # done before run_id transformation because the context will be used to pass the id in track_event, so run_id will be transformed again
parent_run_id = get_parent_run_id(parent_run_id, run_type, app_id=app_id, run_id=run_id, is_openai=is_openai)
run_id = str(create_uuid_from_string(str(run_id) + str(app_id))) # We need to generate a UUID that is unique by run_id / project_id pair in case of multiple concurrent callback handler use
run_ctx.set(run_id) # done before run_id transformation because the context will be used to pass the id in track_event, so run_id will be transformed again
parent_run_id = get_parent_run_id(parent_run_id, run_type, app_id=app_id, run_id=run_id, is_openai=is_openai)
run_id = str(create_uuid_from_string(str(run_id) + str(app_id))) # We need to generate a UUID that is unique by run_id / project_id pair in case of multiple concurrent callback handler use

event = {
"event": event_name,
@@ -102,7 +103,7 @@ def track_event(
"tags": tags or tags_ctx.get(),
"threadTags": thread_tags,
"runId": run_id,
"parentRunId": parent_run_id,
"parentRunId": parent_run_id,
"timestamp": timestamp or datetime.now(timezone.utc).isoformat(),
"message": message,
"input": input,
@@ -126,15 +127,12 @@ def track_event(
if config.verbose:
event_copy = clean_nones(copy.deepcopy(event))
logger.info(f"\nAdd event: {jsonpickle.encode(event_copy, unpicklable=False, indent=4)}\n")

except Exception as e:
logger.exception("Error in `track_event`", e)




def handle_internal_error(e):
logging.info("Error: ", e)
logger.info("Error: ", e)


def stream_handler(fn, run_id, name, type, *args, **kwargs):
@@ -538,7 +536,7 @@ def monitor(object):
output_parser=OpenAIUtils.parse_output,
)
except Exception as e:
logging.info(
logger.info(
"Please use `lunary.monitor(openai)` or `lunary.monitor(client)` after setting the OpenAI api key"
)

@@ -550,7 +548,7 @@ def monitor(object):
output_parser=OpenAIUtils.parse_output,
)
else:
logging.info(
logger.info(
"Unknown OpenAI client. You can only use `lunary.monitor(openai)` or `lunary.monitor(client)`"
)
elif openai_version < parse_version("1.0.0"):
@@ -569,7 +567,7 @@ def monitor(object):
)

except PackageNotFoundError:
logging.info("The `openai` package is not installed")
logger.info("The `openai` package is not installed")


def agent(name=None, user_id=None, user_props=None, tags=None):
@@ -630,7 +628,7 @@ def decorator(fn):
import requests
from langchain_core.agents import AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.documents import Document
from langchain_core.outputs import LLMResult
from langchain_core.load import dumps
@@ -693,7 +691,7 @@ def identify(user_id: str, user_props: Any = None) -> UserContextManager:
def _serialize(data: Any):
if not data:
return None

if hasattr(data, 'messages'):
return _serialize(data.messages)
if isinstance(data, BaseMessage) or isinstance(data, BaseMessageChunk):
@@ -718,7 +716,7 @@ def _parse_input(raw_input: Any) -> Any:

return serialized



def _parse_output(raw_output: dict) -> Any:
serialized = _serialize(raw_output)
@@ -819,8 +817,8 @@ def __init__(

except ImportError:
logger.warning(
"""To use the Lunary callback handler you need to
have the `lunary` Python package installed. Please install it
"""To use the Lunary callback handler you need to
have the `lunary` Python package installed. Please install it
with `pip install lunary`"""
)
self.__has_valid_config = False
@@ -838,15 +836,15 @@ def __init__(
self.__has_valid_config = True


self.__app_id = app_id or config.app_id
self.__app_id = app_id or config.app_id
if self.__app_id is None:
logger.warning(
"""app_id must be provided either as an argument or
"""app_id must be provided either as an argument or
as an environment variable"""
)
self.__has_valid_config = False
self.queue = queue

self.queue = queue

if self.__has_valid_config is False:
return None
@@ -985,7 +983,7 @@ def on_chat_model_start(
user_props=user_props,
app_id=self.__app_id,
callback_queue=self.queue,
runtime="langchain-py"
runtime="langchain-py"
)
except Exception as e:
logger.exception(f"An error occurred in `on_chat_model_start`: {e}")
@@ -1027,7 +1025,7 @@ def on_llm_end(
},
app_id=self.__app_id,
callback_queue=self.queue,
runtime="langchain-py"
runtime="langchain-py"
)
except Exception as e:
logger.exception(f"An error occurred in `on_llm_end`: {e}")
@@ -1074,7 +1072,7 @@ def on_tool_start(
user_props=user_props,
app_id=self.__app_id,
callback_queue=self.queue,
runtime="langchain-py"
runtime="langchain-py"
)
except Exception as e:
logger.exception(f"An error occurred in `on_tool_start`: {e}")
@@ -1100,7 +1098,7 @@ def on_tool_end(
output=output,
app_id=self.__app_id,
callback_queue=self.queue,
runtime="langchain-py"
runtime="langchain-py"
)
except Exception as e:
logger.exception(f"An error occurred in `on_tool_end`: {e}")
@@ -1363,7 +1361,7 @@ def on_retriever_end(
)
except Exception as e:
logger.exception(f"An error occurred in `on_retriever_end`: {e}")

def on_retriever_error(
self,
error: BaseException,
@@ -1387,7 +1385,7 @@ def on_retriever_error(
logger.exception(f"An error occurred in `on_retriever_error`: {e}")

except Exception as e:
# Do not raise error for users that do not have Langchain installed
# Do not raise error for users that do not have Langchain installed
pass


@@ -1413,7 +1411,7 @@ def track_feedback(run_id: str, feedback: Dict[str, Any]):
def get_raw_template(slug: str, app_id: str | None = None, api_url: str | None = None):
config = get_config()
token = app_id or config.app_id
api_url = api_url or config.api_url
api_url = api_url or config.api_url

global templateCache
now = time.time() * 1000
@@ -1427,8 +1425,8 @@ def get_raw_template(slug: str, app_id: str | None = None, api_url: str | None
'Content-Type': 'application/json'
}

response = requests.get(f"{api_url}/v1/template_versions/latest?slug={slug}",
headers=headers,
response = requests.get(f"{api_url}/v1/template_versions/latest?slug={slug}",
headers=headers,
verify=config.ssl_verify)
if not response.ok:
logger.exception(f"Error fetching template: {response.status_code} - {response.text}")
@@ -1440,7 +1438,7 @@ def get_raw_template(slug: str, app_id: str | None = None, api_url: str | None
async def get_raw_template_async(slug: str, app_id: str | None = None, api_url: str | None = None):
config = get_config()
token = app_id or config.app_id
api_url = api_url or config.api_url
api_url = api_url or config.api_url


global templateCache
@@ -1471,7 +1469,7 @@ def render_template(slug: str, data = {}):
raw_template = get_raw_template(slug)

if(raw_template.get('message') == 'Template not found, is the project ID correct?'):
raise Exception("Template not found, are the project ID and slug correct?")
raise TemplateNotFoundError()

template_id = copy.deepcopy(raw_template['id'])
content = copy.deepcopy(raw_template['content'])
@@ -1497,22 +1495,23 @@ def render_template(slug: str, data = {}):
message["content"] = chevron.render(message["content"], data)
messages.append(message)
result = {
"messages": messages,
"messages": messages,
"extra_headers": extra_headers,
**extra
}

return result

except Exception as e:
logging.exception(f"Error rendering template {e}")
logger.exception(f"Error rendering template {e}")
raise

async def render_template_async(slug: str, data={}):
try:
raw_template = await get_raw_template_async(slug)

if(raw_template.get('message') == 'Template not found, is the project ID correct?'):
raise Exception("Template not found, are the project ID and slug correct?")

raise TemplateNotFoundError()

template_id = copy.deepcopy(raw_template['id'])
content = copy.deepcopy(raw_template['content'])
@@ -1547,7 +1546,8 @@ async def render_template_async(slug: str, data={}):

return result
except Exception as e:
logging.exception(f"Error rendering template {e}")
logger.exception(f"Error rendering template {e}")
raise

def get_langchain_template(slug: str):
try:
@@ -1556,7 +1556,7 @@ def get_langchain_template(slug: str):
raw_template = get_raw_template(slug)

if(raw_template.get('message') == 'Template not found, is the project ID correct?'):
raise Exception("Template not found, are the project ID and slug correct?")
raise TemplateNotFoundError()

content = copy.deepcopy(raw_template['content'])

Expand All @@ -1577,7 +1577,7 @@ def replace_double_braces(text):

messages = []

# Return array of messages:
# Return array of messages:
# [
# ("system", "You are a helpful AI bot. Your name is {name}."),
# ("human", "Hello, how are you doing?"),
@@ -1590,9 +1590,10 @@ def replace_double_braces(text):
template = ChatPromptTemplate.from_messages(messages)

return template

except Exception as e:
logger.exception(f"Error fetching template: {e}")
logger.exception(f"Error fetching template {e}")
raise

async def get_langchain_template_async(slug):
try:
@@ -1601,7 +1602,7 @@ async def get_langchain_template_async(slug):
raw_template = await get_raw_template_async(slug)

if raw_template.get('message') == 'Template not found, is the project ID correct?':
raise Exception("Template not found, are the project ID and slug correct?")
raise TemplateNotFoundError()

content = copy.deepcopy(raw_template['content'])

@@ -1638,6 +1639,7 @@ def replace_double_braces(text):

except Exception as e:
logger.exception(f"Error fetching template: {e}")
raise

import humps

@@ -1649,8 +1651,8 @@ def __init__(self, d=None):

def get_dataset(slug: str, app_id: str | None = None, api_url: str | None = None):
config = get_config()
token = app_id or config.app_id
api_url = api_url or config.api_url
token = app_id or config.app_id
api_url = api_url or config.api_url

try:
url = f"{api_url}/v1/datasets/{slug}"
@@ -1664,20 +1666,19 @@ def get_dataset(slug: str, app_id: str | None = None, api_url: str | None):
dataset = humps.decamelize(dataset)
items_data = dataset.get('items', [])
items = [DatasetItem(d=item) for item in items_data]

return items
else:
raise Exception(f"Status code: {response.status_code}")

except Exception as e:
logger.exception(f"Error fetching dataset {e}")


raise

def evaluate(checklist, input, output, ideal_output=None, context=None, model=None, duration=None, tags=None, app_id=None, api_url=None):
config = get_config()
token = app_id or config.app_id
api_url = api_url or config.api_url
token = app_id or config.app_id
api_url = api_url or config.api_url

try:
url = f"{api_url}/v1/evaluations/run"
7 changes: 7 additions & 0 deletions lunary/errors.py
@@ -0,0 +1,7 @@
TEMPLATE_NOT_FOUND_MESSAGE = "Template not found, are the project ID and slug correct?"

class TemplateNotFoundError(Exception):
"""Custom exception for template not found errors."""
def __init__(self, message=TEMPLATE_NOT_FOUND_MESSAGE):
self.message = message
super().__init__(self.message)
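
For context, a minimal sketch of how a caller could handle the new exception when rendering a template; the slug and data values below are placeholders and not part of this PR:

import lunary
from lunary.errors import TemplateNotFoundError  # also re-exported from lunary/__init__.py

try:
    # "onboarding-prompt" is an illustrative slug; use one that exists in your project
    prompt = lunary.render_template("onboarding-prompt", {"name": "Ada"})
except TemplateNotFoundError:
    # raised instead of a bare Exception when the API reports the template is missing
    prompt = None

Because TemplateNotFoundError subclasses Exception and keeps the previous error text as its default message, existing code that catches a broad Exception continues to work, while callers can now distinguish a missing template from other rendering failures.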