From 69966e49adeaa87063a9f08d407e08165c75da3e Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Tue, 13 Aug 2024 10:36:36 +0100 Subject: [PATCH] fix(chalice): fixed predefined metrics refactor(chalice): refactored schemas refactor(chalice): refactored routers refactor(chalice): refactored unprocessed sessions --- api/app.py | 3 +- api/auth/auth_jwt.py | 5 +- .../core/custom_metrics_predefined.py | 5 +- api/chalicelib/core/unprocessed_sessions.py | 18 +++ api/chalicelib/utils/TimeUTC.py | 2 +- api/routers/core.py | 18 +-- api/routers/core_dynamic.py | 37 ++--- api/schemas/__init__.py | 2 +- api/schemas/overrides.py | 2 +- api/schemas/schemas.py | 137 ++++++++++-------- ee/api/.gitignore | 2 + ee/api/app.py | 9 +- ee/api/auth/auth_jwt.py | 7 +- ee/api/clean-dev.sh | 4 +- ee/api/routers/core_dynamic.py | 40 ++--- ee/api/schemas/assist_stats_schema.py | 4 +- ee/api/schemas/schemas_ee.py | 6 +- 17 files changed, 155 insertions(+), 146 deletions(-) create mode 100644 api/chalicelib/core/unprocessed_sessions.py diff --git a/api/app.py b/api/app.py index db1f99bf59..41775206ee 100644 --- a/api/app.py +++ b/api/app.py @@ -9,6 +9,7 @@ from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware from psycopg import AsyncConnection +from psycopg.rows import dict_row from starlette.responses import StreamingResponse from chalicelib.utils import helper @@ -20,7 +21,7 @@ loglevel = config("LOGLEVEL", default=logging.WARNING) print(f">Loglevel set to: {loglevel}") logging.basicConfig(level=loglevel) -from psycopg.rows import dict_row + class ORPYAsyncConnection(AsyncConnection): diff --git a/api/auth/auth_jwt.py b/api/auth/auth_jwt.py index 779dd825ae..4fd7191a9a 100644 --- a/api/auth/auth_jwt.py +++ b/api/auth/auth_jwt.py @@ -45,9 +45,8 @@ async def __call__(self, request: Request) -> Optional[schemas.CurrentContext]: raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authentication scheme.") jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials) - auth_exists = jwt_payload is not None \ - and users.auth_exists(user_id=jwt_payload.get("userId", -1), - jwt_iat=jwt_payload.get("iat", 100)) + auth_exists = jwt_payload is not None and users.auth_exists(user_id=jwt_payload.get("userId", -1), + jwt_iat=jwt_payload.get("iat", 100)) if jwt_payload is None \ or jwt_payload.get("iat") is None or jwt_payload.get("aud") is None \ or not auth_exists: diff --git a/api/chalicelib/core/custom_metrics_predefined.py b/api/chalicelib/core/custom_metrics_predefined.py index 3236d566fb..62f890194f 100644 --- a/api/chalicelib/core/custom_metrics_predefined.py +++ b/api/chalicelib/core/custom_metrics_predefined.py @@ -1,9 +1,6 @@ import logging from typing import Union -import logging -from typing import Union - import schemas from chalicelib.core import metrics @@ -30,7 +27,7 @@ def get_metric(key: Union[schemas.MetricOfWebVitals, schemas.MetricOfErrors, \ schemas.MetricOfWebVitals.COUNT_REQUESTS: metrics.get_top_metrics_count_requests, schemas.MetricOfWebVitals.AVG_TIME_TO_RENDER: metrics.get_time_to_render, schemas.MetricOfWebVitals.AVG_USED_JS_HEAP_SIZE: metrics.get_memory_consumption, - schemas.MetricOfWebVitals.avg_cpu: metrics.get_avg_cpu, + schemas.MetricOfWebVitals.AVG_CPU: metrics.get_avg_cpu, schemas.MetricOfWebVitals.AVG_FPS: metrics.get_avg_fps, schemas.MetricOfErrors.IMPACTED_SESSIONS_BY_JS_ERRORS: metrics.get_impacted_sessions_by_js_errors, schemas.MetricOfErrors.DOMAINS_ERRORS_4XX: 
metrics.get_domains_errors_4xx, diff --git a/api/chalicelib/core/unprocessed_sessions.py b/api/chalicelib/core/unprocessed_sessions.py new file mode 100644 index 0000000000..37fb9bdb04 --- /dev/null +++ b/api/chalicelib/core/unprocessed_sessions.py @@ -0,0 +1,18 @@ +import logging + +from chalicelib.core import sessions, assist + +logger = logging.getLogger(__name__) + + +def check_exists(project_id, session_id, not_found_response) -> (int | None, dict | None): + if session_id is None or not session_id.isnumeric(): + return session_id, not_found_response + else: + session_id = int(session_id) + if not sessions.session_exists(project_id=project_id, session_id=session_id): + logger.warning(f"{project_id}/{session_id} not found in DB.") + if not assist.session_exists(project_id=project_id, session_id=session_id): + logger.warning(f"{project_id}/{session_id} not found in Assist.") + return session_id, not_found_response + return session_id, None diff --git a/api/chalicelib/utils/TimeUTC.py b/api/chalicelib/utils/TimeUTC.py index d399e16511..9d3f1ebb2b 100644 --- a/api/chalicelib/utils/TimeUTC.py +++ b/api/chalicelib/utils/TimeUTC.py @@ -28,7 +28,7 @@ def __now(delta_days=0, delta_minutes=0, delta_seconds=0): .astimezone(UTC_ZI) @staticmethod - def now(delta_days=0, delta_minutes=0, delta_seconds=0): + def now(delta_days: int = 0, delta_minutes: int = 0, delta_seconds: int = 0) -> int: return int(TimeUTC.__now(delta_days=delta_days, delta_minutes=delta_minutes, delta_seconds=delta_seconds).timestamp() * 1000) diff --git a/api/routers/core.py b/api/routers/core.py index 9a5cee24e8..59ff60ccd2 100644 --- a/api/routers/core.py +++ b/api/routers/core.py @@ -4,13 +4,11 @@ from fastapi import Depends, Body, BackgroundTasks import schemas -from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assignments, projects, \ - alerts, issues, integrations_manager, metadata, \ - log_tool_elasticsearch, log_tool_datadog, \ - log_tool_stackdriver, reset_password, log_tool_cloudwatch, log_tool_sentry, log_tool_sumologic, log_tools, sessions, \ - log_tool_newrelic, announcements, log_tool_bugsnag, weekly_report, integration_jira_cloud, integration_github, \ - assist, mobile, tenants, boarding, notifications, webhook, users, \ - custom_metrics, saved_search, integrations_global, tags, autocomplete +from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assignments, projects, alerts, issues, \ + integrations_manager, metadata, log_tool_elasticsearch, log_tool_datadog, log_tool_stackdriver, reset_password, \ + log_tool_cloudwatch, log_tool_sentry, log_tool_sumologic, log_tools, sessions, log_tool_newrelic, announcements, \ + log_tool_bugsnag, weekly_report, integration_jira_cloud, integration_github, assist, mobile, tenants, boarding, \ + notifications, webhook, users, custom_metrics, saved_search, integrations_global, tags, autocomplete from chalicelib.core.collaboration_msteams import MSTeams from chalicelib.core.collaboration_slack import Slack from or_dependencies import OR_context, OR_role @@ -556,7 +554,7 @@ def get_all_alerts(projectId: int, context: schemas.CurrentContext = Depends(OR_ @app.get('/{projectId}/alerts/triggers', tags=["alerts", "customMetrics"]) def get_alerts_triggers(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): - return {"data": alerts.get_predefined_values() \ + return {"data": alerts.get_predefined_values() + custom_metrics.get_series_for_alert(project_id=projectId, user_id=context.user_id)} @@ -839,8 +837,8 @@ def 
edit_msteams_integration(webhookId: int, data: schemas.EditCollaborationSche if old["endpoint"] != data.url.unicode_string(): if not MSTeams.say_hello(data.url.unicode_string()): return { - "errors": [ - "We couldn't send you a test message on your Microsoft Teams channel. Please verify your webhook url."] + "errors": ["We couldn't send you a test message on your Microsoft Teams channel. " + "Please verify your webhook url."] } return {"data": webhook.update(tenant_id=context.tenant_id, webhook_id=webhookId, changes={"name": data.name, "endpoint": data.url.unicode_string()})} diff --git a/api/routers/core_dynamic.py b/api/routers/core_dynamic.py index b0ee025ce2..43bee36961 100644 --- a/api/routers/core_dynamic.py +++ b/api/routers/core_dynamic.py @@ -1,3 +1,4 @@ +import logging from typing import Optional, Union from decouple import config @@ -6,12 +7,13 @@ from starlette.responses import RedirectResponse, FileResponse, JSONResponse, Response import schemas +from chalicelib.core import scope from chalicelib.core import sessions, errors, errors_viewed, errors_favorite, sessions_assignments, heatmaps, \ sessions_favorite, assist, sessions_notes, sessions_replay, signup, feature_flags from chalicelib.core import sessions_viewed from chalicelib.core import tenants, users, projects, license +from chalicelib.core import unprocessed_sessions from chalicelib.core import webhook -from chalicelib.core import scope from chalicelib.core.collaboration_slack import Slack from chalicelib.utils import captcha, smtp from chalicelib.utils import helper @@ -19,6 +21,7 @@ from or_dependencies import OR_context, OR_role from routers.base import get_routers +logger = logging.getLogger(__name__) public_app, app, app_apikey = get_routers() COOKIE_PATH = "/api/refresh" @@ -249,7 +252,7 @@ def session_ids_search(projectId: int, data: schemas.SessionsSearchPayloadSchema @app.get('/{projectId}/sessions/{sessionId}/first-mob', tags=["sessions", "replay"]) -def get_first_mob_file(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks, +def get_first_mob_file(projectId: int, sessionId: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)): if not sessionId.isnumeric(): return {"errors": ["session not found"]} @@ -368,16 +371,10 @@ def get_live_session(projectId: int, sessionId: str, background_tasks: Backgroun def get_live_session_replay_file(projectId: int, sessionId: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)): not_found = {"errors": ["Replay file not found"]} - if not sessionId.isnumeric(): - return not_found - else: - sessionId = int(sessionId) - if not sessions.session_exists(project_id=projectId, session_id=sessionId): - print(f"{projectId}/{sessionId} not found in DB.") - if not assist.session_exists(project_id=projectId, session_id=sessionId): - print(f"{projectId}/{sessionId} not found in Assist.") - return not_found - + sessionId, err = unprocessed_sessions.check_exists(project_id=projectId, session_id=sessionId, + not_found_response=not_found) + if err is not None: + return err path = assist.get_raw_mob_by_id(project_id=projectId, session_id=sessionId) if path is None: return not_found @@ -389,19 +386,13 @@ def get_live_session_replay_file(projectId: int, sessionId: Union[int, str], def get_live_session_devtools_file(projectId: int, sessionId: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)): not_found = {"errors": ["Devtools file not found"]} - if not sessionId.isnumeric(): - return not_found - else: - sessionId = 
int(sessionId) - if not sessions.session_exists(project_id=projectId, session_id=sessionId): - print(f"{projectId}/{sessionId} not found in DB.") - if not assist.session_exists(project_id=projectId, session_id=sessionId): - print(f"{projectId}/{sessionId} not found in Assist.") - return not_found - + sessionId, err = unprocessed_sessions.check_exists(project_id=projectId, session_id=sessionId, + not_found_response=not_found) + if err is not None: + return err path = assist.get_raw_devtools_by_id(project_id=projectId, session_id=sessionId) if path is None: - return {"errors": ["Devtools file not found"]} + return not_found return FileResponse(path=path, media_type="application/octet-stream") diff --git a/api/schemas/__init__.py b/api/schemas/__init__.py index d3ba6afc17..6013d7c2b6 100644 --- a/api/schemas/__init__.py +++ b/api/schemas/__init__.py @@ -1,2 +1,2 @@ from .schemas import * -from . import overrides as _overrides \ No newline at end of file +from . import overrides as _overrides diff --git a/api/schemas/overrides.py b/api/schemas/overrides.py index fccb13cbc3..51c81365cb 100644 --- a/api/schemas/overrides.py +++ b/api/schemas/overrides.py @@ -34,6 +34,6 @@ def has_value(cls, value) -> bool: class ORUnion: - def __new__(self, union_types: Union[AnyType], discriminator: str) -> T: + def __new__(cls, union_types: Union[AnyType], discriminator: str) -> T: return lambda **args: TypeAdapter(Annotated[union_types, Field(discriminator=discriminator)]) \ .validate_python(args) diff --git a/api/schemas/schemas.py b/api/schemas/schemas.py index a7fedc537f..c2e7bbea01 100644 --- a/api/schemas/schemas.py +++ b/api/schemas/schemas.py @@ -1,14 +1,14 @@ from typing import Annotated, Any from typing import Optional, List, Union, Literal -from pydantic import Field, EmailStr, HttpUrl, SecretStr, AnyHttpUrl, validator +from pydantic import Field, EmailStr, HttpUrl, SecretStr, AnyHttpUrl from pydantic import field_validator, model_validator, computed_field +from pydantic.functional_validators import BeforeValidator from chalicelib.utils.TimeUTC import TimeUTC from .overrides import BaseModel, Enum, ORUnion from .transformers_validators import transform_email, remove_whitespace, remove_duplicate_values, single_to_list, \ force_is_event, NAME_PATTERN, int_to_string -from pydantic.functional_validators import BeforeValidator def transform_old_filter_type(cls, values): @@ -162,7 +162,7 @@ class _TimedSchema(BaseModel): endTimestamp: int = Field(default=None) @model_validator(mode='before') - def transform_time(cls, values): + def transform_time(self, values): if values.get("startTimestamp") is None and values.get("startDate") is not None: values["startTimestamp"] = values["startDate"] if values.get("endTimestamp") is None and values.get("endDate") is not None: @@ -170,7 +170,7 @@ def transform_time(cls, values): return values @model_validator(mode='after') - def __time_validator(cls, values): + def __time_validator(self, values): if values.startTimestamp is not None: assert 0 <= values.startTimestamp, "startTimestamp must be greater or equal to 0" if values.endTimestamp is not None: @@ -435,7 +435,7 @@ class AlertSchema(BaseModel): series_id: Optional[int] = Field(default=None, doc_hidden=True) @model_validator(mode="after") - def transform_alert(cls, values): + def transform_alert(self, values): values.series_id = None if isinstance(values.query.left, int): values.series_id = values.query.left @@ -626,7 +626,7 @@ class SessionSearchEventSchema2(BaseModel): _transform = 
model_validator(mode='before')(transform_old_filter_type) @model_validator(mode='after') - def event_validator(cls, values): + def event_validator(self, values): if isinstance(values.type, PerformanceEventType): if values.type == PerformanceEventType.FETCH_FAILED: return values @@ -666,7 +666,7 @@ class SessionSearchFilterSchema(BaseModel): _single_to_list_values = field_validator('value', mode='before')(single_to_list) @model_validator(mode='before') - def _transform_data(cls, values): + def _transform_data(self, values): if values.get("source") is not None: if isinstance(values["source"], list): if len(values["source"]) == 0: @@ -678,20 +678,20 @@ def _transform_data(cls, values): return values @model_validator(mode='after') - def filter_validator(cls, values): + def filter_validator(self, values): if values.type == FilterType.METADATA: assert values.source is not None and len(values.source) > 0, \ "must specify a valid 'source' for metadata filter" elif values.type == FilterType.ISSUE: - for v in values.value: + for i, v in enumerate(values.value): if IssueType.has_value(v): - v = IssueType(v) + values.value[i] = IssueType(v) else: raise ValueError(f"value should be of type IssueType for {values.type} filter") elif values.type == FilterType.PLATFORM: - for v in values.value: + for i, v in enumerate(values.value): if PlatformType.has_value(v): - v = PlatformType(v) + values.value[i] = PlatformType(v) else: raise ValueError(f"value should be of type PlatformType for {values.type} filter") elif values.type == FilterType.EVENTS_COUNT: @@ -730,8 +730,8 @@ def add_missing_is_event(values: dict): # this type is created to allow mixing events&filters and specifying a discriminator -GroupedFilterType = Annotated[Union[SessionSearchFilterSchema, SessionSearchEventSchema2], \ - Field(discriminator='is_event'), BeforeValidator(add_missing_is_event)] +GroupedFilterType = Annotated[Union[SessionSearchFilterSchema, SessionSearchEventSchema2], +Field(discriminator='is_event'), BeforeValidator(add_missing_is_event)] class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema): @@ -744,7 +744,7 @@ class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema): bookmarked: bool = Field(default=False) @model_validator(mode="before") - def transform_order(cls, values): + def transform_order(self, values): if values.get("sort") is None: values["sort"] = "startTs" @@ -755,7 +755,7 @@ def transform_order(cls, values): return values @model_validator(mode="before") - def add_missing_attributes(cls, values): + def add_missing_attributes(self, values): # in case isEvent is wrong: for f in values.get("filters") or []: if EventType.has_value(f["type"]) and not f.get("isEvent"): @@ -770,7 +770,7 @@ def add_missing_attributes(cls, values): return values @model_validator(mode="before") - def remove_wrong_filter_values(cls, values): + def remove_wrong_filter_values(self, values): for f in values.get("filters", []): vals = [] for v in f.get("value", []): @@ -780,7 +780,7 @@ def remove_wrong_filter_values(cls, values): return values @model_validator(mode="after") - def split_filters_events(cls, values): + def split_filters_events(self, values): n_filters = [] n_events = [] for v in values.filters: @@ -793,7 +793,7 @@ def split_filters_events(cls, values): return values @field_validator("filters", mode="after") - def merge_identical_filters(cls, values): + def merge_identical_filters(self, values): # ignore 'issue' type as it could be used for step-filters and tab-filters at the same time i = 0 while i < 
len(values): @@ -853,7 +853,7 @@ class PathAnalysisSubFilterSchema(BaseModel): _remove_duplicate_values = field_validator('value', mode='before')(remove_duplicate_values) @model_validator(mode="before") - def __force_is_event(cls, values): + def __force_is_event(self, values): values["isEvent"] = True return values @@ -879,8 +879,8 @@ class _ProductAnalyticsEventFilter(BaseModel): # this type is created to allow mixing events&filters and specifying a discriminator for PathAnalysis series filter -ProductAnalyticsFilter = Annotated[Union[_ProductAnalyticsFilter, _ProductAnalyticsEventFilter], \ - Field(discriminator='is_event')] +ProductAnalyticsFilter = Annotated[Union[_ProductAnalyticsFilter, _ProductAnalyticsEventFilter], +Field(discriminator='is_event')] class PathAnalysisSchema(_TimedSchema, _PaginatedSchema): @@ -1033,7 +1033,7 @@ class MetricOfPathAnalysis(str, Enum): # class CardSessionsSchema(SessionsSearchPayloadSchema): class CardSessionsSchema(_TimedSchema, _PaginatedSchema): startTimestamp: int = Field(default=TimeUTC.now(-7)) - endTimestamp: int = Field(defautl=TimeUTC.now()) + endTimestamp: int = Field(default=TimeUTC.now()) density: int = Field(default=7, ge=1, le=200) series: List[CardSeriesSchema] = Field(default=[]) @@ -1047,7 +1047,7 @@ class CardSessionsSchema(_TimedSchema, _PaginatedSchema): (force_is_event(events_enum=[EventType, PerformanceEventType])) @model_validator(mode="before") - def remove_wrong_filter_values(cls, values): + def remove_wrong_filter_values(self, values): for f in values.get("filters", []): vals = [] for v in f.get("value", []): @@ -1057,7 +1057,7 @@ def remove_wrong_filter_values(cls, values): return values @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): if values.get("startTimestamp") is None: values["startTimestamp"] = TimeUTC.now(-7) @@ -1067,7 +1067,7 @@ def __enforce_default(cls, values): return values @model_validator(mode="after") - def __enforce_default_after(cls, values): + def __enforce_default_after(self, values): for s in values.series: if s.filter is not None: s.filter.limit = values.limit @@ -1078,7 +1078,7 @@ def __enforce_default_after(cls, values): return values @model_validator(mode="after") - def __merge_out_filters_with_series(cls, values): + def __merge_out_filters_with_series(self, values): if len(values.filters) > 0: for f in values.filters: for s in values.series: @@ -1140,12 +1140,12 @@ class CardTimeSeries(__CardSchema): view_type: MetricTimeseriesViewType @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): values["metricValue"] = [] return values @model_validator(mode="after") - def __transform(cls, values): + def __transform(self, values): values.metric_of = MetricOfTimeseries(values.metric_of) return values @@ -1157,18 +1157,18 @@ class CardTable(__CardSchema): metric_format: MetricExtendedFormatType = Field(default=MetricExtendedFormatType.SESSION_COUNT) @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): if values.get("metricOf") is not None and values.get("metricOf") != MetricOfTable.ISSUES: values["metricValue"] = [] return values @model_validator(mode="after") - def __transform(cls, values): + def __transform(self, values): values.metric_of = MetricOfTable(values.metric_of) return values @model_validator(mode="after") - def __validator(cls, values): + def __validator(self, values): if values.metric_of not in (MetricOfTable.ISSUES, 
MetricOfTable.USER_BROWSER, MetricOfTable.USER_DEVICE, MetricOfTable.USER_COUNTRY, MetricOfTable.VISITED_URL): @@ -1183,7 +1183,7 @@ class CardFunnel(__CardSchema): view_type: MetricOtherViewType = Field(...) @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): if values.get("metricOf") and not MetricOfFunnels.has_value(values["metricOf"]): values["metricOf"] = MetricOfFunnels.SESSION_COUNT values["viewType"] = MetricOtherViewType.OTHER_CHART @@ -1192,7 +1192,7 @@ def __enforce_default(cls, values): return values @model_validator(mode="after") - def __transform(cls, values): + def __transform(self, values): values.metric_of = MetricOfTimeseries(values.metric_of) return values @@ -1203,12 +1203,12 @@ class CardErrors(__CardSchema): view_type: MetricOtherViewType = Field(...) @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): values["series"] = [] return values @model_validator(mode="after") - def __transform(cls, values): + def __transform(self, values): values.metric_of = MetricOfErrors(values.metric_of) return values @@ -1219,12 +1219,12 @@ class CardPerformance(__CardSchema): view_type: MetricOtherViewType = Field(...) @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): values["series"] = [] return values @model_validator(mode="after") - def __transform(cls, values): + def __transform(self, values): values.metric_of = MetricOfPerformance(values.metric_of) return values @@ -1235,12 +1235,12 @@ class CardResources(__CardSchema): view_type: MetricOtherViewType = Field(...) @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): values["series"] = [] return values @model_validator(mode="after") - def __transform(cls, values): + def __transform(self, values): values.metric_of = MetricOfResources(values.metric_of) return values @@ -1251,12 +1251,12 @@ class CardWebVital(__CardSchema): view_type: MetricOtherViewType = Field(...) @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): values["series"] = [] return values @model_validator(mode="after") - def __transform(cls, values): + def __transform(self, values): values.metric_of = MetricOfWebVitals(values.metric_of) return values @@ -1267,11 +1267,11 @@ class CardHeatMap(__CardSchema): view_type: MetricOtherViewType = Field(...) @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): return values @model_validator(mode="after") - def __transform(cls, values): + def __transform(self, values): values.metric_of = MetricOfHeatMap(values.metric_of) return values @@ -1286,17 +1286,17 @@ class CardInsights(__CardSchema): view_type: MetricOtherViewType = Field(...) 
@model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): values["view_type"] = MetricOtherViewType.LIST_CHART return values @model_validator(mode="after") - def __transform(cls, values): + def __transform(self, values): values.metric_of = MetricOfInsights(values.metric_of) return values @model_validator(mode='after') - def restrictions(cls, values): + def restrictions(self, values): raise ValueError(f"metricType:{MetricType.INSIGHTS} not supported yet.") @@ -1306,7 +1306,7 @@ class CardPathAnalysisSeriesSchema(CardSeriesSchema): density: int = Field(default=4, ge=2, le=10) @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): if values.get("filter") is None and values.get("startTimestamp") and values.get("endTimestamp"): values["filter"] = PathAnalysisSchema(startTimestamp=values["startTimestamp"], endTimestamp=values["endTimestamp"], @@ -1328,14 +1328,14 @@ class CardPathAnalysis(__CardSchema): series: List[CardPathAnalysisSeriesSchema] = Field(default=[]) @model_validator(mode="before") - def __enforce_default(cls, values): + def __enforce_default(self, values): values["viewType"] = MetricOtherViewType.OTHER_CHART.value if values.get("series") is not None and len(values["series"]) > 0: values["series"] = [values["series"][0]] return values @model_validator(mode="after") - def __clean_start_point_and_enforce_metric_value(cls, values): + def __clean_start_point_and_enforce_metric_value(self, values): start_point = [] for s in values.start_point: if len(s.value) == 0: @@ -1349,7 +1349,7 @@ def __clean_start_point_and_enforce_metric_value(cls, values): return values @model_validator(mode='after') - def __validator(cls, values): + def __validator(self, values): s_e_values = {} exclude_values = {} for f in values.start_point: @@ -1359,7 +1359,8 @@ def __validator(cls, values): exclude_values[f.type] = exclude_values.get(f.type, []) + f.value assert len( - values.start_point) <= 1, f"Only 1 startPoint with multiple values OR 1 endPoint with multiple values is allowed" + values.start_point) <= 1, \ + f"Only 1 startPoint with multiple values OR 1 endPoint with multiple values is allowed" for t in exclude_values: for v in t: assert v not in s_e_values.get(t, []), f"startPoint and endPoint cannot be excluded, value: {v}" @@ -1452,13 +1453,13 @@ class LiveSessionSearchFilterSchema(BaseModel): value: Union[List[str], str] = Field(...) type: LiveFilterType = Field(...) 
source: Optional[str] = Field(default=None) - operator: Literal[SearchEventOperator.IS, \ - SearchEventOperator.CONTAINS] = Field(default=SearchEventOperator.CONTAINS) + operator: Literal[SearchEventOperator.IS, SearchEventOperator.CONTAINS] \ + = Field(default=SearchEventOperator.CONTAINS) _transform = model_validator(mode='before')(transform_old_filter_type) @model_validator(mode='after') - def __validator(cls, values): + def __validator(self, values): if values.type is not None and values.type == LiveFilterType.METADATA: assert values.source is not None, "source should not be null for METADATA type" assert len(values.source) > 0, "source should not be empty for METADATA type" @@ -1471,7 +1472,7 @@ class LiveSessionsSearchPayloadSchema(_PaginatedSchema): order: SortOrderType = Field(default=SortOrderType.DESC) @model_validator(mode="before") - def __transform(cls, values): + def __transform(self, values): if values.get("order") is not None: values["order"] = values["order"].upper() if values.get("filters") is not None: @@ -1527,8 +1528,9 @@ class SessionUpdateNoteSchema(SessionNoteSchema): is_public: Optional[bool] = Field(default=None) @model_validator(mode='after') - def __validator(cls, values): - assert values.message is not None or values.timestamp is not None or values.is_public is not None, "at least 1 attribute should be provided for update" + def __validator(self, values): + assert values.message is not None or values.timestamp is not None or values.is_public is not None, \ + "at least 1 attribute should be provided for update" return values @@ -1555,7 +1557,7 @@ class HeatMapSessionsSearch(SessionsSearchPayloadSchema): filters: List[Union[SessionSearchFilterSchema, _HeatMapSearchEventRaw]] = Field(default=[]) @model_validator(mode="before") - def __transform(cls, values): + def __transform(self, values): for f in values.get("filters", []): if f.get("type") == FilterType.DURATION: return values @@ -1598,7 +1600,7 @@ class FeatureFlagConditionFilterSchema(BaseModel): sourceOperator: Optional[Union[SearchEventOperator, MathOperator]] = Field(default=None) @model_validator(mode="before") - def __force_is_event(cls, values): + def __force_is_event(self, values): values["isEvent"] = False return values @@ -1638,10 +1640,19 @@ class FeatureFlagSchema(BaseModel): variants: List[FeatureFlagVariant] = Field(default=[]) +class ModuleType(str, Enum): + ASSIST = "assist" + NOTES = "notes" + BUG_REPORTS = "bug-reports" + OFFLINE_RECORDINGS = "offline-recordings" + ALERTS = "alerts" + ASSIST_STATTS = "assist-statts" + RECOMMENDATIONS = "recommendations" + FEATURE_FLAGS = "feature-flags" + + class ModuleStatus(BaseModel): - module: Literal["assist", "notes", "bug-reports", - "offline-recordings", "alerts", "assist-statts", "recommendations", "feature-flags"] = Field(..., - description="Possible values: assist, notes, bug-reports, offline-recordings, alerts, assist-statts, recommendations, feature-flags") + module: ModuleType = Field(...) status: bool = Field(...) 
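Reviewer note on the predefined-metrics fix above: `get_metric` builds a dispatch table keyed by enum members, so the stale `MetricOfWebVitals.avg_cpu` reference raised AttributeError as soon as the table was evaluated; renaming it to `AVG_CPU` matches the member casing used by the rest of the table. A minimal sketch of the pattern, with hypothetical enum members and metric functions standing in for `chalicelib.core.metrics`:

```python
from enum import Enum
from typing import Callable, Dict


class MetricOfWebVitals(str, Enum):
    AVG_CPU = "avgCpu"  # hypothetical values, for illustration only
    AVG_FPS = "avgFps"


def get_avg_cpu(project_id: int) -> float:  # stand-in for metrics.get_avg_cpu
    return 42.0


def get_avg_fps(project_id: int) -> float:  # stand-in for metrics.get_avg_fps
    return 60.0


def get_metric(key: MetricOfWebVitals, project_id: int) -> float:
    # Dispatch table keyed by enum members; referencing a member that does
    # not exist (e.g. MetricOfWebVitals.avg_cpu after the rename to AVG_CPU)
    # raises AttributeError the moment this dict literal is evaluated.
    supported: Dict[MetricOfWebVitals, Callable[[int], float]] = {
        MetricOfWebVitals.AVG_CPU: get_avg_cpu,
        MetricOfWebVitals.AVG_FPS: get_avg_fps,
    }
    return supported[key](project_id)


print(get_metric(MetricOfWebVitals.AVG_CPU, project_id=1))  # 42.0
```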
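The `TimeUTC.now` signature above now spells out its contract: integer deltas in, epoch milliseconds out. A rough standalone equivalent (plain `datetime`, without the module's `UTC_ZI` plumbing), just to make the `-> int` annotation concrete:

```python
from datetime import datetime, timedelta, timezone


def now(delta_days: int = 0, delta_minutes: int = 0, delta_seconds: int = 0) -> int:
    # Mirrors TimeUTC.now: UTC wall-clock shifted by the given deltas,
    # returned as integer milliseconds since the epoch.
    t = datetime.now(timezone.utc) + timedelta(
        days=delta_days, minutes=delta_minutes, seconds=delta_seconds
    )
    return int(t.timestamp() * 1000)


assert isinstance(now(-7), int)  # e.g. "seven days ago" in epoch ms
```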
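The new `chalicelib/core/unprocessed_sessions.py` module centralizes the numeric-ID and DB/Assist existence checks that were previously copy-pasted (with bare `print` calls) into four endpoints across `api` and `ee/api`. `check_exists` returns a `(session_id, error_response)` pair: callers short-circuit on a non-None error and otherwise continue with the parsed integer ID. A condensed usage sketch; the route path and the stripped-down signature are illustrative, not the exact ones from `core_dynamic.py`:

```python
from fastapi import FastAPI
from starlette.responses import FileResponse

from chalicelib.core import assist, unprocessed_sessions

app = FastAPI()


@app.get("/{project_id}/unprocessed/{session_id}/dom.mob")  # hypothetical path
def get_live_session_replay_file(project_id: int, session_id: str):
    not_found = {"errors": ["Replay file not found"]}
    # Parses the ID and verifies the session in DB, then in Assist;
    # returns (int_id, None) on success or (raw_id, not_found) on failure.
    session_id, err = unprocessed_sessions.check_exists(
        project_id=project_id, session_id=session_id, not_found_response=not_found
    )
    if err is not None:
        return err
    path = assist.get_raw_mob_by_id(project_id=project_id, session_id=session_id)
    if path is None:
        return not_found
    return FileResponse(path=path, media_type="application/octet-stream")
```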
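The `filter_validator` change above also fixes a silent no-op: rebinding the loop variable (`v = IssueType(v)`) never touched `values.value`, so ISSUE and PLATFORM filter values were checked but left as plain strings. Assigning through `enumerate` indices mutates the list in place. A self-contained demonstration, with a hypothetical two-member enum:

```python
from enum import Enum


class IssueType(str, Enum):
    CLICK_RAGE = "click_rage"  # hypothetical members for illustration
    DEAD_CLICK = "dead_click"


values = ["click_rage", "dead_click"]

# Old pattern: the loop variable is rebound, the list is unchanged.
for v in values:
    v = IssueType(v)
assert not any(isinstance(v, IssueType) for v in values)

# Fixed pattern: index assignment mutates the list in place.
for i, v in enumerate(values):
    values[i] = IssueType(v)
assert all(isinstance(v, IssueType) for v in values)
```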
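Replacing the inline `Literal[...]` on `ModuleStatus.module` with the `ModuleType` enum gives the allowed values a single importable home and drops the hand-maintained "Possible values: ..." description, since pydantic already reports the permitted values on validation failure. A minimal sketch with plain pydantic (the project actually uses its own `BaseModel`/`Enum` overrides) and a trimmed member list:

```python
from enum import Enum

from pydantic import BaseModel, Field, ValidationError


class ModuleType(str, Enum):
    ASSIST = "assist"
    NOTES = "notes"


class ModuleStatus(BaseModel):
    module: ModuleType = Field(...)
    status: bool = Field(...)


print(ModuleStatus(module="notes", status=True).module)  # ModuleType.NOTES

try:
    ModuleStatus(module="bug-reports", status=True)  # not in the trimmed enum
except ValidationError as e:
    # The error message enumerates the permitted values, so no separate
    # description string needs to be kept in sync with the type.
    print(e.errors()[0]["msg"])
```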
diff --git a/ee/api/.gitignore b/ee/api/.gitignore index 8d6bc0036e..15be40a0e7 100644 --- a/ee/api/.gitignore +++ b/ee/api/.gitignore @@ -276,3 +276,5 @@ Pipfile.lock /routers/subs/spot.py /chalicelib/utils/or_cache/ /routers/subs/health.py +/chalicelib/core/spot.py +/chalicelib/core/unprocessed_sessions.py diff --git a/ee/api/app.py b/ee/api/app.py index 0d15d39856..7a24c76837 100644 --- a/ee/api/app.py +++ b/ee/api/app.py @@ -10,6 +10,7 @@ from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware from psycopg import AsyncConnection +from psycopg.rows import dict_row from starlette import status from starlette.responses import StreamingResponse, JSONResponse @@ -17,21 +18,19 @@ from chalicelib.utils import events_queue from chalicelib.utils import helper from chalicelib.utils import pg_client +from crons import core_crons, ee_crons, core_dynamic_crons from routers import core, core_dynamic from routers import ee +from routers.subs import insights, metrics, v1_api, health, usability_tests, spot +from routers.subs import v1_api_ee if config("ENABLE_SSO", cast=bool, default=True): from routers import saml -from crons import core_crons, ee_crons, core_dynamic_crons -from routers.subs import insights, metrics, v1_api, health, usability_tests, spot -from routers.subs import v1_api_ee loglevel = config("LOGLEVEL", default=logging.WARNING) print(f">Loglevel set to: {loglevel}") logging.basicConfig(level=loglevel) -from psycopg.rows import dict_row - class ORPYAsyncConnection(AsyncConnection): diff --git a/ee/api/auth/auth_jwt.py b/ee/api/auth/auth_jwt.py index 6222eee959..0fa8ea1309 100644 --- a/ee/api/auth/auth_jwt.py +++ b/ee/api/auth/auth_jwt.py @@ -53,10 +53,9 @@ async def __call__(self, request: Request) -> Optional[schemas.CurrentContext]: raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authentication scheme.") jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials) - auth_exists = jwt_payload is not None \ - and users.auth_exists(user_id=jwt_payload.get("userId", -1), - tenant_id=jwt_payload.get("tenantId", -1), - jwt_iat=jwt_payload.get("iat", 100)) + auth_exists = jwt_payload is not None and users.auth_exists(user_id=jwt_payload.get("userId", -1), + tenant_id=jwt_payload.get("tenantId", -1), + jwt_iat=jwt_payload.get("iat", 100)) if jwt_payload is None \ or jwt_payload.get("iat") is None or jwt_payload.get("aud") is None \ or not auth_exists: diff --git a/ee/api/clean-dev.sh b/ee/api/clean-dev.sh index ccc3d402f3..261ffbbc52 100755 --- a/ee/api/clean-dev.sh +++ b/ee/api/clean-dev.sh @@ -96,4 +96,6 @@ rm -rf ./chalicelib/core/db_request_handler.py rm -rf ./chalicelib/core/db_request_handler.py rm -rf ./routers/subs/spot.py rm -rf ./chalicelib/utils/or_cache -rm -rf ./routers/subs/health.py \ No newline at end of file +rm -rf ./routers/subs/health.py +rm -rf ./chalicelib/core/spot.py +rm -rf ./chalicelib/core/unprocessed_sessions.py \ No newline at end of file diff --git a/ee/api/routers/core_dynamic.py b/ee/api/routers/core_dynamic.py index ae22e2dd0f..28fa11e129 100644 --- a/ee/api/routers/core_dynamic.py +++ b/ee/api/routers/core_dynamic.py @@ -1,3 +1,4 @@ +import logging from typing import Optional, Union from decouple import config @@ -6,12 +7,13 @@ from starlette.responses import RedirectResponse, FileResponse, JSONResponse, Response import schemas +from chalicelib.core import scope from chalicelib.core import sessions, assist, heatmaps, sessions_favorite, 
sessions_assignments, errors, errors_viewed, \ errors_favorite, sessions_notes, sessions_replay, signup, feature_flags from chalicelib.core import sessions_viewed from chalicelib.core import tenants, users, projects, license +from chalicelib.core import unprocessed_sessions from chalicelib.core import webhook -from chalicelib.core import scope from chalicelib.core.collaboration_slack import Slack from chalicelib.core.users import get_user_settings from chalicelib.utils import SAML2_helper, smtp @@ -24,7 +26,7 @@ if config("ENABLE_SSO", cast=bool, default=True): from routers import saml - +logger = logging.getLogger(__name__) public_app, app, app_apikey = get_routers() COOKIE_PATH = "/api/refresh" @@ -79,7 +81,7 @@ def login_user(response: JSONResponse, spot: Optional[bool] = False, data: schem content = { 'jwt': r.pop('jwt'), 'data': { - "scope":scope.get_scope(r["tenantId"]), + "scope": scope.get_scope(r["tenantId"]), "user": r } } @@ -140,6 +142,8 @@ def get_account(context: schemas.CurrentContext = Depends(OR_context)): def edit_account(data: schemas.EditAccountSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): return users.edit_account(tenant_id=context.tenant_id, user_id=context.user_id, changes=data) + + @app.post('/account/scope', tags=["account"]) def change_scope(data: schemas.ScopeSchema = Body(), context: schemas.CurrentContext = Depends(OR_context)): @@ -392,16 +396,10 @@ def get_live_session(projectId: int, sessionId: str, background_tasks: Backgroun def get_live_session_replay_file(projectId: int, sessionId: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)): not_found = {"errors": ["Replay file not found"]} - if not sessionId.isnumeric(): - return not_found - else: - sessionId = int(sessionId) - if not sessions.session_exists(project_id=projectId, session_id=sessionId): - print(f"{projectId}/{sessionId} not found in DB.") - if not assist.session_exists(project_id=projectId, session_id=sessionId): - print(f"{projectId}/{sessionId} not found in Assist.") - return not_found - + sessionId, err = unprocessed_sessions.check_exists(project_id=projectId, session_id=sessionId, + not_found_response=not_found) + if err is not None: + return err path = assist.get_raw_mob_by_id(project_id=projectId, session_id=sessionId) if path is None: return not_found @@ -416,19 +414,13 @@ def get_live_session_replay_file(projectId: int, sessionId: Union[int, str], def get_live_session_devtools_file(projectId: int, sessionId: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)): not_found = {"errors": ["Devtools file not found"]} - if not sessionId.isnumeric(): - return not_found - else: - sessionId = int(sessionId) - if not sessions.session_exists(project_id=projectId, session_id=sessionId): - print(f"{projectId}/{sessionId} not found in DB.") - if not assist.session_exists(project_id=projectId, session_id=sessionId): - print(f"{projectId}/{sessionId} not found in Assist.") - return not_found - + sessionId, err = unprocessed_sessions.check_exists(project_id=projectId, session_id=sessionId, + not_found_response=not_found) + if err is not None: + return err path = assist.get_raw_devtools_by_id(project_id=projectId, session_id=sessionId) if path is None: - return {"errors": ["Devtools file not found"]} + return not_found return FileResponse(path=path, media_type="application/octet-stream") diff --git a/ee/api/schemas/assist_stats_schema.py b/ee/api/schemas/assist_stats_schema.py index b59282a099..b6d588b7bd 100644 --- 
a/ee/api/schemas/assist_stats_schema.py
+++ b/ee/api/schemas/assist_stats_schema.py
@@ -60,13 +60,13 @@ class AssistStatsSessionsRequest(BaseModel):
     userId: Optional[int] = Field(default=None)
 
     @field_validator("sort")
-    def validate_sort(cls, v):
+    def validate_sort(self, v):
         if v not in assist_sort_options:
             raise ValueError(f"Invalid sort option. Allowed options: {', '.join(assist_sort_options)}")
         return v
 
     @field_validator("order")
-    def validate_order(cls, v):
+    def validate_order(self, v):
         if v not in ["desc", "asc"]:
             raise ValueError("Invalid order option. Must be 'desc' or 'asc'.")
         return v
diff --git a/ee/api/schemas/schemas_ee.py b/ee/api/schemas/schemas_ee.py
index 2cf7dc8ee1..4e31782e27 100644
--- a/ee/api/schemas/schemas_ee.py
+++ b/ee/api/schemas/schemas_ee.py
@@ -32,7 +32,7 @@ class CurrentContext(schemas.CurrentContext):
     service_account: bool = Field(default=False)
 
     @model_validator(mode="before")
-    def remove_unsupported_perms(cls, values):
+    def remove_unsupported_perms(self, values):
         if values.get("permissions") is not None:
             perms = []
             for p in values["permissions"]:
@@ -94,7 +94,7 @@ class TrailSearchPayloadSchema(schemas._PaginatedSchema):
     order: schemas.SortOrderType = Field(default=schemas.SortOrderType.DESC)
 
     @model_validator(mode="before")
-    def transform_order(cls, values):
+    def transform_order(self, values):
         if values.get("order") is None:
             values["order"] = schemas.SortOrderType.DESC
         else:
@@ -154,7 +154,7 @@ class CardInsights(schemas.CardInsights):
     metric_value: List[InsightCategories] = Field(default=[])
 
     @model_validator(mode='after')
-    def restrictions(cls, values):
+    def restrictions(self, values):
         return values
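Two signature conventions drive the renames in this patch: `__new__` implicitly receives the class, so `ORUnion.__new__(cls, ...)` is the correct spelling, and pydantic v2 `mode="after"` model validators can run on the constructed instance, where `self` is the idiomatic first-parameter name. A minimal sketch of the instance-style after-validator, independent of the project's schema overrides and using the canonical single-argument form rather than the `(self, values)` form seen above:

```python
from pydantic import BaseModel, model_validator


class TimedSchema(BaseModel):
    startTimestamp: int = 0
    endTimestamp: int = 0

    @model_validator(mode="after")
    def check_order(self):
        # mode="after" receives the validated instance and must return it.
        assert self.startTimestamp <= self.endTimestamp, \
            "endTimestamp must be greater or equal to startTimestamp"
        return self


print(TimedSchema(startTimestamp=1, endTimestamp=2))
```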
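Both `core_dynamic.py` routers and the new `unprocessed_sessions` module now use per-module loggers instead of bare `print` calls, so diagnostics respect the LOGLEVEL that `app.py` passes to `logging.basicConfig`. The pattern, reduced to a runnable sketch:

```python
import logging

logger = logging.getLogger(__name__)  # one logger per module, as in the patch


def check(project_id: int, session_id: int) -> None:
    # warning-level messages survive the default LOGLEVEL=WARNING config
    logger.warning(f"{project_id}/{session_id} not found in DB.")


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)  # app.py does this at startup
    check(1, 42)
```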