Fix: Refresh snapshot intervals after determining the max interval end per model (#3664)
izeigerman committed Jan 20, 2025
1 parent d4c10e1 commit 202b73c
Showing 2 changed files with 11 additions and 5 deletions.
3 changes: 3 additions & 0 deletions sqlmesh/core/context.py
@@ -1328,6 +1328,9 @@ def plan_builder(
             ),
         )
 
+        # Refresh snapshot intervals to ensure that they are up to date with values reflected in the max_interval_end_per_model.
+        self.state_sync.refresh_snapshot_intervals(context_diff.snapshots.values())
+
         return self.PLAN_BUILDER_TYPE(
             context_diff=context_diff,
             start=start,
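In plain terms, the change makes plan_builder re-read interval state from the state sync right before the plan is constructed, so the in-memory snapshots agree with the interval ends that were just determined per model. Below is a minimal, self-contained sketch of that idea; FakeSnapshot and FakeStateSync are stand-ins invented for illustration, and only the method name refresh_snapshot_intervals and the shape of the call come from the diff above.

# Minimal sketch of why refreshing intervals before planning matters.
# FakeSnapshot/FakeStateSync are hypothetical stand-ins, not SQLMesh classes.

class FakeSnapshot:
    def __init__(self, name, intervals):
        self.name = name
        self.intervals = intervals


class FakeStateSync:
    """Pretend state store whose interval records are the source of truth."""

    def __init__(self, stored_intervals):
        self.stored_intervals = stored_intervals

    def refresh_snapshot_intervals(self, snapshots):
        # Overwrite each snapshot's in-memory intervals with the stored ones.
        for snapshot in snapshots:
            snapshot.intervals = list(self.stored_intervals.get(snapshot.name, []))


state_sync = FakeStateSync({"waiter_revenue_by_day": [("2025-01-19", "2025-01-20")]})
snapshot = FakeSnapshot("waiter_revenue_by_day", intervals=[])  # stale in-memory copy

# Without a refresh, planning against the stale copy would see missing intervals
# and conclude a backfill is required; refreshing first keeps the snapshot
# consistent with the stored interval ends.
state_sync.refresh_snapshot_intervals([snapshot])
assert snapshot.intervals == [("2025-01-19", "2025-01-20")]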
13 changes: 8 additions & 5 deletions tests/core/test_plan.py
@@ -780,15 +780,18 @@ def test_restate_models(sushi_context_pre_scheduling: Context):
 
 
 @pytest.mark.slow
-@time_machine.travel(now(), tick=False)
-def test_restate_models_with_existing_missing_intervals(sushi_context: Context):
+@time_machine.travel(now(minute_floor=False), tick=False)
+def test_restate_models_with_existing_missing_intervals(init_and_plan_context: t.Callable):
+    sushi_context, plan = init_and_plan_context("examples/sushi")
+    sushi_context.apply(plan)
+
     yesterday_ts = to_timestamp(yesterday_ds())
 
     assert not sushi_context.plan(no_prompts=True).requires_backfill
     waiter_revenue_by_day = sushi_context.snapshots['"memory"."sushi"."waiter_revenue_by_day"']
-    waiter_revenue_by_day.intervals = [
-        (waiter_revenue_by_day.intervals[0][0], yesterday_ts),
-    ]
+    sushi_context.state_sync.remove_intervals(
+        [(waiter_revenue_by_day, (yesterday_ts, waiter_revenue_by_day.intervals[0][1]))]
+    )
     assert sushi_context.plan(no_prompts=True).requires_backfill
 
     plan = sushi_context.plan(restate_models=["sushi.waiter_revenue_by_day"], no_prompts=True)
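Reading the updated test: instead of mutating the snapshot's intervals in memory, it builds and applies a real plan through the init_and_plan_context fixture, deletes the interval window from yesterday through the snapshot's recorded end via state_sync.remove_intervals, and then asserts that the next plan reports requires_backfill. That assertion only reflects the removed intervals once plan_builder refreshes snapshot intervals from state, which is the behavior added in sqlmesh/core/context.py above.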
