
Commit afbf801

Optimize aggregation query for /job-status
Doing the aggregation in a subquery and then joining with the consumer table (to pick up the channel ID) is up to 10x faster on a 14.5M-row job table.
1 parent: 19ad51b · commit: afbf801

File tree: 1 file changed, +9 −3 lines


storage/deliveryjobrepo.go

Lines changed: 9 additions & 3 deletions
@@ -310,9 +310,15 @@ func (djRepo *DeliveryJobDBRepository) GetJobStatusCountsGroupedByConsumer() (ma
 		newest string
 	}
 	rows := make([]*statusRow, 0)
-	query := `SELECT c.channelId, j.consumerId, j.status, count(j.id), min(j.statusChangedAt), max(j.statusChangedAt)
-	FROM job j JOIN consumer c on j.consumerId = c.id
-	GROUP BY c.channelId, j.consumerId, j.status`
+	query := `SELECT c.channelId, j.consumerId, j.status, j.j_count, j.min_statusChangedAt, j.max_statusChangedAt
+	FROM
+	(
+		SELECT consumerId, status, count(id) j_count, min(statusChangedAt) min_statusChangedAt, max(statusChangedAt) max_statusChangedAt
+		FROM job
+		GROUP BY consumerId, status
+	) j
+	JOIN consumer c
+	ON j.consumerId = c.id`
 	scanStatusCount := func() []interface{} {
 		statusCount := &statusRow{}
 		rows = append(rows, statusCount)
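For context, the reasoning in the commit message can be illustrated with a minimal, self-contained Go sketch of how the rewritten query might be executed with database/sql. This is not the repository's actual helper code (which scans into statusRow via scanStatusCount, as shown in the diff); the driver, DSN, struct, and column types below are assumptions for illustration only.

// Hypothetical, self-contained sketch; not code from deliveryjobrepo.go.
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // driver choice is an assumption
)

// statusCount mirrors the columns selected by the query; field types are assumed.
type statusCount struct {
	ChannelID  string
	ConsumerID string
	Status     int    // assumes status is stored as an integer
	Count      int64
	Oldest     string // min(statusChangedAt), scanned as a string here
	Newest     string // max(statusChangedAt)
}

func main() {
	// The DSN is a placeholder; point it at the database holding the job and consumer tables.
	db, err := sql.Open("mysql", "user:password@/webhook_broker")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Aggregate per (consumerId, status) in a derived table first, then join the
	// much smaller result with consumer to pick up the channelId.
	query := `SELECT c.channelId, j.consumerId, j.status, j.j_count, j.min_statusChangedAt, j.max_statusChangedAt
	FROM (
		SELECT consumerId, status, count(id) j_count, min(statusChangedAt) min_statusChangedAt, max(statusChangedAt) max_statusChangedAt
		FROM job
		GROUP BY consumerId, status
	) j
	JOIN consumer c ON j.consumerId = c.id`

	rows, err := db.Query(query)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var sc statusCount
		if err := rows.Scan(&sc.ChannelID, &sc.ConsumerID, &sc.Status, &sc.Count, &sc.Oldest, &sc.Newest); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%+v\n", sc)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}

The gain most likely comes from the derived table: the GROUP BY now scans only the job table, and the join with consumer touches one row per (consumerId, status) group rather than participating in the grouping over all 14.5M job rows.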
