chore: use loki constant for metrics namespace
salvacorts committed Nov 20, 2024
1 parent 79cccfd commit 973d68f
Showing 4 changed files with 41 additions and 38 deletions.
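All four files previously defined their own `metricsNamespace = "loki"` constant; this commit drops those in favor of the shared `constants.Loki` value from `pkg/util/constants`. That package is not shown in the diff, so the following is only a hedged sketch inferred from the replaced literal:

```go
// Assumed sketch of pkg/util/constants (not part of this diff): the commit
// replaces the string literal "loki" with constants.Loki, so the constant
// presumably holds that same literal.
package constants

// Loki is the Prometheus metrics namespace shared by Loki components.
const Loki = "loki"
```

Centralizing the namespace keeps every component under the same `loki_` metric prefix without each package redefining the string.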
25 changes: 13 additions & 12 deletions pkg/bloombuild/builder/metrics.go
@@ -3,10 +3,11 @@ package builder
 import (
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promauto"
+
+    "github.com/grafana/loki/v3/pkg/util/constants"
 )

 const (
-    metricsNamespace = "loki"
     metricsSubsystem = "bloombuilder"

     statusSuccess = "success"
@@ -34,32 +35,32 @@ type Metrics struct {
 func NewMetrics(r prometheus.Registerer) *Metrics {
     return &Metrics{
         running: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "running",
             Help: "Value will be 1 if the bloom builder is currently running on this instance",
         }),
         processingTask: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "processing_task",
             Help: "Value will be 1 if the bloom builder is currently processing a task",
         }),

         taskStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "task_started_total",
             Help: "Total number of task started",
         }),
         taskCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "task_completed_total",
             Help: "Total number of task completed",
         }, []string{"status"}),
         taskDuration: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "task_duration_seconds",
             Help: "Time spent processing a task.",
@@ -73,34 +74,34 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
         }, []string{"status"}),

         blocksReused: promauto.With(r).NewCounter(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "blocks_reused_total",
             Help: "Number of overlapping bloom blocks reused when creating new blocks",
         }),
         blocksCreated: promauto.With(r).NewCounter(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "blocks_created_total",
             Help: "Number of blocks created",
         }),
         metasCreated: promauto.With(r).NewCounter(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "metas_created_total",
             Help: "Number of metas created",
         }),

         seriesPerTask: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "series_per_task",
             Help: "Number of series during task processing. Includes series which copied from other blocks and don't need to be indexed",
             // Up to 10M series per tenant, way more than what we expect given our max_global_streams_per_user limits
             Buckets: prometheus.ExponentialBucketsRange(1, 10e6, 10),
         }),
         bytesPerTask: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "bytes_per_task",
             Help: "Number of source bytes from chunks added during a task processing.",
@@ -109,7 +110,7 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
         }),

         chunkSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "chunk_series_size",
             Help: "Uncompressed size of chunks in a series",
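Since this only swaps a string literal for an identically valued shared constant (assuming `constants.Loki == "loki"` as sketched above), the fully qualified metric names are unchanged. A quick check with `prometheus.BuildFQName`, the helper client_golang itself uses to join namespace, subsystem, and name:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// BuildFQName joins namespace, subsystem, and name with underscores,
	// the same way GaugeOpts/CounterOpts are resolved at registration time.
	// "loki" stands in for constants.Loki, assumed to hold that literal.
	fqName := prometheus.BuildFQName("loki", "bloombuilder", "running")
	fmt.Println(fqName) // loki_bloombuilder_running
}
```

So existing dashboards and alerts keyed on `loki_bloombuilder_*` metric names keep working.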
47 changes: 24 additions & 23 deletions pkg/bloombuild/planner/metrics.go
@@ -5,10 +5,11 @@ import (

     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promauto"
+
+    "github.com/grafana/loki/v3/pkg/util/constants"
 )

 const (
-    metricsNamespace = "loki"
     metricsSubsystem = "bloomplanner"

     statusSuccess = "success"
@@ -56,26 +57,26 @@ func NewMetrics(
 ) *Metrics {
     return &Metrics{
         running: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "running",
             Help: "Value will be 1 if bloom planner is currently running on this instance",
         }),
         connectedBuilders: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "connected_builders",
             Help: "Number of builders currently connected to the planner.",
         }, getConnectedBuilders),
         queueDuration: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "queue_duration_seconds",
             Help: "Time spend by tasks in queue before getting picked up by a builder.",
             Buckets: prometheus.DefBuckets,
         }),
         inflightRequests: promauto.With(r).NewSummary(prometheus.SummaryOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "inflight_tasks",
             Help: "Number of inflight tasks (either queued or processing) sampled at a regular interval. Quantile buckets keep track of inflight tasks over the last 60s.",
@@ -84,40 +85,40 @@ func NewMetrics(
             AgeBuckets: 6,
         }),
         tasksRequeued: promauto.With(r).NewCounter(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "tasks_requeued_total",
             Help: "Total number of tasks requeued due to not being picked up by a builder.",
         }),
         taskLost: promauto.With(r).NewCounter(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "tasks_lost_total",
             Help: "Total number of tasks lost due to not being picked up by a builder and failed to be requeued.",
         }),

         planningTime: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "planning_time_seconds",
             Help: "Time spent planning a build cycle.",
             // 1s --> 1h (steps of 1 minute)
             Buckets: prometheus.LinearBuckets(1, 60, 60),
         }),
         buildStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "build_started_total",
             Help: "Total number of builds started",
         }),
         buildCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "build_completed_total",
             Help: "Total number of builds completed",
         }, []string{"status"}),
         buildTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "build_time_seconds",
             Help: "Time spent during a builds cycle.",
@@ -130,45 +131,45 @@ func NewMetrics(
             ),
         }, []string{"status"}),
         buildLastSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "build_last_successful_run_timestamp_seconds",
             Help: "Unix timestamp of the last successful build cycle.",
         }),

         blocksDeleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "blocks_deleted_total",
             Help: "Number of blocks deleted",
         }, []string{"phase"}),
         metasDeleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "metas_deleted_total",
             Help: "Number of metas deleted",
         }, []string{"phase"}),

         tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "tenants_discovered_total",
             Help: "Number of tenants discovered during the current build iteration",
         }),
         tenantTasksPlanned: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "tenant_tasks_planned",
             Help: "Number of tasks planned for a tenant during the current build iteration.",
         }, []string{"tenant"}),
         tenantTasksCompleted: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "tenant_tasks_completed",
             Help: "Number of tasks completed for a tenant during the current build iteration.",
         }, []string{"tenant", "status"}),
         tenantTasksTiming: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "tenant_tasks_time_seconds",
             Help: "Time spent building tasks for a tenant during the current build iteration.",
@@ -178,22 +179,22 @@ func NewMetrics(

         // Retention
         retentionRunning: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "retention_running",
             Help: "1 if retention is running in this compactor.",
         }),

         retentionTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "retention_time_seconds",
             Help: "Time this retention process took to complete.",
             Buckets: prometheus.DefBuckets,
         }, []string{"status"}),

         retentionDaysPerIteration: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "retention_days_processed",
             Help: "Number of days iterated over during the retention process.",
@@ -202,7 +203,7 @@ func NewMetrics(
         }, []string{"status"}),

         retentionTenantsPerIteration: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "retention_tenants_processed",
             Help: "Number of tenants on which retention was applied during the retention process.",
@@ -211,7 +212,7 @@ func NewMetrics(
         }, []string{"status"}),

         retentionTenantsExceedingLookback: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "retention_tenants_exceeding_lookback",
             Help: "Number of tenants with a retention exceeding the configured retention lookback.",
3 changes: 2 additions & 1 deletion pkg/bloombuild/planner/planner.go
@@ -26,6 +26,7 @@ import (
     "github.com/grafana/loki/v3/pkg/storage/config"
     "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
     "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+    "github.com/grafana/loki/v3/pkg/util/constants"
     utillog "github.com/grafana/loki/v3/pkg/util/log"
     "github.com/grafana/loki/v3/pkg/util/ring"
 )
@@ -79,7 +80,7 @@ func New(
     }

     // Queue to manage tasks
-    queueMetrics := queue.NewMetrics(r, metricsNamespace, metricsSubsystem)
+    queueMetrics := queue.NewMetrics(r, constants.Loki, metricsSubsystem)
     queueLimits := NewQueueLimits(limits)
     tasksQueue, err := queue.NewQueue(logger, cfg.Queue, queueLimits, queueMetrics, storageMetrics)
     if err != nil {
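The planner also threads the shared namespace into the task queue's metrics via `queue.NewMetrics(r, constants.Loki, metricsSubsystem)`. A minimal sketch of that injection pattern (hypothetical names: `newQueueLengthGauge` and `queue_length` are illustrative, not the actual queue package API):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// newQueueLengthGauge mirrors the pattern above: the metrics namespace and
// subsystem are injected by the caller instead of being hard-coded strings.
func newQueueLengthGauge(r prometheus.Registerer, namespace, subsystem string) prometheus.Gauge {
	return promauto.With(r).NewGauge(prometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "queue_length",
		Help:      "Sketch: number of tasks currently queued.",
	})
}

func main() {
	reg := prometheus.NewRegistry()
	g := newQueueLengthGauge(reg, "loki" /* constants.Loki, assumed */, "bloomplanner")
	g.Set(3)

	families, _ := reg.Gather()
	for _, mf := range families {
		fmt.Println(mf.GetName()) // loki_bloomplanner_queue_length
	}
}
```

Passing the constant down means a future namespace change happens in exactly one place.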
4 changes: 2 additions & 2 deletions pkg/bloombuild/planner/strategies/chunksize.go
@@ -21,10 +21,10 @@ import (
     "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
     "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
     "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
+    "github.com/grafana/loki/v3/pkg/util/constants"
 )

 const (
-    metricsNamespace = "loki"
     metricsSubsystem = "bloomplanner"
 )

@@ -35,7 +35,7 @@ type ChunkSizeStrategyMetrics struct {
 func NewChunkSizeStrategyMetrics(r prometheus.Registerer) *ChunkSizeStrategyMetrics {
     return &ChunkSizeStrategyMetrics{
         tenantTaskSize: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
-            Namespace: metricsNamespace,
+            Namespace: constants.Loki,
             Subsystem: metricsSubsystem,
             Name: "tenant_task_size_bytes",
             Help: "Size of tasks generated by the chunk size strategy",
