From 2f2e7d06b5eff9af17adee1b49368c3a5d88a32e Mon Sep 17 00:00:00 2001 From: Riccardo Busetti Date: Fri, 11 Oct 2024 13:17:12 +0200 Subject: [PATCH 01/16] feat(spooler): Add metric to track serialization performance (#4135) --- relay-server/src/services/buffer/envelope_stack/sqlite.rs | 4 +++- relay-server/src/statsd.rs | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/relay-server/src/services/buffer/envelope_stack/sqlite.rs b/relay-server/src/services/buffer/envelope_stack/sqlite.rs index 703569ddab..97accdf3cb 100644 --- a/relay-server/src/services/buffer/envelope_stack/sqlite.rs +++ b/relay-server/src/services/buffer/envelope_stack/sqlite.rs @@ -97,7 +97,9 @@ impl SqliteEnvelopeStack { // We convert envelopes into a format which simplifies insertion in the store. If an // envelope can't be serialized, we will not insert it. - let envelopes = envelopes.iter().filter_map(|e| e.as_ref().try_into().ok()); + let envelopes = relay_statsd::metric!(timer(RelayTimers::BufferEnvelopesSerialization), { + envelopes.iter().filter_map(|e| e.as_ref().try_into().ok()) + }); // When early return here, we are acknowledging that the elements that we popped from // the buffer are lost in case of failure. We are doing this on purposes, since if we were diff --git a/relay-server/src/statsd.rs b/relay-server/src/statsd.rs index fbae91d779..3526e30b0a 100644 --- a/relay-server/src/statsd.rs +++ b/relay-server/src/statsd.rs @@ -538,6 +538,8 @@ pub enum RelayTimers { BufferPop, /// Timing in milliseconds for the time it takes for the buffer to drain its envelopes. BufferDrain, + /// Timing in milliseconds for the time it takes for the envelopes to be serialized. 
+ BufferEnvelopesSerialization, } impl TimerMetric for RelayTimers { @@ -586,6 +588,7 @@ impl TimerMetric for RelayTimers { RelayTimers::BufferPeek => "buffer.peek.duration", RelayTimers::BufferPop => "buffer.pop.duration", RelayTimers::BufferDrain => "buffer.drain.duration", + RelayTimers::BufferEnvelopesSerialization => "buffer.envelopes_serialization", } } } From e4761f025d24d3355c2b13fc1b9f95e2f2be7b4f Mon Sep 17 00:00:00 2001 From: Riccardo Busetti Date: Mon, 14 Oct 2024 08:42:48 +0200 Subject: [PATCH 02/16] feat(redis): Implement parallel cmd execution of Redis calls (#4118) --- CHANGELOG.md | 1 + relay-redis/src/real.rs | 95 ++++++++++++++++++++++------------------- 2 files changed, 53 insertions(+), 43 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e9333d66c..bc96f5f8af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ - Use custom wildcard matching instead of regular expressions. ([#4073](https://github.com/getsentry/relay/pull/4073)) - Allowlist the SentryUptimeBot user-agent. ([#4068](https://github.com/getsentry/relay/pull/4068)) - Feature flags of graduated features are now hard-coded in Relay so they can be removed from Sentry. ([#4076](https://github.com/getsentry/relay/pull/4076), [#4080](https://github.com/getsentry/relay/pull/4080)) +- Add parallelization in Redis commands. 
([#4118](https://github.com/getsentry/relay/pull/4118)) ## 24.9.0 diff --git a/relay-redis/src/real.rs b/relay-redis/src/real.rs index e535c43125..f47d4c929e 100644 --- a/relay-redis/src/real.rs +++ b/relay-redis/src/real.rs @@ -1,10 +1,10 @@ -use std::error::Error; -use std::fmt; -use std::time::Duration; - use r2d2::{Builder, ManageConnection, Pool, PooledConnection}; pub use redis; use redis::ConnectionLike; +use std::error::Error; +use std::thread::Scope; +use std::time::Duration; +use std::{fmt, thread}; use thiserror::Error; use crate::config::RedisConfigOptions; @@ -25,6 +25,30 @@ pub enum RedisError { Redis(#[source] redis::RedisError), } +fn log_secondary_redis_error(result: redis::RedisResult) { + if let Err(error) = result { + relay_log::error!( + error = &error as &dyn Error, + "sending cmds to the secondary Redis instance failed", + ); + } +} + +fn spawn_secondary_thread<'scope, 'env: 'scope, T>( + scope: &'scope Scope<'scope, 'env>, + block: impl FnOnce() -> redis::RedisResult + Send + 'scope, +) { + let result = thread::Builder::new().spawn_scoped(scope, move || { + log_secondary_redis_error(block()); + }); + if let Err(error) = result { + relay_log::error!( + error = &error as &dyn Error, + "spawning the thread for the secondary Redis connection failed", + ); + } +} + enum ConnectionInner<'a> { Cluster(&'a mut redis::cluster::ClusterConnection), MultiWrite { @@ -37,24 +61,17 @@ enum ConnectionInner<'a> { impl ConnectionLike for ConnectionInner<'_> { fn req_packed_command(&mut self, cmd: &[u8]) -> redis::RedisResult { match self { - ConnectionInner::Cluster(ref mut con) => con.req_packed_command(cmd), + ConnectionInner::Cluster(con) => con.req_packed_command(cmd), + ConnectionInner::Single(con) => con.req_packed_command(cmd), ConnectionInner::MultiWrite { - primary: primary_connection, - secondaries: secondary_connections, - } => { - let primary_result = primary_connection.req_packed_command(cmd); - for secondary_connection in 
secondary_connections.iter_mut() { - if let Err(error) = secondary_connection.req_packed_command(cmd) { - relay_log::error!( - error = &error as &dyn Error, - "sending cmd to the secondary Redis instance failed", - ); - } + primary, + secondaries, + } => thread::scope(|s| { + for connection in secondaries { + spawn_secondary_thread(s, || connection.req_packed_command(cmd)) } - - primary_result - } - ConnectionInner::Single(ref mut con) => con.req_packed_command(cmd), + primary.req_packed_command(cmd) + }), } } @@ -65,58 +82,50 @@ impl ConnectionLike for ConnectionInner<'_> { count: usize, ) -> redis::RedisResult> { match self { - ConnectionInner::Cluster(ref mut con) => con.req_packed_commands(cmd, offset, count), + ConnectionInner::Cluster(con) => con.req_packed_commands(cmd, offset, count), + ConnectionInner::Single(con) => con.req_packed_commands(cmd, offset, count), ConnectionInner::MultiWrite { - primary: primary_connection, - secondaries: secondary_connections, - } => { - let primary_result = primary_connection.req_packed_commands(cmd, offset, count); - for secondary_connection in secondary_connections.iter_mut() { - if let Err(error) = secondary_connection.req_packed_commands(cmd, offset, count) - { - relay_log::error!( - error = &error as &dyn Error, - "sending cmds to the secondary Redis instance failed", - ); - } + primary, + secondaries, + } => thread::scope(|s| { + for connection in secondaries { + spawn_secondary_thread(s, || connection.req_packed_commands(cmd, offset, count)) } - - primary_result - } - ConnectionInner::Single(ref mut con) => con.req_packed_commands(cmd, offset, count), + primary.req_packed_commands(cmd, offset, count) + }), } } fn get_db(&self) -> i64 { match self { - ConnectionInner::Cluster(ref con) => con.get_db(), + ConnectionInner::Cluster(con) => con.get_db(), ConnectionInner::MultiWrite { primary: primary_connection, .. 
} => primary_connection.get_db(), - ConnectionInner::Single(ref con) => con.get_db(), + ConnectionInner::Single(con) => con.get_db(), } } fn check_connection(&mut self) -> bool { match self { - ConnectionInner::Cluster(ref mut con) => con.check_connection(), + ConnectionInner::Cluster(con) => con.check_connection(), ConnectionInner::MultiWrite { primary: primary_connection, .. } => primary_connection.check_connection(), - ConnectionInner::Single(ref mut con) => con.check_connection(), + ConnectionInner::Single(con) => con.check_connection(), } } fn is_open(&self) -> bool { match self { - ConnectionInner::Cluster(ref con) => con.is_open(), + ConnectionInner::Cluster(con) => con.is_open(), ConnectionInner::MultiWrite { primary: primary_connection, .. } => primary_connection.is_open(), - ConnectionInner::Single(ref con) => con.is_open(), + ConnectionInner::Single(con) => con.is_open(), } } } From fb6bdef94713c3f99b5498f791c120497f2f2357 Mon Sep 17 00:00:00 2001 From: David Herberth Date: Mon, 14 Oct 2024 11:23:27 +0200 Subject: [PATCH 03/16] ref(redis): Update to redis client version 0.27.4 (#4132) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Finally back to stable! The data corruption issue on timeouts has been merged upstream and released 🎉 . 
Fixes: https://github.com/getsentry/team-ingest/issues/368 --- Cargo.lock | 5 +++-- Cargo.toml | 3 +-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20f71c802e..fde0527ff0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3187,8 +3187,9 @@ dependencies = [ [[package]] name = "redis" -version = "0.27.2" -source = "git+https://github.com/getsentry/redis-rs.git?rev=fc7d98cc10c16fa7c0c31de64dc1b713354a4384#fc7d98cc10c16fa7c0c31de64dc1b713354a4384" +version = "0.27.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc6baebe319ef5e4b470f248335620098d1c2e9261e995be05f56f719ca4bdb2" dependencies = [ "arc-swap", "combine", diff --git a/Cargo.toml b/Cargo.toml index 100fca3242..72998d2388 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,8 +135,7 @@ rand_pcg = "0.3.1" rayon = "1.10" rdkafka = "0.36.2" rdkafka-sys = "4.3.0" -# Git revision until https://github.com/redis-rs/redis-rs/pull/1097 (merged) and https://github.com/redis-rs/redis-rs/pull/1290 are released. -redis = { git = "https://github.com/getsentry/redis-rs.git", rev = "fc7d98cc10c16fa7c0c31de64dc1b713354a4384", default-features = false } +redis = { version = "0.27.4", default-features = false } regex = "1.10.2" regex-lite = "0.1.6" reqwest = "0.12.7" From 9afb49f0cbd74fed5a8c27ebaac982cb45137a44 Mon Sep 17 00:00:00 2001 From: David Herberth Date: Tue, 15 Oct 2024 10:06:11 +0200 Subject: [PATCH 04/16] ref(server): Organize project services in nested modules (#4139) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Organizing all the `project_*.rs` files we have in the `services/` module in a module structure. 
``` projects ├── cache.rs // <- services/project_cache.rs ├── mod.rs ├── project │   ├── mod.rs // <- services/project.rs │   └── state // <- services/project/state/* │   ├── fetch_state.rs │   ├── info.rs │   └── mod.rs └── source ├── local.rs // <- services/project_local.rs ├── mod.rs ├── redis.rs // <- services/project_redis.rs └── upstream.rs // <- services/project_upstream.rs ``` This only moves files around, no functional changes. It is a preparation to split out more of the convoluted services into smaller digestible parts. For example, the `project_cache.rs` module contains a lot of utility and helper types not relevant for the service itself, it should be possible to split this out eventually. I chose `projects` as the base module to avoid having a `project/project/` module, also I think it makes sense, it manages all projects and the nested module just represents a single project. --- relay-server/src/endpoints/batch_metrics.rs | 2 +- relay-server/src/endpoints/common.rs | 2 +- relay-server/src/endpoints/project_configs.rs | 8 +++++--- relay-server/src/service.rs | 2 +- relay-server/src/services/buffer/mod.rs | 2 +- relay-server/src/services/mod.rs | 9 ++------- relay-server/src/services/processor.rs | 4 ++-- .../src/services/processor/dynamic_sampling.rs | 2 +- relay-server/src/services/processor/metrics.rs | 4 ++-- relay-server/src/services/processor/profile.rs | 2 +- relay-server/src/services/processor/report.rs | 2 +- .../src/services/processor/span/processing.rs | 2 +- .../{project_cache.rs => projects/cache.rs} | 14 ++++++++------ relay-server/src/services/projects/mod.rs | 3 +++ .../{project.rs => projects/project/mod.rs} | 10 +++++----- .../{ => projects}/project/state/fetch_state.rs | 6 +++--- .../services/{ => projects}/project/state/info.rs | 4 ++-- .../state.rs => projects/project/state/mod.rs} | 6 +++--- .../{project_local.rs => projects/source/local.rs} | 6 +++--- relay-server/src/services/projects/source/mod.rs | 4 ++++ 
.../{project_redis.rs => projects/source/redis.rs} | 4 ++-- .../source/upstream.rs} | 8 ++++---- relay-server/src/services/spooler/mod.rs | 4 ++-- relay-server/src/testutils.rs | 2 +- 24 files changed, 59 insertions(+), 53 deletions(-) rename relay-server/src/services/{project_cache.rs => projects/cache.rs} (99%) create mode 100644 relay-server/src/services/projects/mod.rs rename relay-server/src/services/{project.rs => projects/project/mod.rs} (98%) rename relay-server/src/services/{ => projects}/project/state/fetch_state.rs (95%) rename relay-server/src/services/{ => projects}/project/state/info.rs (98%) rename relay-server/src/services/{project/state.rs => projects/project/state/mod.rs} (97%) rename relay-server/src/services/{project_local.rs => projects/source/local.rs} (97%) create mode 100644 relay-server/src/services/projects/source/mod.rs rename relay-server/src/services/{project_redis.rs => projects/source/redis.rs} (97%) rename relay-server/src/services/{project_upstream.rs => projects/source/upstream.rs} (99%) diff --git a/relay-server/src/endpoints/batch_metrics.rs b/relay-server/src/endpoints/batch_metrics.rs index 77abf88833..d068e59fde 100644 --- a/relay-server/src/endpoints/batch_metrics.rs +++ b/relay-server/src/endpoints/batch_metrics.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::extractors::{SignedBytes, StartTime}; use crate::service::ServiceState; use crate::services::processor::ProcessBatchedMetrics; -use crate::services::project_cache::BucketSource; +use crate::services::projects::cache::BucketSource; #[derive(Debug, Serialize, Deserialize)] struct SendMetricsResponse {} diff --git a/relay-server/src/endpoints/common.rs b/relay-server/src/endpoints/common.rs index 9d5e9c50fc..5a124b84b7 100644 --- a/relay-server/src/endpoints/common.rs +++ b/relay-server/src/endpoints/common.rs @@ -13,7 +13,7 @@ use crate::service::ServiceState; use crate::services::buffer::EnvelopeBuffer; use crate::services::outcome::{DiscardReason, 
Outcome}; use crate::services::processor::{MetricData, ProcessMetricMeta, ProcessingGroup}; -use crate::services::project_cache::{CheckEnvelope, ProcessMetrics, ValidateEnvelope}; +use crate::services::projects::cache::{CheckEnvelope, ProcessMetrics, ValidateEnvelope}; use crate::statsd::{RelayCounters, RelayHistograms}; use crate::utils::{self, ApiErrorResponse, FormDataIter, ManagedEnvelope}; diff --git a/relay-server/src/endpoints/project_configs.rs b/relay-server/src/endpoints/project_configs.rs index 2ede7027ec..391687a31f 100644 --- a/relay-server/src/endpoints/project_configs.rs +++ b/relay-server/src/endpoints/project_configs.rs @@ -16,8 +16,10 @@ use crate::endpoints::forward; use crate::extractors::SignedJson; use crate::service::ServiceState; use crate::services::global_config::{self, StatusResponse}; -use crate::services::project::{LimitedParsedProjectState, ParsedProjectState, ProjectState}; -use crate::services::project_cache::{GetCachedProjectState, GetProjectState}; +use crate::services::projects::cache::{GetCachedProjectState, GetProjectState}; +use crate::services::projects::project::{ + LimitedParsedProjectState, ParsedProjectState, ProjectState, +}; /// V2 version of this endpoint. /// @@ -89,7 +91,7 @@ struct GetProjectStatesResponseWrapper { /// Request payload of the project config endpoint. /// -/// This is a replica of [`GetProjectStates`](crate::services::project_upstream::GetProjectStates) +/// This is a replica of [`GetProjectStates`](crate::services::projects::source::upstream::GetProjectStates) /// which allows skipping invalid project keys. 
#[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] diff --git a/relay-server/src/service.rs b/relay-server/src/service.rs index 69b20af3f4..d7e954fe86 100644 --- a/relay-server/src/service.rs +++ b/relay-server/src/service.rs @@ -12,7 +12,7 @@ use crate::services::metrics::{Aggregator, RouterService}; use crate::services::outcome::{OutcomeProducer, OutcomeProducerService, TrackOutcome}; use crate::services::outcome_aggregator::OutcomeAggregator; use crate::services::processor::{self, EnvelopeProcessor, EnvelopeProcessorService}; -use crate::services::project_cache::{ProjectCache, ProjectCacheService, Services}; +use crate::services::projects::cache::{ProjectCache, ProjectCacheService, Services}; use crate::services::relays::{RelayCache, RelayCacheService}; use crate::services::stats::RelayStats; #[cfg(feature = "processing")] diff --git a/relay-server/src/services/buffer/mod.rs b/relay-server/src/services/buffer/mod.rs index d732452763..3bcc0c5fea 100644 --- a/relay-server/src/services/buffer/mod.rs +++ b/relay-server/src/services/buffer/mod.rs @@ -21,7 +21,7 @@ use crate::services::outcome::DiscardReason; use crate::services::outcome::Outcome; use crate::services::outcome::TrackOutcome; use crate::services::processor::ProcessingGroup; -use crate::services::project_cache::{DequeuedEnvelope, ProjectCache, UpdateProject}; +use crate::services::projects::cache::{DequeuedEnvelope, ProjectCache, UpdateProject}; use crate::services::test_store::TestStore; use crate::statsd::{RelayCounters, RelayHistograms}; diff --git a/relay-server/src/services/mod.rs b/relay-server/src/services/mod.rs index be7b542b06..8c06c1d0af 100644 --- a/relay-server/src/services/mod.rs +++ b/relay-server/src/services/mod.rs @@ -7,7 +7,7 @@ //! and dispatches the graceful shutdown signal. Internally, it creates several other services //! comprising the service state: //! -//! - [`ProjectCache`](project_cache::ProjectCache): A cache that serves queries for project +//! 
- [`ProjectCache`](projects::cache::ProjectCache): A cache that serves queries for project //! configurations. Its requests are debounced and batched based on a configured interval (100ms //! by default). Also, missing projects are cached for some time. //! - [`EnvelopeProcessor`](processor::EnvelopeProcessor): A worker pool for CPU-intensive tasks. @@ -35,10 +35,7 @@ pub mod metrics; pub mod outcome; pub mod outcome_aggregator; pub mod processor; -pub mod project; -pub mod project_cache; -pub mod project_local; -pub mod project_upstream; +pub mod projects; pub mod relays; pub mod server; pub mod spooler; @@ -46,7 +43,5 @@ pub mod stats; pub mod test_store; pub mod upstream; -#[cfg(feature = "processing")] -pub mod project_redis; #[cfg(feature = "processing")] pub mod store; diff --git a/relay-server/src/services/processor.rs b/relay-server/src/services/processor.rs index 0d81f67043..e6502b5187 100644 --- a/relay-server/src/services/processor.rs +++ b/relay-server/src/services/processor.rs @@ -72,10 +72,10 @@ use crate::services::global_config::GlobalConfigHandle; use crate::services::metrics::{Aggregator, MergeBuckets}; use crate::services::outcome::{DiscardReason, Outcome, TrackOutcome}; use crate::services::processor::event::FiltersStatus; -use crate::services::project::{ProjectInfo, ProjectState}; -use crate::services::project_cache::{ +use crate::services::projects::cache::{ AddMetricMeta, BucketSource, ProcessMetrics, ProjectCache, UpdateRateLimits, }; +use crate::services::projects::project::{ProjectInfo, ProjectState}; use crate::services::test_store::{Capture, TestStore}; use crate::services::upstream::{ SendRequest, UpstreamRelay, UpstreamRequest, UpstreamRequestError, diff --git a/relay-server/src/services/processor/dynamic_sampling.rs b/relay-server/src/services/processor/dynamic_sampling.rs index 1267015444..b75a4162af 100644 --- a/relay-server/src/services/processor/dynamic_sampling.rs +++ b/relay-server/src/services/processor/dynamic_sampling.rs @@ 
-252,7 +252,7 @@ mod tests { use crate::services::processor::{ ProcessEnvelope, ProcessingExtractedMetrics, ProcessingGroup, SpanGroup, }; - use crate::services::project::ProjectInfo; + use crate::services::projects::project::ProjectInfo; use crate::testutils::{ self, create_test_processor, new_envelope, state_with_rule_and_condition, }; diff --git a/relay-server/src/services/processor/metrics.rs b/relay-server/src/services/processor/metrics.rs index 8c6ac4fcc4..0d28285e77 100644 --- a/relay-server/src/services/processor/metrics.rs +++ b/relay-server/src/services/processor/metrics.rs @@ -5,8 +5,8 @@ use relay_quotas::Scoping; use crate::metrics::MetricOutcomes; use crate::services::outcome::Outcome; -use crate::services::project::ProjectInfo; -use crate::services::project_cache::BucketSource; +use crate::services::projects::cache::BucketSource; +use crate::services::projects::project::ProjectInfo; /// Checks if the namespace of the passed bucket is valid. /// diff --git a/relay-server/src/services/processor/profile.rs b/relay-server/src/services/processor/profile.rs index be93112a02..6f0565afca 100644 --- a/relay-server/src/services/processor/profile.rs +++ b/relay-server/src/services/processor/profile.rs @@ -152,7 +152,7 @@ mod tests { use crate::envelope::Envelope; use crate::extractors::RequestMeta; use crate::services::processor::{ProcessEnvelope, ProcessingGroup}; - use crate::services::project::ProjectInfo; + use crate::services::projects::project::ProjectInfo; use crate::testutils::create_test_processor; use crate::utils::ManagedEnvelope; diff --git a/relay-server/src/services/processor/report.rs b/relay-server/src/services/processor/report.rs index 002a0753de..d0c101755b 100644 --- a/relay-server/src/services/processor/report.rs +++ b/relay-server/src/services/processor/report.rs @@ -274,7 +274,7 @@ mod tests { use crate::extractors::RequestMeta; use crate::services::outcome::RuleCategory; use crate::services::processor::{ProcessEnvelope, ProcessingGroup}; 
- use crate::services::project::ProjectInfo; + use crate::services::projects::project::ProjectInfo; use crate::testutils::{self, create_test_processor}; use crate::utils::ManagedEnvelope; diff --git a/relay-server/src/services/processor/span/processing.rs b/relay-server/src/services/processor/span/processing.rs index 2f6f9f94dd..a9801e45f8 100644 --- a/relay-server/src/services/processor/span/processing.rs +++ b/relay-server/src/services/processor/span/processing.rs @@ -768,7 +768,7 @@ mod tests { use crate::envelope::Envelope; use crate::services::processor::{ProcessingExtractedMetrics, ProcessingGroup}; - use crate::services::project::ProjectInfo; + use crate::services::projects::project::ProjectInfo; use crate::utils::ManagedEnvelope; use super::*; diff --git a/relay-server/src/services/project_cache.rs b/relay-server/src/services/projects/cache.rs similarity index 99% rename from relay-server/src/services/project_cache.rs rename to relay-server/src/services/projects/cache.rs index 815d265226..20d9eed7f3 100644 --- a/relay-server/src/services/project_cache.rs +++ b/relay-server/src/services/projects/cache.rs @@ -9,7 +9,7 @@ use crate::services::global_config; use crate::services::processor::{ EncodeMetrics, EnvelopeProcessor, MetricData, ProcessEnvelope, ProcessingGroup, ProjectMetrics, }; -use crate::services::project::state::UpstreamProjectState; +use crate::services::projects::project::state::UpstreamProjectState; use crate::Envelope; use chrono::{DateTime, Utc}; use hashbrown::HashSet; @@ -29,11 +29,13 @@ use tokio::time::Instant; use crate::services::metrics::{Aggregator, FlushBuckets}; use crate::services::outcome::{DiscardReason, Outcome, TrackOutcome}; -use crate::services::project::{Project, ProjectFetchState, ProjectSender, ProjectState}; -use crate::services::project_local::{LocalProjectSource, LocalProjectSourceService}; +use crate::services::projects::project::{Project, ProjectFetchState, ProjectSender, ProjectState}; +use 
crate::services::projects::source::local::{LocalProjectSource, LocalProjectSourceService}; #[cfg(feature = "processing")] -use crate::services::project_redis::RedisProjectSource; -use crate::services::project_upstream::{UpstreamProjectSource, UpstreamProjectSourceService}; +use crate::services::projects::source::redis::RedisProjectSource; +use crate::services::projects::source::upstream::{ + UpstreamProjectSource, UpstreamProjectSourceService, +}; use crate::services::spooler::{ self, Buffer, BufferService, DequeueMany, Enqueue, QueueKey, RemoveMany, RestoreIndex, UnspooledEnvelope, BATCH_KEY_COUNT, @@ -669,7 +671,7 @@ impl ProjectCacheBroker { /// Ideally, we would use `check_expiry` to determine expiry here. /// However, for eviction, we want to add an additional delay, such that we do not delete /// a project that has expired recently and for which a fetch is already underway in - /// [`super::project_upstream`]. + /// [`super::source::upstream`]. fn evict_stale_project_caches(&mut self) { let eviction_start = Instant::now(); let delta = 2 * self.config.project_cache_expiry() + self.config.project_grace_period(); diff --git a/relay-server/src/services/projects/mod.rs b/relay-server/src/services/projects/mod.rs new file mode 100644 index 0000000000..bc8f253eee --- /dev/null +++ b/relay-server/src/services/projects/mod.rs @@ -0,0 +1,3 @@ +pub mod cache; +pub mod project; +pub mod source; diff --git a/relay-server/src/services/project.rs b/relay-server/src/services/projects/project/mod.rs similarity index 98% rename from relay-server/src/services/project.rs rename to relay-server/src/services/projects/project/mod.rs index 7e5ab2bb97..a553ebd89b 100644 --- a/relay-server/src/services/project.rs +++ b/relay-server/src/services/projects/project/mod.rs @@ -16,8 +16,7 @@ use crate::envelope::ItemType; use crate::services::metrics::{Aggregator, MergeBuckets}; use crate::services::outcome::{DiscardReason, Outcome}; use crate::services::processor::{EncodeMetricMeta, 
EnvelopeProcessor, ProcessProjectMetrics}; -use crate::services::project::state::ExpiryState; -use crate::services::project_cache::{ +use crate::services::projects::cache::{ CheckedEnvelope, ProcessMetrics, ProjectCache, RequestUpdate, }; use crate::utils::{Enforcement, SeqCount}; @@ -28,7 +27,8 @@ use crate::utils::{CheckLimits, EnvelopeLimiter, ManagedEnvelope, RetryBackoff}; pub mod state; pub use state::{ - LimitedParsedProjectState, ParsedProjectState, ProjectFetchState, ProjectInfo, ProjectState, + ExpiryState, LimitedParsedProjectState, ParsedProjectState, ProjectFetchState, ProjectInfo, + ProjectState, }; /// Sender type for messages that respond with project states. @@ -433,7 +433,7 @@ impl Project { /// `no_cache` should be passed from the requesting call. Updates with `no_cache` will always /// take precedence. /// - /// [`ValidateEnvelope`]: crate::services::project_cache::ValidateEnvelope + /// [`ValidateEnvelope`]: crate::services::projects::cache::ValidateEnvelope pub fn update_state( &mut self, project_cache: &Addr, @@ -515,7 +515,7 @@ impl Project { /// Runs the checks on incoming envelopes. 
/// - /// See, [`crate::services::project_cache::CheckEnvelope`] for more information + /// See, [`crate::services::projects::cache::CheckEnvelope`] for more information /// /// * checks the rate limits /// * validates the envelope meta in `check_request` - determines whether the given request diff --git a/relay-server/src/services/project/state/fetch_state.rs b/relay-server/src/services/projects/project/state/fetch_state.rs similarity index 95% rename from relay-server/src/services/project/state/fetch_state.rs rename to relay-server/src/services/projects/project/state/fetch_state.rs index abc4a9f79e..020150f303 100644 --- a/relay-server/src/services/project/state/fetch_state.rs +++ b/relay-server/src/services/projects/project/state/fetch_state.rs @@ -5,8 +5,8 @@ use tokio::time::Instant; use relay_config::Config; use relay_dynamic_config::ProjectConfig; -use crate::services::project::state::info::ProjectInfo; -use crate::services::project::ProjectState; +use crate::services::projects::project::state::info::ProjectInfo; +use crate::services::projects::project::ProjectState; /// Hides a cached project state and only exposes it if it has not expired. #[derive(Clone, Debug)] @@ -65,7 +65,7 @@ impl ProjectFetchState { /// Create a config that immediately counts as expired. /// - /// This is what [`Project`](crate::services::project::Project) initializes itself with. + /// This is what [`Project`](crate::services::projects::project::Project) initializes itself with. 
pub fn expired() -> Self { Self { // Make sure the state immediately qualifies as expired: diff --git a/relay-server/src/services/project/state/info.rs b/relay-server/src/services/projects/project/state/info.rs similarity index 98% rename from relay-server/src/services/project/state/info.rs rename to relay-server/src/services/projects/project/state/info.rs index 7b8b9e3975..eab9b8fb62 100644 --- a/relay-server/src/services/project/state/info.rs +++ b/relay-server/src/services/projects/project/state/info.rs @@ -16,7 +16,7 @@ use url::Url; use crate::envelope::Envelope; use crate::extractors::RequestMeta; use crate::services::outcome::DiscardReason; -use crate::services::project::PublicKeyConfig; +use crate::services::projects::project::PublicKeyConfig; /// Information about an enabled project. /// @@ -189,7 +189,7 @@ impl ProjectInfo { /// scoping. /// /// To get the own scoping of this ProjectKey without amending request information, use - /// [`Project::scoping`](crate::services::project::Project::scoping) instead. + /// [`Project::scoping`](crate::services::projects::project::Project::scoping) instead. pub fn scope_request(&self, meta: &RequestMeta) -> Scoping { let mut scoping = meta.get_partial_scoping(); diff --git a/relay-server/src/services/project/state.rs b/relay-server/src/services/projects/project/state/mod.rs similarity index 97% rename from relay-server/src/services/project/state.rs rename to relay-server/src/services/projects/project/state/mod.rs index 70616d4a6e..868ad5ed4a 100644 --- a/relay-server/src/services/project/state.rs +++ b/relay-server/src/services/projects/project/state/mod.rs @@ -22,7 +22,7 @@ pub enum ProjectState { Disabled, /// A project to which one of the following conditions apply: /// - The project has not yet been fetched. - /// - The upstream returned "pending" for this project (see [`crate::services::project_upstream`]). 
+ /// - The upstream returned "pending" for this project (see [`crate::services::projects::source::upstream`]). /// - The upstream returned an unparsable project so we have to try again. /// - The project has expired and must be treated as "has not been fetched". Pending, @@ -95,7 +95,7 @@ pub struct ParsedProjectState { /// Project info. /// /// This contains no information when `disabled` is `true`, except for - /// public keys in static project configs (see [`crate::services::project_local`]). + /// public keys in static project configs (see [`crate::services::projects::source::local`]). #[serde(flatten)] pub info: ProjectInfo, } @@ -109,7 +109,7 @@ pub struct LimitedParsedProjectState { /// Limited project info for external Relays. /// /// This contains no information when `disabled` is `true`, except for - /// public keys in static project configs (see [`crate::services::project_local`]). + /// public keys in static project configs (see [`crate::services::projects::source::local`]). #[serde(with = "LimitedProjectInfo")] #[serde(flatten)] pub info: ProjectInfo, diff --git a/relay-server/src/services/project_local.rs b/relay-server/src/services/projects/source/local.rs similarity index 97% rename from relay-server/src/services/project_local.rs rename to relay-server/src/services/projects/source/local.rs index 745203780a..94f3a50835 100644 --- a/relay-server/src/services/project_local.rs +++ b/relay-server/src/services/projects/source/local.rs @@ -9,8 +9,8 @@ use relay_system::{AsyncResponse, FromMessage, Interface, Receiver, Sender, Serv use tokio::sync::mpsc; use tokio::time::Instant; -use crate::services::project::{ParsedProjectState, ProjectState}; -use crate::services::project_cache::FetchOptionalProjectState; +use crate::services::projects::cache::FetchOptionalProjectState; +use crate::services::projects::project::{ParsedProjectState, ProjectState}; /// Service interface of the local project source. 
#[derive(Debug)] @@ -196,7 +196,7 @@ mod tests { use std::str::FromStr; use super::*; - use crate::services::project::{ProjectInfo, PublicKeyConfig}; + use crate::services::projects::project::{ProjectInfo, PublicKeyConfig}; /// Tests that we can follow the symlinks and read the project file properly. #[tokio::test] diff --git a/relay-server/src/services/projects/source/mod.rs b/relay-server/src/services/projects/source/mod.rs new file mode 100644 index 0000000000..80f6a34d78 --- /dev/null +++ b/relay-server/src/services/projects/source/mod.rs @@ -0,0 +1,4 @@ +pub mod local; +#[cfg(feature = "processing")] +pub mod redis; +pub mod upstream; diff --git a/relay-server/src/services/project_redis.rs b/relay-server/src/services/projects/source/redis.rs similarity index 97% rename from relay-server/src/services/project_redis.rs rename to relay-server/src/services/projects/source/redis.rs index 18e8dd0e53..e2a520f4e7 100644 --- a/relay-server/src/services/project_redis.rs +++ b/relay-server/src/services/projects/source/redis.rs @@ -5,8 +5,8 @@ use relay_config::Config; use relay_redis::{RedisError, RedisPool}; use relay_statsd::metric; -use crate::services::project::state::UpstreamProjectState; -use crate::services::project::{ParsedProjectState, ProjectState}; +use crate::services::projects::project::state::UpstreamProjectState; +use crate::services::projects::project::{ParsedProjectState, ProjectState}; use crate::statsd::{RelayCounters, RelayHistograms, RelayTimers}; #[derive(Debug, Clone)] diff --git a/relay-server/src/services/project_upstream.rs b/relay-server/src/services/projects/source/upstream.rs similarity index 99% rename from relay-server/src/services/project_upstream.rs rename to relay-server/src/services/projects/source/upstream.rs index 0b54cc17e6..960aa4c882 100644 --- a/relay-server/src/services/project_upstream.rs +++ b/relay-server/src/services/projects/source/upstream.rs @@ -18,10 +18,10 @@ use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; use 
tokio::time::Instant; -use crate::services::project::state::UpstreamProjectState; -use crate::services::project::ParsedProjectState; -use crate::services::project::ProjectState; -use crate::services::project_cache::FetchProjectState; +use crate::services::projects::cache::FetchProjectState; +use crate::services::projects::project::state::UpstreamProjectState; +use crate::services::projects::project::ParsedProjectState; +use crate::services::projects::project::ProjectState; use crate::services::upstream::{ Method, RequestPriority, SendQuery, UpstreamQuery, UpstreamRelay, UpstreamRequestError, }; diff --git a/relay-server/src/services/spooler/mod.rs b/relay-server/src/services/spooler/mod.rs index 36debfc9bf..54c2530550 100644 --- a/relay-server/src/services/spooler/mod.rs +++ b/relay-server/src/services/spooler/mod.rs @@ -13,7 +13,7 @@ //! be happening. //! //! The initial state is always [`InMemory`], and if the Relay can properly fetch all the -//! [`crate::services::project::ProjectState`] it continues to use the memory as temporary spool. +//! [`crate::services::projects::project::ProjectState`] it continues to use the memory as temporary spool. //! //! Keeping the envelopes in memory as long as we can, we ensure the fast unspool operations and //! fast processing times. 
@@ -55,7 +55,7 @@ use crate::envelope::{Envelope, EnvelopeError}; use crate::extractors::StartTime; use crate::services::outcome::TrackOutcome; use crate::services::processor::ProcessingGroup; -use crate::services::project_cache::{ProjectCache, RefreshIndexCache, UpdateSpoolIndex}; +use crate::services::projects::cache::{ProjectCache, RefreshIndexCache, UpdateSpoolIndex}; use crate::services::test_store::TestStore; use crate::statsd::{RelayCounters, RelayGauges, RelayHistograms, RelayTimers}; use crate::utils::{ManagedEnvelope, MemoryChecker}; diff --git a/relay-server/src/testutils.rs b/relay-server/src/testutils.rs index 47e8f988fd..9f4c255d35 100644 --- a/relay-server/src/testutils.rs +++ b/relay-server/src/testutils.rs @@ -20,7 +20,7 @@ use crate::service::create_redis_pools; use crate::services::global_config::GlobalConfigHandle; use crate::services::outcome::TrackOutcome; use crate::services::processor::{self, EnvelopeProcessorService}; -use crate::services::project::ProjectInfo; +use crate::services::projects::project::ProjectInfo; use crate::services::test_store::TestStore; use crate::utils::{ThreadPool, ThreadPoolBuilder}; From cca0db878d679db1163d3809f1f4751d4839d2c2 Mon Sep 17 00:00:00 2001 From: David Herberth Date: Tue, 15 Oct 2024 11:11:12 +0200 Subject: [PATCH 05/16] ref(otel): Disable default features for otel schema (#4141) By default the package pulls in all features, we only need a select few of them, this removes a few dependencies from Relay. 
--- Cargo.lock | 94 ++++++++---------------------------------------------- Cargo.toml | 2 +- 2 files changed, 15 insertions(+), 81 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fde0527ff0..a89b816cbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -197,28 +197,6 @@ dependencies = [ "zstd-safe", ] -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", -] - [[package]] name = "async-trait" version = "0.1.68" @@ -1547,7 +1525,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.2.5", + "indexmap", "slab", "tokio", "tokio-util", @@ -1573,12 +1551,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.14.5" @@ -1595,7 +1567,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown 0.14.5", + "hashbrown", ] [[package]] @@ -1815,19 +1787,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-timeout" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" -dependencies = [ - "hyper", - "hyper-util", - "pin-project-lite", - "tokio", - "tower-service", -] - [[package]] name = 
"hyper-tls" version = "0.6.0" @@ -1914,16 +1873,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "indexmap" -version = "1.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.2.5" @@ -1931,7 +1880,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown", ] [[package]] @@ -2216,7 +2165,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" dependencies = [ - "hashbrown 0.14.5", + "hashbrown", ] [[package]] @@ -2707,12 +2656,10 @@ dependencies = [ "futures-channel", "futures-executor", "futures-util", - "glob", "once_cell", "opentelemetry", "percent-encoding", "rand", - "serde_json", "thiserror", ] @@ -2975,7 +2922,7 @@ checksum = "70c501afe3a2e25c9bd219aa56ec1e04cdb3fcdd763055be268778c13fa82c1f" dependencies = [ "autocfg", "equivalent", - "indexmap 2.2.5", + "indexmap", ] [[package]] @@ -3346,7 +3293,7 @@ version = "24.9.0" dependencies = [ "criterion", "hash32", - "hashbrown 0.14.5", + "hashbrown", "parking_lot", "relay-base-schema", "relay-common", @@ -3517,7 +3464,7 @@ dependencies = [ name = "relay-filter" version = "24.9.0" dependencies = [ - "indexmap 2.2.5", + "indexmap", "insta", "ipnetwork", "once_cell", @@ -3571,7 +3518,7 @@ dependencies = [ "criterion", "fnv", "hash32", - "hashbrown 0.14.5", + "hashbrown", "insta", "itertools 0.13.0", "priority-queue", @@ -3702,7 +3649,7 @@ dependencies = [ name = "relay-quotas" version = "24.9.0" dependencies = [ - "hashbrown 0.14.5", + "hashbrown", "insta", "itertools 0.13.0", "relay-base-schema", @@ -3783,7 +3730,7 @@ dependencies = [ 
"flate2", "fnv", "futures", - "hashbrown 0.14.5", + "hashbrown", "http", "hyper-util", "insta", @@ -4432,7 +4379,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.5", + "indexmap", "itoa", "ryu", "serde", @@ -4660,10 +4607,10 @@ dependencies = [ "futures-intrusive", "futures-io", "futures-util", - "hashbrown 0.14.5", + "hashbrown", "hashlink", "hex", - "indexmap 2.2.5", + "indexmap", "log", "memchr", "once_cell", @@ -5214,7 +5161,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.5", + "indexmap", "toml_datetime", "winnow", ] @@ -5225,25 +5172,16 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6f6ba989e4b2c58ae83d862d3a3e27690b6e3ae630d0deb59f3697f32aa88ad" dependencies = [ - "async-stream", "async-trait", - "axum", "base64 0.22.1", "bytes", - "h2", "http", "http-body", "http-body-util", - "hyper", - "hyper-timeout", - "hyper-util", "percent-encoding", "pin-project", "prost", - "socket2", - "tokio", "tokio-stream", - "tower", "tower-layer", "tower-service", "tracing", @@ -5257,13 +5195,9 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "indexmap 1.9.2", "pin-project", "pin-project-lite", - "rand", - "slab", "tokio", - "tokio-util", "tower-layer", "tower-service", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 72998d2388..2865dccce0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,7 +118,7 @@ multer = "3.1.0" num-traits = "0.2.18" num_cpus = "1.13.0" once_cell = "1.13.1" -opentelemetry-proto = "0.7.0" +opentelemetry-proto = { version = "0.7.0", default-features = false } parking_lot = "0.12.1" path-slash = "0.2.1" pest = 
"2.1.3" From 58dd95ef35363739bf9734b05023d4368b3ef264 Mon Sep 17 00:00:00 2001 From: Joris Bayer Date: Tue, 15 Oct 2024 13:21:31 +0200 Subject: [PATCH 06/16] instr(projects): Log fetch failure (#4142) Measure impact before we merge https://github.com/getsentry/relay/pull/4140. --- relay-server/src/services/projects/cache.rs | 41 ++++++++++++++++----- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/relay-server/src/services/projects/cache.rs b/relay-server/src/services/projects/cache.rs index 20d9eed7f3..6bab22f493 100644 --- a/relay-server/src/services/projects/cache.rs +++ b/relay-server/src/services/projects/cache.rs @@ -1,4 +1,5 @@ use std::collections::{BTreeMap, BTreeSet}; +use std::convert::Infallible; use std::error::Error; use std::sync::Arc; use std::time::Duration; @@ -493,12 +494,11 @@ impl ProjectSource { project_key: ProjectKey, no_cache: bool, cached_state: ProjectFetchState, - ) -> Result { + ) -> Result { let state_opt = self .local_source .send(FetchOptionalProjectState { project_key }) - .await - .map_err(|_| ())?; + .await?; if let Some(state) = state_opt { return Ok(ProjectFetchState::new(state)); @@ -516,12 +516,11 @@ impl ProjectSource { if let Some(redis_source) = self.redis_source { let current_revision = current_revision.clone(); - let redis_permit = self.redis_semaphore.acquire().await.map_err(|_| ())?; + let redis_permit = self.redis_semaphore.acquire().await?; let state_fetch_result = tokio::task::spawn_blocking(move || { redis_source.get_config_if_changed(project_key, current_revision.as_deref()) }) - .await - .map_err(|_| ())?; + .await?; drop(redis_permit); match state_fetch_result { @@ -553,8 +552,7 @@ impl ProjectSource { current_revision, no_cache, }) - .await - .map_err(|_| ())?; + .await?; match state { UpstreamProjectState::New(state) => Ok(ProjectFetchState::new(state.sanitized())), @@ -563,6 +561,22 @@ impl ProjectSource { } } +#[derive(Debug, thiserror::Error)] +enum ProjectSourceError { + #[error("redis permit 
error {0}")] + RedisPermit(#[from] tokio::sync::AcquireError), + #[error("redis join error {0}")] + RedisJoin(#[from] tokio::task::JoinError), + #[error("upstream error {0}")] + Upstream(#[from] relay_system::SendError), +} + +impl From for ProjectSourceError { + fn from(value: Infallible) -> Self { + match value {} + } +} + /// Updates the cache with new project state information. struct UpdateProjectState { /// The public key to fetch the project by. @@ -777,7 +791,16 @@ impl ProjectCacheBroker { let state = source .fetch(project_key, no_cache, cached_state) .await - .unwrap_or_else(|()| ProjectFetchState::disabled()); + .unwrap_or_else(|e| { + relay_log::error!( + error = &e as &dyn Error, + tags.project_key = %project_key, + "Failed to fetch project from source" + ); + // TODO: change this to ProjectFetchState::pending() once we consider it safe to do so. + // see https://github.com/getsentry/relay/pull/4140. + ProjectFetchState::disabled() + }); let message = UpdateProjectState { project_key, From 28112595dd068e6a11caf7f232d3703f9e066a91 Mon Sep 17 00:00:00 2001 From: David Herberth Date: Tue, 15 Oct 2024 13:50:20 +0200 Subject: [PATCH 07/16] build(deps): bump tonic from 0.12.2 to 0.12.3 (#4143) From https://github.com/getsentry/relay/pull/4091 There is currently an [open security advisory](https://github.com/getsentry/relay/security/dependabot/93) for `0.12.2`, not relevant for Relay, but might as well update :) --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a89b816cbc..ced53e9923 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -397,7 +397,7 @@ dependencies = [ "bitflags 2.4.1", "cexpr", "clang-sys", - "itertools 0.13.0", + "itertools 0.10.5", "log", "prettyplease", "proc-macro2", @@ -2984,7 +2984,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", - "itertools 
0.13.0", + "itertools 0.10.5", "proc-macro2", "quote", "syn 2.0.77", @@ -5127,9 +5127,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.12" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -5168,9 +5168,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f6ba989e4b2c58ae83d862d3a3e27690b6e3ae630d0deb59f3697f32aa88ad" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-trait", "base64 0.22.1", From 2176efefd6f76fffa93e470d51c6918ff494bf23 Mon Sep 17 00:00:00 2001 From: getsentry-bot Date: Tue, 15 Oct 2024 17:00:40 +0000 Subject: [PATCH 08/16] release: 24.10.0 --- CHANGELOG.md | 2 +- Cargo.lock | 70 ++++++++++++++-------------- relay-auth/Cargo.toml | 2 +- relay-base-schema/Cargo.toml | 2 +- relay-cardinality/Cargo.toml | 2 +- relay-cogs/Cargo.toml | 2 +- relay-common/Cargo.toml | 2 +- relay-config/Cargo.toml | 2 +- relay-crash/Cargo.toml | 2 +- relay-dynamic-config/Cargo.toml | 2 +- relay-event-derive/Cargo.toml | 2 +- relay-event-normalization/Cargo.toml | 2 +- relay-event-schema/Cargo.toml | 2 +- relay-ffi-macros/Cargo.toml | 2 +- relay-ffi/Cargo.toml | 2 +- relay-filter/Cargo.toml | 2 +- relay-kafka/Cargo.toml | 2 +- relay-log/Cargo.toml | 2 +- relay-metrics/Cargo.toml | 2 +- relay-monitors/Cargo.toml | 2 +- relay-pattern/Cargo.toml | 2 +- relay-pattern/fuzz/Cargo.toml | 2 +- relay-pii/Cargo.toml | 2 +- relay-profiling/Cargo.toml | 2 +- relay-protocol-derive/Cargo.toml | 2 +- relay-protocol/Cargo.toml | 2 +- relay-quotas/Cargo.toml | 2 +- relay-redis/Cargo.toml | 2 +- relay-replays/Cargo.toml | 2 +- relay-sampling/Cargo.toml | 2 +- 
relay-server/Cargo.toml | 2 +- relay-spans/Cargo.toml | 2 +- relay-statsd/Cargo.toml | 2 +- relay-system/Cargo.toml | 2 +- relay-test/Cargo.toml | 2 +- relay-ua/Cargo.toml | 2 +- relay/Cargo.toml | 2 +- 37 files changed, 71 insertions(+), 71 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc96f5f8af..bca8fe15fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## Unreleased +## 24.10.0 **Breaking Changes:** diff --git a/Cargo.lock b/Cargo.lock index ced53e9923..b5703e7f5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3214,7 +3214,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "relay" -version = "24.9.0" +version = "24.10.0" dependencies = [ "anyhow", "clap", @@ -3233,7 +3233,7 @@ dependencies = [ [[package]] name = "relay-auth" -version = "24.9.0" +version = "24.10.0" dependencies = [ "chrono", "data-encoding", @@ -3250,7 +3250,7 @@ dependencies = [ [[package]] name = "relay-base-schema" -version = "24.9.0" +version = "24.10.0" dependencies = [ "regex", "relay-common", @@ -3289,7 +3289,7 @@ dependencies = [ [[package]] name = "relay-cardinality" -version = "24.9.0" +version = "24.10.0" dependencies = [ "criterion", "hash32", @@ -3308,11 +3308,11 @@ dependencies = [ [[package]] name = "relay-cogs" -version = "24.9.0" +version = "24.10.0" [[package]] name = "relay-common" -version = "24.9.0" +version = "24.10.0" dependencies = [ "chrono", "criterion", @@ -3326,7 +3326,7 @@ dependencies = [ [[package]] name = "relay-config" -version = "24.9.0" +version = "24.10.0" dependencies = [ "anyhow", "human-size", @@ -3348,7 +3348,7 @@ dependencies = [ [[package]] name = "relay-crash" -version = "24.9.0" +version = "24.10.0" dependencies = [ "bindgen", "cmake", @@ -3356,7 +3356,7 @@ dependencies = [ [[package]] name = "relay-dynamic-config" -version = "24.9.0" +version = "24.10.0" dependencies = [ "anyhow", "relay-auth", @@ -3379,7 +3379,7 @@ dependencies = [ [[package]] name = 
"relay-event-derive" -version = "24.9.0" +version = "24.10.0" dependencies = [ "proc-macro2", "quote", @@ -3389,7 +3389,7 @@ dependencies = [ [[package]] name = "relay-event-normalization" -version = "24.9.0" +version = "24.10.0" dependencies = [ "bytecount", "chrono", @@ -3423,7 +3423,7 @@ dependencies = [ [[package]] name = "relay-event-schema" -version = "24.9.0" +version = "24.10.0" dependencies = [ "bytecount", "chrono", @@ -3446,7 +3446,7 @@ dependencies = [ [[package]] name = "relay-ffi" -version = "24.9.0" +version = "24.10.0" dependencies = [ "anyhow", "relay-ffi-macros", @@ -3454,7 +3454,7 @@ dependencies = [ [[package]] name = "relay-ffi-macros" -version = "24.9.0" +version = "24.10.0" dependencies = [ "quote", "syn 1.0.109", @@ -3462,7 +3462,7 @@ dependencies = [ [[package]] name = "relay-filter" -version = "24.9.0" +version = "24.10.0" dependencies = [ "indexmap", "insta", @@ -3481,7 +3481,7 @@ dependencies = [ [[package]] name = "relay-kafka" -version = "24.9.0" +version = "24.10.0" dependencies = [ "rdkafka", "rdkafka-sys", @@ -3497,7 +3497,7 @@ dependencies = [ [[package]] name = "relay-log" -version = "24.9.0" +version = "24.10.0" dependencies = [ "console", "relay-common", @@ -3511,7 +3511,7 @@ dependencies = [ [[package]] name = "relay-metrics" -version = "24.9.0" +version = "24.10.0" dependencies = [ "bytecount", "chrono", @@ -3543,7 +3543,7 @@ dependencies = [ [[package]] name = "relay-monitors" -version = "24.9.0" +version = "24.10.0" dependencies = [ "relay-base-schema", "serde", @@ -3555,7 +3555,7 @@ dependencies = [ [[package]] name = "relay-pattern" -version = "24.9.0" +version = "24.10.0" dependencies = [ "criterion", "memchr", @@ -3565,7 +3565,7 @@ dependencies = [ [[package]] name = "relay-pattern-fuzz" -version = "0.0.0" +version = "24.10.0" dependencies = [ "libfuzzer-sys", "relay-pattern", @@ -3573,7 +3573,7 @@ dependencies = [ [[package]] name = "relay-pii" -version = "24.9.0" +version = "24.10.0" dependencies = [ "hmac", "insta", 
@@ -3600,7 +3600,7 @@ dependencies = [ [[package]] name = "relay-profiling" -version = "24.9.0" +version = "24.10.0" dependencies = [ "android_trace_log", "chrono", @@ -3620,7 +3620,7 @@ dependencies = [ [[package]] name = "relay-protocol" -version = "24.9.0" +version = "24.10.0" dependencies = [ "insta", "num-traits", @@ -3637,7 +3637,7 @@ dependencies = [ [[package]] name = "relay-protocol-derive" -version = "24.9.0" +version = "24.10.0" dependencies = [ "proc-macro2", "quote", @@ -3647,7 +3647,7 @@ dependencies = [ [[package]] name = "relay-quotas" -version = "24.9.0" +version = "24.10.0" dependencies = [ "hashbrown", "insta", @@ -3665,7 +3665,7 @@ dependencies = [ [[package]] name = "relay-redis" -version = "24.9.0" +version = "24.10.0" dependencies = [ "r2d2", "redis", @@ -3677,7 +3677,7 @@ dependencies = [ [[package]] name = "relay-replays" -version = "24.9.0" +version = "24.10.0" dependencies = [ "criterion", "flate2", @@ -3694,7 +3694,7 @@ dependencies = [ [[package]] name = "relay-sampling" -version = "24.9.0" +version = "24.10.0" dependencies = [ "anyhow", "chrono", @@ -3713,7 +3713,7 @@ dependencies = [ [[package]] name = "relay-server" -version = "24.9.0" +version = "24.10.0" dependencies = [ "anyhow", "arc-swap", @@ -3797,7 +3797,7 @@ dependencies = [ [[package]] name = "relay-spans" -version = "24.9.0" +version = "24.10.0" dependencies = [ "chrono", "hex", @@ -3811,7 +3811,7 @@ dependencies = [ [[package]] name = "relay-statsd" -version = "24.9.0" +version = "24.10.0" dependencies = [ "cadence", "crossbeam-channel", @@ -3823,7 +3823,7 @@ dependencies = [ [[package]] name = "relay-system" -version = "24.9.0" +version = "24.10.0" dependencies = [ "futures", "once_cell", @@ -3834,7 +3834,7 @@ dependencies = [ [[package]] name = "relay-test" -version = "24.9.0" +version = "24.10.0" dependencies = [ "relay-log", "relay-system", @@ -3843,7 +3843,7 @@ dependencies = [ [[package]] name = "relay-ua" -version = "24.9.0" +version = "24.10.0" dependencies = [ 
"once_cell", "uaparser", diff --git a/relay-auth/Cargo.toml b/relay-auth/Cargo.toml index 9b597bb1df..d74f9f1148 100644 --- a/relay-auth/Cargo.toml +++ b/relay-auth/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Authentication and crypto for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-base-schema/Cargo.toml b/relay-base-schema/Cargo.toml index 9dd1e5b86f..769d01348b 100644 --- a/relay-base-schema/Cargo.toml +++ b/relay-base-schema/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Basic types for Relay's API schema used across multiple services" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-cardinality/Cargo.toml b/relay-cardinality/Cargo.toml index 7f6d5818af..b66eefb00f 100644 --- a/relay-cardinality/Cargo.toml +++ b/relay-cardinality/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Metrics Cardinality Limiter" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE" publish = false diff --git a/relay-cogs/Cargo.toml b/relay-cogs/Cargo.toml index 3b69068489..2897455b68 100644 --- a/relay-cogs/Cargo.toml +++ b/relay-cogs/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Break down the cost of Relay by its features" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE" publish = false diff --git a/relay-common/Cargo.toml b/relay-common/Cargo.toml index 5420ee7338..1967e8578c 100644 --- 
a/relay-common/Cargo.toml +++ b/relay-common/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Common utilities and crate re-exports for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-config/Cargo.toml b/relay-config/Cargo.toml index 642cdc251e..2faba0abbc 100644 --- a/relay-config/Cargo.toml +++ b/relay-config/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Configuration for the Relay CLI and server" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-crash/Cargo.toml b/relay-crash/Cargo.toml index 382b7e8e1c..fa88b70855 100644 --- a/relay-crash/Cargo.toml +++ b/relay-crash/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Native crash reporting for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" build = "build.rs" license-file = "../LICENSE.md" diff --git a/relay-dynamic-config/Cargo.toml b/relay-dynamic-config/Cargo.toml index eebd57a1c2..cb2eb773b6 100644 --- a/relay-dynamic-config/Cargo.toml +++ b/relay-dynamic-config/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Dynamic configuration passed down from sentry" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-event-derive/Cargo.toml b/relay-event-derive/Cargo.toml index 2a69890564..6be12e0275 100644 --- a/relay-event-derive/Cargo.toml +++ b/relay-event-derive/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = 
"Derive for visitor traits on the Event schema" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-event-normalization/Cargo.toml b/relay-event-normalization/Cargo.toml index 9ee4a7d69e..4ad7d6edef 100644 --- a/relay-event-normalization/Cargo.toml +++ b/relay-event-normalization/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Event normalization and processing" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-event-schema/Cargo.toml b/relay-event-schema/Cargo.toml index e3d7a66b4f..017fec7f4d 100644 --- a/relay-event-schema/Cargo.toml +++ b/relay-event-schema/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Event schema (Error, Transaction, Security) and types for event processing" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-ffi-macros/Cargo.toml b/relay-ffi-macros/Cargo.toml index 74d80079c2..a25c01930a 100644 --- a/relay-ffi-macros/Cargo.toml +++ b/relay-ffi-macros/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Macros for error handling in FFI bindings" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-ffi/Cargo.toml b/relay-ffi/Cargo.toml index 37f6c25d55..61a5b85b4e 100644 --- a/relay-ffi/Cargo.toml +++ b/relay-ffi/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Utilities for error handling in FFI bindings" homepage = 
"https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-filter/Cargo.toml b/relay-filter/Cargo.toml index e61b9c6a24..e1a5e98126 100644 --- a/relay-filter/Cargo.toml +++ b/relay-filter/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Inbound data filters for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-kafka/Cargo.toml b/relay-kafka/Cargo.toml index f64f7ac51f..e313c9a564 100644 --- a/relay-kafka/Cargo.toml +++ b/relay-kafka/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Kafka related functionality for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-log/Cargo.toml b/relay-log/Cargo.toml index 005357491e..c0e2734ca4 100644 --- a/relay-log/Cargo.toml +++ b/relay-log/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Error reporting and logging for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-metrics/Cargo.toml b/relay-metrics/Cargo.toml index 95e75669b5..62c18b0936 100644 --- a/relay-metrics/Cargo.toml +++ b/relay-metrics/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Metrics protocol and processing" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git 
a/relay-monitors/Cargo.toml b/relay-monitors/Cargo.toml index 6985ad06ac..1d35c06df3 100644 --- a/relay-monitors/Cargo.toml +++ b/relay-monitors/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Monitors processing for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-pattern/Cargo.toml b/relay-pattern/Cargo.toml index e2a0ca2ae9..0b070b50bb 100644 --- a/relay-pattern/Cargo.toml +++ b/relay-pattern/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "A glob like pattern used throughout Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-pattern/fuzz/Cargo.toml b/relay-pattern/fuzz/Cargo.toml index 8d0750a09b..d525c55658 100644 --- a/relay-pattern/fuzz/Cargo.toml +++ b/relay-pattern/fuzz/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "relay-pattern-fuzz" -version = "0.0.0" +version = "24.10.0" publish = false edition = "2021" diff --git a/relay-pii/Cargo.toml b/relay-pii/Cargo.toml index 6dbe93551d..d15e2ce1dd 100644 --- a/relay-pii/Cargo.toml +++ b/relay-pii/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Scrubbing of personally identifiable information (PII) from events" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-profiling/Cargo.toml b/relay-profiling/Cargo.toml index 5e42825e2b..75485d3bca 100644 --- a/relay-profiling/Cargo.toml +++ b/relay-profiling/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Profiling processing for Relay" homepage = "https://getsentry.github.io/relay/" 
repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-protocol-derive/Cargo.toml b/relay-protocol-derive/Cargo.toml index dacfdf3712..36dc11b15a 100644 --- a/relay-protocol-derive/Cargo.toml +++ b/relay-protocol-derive/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Derives for Relay's protocol traits" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-protocol/Cargo.toml b/relay-protocol/Cargo.toml index c231e19148..dde64725e6 100644 --- a/relay-protocol/Cargo.toml +++ b/relay-protocol/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Types and traits for building JSON-based protocols and schemas" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-quotas/Cargo.toml b/relay-quotas/Cargo.toml index 3a4ddc78e9..4ac6cdb070 100644 --- a/relay-quotas/Cargo.toml +++ b/relay-quotas/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Sentry quotas and rate limiting" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-redis/Cargo.toml b/relay-redis/Cargo.toml index 2694e078a2..5dd94c19b7 100644 --- a/relay-redis/Cargo.toml +++ b/relay-redis/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Pooled Redis and Redis cluster abstraction for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" 
license-file = "../LICENSE.md" publish = false diff --git a/relay-replays/Cargo.toml b/relay-replays/Cargo.toml index 3e727ccbb6..2dc8ab0b81 100644 --- a/relay-replays/Cargo.toml +++ b/relay-replays/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Session replay functionality for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-sampling/Cargo.toml b/relay-sampling/Cargo.toml index 7c7c949cc2..a366b3e933 100644 --- a/relay-sampling/Cargo.toml +++ b/relay-sampling/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Dynamic sampling functionality for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-server/Cargo.toml b/relay-server/Cargo.toml index e817a203d7..dbec10dc95 100644 --- a/relay-server/Cargo.toml +++ b/relay-server/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Endpoints and services for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" build = "build.rs" license-file = "../LICENSE.md" diff --git a/relay-spans/Cargo.toml b/relay-spans/Cargo.toml index ed65eed8a7..807b2e9acf 100644 --- a/relay-spans/Cargo.toml +++ b/relay-spans/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Event normalization and processing" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE" publish = false diff --git a/relay-statsd/Cargo.toml b/relay-statsd/Cargo.toml index f5ee35cdab..1b74fe65d4 100644 --- a/relay-statsd/Cargo.toml +++ 
b/relay-statsd/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "High-level StatsD metric client for internal measurements" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-system/Cargo.toml b/relay-system/Cargo.toml index 9a33e90d21..a2d3bf9f06 100644 --- a/relay-system/Cargo.toml +++ b/relay-system/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Foundational system components for Relay's services" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-test/Cargo.toml b/relay-test/Cargo.toml index 274a5d7310..77ea2231d8 100644 --- a/relay-test/Cargo.toml +++ b/relay-test/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "Test utilities for Relay" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay-ua/Cargo.toml b/relay-ua/Cargo.toml index b3da7547c2..4d2087729b 100644 --- a/relay-ua/Cargo.toml +++ b/relay-ua/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "User agent parser with built-in rules" homepage = "https://getsentry.github.io/relay/" repository = "https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false diff --git a/relay/Cargo.toml b/relay/Cargo.toml index 90a255b35d..751d8f001e 100644 --- a/relay/Cargo.toml +++ b/relay/Cargo.toml @@ -4,7 +4,7 @@ authors = ["Sentry "] description = "The Relay binary, a proxy server for Sentry" homepage = "https://getsentry.github.io/relay/" repository = 
"https://github.com/getsentry/relay" -version = "24.9.0" +version = "24.10.0" edition = "2021" license-file = "../LICENSE.md" publish = false From cd8492787a69545b1d71e3f84d3e8bb0676c30fd Mon Sep 17 00:00:00 2001 From: Joris Bayer Date: Wed, 16 Oct 2024 13:13:33 +0200 Subject: [PATCH 09/16] ref(buffer): remove peek (#4136) The envelope buffer does not need to peek to function correctly. Without peek, the stack interface becomes easier to implement. When an element is removed from a stack, we now keep the `received_at` of the popped element. This means that we relax the LIFO constraint for the sake of simplicity: a stack might be top-priority even though an other stack might have newer envelopes, but only until the next pop. Another side effect is that empty stacks remain in the priority queue until another pop attempt is made. --- .../services/buffer/envelope_buffer/mod.rs | 310 +++++++----------- .../services/buffer/envelope_stack/memory.rs | 4 - .../src/services/buffer/envelope_stack/mod.rs | 3 - .../services/buffer/envelope_stack/sqlite.rs | 42 +-- relay-server/src/services/buffer/mod.rs | 178 +++++++--- 5 files changed, 252 insertions(+), 285 deletions(-) diff --git a/relay-server/src/services/buffer/envelope_buffer/mod.rs b/relay-server/src/services/buffer/envelope_buffer/mod.rs index 0ff0bc2b7e..7cc978daac 100644 --- a/relay-server/src/services/buffer/envelope_buffer/mod.rs +++ b/relay-server/src/services/buffer/envelope_buffer/mod.rs @@ -20,8 +20,12 @@ use crate::services::buffer::envelope_stack::EnvelopeStack; use crate::services::buffer::envelope_store::sqlite::SqliteEnvelopeStoreError; use crate::services::buffer::stack_provider::memory::MemoryStackProvider; use crate::services::buffer::stack_provider::sqlite::SqliteStackProvider; -use crate::services::buffer::stack_provider::{StackCreationType, StackProvider}; -use crate::statsd::{RelayCounters, RelayGauges, RelayHistograms, RelayTimers}; +use crate::services::buffer::stack_provider::StackCreationType; 
+use crate::services::buffer::stack_provider::StackProvider; +use crate::statsd::RelayCounters; +use crate::statsd::RelayGauges; +use crate::statsd::RelayHistograms; +use crate::statsd::RelayTimers; use crate::utils::MemoryChecker; /// Polymorphic envelope buffering interface. @@ -88,12 +92,12 @@ impl PolymorphicEnvelopeBuffer { Ok(()) } - /// Returns a reference to the next-in-line envelope. - pub async fn peek(&mut self) -> Result { + /// Returns the state of the next-in-line envelope stack, if any exists. + pub fn peek(&mut self) -> Option { relay_statsd::metric!(timer(RelayTimers::BufferPeek), { match self { - Self::Sqlite(buffer) => buffer.peek().await, - Self::InMemory(buffer) => buffer.peek().await, + Self::Sqlite(buffer) => buffer.peek(), + Self::InMemory(buffer) => buffer.peek(), } }) } @@ -283,29 +287,21 @@ where Ok(()) } - /// Returns a reference to the next-in-line envelope, if one exists. - pub async fn peek(&mut self) -> Result { - let Some(( - QueueItem { - key: stack_key, - value: stack, - }, + pub fn peek(&mut self) -> Option { + let ( + QueueItem { key, .. }, Priority { readiness, next_project_fetch, - .. + received_at, }, - )) = self.priority_queue.peek_mut() - else { - return Ok(Peek::Empty); - }; - - let ready = readiness.ready(); + ) = self.priority_queue.peek_mut()?; - Ok(match (stack.peek().await?, ready) { - (None, _) => Peek::Empty, - (Some(envelope), true) => Peek::Ready(envelope), - (Some(envelope), false) => Peek::NotReady(*stack_key, *next_project_fetch, envelope), + Some(Peek { + received_at: *received_at, + ready: readiness.ready(), + project_key_pair: *key, + next_project_fetch: *next_project_fetch, }) } @@ -314,29 +310,24 @@ where /// The priority of the envelope's stack is updated with the next envelope's received_at /// time. If the stack is empty after popping, it is removed from the priority queue. 
pub async fn pop(&mut self) -> Result>, EnvelopeBufferError> { - let Some((QueueItem { key, value: stack }, _)) = self.priority_queue.peek_mut() else { - return Ok(None); - }; - let project_key_pair = *key; - let envelope = stack.pop().await.unwrap().expect("found an empty stack"); - - let next_received_at = stack - .peek() - .await? - .map(|next_envelope| next_envelope.meta().start_time().into()); - - match next_received_at { - None => { - relay_statsd::metric!(counter(RelayCounters::BufferEnvelopeStacksPopped) += 1); - self.pop_stack(project_key_pair); + // Pop empty stacks until we got a full one: + let (project_key_pair, envelope) = loop { + let Some((QueueItem { key, value: stack }, _)) = self.priority_queue.peek_mut() else { + // The priority queue is empty. + return Ok(None); + }; + let project_key_pair = *key; + + match stack.pop().await? { + Some(envelope) => break (project_key_pair, envelope), + None => self.pop_stack(project_key_pair), } - Some(next_received_at) => { - self.priority_queue - .change_priority_by(&project_key_pair, |prio| { - prio.received_at = next_received_at; - }); - } - } + }; + + self.priority_queue + .change_priority_by(&project_key_pair, |prio| { + prio.received_at = envelope.meta().start_time().into(); + }); // We are fine with the count going negative, since it represents that more data was popped, // than it was initially counted, meaning that we had a wrong total count from @@ -451,6 +442,7 @@ where /// Pops an [`EnvelopeStack`] with the supplied [`EnvelopeBufferError`]. fn pop_stack(&mut self, project_key_pair: ProjectKeyPair) { + relay_statsd::metric!(counter(RelayCounters::BufferEnvelopeStacksPopped) += 1); for project_key in project_key_pair.iter() { self.stacks_by_project .get_mut(&project_key) @@ -515,11 +507,12 @@ where } } -/// Contains a reference to the first element in the buffer, together with its stack's ready state. 
-pub enum Peek<'a> { - Empty, - Ready(&'a Envelope), - NotReady(ProjectKeyPair, Instant, &'a Envelope), +/// State of the top-priority envelope stack. +pub struct Peek { + pub project_key_pair: ProjectKeyPair, + pub received_at: Instant, // TODO: use wall clock time instead. + pub ready: bool, + pub next_project_fetch: Instant, } #[derive(Debug)] @@ -638,19 +631,6 @@ mod tests { use super::*; - impl Peek<'_> { - fn is_empty(&self) -> bool { - matches!(self, Peek::Empty) - } - - fn envelope(&self) -> Option<&Envelope> { - match self { - Peek::Empty => None, - Peek::Ready(envelope) | Peek::NotReady(_, _, envelope) => Some(envelope), - } - } - } - fn new_envelope( own_key: ProjectKey, sampling_key: Option, @@ -697,15 +677,8 @@ mod tests { MemoryChecker::new(MemoryStat::default(), mock_config("my/db/path").clone()) } - async fn peek_project_key(buffer: &mut EnvelopeBuffer) -> ProjectKey { - buffer - .peek() - .await - .unwrap() - .envelope() - .unwrap() - .meta() - .public_key() + async fn pop_project_key(buffer: &mut EnvelopeBuffer) -> ProjectKey { + buffer.pop().await.unwrap().unwrap().meta().public_key() } #[tokio::test] @@ -717,26 +690,27 @@ mod tests { let project_key3 = ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fef").unwrap(); assert!(buffer.pop().await.unwrap().is_none()); - assert!(buffer.peek().await.unwrap().is_empty()); - - buffer - .push(new_envelope(project_key1, None, None)) - .await - .unwrap(); - - buffer - .push(new_envelope(project_key2, None, None)) - .await - .unwrap(); + assert!(buffer.peek().is_none()); + + for key in [ + project_key1, + project_key1, + project_key1, + project_key1, + project_key2, + project_key2, + ] { + buffer.push(new_envelope(key, None, None)).await.unwrap(); + } // Both projects are ready, so project 2 is on top (has the newest envelopes): - assert_eq!(peek_project_key(&mut buffer).await, project_key2); + assert_eq!(pop_project_key(&mut buffer).await, project_key2); buffer.mark_ready(&project_key1, false); 
buffer.mark_ready(&project_key2, false); // Both projects are not ready, so project 1 is on top (has the oldest envelopes): - assert_eq!(peek_project_key(&mut buffer).await, project_key1); + assert_eq!(pop_project_key(&mut buffer).await, project_key1); buffer .push(new_envelope(project_key3, None, None)) @@ -745,38 +719,26 @@ mod tests { buffer.mark_ready(&project_key3, false); // All projects are not ready, so project 1 is on top (has the oldest envelopes): - assert_eq!(peek_project_key(&mut buffer).await, project_key1); + assert_eq!(pop_project_key(&mut buffer).await, project_key1); // After marking a project ready, it goes to the top: buffer.mark_ready(&project_key3, true); - assert_eq!(peek_project_key(&mut buffer).await, project_key3); - assert_eq!( - buffer.pop().await.unwrap().unwrap().meta().public_key(), - project_key3 - ); + assert_eq!(pop_project_key(&mut buffer).await, project_key3); // After popping, project 1 is on top again: - assert_eq!(peek_project_key(&mut buffer).await, project_key1); + assert_eq!(pop_project_key(&mut buffer).await, project_key1); // Mark project 1 as ready (still on top): buffer.mark_ready(&project_key1, true); - assert_eq!(peek_project_key(&mut buffer).await, project_key1); + assert_eq!(pop_project_key(&mut buffer).await, project_key1); // Mark project 2 as ready as well (now on top because most recent): buffer.mark_ready(&project_key2, true); - assert_eq!(peek_project_key(&mut buffer).await, project_key2); - assert_eq!( - buffer.pop().await.unwrap().unwrap().meta().public_key(), - project_key2 - ); + assert_eq!(pop_project_key(&mut buffer).await, project_key2); - // Pop last element: - assert_eq!( - buffer.pop().await.unwrap().unwrap().meta().public_key(), - project_key1 - ); + // The buffer is now empty: assert!(buffer.pop().await.unwrap().is_none()); - assert!(buffer.peek().await.unwrap().is_empty()); + assert!(buffer.peek().is_none()); } #[tokio::test] @@ -814,101 +776,53 @@ mod tests { let project_key2 = 
ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fef").unwrap(); let envelope1 = new_envelope(project_key1, None, None); - let instant1 = envelope1.meta().start_time(); + let instant1: tokio::time::Instant = envelope1.meta().start_time().into(); buffer.push(envelope1).await.unwrap(); let envelope2 = new_envelope(project_key2, None, None); - let instant2 = envelope2.meta().start_time(); + let instant2: tokio::time::Instant = envelope2.meta().start_time().into(); buffer.push(envelope2).await.unwrap(); let envelope3 = new_envelope(project_key1, Some(project_key2), None); - let instant3 = envelope3.meta().start_time(); + let instant3: tokio::time::Instant = envelope3.meta().start_time().into(); buffer.push(envelope3).await.unwrap(); buffer.mark_ready(&project_key1, false); buffer.mark_ready(&project_key2, false); // Nothing is ready, instant1 is on top: - assert_eq!( - buffer - .peek() - .await - .unwrap() - .envelope() - .unwrap() - .meta() - .start_time(), - instant1 - ); + assert_eq!(buffer.peek().unwrap().received_at, instant1); // Mark project 2 ready, gets on top: buffer.mark_ready(&project_key2, true); - assert_eq!( - buffer - .peek() - .await - .unwrap() - .envelope() - .unwrap() - .meta() - .start_time(), - instant2 - ); + assert_eq!(buffer.peek().unwrap().received_at, instant2); // Revert buffer.mark_ready(&project_key2, false); - assert_eq!( - buffer - .peek() - .await - .unwrap() - .envelope() - .unwrap() - .meta() - .start_time(), - instant1 - ); + assert_eq!(buffer.peek().unwrap().received_at, instant1); // Project 1 ready: buffer.mark_ready(&project_key1, true); - assert_eq!( - buffer - .peek() - .await - .unwrap() - .envelope() - .unwrap() - .meta() - .start_time(), - instant1 - ); + assert_eq!(buffer.peek().unwrap().received_at, instant1); // when both projects are ready, event no 3 ends up on top: buffer.mark_ready(&project_key2, true); assert_eq!( buffer.pop().await.unwrap().unwrap().meta().start_time(), - instant3 - ); - assert_eq!( - buffer - 
.peek() - .await - .unwrap() - .envelope() - .unwrap() - .meta() - .start_time(), - instant2 + instant3.into_std() ); + // The timestamp of the last removed item stays on top: + assert_eq!(buffer.peek().unwrap().received_at, instant3); + buffer.mark_ready(&project_key2, false); assert_eq!( buffer.pop().await.unwrap().unwrap().meta().start_time(), - instant1 + instant1.into_std() ); assert_eq!( buffer.pop().await.unwrap().unwrap().meta().start_time(), - instant2 + instant2.into_std() ); assert!(buffer.pop().await.unwrap().is_none()); @@ -959,51 +873,53 @@ mod tests { let mut buffer = EnvelopeBuffer::::new(mock_memory_checker()); let project_key_1 = ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fed").unwrap(); - let event_id_1 = EventId::new(); - let envelope1 = new_envelope(project_key_1, None, Some(event_id_1)); + let event_id_1a = EventId::new(); + let event_id_1b = EventId::new(); + let envelope1a = new_envelope(project_key_1, None, Some(event_id_1a)); + let envelope1b = new_envelope(project_key_1, None, Some(event_id_1b)); let project_key_2 = ProjectKey::parse("b56ae32be2584e0bbd7a4cbb95971fed").unwrap(); - let event_id_2 = EventId::new(); - let envelope2 = new_envelope(project_key_2, None, Some(event_id_2)); + let event_id_2a = EventId::new(); + let event_id_2b = EventId::new(); + let envelope2a = new_envelope(project_key_2, None, Some(event_id_2a)); + let envelope2b = new_envelope(project_key_2, None, Some(event_id_2b)); - buffer.push(envelope1).await.unwrap(); - buffer.push(envelope2).await.unwrap(); + buffer.push(envelope1a).await.unwrap(); + buffer.push(envelope1b).await.unwrap(); + buffer.push(envelope2a).await.unwrap(); + buffer.push(envelope2b).await.unwrap(); + // set readiness to false to trigger last_seen logic. 
buffer.mark_ready(&project_key_1, false); buffer.mark_ready(&project_key_2, false); // event_id_1 is first element: - let Peek::NotReady(_, _, envelope) = buffer.peek().await.unwrap() else { - panic!(); - }; - assert_eq!(envelope.event_id(), Some(event_id_1)); - - // Second peek returns same element: - let Peek::NotReady(stack_key, _, envelope) = buffer.peek().await.unwrap() else { - panic!(); - }; - assert_eq!(envelope.event_id(), Some(event_id_1)); + let Peek { + project_key_pair, + ready, + .. + } = buffer.peek().unwrap(); + assert!(!ready); + let envelope = buffer.pop().await.unwrap().unwrap(); + assert_eq!(envelope.event_id(), Some(event_id_1b)); - buffer.mark_seen(&stack_key, Duration::ZERO); + buffer.mark_seen(&project_key_pair, Duration::ZERO); // After mark_seen, event 2 is on top: - let Peek::NotReady(_, _, envelope) = buffer.peek().await.unwrap() else { - panic!(); - }; - assert_eq!(envelope.event_id(), Some(event_id_2)); - - let Peek::NotReady(stack_key, _, envelope) = buffer.peek().await.unwrap() else { - panic!(); - }; - assert_eq!(envelope.event_id(), Some(event_id_2)); + let Peek { + project_key_pair, + ready, + .. 
+ } = buffer.peek().unwrap(); + assert!(!ready); + let envelope = buffer.pop().await.unwrap().unwrap(); + assert_eq!(envelope.event_id(), Some(event_id_2b)); - buffer.mark_seen(&stack_key, Duration::ZERO); + buffer.mark_seen(&project_key_pair, Duration::ZERO); // After another mark_seen, cycle back to event 1: - let Peek::NotReady(_, _, envelope) = buffer.peek().await.unwrap() else { - panic!(); - }; - assert_eq!(envelope.event_id(), Some(event_id_1)); + let envelope = buffer.pop().await.unwrap().unwrap(); + assert_eq!(envelope.event_id(), Some(event_id_1a)); } #[tokio::test] diff --git a/relay-server/src/services/buffer/envelope_stack/memory.rs b/relay-server/src/services/buffer/envelope_stack/memory.rs index ceb771ec95..2a5eda8371 100644 --- a/relay-server/src/services/buffer/envelope_stack/memory.rs +++ b/relay-server/src/services/buffer/envelope_stack/memory.rs @@ -21,10 +21,6 @@ impl EnvelopeStack for MemoryEnvelopeStack { Ok(()) } - async fn peek(&mut self) -> Result, Self::Error> { - Ok(self.0.last().map(Box::as_ref)) - } - async fn pop(&mut self) -> Result>, Self::Error> { Ok(self.0.pop()) } diff --git a/relay-server/src/services/buffer/envelope_stack/mod.rs b/relay-server/src/services/buffer/envelope_stack/mod.rs index 8acfff0600..4087946dd6 100644 --- a/relay-server/src/services/buffer/envelope_stack/mod.rs +++ b/relay-server/src/services/buffer/envelope_stack/mod.rs @@ -14,9 +14,6 @@ pub trait EnvelopeStack: Send + std::fmt::Debug { /// Pushes an [`Envelope`] on top of the stack. fn push(&mut self, envelope: Box) -> impl Future>; - /// Peeks the [`Envelope`] on top of the stack. - fn peek(&mut self) -> impl Future, Self::Error>>; - /// Pops the [`Envelope`] on top of the stack. 
fn pop(&mut self) -> impl Future>, Self::Error>>; diff --git a/relay-server/src/services/buffer/envelope_stack/sqlite.rs b/relay-server/src/services/buffer/envelope_stack/sqlite.rs index 97accdf3cb..430ab4972d 100644 --- a/relay-server/src/services/buffer/envelope_stack/sqlite.rs +++ b/relay-server/src/services/buffer/envelope_stack/sqlite.rs @@ -196,20 +196,6 @@ impl EnvelopeStack for SqliteEnvelopeStack { Ok(()) } - async fn peek(&mut self) -> Result, Self::Error> { - if self.below_unspool_threshold() && self.check_disk { - self.unspool_from_disk().await? - } - - let last = self - .batches_buffer - .back() - .and_then(|last_batch| last_batch.last()) - .map(|last_batch| last_batch.as_ref()); - - Ok(last) - } - async fn pop(&mut self) -> Result>, Self::Error> { if self.below_unspool_threshold() && self.check_disk { relay_log::trace!("Unspool from disk"); @@ -381,15 +367,15 @@ mod tests { } assert_eq!(stack.batches_buffer_size, 5); - // We peek the top element. - let peeked_envelope = stack.peek().await.unwrap().unwrap(); + // We pop the top element. + let envelope = stack.pop().await.unwrap().unwrap(); assert_eq!( - peeked_envelope.event_id().unwrap(), + envelope.event_id().unwrap(), envelopes.clone()[4].event_id().unwrap() ); - // We pop 5 envelopes. - for envelope in envelopes.iter().rev() { + // We pop 4 envelopes. + for envelope in envelopes.clone()[0..4].iter().rev() { let popped_envelope = stack.pop().await.unwrap().unwrap(); assert_eq!( popped_envelope.event_id().unwrap(), @@ -420,15 +406,15 @@ mod tests { assert_eq!(stack.batches_buffer_size, 10); // We peek the top element. 
- let peeked_envelope = stack.peek().await.unwrap().unwrap(); + let popped_envelope = stack.pop().await.unwrap().unwrap(); assert_eq!( - peeked_envelope.event_id().unwrap(), + popped_envelope.event_id().unwrap(), envelopes.clone()[14].event_id().unwrap() ); - // We pop 10 envelopes, and we expect that the last 10 are in memory, since the first 5 + // We pop 9 envelopes, and we expect that the last 10 are in memory, since the first 5 // should have been spooled to disk. - for envelope in envelopes[5..15].iter().rev() { + for envelope in envelopes[5..14].iter().rev() { let popped_envelope = stack.pop().await.unwrap().unwrap(); assert_eq!( popped_envelope.event_id().unwrap(), @@ -438,13 +424,13 @@ mod tests { assert_eq!(stack.batches_buffer_size, 0); // We peek the top element, which since the buffer is empty should result in a disk load. - let peeked_envelope = stack.peek().await.unwrap().unwrap(); + let popped_envelope = stack.pop().await.unwrap().unwrap(); assert_eq!( - peeked_envelope.event_id().unwrap(), + popped_envelope.event_id().unwrap(), envelopes.clone()[4].event_id().unwrap() ); - // We insert a new envelope, to test the load from disk happening during `peek()` gives + // We insert a new envelope, to test the load from disk happening during `pop()` gives // priority to this envelope in the stack. let envelope = mock_envelope(Instant::now()); assert!(stack.push(envelope.clone()).await.is_ok()); @@ -456,9 +442,9 @@ mod tests { envelope.event_id().unwrap() ); - // We pop 5 envelopes, which should not result in a disk load since `peek()` already should + // We pop 4 envelopes, which should not result in a disk load since `pop()` already should // have caused it. 
- for envelope in envelopes[0..5].iter().rev() { + for envelope in envelopes[0..4].iter().rev() { let popped_envelope = stack.pop().await.unwrap().unwrap(); assert_eq!( popped_envelope.event_id().unwrap(), diff --git a/relay-server/src/services/buffer/mod.rs b/relay-server/src/services/buffer/mod.rs index 3bcc0c5fea..ea9c5e1529 100644 --- a/relay-server/src/services/buffer/mod.rs +++ b/relay-server/src/services/buffer/mod.rs @@ -15,6 +15,7 @@ use tokio::sync::{mpsc, watch}; use tokio::time::{timeout, Instant}; use crate::envelope::Envelope; +use crate::services::buffer::common::ProjectKeyPair; use crate::services::buffer::envelope_buffer::Peek; use crate::services::global_config; use crate::services::outcome::DiscardReason; @@ -220,8 +221,8 @@ impl EnvelopeBufferService { services: &Services, envelopes_tx_permit: Permit<'a, DequeuedEnvelope>, ) -> Result { - let sleep = match buffer.peek().await? { - Peek::Empty => { + let sleep = match buffer.peek() { + None => { relay_statsd::metric!( counter(RelayCounters::BufferTryPop) += 1, peek_result = "empty" @@ -229,71 +230,69 @@ impl EnvelopeBufferService { Duration::MAX // wait for reset by `handle_message`. } - Peek::Ready(envelope) | Peek::NotReady(.., envelope) - if Self::expired(config, envelope) => - { - let envelope = buffer - .pop() - .await? - .expect("Element disappeared despite exclusive excess"); - - Self::drop_expired(envelope, services); + Some(Peek { + project_key_pair, + received_at, + ready, + next_project_fetch, + }) => { + if received_at.elapsed() > config.spool_envelopes_max_age() { + relay_statsd::metric!( + counter(RelayCounters::BufferTryPop) += 1, + peek_result = "expired" + ); + if let Some(envelope) = buffer.pop().await? 
{ + Self::drop_expired(envelope, services); + } - Duration::ZERO // try next pop immediately - } - Peek::Ready(_) => { - relay_log::trace!("EnvelopeBufferService: popping envelope"); - relay_statsd::metric!( - counter(RelayCounters::BufferTryPop) += 1, - peek_result = "ready" - ); - let envelope = buffer - .pop() - .await? - .expect("Element disappeared despite exclusive excess"); - envelopes_tx_permit.send(DequeuedEnvelope(envelope)); + Duration::ZERO // try next pop immediately + } else if ready { + relay_statsd::metric!( + counter(RelayCounters::BufferTryPop) += 1, + peek_result = "ready" + ); + if let Some(envelope) = buffer.pop().await? { + // The cached `received_at` time on the queue might be newer than + // the actual timestamp of the envelope, so check again here. + if envelope.meta().start_time().elapsed() > config.spool_envelopes_max_age() + { + Self::drop_expired(envelope, services); + } else { + envelopes_tx_permit.send(DequeuedEnvelope(envelope)); + } + } - Duration::ZERO // try next pop immediately - } - Peek::NotReady(stack_key, next_project_fetch, envelope) => { - relay_log::trace!("EnvelopeBufferService: project(s) of envelope not ready"); - relay_statsd::metric!( - counter(RelayCounters::BufferTryPop) += 1, - peek_result = "not_ready" - ); + Duration::ZERO // try next pop immediately + } else { + let ProjectKeyPair { + own_key, + sampling_key, + } = project_key_pair; + relay_statsd::metric!( + counter(RelayCounters::BufferTryPop) += 1, + peek_result = "not_ready" + ); + if Instant::now() >= next_project_fetch { + relay_log::trace!("EnvelopeBufferService: requesting project(s) update"); - // We want to fetch the configs again, only if some time passed between the last - // peek of this not ready project key pair and the current peek. This is done to - // avoid flooding the project cache with `UpdateProject` messages. 
- if Instant::now() >= next_project_fetch { - relay_log::trace!("EnvelopeBufferService: requesting project(s) update"); - let own_key = envelope.meta().public_key(); - - services.project_cache.send(UpdateProject(own_key)); - match envelope.sampling_key() { - None => {} - Some(sampling_key) if sampling_key == own_key => {} // already sent. - Some(sampling_key) => { + services.project_cache.send(UpdateProject(own_key)); + if sampling_key != own_key { services.project_cache.send(UpdateProject(sampling_key)); } + + // Deprioritize the stack to prevent head-of-line blocking and update the next fetch + // time. + buffer.mark_seen(&project_key_pair, DEFAULT_SLEEP); } - // Deprioritize the stack to prevent head-of-line blocking and update the next fetch - // time. - buffer.mark_seen(&stack_key, DEFAULT_SLEEP); + DEFAULT_SLEEP // wait and prioritize handling new messages. } - - DEFAULT_SLEEP // wait and prioritize handling new messages. } }; Ok(sleep) } - fn expired(config: &Config, envelope: &Envelope) -> bool { - envelope.meta().start_time().elapsed() > config.spool_envelopes_max_age() - } - fn drop_expired(envelope: Box, services: &Services) { let mut managed_envelope = ManagedEnvelope::new( envelope, @@ -471,7 +470,9 @@ mod tests { use std::time::{Duration, Instant}; use relay_dynamic_config::GlobalConfig; + use relay_metrics::UnixTimestamp; use relay_quotas::DataCategory; + use sqlx::Connection; use tokio::sync::mpsc; use uuid::Uuid; @@ -667,6 +668,77 @@ mod tests { assert_eq!(outcome.quantity, 1); } + #[tokio::test] + async fn old_envelope_from_disk_is_dropped() { + relay_log::init_test!(); + + let tmp = tempfile::TempDir::new().unwrap(); + let path = tmp.path().join("envelopes.db"); + + let buffer_service = || { + envelope_buffer_service( + Some(serde_json::json!({ + "spool": { + "envelopes": { + "version": "experimental", + "path": path, + "max_envelope_delay_secs": 1, + } + } + })), + global_config::Status::Ready(Arc::new(GlobalConfig::default())), + ) + }; + + // 
Initialize once to migrate the database: + let service = buffer_service().service; + let config = service.config.clone(); + service.start(); + + tokio::time::sleep(Duration::from_millis(1000)).await; + + // Write an envelope to the db + let envelope = new_envelope(false, "foo"); + let mut db = sqlx::SqliteConnection::connect(path.to_str().unwrap()) + .await + .unwrap(); + + let received_at = + UnixTimestamp::now().as_datetime().unwrap() - 2 * config.spool_envelopes_max_age(); + + let query = sqlx::query("INSERT INTO envelopes (received_at, own_key, sampling_key, envelope) VALUES ($1, $2, $3, $4);") + .bind(received_at.timestamp_millis()) + .bind(envelope.meta().public_key().to_string()) + .bind(envelope.meta().public_key().to_string()) + .bind(envelope.to_vec().unwrap()); + query.execute(&mut db).await.unwrap(); + + // Initialize again to read from db: + let EnvelopeBufferServiceResult { + service, + envelopes_rx, + project_cache_rx, + mut outcome_aggregator_rx, + global_tx, + } = buffer_service(); + + let _addr = service.start(); + global_tx + .send(global_config::Status::Ready(Arc::new( + GlobalConfig::default(), + ))) + .unwrap(); + + tokio::time::sleep(Duration::from_millis(100)).await; + + assert_eq!(envelopes_rx.len(), 0); + assert_eq!(project_cache_rx.len(), 0); + + let outcome = outcome_aggregator_rx.try_recv().unwrap(); + assert_eq!(outcome.category, DataCategory::TransactionIndexed); + assert_eq!(outcome.quantity, 1); + } + #[tokio::test] async fn test_update_project() { tokio::time::pause(); From b88fdb7b8c7a2cc1baf2260cbdd95d44d822930f Mon Sep 17 00:00:00 2001 From: Joris Bayer Date: Wed, 16 Oct 2024 16:02:54 +0200 Subject: [PATCH 10/16] Revert "ref(buffer): remove peek" (#4146) Reverts getsentry/relay#4136 The new version worked fine functionally speaking, but it created more traffic between the envelope buffer service and the project cache, which I want to investigate before proceeding. 
--- .../services/buffer/envelope_buffer/mod.rs | 310 +++++++++++------- .../services/buffer/envelope_stack/memory.rs | 4 + .../src/services/buffer/envelope_stack/mod.rs | 3 + .../services/buffer/envelope_stack/sqlite.rs | 42 ++- relay-server/src/services/buffer/mod.rs | 178 +++------- 5 files changed, 285 insertions(+), 252 deletions(-) diff --git a/relay-server/src/services/buffer/envelope_buffer/mod.rs b/relay-server/src/services/buffer/envelope_buffer/mod.rs index 7cc978daac..0ff0bc2b7e 100644 --- a/relay-server/src/services/buffer/envelope_buffer/mod.rs +++ b/relay-server/src/services/buffer/envelope_buffer/mod.rs @@ -20,12 +20,8 @@ use crate::services::buffer::envelope_stack::EnvelopeStack; use crate::services::buffer::envelope_store::sqlite::SqliteEnvelopeStoreError; use crate::services::buffer::stack_provider::memory::MemoryStackProvider; use crate::services::buffer::stack_provider::sqlite::SqliteStackProvider; -use crate::services::buffer::stack_provider::StackCreationType; -use crate::services::buffer::stack_provider::StackProvider; -use crate::statsd::RelayCounters; -use crate::statsd::RelayGauges; -use crate::statsd::RelayHistograms; -use crate::statsd::RelayTimers; +use crate::services::buffer::stack_provider::{StackCreationType, StackProvider}; +use crate::statsd::{RelayCounters, RelayGauges, RelayHistograms, RelayTimers}; use crate::utils::MemoryChecker; /// Polymorphic envelope buffering interface. @@ -92,12 +88,12 @@ impl PolymorphicEnvelopeBuffer { Ok(()) } - /// Returns the state of the next-in-line envelope stack, if any exists. - pub fn peek(&mut self) -> Option { + /// Returns a reference to the next-in-line envelope. 
+ pub async fn peek(&mut self) -> Result { relay_statsd::metric!(timer(RelayTimers::BufferPeek), { match self { - Self::Sqlite(buffer) => buffer.peek(), - Self::InMemory(buffer) => buffer.peek(), + Self::Sqlite(buffer) => buffer.peek().await, + Self::InMemory(buffer) => buffer.peek().await, } }) } @@ -287,21 +283,29 @@ where Ok(()) } - pub fn peek(&mut self) -> Option { - let ( - QueueItem { key, .. }, + /// Returns a reference to the next-in-line envelope, if one exists. + pub async fn peek(&mut self) -> Result { + let Some(( + QueueItem { + key: stack_key, + value: stack, + }, Priority { readiness, next_project_fetch, - received_at, + .. }, - ) = self.priority_queue.peek_mut()?; + )) = self.priority_queue.peek_mut() + else { + return Ok(Peek::Empty); + }; + + let ready = readiness.ready(); - Some(Peek { - received_at: *received_at, - ready: readiness.ready(), - project_key_pair: *key, - next_project_fetch: *next_project_fetch, + Ok(match (stack.peek().await?, ready) { + (None, _) => Peek::Empty, + (Some(envelope), true) => Peek::Ready(envelope), + (Some(envelope), false) => Peek::NotReady(*stack_key, *next_project_fetch, envelope), }) } @@ -310,24 +314,29 @@ where /// The priority of the envelope's stack is updated with the next envelope's received_at /// time. If the stack is empty after popping, it is removed from the priority queue. pub async fn pop(&mut self) -> Result>, EnvelopeBufferError> { - // Pop empty stacks until we got a full one: - let (project_key_pair, envelope) = loop { - let Some((QueueItem { key, value: stack }, _)) = self.priority_queue.peek_mut() else { - // The priority queue is empty. - return Ok(None); - }; - let project_key_pair = *key; - - match stack.pop().await? 
{ - Some(envelope) => break (project_key_pair, envelope), - None => self.pop_stack(project_key_pair), - } + let Some((QueueItem { key, value: stack }, _)) = self.priority_queue.peek_mut() else { + return Ok(None); }; - - self.priority_queue - .change_priority_by(&project_key_pair, |prio| { - prio.received_at = envelope.meta().start_time().into(); - }); + let project_key_pair = *key; + let envelope = stack.pop().await.unwrap().expect("found an empty stack"); + + let next_received_at = stack + .peek() + .await? + .map(|next_envelope| next_envelope.meta().start_time().into()); + + match next_received_at { + None => { + relay_statsd::metric!(counter(RelayCounters::BufferEnvelopeStacksPopped) += 1); + self.pop_stack(project_key_pair); + } + Some(next_received_at) => { + self.priority_queue + .change_priority_by(&project_key_pair, |prio| { + prio.received_at = next_received_at; + }); + } + } // We are fine with the count going negative, since it represents that more data was popped, // than it was initially counted, meaning that we had a wrong total count from @@ -442,7 +451,6 @@ where /// Pops an [`EnvelopeStack`] with the supplied [`EnvelopeBufferError`]. fn pop_stack(&mut self, project_key_pair: ProjectKeyPair) { - relay_statsd::metric!(counter(RelayCounters::BufferEnvelopeStacksPopped) += 1); for project_key in project_key_pair.iter() { self.stacks_by_project .get_mut(&project_key) @@ -507,12 +515,11 @@ where } } -/// State of the top-priority envelope stack. -pub struct Peek { - pub project_key_pair: ProjectKeyPair, - pub received_at: Instant, // TODO: use wall clock time instead. - pub ready: bool, - pub next_project_fetch: Instant, +/// Contains a reference to the first element in the buffer, together with its stack's ready state. 
+pub enum Peek<'a> { + Empty, + Ready(&'a Envelope), + NotReady(ProjectKeyPair, Instant, &'a Envelope), } #[derive(Debug)] @@ -631,6 +638,19 @@ mod tests { use super::*; + impl Peek<'_> { + fn is_empty(&self) -> bool { + matches!(self, Peek::Empty) + } + + fn envelope(&self) -> Option<&Envelope> { + match self { + Peek::Empty => None, + Peek::Ready(envelope) | Peek::NotReady(_, _, envelope) => Some(envelope), + } + } + } + fn new_envelope( own_key: ProjectKey, sampling_key: Option, @@ -677,8 +697,15 @@ mod tests { MemoryChecker::new(MemoryStat::default(), mock_config("my/db/path").clone()) } - async fn pop_project_key(buffer: &mut EnvelopeBuffer) -> ProjectKey { - buffer.pop().await.unwrap().unwrap().meta().public_key() + async fn peek_project_key(buffer: &mut EnvelopeBuffer) -> ProjectKey { + buffer + .peek() + .await + .unwrap() + .envelope() + .unwrap() + .meta() + .public_key() } #[tokio::test] @@ -690,27 +717,26 @@ mod tests { let project_key3 = ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fef").unwrap(); assert!(buffer.pop().await.unwrap().is_none()); - assert!(buffer.peek().is_none()); - - for key in [ - project_key1, - project_key1, - project_key1, - project_key1, - project_key2, - project_key2, - ] { - buffer.push(new_envelope(key, None, None)).await.unwrap(); - } + assert!(buffer.peek().await.unwrap().is_empty()); + + buffer + .push(new_envelope(project_key1, None, None)) + .await + .unwrap(); + + buffer + .push(new_envelope(project_key2, None, None)) + .await + .unwrap(); // Both projects are ready, so project 2 is on top (has the newest envelopes): - assert_eq!(pop_project_key(&mut buffer).await, project_key2); + assert_eq!(peek_project_key(&mut buffer).await, project_key2); buffer.mark_ready(&project_key1, false); buffer.mark_ready(&project_key2, false); // Both projects are not ready, so project 1 is on top (has the oldest envelopes): - assert_eq!(pop_project_key(&mut buffer).await, project_key1); + assert_eq!(peek_project_key(&mut buffer).await, 
project_key1); buffer .push(new_envelope(project_key3, None, None)) @@ -719,26 +745,38 @@ mod tests { buffer.mark_ready(&project_key3, false); // All projects are not ready, so project 1 is on top (has the oldest envelopes): - assert_eq!(pop_project_key(&mut buffer).await, project_key1); + assert_eq!(peek_project_key(&mut buffer).await, project_key1); // After marking a project ready, it goes to the top: buffer.mark_ready(&project_key3, true); - assert_eq!(pop_project_key(&mut buffer).await, project_key3); + assert_eq!(peek_project_key(&mut buffer).await, project_key3); + assert_eq!( + buffer.pop().await.unwrap().unwrap().meta().public_key(), + project_key3 + ); // After popping, project 1 is on top again: - assert_eq!(pop_project_key(&mut buffer).await, project_key1); + assert_eq!(peek_project_key(&mut buffer).await, project_key1); // Mark project 1 as ready (still on top): buffer.mark_ready(&project_key1, true); - assert_eq!(pop_project_key(&mut buffer).await, project_key1); + assert_eq!(peek_project_key(&mut buffer).await, project_key1); // Mark project 2 as ready as well (now on top because most recent): buffer.mark_ready(&project_key2, true); - assert_eq!(pop_project_key(&mut buffer).await, project_key2); + assert_eq!(peek_project_key(&mut buffer).await, project_key2); + assert_eq!( + buffer.pop().await.unwrap().unwrap().meta().public_key(), + project_key2 + ); - // The buffer is now empty: + // Pop last element: + assert_eq!( + buffer.pop().await.unwrap().unwrap().meta().public_key(), + project_key1 + ); assert!(buffer.pop().await.unwrap().is_none()); - assert!(buffer.peek().is_none()); + assert!(buffer.peek().await.unwrap().is_empty()); } #[tokio::test] @@ -776,53 +814,101 @@ mod tests { let project_key2 = ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fef").unwrap(); let envelope1 = new_envelope(project_key1, None, None); - let instant1: tokio::time::Instant = envelope1.meta().start_time().into(); + let instant1 = envelope1.meta().start_time(); 
buffer.push(envelope1).await.unwrap(); let envelope2 = new_envelope(project_key2, None, None); - let instant2: tokio::time::Instant = envelope2.meta().start_time().into(); + let instant2 = envelope2.meta().start_time(); buffer.push(envelope2).await.unwrap(); let envelope3 = new_envelope(project_key1, Some(project_key2), None); - let instant3: tokio::time::Instant = envelope3.meta().start_time().into(); + let instant3 = envelope3.meta().start_time(); buffer.push(envelope3).await.unwrap(); buffer.mark_ready(&project_key1, false); buffer.mark_ready(&project_key2, false); // Nothing is ready, instant1 is on top: - assert_eq!(buffer.peek().unwrap().received_at, instant1); + assert_eq!( + buffer + .peek() + .await + .unwrap() + .envelope() + .unwrap() + .meta() + .start_time(), + instant1 + ); // Mark project 2 ready, gets on top: buffer.mark_ready(&project_key2, true); - assert_eq!(buffer.peek().unwrap().received_at, instant2); + assert_eq!( + buffer + .peek() + .await + .unwrap() + .envelope() + .unwrap() + .meta() + .start_time(), + instant2 + ); // Revert buffer.mark_ready(&project_key2, false); - assert_eq!(buffer.peek().unwrap().received_at, instant1); + assert_eq!( + buffer + .peek() + .await + .unwrap() + .envelope() + .unwrap() + .meta() + .start_time(), + instant1 + ); // Project 1 ready: buffer.mark_ready(&project_key1, true); - assert_eq!(buffer.peek().unwrap().received_at, instant1); + assert_eq!( + buffer + .peek() + .await + .unwrap() + .envelope() + .unwrap() + .meta() + .start_time(), + instant1 + ); // when both projects are ready, event no 3 ends up on top: buffer.mark_ready(&project_key2, true); assert_eq!( buffer.pop().await.unwrap().unwrap().meta().start_time(), - instant3.into_std() + instant3 + ); + assert_eq!( + buffer + .peek() + .await + .unwrap() + .envelope() + .unwrap() + .meta() + .start_time(), + instant2 ); - - // The timestamp of the last removed item stays on top: - assert_eq!(buffer.peek().unwrap().received_at, instant3); 
buffer.mark_ready(&project_key2, false); assert_eq!( buffer.pop().await.unwrap().unwrap().meta().start_time(), - instant1.into_std() + instant1 ); assert_eq!( buffer.pop().await.unwrap().unwrap().meta().start_time(), - instant2.into_std() + instant2 ); assert!(buffer.pop().await.unwrap().is_none()); @@ -873,53 +959,51 @@ mod tests { let mut buffer = EnvelopeBuffer::::new(mock_memory_checker()); let project_key_1 = ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fed").unwrap(); - let event_id_1a = EventId::new(); - let event_id_1b = EventId::new(); - let envelope1a = new_envelope(project_key_1, None, Some(event_id_1a)); - let envelope1b = new_envelope(project_key_1, None, Some(event_id_1b)); + let event_id_1 = EventId::new(); + let envelope1 = new_envelope(project_key_1, None, Some(event_id_1)); let project_key_2 = ProjectKey::parse("b56ae32be2584e0bbd7a4cbb95971fed").unwrap(); - let event_id_2a = EventId::new(); - let event_id_2b = EventId::new(); - let envelope2a = new_envelope(project_key_2, None, Some(event_id_2a)); - let envelope2b = new_envelope(project_key_2, None, Some(event_id_2b)); + let event_id_2 = EventId::new(); + let envelope2 = new_envelope(project_key_2, None, Some(event_id_2)); - buffer.push(envelope1a).await.unwrap(); - buffer.push(envelope1b).await.unwrap(); - buffer.push(envelope2a).await.unwrap(); - buffer.push(envelope2b).await.unwrap(); + buffer.push(envelope1).await.unwrap(); + buffer.push(envelope2).await.unwrap(); - // set readiness to false to trigger last_seen logic. buffer.mark_ready(&project_key_1, false); buffer.mark_ready(&project_key_2, false); // event_id_1 is first element: - let Peek { - project_key_pair, - ready, - .. 
- } = buffer.peek().unwrap(); - assert!(!ready); - let envelope = buffer.pop().await.unwrap().unwrap(); - assert_eq!(envelope.event_id(), Some(event_id_1b)); + let Peek::NotReady(_, _, envelope) = buffer.peek().await.unwrap() else { + panic!(); + }; + assert_eq!(envelope.event_id(), Some(event_id_1)); + + // Second peek returns same element: + let Peek::NotReady(stack_key, _, envelope) = buffer.peek().await.unwrap() else { + panic!(); + }; + assert_eq!(envelope.event_id(), Some(event_id_1)); - buffer.mark_seen(&project_key_pair, Duration::ZERO); + buffer.mark_seen(&stack_key, Duration::ZERO); // After mark_seen, event 2 is on top: - let Peek { - project_key_pair, - ready, - .. - } = buffer.peek().unwrap(); - assert!(!ready); - let envelope = buffer.pop().await.unwrap().unwrap(); - assert_eq!(envelope.event_id(), Some(event_id_2b)); + let Peek::NotReady(_, _, envelope) = buffer.peek().await.unwrap() else { + panic!(); + }; + assert_eq!(envelope.event_id(), Some(event_id_2)); + + let Peek::NotReady(stack_key, _, envelope) = buffer.peek().await.unwrap() else { + panic!(); + }; + assert_eq!(envelope.event_id(), Some(event_id_2)); - buffer.mark_seen(&project_key_pair, Duration::ZERO); + buffer.mark_seen(&stack_key, Duration::ZERO); // After another mark_seen, cycle back to event 1: - let envelope = buffer.pop().await.unwrap().unwrap(); - assert_eq!(envelope.event_id(), Some(event_id_1a)); + let Peek::NotReady(_, _, envelope) = buffer.peek().await.unwrap() else { + panic!(); + }; + assert_eq!(envelope.event_id(), Some(event_id_1)); } #[tokio::test] diff --git a/relay-server/src/services/buffer/envelope_stack/memory.rs b/relay-server/src/services/buffer/envelope_stack/memory.rs index 2a5eda8371..ceb771ec95 100644 --- a/relay-server/src/services/buffer/envelope_stack/memory.rs +++ b/relay-server/src/services/buffer/envelope_stack/memory.rs @@ -21,6 +21,10 @@ impl EnvelopeStack for MemoryEnvelopeStack { Ok(()) } + async fn peek(&mut self) -> Result, Self::Error> { + 
Ok(self.0.last().map(Box::as_ref)) + } + async fn pop(&mut self) -> Result>, Self::Error> { Ok(self.0.pop()) } diff --git a/relay-server/src/services/buffer/envelope_stack/mod.rs b/relay-server/src/services/buffer/envelope_stack/mod.rs index 4087946dd6..8acfff0600 100644 --- a/relay-server/src/services/buffer/envelope_stack/mod.rs +++ b/relay-server/src/services/buffer/envelope_stack/mod.rs @@ -14,6 +14,9 @@ pub trait EnvelopeStack: Send + std::fmt::Debug { /// Pushes an [`Envelope`] on top of the stack. fn push(&mut self, envelope: Box) -> impl Future>; + /// Peeks the [`Envelope`] on top of the stack. + fn peek(&mut self) -> impl Future, Self::Error>>; + /// Pops the [`Envelope`] on top of the stack. fn pop(&mut self) -> impl Future>, Self::Error>>; diff --git a/relay-server/src/services/buffer/envelope_stack/sqlite.rs b/relay-server/src/services/buffer/envelope_stack/sqlite.rs index 430ab4972d..97accdf3cb 100644 --- a/relay-server/src/services/buffer/envelope_stack/sqlite.rs +++ b/relay-server/src/services/buffer/envelope_stack/sqlite.rs @@ -196,6 +196,20 @@ impl EnvelopeStack for SqliteEnvelopeStack { Ok(()) } + async fn peek(&mut self) -> Result, Self::Error> { + if self.below_unspool_threshold() && self.check_disk { + self.unspool_from_disk().await? + } + + let last = self + .batches_buffer + .back() + .and_then(|last_batch| last_batch.last()) + .map(|last_batch| last_batch.as_ref()); + + Ok(last) + } + async fn pop(&mut self) -> Result>, Self::Error> { if self.below_unspool_threshold() && self.check_disk { relay_log::trace!("Unspool from disk"); @@ -367,15 +381,15 @@ mod tests { } assert_eq!(stack.batches_buffer_size, 5); - // We pop the top element. - let envelope = stack.pop().await.unwrap().unwrap(); + // We peek the top element. + let peeked_envelope = stack.peek().await.unwrap().unwrap(); assert_eq!( - envelope.event_id().unwrap(), + peeked_envelope.event_id().unwrap(), envelopes.clone()[4].event_id().unwrap() ); - // We pop 4 envelopes. 
- for envelope in envelopes.clone()[0..4].iter().rev() { + // We pop 5 envelopes. + for envelope in envelopes.iter().rev() { let popped_envelope = stack.pop().await.unwrap().unwrap(); assert_eq!( popped_envelope.event_id().unwrap(), @@ -406,15 +420,15 @@ mod tests { assert_eq!(stack.batches_buffer_size, 10); // We peek the top element. - let popped_envelope = stack.pop().await.unwrap().unwrap(); + let peeked_envelope = stack.peek().await.unwrap().unwrap(); assert_eq!( - popped_envelope.event_id().unwrap(), + peeked_envelope.event_id().unwrap(), envelopes.clone()[14].event_id().unwrap() ); - // We pop 9 envelopes, and we expect that the last 10 are in memory, since the first 5 + // We pop 10 envelopes, and we expect that the last 10 are in memory, since the first 5 // should have been spooled to disk. - for envelope in envelopes[5..14].iter().rev() { + for envelope in envelopes[5..15].iter().rev() { let popped_envelope = stack.pop().await.unwrap().unwrap(); assert_eq!( popped_envelope.event_id().unwrap(), @@ -424,13 +438,13 @@ mod tests { assert_eq!(stack.batches_buffer_size, 0); // We peek the top element, which since the buffer is empty should result in a disk load. - let popped_envelope = stack.pop().await.unwrap().unwrap(); + let peeked_envelope = stack.peek().await.unwrap().unwrap(); assert_eq!( - popped_envelope.event_id().unwrap(), + peeked_envelope.event_id().unwrap(), envelopes.clone()[4].event_id().unwrap() ); - // We insert a new envelope, to test the load from disk happening during `pop()` gives + // We insert a new envelope, to test the load from disk happening during `peek()` gives // priority to this envelope in the stack. 
let envelope = mock_envelope(Instant::now()); assert!(stack.push(envelope.clone()).await.is_ok()); @@ -442,9 +456,9 @@ mod tests { envelope.event_id().unwrap() ); - // We pop 4 envelopes, which should not result in a disk load since `pop()` already should + // We pop 5 envelopes, which should not result in a disk load since `peek()` already should // have caused it. - for envelope in envelopes[0..4].iter().rev() { + for envelope in envelopes[0..5].iter().rev() { let popped_envelope = stack.pop().await.unwrap().unwrap(); assert_eq!( popped_envelope.event_id().unwrap(), diff --git a/relay-server/src/services/buffer/mod.rs b/relay-server/src/services/buffer/mod.rs index ea9c5e1529..3bcc0c5fea 100644 --- a/relay-server/src/services/buffer/mod.rs +++ b/relay-server/src/services/buffer/mod.rs @@ -15,7 +15,6 @@ use tokio::sync::{mpsc, watch}; use tokio::time::{timeout, Instant}; use crate::envelope::Envelope; -use crate::services::buffer::common::ProjectKeyPair; use crate::services::buffer::envelope_buffer::Peek; use crate::services::global_config; use crate::services::outcome::DiscardReason; @@ -221,8 +220,8 @@ impl EnvelopeBufferService { services: &Services, envelopes_tx_permit: Permit<'a, DequeuedEnvelope>, ) -> Result { - let sleep = match buffer.peek() { - None => { + let sleep = match buffer.peek().await? { + Peek::Empty => { relay_statsd::metric!( counter(RelayCounters::BufferTryPop) += 1, peek_result = "empty" @@ -230,69 +229,71 @@ impl EnvelopeBufferService { Duration::MAX // wait for reset by `handle_message`. } - Some(Peek { - project_key_pair, - received_at, - ready, - next_project_fetch, - }) => { - if received_at.elapsed() > config.spool_envelopes_max_age() { - relay_statsd::metric!( - counter(RelayCounters::BufferTryPop) += 1, - peek_result = "expired" - ); - if let Some(envelope) = buffer.pop().await? 
{ - Self::drop_expired(envelope, services); - } + Peek::Ready(envelope) | Peek::NotReady(.., envelope) + if Self::expired(config, envelope) => + { + let envelope = buffer + .pop() + .await? + .expect("Element disappeared despite exclusive excess"); - Duration::ZERO // try next pop immediately - } else if ready { - relay_statsd::metric!( - counter(RelayCounters::BufferTryPop) += 1, - peek_result = "ready" - ); - if let Some(envelope) = buffer.pop().await? { - // The cached `received_at` time on the queue might be newer than - // the actual timestamp of the envelope, so check again here. - if envelope.meta().start_time().elapsed() > config.spool_envelopes_max_age() - { - Self::drop_expired(envelope, services); - } else { - envelopes_tx_permit.send(DequeuedEnvelope(envelope)); - } - } + Self::drop_expired(envelope, services); - Duration::ZERO // try next pop immediately - } else { - let ProjectKeyPair { - own_key, - sampling_key, - } = project_key_pair; - relay_statsd::metric!( - counter(RelayCounters::BufferTryPop) += 1, - peek_result = "not_ready" - ); - if Instant::now() >= next_project_fetch { - relay_log::trace!("EnvelopeBufferService: requesting project(s) update"); + Duration::ZERO // try next pop immediately + } + Peek::Ready(_) => { + relay_log::trace!("EnvelopeBufferService: popping envelope"); + relay_statsd::metric!( + counter(RelayCounters::BufferTryPop) += 1, + peek_result = "ready" + ); + let envelope = buffer + .pop() + .await? 
+ .expect("Element disappeared despite exclusive excess"); + envelopes_tx_permit.send(DequeuedEnvelope(envelope)); - services.project_cache.send(UpdateProject(own_key)); - if sampling_key != own_key { + Duration::ZERO // try next pop immediately + } + Peek::NotReady(stack_key, next_project_fetch, envelope) => { + relay_log::trace!("EnvelopeBufferService: project(s) of envelope not ready"); + relay_statsd::metric!( + counter(RelayCounters::BufferTryPop) += 1, + peek_result = "not_ready" + ); + + // We want to fetch the configs again, only if some time passed between the last + // peek of this not ready project key pair and the current peek. This is done to + // avoid flooding the project cache with `UpdateProject` messages. + if Instant::now() >= next_project_fetch { + relay_log::trace!("EnvelopeBufferService: requesting project(s) update"); + let own_key = envelope.meta().public_key(); + + services.project_cache.send(UpdateProject(own_key)); + match envelope.sampling_key() { + None => {} + Some(sampling_key) if sampling_key == own_key => {} // already sent. + Some(sampling_key) => { services.project_cache.send(UpdateProject(sampling_key)); } - - // Deprioritize the stack to prevent head-of-line blocking and update the next fetch - // time. - buffer.mark_seen(&project_key_pair, DEFAULT_SLEEP); } - DEFAULT_SLEEP // wait and prioritize handling new messages. + // Deprioritize the stack to prevent head-of-line blocking and update the next fetch + // time. + buffer.mark_seen(&stack_key, DEFAULT_SLEEP); } + + DEFAULT_SLEEP // wait and prioritize handling new messages. 
} }; Ok(sleep) } + fn expired(config: &Config, envelope: &Envelope) -> bool { + envelope.meta().start_time().elapsed() > config.spool_envelopes_max_age() + } + fn drop_expired(envelope: Box, services: &Services) { let mut managed_envelope = ManagedEnvelope::new( envelope, @@ -470,9 +471,7 @@ mod tests { use std::time::{Duration, Instant}; use relay_dynamic_config::GlobalConfig; - use relay_metrics::UnixTimestamp; use relay_quotas::DataCategory; - use sqlx::Connection; use tokio::sync::mpsc; use uuid::Uuid; @@ -668,77 +667,6 @@ mod tests { assert_eq!(outcome.quantity, 1); } - #[tokio::test] - async fn old_envelope_from_disk_is_dropped() { - relay_log::init_test!(); - - let tmp = tempfile::TempDir::new().unwrap(); - let path = tmp.path().join("envelopes.db"); - - let buffer_service = || { - envelope_buffer_service( - Some(serde_json::json!({ - "spool": { - "envelopes": { - "version": "experimental", - "path": path, - "max_envelope_delay_secs": 1, - } - } - })), - global_config::Status::Ready(Arc::new(GlobalConfig::default())), - ) - }; - - // Initialize once to migrate the database: - let service = buffer_service().service; - let config = service.config.clone(); - service.start(); - - tokio::time::sleep(Duration::from_millis(1000)).await; - - // Write an envelope to the db - let envelope = new_envelope(false, "foo"); - let mut db = sqlx::SqliteConnection::connect(path.to_str().unwrap()) - .await - .unwrap(); - - let received_at = - UnixTimestamp::now().as_datetime().unwrap() - 2 * config.spool_envelopes_max_age(); - - let query = sqlx::query("INSERT INTO envelopes (received_at, own_key, sampling_key, envelope) VALUES ($1, $2, $3, $4);") - .bind(received_at.timestamp_millis()) - .bind(envelope.meta().public_key().to_string()) - .bind(envelope.meta().public_key().to_string()) - .bind(envelope.to_vec().unwrap()); - query.execute(&mut db).await.unwrap(); - - // Initialize again to read from db: - let EnvelopeBufferServiceResult { - service, - envelopes_rx, - 
project_cache_rx, - mut outcome_aggregator_rx, - global_tx, - } = buffer_service(); - - let _addr = service.start(); - global_tx - .send(global_config::Status::Ready(Arc::new( - GlobalConfig::default(), - ))) - .unwrap(); - - tokio::time::sleep(Duration::from_millis(100)).await; - - assert_eq!(envelopes_rx.len(), 0); - assert_eq!(project_cache_rx.len(), 0); - - let outcome = outcome_aggregator_rx.try_recv().unwrap(); - assert_eq!(outcome.category, DataCategory::TransactionIndexed); - assert_eq!(outcome.quantity, 1); - } - #[tokio::test] async fn test_update_project() { tokio::time::pause(); From 7fbf4cadf0928fc74a958ab4b4083788302bd211 Mon Sep 17 00:00:00 2001 From: Tony Xiao Date: Wed, 16 Oct 2024 10:50:25 -0400 Subject: [PATCH 11/16] feat(eap): Extract user IP (#4144) This starts extracting user IP from the transaction into sentry tags. --- CHANGELOG.md | 1 + .../src/normalize/span/tag_extraction.rs | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc96f5f8af..02f4479203 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - Allowlist the SentryUptimeBot user-agent. ([#4068](https://github.com/getsentry/relay/pull/4068)) - Feature flags of graduated features are now hard-coded in Relay so they can be removed from Sentry. ([#4076](https://github.com/getsentry/relay/pull/4076), [#4080](https://github.com/getsentry/relay/pull/4080)) - Add parallelization in Redis commands. ([#4118](https://github.com/getsentry/relay/pull/4118)) +- Extract user ip for spans. 
([#4144](https://github.com/getsentry/relay/pull/4144)) ## 24.9.0 diff --git a/relay-event-normalization/src/normalize/span/tag_extraction.rs b/relay-event-normalization/src/normalize/span/tag_extraction.rs index fa634b31ad..9e6555cbd4 100644 --- a/relay-event-normalization/src/normalize/span/tag_extraction.rs +++ b/relay-event-normalization/src/normalize/span/tag_extraction.rs @@ -35,6 +35,7 @@ pub enum SpanTagKey { Release, User, UserID, + UserIP, UserUsername, UserEmail, Environment, @@ -100,6 +101,7 @@ impl SpanTagKey { SpanTagKey::Release => "release", SpanTagKey::User => "user", SpanTagKey::UserID => "user.id", + SpanTagKey::UserIP => "user.ip", SpanTagKey::UserUsername => "user.username", SpanTagKey::UserEmail => "user.email", SpanTagKey::UserCountryCode => "user.geo.country_code", @@ -300,6 +302,9 @@ fn extract_shared_tags(event: &Event) -> BTreeMap { if let Some(user_id) = user.id.value() { tags.insert(SpanTagKey::UserID, user_id.as_str().to_owned()); } + if let Some(user_ip) = user.ip_address.value() { + tags.insert(SpanTagKey::UserIP, user_ip.as_str().to_owned()); + } if let Some(user_username) = user.username.value() { tags.insert(SpanTagKey::UserUsername, user_username.as_str().to_owned()); } @@ -2647,6 +2652,7 @@ LIMIT 1 }, "user": { "id": "1", + "ip_address": "127.0.0.1", "email": "admin@sentry.io", "username": "admin", "geo": { @@ -2680,6 +2686,7 @@ LIMIT 1 assert_eq!(get_value!(span.sentry_tags["user"]!), "id:1"); assert_eq!(get_value!(span.sentry_tags["user.id"]!), "1"); + assert_eq!(get_value!(span.sentry_tags["user.ip"]!), "127.0.0.1"); assert_eq!(get_value!(span.sentry_tags["user.username"]!), "admin"); assert_eq!( get_value!(span.sentry_tags["user.email"]!), From b4255aa2c8f88e2ef5c5ef223aa90b83fe5fc5c1 Mon Sep 17 00:00:00 2001 From: Jan Michael Auer Date: Thu, 17 Oct 2024 23:18:51 +0200 Subject: [PATCH 12/16] chore(server): Remove native placeholders from transaction processing (#4148) --- relay-server/src/services/processor.rs | 4 ---- 
relay-server/src/services/processor/attachment.rs | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/relay-server/src/services/processor.rs b/relay-server/src/services/processor.rs index e6502b5187..494630a01b 100644 --- a/relay-server/src/services/processor.rs +++ b/relay-server/src/services/processor.rs @@ -1701,10 +1701,6 @@ impl EnvelopeProcessorService { let profile_id = profile::filter(state); profile::transfer_id(state, profile_id); - if_processing!(self.inner.config, { - attachment::create_placeholders(state); - }); - event::finalize(state, &self.inner.config)?; self.normalize_event(state)?; diff --git a/relay-server/src/services/processor/attachment.rs b/relay-server/src/services/processor/attachment.rs index 21814def5e..c45f3fbbc5 100644 --- a/relay-server/src/services/processor/attachment.rs +++ b/relay-server/src/services/processor/attachment.rs @@ -12,7 +12,7 @@ use crate::statsd::RelayTimers; #[cfg(feature = "processing")] use { - crate::services::processor::EventProcessing, crate::utils, relay_event_schema::protocol::Event, + crate::services::processor::ErrorGroup, crate::utils, relay_event_schema::protocol::Event, relay_protocol::Annotated, }; @@ -23,7 +23,7 @@ use { /// /// If the event payload was empty before, it is created. 
#[cfg(feature = "processing")] -pub fn create_placeholders(state: &mut ProcessEnvelopeState) { +pub fn create_placeholders(state: &mut ProcessEnvelopeState) { let envelope = state.managed_envelope.envelope(); let minidump_attachment = envelope.get_item_by(|item| item.attachment_type() == Some(&AttachmentType::Minidump)); From 7e496391c6e5719e04d0d09c98f63976962e654a Mon Sep 17 00:00:00 2001 From: Joris Bayer Date: Fri, 18 Oct 2024 09:28:14 +0200 Subject: [PATCH 13/16] lint: Rust 1.82 (#4150) --- relay-config/src/redis.rs | 1 + relay-ffi/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/relay-config/src/redis.rs b/relay-config/src/redis.rs index c6350d5a21..4874863195 100644 --- a/relay-config/src/redis.rs +++ b/relay-config/src/redis.rs @@ -208,6 +208,7 @@ pub enum RedisConfigRef<'a> { } /// Helper struct bundling connections and options for the various Redis pools. +#[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] pub enum RedisPoolConfigs<'a> { /// Use one pool for everything. 
diff --git a/relay-ffi/src/lib.rs b/relay-ffi/src/lib.rs index 3a2552a6ac..0b0c8b9b2a 100644 --- a/relay-ffi/src/lib.rs +++ b/relay-ffi/src/lib.rs @@ -231,7 +231,7 @@ pub fn take_last_error() -> Option { pub struct Panic(String); impl Panic { - fn new(info: &panic::PanicInfo) -> Self { + fn new(info: &panic::PanicHookInfo) -> Self { let thread = thread::current(); let thread = thread.name().unwrap_or("unnamed"); From 6a76476e759f44e262e9d1a52f0274f401f4db92 Mon Sep 17 00:00:00 2001 From: Riccardo Busetti Date: Fri, 18 Oct 2024 10:56:35 +0200 Subject: [PATCH 14/16] ref(redis): Cleanup code around Redis (#4151) --- relay-config/src/redis.rs | 9 +++------ relay-redis/src/real.rs | 22 +++++++--------------- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/relay-config/src/redis.rs b/relay-config/src/redis.rs index 4874863195..90d29617c9 100644 --- a/relay-config/src/redis.rs +++ b/relay-config/src/redis.rs @@ -233,7 +233,7 @@ fn build_redis_config_options( let max_connections = options.max_connections.unwrap_or(default_connections); let min_idle = options .min_idle - .unwrap_or_else(|| max_connections.div_ceil(crate::redis::DEFAULT_MIN_IDLE_RATIO)); + .unwrap_or_else(|| max_connections.div_ceil(DEFAULT_MIN_IDLE_RATIO)); RedisConfigOptions { max_connections, @@ -254,7 +254,6 @@ pub(super) fn create_redis_pool( RedisConfig::Cluster { cluster_nodes, options, - .. } => RedisConfigRef::Cluster { cluster_nodes, options: build_redis_config_options(options, default_connections), @@ -281,10 +280,8 @@ pub(super) fn create_redis_pool( pub(super) fn create_redis_pools(configs: &RedisConfigs, cpu_concurrency: u32) -> RedisPoolConfigs { // Default `max_connections` for the `project_configs` pool. // In a unified config, this is used for all pools. 
- let project_configs_default_connections = std::cmp::max( - cpu_concurrency * 2, - crate::redis::DEFAULT_MIN_MAX_CONNECTIONS, - ); + let project_configs_default_connections = + std::cmp::max(cpu_concurrency * 2, DEFAULT_MIN_MAX_CONNECTIONS); match configs { RedisConfigs::Unified(cfg) => { diff --git a/relay-redis/src/real.rs b/relay-redis/src/real.rs index f47d4c929e..4f6e3cfa8c 100644 --- a/relay-redis/src/real.rs +++ b/relay-redis/src/real.rs @@ -194,7 +194,7 @@ impl PooledClient { /// Recursively computes the [`ConnectionInner`] from a [`PooledClient`]. fn connection_inner(&mut self) -> Result, RedisError> { let inner = match self { - PooledClient::Cluster(ref mut connection, opts) => { + PooledClient::Cluster(connection, opts) => { connection .set_read_timeout(Some(Duration::from_secs(opts.read_timeout))) .map_err(RedisError::Redis)?; @@ -220,7 +220,7 @@ impl PooledClient { secondaries: secondary_connections, } } - PooledClient::Single(ref mut connection, opts) => { + PooledClient::Single(connection, opts) => { connection .set_read_timeout(Some(Duration::from_secs(opts.read_timeout))) .map_err(RedisError::Redis)?; @@ -295,7 +295,9 @@ impl RedisPool { /// Creates a [`RedisPool`] in single-node configuration. pub fn single(server: &str, opts: RedisConfigOptions) -> Result { - let pool = Self::client_pool(server, &opts)?; + let pool = Self::base_pool_builder(&opts) + .build(redis::Client::open(server).map_err(RedisError::Redis)?) + .map_err(RedisError::Pool)?; Ok(RedisPool::Single(pool, opts)) } @@ -303,7 +305,7 @@ impl RedisPool { /// Returns a pooled connection to a client. 
pub fn client(&self) -> Result { let pool = match self { - RedisPool::Cluster(ref pool, opts) => PooledClient::Cluster( + RedisPool::Cluster(pool, opts) => PooledClient::Cluster( Box::new(pool.get().map_err(RedisError::Pool)?), opts.clone(), ), @@ -323,7 +325,7 @@ impl RedisPool { secondaries: secondary_clients, } } - RedisPool::Single(ref pool, opts) => PooledClient::Single( + RedisPool::Single(pool, opts) => PooledClient::Single( Box::new(pool.get().map_err(RedisError::Pool)?), opts.clone(), ), @@ -341,16 +343,6 @@ impl RedisPool { } } - /// Returns a [`Pool`] with a [`redis::Client`]. - fn client_pool( - server: &str, - opts: &RedisConfigOptions, - ) -> Result, RedisError> { - Self::base_pool_builder(opts) - .build(redis::Client::open(server).map_err(RedisError::Redis)?) - .map_err(RedisError::Pool) - } - /// Returns the base builder for the pool with the options applied. fn base_pool_builder(opts: &RedisConfigOptions) -> Builder { Pool::builder() From a10023621e09c29aeb053a9a98001e3b01b962b0 Mon Sep 17 00:00:00 2001 From: Joris Bayer Date: Fri, 18 Oct 2024 11:47:38 +0200 Subject: [PATCH 15/16] instr(buffer): Measure envelope size (#4153) Collect a histogram metric for the size of the envelopes that get pushed into the envelope buffer. This will help us tune batch sizes for writing. We already have a metric for item sizes in the request handler, but we want one for the entire envelope & restrict it to envelopes that actually make it to the buffer (exclude rate limited). 
--- relay-server/src/services/buffer/envelope_buffer/mod.rs | 6 ++++++ relay-server/src/statsd.rs | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/relay-server/src/services/buffer/envelope_buffer/mod.rs b/relay-server/src/services/buffer/envelope_buffer/mod.rs index 0ff0bc2b7e..02871016c4 100644 --- a/relay-server/src/services/buffer/envelope_buffer/mod.rs +++ b/relay-server/src/services/buffer/envelope_buffer/mod.rs @@ -14,6 +14,7 @@ use relay_config::Config; use tokio::time::{timeout, Instant}; use crate::envelope::Envelope; +use crate::envelope::Item; use crate::services::buffer::common::ProjectKeyPair; use crate::services::buffer::envelope_stack::sqlite::SqliteEnvelopeStackError; use crate::services::buffer::envelope_stack::EnvelopeStack; @@ -78,6 +79,11 @@ impl PolymorphicEnvelopeBuffer { /// Adds an envelope to the buffer. pub async fn push(&mut self, envelope: Box) -> Result<(), EnvelopeBufferError> { + relay_statsd::metric!( + histogram(RelayHistograms::BufferEnvelopeBodySize) = + envelope.items().map(Item::len).sum::() as u64 + ); + relay_statsd::metric!(timer(RelayTimers::BufferPush), { match self { Self::Sqlite(buffer) => buffer.push(envelope).await, diff --git a/relay-server/src/statsd.rs b/relay-server/src/statsd.rs index 3526e30b0a..e7f3a498db 100644 --- a/relay-server/src/statsd.rs +++ b/relay-server/src/statsd.rs @@ -181,6 +181,11 @@ pub enum RelayHistograms { /// Number of envelopes in the backpressure buffer between the envelope buffer /// and the project cache. BufferBackpressureEnvelopesCount, + /// The amount of bytes in the item payloads of an envelope pushed to the envelope buffer. + /// + /// This is not quite the same as the actual size of a serialized envelope, because it ignores + /// the envelope header and item headers. + BufferEnvelopeBodySize, /// The number of batches emitted per partition. BatchesPerPartition, /// The number of buckets in a batch emitted. 
@@ -309,6 +314,7 @@ impl HistogramMetric for RelayHistograms { RelayHistograms::BufferBackpressureEnvelopesCount => { "buffer.backpressure_envelopes_count" } + RelayHistograms::BufferEnvelopeBodySize => "buffer.envelope_body_size", RelayHistograms::ProjectStatePending => "project_state.pending", RelayHistograms::ProjectStateAttempts => "project_state.attempts", RelayHistograms::ProjectStateRequestBatchSize => "project_state.request.batch_size", From fa1b2540b5003e3eb7ccac5d1150950f6661fbbe Mon Sep 17 00:00:00 2001 From: David Herberth Date: Fri, 18 Oct 2024 13:15:43 +0200 Subject: [PATCH 16/16] ref(server): Move project source into the source module (#4154) Project source should go with all the other sources. Just a move without any functional changes. --- relay-server/src/services/projects/cache.rs | 202 +---------------- .../src/services/projects/source/local.rs | 2 +- .../src/services/projects/source/mod.rs | 206 ++++++++++++++++++ .../src/services/projects/source/upstream.rs | 2 +- 4 files changed, 210 insertions(+), 202 deletions(-) diff --git a/relay-server/src/services/projects/cache.rs b/relay-server/src/services/projects/cache.rs index 6bab22f493..dc29f20bc2 100644 --- a/relay-server/src/services/projects/cache.rs +++ b/relay-server/src/services/projects/cache.rs @@ -1,5 +1,4 @@ use std::collections::{BTreeMap, BTreeSet}; -use std::convert::Infallible; use std::error::Error; use std::sync::Arc; use std::time::Duration; @@ -10,33 +9,23 @@ use crate::services::global_config; use crate::services::processor::{ EncodeMetrics, EnvelopeProcessor, MetricData, ProcessEnvelope, ProcessingGroup, ProjectMetrics, }; -use crate::services::projects::project::state::UpstreamProjectState; use crate::Envelope; use chrono::{DateTime, Utc}; use hashbrown::HashSet; use relay_base_schema::project::ProjectKey; -#[cfg(feature = "processing")] -use relay_config::RedisConfigRef; -use relay_config::{Config, RelayMode}; +use relay_config::Config; use relay_metrics::{Bucket, 
MetricMeta}; use relay_quotas::RateLimits; use relay_redis::RedisPool; use relay_statsd::metric; use relay_system::{Addr, FromMessage, Interface, Sender, Service}; -#[cfg(feature = "processing")] -use tokio::sync::Semaphore; use tokio::sync::{mpsc, watch}; use tokio::time::Instant; use crate::services::metrics::{Aggregator, FlushBuckets}; use crate::services::outcome::{DiscardReason, Outcome, TrackOutcome}; use crate::services::projects::project::{Project, ProjectFetchState, ProjectSender, ProjectState}; -use crate::services::projects::source::local::{LocalProjectSource, LocalProjectSourceService}; -#[cfg(feature = "processing")] -use crate::services::projects::source::redis::RedisProjectSource; -use crate::services::projects::source::upstream::{ - UpstreamProjectSource, UpstreamProjectSourceService, -}; +use crate::services::projects::source::ProjectSource; use crate::services::spooler::{ self, Buffer, BufferService, DequeueMany, Enqueue, QueueKey, RemoveMany, RestoreIndex, UnspooledEnvelope, BATCH_KEY_COUNT, @@ -47,10 +36,6 @@ use crate::services::upstream::UpstreamRelay; use crate::statsd::{RelayCounters, RelayGauges, RelayHistograms, RelayTimers}; use crate::utils::{GarbageDisposal, ManagedEnvelope, MemoryChecker, RetryBackoff, SleepHandle}; -/// Default value of maximum connections to Redis. This value was arbitrarily determined. -#[cfg(feature = "processing")] -const DEFAULT_REDIS_MAX_CONNECTIONS: u32 = 10; - /// Requests a refresh of a project state from one of the available sources. /// /// The project state is resolved in the following precedence: @@ -424,159 +409,6 @@ impl FromMessage for ProjectCache { } } -/// Helper type that contains all configured sources for project cache fetching. -/// -/// See [`RequestUpdate`] for a description on how project states are fetched. 
-#[derive(Clone, Debug)] -struct ProjectSource { - config: Arc, - local_source: Addr, - upstream_source: Addr, - #[cfg(feature = "processing")] - redis_source: Option, - #[cfg(feature = "processing")] - redis_semaphore: Arc, -} - -impl ProjectSource { - /// Starts all project source services in the current runtime. - pub fn start( - config: Arc, - upstream_relay: Addr, - _redis: Option, - ) -> Self { - let local_source = LocalProjectSourceService::new(config.clone()).start(); - let upstream_source = - UpstreamProjectSourceService::new(config.clone(), upstream_relay).start(); - - #[cfg(feature = "processing")] - let redis_max_connections = config - .redis() - .map(|configs| { - let config = match configs { - relay_config::RedisPoolConfigs::Unified(config) => config, - relay_config::RedisPoolConfigs::Individual { - project_configs: config, - .. - } => config, - }; - Self::compute_max_connections(config).unwrap_or(DEFAULT_REDIS_MAX_CONNECTIONS) - }) - .unwrap_or(DEFAULT_REDIS_MAX_CONNECTIONS); - #[cfg(feature = "processing")] - let redis_source = _redis.map(|pool| RedisProjectSource::new(config.clone(), pool)); - - Self { - config, - local_source, - upstream_source, - #[cfg(feature = "processing")] - redis_source, - #[cfg(feature = "processing")] - redis_semaphore: Arc::new(Semaphore::new(redis_max_connections.try_into().unwrap())), - } - } - - #[cfg(feature = "processing")] - fn compute_max_connections(config: RedisConfigRef) -> Option { - match config { - RedisConfigRef::Cluster { options, .. } => Some(options.max_connections), - RedisConfigRef::MultiWrite { configs } => configs - .into_iter() - .filter_map(|c| Self::compute_max_connections(c)) - .max(), - RedisConfigRef::Single { options, .. 
} => Some(options.max_connections), - } - } - - async fn fetch( - self, - project_key: ProjectKey, - no_cache: bool, - cached_state: ProjectFetchState, - ) -> Result { - let state_opt = self - .local_source - .send(FetchOptionalProjectState { project_key }) - .await?; - - if let Some(state) = state_opt { - return Ok(ProjectFetchState::new(state)); - } - - match self.config.relay_mode() { - RelayMode::Proxy => return Ok(ProjectFetchState::allowed()), - RelayMode::Static => return Ok(ProjectFetchState::disabled()), - RelayMode::Capture => return Ok(ProjectFetchState::allowed()), - RelayMode::Managed => (), // Proceed with loading the config from redis or upstream - } - - let current_revision = cached_state.revision().map(String::from); - #[cfg(feature = "processing")] - if let Some(redis_source) = self.redis_source { - let current_revision = current_revision.clone(); - - let redis_permit = self.redis_semaphore.acquire().await?; - let state_fetch_result = tokio::task::spawn_blocking(move || { - redis_source.get_config_if_changed(project_key, current_revision.as_deref()) - }) - .await?; - drop(redis_permit); - - match state_fetch_result { - // New state fetched from Redis, possibly pending. - Ok(UpstreamProjectState::New(state)) => { - let state = state.sanitized(); - if !state.is_pending() { - return Ok(ProjectFetchState::new(state)); - } - } - // Redis reported that we're holding an up-to-date version of the state already, - // refresh the state and return the old cached state again. 
- Ok(UpstreamProjectState::NotModified) => { - return Ok(ProjectFetchState::refresh(cached_state)) - } - Err(error) => { - relay_log::error!( - error = &error as &dyn Error, - "failed to fetch project from Redis", - ); - } - }; - }; - - let state = self - .upstream_source - .send(FetchProjectState { - project_key, - current_revision, - no_cache, - }) - .await?; - - match state { - UpstreamProjectState::New(state) => Ok(ProjectFetchState::new(state.sanitized())), - UpstreamProjectState::NotModified => Ok(ProjectFetchState::refresh(cached_state)), - } - } -} - -#[derive(Debug, thiserror::Error)] -enum ProjectSourceError { - #[error("redis permit error {0}")] - RedisPermit(#[from] tokio::sync::AcquireError), - #[error("redis join error {0}")] - RedisJoin(#[from] tokio::task::JoinError), - #[error("upstream error {0}")] - Upstream(#[from] relay_system::SendError), -} - -impl From for ProjectSourceError { - fn from(value: Infallible) -> Self { - match value {} - } -} - /// Updates the cache with new project state information. struct UpdateProjectState { /// The public key to fetch the project by. @@ -1528,36 +1360,6 @@ impl Service for ProjectCacheService { } } -#[derive(Clone, Debug)] -pub struct FetchProjectState { - /// The public key to fetch the project by. - pub project_key: ProjectKey, - - /// Currently cached revision if available. - /// - /// The upstream is allowed to omit full project configs - /// for requests for which the requester already has the most - /// recent revision. - /// - /// Settings this to `None` will essentially always re-fetch - /// the project config. - pub current_revision: Option, - - /// If true, all caches should be skipped and a fresh state should be computed. 
- pub no_cache: bool, -} - -#[derive(Clone, Debug)] -pub struct FetchOptionalProjectState { - project_key: ProjectKey, -} - -impl FetchOptionalProjectState { - pub fn project_key(&self) -> ProjectKey { - self.project_key - } -} - /// Sum type for all objects which need to be discareded through the [`GarbageDisposal`]. #[derive(Debug)] #[allow(dead_code)] // Fields are never read, only used for discarding/dropping data. diff --git a/relay-server/src/services/projects/source/local.rs b/relay-server/src/services/projects/source/local.rs index 94f3a50835..9bffd851ef 100644 --- a/relay-server/src/services/projects/source/local.rs +++ b/relay-server/src/services/projects/source/local.rs @@ -9,8 +9,8 @@ use relay_system::{AsyncResponse, FromMessage, Interface, Receiver, Sender, Serv use tokio::sync::mpsc; use tokio::time::Instant; -use crate::services::projects::cache::FetchOptionalProjectState; use crate::services::projects::project::{ParsedProjectState, ProjectState}; +use crate::services::projects::source::FetchOptionalProjectState; /// Service interface of the local project source. 
#[derive(Debug)] diff --git a/relay-server/src/services/projects/source/mod.rs b/relay-server/src/services/projects/source/mod.rs index 80f6a34d78..5b208ef3ae 100644 --- a/relay-server/src/services/projects/source/mod.rs +++ b/relay-server/src/services/projects/source/mod.rs @@ -1,4 +1,210 @@ +use std::convert::Infallible; +use std::sync::Arc; + +use relay_base_schema::project::ProjectKey; +#[cfg(feature = "processing")] +use relay_config::RedisConfigRef; +use relay_config::{Config, RelayMode}; +use relay_redis::RedisPool; +use relay_system::{Addr, Service as _}; +#[cfg(feature = "processing")] +use tokio::sync::Semaphore; + pub mod local; #[cfg(feature = "processing")] pub mod redis; pub mod upstream; + +use crate::services::projects::project::state::UpstreamProjectState; +use crate::services::projects::project::ProjectFetchState; +use crate::services::upstream::UpstreamRelay; + +use self::local::{LocalProjectSource, LocalProjectSourceService}; +#[cfg(feature = "processing")] +use self::redis::RedisProjectSource; +use self::upstream::{UpstreamProjectSource, UpstreamProjectSourceService}; + +/// Default value of maximum connections to Redis. This value was arbitrarily determined. +#[cfg(feature = "processing")] +const DEFAULT_REDIS_MAX_CONNECTIONS: u32 = 10; + +/// Helper type that contains all configured sources for project cache fetching. +#[derive(Clone, Debug)] +pub struct ProjectSource { + config: Arc<Config>, + local_source: Addr<LocalProjectSource>, + upstream_source: Addr<UpstreamProjectSource>, + #[cfg(feature = "processing")] + redis_source: Option<RedisProjectSource>, + #[cfg(feature = "processing")] + redis_semaphore: Arc<Semaphore>, +} + +impl ProjectSource { + /// Starts all project source services in the current runtime. 
+ pub fn start( + config: Arc<Config>, + upstream_relay: Addr<UpstreamRelay>, + _redis: Option<RedisPool>, + ) -> Self { + let local_source = LocalProjectSourceService::new(config.clone()).start(); + let upstream_source = + UpstreamProjectSourceService::new(config.clone(), upstream_relay).start(); + + #[cfg(feature = "processing")] + let redis_max_connections = config + .redis() + .map(|configs| { + let config = match configs { + relay_config::RedisPoolConfigs::Unified(config) => config, + relay_config::RedisPoolConfigs::Individual { + project_configs: config, + .. + } => config, + }; + Self::compute_max_connections(config).unwrap_or(DEFAULT_REDIS_MAX_CONNECTIONS) + }) + .unwrap_or(DEFAULT_REDIS_MAX_CONNECTIONS); + #[cfg(feature = "processing")] + let redis_source = _redis.map(|pool| RedisProjectSource::new(config.clone(), pool)); + + Self { + config, + local_source, + upstream_source, + #[cfg(feature = "processing")] + redis_source, + #[cfg(feature = "processing")] + redis_semaphore: Arc::new(Semaphore::new(redis_max_connections.try_into().unwrap())), + } + } + + #[cfg(feature = "processing")] + fn compute_max_connections(config: RedisConfigRef) -> Option<u32> { + match config { + RedisConfigRef::Cluster { options, .. } => Some(options.max_connections), + RedisConfigRef::MultiWrite { configs } => configs + .into_iter() + .filter_map(|c| Self::compute_max_connections(c)) + .max(), + RedisConfigRef::Single { options, .. 
} => Some(options.max_connections), + } + } + + pub async fn fetch( + self, + project_key: ProjectKey, + no_cache: bool, + cached_state: ProjectFetchState, + ) -> Result<ProjectFetchState, ProjectSourceError> { + let state_opt = self + .local_source + .send(FetchOptionalProjectState { project_key }) + .await?; + + if let Some(state) = state_opt { + return Ok(ProjectFetchState::new(state)); + } + + match self.config.relay_mode() { + RelayMode::Proxy => return Ok(ProjectFetchState::allowed()), + RelayMode::Static => return Ok(ProjectFetchState::disabled()), + RelayMode::Capture => return Ok(ProjectFetchState::allowed()), + RelayMode::Managed => (), // Proceed with loading the config from redis or upstream + } + + let current_revision = cached_state.revision().map(String::from); + #[cfg(feature = "processing")] + if let Some(redis_source) = self.redis_source { + let current_revision = current_revision.clone(); + + let redis_permit = self.redis_semaphore.acquire().await?; + let state_fetch_result = tokio::task::spawn_blocking(move || { + redis_source.get_config_if_changed(project_key, current_revision.as_deref()) + }) + .await?; + drop(redis_permit); + + match state_fetch_result { + // New state fetched from Redis, possibly pending. + Ok(UpstreamProjectState::New(state)) => { + let state = state.sanitized(); + if !state.is_pending() { + return Ok(ProjectFetchState::new(state)); + } + } + // Redis reported that we're holding an up-to-date version of the state already, + // refresh the state and return the old cached state again. 
+ Ok(UpstreamProjectState::NotModified) => { + return Ok(ProjectFetchState::refresh(cached_state)) + } + Err(error) => { + relay_log::error!( + error = &error as &dyn std::error::Error, + "failed to fetch project from Redis", + ); + } + }; + }; + + let state = self + .upstream_source + .send(FetchProjectState { + project_key, + current_revision, + no_cache, + }) + .await?; + + match state { + UpstreamProjectState::New(state) => Ok(ProjectFetchState::new(state.sanitized())), + UpstreamProjectState::NotModified => Ok(ProjectFetchState::refresh(cached_state)), + } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum ProjectSourceError { + #[error("redis permit error {0}")] + RedisPermit(#[from] tokio::sync::AcquireError), + #[error("redis join error {0}")] + RedisJoin(#[from] tokio::task::JoinError), + #[error("upstream error {0}")] + Upstream(#[from] relay_system::SendError), +} + +impl From<Infallible> for ProjectSourceError { + fn from(value: Infallible) -> Self { + match value {} + } +} + +#[derive(Clone, Debug)] +pub struct FetchProjectState { + /// The public key to fetch the project by. + pub project_key: ProjectKey, + + /// Currently cached revision if available. + /// + /// The upstream is allowed to omit full project configs + /// for requests for which the requester already has the most + /// recent revision. + /// + /// Setting this to `None` will essentially always re-fetch + /// the project config. + pub current_revision: Option<String>, + + /// If true, all caches should be skipped and a fresh state should be computed. 
+ pub no_cache: bool, +} + +#[derive(Clone, Debug)] +pub struct FetchOptionalProjectState { + project_key: ProjectKey, +} + +impl FetchOptionalProjectState { + pub fn project_key(&self) -> ProjectKey { + self.project_key + } +} diff --git a/relay-server/src/services/projects/source/upstream.rs b/relay-server/src/services/projects/source/upstream.rs index 960aa4c882..bcbfd5a279 100644 --- a/relay-server/src/services/projects/source/upstream.rs +++ b/relay-server/src/services/projects/source/upstream.rs @@ -18,10 +18,10 @@ use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; use tokio::time::Instant; -use crate::services::projects::cache::FetchProjectState; use crate::services::projects::project::state::UpstreamProjectState; use crate::services::projects::project::ParsedProjectState; use crate::services::projects::project::ProjectState; +use crate::services::projects::source::FetchProjectState; use crate::services::upstream::{ Method, RequestPriority, SendQuery, UpstreamQuery, UpstreamRelay, UpstreamRequestError, };