diff --git a/examples/cqlsh-rs.rs b/examples/cqlsh-rs.rs index 8d9ca8ea6a..c12b17a76c 100644 --- a/examples/cqlsh-rs.rs +++ b/examples/cqlsh-rs.rs @@ -4,7 +4,7 @@ use rustyline::error::ReadlineError; use rustyline::{CompletionType, Config, Context, Editor}; use rustyline_derive::{Helper, Highlighter, Hinter, Validator}; use scylla::transport::Compression; -use scylla::{QueryResult, Session, SessionBuilder}; +use scylla::{LegacyQueryResult, Session, SessionBuilder}; use std::env; #[derive(Helper, Highlighter, Validator, Hinter)] @@ -173,7 +173,7 @@ impl Completer for CqlHelper { } } -fn print_result(result: &QueryResult) { +fn print_result(result: &LegacyQueryResult) { if result.rows.is_none() { println!("OK"); return; diff --git a/examples/tower.rs b/examples/tower.rs index b45b08ae19..0d28407da4 100644 --- a/examples/tower.rs +++ b/examples/tower.rs @@ -12,7 +12,7 @@ struct SessionService { // A trivial service implementation for sending parameterless simple string requests to Scylla. impl Service<scylla::query::Query> for SessionService { - type Response = scylla::QueryResult; + type Response = scylla::LegacyQueryResult; type Error = scylla::transport::errors::QueryError; type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>; diff --git a/examples/tracing.rs b/examples/tracing.rs index d742de7e5f..12767de5b0 100644 --- a/examples/tracing.rs +++ b/examples/tracing.rs @@ -8,8 +8,8 @@ use scylla::statement::{ prepared_statement::PreparedStatement, query::Query, Consistency, SerialConsistency, }; use scylla::tracing::TracingInfo; -use scylla::transport::iterator::RowIterator; -use scylla::QueryResult; +use scylla::transport::iterator::LegacyRowIterator; +use scylla::LegacyQueryResult; use scylla::{Session, SessionBuilder}; use std::env; use std::num::NonZeroU32; @@ -42,7 +42,7 @@ async fn main() -> Result<()> { query.set_serial_consistency(Some(SerialConsistency::LocalSerial)); // QueryResult will contain a tracing_id which can be used to query tracing information - let query_result: QueryResult = session.query_unpaged(query.clone(), &[]).await?; + let query_result: LegacyQueryResult = session.query_unpaged(query.clone(), &[]).await?; let query_tracing_id: Uuid = query_result .tracing_id .ok_or_else(|| anyhow!("Tracing id is None!"))?; @@ -79,14 +79,14 @@ async fn main() -> Result<()> { // To trace execution of a prepared statement tracing must be enabled for it prepared.set_tracing(true); - let execute_result: QueryResult = session.execute_unpaged(&prepared, &[]).await?; + let execute_result: LegacyQueryResult = session.execute_unpaged(&prepared, &[]).await?; println!("Execute tracing id: {:?}", execute_result.tracing_id); // PAGED QUERY_ITER EXECUTE_ITER // It's also possible to trace paged queries like query_iter or execute_iter // After iterating through all rows iterator.get_tracing_ids() will give tracing ids // for all page queries - let mut row_iterator: RowIterator = session.query_iter(query, &[]).await?; + let mut row_iterator: LegacyRowIterator = session.query_iter(query, &[]).await?; while let Some(_row) = row_iterator.next().await { // Receive rows @@ -105,7 +105,7 @@ async fn main() -> Result<()> { batch.set_tracing(true); // Run the batch and print its tracing_id - let batch_result: QueryResult = session.batch(&batch, ((),)).await?; + let batch_result: LegacyQueryResult = session.batch(&batch, ((),)).await?; println!("Batch tracing id: {:?}\n", batch_result.tracing_id); // CUSTOM diff --git a/scylla-cql/src/frame/response/result.rs b/scylla-cql/src/frame/response/result.rs index ab85cfa3cc..63483d91f8 100644 ---
a/scylla-cql/src/frame/response/result.rs +++ b/scylla-cql/src/frame/response/result.rs @@ -11,12 +11,14 @@ use crate::frame::value::{ Counter, CqlDate, CqlDecimal, CqlDuration, CqlTime, CqlTimestamp, CqlTimeuuid, CqlVarint, }; use crate::types::deserialize::result::{RowIterator, TypedRowIterator}; +use crate::types::deserialize::row::DeserializeRow; use crate::types::deserialize::value::{ mk_deser_err, BuiltinDeserializationErrorKind, DeserializeValue, MapIterator, UdtIterator, }; -use crate::types::deserialize::{DeserializationError, FrameSlice}; +use crate::types::deserialize::{DeserializationError, FrameSlice, TypeCheckError}; use bytes::{Buf, Bytes}; use std::borrow::Cow; +use std::ops::Deref; use std::sync::Arc; use std::{net::IpAddr, result::Result as StdResult, str}; use uuid::Uuid; @@ -526,6 +528,18 @@ pub struct ResultMetadata<'a> { } impl<'a> ResultMetadata<'a> { + #[inline] + pub fn col_count(&self) -> usize { + self.col_count + } + + #[inline] + pub fn col_specs(&self) -> &[ColumnSpec] { + &self.col_specs + } + + // Preferred to implementing Default, because users shouldn't be encouraged to create + // empty ResultMetadata. #[inline] pub fn mock_empty() -> Self { Self { @@ -533,24 +547,28 @@ impl<'a> ResultMetadata<'a> { col_specs: Vec::new(), } } +} - #[inline] - #[doc(hidden)] - pub fn new_for_test(col_count: usize, col_specs: Vec<ColumnSpec<'a>>) -> Self { - Self { - col_count, - col_specs, - } - } +/// Versatile holder for ResultMetadata. Allows 3 types of ownership +/// of ResultMetadata: +/// 1) borrowing it from somewhere, be it the RESULT:Rows frame +/// or the cached metadata in PreparedStatement; +/// 2) owning it after deserializing from RESULT:Rows; +/// 3) sharing ownership of metadata cached in PreparedStatement. +#[derive(Debug, Clone)] +pub enum ResultMetadataHolder<'frame> { + BorrowedOrOwned(Cow<'frame, ResultMetadata<'frame>>), + SharedCached(Arc<ResultMetadata<'static>>), +} - #[inline] - pub fn col_count(&self) -> usize { - self.col_count - } +impl<'a> Deref for ResultMetadataHolder<'a> { + type Target = ResultMetadata<'a>; - #[inline] - pub fn col_specs(&self) -> &[ColumnSpec] { - &self.col_specs + fn deref(&self) -> &Self::Target { + match self { + ResultMetadataHolder::BorrowedOrOwned(cow) => cow.deref(), + ResultMetadataHolder::SharedCached(arc) => arc.deref(), + } } } @@ -584,20 +602,117 @@ impl Row { } } +/// RESULT:Rows response, in partially serialized form. +/// +/// Flags and paging state are deserialized, remaining part of metadata +/// as well as rows remain serialized. +#[derive(Debug)] +pub struct RawRows { + // Already deserialized part of metadata: + col_count: usize, + global_tables_spec: bool, + no_metadata: bool, + + /// The remaining part of the RESULT frame. + raw_metadata_and_rows: Bytes, + + /// Metadata cached in PreparedStatement, if present. + cached_metadata: Option<Arc<ResultMetadata<'static>>>, +} + +impl RawRows { + // Preferred to implementing Default, because users shouldn't be encouraged to create + // empty RawRows. + #[inline] + pub fn mock_empty() -> Self { + // Minimal correct `raw_metadata_and_rows` looks like this: + // Empty metadata (0 bytes), rows_count=0 (i32 big endian), empty rows (0 bytes). + static EMPTY_METADATA_ZERO_ROWS: &[u8] = &0_i32.to_be_bytes(); + let raw_metadata_and_rows = Bytes::from_static(EMPTY_METADATA_ZERO_ROWS); + + Self { + col_count: 0, + global_tables_spec: false, + no_metadata: false, + raw_metadata_and_rows, + cached_metadata: None, + } + } +} + +/// RESULT:Rows response, in partially serialized form.
+/// +/// Paging state and metadata are deserialized, rows remain serialized. #[derive(Debug)] -pub struct Rows { - pub metadata: Arc<ResultMetadata<'static>>, - pub paging_state_response: PagingStateResponse, - pub rows_count: usize, - pub rows: Vec<Row>, - /// Original size of the serialized rows. - pub serialized_size: usize, +pub struct RawRowsWithDeserializedMetadata<'frame> { + metadata: ResultMetadataHolder<'frame>, + rows_count: usize, + raw_rows: Bytes, +} + +impl<'frame> RawRowsWithDeserializedMetadata<'frame> { + // Preferred to implementing Default, because users shouldn't be encouraged to create + // empty RawRowsWithDeserializedMetadata. + #[inline] + pub fn mock_empty() -> Self { + Self { + metadata: ResultMetadataHolder::BorrowedOrOwned(Cow::Owned( + ResultMetadata::mock_empty(), + )), + rows_count: 0, + raw_rows: Bytes::new(), + } + } + + /// Returns the metadata associated with this response + /// (table and column specifications). + #[inline] + pub fn metadata(&self) -> &ResultMetadata { + &self.metadata + } + + #[inline] + pub(crate) fn into_inner(self) -> (ResultMetadataHolder<'frame>, usize, Bytes) { + (self.metadata, self.rows_count, self.raw_rows) + } + + /// Consumes the `RawRowsWithDeserializedMetadata` and returns metadata + /// associated with the response (or cached metadata, if used in its stead). + #[inline] + pub fn into_metadata(self) -> ResultMetadataHolder<'frame> { + self.metadata + } + + /// Returns the number of rows that the RESULT:Rows contains. + #[inline] + pub fn rows_count(&self) -> usize { + self.rows_count + } + + /// Returns the serialized size of the raw rows. + #[inline] + pub fn rows_size(&self) -> usize { + self.raw_rows.len() + } + + /// Creates a typed iterator over the rows that lazily deserializes + /// rows in the result. + /// + /// Returns Err if the schema of returned result doesn't match R. + #[inline] + pub fn rows_iter<'r, R: DeserializeRow<'r>>( + &'r self, + ) -> StdResult<TypedRowIterator<'r, R>, TypeCheckError> { + let slice = FrameSlice::new(&self.raw_rows); + let raw = RowIterator::new(self.rows_count, self.metadata.col_specs(), slice); + TypedRowIterator::new(raw) + } } #[derive(Debug)] pub enum Result { Void, - Rows(Rows), + Rows((RawRows, PagingStateResponse)), SetKeyspace(SetKeyspace), Prepared(Prepared), SchemaChange(SchemaChange), @@ -693,7 +808,7 @@ macro_rules! generate_deser_type { }; } -generate_deser_type!(_deser_type_borrowed, 'frame, types::read_string); +generate_deser_type!(deser_type_borrowed, 'frame, types::read_string); generate_deser_type!(deser_type_owned, 'static, |buf| types::read_string(buf).map(ToOwned::to_owned)); @@ -815,9 +930,9 @@ macro_rules! generate_deser_col_specs { } generate_deser_col_specs!( - _deser_col_specs_borrowed, + deser_col_specs_borrowed, 'frame, - _deser_type_borrowed, + deser_type_borrowed, ColumnSpec::borrowed, ); @@ -863,6 +978,134 @@ fn deser_result_metadata( Ok((metadata, paging_state)) } +impl RawRows { + /// Deserializes flags and paging state; the other part of result metadata + /// as well as rows remain serialized.
+ fn deserialize( + frame: &mut FrameSlice, + cached_metadata: Option<Arc<ResultMetadata<'static>>>, + ) -> StdResult<(Self, PagingStateResponse), RowsParseError> { + let flags = types::read_int(frame.as_slice_mut()) + .map_err(|err| ResultMetadataParseError::FlagsParseError(err.into()))?; + let global_tables_spec = flags & 0x0001 != 0; + let has_more_pages = flags & 0x0002 != 0; + let no_metadata = flags & 0x0004 != 0; + + let col_count = types::read_int_length(frame.as_slice_mut()) + .map_err(ResultMetadataParseError::ColumnCountParseError)?; + + let raw_paging_state = has_more_pages + .then(|| { + types::read_bytes(frame.as_slice_mut()) + .map_err(ResultMetadataParseError::PagingStateParseError) + }) + .transpose()?; + + let paging_state = PagingStateResponse::new_from_raw_bytes(raw_paging_state); + + let raw_rows = Self { + col_count, + global_tables_spec, + no_metadata, + raw_metadata_and_rows: frame.to_bytes(), + cached_metadata, + }; + + Ok((raw_rows, paging_state)) + } +} + +macro_rules! generate_deserialize_metadata { + ($deserialize_metadata: ident, $l: lifetime, $use_cached_metadata: expr, $deser_col_specs: ident $(,)?) => { + /// Deserializes ResultMetadata in the form mentioned by its name, + /// and deserializes rows count. Keeps rows in the serialized form. + /// + /// If metadata is cached (in the PreparedStatement), it is reused (shared) from cache + /// instead of deserializing. + pub fn $deserialize_metadata( + &self, + ) -> StdResult<RawRowsWithDeserializedMetadata<$l>, RowsParseError> { + let mut frame_slice = FrameSlice::new(&self.raw_metadata_and_rows); + + let metadata = match self.cached_metadata.as_ref() { + Some(cached) if self.no_metadata => { + // Server sent no metadata, but we have metadata cached. This means that we asked the server + // not to send metadata in the response as an optimization. We use cached metadata instead. + $use_cached_metadata(cached) + } + None if self.no_metadata => { + // Server sent no metadata and we have no metadata cached. Having no metadata cached, + // we wouldn't have asked the server for skipping metadata. Therefore, this is most probably + // not a SELECT, because in such case the server would send empty metadata both in Prepared + // and in Result responses. + ResultMetadataHolder::BorrowedOrOwned(Cow::Owned(ResultMetadata::mock_empty())) + } + Some(_) | None => { + // Two possibilities: + // 1) no cached_metadata provided. Server is supposed to provide the result metadata. + // 2) cached metadata present (so we should have asked for skipping metadata), + // but the server sent result metadata anyway. + // In case 1 we have to deserialize result metadata. In case 2 we choose to do that, + // too, because it's suspicious, so we had better use the new metadata just in case. + // Also, we simply need to advance the buffer pointer past metadata, and this requires + // parsing metadata.
+ let server_metadata = { + let global_table_spec = self + .global_tables_spec + .then(|| deser_table_spec(frame_slice.as_slice_mut())) + .transpose() + .map_err(ResultMetadataParseError::from)?; + + let col_specs = $deser_col_specs( + frame_slice.as_slice_mut(), + global_table_spec, + self.col_count, + ) + .map_err(ResultMetadataParseError::from)?; + + ResultMetadata { + col_count: self.col_count, + col_specs, + } + }; + if server_metadata.col_count() != server_metadata.col_specs().len() { + return Err(RowsParseError::ColumnCountMismatch { + col_count: server_metadata.col_count(), + col_specs_count: server_metadata.col_specs().len(), + }); + } + ResultMetadataHolder::BorrowedOrOwned(Cow::Owned(server_metadata)) + } + }; + + let rows_count: usize = types::read_int_length(frame_slice.as_slice_mut()) + .map_err(RowsParseError::RowsCountParseError)?; + + Ok(RawRowsWithDeserializedMetadata { + metadata, + rows_count, + raw_rows: frame_slice.to_bytes(), + }) + } + }; } + +impl RawRows { + generate_deserialize_metadata!( + deserialize_borrowed_metadata, + '_, + |cached| ResultMetadataHolder::BorrowedOrOwned(Cow::Borrowed(cached)), + deser_col_specs_borrowed, + ); + + generate_deserialize_metadata!( + deserialize_owned_metadata, + 'static, + |cached| ResultMetadataHolder::SharedCached(Arc::clone(cached)), + deser_col_specs_owned, + ); +} + fn deser_prepared_metadata( buf: &mut &[u8], ) -> StdResult<PreparedMetadata, ResultMetadataParseError> { @@ -1064,46 +1307,9 @@ pub fn deser_cql_value( fn deser_rows( buf_bytes: Bytes, cached_metadata: Option<&Arc<ResultMetadata<'static>>>, -) -> StdResult<Rows, RowsParseError> { - let buf = &mut &*buf_bytes; - let (server_metadata, paging_state_response) = deser_result_metadata(buf)?; - - let metadata = match cached_metadata { - Some(cached) => Arc::clone(cached), - None => { - // No cached_metadata provided. Server is supposed to provide the result metadata. - if server_metadata.col_count != server_metadata.col_specs.len() { - return Err(RowsParseError::ColumnCountMismatch { - col_count: server_metadata.col_count, - col_specs_count: server_metadata.col_specs.len(), - }); - } - Arc::new(server_metadata) - } - }; - - let original_size = buf.len(); - - let rows_count: usize = - types::read_int_length(buf).map_err(RowsParseError::RowsCountParseError)?; - - let raw_rows_iter = RowIterator::new( - rows_count, - &metadata.col_specs, - FrameSlice::new_borrowed(buf), - ); - let rows_iter = TypedRowIterator::<Row>::new(raw_rows_iter) - .map_err(|err| DeserializationError::new(err.0))?; - - let rows = rows_iter.collect::<StdResult<_, _>>()?; - - Ok(Rows { - metadata, - paging_state_response, - rows_count, - rows, - serialized_size: original_size - buf.len(), - }) +) -> StdResult<(RawRows, PagingStateResponse), RowsParseError> { + let mut frame_slice = FrameSlice::new(&buf_bytes); + RawRows::deserialize(&mut frame_slice, cached_metadata.cloned()).map_err(Into::into) } fn deser_set_keyspace(buf: &mut &[u8]) -> StdResult<SetKeyspace, SetKeyspaceParseError> { @@ -1164,6 +1370,227 @@ pub fn deserialize( ) } +// This is not #[cfg(test)], because it is used by scylla crate. +// Unfortunately, this attribute does not apply recursively to +// children items. Therefore, every `pub` item here must have +// the specifier, too.
+#[doc(hidden)] +mod test_utils { + use std::num::TryFromIntError; + + use bytes::{BufMut, BytesMut}; + + use super::*; + + impl TableSpec<'_> { + pub(crate) fn serialize(&self, buf: &mut impl BufMut) -> StdResult<(), TryFromIntError> { + types::write_string(&self.ks_name, buf)?; + types::write_string(&self.table_name, buf)?; + + Ok(()) + } + } + + impl ColumnType<'_> { + fn id(&self) -> u16 { + match self { + Self::Custom(_) => 0x0000, + Self::Ascii => 0x0001, + Self::BigInt => 0x0002, + Self::Blob => 0x0003, + Self::Boolean => 0x0004, + Self::Counter => 0x0005, + Self::Decimal => 0x0006, + Self::Double => 0x0007, + Self::Float => 0x0008, + Self::Int => 0x0009, + Self::Timestamp => 0x000B, + Self::Uuid => 0x000C, + Self::Text => 0x000D, + Self::Varint => 0x000E, + Self::Timeuuid => 0x000F, + Self::Inet => 0x0010, + Self::Date => 0x0011, + Self::Time => 0x0012, + Self::SmallInt => 0x0013, + Self::TinyInt => 0x0014, + Self::Duration => 0x0015, + Self::List(_) => 0x0020, + Self::Map(_, _) => 0x0021, + Self::Set(_) => 0x0022, + Self::UserDefinedType { .. } => 0x0030, + Self::Tuple(_) => 0x0031, + } + } + + // Only for use in tests + pub(crate) fn serialize(&self, buf: &mut impl BufMut) -> StdResult<(), TryFromIntError> { + let id = self.id(); + types::write_short(id, buf); + + match self { + ColumnType::Custom(type_name) => { + types::write_string(type_name, buf)?; + } + + // Simple types + ColumnType::Ascii + | ColumnType::Boolean + | ColumnType::Blob + | ColumnType::Counter + | ColumnType::Date + | ColumnType::Decimal + | ColumnType::Double + | ColumnType::Duration + | ColumnType::Float + | ColumnType::Int + | ColumnType::BigInt + | ColumnType::Text + | ColumnType::Timestamp + | ColumnType::Inet + | ColumnType::SmallInt + | ColumnType::TinyInt + | ColumnType::Time + | ColumnType::Timeuuid + | ColumnType::Uuid + | ColumnType::Varint => (), + + ColumnType::List(elem_type) | ColumnType::Set(elem_type) => { + elem_type.serialize(buf)?; + } + ColumnType::Map(key_type, value_type) => { + key_type.serialize(buf)?; + value_type.serialize(buf)?; + } + ColumnType::Tuple(types) => { + types::write_short_length(types.len(), buf)?; + for typ in types.iter() { + typ.serialize(buf)?; + } + } + ColumnType::UserDefinedType { + type_name, + keyspace, + field_types, + } => { + types::write_string(keyspace, buf)?; + types::write_string(type_name, buf)?; + types::write_short_length(field_types.len(), buf)?; + for (field_name, field_type) in field_types { + types::write_string(field_name, buf)?; + field_type.serialize(buf)?; + } + } + } + + Ok(()) + } + } + + impl<'a> ResultMetadata<'a> { + #[inline] + #[doc(hidden)] + pub fn new_for_test(col_count: usize, col_specs: Vec<ColumnSpec<'a>>) -> Self { + Self { + col_count, + col_specs, + } + } + + pub(crate) fn serialize( + &self, + buf: &mut impl BufMut, + no_metadata: bool, + global_tables_spec: bool, + ) -> StdResult<(), TryFromIntError> { + let global_table_spec = global_tables_spec + .then(|| self.col_specs.first().map(|col_spec| col_spec.table_spec())) + .flatten(); + + let mut flags = 0; + if global_table_spec.is_some() { + flags |= 0x0001; + } + if no_metadata { + flags |= 0x0004; + } + types::write_int(flags, buf); + + types::write_int_length(self.col_count, buf)?; + + // No paging state.
+ + if !no_metadata { + if let Some(spec) = global_table_spec { + spec.serialize(buf)?; + } + + for col_spec in self.col_specs() { + if global_table_spec.is_none() { + col_spec.table_spec().serialize(buf)?; + } + + types::write_string(col_spec.name(), buf)?; + col_spec.typ().serialize(buf)?; + } + } + + Ok(()) + } + } + + impl RawRows { + #[doc(hidden)] + #[inline] + pub fn new_for_test( + cached_metadata: Option<Arc<ResultMetadata<'static>>>, + metadata: Option<ResultMetadata<'static>>, + global_tables_spec: bool, + rows_count: usize, + raw_rows: &[u8], + ) -> StdResult<Self, TryFromIntError> { + let no_metadata = metadata.is_none(); + let empty_metadata = ResultMetadata::mock_empty(); + let used_metadata = metadata + .as_ref() + .or(cached_metadata.as_deref()) + .unwrap_or(&empty_metadata); + + let raw_result_rows = { + let mut buf = BytesMut::new(); + used_metadata.serialize(&mut buf, no_metadata, global_tables_spec)?; + types::write_int_length(rows_count, &mut buf)?; + buf.extend_from_slice(raw_rows); + + buf.freeze() + }; + + let (raw_rows, _paging_state_response) = + Self::deserialize(&mut FrameSlice::new(&raw_result_rows), cached_metadata).expect( + "Ill-formed serialized metadata for tests - likely bug in serialization code", + ); + + Ok(raw_rows) + } + } + + impl<'frame> RawRowsWithDeserializedMetadata<'frame> { + #[inline] + #[doc(hidden)] + pub fn new_for_test( + metadata: ResultMetadata<'frame>, + rows_count: usize, + raw_rows: Bytes, + ) -> Self { + Self { + metadata: ResultMetadataHolder::BorrowedOrOwned(Cow::Owned(metadata)), + rows_count, + raw_rows, + } + } + } +} + #[cfg(test)] mod tests { use crate as scylla; diff --git a/scylla-cql/src/frame/types.rs b/scylla-cql/src/frame/types.rs index e73347039b..70f28f6c2b 100644 --- a/scylla-cql/src/frame/types.rs +++ b/scylla-cql/src/frame/types.rs @@ -173,7 +173,10 @@ pub fn read_int_length(buf: &mut &[u8]) -> Result -fn write_int_length(v: usize, buf: &mut impl BufMut) -> Result<(), std::num::TryFromIntError> { +pub(crate) fn write_int_length( + v: usize, + buf: &mut impl BufMut, +) -> Result<(), std::num::TryFromIntError> { let v: i32 = v.try_into()?; write_int(v, buf); @@ -224,7 +227,10 @@ pub(crate) fn read_short_length(buf: &mut &[u8]) -> Result -fn write_short_length(v: usize, buf: &mut impl BufMut) -> Result<(), std::num::TryFromIntError> { +pub(crate) fn write_short_length( + v: usize, + buf: &mut impl BufMut, +) -> Result<(), std::num::TryFromIntError> { let v: u16 = v.try_into()?; write_short(v, buf); Ok(()) diff --git a/scylla-cql/src/types/deserialize/result.rs b/scylla-cql/src/types/deserialize/result.rs index 8c71403b0d..e625e71ef1 100644 --- a/scylla-cql/src/types/deserialize/result.rs +++ b/scylla-cql/src/types/deserialize/result.rs @@ -1,4 +1,8 @@ -use crate::frame::response::result::ColumnSpec; +use bytes::Bytes; + +use crate::frame::response::result::{ + ColumnSpec, RawRowsWithDeserializedMetadata, ResultMetadata, ResultMetadataHolder, +}; use super::row::{mk_deser_err, BuiltinDeserializationErrorKind, ColumnIterator, DeserializeRow}; use super::{DeserializationError, FrameSlice, TypeCheckError}; @@ -132,46 +136,212 @@ where } } +// Technically not an iterator because it returns items that borrow from it, +// and the std Iterator interface does not allow for that. +/// A _lending_ iterator over serialized rows. +/// +/// This type is similar to `RowIterator`, but keeps ownership of the serialized +/// result. Because it returns `ColumnIterator`s that need to borrow from it, +/// it does not implement the `Iterator` trait (there is no type in the standard +/// library to represent this concept yet).
+#[derive(Debug)] +pub struct RawRowsLendingIterator { + metadata: ResultMetadataHolder<'static>, + remaining: usize, + at: usize, + raw_rows: Bytes, +} + +impl RawRowsLendingIterator { + /// Creates a new `RawRowsLendingIterator`, consuming given `RawRows`. + #[inline] + pub fn new(raw_rows: RawRowsWithDeserializedMetadata<'static>) -> Self { + let (metadata, rows_count, raw_rows) = raw_rows.into_inner(); + Self { + metadata, + remaining: rows_count, + at: 0, + raw_rows, + } + } + + /// Returns a `ColumnIterator` that represents the next row. + /// + /// Note: the `ColumnIterator` borrows from the `RawRowsLendingIterator`. + /// The column iterator must be consumed before the rows iterator can + /// continue. + #[inline] + #[allow(clippy::should_implement_trait)] // https://github.com/rust-lang/rust-clippy/issues/5004 + pub fn next(&mut self) -> Option<Result<ColumnIterator, DeserializationError>> { + self.remaining = self.remaining.checked_sub(1)?; + + // First create the slice encompassing the whole frame. + let mut remaining_frame = FrameSlice::new(&self.raw_rows); + // Then slice it to encompass the remaining suffix of the frame. + *remaining_frame.as_slice_mut() = &remaining_frame.as_slice()[self.at..]; + // Ideally, we would prefer to preserve the FrameSlice between calls to `next()`, + // but borrowing from oneself is impossible, so we have to recreate it this way. + + let iter = ColumnIterator::new(self.metadata.col_specs(), remaining_frame); + + // Skip the row here, manually + for (column_index, spec) in self.metadata.col_specs().iter().enumerate() { + let remaining_frame_len_before_column_read = remaining_frame.as_slice().len(); + if let Err(err) = remaining_frame.read_cql_bytes() { + return Some(Err(mk_deser_err::<Self>( + BuiltinDeserializationErrorKind::RawColumnDeserializationFailed { + column_index, + column_name: spec.name().to_owned(), + err: DeserializationError::new(err), + }, + ))); + } else { + let remaining_frame_len_after_column_read = remaining_frame.as_slice().len(); + self.at += + remaining_frame_len_before_column_read - remaining_frame_len_after_column_read; + } + } + + Some(Ok(iter)) + } + + #[inline] + pub fn size_hint(&self) -> (usize, Option<usize>) { + // next() is written in a way that it does not progress on error, so once an error + // is encountered, the same error keeps being returned until `self.remaining` + // elements are yielded in total. + (self.remaining, Some(self.remaining)) + } + + /// Returns the metadata associated with the response (table and + /// column specifications). + #[inline] + pub fn metadata(&self) -> &ResultMetadata { + &self.metadata + } + + /// Returns the remaining number of rows that this iterator is expected + /// to produce.
+ #[inline] + pub fn rows_remaining(&self) -> usize { + self.remaining + } +} + #[cfg(test)] mod tests { + use bytes::Bytes; - use crate::frame::response::result::ColumnType; + use crate::frame::response::result::{ + ColumnType, RawRowsWithDeserializedMetadata, ResultMetadata, + }; use super::super::tests::{serialize_cells, spec, CELL1, CELL2}; - use super::{FrameSlice, RowIterator, TypedRowIterator}; + use super::{ + ColumnIterator, DeserializationError, FrameSlice, RawRowsLendingIterator, RowIterator, + TypedRowIterator, + }; + + trait LendingIterator { + type Item<'borrow> + where + Self: 'borrow; + fn lend_next(&mut self) -> Option<Result<Self::Item<'_>, DeserializationError>>; + } + + impl<'frame> LendingIterator for RowIterator<'frame> { + type Item<'borrow> = ColumnIterator<'borrow> + where + Self: 'borrow; + + fn lend_next(&mut self) -> Option<Result<Self::Item<'_>, DeserializationError>> { + self.next() + } + } + + impl LendingIterator for RawRowsLendingIterator { + type Item<'borrow> = ColumnIterator<'borrow>; + + fn lend_next(&mut self) -> Option<Result<Self::Item<'_>, DeserializationError>> { + self.next() + } + } #[test] - fn test_row_iterator_basic_parse() { + fn test_row_iterators_basic_parse() { let raw_data = serialize_cells([Some(CELL1), Some(CELL2), Some(CELL2), Some(CELL1)]); - let specs = [spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; - let mut iter = RowIterator::new(2, &specs, FrameSlice::new(&raw_data)); - - let mut row1 = iter.next().unwrap().unwrap(); - let c11 = row1.next().unwrap().unwrap(); - assert_eq!(c11.slice.unwrap().as_slice(), CELL1); - let c12 = row1.next().unwrap().unwrap(); - assert_eq!(c12.slice.unwrap().as_slice(), CELL2); - assert!(row1.next().is_none()); - - let mut row2 = iter.next().unwrap().unwrap(); - let c21 = row2.next().unwrap().unwrap(); - assert_eq!(c21.slice.unwrap().as_slice(), CELL2); - let c22 = row2.next().unwrap().unwrap(); - assert_eq!(c22.slice.unwrap().as_slice(), CELL1); - assert!(row2.next().is_none()); - - assert!(iter.next().is_none()); + let specs = vec![spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; + + let row_iter = RowIterator::new( + 2, + // Those leaks are required because of a compiler bug-limitation about GATs: + // https://blog.rust-lang.org/2022/10/28/gats-stabilization.html#implied-static-requirement-from-higher-ranked-trait-bounds + // the following type higher-ranked lifetime constraint implies 'static lifetime. + // + // I: for<'item> LendingIterator<Item<'item> = ColumnIterator<'item>>, + // + // The bug is said to be a lot of effort to fix, so in tests let's just use `leak` + // to obtain 'static references.
+ Vec::leak(specs.clone()), + FrameSlice::new(Box::leak(Box::new(raw_data.clone()))), + ); + let lending_row_iter = + RawRowsLendingIterator::new(RawRowsWithDeserializedMetadata::new_for_test( + ResultMetadata::new_for_test(specs.len(), specs), + 2, + raw_data, + )); + check(row_iter); + check(lending_row_iter); + + fn check<I>(mut iter: I) + where + I: for<'item> LendingIterator<Item<'item> = ColumnIterator<'item>>, + { + let mut row1 = iter.lend_next().unwrap().unwrap(); + let c11 = row1.next().unwrap().unwrap(); + assert_eq!(c11.slice.unwrap().as_slice(), CELL1); + let c12 = row1.next().unwrap().unwrap(); + assert_eq!(c12.slice.unwrap().as_slice(), CELL2); + assert!(row1.next().is_none()); + + let mut row2 = iter.lend_next().unwrap().unwrap(); + let c21 = row2.next().unwrap().unwrap(); + assert_eq!(c21.slice.unwrap().as_slice(), CELL2); + let c22 = row2.next().unwrap().unwrap(); + assert_eq!(c22.slice.unwrap().as_slice(), CELL1); + assert!(row2.next().is_none()); + + assert!(iter.lend_next().is_none()); + } } #[test] - fn test_row_iterator_too_few_rows() { + fn test_row_iterators_too_few_rows() { let raw_data = serialize_cells([Some(CELL1), Some(CELL2)]); - let specs = [spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; - let mut iter = RowIterator::new(2, &specs, FrameSlice::new(&raw_data)); - - iter.next().unwrap().unwrap(); - assert!(iter.next().unwrap().is_err()); + let specs = vec![spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; + let row_iter = RowIterator::new( + 2, + Vec::leak(specs.clone()), + FrameSlice::new(Box::leak(Box::new(raw_data.clone()))), + ); + let lending_row_iter = + RawRowsLendingIterator::new(RawRowsWithDeserializedMetadata::new_for_test( + ResultMetadata::new_for_test(specs.len(), specs), + 2, + raw_data, + )); + check(row_iter); + check(lending_row_iter); + + fn check<I>(mut iter: I) + where + I: for<'item> LendingIterator<Item<'item> = ColumnIterator<'item>>, + { + iter.lend_next().unwrap().unwrap(); + assert!(iter.lend_next().unwrap().is_err()); + } } #[test] diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index 3adce0fc04..8b57c26154 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -192,6 +192,7 @@ pub use frame::response::cql_to_rust::FromRow; pub use transport::caching_session::CachingSession; pub use transport::execution_profile::ExecutionProfile; +pub use transport::legacy_query_result::LegacyQueryResult; pub use transport::query_result::QueryResult; pub use transport::session::{IntoTypedRows, Session, SessionConfig}; pub use transport::session_builder::SessionBuilder; diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index e4bca85bda..cbf9d3c6dc 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -3,9 +3,9 @@ use crate::prepared_statement::PreparedStatement; use crate::query::Query; use crate::statement::{PagingState, PagingStateResponse}; use crate::transport::errors::QueryError; -use crate::transport::iterator::RowIterator; +use crate::transport::iterator::LegacyRowIterator; use crate::transport::partitioner::PartitionerName; -use crate::{QueryResult, Session}; +use crate::{LegacyQueryResult, Session}; use bytes::Bytes; use dashmap::DashMap; use futures::future::try_join_all; @@ -75,7 +75,7 @@ where &self, query: impl Into<Query>, values: impl SerializeRow, - ) -> Result<QueryResult, QueryError> { + ) -> Result<LegacyQueryResult, QueryError> { let query = query.into(); let prepared = self.add_prepared_statement_owned(query).await?; self.session.execute_unpaged(&prepared, values).await @@ -86,7 +86,7 @@ where &self,
query: impl Into<Query>, values: impl SerializeRow, - ) -> Result<RowIterator, QueryError> { + ) -> Result<LegacyRowIterator, QueryError> { let query = query.into(); let prepared = self.add_prepared_statement_owned(query).await?; self.session.execute_iter(prepared, values).await @@ -98,7 +98,7 @@ where query: impl Into<Query>, values: impl SerializeRow, paging_state: PagingState, - ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { let query = query.into(); let prepared = self.add_prepared_statement_owned(query).await?; self.session @@ -112,7 +112,7 @@ where &self, batch: &Batch, values: impl BatchValues, - ) -> Result<QueryResult, QueryError> { + ) -> Result<LegacyQueryResult, QueryError> { let all_prepared: bool = batch .statements .iter() diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index a7a4766379..6b77b1b46e 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -47,8 +47,9 @@ use std::{ }; use super::errors::{ProtocolError, UseKeyspaceProtocolError}; -use super::iterator::RowIterator; +use super::iterator::{LegacyRowIterator, RawIterator}; use super::locator::tablets::{RawTablet, TabletParsingError}; +use super::query_result::QueryResult; use super::session::AddressTranslator; use super::topology::{PeerEndpoint, UntranslatedEndpoint, UntranslatedPeer}; use super::NodeAddr; @@ -69,7 +70,6 @@ use crate::routing::ShardInfo; use crate::statement::prepared_statement::PreparedStatement; use crate::statement::{Consistency, PageSize, PagingState, PagingStateResponse}; use crate::transport::Compression; -use crate::QueryResult; // Queries for schema agreement const LOCAL_VERSION: &str = "SELECT schema_version FROM system.local WHERE key='local'"; @@ -268,14 +268,11 @@ impl NonErrorQueryResponse { pub(crate) fn into_query_result_and_paging_state( self, ) -> Result<(QueryResult, PagingStateResponse), UserRequestError> { - let (rows, paging_state, metadata, serialized_size) = match self.response { - NonErrorResponse::Result(result::Result::Rows(rs)) => ( - Some(rs.rows), - rs.paging_state_response, - Some(rs.metadata), - rs.serialized_size, - ), - NonErrorResponse::Result(_) => (None, PagingStateResponse::NoMorePages, None, 0), + let (raw_rows, paging_state_response) = match self.response { + NonErrorResponse::Result(result::Result::Rows((rs, paging_state_response))) => { + (Some(rs), paging_state_response) + } + NonErrorResponse::Result(_) => (None, PagingStateResponse::NoMorePages), _ => { return Err(UserRequestError::UnexpectedResponse( self.response.to_response_kind(), @@ -284,14 +281,8 @@ impl NonErrorQueryResponse { }; Ok(( - QueryResult { - rows, - warnings: self.warnings, - tracing_id: self.tracing_id, - metadata, - serialized_size, - }, - paging_state, + QueryResult::new(raw_rows, self.tracing_id, self.warnings), + paging_state_response, )) } @@ -1191,14 +1182,15 @@ impl Connection { pub(crate) async fn query_iter( self: Arc<Self>, query: Query, - ) -> Result<RowIterator, QueryError> { + ) -> Result<LegacyRowIterator, QueryError> { let consistency = query .config .determine_consistency(self.config.default_consistency); let serial_consistency = query.config.serial_consistency.flatten(); - RowIterator::new_for_connection_query_iter(query, self, consistency, serial_consistency) + RawIterator::new_for_connection_query_iter(query, self, consistency, serial_consistency) .await + .map(RawIterator::into_legacy) } /// Executes a prepared statements and fetches its results over multiple pages, using @@ -1207,13 +1199,13 @@ impl Connection { self: Arc<Self>, prepared_statement: PreparedStatement, values: SerializedValues, - ) ->
Result<RowIterator, QueryError> { + ) -> Result<LegacyRowIterator, QueryError> { let consistency = prepared_statement .config .determine_consistency(self.config.default_consistency); let serial_consistency = prepared_statement.config.serial_consistency.flatten(); - RowIterator::new_for_connection_execute_iter( + RawIterator::new_for_connection_execute_iter( prepared_statement, values, self, @@ -1221,6 +1213,7 @@ impl Connection { serial_consistency, ) .await + .map(RawIterator::into_legacy) } #[allow(dead_code)] @@ -1443,6 +1436,7 @@ impl Connection { let (version_id,) = self .query_unpaged(LOCAL_VERSION) .await? + .into_legacy_result()? .single_row_typed() .map_err(ProtocolError::SchemaVersionFetch)?; Ok(version_id) } @@ -2610,6 +2604,8 @@ mod tests { .query_unpaged("SELECT p, v FROM t") .await .unwrap() + .into_legacy_result() + .unwrap() .rows_typed::<(i32, Vec<u8>)>() .unwrap() .collect::<Result<Vec<_>, _>>() diff --git a/scylla/src/transport/errors.rs b/scylla/src/transport/errors.rs index 8ef4a99437..2c7db9bde1 100644 --- a/scylla/src/transport/errors.rs +++ b/scylla/src/transport/errors.rs @@ -32,7 +32,7 @@ use thiserror::Error; use crate::{authentication::AuthError, frame::response}; -use super::query_result::{RowsExpectedError, SingleRowTypedError}; +use super::legacy_query_result::{RowsExpectedError, SingleRowTypedError}; /// Error that occurred during query execution #[derive(Error, Debug, Clone)] diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 4457e47fdb..bbf9347e35 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -1,7 +1,6 @@ //! Iterators over rows returned by paged queries use std::future::Future; -use std::mem; use std::net::SocketAddr; use std::ops::ControlFlow; use std::pin::Pin; @@ -9,19 +8,25 @@ use std::sync::Arc; use std::task::{Context, Poll}; use futures::Stream; +use scylla_cql::frame::frame_errors::RowsParseError; +use scylla_cql::frame::response::result::RawRows; use scylla_cql::frame::response::NonErrorResponse; +use scylla_cql::types::deserialize::result::RawRowsLendingIterator; +use scylla_cql::types::deserialize::row::{ColumnIterator, DeserializeRow}; +use scylla_cql::types::deserialize::TypeCheckError; use scylla_cql::types::serialize::row::SerializedValues; use std::result::Result; use thiserror::Error; use tokio::sync::mpsc; use super::execution_profile::ExecutionProfileInner; +use super::query_result::ColumnSpecs; use super::session::RequestSpan; use crate::cql_to_rust::{FromRow, FromRowError}; use crate::frame::response::{ result, - result::{ColumnSpec, Row, Rows}, + result::{ColumnSpec, Row}, }; use crate::history::{self, HistoryListener}; use crate::statement::{prepared_statement::PreparedStatement, query::Query}; @@ -36,17 +41,22 @@ use crate::transport::NodeRef; use tracing::{trace, trace_span, warn, Instrument}; use uuid::Uuid; -/// Iterator over rows returned by paged queries\ -/// Allows to easily access rows without worrying about handling multiple pages -pub struct RowIterator { - current_row_idx: usize, - current_page: Rows, - page_receiver: mpsc::Receiver<Result<ReceivedPage, QueryError>>, - tracing_ids: Vec<Uuid>, +// Like std::task::ready!, but handles the whole stack of Poll<Option<Result<_>>>. +// If it matches Poll::Ready(Some(Ok(_))), then it returns the innermost value, +// otherwise it returns from the surrounding function. +macro_rules!
ready_some_ok { + ($e:expr) => { + match $e { + Poll::Ready(Some(Ok(x))) => x, + Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err.into()))), + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, + } + }; } struct ReceivedPage { - rows: Rows, + rows: RawRows, tracing_id: Option, } @@ -58,833 +68,1075 @@ pub(crate) struct PreparedIteratorConfig { pub(crate) metrics: Arc, } -/// Fetching pages is asynchronous so `RowIterator` does not implement the `Iterator` trait.\ -/// Instead it uses the asynchronous `Stream` trait -impl Stream for RowIterator { - type Item = Result; +// A separate module is used here so that the parent module cannot construct +// SendAttemptedProof directly. +mod checked_channel_sender { + use scylla_cql::frame::response::result::RawRows; + use std::marker::PhantomData; + use tokio::sync::mpsc; + use uuid::Uuid; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut s = self.as_mut(); + use crate::transport::errors::QueryError; - if s.is_current_page_exhausted() { - match Pin::new(&mut s.page_receiver).poll_recv(cx) { - Poll::Ready(Some(Ok(received_page))) => { - s.current_page = received_page.rows; - s.current_row_idx = 0; + use super::ReceivedPage; - if let Some(tracing_id) = received_page.tracing_id { - s.tracing_ids.push(tracing_id); - } - } - Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))), - Poll::Ready(None) => return Poll::Ready(None), - Poll::Pending => return Poll::Pending, - } - } + /// A value whose existence proves that there was an attempt + /// to send an item of type T through a channel. + /// Can only be constructed by ProvingSender::send. + pub(crate) struct SendAttemptedProof(PhantomData); - let idx = s.current_row_idx; - if idx < s.current_page.rows.len() { - let row = mem::take(&mut s.current_page.rows[idx]); - s.current_row_idx += 1; - return Poll::Ready(Some(Ok(row))); - } + /// An mpsc::Sender which returns proofs that it attempted to send items. 
+ pub(crate) struct ProvingSender<T>(mpsc::Sender<T>); - // We probably got a zero-sized page - // Yield, but tell that we are ready - cx.waker().wake_by_ref(); - Poll::Pending + impl<T> From<mpsc::Sender<T>> for ProvingSender<T> { + fn from(s: mpsc::Sender<T>) -> Self { + Self(s) + } } -} -impl RowIterator { - /// Converts this iterator into an iterator over rows parsed as given type - pub fn into_typed<RowT>(self) -> TypedRowIterator<RowT> { - TypedRowIterator { - row_iterator: self, - phantom_data: Default::default(), + impl<T> ProvingSender<T> { + pub(crate) async fn send( + &self, + value: T, + ) -> (SendAttemptedProof<T>, Result<(), mpsc::error::SendError<T>>) { + (SendAttemptedProof(PhantomData), self.0.send(value).await) } } - pub(crate) async fn new_for_query( - query: Query, - execution_profile: Arc<ExecutionProfileInner>, - cluster_data: Arc<ClusterData>, - metrics: Arc<Metrics>, - ) -> Result<RowIterator, QueryError> { - let (sender, receiver) = mpsc::channel(1); - - let consistency = query - .config - .consistency - .unwrap_or(execution_profile.consistency); - let serial_consistency = query - .config - .serial_consistency - .unwrap_or(execution_profile.serial_consistency); - - let page_size = query.get_validated_page_size(); - - let routing_info = RoutingInfo { - consistency, - serial_consistency, - ..Default::default() - }; - - let retry_session = query - .get_retry_policy() - .map(|rp| &**rp) - .unwrap_or(&*execution_profile.retry_policy) - .new_session(); - - let parent_span = tracing::Span::current(); - let worker_task = async move { - let query_ref = &query; - - let page_query = |connection: Arc<Connection>, - consistency: Consistency, - paging_state: PagingState| { - async move { - connection - .query_raw_with_consistency( - query_ref, - consistency, - serial_consistency, - Some(page_size), - paging_state, - ) - .await - } + type ResultPage = Result<ReceivedPage, QueryError>; - let query_ref = &query; - - let span_creator = move || { - let span = RequestSpan::new_query(&query_ref.contents); - span.record_request_size(0); - span - }; + impl ProvingSender<ResultPage> { + pub(crate) async fn send_empty_page( + &self, + tracing_id: Option<Uuid>, + ) -> ( + SendAttemptedProof<ResultPage>, + Result<(), mpsc::error::SendError<ResultPage>>, + ) { + let empty_page = ReceivedPage { + rows: RawRows::mock_empty(), + tracing_id, + }; + self.send(Ok(empty_page)).await + } + } +} - let worker = RowIteratorWorker { - sender: sender.into(), - page_query, - statement_info: routing_info, - query_is_idempotent: query.config.is_idempotent, - query_consistency: consistency, - retry_session, - execution_profile, - metrics, - paging_state: PagingState::start(), - history_listener: query.config.history_listener.clone(), - current_query_id: None, - current_attempt_id: None, - parent_span, - span_creator, - }; +use checked_channel_sender::{ProvingSender, SendAttemptedProof}; - worker.work(cluster_data).await - }; +type PageSendAttemptedProof = SendAttemptedProof<Result<ReceivedPage, QueryError>>; - Self::new_from_worker_future(worker_task, receiver).await - } +// RowIteratorWorker works in the background to fetch pages +// RowIterator receives them through a channel +struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { + sender: ProvingSender<Result<ReceivedPage, QueryError>>, - pub(crate) async fn new_for_prepared_statement( - config: PreparedIteratorConfig, - ) -> Result<RowIterator, QueryError> { - let (sender, receiver) = mpsc::channel(1); + // Closure used to perform a single page query + // AsyncFn(Arc<Connection>, Option<Arc<[u8]>>) -> Result<QueryResponse, QueryError> + page_query: QueryFunc, - let consistency = config - .prepared - .config - .consistency -
.unwrap_or(config.execution_profile.consistency); - let serial_consistency = config - .prepared - .config - .serial_consistency - .unwrap_or(config.execution_profile.serial_consistency); + statement_info: RoutingInfo<'a>, + query_is_idempotent: bool, + query_consistency: Consistency, + retry_session: Box<dyn RetrySession>, + execution_profile: Arc<ExecutionProfileInner>, + metrics: Arc<Metrics>, - let page_size = config.prepared.get_validated_page_size(); + paging_state: PagingState, - let retry_session = config - .prepared - .get_retry_policy() - .map(|rp| &**rp) - .unwrap_or(&*config.execution_profile.retry_policy) - .new_session(); + history_listener: Option<Arc<dyn HistoryListener>>, + current_query_id: Option<history::QueryId>, + current_attempt_id: Option<history::AttemptId>, - let parent_span = tracing::Span::current(); - let worker_task = async move { - let prepared_ref = &config.prepared; - let values_ref = &config.values; + parent_span: tracing::Span, + span_creator: SpanCreatorFunc, +} - let (partition_key, token) = match prepared_ref - .extract_partition_key_and_calculate_token( - prepared_ref.get_partitioner_name(), - values_ref, - ) { - Ok(res) => res.unzip(), - Err(err) => { - let (proof, _res) = ProvingSender::from(sender).send(Err(err)).await; - return proof; - } - }; +impl<QueryFunc, QueryFut, SpanCreator> RowIteratorWorker<'_, QueryFunc, SpanCreator> +where + QueryFunc: Fn(Arc<Connection>, Consistency, PagingState) -> QueryFut, + QueryFut: Future<Output = Result<QueryResponse, QueryError>>, + SpanCreator: Fn() -> RequestSpan, +{ - let table_spec = config.prepared.get_table_spec(); - let statement_info = RoutingInfo { - consistency, - serial_consistency, - token, - table: table_spec, - is_confirmed_lwt: config.prepared.is_confirmed_lwt(), - }; + // Contract: this function MUST send at least one item through self.sender + async fn work(mut self, cluster_data: Arc<ClusterData>) -> PageSendAttemptedProof { + let load_balancer = self.execution_profile.load_balancing_policy.clone(); + let statement_info = self.statement_info.clone(); + let query_plan = + load_balancing::Plan::new(load_balancer.as_ref(), &statement_info, &cluster_data); - let page_query = |connection: Arc<Connection>, - consistency: Consistency, - paging_state: PagingState| async move { - connection - .execute_raw_with_consistency( - prepared_ref, - values_ref, - consistency, - serial_consistency, - Some(page_size), - paging_state, - ) - .await + let mut last_error: QueryError = QueryError::EmptyPlan; + let mut current_consistency: Consistency = self.query_consistency; }; + self.log_query_start(); - let serialized_values_size = config.values.buffer_size(); + 'nodes_in_plan: for (node, shard) in query_plan { + let span = + trace_span!(parent: &self.parent_span, "Executing query", node = %node.address); + // For each node in the plan choose a connection to use + // This connection will be reused for same node retries to preserve paging cache on the shard + let connection: Arc<Connection> = match node + .connection_for_shard(shard) + .instrument(span.clone()) + .await + { + Ok(connection) => connection, + Err(e) => { + trace!( + parent: &span, + error = %e, + "Choosing connection failed" + ); + last_error = e.into(); + // Broken connection doesn't count as a failed query, don't log in metrics + continue 'nodes_in_plan; + } - let replicas: Option<smallvec::SmallVec<[_; 8]>> = - if let (Some(table_spec), Some(token)) = - (statement_info.table, statement_info.token) - { - Some( - config - .cluster_data - .get_token_endpoints_iter(table_spec, token) - .map(|(node, shard)| (node.clone(), shard)) - .collect(), - ) - } else { - None }; 'same_node_retries: loop { trace!(parent: &span, "Execution started"); // Query pages until an error occurs let queries_result: Result<PageSendAttemptedProof, QueryError> = self .query_pages(&connection, current_consistency, node) .instrument(span.clone()) .await; last_error = match queries_result { Ok(proof) => {
trace!(parent: &span, "Query succeeded"); + // query_pages returned Ok, so we are guaranteed + // that it attempted to send at least one page + // through self.sender and we can safely return now. + return proof; + } + Err(error) => { + trace!( + parent: &span, + error = %error, + "Query failed" + ); + error + } }; - let span_creator = move || { - let span = RequestSpan::new_prepared( - partition_key.as_ref().map(|pk| pk.iter()), - token, - serialized_values_size, - ); - if let Some(replicas) = replicas.as_ref() { - span.record_replicas(replicas); - } - span - }; - - let worker = RowIteratorWorker { - sender: sender.into(), - page_query, - statement_info, - query_is_idempotent: config.prepared.config.is_idempotent, - query_consistency: consistency, - retry_session, - execution_profile: config.execution_profile, - metrics: config.metrics, - paging_state: PagingState::start(), - history_listener: config.prepared.config.history_listener.clone(), - current_query_id: None, - current_attempt_id: None, - parent_span, - span_creator, - }; + // Use retry policy to decide what to do next + let query_info = QueryInfo { + error: &last_error, + is_idempotent: self.query_is_idempotent, + consistency: self.query_consistency, + }; - worker.work(config.cluster_data).await - }; + let retry_decision = self.retry_session.decide_should_retry(query_info); + trace!( + parent: &span, + retry_decision = format!("{:?}", retry_decision).as_str() + ); + self.log_attempt_error(&last_error, &retry_decision); + match retry_decision { + RetryDecision::RetrySameNode(cl) => { + self.metrics.inc_retries_num(); + current_consistency = cl.unwrap_or(current_consistency); + continue 'same_node_retries; + } + RetryDecision::RetryNextNode(cl) => { + self.metrics.inc_retries_num(); + current_consistency = cl.unwrap_or(current_consistency); + continue 'nodes_in_plan; + } + RetryDecision::DontRetry => break 'nodes_in_plan, + RetryDecision::IgnoreWriteError => { + warn!("Ignoring error during fetching pages; stopping fetching."); + // If we are here then, most likely, we didn't send + // anything through the self.sender channel. + // Although we are in an awkward situation (_iter + // interface isn't meant for sending writes), + // we must attempt to send something because + // the iterator expects it. + let (proof, _) = self.sender.send_empty_page(None).await; + return proof; + } + }; + } + } - Self::new_from_worker_future(worker_task, receiver).await + // Send last_error to RowIterator - query failed fully + self.log_query_error(&last_error); + let (proof, _) = self.sender.send(Err(last_error)).await; + proof } - pub(crate) async fn new_for_connection_query_iter( - query: Query, - connection: Arc, + // Given a working connection query as many pages as possible until the first error. 
+ // + // Contract: this function must either: + // - Return an error + // - Return Ok but have attempted to send a page via self.sender + async fn query_pages( + &mut self, + connection: &Arc<Connection>, + consistency: Consistency, + node: NodeRef<'_>, + ) -> Result<PageSendAttemptedProof, QueryError> { + loop { + let request_span = (self.span_creator)(); + match self + .query_one_page(connection, consistency, node, &request_span) + .instrument(request_span.span().clone()) + .await? + { + ControlFlow::Break(proof) => return Ok(proof), + ControlFlow::Continue(_) => {} + } + } } + async fn query_one_page( + &mut self, + connection: &Arc<Connection>, + consistency: Consistency, + node: NodeRef<'_>, + request_span: &RequestSpan, + ) -> Result<ControlFlow<PageSendAttemptedProof>, QueryError> { + self.metrics.inc_total_paged_queries(); + let query_start = std::time::Instant::now(); + trace!( + connection = %connection.get_connect_address(), + "Sending" + ); + self.log_attempt_start(connection.get_connect_address()); + let query_response = + (self.page_query)(connection.clone(), consistency, self.paging_state.clone()) + .await + .and_then(QueryResponse::into_non_error_query_response); + let elapsed = query_start.elapsed(); + request_span.record_shard_id(connection); + match query_response { + Ok(NonErrorQueryResponse { + response: + NonErrorResponse::Result(result::Result::Rows((rows, paging_state_response))), + tracing_id, + ..
+ }) => { + let _ = self.metrics.log_query_latency(elapsed.as_millis() as u64); + self.log_attempt_success(); + self.log_query_success(); + self.execution_profile + .load_balancing_policy + .on_query_success(&self.statement_info, elapsed, node); - fn is_current_page_exhausted(&self) -> bool { - self.current_row_idx >= self.current_page.rows.len() - } -} + let received_page = ReceivedPage { rows, tracing_id }; -// A separate module is used here so that the parent module cannot construct -// SendAttemptedProof directly. -mod checked_channel_sender { - use scylla_cql::frame::{ - request::query::PagingStateResponse, - response::result::{ResultMetadata, Rows}, - }; - use std::{marker::PhantomData, sync::Arc}; - use tokio::sync::mpsc; - use uuid::Uuid; + // Send next page to RowIterator + let (proof, res) = self.sender.send(Ok(received_page)).await; + if res.is_err() { + // channel was closed, RowIterator was dropped - should shutdown + return Ok(ControlFlow::Break(proof)); + } - use crate::transport::errors::QueryError; + match paging_state_response.into_paging_control_flow() { + ControlFlow::Continue(paging_state) => { + self.paging_state = paging_state; + } + ControlFlow::Break(()) => { + // Reached the last query, shutdown + return Ok(ControlFlow::Break(proof)); + } + } - use super::ReceivedPage; + // Query succeeded, reset retry policy for future retries + self.retry_session.reset(); + self.log_query_start(); - /// A value whose existence proves that there was an attempt - /// to send an item of type T through a channel. - /// Can only be constructed by ProvingSender::send. - pub(crate) struct SendAttemptedProof(PhantomData); - - /// An mpsc::Sender which returns proofs that it attempted to send items. - pub(crate) struct ProvingSender(mpsc::Sender); + Ok(ControlFlow::Continue(())) + } + Err(err) => { + let err = err.into(); + self.metrics.inc_failed_paged_queries(); + self.execution_profile + .load_balancing_policy + .on_query_failure(&self.statement_info, elapsed, node, &err); + Err(err) + } + Ok(NonErrorQueryResponse { + response: NonErrorResponse::Result(_), + tracing_id, + .. + }) => { + // We have most probably sent a modification statement (e.g. INSERT or UPDATE), + // so let's return an empty iterator as suggested in #631. - impl From> for ProvingSender { - fn from(s: mpsc::Sender) -> Self { - Self(s) + // We must attempt to send something because the iterator expects it. 
+ let (proof, _) = self.sender.send_empty_page(tracing_id).await; + Ok(ControlFlow::Break(proof)) + } + Ok(response) => { + self.metrics.inc_failed_paged_queries(); + let err = + ProtocolError::UnexpectedResponse(response.response.to_response_kind()).into(); + self.execution_profile + .load_balancing_policy + .on_query_failure(&self.statement_info, elapsed, node, &err); + Err(err) + } } } - impl ProvingSender { - pub(crate) async fn send( - &self, - value: T, - ) -> (SendAttemptedProof, Result<(), mpsc::error::SendError>) { - (SendAttemptedProof(PhantomData), self.0.send(value).await) - } + fn log_query_start(&mut self) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; + + self.current_query_id = Some(history_listener.log_query_start()); } - type ResultPage = Result; + fn log_query_success(&mut self) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; - impl ProvingSender { - pub(crate) async fn send_empty_page( - &self, - tracing_id: Option, - ) -> ( - SendAttemptedProof, - Result<(), mpsc::error::SendError>, - ) { - let empty_page = ReceivedPage { - rows: Rows { - metadata: Arc::new(ResultMetadata::mock_empty()), - paging_state_response: PagingStateResponse::NoMorePages, - rows_count: 0, - rows: Vec::new(), - serialized_size: 0, - }, - tracing_id, - }; - self.send(Ok(empty_page)).await - } + let query_id: history::QueryId = match &self.current_query_id { + Some(id) => *id, + None => return, + }; + + history_listener.log_query_success(query_id); } -} -use checked_channel_sender::{ProvingSender, SendAttemptedProof}; + fn log_query_error(&mut self, error: &QueryError) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; -type PageSendAttemptedProof = SendAttemptedProof>; + let query_id: history::QueryId = match &self.current_query_id { + Some(id) => *id, + None => return, + }; -// RowIteratorWorker works in the background to fetch pages -// RowIterator receives them through a channel -struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { - sender: ProvingSender>, + history_listener.log_query_error(query_id, error); + } - // Closure used to perform a single page query - // AsyncFn(Arc, Option>) -> Result - page_query: QueryFunc, + fn log_attempt_start(&mut self, node_addr: SocketAddr) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; - statement_info: RoutingInfo<'a>, - query_is_idempotent: bool, - query_consistency: Consistency, - retry_session: Box, - execution_profile: Arc, - metrics: Arc, + let query_id: history::QueryId = match &self.current_query_id { + Some(id) => *id, + None => return, + }; - paging_state: PagingState, + self.current_attempt_id = + Some(history_listener.log_attempt_start(query_id, None, node_addr)); + } - history_listener: Option>, - current_query_id: Option, - current_attempt_id: Option, + fn log_attempt_success(&mut self) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; - parent_span: tracing::Span, - span_creator: SpanCreatorFunc, + let attempt_id: history::AttemptId = match &self.current_attempt_id { + Some(id) => *id, + None => return, + }; + + history_listener.log_attempt_success(attempt_id); + } + + fn log_attempt_error(&mut self, error: &QueryError, retry_decision: &RetryDecision) { + let 
history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; + + let attempt_id: history::AttemptId = match &self.current_attempt_id { + Some(id) => *id, + None => return, + }; + + history_listener.log_attempt_error(attempt_id, error, retry_decision); + } } -impl RowIteratorWorker<'_, QueryFunc, SpanCreator> +/// A massively simplified version of the RowIteratorWorker. It does not have +/// any complicated logic related to retries, it just fetches pages from +/// a single connection. +struct SingleConnectionRowIteratorWorker { + sender: ProvingSender>, + fetcher: Fetcher, +} + +impl SingleConnectionRowIteratorWorker where - QueryFunc: Fn(Arc, Consistency, PagingState) -> QueryFut, - QueryFut: Future>, - SpanCreator: Fn() -> RequestSpan, + Fetcher: Fn(PagingState) -> FetchFut + Send + Sync, + FetchFut: Future> + Send, { - // Contract: this function MUST send at least one item through self.sender - async fn work(mut self, cluster_data: Arc) -> PageSendAttemptedProof { - let load_balancer = self.execution_profile.load_balancing_policy.clone(); - let statement_info = self.statement_info.clone(); - let query_plan = - load_balancing::Plan::new(load_balancer.as_ref(), &statement_info, &cluster_data); + async fn work(mut self) -> PageSendAttemptedProof { + match self.do_work().await { + Ok(proof) => proof, + Err(err) => { + let (proof, _) = self.sender.send(Err(err)).await; + proof + } + } + } - let mut last_error: QueryError = QueryError::EmptyPlan; - let mut current_consistency: Consistency = self.query_consistency; + async fn do_work(&mut self) -> Result { + let mut paging_state = PagingState::start(); + loop { + let result = (self.fetcher)(paging_state).await?; + let response = result.into_non_error_query_response()?; + match response.response { + NonErrorResponse::Result(result::Result::Rows((rows, paging_state_response))) => { + let (proof, send_result) = self + .sender + .send(Ok(ReceivedPage { + rows, + tracing_id: response.tracing_id, + })) + .await; - self.log_query_start(); + if send_result.is_err() { + // channel was closed, RowIterator was dropped - should shutdown + return Ok(proof); + } - 'nodes_in_plan: for (node, shard) in query_plan { - let span = - trace_span!(parent: &self.parent_span, "Executing query", node = %node.address); - // For each node in the plan choose a connection to use - // This connection will be reused for same node retries to preserve paging cache on the shard - let connection: Arc = match node - .connection_for_shard(shard) - .instrument(span.clone()) - .await - { - Ok(connection) => connection, - Err(e) => { - trace!( - parent: &span, - error = %e, - "Choosing connection failed" - ); - last_error = e.into(); - // Broken connection doesn't count as a failed query, don't log in metrics - continue 'nodes_in_plan; + match paging_state_response.into_paging_control_flow() { + ControlFlow::Continue(new_paging_state) => { + paging_state = new_paging_state; + } + ControlFlow::Break(()) => { + // Reached the last query, shutdown + return Ok(proof); + } + } } - }; + NonErrorResponse::Result(_) => { + // We have most probably sent a modification statement (e.g. INSERT or UPDATE), + // so let's return an empty iterator as suggested in #631. 
-            'same_node_retries: loop {
-                trace!(parent: &span, "Execution started");
-                // Query pages until an error occurs
-                let queries_result: Result<PageSendAttemptedProof, QueryError> = self
-                    .query_pages(&connection, current_consistency, node)
-                    .instrument(span.clone())
-                    .await;
+                    // We must attempt to send something because the iterator expects it.
+                    let (proof, _) = self.sender.send_empty_page(response.tracing_id).await;
+                    return Ok(proof);
+                }
+                _ => {
+                    return Err(ProtocolError::UnexpectedResponse(
+                        response.response.to_response_kind(),
+                    )
+                    .into());
+                }
+            }
+        }
+    }
+}
 
-                last_error = match queries_result {
-                    Ok(proof) => {
-                        trace!(parent: &span, "Query succeeded");
-                        // query_pages returned Ok, so we are guaranteed
-                        // that it attempted to send at least one page
-                        // through self.sender and we can safely return now.
-                        return proof;
-                    }
-                    Err(error) => {
-                        trace!(
-                            parent: &span,
-                            error = %error,
-                            "Query failed"
-                        );
-                        error
-                    }
-                };
-
-                // Use retry policy to decide what to do next
-                let query_info = QueryInfo {
-                    error: &last_error,
-                    is_idempotent: self.query_is_idempotent,
-                    consistency: self.query_consistency,
-                };
+/// An intermediate object that allows constructing an iterator over a query
+/// that is asynchronously paged in the background.
+///
+/// Before the results can be processed in a convenient way, the RawIterator
+/// needs to be cast into a typed iterator. This is done with the `into_typed()` method.
+/// As the method is generic over the target type, the turbofish syntax
+/// can come in handy there, e.g. `raw_iter.into_typed::<(i32, &str, Uuid)>()`.
+///
+/// A pre-0.14.0 interface is also available, although deprecated:
+/// the `into_legacy()` method converts RawIterator to LegacyRowIterator,
+/// enabling Stream'ed operation on rows that are eagerly deserialized
+/// to an intermediate Row type. This is inefficient, especially if
+/// the Row type is not the intended target type.
+pub struct RawIterator {
+    current_page: RawRowsLendingIterator,
+    page_receiver: mpsc::Receiver<Result<ReceivedPage, QueryError>>,
+    tracing_ids: Vec<Uuid>,
+}
 
-                let retry_decision = self.retry_session.decide_should_retry(query_info);
-                trace!(
-                    parent: &span,
-                    retry_decision = format!("{:?}", retry_decision).as_str()
-                );
-                self.log_attempt_error(&last_error, &retry_decision);
-                match retry_decision {
-                    RetryDecision::RetrySameNode(cl) => {
-                        self.metrics.inc_retries_num();
-                        current_consistency = cl.unwrap_or(current_consistency);
-                        continue 'same_node_retries;
-                    }
-                    RetryDecision::RetryNextNode(cl) => {
-                        self.metrics.inc_retries_num();
-                        current_consistency = cl.unwrap_or(current_consistency);
-                        continue 'nodes_in_plan;
-                    }
-                    RetryDecision::DontRetry => break 'nodes_in_plan,
-                    RetryDecision::IgnoreWriteError => {
-                        warn!("Ignoring error during fetching pages; stopping fetching.");
-                        // If we are here then, most likely, we didn't send
-                        // anything through the self.sender channel.
-                        // Although we are in an awkward situation (_iter
-                        // interface isn't meant for sending writes),
-                        // we must attempt to send something because
-                        // the iterator expects it.
-                        let (proof, _) = self.sender.send_empty_page(None).await;
-                        return proof;
-                    }
-                };
-            }
+/// RawIterator is not an iterator or a stream! However, it implements
+/// a `next()` method that returns a ColumnIterator<'r>, which can be used
+/// to manually deserialize a row.
+/// The ColumnIterator borrows from the RawIterator, and the futures::Stream trait
+/// does not allow for such a pattern. Lending streams are not a thing yet.
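+///
+/// For instance, a typical conversion to a typed iterator might look as follows
+/// (a sketch; `session` and the `ks.t` table are assumed to exist):
+///
+/// ```text
+/// let raw_iter = session.query_iter("SELECT a, b FROM ks.t", &[]).await?;
+/// // The type check is performed by `into_typed()`; the turbofish picks the target type.
+/// let mut typed_iter = raw_iter.into_typed::<(i32, &str)>()?;
+/// while let Some((a, b)) = typed_iter.try_next().await? {
+///     println!("a: {}, b: {}", a, b);
+/// }
+/// ```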
+impl RawIterator {
+    /// Returns the next item (ColumnIterator) from the stream.
+    ///
+    /// This can be used with `type_check()` for manual deserialization - see example below.
+    ///
+    /// This is not a part of the Stream interface because the returned iterator
+    /// borrows from self.
+    ///
+    /// This is cancel-safe.
+    ///
+    /// # Example
+    ///
+    /// // FIXME: change `text` to `rust` when Session API is migrated to the new deserialization framework.
+    /// ```text
+    /// # use scylla::Session;
+    /// # use std::error::Error;
+    /// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
+    /// use futures::stream::StreamExt;
+    /// use scylla::deserialize::DeserializeRow;
+    ///
+    /// let mut raw_iter = session
+    ///     .query_iter("SELECT a, b FROM ks.t", &[])
+    ///     .await?;
+    ///
+    /// // Remember to type check! Failure to call type_check() can result
+    /// // in panics upon deserialization.
+    /// raw_iter.type_check::<(i32, i32)>()?;
+    ///
+    /// // Now that we type-checked, we can manually deserialize from RawIterator.
+    /// while let Some(column_iterator) = raw_iter.next().await.transpose()? {
+    ///     let (a, b) = <(i32, i32) as DeserializeRow>::deserialize(column_iterator)?;
+    ///     println!("a, b: {}, {}", a, b);
+    /// }
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub async fn next(&mut self) -> Option<Result<ColumnIterator, QueryError>> {
+        let res = std::future::poll_fn(|cx| Pin::new(&mut *self).poll_fill_page(cx)).await;
+        match res {
+            Some(Ok(())) => {}
+            Some(Err(err)) => return Some(Err(err)),
+            None => return None,
         }
-        // Send last_error to RowIterator - query failed fully
-        self.log_query_error(&last_error);
-        let (proof, _) = self.sender.send(Err(last_error)).await;
-        proof
+        // We are guaranteed here to have a non-empty page, so unwrap
+        Some(
+            self.current_page
+                .next()
+                .unwrap()
+                .map_err(|e| RowsParseError::from(e).into()),
+        )
     }
 
-    // Given a working connection query as many pages as possible until the first error.
-    //
-    // Contract: this function must either:
-    // - Return an error
-    // - Return Ok but have attempted to send a page via self.sender
-    async fn query_pages(
-        &mut self,
-        connection: &Arc<Connection>,
-        consistency: Consistency,
-        node: NodeRef<'_>,
-    ) -> Result<PageSendAttemptedProof, QueryError> {
-        loop {
-            let request_span = (self.span_creator)();
-            match self
-                .query_one_page(connection, consistency, node, &request_span)
-                .instrument(request_span.span().clone())
-                .await?
-            {
-                ControlFlow::Break(proof) => return Ok(proof),
-                ControlFlow::Continue(_) => {}
-            }
+    /// Tries to acquire a non-empty page, if current page is exhausted.
+    fn poll_fill_page<'r>(
+        mut self: Pin<&'r mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<Option<Result<(), QueryError>>> {
+        if !self.is_current_page_exhausted() {
+            return Poll::Ready(Some(Ok(())));
+        }
+        ready_some_ok!(self.as_mut().poll_next_page(cx));
+        if self.is_current_page_exhausted() {
+            // Try again later
+            cx.waker().wake_by_ref();
+            Poll::Pending
+        } else {
+            Poll::Ready(Some(Ok(())))
         }
     }
 
-    async fn query_one_page(
-        &mut self,
-        connection: &Arc<Connection>,
-        consistency: Consistency,
-        node: NodeRef<'_>,
-        request_span: &RequestSpan,
-    ) -> Result<ControlFlow<PageSendAttemptedProof>, QueryError> {
-        self.metrics.inc_total_paged_queries();
-        let query_start = std::time::Instant::now();
+    /// Makes an attempt to acquire the next page (which may be empty).
+    ///
+    /// On success, returns Some(Ok()).
+    /// On failure, returns Some(Err()).
+    /// If there are no more pages, returns None.
+    fn poll_next_page<'r>(
+        mut self: Pin<&'r mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<Option<Result<(), QueryError>>> {
+        let mut s = self.as_mut();
 
-        trace!(
-            connection = %connection.get_connect_address(),
-            "Sending"
-        );
-        self.log_attempt_start(connection.get_connect_address());
+        let received_page = ready_some_ok!(Pin::new(&mut s.page_receiver).poll_recv(cx));
+        let raw_rows_with_deserialized_metadata =
+            received_page.rows.deserialize_owned_metadata()?;
+        s.current_page = RawRowsLendingIterator::new(raw_rows_with_deserialized_metadata);
 
-        let query_response =
-            (self.page_query)(connection.clone(), consistency, self.paging_state.clone())
-                .await
-                .and_then(QueryResponse::into_non_error_query_response);
+        if let Some(tracing_id) = received_page.tracing_id {
+            s.tracing_ids.push(tracing_id);
+        }
 
-        let elapsed = query_start.elapsed();
+        Poll::Ready(Some(Ok(())))
+    }
 
-        request_span.record_shard_id(connection);
+    /// Type-checks the iterator against the given type.
+    ///
+    /// This is automatically called upon transforming [RawIterator] into [TypedRowIterator].
+    /// Can be used with `next()` for manual deserialization. See `next()` for an example.
+    #[inline]
+    pub fn type_check<'frame, RowT: DeserializeRow<'frame>>(&self) -> Result<(), TypeCheckError> {
+        RowT::type_check(self.column_specs().inner())
+    }
 
-        match query_response {
-            Ok(NonErrorQueryResponse {
-                response: NonErrorResponse::Result(result::Result::Rows(mut rows)),
-                tracing_id,
-                ..
-            }) => {
-                let _ = self.metrics.log_query_latency(elapsed.as_millis() as u64);
-                self.log_attempt_success();
-                self.log_query_success();
-                self.execution_profile
-                    .load_balancing_policy
-                    .on_query_success(&self.statement_info, elapsed, node);
+    /// Casts the iterator to a given row type, enabling Stream'ed operations
+    /// on rows, which deserialize them on the fly to that given type.
+    /// It begins by performing a type check.
+    #[inline]
+    pub fn into_typed<'frame, RowT: DeserializeRow<'frame>>(
+        self,
+    ) -> Result<TypedRowIterator<RowT>, TypeCheckError> {
+        TypedRowIterator::<RowT>::new(self)
+    }
 
-                let paging_state_response = rows.paging_state_response.take();
+    /// Converts this iterator into an iterator over rows parsed as the given type,
+    /// using the legacy deserialization framework.
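+    ///
+    /// For example (a sketch; `session` is assumed to be a connected `Session`):
+    ///
+    /// ```text
+    /// use futures::stream::StreamExt;
+    ///
+    /// let mut legacy_iter = session
+    ///     .query_iter("SELECT a, b FROM ks.t", &[])
+    ///     .await?
+    ///     .into_legacy();
+    /// // LegacyRowIterator implements Stream, eagerly parsing each row
+    /// // into the intermediate Row type.
+    /// while let Some(row) = legacy_iter.next().await {
+    ///     println!("{:?}", row?);
+    /// }
+    /// ```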
+ #[inline] + pub fn into_legacy(self) -> LegacyRowIterator { + LegacyRowIterator { raw_iterator: self } + } - request_span.record_rows_fields(&rows); + pub(crate) async fn new_for_query( + query: Query, + execution_profile: Arc, + cluster_data: Arc, + metrics: Arc, + ) -> Result { + let (sender, receiver) = mpsc::channel(1); - let received_page = ReceivedPage { rows, tracing_id }; + let consistency = query + .config + .consistency + .unwrap_or(execution_profile.consistency); + let serial_consistency = query + .config + .serial_consistency + .unwrap_or(execution_profile.serial_consistency); - // Send next page to RowIterator - let (proof, res) = self.sender.send(Ok(received_page)).await; - if res.is_err() { - // channel was closed, RowIterator was dropped - should shutdown - return Ok(ControlFlow::Break(proof)); + let page_size = query.get_validated_page_size(); + + let routing_info = RoutingInfo { + consistency, + serial_consistency, + ..Default::default() + }; + + let retry_session = query + .get_retry_policy() + .map(|rp| &**rp) + .unwrap_or(&*execution_profile.retry_policy) + .new_session(); + + let parent_span = tracing::Span::current(); + let worker_task = async move { + let query_ref = &query; + + let page_query = |connection: Arc, + consistency: Consistency, + paging_state: PagingState| { + async move { + connection + .query_raw_with_consistency( + query_ref, + consistency, + serial_consistency, + Some(page_size), + paging_state, + ) + .await } + }; - match paging_state_response.into_paging_control_flow() { - ControlFlow::Continue(paging_state) => { - self.paging_state = paging_state; - } - ControlFlow::Break(()) => { - // Reached the last query, shutdown - return Ok(ControlFlow::Break(proof)); - } + let query_ref = &query; + + let span_creator = move || { + let span = RequestSpan::new_query(&query_ref.contents); + span.record_request_size(0); + span + }; + + let worker = RowIteratorWorker { + sender: sender.into(), + page_query, + statement_info: routing_info, + query_is_idempotent: query.config.is_idempotent, + query_consistency: consistency, + retry_session, + execution_profile, + metrics, + paging_state: PagingState::start(), + history_listener: query.config.history_listener.clone(), + current_query_id: None, + current_attempt_id: None, + parent_span, + span_creator, + }; + + worker.work(cluster_data).await + }; + + Self::new_from_worker_future(worker_task, receiver).await + } + + pub(crate) async fn new_for_prepared_statement( + config: PreparedIteratorConfig, + ) -> Result { + let (sender, receiver) = mpsc::channel(1); + + let consistency = config + .prepared + .config + .consistency + .unwrap_or(config.execution_profile.consistency); + let serial_consistency = config + .prepared + .config + .serial_consistency + .unwrap_or(config.execution_profile.serial_consistency); + + let page_size = config.prepared.get_validated_page_size(); + + let retry_session = config + .prepared + .get_retry_policy() + .map(|rp| &**rp) + .unwrap_or(&*config.execution_profile.retry_policy) + .new_session(); + + let parent_span = tracing::Span::current(); + let worker_task = async move { + let prepared_ref = &config.prepared; + let values_ref = &config.values; + + let (partition_key, token) = match prepared_ref + .extract_partition_key_and_calculate_token( + prepared_ref.get_partitioner_name(), + values_ref, + ) { + Ok(res) => res.unzip(), + Err(err) => { + let (proof, _res) = ProvingSender::from(sender).send(Err(err)).await; + return proof; } + }; - // Query succeeded, reset retry policy for future 
retries - self.retry_session.reset(); - self.log_query_start(); + let table_spec = config.prepared.get_table_spec(); + let statement_info = RoutingInfo { + consistency, + serial_consistency, + token, + table: table_spec, + is_confirmed_lwt: config.prepared.is_confirmed_lwt(), + }; - Ok(ControlFlow::Continue(())) - } - Err(err) => { - let err = err.into(); - self.metrics.inc_failed_paged_queries(); - self.execution_profile - .load_balancing_policy - .on_query_failure(&self.statement_info, elapsed, node, &err); - Err(err) - } - Ok(NonErrorQueryResponse { - response: NonErrorResponse::Result(_), - tracing_id, - .. - }) => { - // We have most probably sent a modification statement (e.g. INSERT or UPDATE), - // so let's return an empty iterator as suggested in #631. + let page_query = |connection: Arc, + consistency: Consistency, + paging_state: PagingState| async move { + connection + .execute_raw_with_consistency( + prepared_ref, + values_ref, + consistency, + serial_consistency, + Some(page_size), + paging_state, + ) + .await + }; + + let serialized_values_size = config.values.buffer_size(); + + let replicas: Option> = + if let (Some(table_spec), Some(token)) = + (statement_info.table, statement_info.token) + { + Some( + config + .cluster_data + .get_token_endpoints_iter(table_spec, token) + .map(|(node, shard)| (node.clone(), shard)) + .collect(), + ) + } else { + None + }; + + let span_creator = move || { + let span = RequestSpan::new_prepared( + partition_key.as_ref().map(|pk| pk.iter()), + token, + serialized_values_size, + ); + if let Some(replicas) = replicas.as_ref() { + span.record_replicas(replicas); + } + span + }; + + let worker = RowIteratorWorker { + sender: sender.into(), + page_query, + statement_info, + query_is_idempotent: config.prepared.config.is_idempotent, + query_consistency: consistency, + retry_session, + execution_profile: config.execution_profile, + metrics: config.metrics, + paging_state: PagingState::start(), + history_listener: config.prepared.config.history_listener.clone(), + current_query_id: None, + current_attempt_id: None, + parent_span, + span_creator, + }; - // We must attempt to send something because the iterator expects it. 
- let (proof, _) = self.sender.send_empty_page(tracing_id).await; - Ok(ControlFlow::Break(proof)) - } - Ok(response) => { - self.metrics.inc_failed_paged_queries(); - let err = - ProtocolError::UnexpectedResponse(response.response.to_response_kind()).into(); - self.execution_profile - .load_balancing_policy - .on_query_failure(&self.statement_info, elapsed, node, &err); - Err(err) - } - } + worker.work(config.cluster_data).await + }; + + Self::new_from_worker_future(worker_task, receiver).await } - fn log_query_start(&mut self) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, + pub(crate) async fn new_for_connection_query_iter( + query: Query, + connection: Arc, + consistency: Consistency, + serial_consistency: Option, + ) -> Result { + let (sender, receiver) = mpsc::channel::>(1); + + let page_size = query.get_validated_page_size(); + + let worker_task = async move { + let worker = SingleConnectionRowIteratorWorker { + sender: sender.into(), + fetcher: |paging_state| { + connection.query_raw_with_consistency( + &query, + consistency, + serial_consistency, + Some(page_size), + paging_state, + ) + }, + }; + worker.work().await }; - self.current_query_id = Some(history_listener.log_query_start()); + Self::new_from_worker_future(worker_task, receiver).await } - fn log_query_success(&mut self) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; + pub(crate) async fn new_for_connection_execute_iter( + prepared: PreparedStatement, + values: SerializedValues, + connection: Arc, + consistency: Consistency, + serial_consistency: Option, + ) -> Result { + let (sender, receiver) = mpsc::channel::>(1); - let query_id: history::QueryId = match &self.current_query_id { - Some(id) => *id, - None => return, + let page_size = prepared.get_validated_page_size(); + + let worker_task = async move { + let worker = SingleConnectionRowIteratorWorker { + sender: sender.into(), + fetcher: |paging_state| { + connection.execute_raw_with_consistency( + &prepared, + &values, + consistency, + serial_consistency, + Some(page_size), + paging_state, + ) + }, + }; + worker.work().await }; - history_listener.log_query_success(query_id); + Self::new_from_worker_future(worker_task, receiver).await } - fn log_query_error(&mut self, error: &QueryError) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; + async fn new_from_worker_future( + worker_task: impl Future + Send + 'static, + mut receiver: mpsc::Receiver>, + ) -> Result { + tokio::task::spawn(worker_task); - let query_id: history::QueryId = match &self.current_query_id { - Some(id) => *id, - None => return, - }; + // This unwrap is safe because: + // - The future returned by worker.work sends at least one item + // to the channel (the PageSendAttemptedProof helps enforce this) + // - That future is polled in a tokio::task which isn't going to be + // cancelled + let page_received = receiver.recv().await.unwrap()?; + let raw_rows_with_deserialized_metadata = + page_received.rows.deserialize_owned_metadata()?; - history_listener.log_query_error(query_id, error); + Ok(Self { + current_page: RawRowsLendingIterator::new(raw_rows_with_deserialized_metadata), + page_receiver: receiver, + tracing_ids: if let Some(tracing_id) = page_received.tracing_id { + vec![tracing_id] + } else { + Vec::new() + }, + }) } - fn log_attempt_start(&mut self, node_addr: SocketAddr) { - 
let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; + /// If tracing was enabled returns tracing ids of all finished page queries + #[inline] + pub fn tracing_ids(&self) -> &[Uuid] { + &self.tracing_ids + } - let query_id: history::QueryId = match &self.current_query_id { - Some(id) => *id, - None => return, - }; + /// Returns specification of row columns + #[inline] + pub fn column_specs(&self) -> ColumnSpecs<'_> { + ColumnSpecs::new(self.current_page.metadata().col_specs()) + } - self.current_attempt_id = - Some(history_listener.log_attempt_start(query_id, None, node_addr)); + fn is_current_page_exhausted(&self) -> bool { + self.current_page.rows_remaining() == 0 } +} - fn log_attempt_success(&mut self) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; +/// Returned by [RawIterator::into_typed]. +/// +/// Does not implement [Stream], but permits deserialization of borrowed types. +/// To use [Stream] API (only accessible for owned types), use [TypedRowIterator::into_stream]. +pub struct TypedRowIterator { + raw_iterator: RawIterator, + _phantom: std::marker::PhantomData, +} - let attempt_id: history::AttemptId = match &self.current_attempt_id { - Some(id) => *id, - None => return, - }; +impl Unpin for TypedRowIterator {} - history_listener.log_attempt_success(attempt_id); +impl<'frame, RowT> TypedRowIterator +where + RowT: DeserializeRow<'frame>, +{ + fn new(raw_iterator: RawIterator) -> Result { + raw_iterator.type_check::()?; + + Ok(Self { + raw_iterator, + _phantom: Default::default(), + }) } - fn log_attempt_error(&mut self, error: &QueryError, retry_decision: &RetryDecision) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; + /// If tracing was enabled, returns tracing ids of all finished page queries. + #[inline] + pub fn tracing_ids(&self) -> &[Uuid] { + self.raw_iterator.tracing_ids() + } - let attempt_id: history::AttemptId = match &self.current_attempt_id { - Some(id) => *id, - None => return, - }; + /// Returns specification of row columns + #[inline] + pub fn column_specs(&self) -> ColumnSpecs { + self.raw_iterator.column_specs() + } - history_listener.log_attempt_error(attempt_id, error, retry_decision); + /// Stream-like next() implementation for TypedRowIterator. + /// + /// It also works with borrowed types! For example, &str is supported. + /// However, this is not a Stream. To create a Stream, use `into_stream()`. + #[inline] + pub async fn next(&'frame mut self) -> Option> { + self.raw_iterator.next().await.map(|res| { + res.and_then(|column_iterator| { + ::deserialize(column_iterator) + .map_err(|err| RowsParseError::from(err).into()) + }) + }) + } + + /// Stream-like try_next() implementation for TypedRowIterator. + /// + /// It also works with borrowed types! For example, &str is supported. + /// However, this is not a Stream. To create a Stream, use `into_stream()`. + #[inline] + pub async fn try_next(&'frame mut self) -> Result, QueryError> { + self.next().await.transpose() } } -/// A massively simplified version of the RowIteratorWorker. It does not have -/// any complicated logic related to retries, it just fetches pages from -/// a single connection. -struct SingleConnectionRowIteratorWorker { - sender: ProvingSender>, - fetcher: Fetcher, +impl TypedRowIterator { + /// Transforms [TypedRowIterator] into [TypedRowStream]. 
+    pub fn into_stream(self) -> TypedRowStream<RowT> {
+        TypedRowStream {
+            typed_row_iterator: self,
+        }
+    }
+}
 
-impl<Fetcher, FetchFut> SingleConnectionRowIteratorWorker<Fetcher>
+/// Returned by [TypedRowIterator::into_stream].
+///
+/// Implements [Stream], but only permits deserialization of owned types.
+pub struct TypedRowStream<RowT> {
+    typed_row_iterator: TypedRowIterator<RowT>,
+}
+
+impl<RowT> Unpin for TypedRowStream<RowT> {}
+
+/// Stream implementation for TypedRowStream.
+///
+/// It only works with owned types! For example, &str is not supported.
+impl<RowT> Stream for TypedRowStream<RowT>
 where
-    Fetcher: Fn(PagingState) -> FetchFut + Send + Sync,
-    FetchFut: Future<Output = Result<QueryResponse, QueryError>> + Send,
+    RowT: for<'r> DeserializeRow<'r>,
 {
-    async fn work(mut self) -> PageSendAttemptedProof {
-        match self.do_work().await {
-            Ok(proof) => proof,
-            Err(err) => {
-                let (proof, _) = self.sender.send(Err(err)).await;
-                proof
-            }
-        }
+    type Item = Result<RowT, QueryError>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let mut s = self.as_mut();
+
+        let next_fut = s.typed_row_iterator.raw_iterator.next();
+        futures::pin_mut!(next_fut);
+        let iter = ready_some_ok!(next_fut.poll(cx));
+        let value = <RowT as DeserializeRow<'_>>::deserialize(iter)
+            .map_err(|err| RowsParseError::from(err).into());
+        Poll::Ready(Some(value))
     }
+}
 
-    async fn do_work(&mut self) -> Result<PageSendAttemptedProof, QueryError> {
-        let mut paging_state = PagingState::start();
-        loop {
-            let result = (self.fetcher)(paging_state).await?;
-            let response = result.into_non_error_query_response()?;
-            match response.response {
-                NonErrorResponse::Result(result::Result::Rows(mut rows)) => {
-                    let paging_state_response = rows.paging_state_response.take();
+/// Iterator over rows returned by paged queries.
+///
+/// Allows easy access to rows without worrying about handling multiple pages.
+pub struct LegacyRowIterator {
+    raw_iterator: RawIterator,
+}
 
-                    let (proof, send_result) = self
-                        .sender
-                        .send(Ok(ReceivedPage {
-                            rows,
-                            tracing_id: response.tracing_id,
-                        }))
-                        .await;
+impl Stream for LegacyRowIterator {
+    type Item = Result<Row, QueryError>;
 
-                    if send_result.is_err() {
-                        // channel was closed, RowIterator was dropped - should shutdown
-                        return Ok(proof);
-                    }
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let mut s = self.as_mut();
 
-                    match paging_state_response.into_paging_control_flow() {
-                        ControlFlow::Continue(new_paging_state) => {
-                            paging_state = new_paging_state;
-                        }
-                        ControlFlow::Break(()) => {
-                            // Reached the last query, shutdown
-                            return Ok(proof);
-                        }
-                    }
-                }
-                NonErrorResponse::Result(_) => {
-                    // We have most probably sent a modification statement (e.g. INSERT or UPDATE),
-                    // so let's return an empty iterator as suggested in #631.
+        let next_fut = s.raw_iterator.next();
+        futures::pin_mut!(next_fut);
 
-                    // We must attempt to send something because the iterator expects it.
- let (proof, _) = self.sender.send_empty_page(response.tracing_id).await; - return Ok(proof); - } - _ => { - return Err(ProtocolError::UnexpectedResponse( - response.response.to_response_kind(), - ) - .into()); - } + let next_elem: Option, QueryError>> = match next_fut.poll(cx) { + Poll::Ready(next_elem) => next_elem, + Poll::Pending => return Poll::Pending, + }; + + let next_ready: Option = match next_elem { + Some(Ok(iter)) => { + Some(Row::deserialize(iter).map_err(|e| RowsParseError::from(e).into())) } + Some(Err(e)) => Some(Err(e)), + None => None, + }; + + Poll::Ready(next_ready) + } +} + +impl LegacyRowIterator { + /// If tracing was enabled returns tracing ids of all finished page queries + pub fn get_tracing_ids(&self) -> &[Uuid] { + self.raw_iterator.tracing_ids() + } + + /// Returns specification of row columns + pub fn get_column_specs(&self) -> &[ColumnSpec<'_>] { + self.raw_iterator.column_specs().inner() + } + + pub fn into_typed(self) -> LegacyTypedRowIterator { + LegacyTypedRowIterator { + row_iterator: self, + _phantom_data: Default::default(), } } } @@ -892,18 +1144,20 @@ where /// Iterator over rows returned by paged queries /// where each row is parsed as the given type\ /// Returned by `RowIterator::into_typed` -pub struct TypedRowIterator { - row_iterator: RowIterator, - phantom_data: std::marker::PhantomData, +pub struct LegacyTypedRowIterator { + row_iterator: LegacyRowIterator, + _phantom_data: std::marker::PhantomData, } -impl TypedRowIterator { +impl LegacyTypedRowIterator { /// If tracing was enabled returns tracing ids of all finished page queries + #[inline] pub fn get_tracing_ids(&self) -> &[Uuid] { self.row_iterator.get_tracing_ids() } /// Returns specification of row columns + #[inline] pub fn get_column_specs(&self) -> &[ColumnSpec] { self.row_iterator.get_column_specs() } @@ -921,29 +1175,19 @@ pub enum NextRowError { FromRowError(#[from] FromRowError), } -/// Fetching pages is asynchronous so `TypedRowIterator` does not implement the `Iterator` trait.\ +/// Fetching pages is asynchronous so `LegacyTypedRowIterator` does not implement the `Iterator` trait.\ /// Instead it uses the asynchronous `Stream` trait -impl Stream for TypedRowIterator { +impl Stream for LegacyTypedRowIterator { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut s = self.as_mut(); - let next_elem: Option> = - match Pin::new(&mut s.row_iterator).poll_next(cx) { - Poll::Ready(next_elem) => next_elem, - Poll::Pending => return Poll::Pending, - }; - - let next_ready: Option = match next_elem { - Some(Ok(next_row)) => Some(RowT::from_row(next_row).map_err(|e| e.into())), - Some(Err(e)) => Some(Err(e.into())), - None => None, - }; - - Poll::Ready(next_ready) + let next_row = ready_some_ok!(Pin::new(&mut s.row_iterator).poll_next(cx)); + let typed_row_res = RowT::from_row(next_row).map_err(|e| e.into()); + Poll::Ready(Some(typed_row_res)) } } -// TypedRowIterator can be moved freely for any RowT so it's Unpin -impl Unpin for TypedRowIterator {} +// LegacyTypedRowIterator can be moved freely for any RowT so it's Unpin +impl Unpin for LegacyTypedRowIterator {} diff --git a/scylla/src/transport/large_batch_statements_test.rs b/scylla/src/transport/large_batch_statements_test.rs index 7e8fc482c3..33628a49d4 100644 --- a/scylla/src/transport/large_batch_statements_test.rs +++ b/scylla/src/transport/large_batch_statements_test.rs @@ -7,7 +7,7 @@ use crate::transport::errors::{BadQuery, QueryError}; use crate::{ batch::Batch, 
test_utils::{create_new_session_builder, unique_keyspace_name}, - QueryResult, Session, + LegacyQueryResult, Session, }; #[tokio::test] @@ -51,7 +51,11 @@ async fn create_test_session(session: Session, ks: &String) -> Session { session } -async fn write_batch(session: &Session, n: usize, ks: &String) -> Result { +async fn write_batch( + session: &Session, + n: usize, + ks: &String, +) -> Result { let mut batch_query = Batch::new(BatchType::Unlogged); let mut batch_values = Vec::new(); let query = format!("INSERT INTO {}.pairs (dummy, k, v) VALUES (0, ?, ?)", ks); diff --git a/scylla/src/transport/legacy_query_result.rs b/scylla/src/transport/legacy_query_result.rs new file mode 100644 index 0000000000..3a8a577c71 --- /dev/null +++ b/scylla/src/transport/legacy_query_result.rs @@ -0,0 +1,627 @@ +use crate::frame::response::cql_to_rust::{FromRow, FromRowError}; +use crate::frame::response::result::ColumnSpec; +use crate::frame::response::result::Row; +use crate::transport::session::{IntoTypedRows, TypedRowIter}; +use scylla_cql::frame::response::result::ResultMetadataHolder; +use thiserror::Error; +use uuid::Uuid; + +/// Result of a single query\ +/// Contains all rows returned by the database and some more information +#[derive(Debug)] +pub struct LegacyQueryResult { + /// Rows returned by the database.\ + /// Queries like `SELECT` will have `Some(Vec)`, while queries like `INSERT` will have `None`.\ + /// Can contain an empty Vec. + pub rows: Option>, + /// Warnings returned by the database + pub warnings: Vec, + /// CQL Tracing uuid - can only be Some if tracing is enabled for this query + pub tracing_id: Option, + /// Metadata returned along with this response. + pub(crate) metadata: Option>, + /// The original size of the serialized rows in request + pub serialized_size: usize, +} + +impl LegacyQueryResult { + pub(crate) fn mock_empty() -> Self { + Self { + rows: None, + warnings: Vec::new(), + tracing_id: None, + metadata: None, + serialized_size: 0, + } + } + + /// Returns the number of received rows.\ + /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows). + pub fn rows_num(&self) -> Result { + match &self.rows { + Some(rows) => Ok(rows.len()), + None => Err(RowsExpectedError), + } + } + + /// Returns the received rows when present.\ + /// If `LegacyQueryResult.rows` is `None`, which means that this query is not supposed to return rows (e.g `INSERT`), returns an error.\ + /// Can return an empty `Vec`. + pub fn rows(self) -> Result, RowsExpectedError> { + match self.rows { + Some(rows) => Ok(rows), + None => Err(RowsExpectedError), + } + } + + /// Returns the received rows parsed as the given type.\ + /// Equal to `rows()?.into_typed()`.\ + /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows). + pub fn rows_typed(self) -> Result, RowsExpectedError> { + Ok(self.rows()?.into_typed()) + } + + /// Returns `Ok` for a result of a query that shouldn't contain any rows.\ + /// Will return `Ok` for `INSERT` result, but a `SELECT` result, even an empty one, will cause an error.\ + /// Opposite of [`rows()`](LegacyQueryResult::rows). + pub fn result_not_rows(&self) -> Result<(), RowsNotExpectedError> { + match self.rows { + Some(_) => Err(RowsNotExpectedError), + None => Ok(()), + } + } + + /// Returns rows when `LegacyQueryResult.rows` is `Some`, otherwise an empty Vec.\ + /// Equal to `rows().unwrap_or_default()`. 
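+    ///
+    /// For example (a sketch; `result` is assumed to be a `LegacyQueryResult`):
+    ///
+    /// ```text
+    /// // An INSERT yields no rows, so this loop simply does not execute;
+    /// // for a SELECT, it iterates over whatever rows were received.
+    /// for row in result.rows_or_empty() {
+    ///     println!("{:?}", row);
+    /// }
+    /// ```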
+    pub fn rows_or_empty(self) -> Vec<Row> {
+        self.rows.unwrap_or_default()
+    }
+
+    /// Returns rows parsed as the given type.\
+    /// When `LegacyQueryResult.rows` is `None`, returns 0 rows.\
+    /// Equal to `rows_or_empty().into_typed::<RowT>()`.
+    pub fn rows_typed_or_empty<RowT: FromRow>(self) -> TypedRowIter<RowT> {
+        self.rows_or_empty().into_typed::<RowT>()
+    }
+
+    /// Returns first row from the received rows.\
+    /// When the first row is not available, returns an error.
+    pub fn first_row(self) -> Result<Row, FirstRowError> {
+        match self.maybe_first_row()? {
+            Some(row) => Ok(row),
+            None => Err(FirstRowError::RowsEmpty),
+        }
+    }
+
+    /// Returns first row from the received rows parsed as the given type.\
+    /// When the first row is not available, returns an error.
+    pub fn first_row_typed<RowT: FromRow>(self) -> Result<RowT, FirstRowTypedError> {
+        Ok(self.first_row()?.into_typed()?)
+    }
+
+    /// Returns `Option<Row>` containing the first row of the result.\
+    /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows).
+    pub fn maybe_first_row(self) -> Result<Option<Row>, RowsExpectedError> {
+        Ok(self.rows()?.into_iter().next())
+    }
+
+    /// Returns `Option<RowT>` containing the first row of the result parsed as the given type.\
+    /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows).
+    pub fn maybe_first_row_typed<RowT: FromRow>(
+        self,
+    ) -> Result<Option<RowT>, MaybeFirstRowTypedError> {
+        match self.maybe_first_row()? {
+            Some(row) => Ok(Some(row.into_typed::<RowT>()?)),
+            None => Ok(None),
+        }
+    }
+
+    /// Returns the only received row.\
+    /// Fails if the result is anything other than a single row.
+    pub fn single_row(self) -> Result<Row, SingleRowError> {
+        let rows: Vec<Row> = self.rows()?;
+
+        if rows.len() != 1 {
+            return Err(SingleRowError::BadNumberOfRows(rows.len()));
+        }
+
+        Ok(rows.into_iter().next().unwrap())
+    }
+
+    /// Returns the only received row parsed as the given type.\
+    /// Fails if the result is anything other than a single row.
+    pub fn single_row_typed<RowT: FromRow>(self) -> Result<RowT, SingleRowTypedError> {
+        Ok(self.single_row()?.into_typed::<RowT>()?)
+    }
+
+    /// Returns column specifications.
+    #[inline]
+    pub fn col_specs(&self) -> &[ColumnSpec] {
+        self.metadata
+            .as_ref()
+            .map(|metadata| metadata.col_specs())
+            .unwrap_or_default()
+    }
+
+    /// Returns a column specification for a column with a given name, or `None` if not found.
+    #[inline]
+    pub fn get_column_spec<'a>(&'a self, name: &str) -> Option<(usize, &'a ColumnSpec)> {
+        self.col_specs()
+            .iter()
+            .enumerate()
+            .find(|(_id, spec)| spec.name() == name)
+    }
+}
+
+/// [`LegacyQueryResult::rows()`](LegacyQueryResult::rows) or a similar function called on a bad LegacyQueryResult.\
+/// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\
+/// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\
+/// It is `None` for queries that can't return rows (e.g `INSERT`).
+#[derive(Debug, Clone, Error, PartialEq, Eq)]
+#[error(
+    "LegacyQueryResult::rows() or similar function called on a bad LegacyQueryResult.
+    Expected LegacyQueryResult.rows to be Some, but it was None.
+    LegacyQueryResult.rows is Some for queries that can return rows (e.g SELECT).
+    It is None for queries that can't return rows (e.g INSERT)."
+)]
+pub struct RowsExpectedError;
+
+/// [`LegacyQueryResult::result_not_rows()`](LegacyQueryResult::result_not_rows) called on a bad LegacyQueryResult.\
+/// Expected `LegacyQueryResult.rows` to be `None`, but it was `Some`.\
+/// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\
+/// It is `None` for queries that can't return rows (e.g `INSERT`).
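+///
+/// For example (a sketch; `session` is assumed to be a connected `Session`),
+/// this error is returned when a statement unexpectedly yields rows:
+///
+/// ```text
+/// // Ok(()) for an INSERT; Err(RowsNotExpectedError) for a SELECT.
+/// session
+///     .query_unpaged("INSERT INTO ks.t (a) VALUES (1)", &[])
+///     .await?
+///     .result_not_rows()?;
+/// ```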
+#[derive(Debug, Clone, Error, PartialEq, Eq)] +#[error( + "LegacyQueryResult::result_not_rows() called on a bad LegacyQueryResult. + Expected LegacyQueryResult.rows to be None, but it was Some. + LegacyQueryResult.rows is Some for queries that can return rows (e.g SELECT). + It is None for queries that can't return rows (e.g INSERT)." +)] +pub struct RowsNotExpectedError; + +#[derive(Debug, Clone, Error, PartialEq, Eq)] +pub enum FirstRowError { + /// [`LegacyQueryResult::first_row()`](LegacyQueryResult::first_row) called on a bad LegacyQueryResult.\ + /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\ + /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ + /// It is `None` for queries that can't return rows (e.g `INSERT`). + #[error(transparent)] + RowsExpected(#[from] RowsExpectedError), + + /// Rows in `LegacyQueryResult` are empty + #[error("Rows in LegacyQueryResult are empty")] + RowsEmpty, +} + +#[derive(Debug, Clone, Error, PartialEq, Eq)] +pub enum FirstRowTypedError { + /// [`LegacyQueryResult::first_row_typed()`](LegacyQueryResult::first_row_typed) called on a bad LegacyQueryResult.\ + /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\ + /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ + /// It is `None` for queries that can't return rows (e.g `INSERT`). + #[error(transparent)] + RowsExpected(#[from] RowsExpectedError), + + /// Rows in `LegacyQueryResult` are empty + #[error("Rows in LegacyQueryResult are empty")] + RowsEmpty, + + /// Parsing row as the given type failed + #[error(transparent)] + FromRowError(#[from] FromRowError), +} + +#[derive(Debug, Clone, Error, PartialEq, Eq)] +pub enum MaybeFirstRowTypedError { + /// [`LegacyQueryResult::maybe_first_row_typed()`](LegacyQueryResult::maybe_first_row_typed) called on a bad LegacyQueryResult.\ + /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`. + /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ + /// It is `None` for queries that can't return rows (e.g `INSERT`). + #[error(transparent)] + RowsExpected(#[from] RowsExpectedError), + + /// Parsing row as the given type failed + #[error(transparent)] + FromRowError(#[from] FromRowError), +} + +#[derive(Debug, Clone, Error, PartialEq, Eq)] +pub enum SingleRowError { + /// [`LegacyQueryResult::single_row()`](LegacyQueryResult::single_row) called on a bad LegacyQueryResult.\ + /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\ + /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ + /// It is `None` for queries that can't return rows (e.g `INSERT`). + #[error(transparent)] + RowsExpected(#[from] RowsExpectedError), + + /// Expected a single row, found other number of rows + #[error("Expected a single row, found {0} rows")] + BadNumberOfRows(usize), +} + +#[derive(Debug, Clone, Error, PartialEq, Eq)] +pub enum SingleRowTypedError { + /// [`LegacyQueryResult::single_row_typed()`](LegacyQueryResult::single_row_typed) called on a bad LegacyQueryResult.\ + /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\ + /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ + /// It is `None` for queries that can't return rows (e.g `INSERT`). 
+ #[error(transparent)] + RowsExpected(#[from] RowsExpectedError), + + /// Expected a single row, found other number of rows + #[error("Expected a single row, found {0} rows")] + BadNumberOfRows(usize), + + /// Parsing row as the given type failed + #[error(transparent)] + FromRowError(#[from] FromRowError), +} + +impl From for FirstRowTypedError { + fn from(err: FirstRowError) -> FirstRowTypedError { + match err { + FirstRowError::RowsExpected(e) => FirstRowTypedError::RowsExpected(e), + FirstRowError::RowsEmpty => FirstRowTypedError::RowsEmpty, + } + } +} + +impl From for SingleRowTypedError { + fn from(err: SingleRowError) -> SingleRowTypedError { + match err { + SingleRowError::RowsExpected(e) => SingleRowTypedError::RowsExpected(e), + SingleRowError::BadNumberOfRows(r) => SingleRowTypedError::BadNumberOfRows(r), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + frame::response::result::{CqlValue, Row}, + test_utils::setup_tracing, + }; + use std::{borrow::Cow, convert::TryInto}; + + use assert_matches::assert_matches; + use scylla_cql::frame::response::result::{ColumnType, ResultMetadata, TableSpec}; + + // Returns specified number of rows, each one containing one int32 value. + // Values are 0, 1, 2, 3, 4, ... + fn make_rows(rows_num: usize) -> Vec { + let mut rows: Vec = Vec::with_capacity(rows_num); + for cur_value in 0..rows_num { + let int_val: i32 = cur_value.try_into().unwrap(); + rows.push(Row { + columns: vec![Some(CqlValue::Int(int_val))], + }); + } + rows + } + + // Just like make_rows, but each column has one String value + // values are "val0", "val1", "val2", ... + fn make_string_rows(rows_num: usize) -> Vec { + let mut rows: Vec = Vec::with_capacity(rows_num); + for cur_value in 0..rows_num { + rows.push(Row { + columns: vec![Some(CqlValue::Text(format!("val{}", cur_value)))], + }); + } + rows + } + + fn make_test_metadata() -> ResultMetadata<'static> { + let table_spec = TableSpec::borrowed("some_keyspace", "some_table"); + + let column_spec = ColumnSpec::borrowed("column0", ColumnType::Int, table_spec); + + ResultMetadata::new_for_test(1, vec![column_spec]) + } + + fn make_not_rows_query_result() -> LegacyQueryResult { + LegacyQueryResult { + rows: None, + warnings: vec![], + tracing_id: None, + metadata: None, + serialized_size: 0, + } + } + + fn make_rows_query_result(rows_num: usize) -> LegacyQueryResult { + let mut res = make_not_rows_query_result(); + res.rows = Some(make_rows(rows_num)); + res.metadata = Some(ResultMetadataHolder::BorrowedOrOwned(Cow::Owned( + make_test_metadata(), + ))); + res + } + + fn make_string_rows_query_result(rows_num: usize) -> LegacyQueryResult { + let mut res = make_not_rows_query_result(); + res.rows = Some(make_string_rows(rows_num)); + res.metadata = Some(ResultMetadataHolder::BorrowedOrOwned(Cow::Owned( + make_test_metadata(), + ))); + res + } + + #[test] + fn rows_num_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().rows_num(), + Err(RowsExpectedError) + ); + assert_eq!(make_rows_query_result(0).rows_num(), Ok(0)); + assert_eq!(make_rows_query_result(1).rows_num(), Ok(1)); + assert_eq!(make_rows_query_result(2).rows_num(), Ok(2)); + assert_eq!(make_rows_query_result(3).rows_num(), Ok(3)); + } + + #[test] + fn rows_test() { + setup_tracing(); + assert_eq!(make_not_rows_query_result().rows(), Err(RowsExpectedError)); + assert_eq!(make_rows_query_result(0).rows(), Ok(vec![])); + assert_eq!(make_rows_query_result(1).rows(), Ok(make_rows(1))); + 
assert_eq!(make_rows_query_result(2).rows(), Ok(make_rows(2))); + } + + #[test] + fn rows_typed_test() { + setup_tracing(); + assert!(make_not_rows_query_result().rows_typed::<(i32,)>().is_err()); + + let rows0: Vec<(i32,)> = make_rows_query_result(0) + .rows_typed::<(i32,)>() + .unwrap() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows0, vec![]); + + let rows1: Vec<(i32,)> = make_rows_query_result(1) + .rows_typed::<(i32,)>() + .unwrap() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows1, vec![(0,)]); + + let rows2: Vec<(i32,)> = make_rows_query_result(2) + .rows_typed::<(i32,)>() + .unwrap() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows2, vec![(0,), (1,)]); + } + + #[test] + fn result_not_rows_test() { + setup_tracing(); + assert_eq!(make_not_rows_query_result().result_not_rows(), Ok(())); + assert_eq!( + make_rows_query_result(0).result_not_rows(), + Err(RowsNotExpectedError) + ); + assert_eq!( + make_rows_query_result(1).result_not_rows(), + Err(RowsNotExpectedError) + ); + assert_eq!( + make_rows_query_result(2).result_not_rows(), + Err(RowsNotExpectedError) + ); + } + + #[test] + fn rows_or_empty_test() { + setup_tracing(); + assert_eq!(make_not_rows_query_result().rows_or_empty(), vec![]); + assert_eq!(make_rows_query_result(0).rows_or_empty(), make_rows(0)); + assert_eq!(make_rows_query_result(1).rows_or_empty(), make_rows(1)); + assert_eq!(make_rows_query_result(2).rows_or_empty(), make_rows(2)); + } + + #[test] + fn rows_typed_or_empty() { + setup_tracing(); + let rows_empty: Vec<(i32,)> = make_not_rows_query_result() + .rows_typed_or_empty::<(i32,)>() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows_empty, vec![]); + + let rows0: Vec<(i32,)> = make_rows_query_result(0) + .rows_typed_or_empty::<(i32,)>() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows0, vec![]); + + let rows1: Vec<(i32,)> = make_rows_query_result(1) + .rows_typed_or_empty::<(i32,)>() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows1, vec![(0,)]); + + let rows2: Vec<(i32,)> = make_rows_query_result(2) + .rows_typed_or_empty::<(i32,)>() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows2, vec![(0,), (1,)]); + } + + #[test] + fn first_row_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().first_row(), + Err(FirstRowError::RowsExpected(RowsExpectedError)) + ); + assert_eq!( + make_rows_query_result(0).first_row(), + Err(FirstRowError::RowsEmpty) + ); + assert_eq!( + make_rows_query_result(1).first_row(), + Ok(make_rows(1).into_iter().next().unwrap()) + ); + assert_eq!( + make_rows_query_result(2).first_row(), + Ok(make_rows(2).into_iter().next().unwrap()) + ); + assert_eq!( + make_rows_query_result(3).first_row(), + Ok(make_rows(3).into_iter().next().unwrap()) + ); + } + + #[test] + fn first_row_typed_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().first_row_typed::<(i32,)>(), + Err(FirstRowTypedError::RowsExpected(RowsExpectedError)) + ); + assert_eq!( + make_rows_query_result(0).first_row_typed::<(i32,)>(), + Err(FirstRowTypedError::RowsEmpty) + ); + assert_eq!( + make_rows_query_result(1).first_row_typed::<(i32,)>(), + Ok((0,)) + ); + assert_eq!( + make_rows_query_result(2).first_row_typed::<(i32,)>(), + Ok((0,)) + ); + assert_eq!( + make_rows_query_result(3).first_row_typed::<(i32,)>(), + Ok((0,)) + ); + + assert_matches!( + make_string_rows_query_result(2).first_row_typed::<(i32,)>(), + Err(FirstRowTypedError::FromRowError(_)) + ); + } + + #[test] + fn maybe_first_row_test() { + setup_tracing(); + assert_eq!( 
+ make_not_rows_query_result().maybe_first_row(), + Err(RowsExpectedError) + ); + assert_eq!(make_rows_query_result(0).maybe_first_row(), Ok(None)); + assert_eq!( + make_rows_query_result(1).maybe_first_row(), + Ok(Some(make_rows(1).into_iter().next().unwrap())) + ); + assert_eq!( + make_rows_query_result(2).maybe_first_row(), + Ok(Some(make_rows(2).into_iter().next().unwrap())) + ); + assert_eq!( + make_rows_query_result(3).maybe_first_row(), + Ok(Some(make_rows(3).into_iter().next().unwrap())) + ); + } + + #[test] + fn maybe_first_row_typed_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().maybe_first_row_typed::<(i32,)>(), + Err(MaybeFirstRowTypedError::RowsExpected(RowsExpectedError)) + ); + + assert_eq!( + make_rows_query_result(0).maybe_first_row_typed::<(i32,)>(), + Ok(None) + ); + + assert_eq!( + make_rows_query_result(1).maybe_first_row_typed::<(i32,)>(), + Ok(Some((0,))) + ); + + assert_eq!( + make_rows_query_result(2).maybe_first_row_typed::<(i32,)>(), + Ok(Some((0,))) + ); + + assert_eq!( + make_rows_query_result(3).maybe_first_row_typed::<(i32,)>(), + Ok(Some((0,))) + ); + + assert_matches!( + make_string_rows_query_result(1).maybe_first_row_typed::<(i32,)>(), + Err(MaybeFirstRowTypedError::FromRowError(_)) + ) + } + + #[test] + fn single_row_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().single_row(), + Err(SingleRowError::RowsExpected(RowsExpectedError)) + ); + assert_eq!( + make_rows_query_result(0).single_row(), + Err(SingleRowError::BadNumberOfRows(0)) + ); + assert_eq!( + make_rows_query_result(1).single_row(), + Ok(make_rows(1).into_iter().next().unwrap()) + ); + assert_eq!( + make_rows_query_result(2).single_row(), + Err(SingleRowError::BadNumberOfRows(2)) + ); + assert_eq!( + make_rows_query_result(3).single_row(), + Err(SingleRowError::BadNumberOfRows(3)) + ); + } + + #[test] + fn single_row_typed_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::RowsExpected(RowsExpectedError)) + ); + assert_eq!( + make_rows_query_result(0).single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::BadNumberOfRows(0)) + ); + assert_eq!( + make_rows_query_result(1).single_row_typed::<(i32,)>(), + Ok((0,)) + ); + assert_eq!( + make_rows_query_result(2).single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::BadNumberOfRows(2)) + ); + assert_eq!( + make_rows_query_result(3).single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::BadNumberOfRows(3)) + ); + + assert_matches!( + make_string_rows_query_result(1).single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::FromRowError(_)) + ); + } +} diff --git a/scylla/src/transport/mod.rs b/scylla/src/transport/mod.rs index 45befce153..be4cfa37ba 100644 --- a/scylla/src/transport/mod.rs +++ b/scylla/src/transport/mod.rs @@ -7,6 +7,7 @@ pub mod errors; pub mod execution_profile; pub mod host_filter; pub mod iterator; +pub mod legacy_query_result; pub mod load_balancing; pub mod locator; pub(crate) mod metrics; diff --git a/scylla/src/transport/query_result.rs b/scylla/src/transport/query_result.rs index 0750624c93..cb461b985c 100644 --- a/scylla/src/transport/query_result.rs +++ b/scylla/src/transport/query_result.rs @@ -1,625 +1,806 @@ -use std::sync::Arc; - -use crate::frame::response::cql_to_rust::{FromRow, FromRowError}; -use crate::frame::response::result::ColumnSpec; -use crate::frame::response::result::Row; -use crate::transport::session::{IntoTypedRows, TypedRowIter}; -use 
scylla_cql::frame::response::result::ResultMetadata; use thiserror::Error; use uuid::Uuid; -/// Result of a single query\ -/// Contains all rows returned by the database and some more information -#[derive(Debug)] -pub struct QueryResult { - /// Rows returned by the database.\ - /// Queries like `SELECT` will have `Some(Vec)`, while queries like `INSERT` will have `None`.\ - /// Can contain an empty Vec. - pub rows: Option>, - /// Warnings returned by the database - pub warnings: Vec, - /// CQL Tracing uuid - can only be Some if tracing is enabled for this query - pub tracing_id: Option, - /// Metadata returned along with this response. - pub(crate) metadata: Option>>, - /// The original size of the serialized rows in request - pub serialized_size: usize, +use scylla_cql::frame::frame_errors::RowsParseError; +use scylla_cql::frame::response::result::{ + ColumnSpec, ColumnType, RawRows, RawRowsWithDeserializedMetadata, Row, TableSpec, +}; +use scylla_cql::types::deserialize::result::TypedRowIterator; +use scylla_cql::types::deserialize::row::DeserializeRow; +use scylla_cql::types::deserialize::{DeserializationError, TypeCheckError}; + +use super::legacy_query_result::LegacyQueryResult; + +/// A view over specification of a table in the database. +#[derive(Debug, Clone, Copy)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub struct TableSpecView<'res> { + table_name: &'res str, + ks_name: &'res str, } -impl QueryResult { - pub(crate) fn mock_empty() -> Self { +impl<'res> TableSpecView<'res> { + pub(crate) fn new_from_table_spec(spec: &'res TableSpec) -> Self { Self { - rows: None, - warnings: Vec::new(), - tracing_id: None, - metadata: None, - serialized_size: 0, + table_name: spec.table_name(), + ks_name: spec.ks_name(), } } - /// Returns the number of received rows.\ - /// Fails when the query isn't of a type that could return rows, same as [`rows()`](QueryResult::rows). - pub fn rows_num(&self) -> Result { - match &self.rows { - Some(rows) => Ok(rows.len()), - None => Err(RowsExpectedError), - } + /// The name of the table. + #[inline] + pub fn table_name(&'res self) -> &'res str { + self.table_name } - /// Returns the received rows when present.\ - /// If `QueryResult.rows` is `None`, which means that this query is not supposed to return rows (e.g `INSERT`), returns an error.\ - /// Can return an empty `Vec`. - pub fn rows(self) -> Result, RowsExpectedError> { - match self.rows { - Some(rows) => Ok(rows), - None => Err(RowsExpectedError), - } + /// The name of the keyspace the table resides in. + #[inline] + pub fn ks_name(&'res self) -> &'res str { + self.ks_name } +} - /// Returns the received rows parsed as the given type.\ - /// Equal to `rows()?.into_typed()`.\ - /// Fails when the query isn't of a type that could return rows, same as [`rows()`](QueryResult::rows). - pub fn rows_typed(self) -> Result, RowsExpectedError> { - Ok(self.rows()?.into_typed()) - } +/// A view over specification of a column returned by the database. +#[derive(Debug, Clone, Copy)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub struct ColumnSpecView<'res> { + table_spec: TableSpecView<'res>, + name: &'res str, + typ: &'res ColumnType<'res>, +} - /// Returns `Ok` for a result of a query that shouldn't contain any rows.\ - /// Will return `Ok` for `INSERT` result, but a `SELECT` result, even an empty one, will cause an error.\ - /// Opposite of [`rows()`](QueryResult::rows). 
- pub fn result_not_rows(&self) -> Result<(), RowsNotExpectedError> { - match self.rows { - Some(_) => Err(RowsNotExpectedError), - None => Ok(()), +impl<'res> ColumnSpecView<'res> { + pub(crate) fn new_from_column_spec(spec: &'res ColumnSpec) -> Self { + Self { + table_spec: TableSpecView::new_from_table_spec(spec.table_spec()), + name: spec.name(), + typ: spec.typ(), } } - /// Returns rows when `QueryResult.rows` is `Some`, otherwise an empty Vec.\ - /// Equal to `rows().unwrap_or_default()`. - pub fn rows_or_empty(self) -> Vec { - self.rows.unwrap_or_default() - } - - /// Returns rows parsed as the given type.\ - /// When `QueryResult.rows` is `None`, returns 0 rows.\ - /// Equal to `rows_or_empty().into_typed::()`. - pub fn rows_typed_or_empty(self) -> TypedRowIter { - self.rows_or_empty().into_typed::() + /// Returns a view over specification of the table the column is part of. + #[inline] + pub fn table_spec(&'res self) -> TableSpecView<'res> { + self.table_spec } - /// Returns first row from the received rows.\ - /// When the first row is not available, returns an error. - pub fn first_row(self) -> Result { - match self.maybe_first_row()? { - Some(row) => Ok(row), - None => Err(FirstRowError::RowsEmpty), - } + /// The column's name. + #[inline] + pub fn name(&'res self) -> &'res str { + self.name } - /// Returns first row from the received rows parsed as the given type.\ - /// When the first row is not available, returns an error. - pub fn first_row_typed(self) -> Result { - Ok(self.first_row()?.into_typed()?) + /// The column's CQL type. + #[inline] + pub fn typ(&'res self) -> &'res ColumnType { + self.typ } +} - /// Returns `Option` containing the first of a result.\ - /// Fails when the query isn't of a type that could return rows, same as [`rows()`](QueryResult::rows). - pub fn maybe_first_row(self) -> Result, RowsExpectedError> { - Ok(self.rows()?.into_iter().next()) - } +/// A view over specification of columns returned by the database. +#[derive(Debug, Clone, Copy)] +pub struct ColumnSpecs<'res> { + specs: &'res [ColumnSpec<'res>], +} - /// Returns `Option` containing the first of a result.\ - /// Fails when the query isn't of a type that could return rows, same as [`rows()`](QueryResult::rows). - pub fn maybe_first_row_typed( - self, - ) -> Result, MaybeFirstRowTypedError> { - match self.maybe_first_row()? { - Some(row) => Ok(Some(row.into_typed::()?)), - None => Ok(None), - } +impl<'res> ColumnSpecs<'res> { + pub(crate) fn new(specs: &'res [ColumnSpec<'res>]) -> Self { + Self { specs } } - /// Returns the only received row.\ - /// Fails if the result is anything else than a single row.\ - pub fn single_row(self) -> Result { - let rows: Vec = self.rows()?; - - if rows.len() != 1 { - return Err(SingleRowError::BadNumberOfRows(rows.len())); - } - - Ok(rows.into_iter().next().unwrap()) + pub(crate) fn inner(&self) -> &'res [ColumnSpec<'res>] { + self.specs } - /// Returns the only received row parsed as the given type.\ - /// Fails if the result is anything else than a single row.\ - pub fn single_row_typed(self) -> Result { - Ok(self.single_row()?.into_typed::()?) + /// Returns number of columns. + #[allow(clippy::len_without_is_empty)] + #[inline] + pub fn len(&self) -> usize { + self.specs.len() } - /// Returns column specifications. + /// Returns specification of k-th column returned from the database. 
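+    ///
+    /// For example, a lookup by index might look like this (a sketch; `specs`
+    /// is assumed to be a `ColumnSpecs` instance obtained from a query result):
+    ///
+    /// ```text
+    /// if let Some(spec) = specs.get_by_index(0) {
+    ///     println!("first column: {} ({:?})", spec.name(), spec.typ());
+    /// }
+    /// ```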
#[inline] - pub fn col_specs(&self) -> &[ColumnSpec] { - self.metadata - .as_ref() - .map(|metadata| metadata.col_specs()) - .unwrap_or_default() + pub fn get_by_index(&self, k: usize) -> Option> { + self.specs.get(k).map(ColumnSpecView::new_from_column_spec) } - /// Returns a column specification for a column with given name, or None if not found + /// Returns specification of the column with given name returned from the database. #[inline] - pub fn get_column_spec<'a>(&'a self, name: &str) -> Option<(usize, &'a ColumnSpec)> { - self.col_specs() + pub fn get_by_name(&self, name: &str) -> Option<(usize, ColumnSpecView<'res>)> { + self.specs .iter() .enumerate() - .find(|(_id, spec)| spec.name() == name) + .find(|(_idx, spec)| spec.name() == name) + .map(|(idx, spec)| (idx, ColumnSpecView::new_from_column_spec(spec))) } -} - -/// [`QueryResult::rows()`](QueryResult::rows) or a similar function called on a bad QueryResult.\ -/// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ -/// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ -/// It is `None` for queries that can't return rows (e.g `INSERT`). -#[derive(Debug, Clone, Error, PartialEq, Eq)] -#[error( - "QueryResult::rows() or similar function called on a bad QueryResult. - Expected QueryResult.rows to be Some, but it was None. - QueryResult.rows is Some for queries that can return rows (e.g SELECT). - It is None for queries that can't return rows (e.g INSERT)." -)] -pub struct RowsExpectedError; - -/// [`QueryResult::result_not_rows()`](QueryResult::result_not_rows) called on a bad QueryResult.\ -/// Expected `QueryResult.rows` to be `None`, but it was `Some`.\ -/// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ -/// It is `None` for queries that can't return rows (e.g `INSERT`). -#[derive(Debug, Clone, Error, PartialEq, Eq)] -#[error( - "QueryResult::result_not_rows() called on a bad QueryResult. - Expected QueryResult.rows to be None, but it was Some. - QueryResult.rows is Some for queries that can return rows (e.g SELECT). - It is None for queries that can't return rows (e.g INSERT)." -)] -pub struct RowsNotExpectedError; - -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum FirstRowError { - /// [`QueryResult::first_row()`](QueryResult::first_row) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). - #[error(transparent)] - RowsExpected(#[from] RowsExpectedError), - - /// Rows in `QueryResult` are empty - #[error("Rows in QueryResult are empty")] - RowsEmpty, -} - -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum FirstRowTypedError { - /// [`QueryResult::first_row_typed()`](QueryResult::first_row_typed) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). 
- #[error(transparent)] - RowsExpected(#[from] RowsExpectedError), - - /// Rows in `QueryResult` are empty - #[error("Rows in QueryResult are empty")] - RowsEmpty, - - /// Parsing row as the given type failed - #[error(transparent)] - FromRowError(#[from] FromRowError), -} -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum MaybeFirstRowTypedError { - /// [`QueryResult::maybe_first_row_typed()`](QueryResult::maybe_first_row_typed) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`. - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). - #[error(transparent)] - RowsExpected(#[from] RowsExpectedError), - - /// Parsing row as the given type failed - #[error(transparent)] - FromRowError(#[from] FromRowError), -} - -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum SingleRowError { - /// [`QueryResult::single_row()`](QueryResult::single_row) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). - #[error(transparent)] - RowsExpected(#[from] RowsExpectedError), - - /// Expected a single row, found other number of rows - #[error("Expected a single row, found {0} rows")] - BadNumberOfRows(usize), + /// Returns iterator over specification of columns returned from the database, + /// ordered by column order in the response. + #[inline] + pub fn iter(&self) -> impl Iterator> { + self.specs.iter().map(ColumnSpecView::new_from_column_spec) + } } -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum SingleRowTypedError { - /// [`QueryResult::single_row_typed()`](QueryResult::single_row_typed) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). - #[error(transparent)] - RowsExpected(#[from] RowsExpectedError), - - /// Expected a single row, found other number of rows - #[error("Expected a single row, found {0} rows")] - BadNumberOfRows(usize), - - /// Parsing row as the given type failed - #[error(transparent)] - FromRowError(#[from] FromRowError), +/// Result of a single request to the database. It represents any kind of Result frame. +/// +/// The received rows and metadata, which are present if the frame is of Result:Rows kind, +/// are kept in a raw binary form. To deserialize and access them, this struct works +/// in tandem with [`RowsDeserializer`] struct, which borrows from [`QueryResult`]. +/// By borrowing, [`RowsDeserializer`] can avoid heap allocations of metadata strings, +/// borrowing them from the Result frame instead. +/// To create a [`RowsDeserializer`], use [`QueryResult::rows_deserializer`] method. +/// Upon creation, [`RowsDeserializer`] deserializes result metadata and allocates it, +/// so this should be considered a moderately costly operation and performed only once. +/// +/// NOTE: this is a result of a single CQL request. If you use paging for your query, +/// this will contain exactly one page. 
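
Illustrative sketch, not part of the patch: how the `ColumnSpecs` view added above might be consumed. The `inspect_columns` helper and the `"pk"` column name are made up, and `rows_deserializer()` is the accessor introduced further down in this file.

```rust
use scylla::transport::query_result::QueryResult;

fn inspect_columns(result: &QueryResult) -> Result<(), Box<dyn std::error::Error>> {
    if let Some(rows_result) = result.rows_deserializer()? {
        let specs = rows_result.column_specs();

        // Look a single column up by name; yields its index and a view.
        if let Some((idx, col)) = specs.get_by_name("pk") {
            println!("`pk` is column #{idx} of type {:?}", col.typ());
        }

        // Or walk all columns in response order.
        for col in specs.iter() {
            println!(
                "{}.{}: {}",
                col.table_spec().ks_name(),
                col.table_spec().table_name(),
                col.name()
            );
        }
    }
    Ok(())
}
```
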
-#[derive(Debug, Clone, Error, PartialEq, Eq)]
-pub enum SingleRowTypedError {
-    /// [`QueryResult::single_row_typed()`](QueryResult::single_row_typed) called on a bad QueryResult.\
-    /// Expected `QueryResult.rows` to be `Some`, but it was `None`.\
-    /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\
-    /// It is `None` for queries that can't return rows (e.g `INSERT`).
-    #[error(transparent)]
-    RowsExpected(#[from] RowsExpectedError),
-
-    /// Expected a single row, found other number of rows
-    #[error("Expected a single row, found {0} rows")]
-    BadNumberOfRows(usize),
-
-    /// Parsing row as the given type failed
-    #[error(transparent)]
-    FromRowError(#[from] FromRowError),
+/// Result of a single request to the database. It represents any kind of Result frame.
+///
+/// The received rows and metadata, which are present if the frame is of Result:Rows kind,
+/// are kept in a raw binary form. To deserialize and access them, this struct works
+/// in tandem with the [`RowsDeserializer`] struct, which borrows from [`QueryResult`].
+/// By borrowing, [`RowsDeserializer`] can avoid heap allocations of metadata strings,
+/// borrowing them from the Result frame instead.
+/// To create a [`RowsDeserializer`], use the [`QueryResult::rows_deserializer`] method.
+/// Upon creation, [`RowsDeserializer`] deserializes result metadata and allocates it,
+/// so this should be considered a moderately costly operation and performed only once.
+///
+/// NOTE: this is a result of a single CQL request. If you use paging for your query,
+/// this will contain exactly one page.
+#[derive(Debug)]
+pub struct QueryResult {
+    raw_rows: Option<RawRows>,
+    tracing_id: Option<Uuid>,
+    warnings: Vec<String>,
 }

-impl From<FirstRowError> for FirstRowTypedError {
-    fn from(err: FirstRowError) -> FirstRowTypedError {
-        match err {
-            FirstRowError::RowsExpected(e) => FirstRowTypedError::RowsExpected(e),
-            FirstRowError::RowsEmpty => FirstRowTypedError::RowsEmpty,
+impl QueryResult {
+    pub(crate) fn new(
+        raw_rows: Option<RawRows>,
+        tracing_id: Option<Uuid>,
+        warnings: Vec<String>,
+    ) -> Self {
+        Self {
+            raw_rows,
+            tracing_id,
+            warnings,
         }
     }
-}

-impl From<SingleRowError> for SingleRowTypedError {
-    fn from(err: SingleRowError) -> SingleRowTypedError {
-        match err {
-            SingleRowError::RowsExpected(e) => SingleRowTypedError::RowsExpected(e),
-            SingleRowError::BadNumberOfRows(r) => SingleRowTypedError::BadNumberOfRows(r),
+    // Preferred to implementing Default, because users shouldn't be able to create
+    // an empty QueryResult.
+    //
+    // For now unused, but it will be used once Session's API is migrated
+    // to the new QueryResult.
+    #[allow(dead_code)]
+    pub(crate) fn mock_empty() -> Self {
+        Self {
+            raw_rows: None,
+            tracing_id: None,
+            warnings: Vec::new(),
         }
     }
-}

-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::{
-        frame::response::result::{CqlValue, Row},
-        test_utils::setup_tracing,
-    };
-    use std::convert::TryInto;
-
-    use assert_matches::assert_matches;
-    use scylla_cql::frame::response::result::{ColumnType, TableSpec};
-
-    // Returns specified number of rows, each one containing one int32 value.
-    // Values are 0, 1, 2, 3, 4, ...
-    fn make_rows(rows_num: usize) -> Vec<Row> {
-        let mut rows: Vec<Row> = Vec::with_capacity(rows_num);
-        for cur_value in 0..rows_num {
-            let int_val: i32 = cur_value.try_into().unwrap();
-            rows.push(Row {
-                columns: vec![Some(CqlValue::Int(int_val))],
-            });
-        }
-        rows
+    /// Warnings emitted by the database.
+    #[inline]
+    pub fn warnings(&self) -> impl Iterator<Item = &str> {
+        self.warnings.iter().map(String::as_str)
     }

-    // Just like make_rows, but each column has one String value
-    // values are "val0", "val1", "val2", ...
-    fn make_string_rows(rows_num: usize) -> Vec<Row> {
-        let mut rows: Vec<Row> = Vec::with_capacity(rows_num);
-        for cur_value in 0..rows_num {
-            rows.push(Row {
-                columns: vec![Some(CqlValue::Text(format!("val{}", cur_value)))],
-            });
-        }
-        rows
+    /// Tracing ID associated with this CQL request.
+    #[inline]
+    pub fn tracing_id(&self) -> Option<Uuid> {
+        self.tracing_id
     }

-    fn make_test_metadata() -> ResultMetadata<'static> {
-        let table_spec = TableSpec::borrowed("some_keyspace", "some_table");
-
-        let column_spec = ColumnSpec::borrowed("column0", ColumnType::Int, table_spec);
-
-        ResultMetadata::new_for_test(1, vec![column_spec])
+    /// Returns a bool indicating whether the current response is of Rows type.
+    #[inline]
+    pub fn is_rows(&self) -> bool {
+        self.raw_rows.is_some()
     }

-    fn make_not_rows_query_result() -> QueryResult {
-        QueryResult {
-            rows: None,
-            warnings: vec![],
-            tracing_id: None,
-            metadata: None,
-            serialized_size: 0,
+    /// Returns `Ok` for a request's result that shouldn't contain any rows.\
+    /// Will return `Ok` for `INSERT` result, but a `SELECT` result, even an empty one, will cause an error.\
+    /// Opposite of [`rows_deserializer()`](QueryResult::rows_deserializer).
+    #[inline]
+    pub fn result_not_rows(&self) -> Result<(), ResultNotRowsError> {
+        match &self.raw_rows {
+            Some(_) => Err(ResultNotRowsError),
+            None => Ok(()),
         }
     }

-    fn make_rows_query_result(rows_num: usize) -> QueryResult {
-        let mut res = make_not_rows_query_result();
-        res.rows = Some(make_rows(rows_num));
-        res.metadata = Some(Arc::new(make_test_metadata()));
-        res
+    /// Creates a lifetime-bound [`RowsDeserializer`] to enable deserializing rows contained
+    /// in this [`QueryResult`]'s frame. Deserializes result metadata and allocates it,
+    /// so **this should be considered a moderately costly operation and performed only once**.
+    ///
+    /// Returns `None` if the response is not of Rows kind.
+    ///
+    /// The created [`RowsDeserializer`] borrows from the [`QueryResult`], which saves some
+    /// string heap allocations, but limits flexibility (e.g., such a borrowing [`RowsDeserializer`]
+    /// can't be stored aside on the heap due to lifetime issues).
+    /// To gain more flexibility at the cost of additional allocations,
+    /// use [`QueryResult::rows_deserializer_owned`].
+    ///
+    /// ```rust
+    /// # use scylla::transport::query_result::{QueryResult, RowsDeserializer};
+    /// # fn example(query_result: QueryResult) -> Result<(), Box<dyn std::error::Error>> {
+    /// let rows_deserializer = query_result.rows_deserializer()?;
+    /// if let Some(rows_result) = rows_deserializer {
+    ///     let mut rows_iter = rows_result.rows::<(i32, &str)>()?;
+    ///     while let Some((num, text)) = rows_iter.next().transpose()? {
+    ///         // do something with `num` and `text`
+    ///     }
+    /// } else {
+    ///     // Response was not Result:Rows, but some other kind of Result.
+    /// }
+    ///
+    /// Ok(())
+    /// # }
+    /// ```
+    pub fn rows_deserializer(&self) -> Result<Option<RowsDeserializer<'_>>, RowsParseError> {
+        self.raw_rows
+            .as_ref()
+            .map(|raw_rows| {
+                let raw_rows_with_metadata = raw_rows.deserialize_borrowed_metadata()?;
+                Ok(RowsDeserializer {
+                    raw_rows_with_metadata,
+                })
+            })
+            .transpose()
+    }
+
+    /// Creates an owned [`RowsDeserializer`] to enable deserializing rows contained
+    /// in this [`QueryResult`]'s frame. Deserializes result metadata and allocates it,
+    /// so this should be considered a moderately costly operation and performed only once.
+    ///
+    /// Returns `None` if the response is not of Rows kind.
+    ///
+    /// The created [`RowsDeserializer`] does not borrow from the [`QueryResult`],
+    /// so it does not limit flexibility. However, the cost involves more string
+    /// heap allocations.
+    /// If you don't need that flexibility, use the cheaper [`QueryResult::rows_deserializer`].
+    ///
+    /// ```compile_fail
+    /// # use scylla::transport::QueryResult;
+    /// fn example(query: impl FnOnce() -> QueryResult) -> Result<(), Box<dyn std::error::Error>> {
+    ///     let deserializer = query().rows_deserializer()?.unwrap();
+    ///
+    ///     // Compiler complains: "Temporary value dropped when borrowed".
+    ///     let col_specs = deserializer.column_specs();
+    ///
+    ///     Ok(())
+    /// }
+    /// ```
+    ///
+    /// ```rust
+    /// # use scylla::transport::query_result::{QueryResult, RowsDeserializer};
+    /// fn example(
+    ///     query: impl FnOnce() -> QueryResult
+    /// ) -> Result<RowsDeserializer<'static>, Box<dyn std::error::Error>> {
+    ///     let deserializer = query().rows_deserializer_owned()?.unwrap();
+    ///
+    ///     // This compiles.
+    ///     let col_specs = deserializer.column_specs();
+    ///
+    ///     // RowsDeserializer is fully owned and independent, but at the cost
+    ///     // of moderately more expensive metadata deserialization.
+    ///     Ok(deserializer)
+    /// }
+    /// ```
+    pub fn rows_deserializer_owned(
+        &self,
+    ) -> Result<Option<RowsDeserializer<'static>>, RowsParseError> {
+        self.raw_rows
+            .as_ref()
+            .map(|raw_rows| {
+                let raw_rows_with_metadata = raw_rows.deserialize_owned_metadata()?;
+                Ok(RowsDeserializer {
+                    raw_rows_with_metadata,
+                })
+            })
+            .transpose()
+    }
+
+    /// Transforms itself into the legacy result type, by eagerly deserializing rows
+    /// into the Row type. This is inefficient, and should only be used during the transition
+    /// period to the new API.
+    pub fn into_legacy_result(self) -> Result<LegacyQueryResult, RowsParseError> {
+        if let Some(raw_rows) = self.raw_rows {
+            let raw_rows_with_metadata = raw_rows.deserialize_owned_metadata()?;
+
+            let deserialized_rows = raw_rows_with_metadata
+                .rows_iter::<Row>()?
+                .collect::<Result<Vec<Row>, DeserializationError>>()?;
+            let serialized_size = raw_rows_with_metadata.rows_size();
+            let metadata = raw_rows_with_metadata.into_metadata();
+
+            Ok(LegacyQueryResult {
+                rows: Some(deserialized_rows),
+                warnings: self.warnings,
+                tracing_id: self.tracing_id,
+                metadata: Some(metadata),
+                serialized_size,
+            })
+        } else {
+            Ok(LegacyQueryResult {
+                rows: None,
+                warnings: self.warnings,
+                tracing_id: self.tracing_id,
+                metadata: None,
+                serialized_size: 0,
+            })
+        }
     }
+}
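
Illustrative sketch, not part of the patch: the transition path that `into_legacy_result` enables for callers that still consume the eager, row-based representation. The `adapt` helper is hypothetical; the field accesses match the legacy struct as defined in this patch.

```rust
use scylla::transport::query_result::QueryResult;
use scylla::LegacyQueryResult;

fn adapt(result: QueryResult) -> Result<LegacyQueryResult, Box<dyn std::error::Error>> {
    // Eagerly deserializes every row into the legacy `Row` representation,
    // so this costs the work the old code path always performed.
    let legacy = result.into_legacy_result()?;
    if let Some(rows) = &legacy.rows {
        println!("received {} rows ({} bytes)", rows.len(), legacy.serialized_size);
    }
    Ok(legacy)
}
```
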
-    fn make_string_rows_query_result(rows_num: usize) -> QueryResult {
-        let mut res = make_not_rows_query_result();
-        res.rows = Some(make_string_rows(rows_num));
-        res.metadata = Some(Arc::new(make_test_metadata()));
-        res
+/// Enables deserialization of rows contained in a [`QueryResult`].
+///
+/// Upon creation, it deserializes result metadata and allocates it,
+/// so this should be considered a moderately costly operation and performed
+/// only once.
+///
+/// This struct provides generic methods which enable typed access to the data,
+/// by deserializing rows on the fly to the type provided as a type parameter.
+/// Those methods are:
+/// - rows() - for iterating through rows,
+/// - first_row() and maybe_first_row() - for accessing the first row,
+/// - single_row() - for accessing the first row, additionally asserting
+///   that it's the only one in the response.
+///
+/// ```rust
+/// # use scylla::transport::query_result::{QueryResult, RowsDeserializer};
+/// # fn example(query_result: QueryResult) -> Result<(), Box<dyn std::error::Error>> {
+/// let rows_deserializer = query_result.rows_deserializer()?;
+/// if let Some(rows_result) = rows_deserializer {
+///     let mut rows_iter = rows_result.rows::<(i32, &str)>()?;
+///     while let Some((num, text)) = rows_iter.next().transpose()? {
+///         // do something with `num` and `text`
+///     }
+/// } else {
+///     // Response was not Result:Rows, but some other kind of Result.
+/// }
+///
+/// Ok(())
+/// # }
+/// ```
+#[derive(Debug)]
+pub struct RowsDeserializer<'frame> {
+    raw_rows_with_metadata: RawRowsWithDeserializedMetadata<'frame>,
+}
+
+impl<'frame> RowsDeserializer<'frame> {
+    /// Returns the number of received rows.
+    #[inline]
+    pub fn rows_num(&self) -> usize {
+        self.raw_rows_with_metadata.rows_count()
     }

-    #[test]
-    fn rows_num_test() {
-        setup_tracing();
-        assert_eq!(
-            make_not_rows_query_result().rows_num(),
-            Err(RowsExpectedError)
-        );
-        assert_eq!(make_rows_query_result(0).rows_num(), Ok(0));
-        assert_eq!(make_rows_query_result(1).rows_num(), Ok(1));
-        assert_eq!(make_rows_query_result(2).rows_num(), Ok(2));
-        assert_eq!(make_rows_query_result(3).rows_num(), Ok(3));
+    /// Returns the size of the serialized rows.
+    #[inline]
+    pub fn rows_size(&self) -> usize {
+        self.raw_rows_with_metadata.rows_size()
     }

-    #[test]
-    fn rows_test() {
-        setup_tracing();
-        assert_eq!(make_not_rows_query_result().rows(), Err(RowsExpectedError));
-        assert_eq!(make_rows_query_result(0).rows(), Ok(vec![]));
-        assert_eq!(make_rows_query_result(1).rows(), Ok(make_rows(1)));
-        assert_eq!(make_rows_query_result(2).rows(), Ok(make_rows(2)));
+    /// Returns the received rows when present.
+    ///
+    /// Returns an error if the rows in the response are of incorrect type.
+    #[inline]
+    pub fn rows<'res, R: DeserializeRow<'res>>(
+        &'res self,
+    ) -> Result<TypedRowIterator<'res, R>, RowsError> {
+        self.raw_rows_with_metadata
+            .rows_iter()
+            .map_err(RowsError::TypeCheckFailed)
+    }
+
+    /// Returns `Option<R>` containing the first row of the result.
+    ///
+    /// Fails when the rows in the response are of incorrect type,
+    /// or when the deserialization fails.
+    pub fn maybe_first_row<'s, R: DeserializeRow<'s>>(
+        &'s self,
+    ) -> Result<Option<R>, MaybeFirstRowError> {
+        self.rows::<R>()
+            .map_err(|err| match err {
+                RowsError::TypeCheckFailed(typck_err) => {
+                    MaybeFirstRowError::TypeCheckFailed(typck_err)
+                }
+            })?
+            .next()
+            .transpose()
+            .map_err(MaybeFirstRowError::DeserializationFailed)
+    }
+
+    /// Returns the first row from the received rows.
+    ///
+    /// When the first row is not available, returns an error.
+    /// Fails when the rows in the response are of incorrect type,
+    /// or when the deserialization fails.
+    pub fn first_row<'res, R: DeserializeRow<'res>>(&'res self) -> Result<R, FirstRowError> {
+        match self.maybe_first_row::<R>() {
+            Ok(Some(row)) => Ok(row),
+            Ok(None) => Err(FirstRowError::RowsEmpty),
+            Err(MaybeFirstRowError::TypeCheckFailed(err)) => {
+                Err(FirstRowError::TypeCheckFailed(err))
+            }
+            Err(MaybeFirstRowError::DeserializationFailed(err)) => {
+                Err(FirstRowError::DeserializationFailed(err))
+            }
+        }
     }

-    #[test]
-    fn rows_typed_test() {
-        setup_tracing();
-        assert!(make_not_rows_query_result().rows_typed::<(i32,)>().is_err());
+    /// Returns the only received row.
+    ///
+    /// Fails if the result is anything else than a single row.
+    /// Fails when the rows in the response are of incorrect type,
+    /// or when the deserialization fails.
+    pub fn single_row<'res, R: DeserializeRow<'res>>(&'res self) -> Result<R, SingleRowError> {
+        match self.rows::<R>() {
+            Ok(mut rows) => match rows.next() {
+                Some(Ok(row)) => {
+                    if rows.rows_remaining() != 0 {
+                        return Err(SingleRowError::UnexpectedRowCount(
+                            rows.rows_remaining() + 1,
+                        ));
+                    }
+                    Ok(row)
+                }
+                Some(Err(err)) => Err(SingleRowError::DeserializationFailed(err)),
+                None => Err(SingleRowError::UnexpectedRowCount(0)),
+            },
+            Err(RowsError::TypeCheckFailed(err)) => Err(SingleRowError::TypeCheckFailed(err)),
+        }
+    }

-        let rows0: Vec<(i32,)> = make_rows_query_result(0)
-            .rows_typed::<(i32,)>()
-            .unwrap()
-            .map(|r| r.unwrap())
-            .collect();
+    /// Returns column specifications.
+    #[inline]
+    pub fn column_specs(&self) -> ColumnSpecs {
+        ColumnSpecs::new(self.raw_rows_with_metadata.metadata().col_specs())
+    }
+}

-        assert_eq!(rows0, vec![]);
+/// An error returned by [`RowsDeserializer::rows`].
+#[derive(Debug, Error)]
+pub enum RowsError {
+    /// Type check failed
+    #[error("Type check failed: {0}")]
+    TypeCheckFailed(#[from] TypeCheckError),
+}

-        let rows1: Vec<(i32,)> = make_rows_query_result(1)
-            .rows_typed::<(i32,)>()
-            .unwrap()
-            .map(|r| r.unwrap())
-            .collect();
+/// An error returned by [`RowsDeserializer::maybe_first_row`].
+#[derive(Debug, Error)]
+pub enum MaybeFirstRowError {
+    /// Type check failed
+    #[error("Type check failed: {0}")]
+    TypeCheckFailed(#[from] TypeCheckError),

-        assert_eq!(rows1, vec![(0,)]);
+    /// Deserialization failed
+    #[error("Deserialization failed: {0}")]
+    DeserializationFailed(#[from] DeserializationError),
+}

-        let rows2: Vec<(i32,)> = make_rows_query_result(2)
-            .rows_typed::<(i32,)>()
-            .unwrap()
-            .map(|r| r.unwrap())
-            .collect();
+/// An error returned by [`RowsDeserializer::first_row`].
+#[derive(Debug, Error)]
+pub enum FirstRowError {
+    /// The request response was of Rows type, but no rows were returned
+    #[error("The request response was of Rows type, but no rows were returned")]
+    RowsEmpty,

-        assert_eq!(rows2, vec![(0,), (1,)]);
-    }
+    /// Type check failed
+    #[error("Type check failed: {0}")]
+    TypeCheckFailed(#[from] TypeCheckError),

-    #[test]
-    fn result_not_rows_test() {
-        setup_tracing();
-        assert_eq!(make_not_rows_query_result().result_not_rows(), Ok(()));
-        assert_eq!(
-            make_rows_query_result(0).result_not_rows(),
-            Err(RowsNotExpectedError)
-        );
-        assert_eq!(
-            make_rows_query_result(1).result_not_rows(),
-            Err(RowsNotExpectedError)
-        );
-        assert_eq!(
-            make_rows_query_result(2).result_not_rows(),
-            Err(RowsNotExpectedError)
-        );
-    }
-
-    #[test]
-    fn rows_or_empty_test() {
-        setup_tracing();
-        assert_eq!(make_not_rows_query_result().rows_or_empty(), vec![]);
-        assert_eq!(make_rows_query_result(0).rows_or_empty(), make_rows(0));
-        assert_eq!(make_rows_query_result(1).rows_or_empty(), make_rows(1));
-        assert_eq!(make_rows_query_result(2).rows_or_empty(), make_rows(2));
-    }
+    /// Deserialization failed
+    #[error("Deserialization failed: {0}")]
+    DeserializationFailed(#[from] DeserializationError),
+}

-    #[test]
-    fn rows_typed_or_empty() {
-        setup_tracing();
-        let rows_empty: Vec<(i32,)> = make_not_rows_query_result()
-            .rows_typed_or_empty::<(i32,)>()
-            .map(|r| r.unwrap())
-            .collect();
+/// An error returned by [`RowsDeserializer::single_row`].
+#[derive(Debug, Error, Clone)]
+pub enum SingleRowError {
+    /// Expected one row, but got a different count
+    #[error("Expected a single row, but got {0} rows")]
+    UnexpectedRowCount(usize),

-        assert_eq!(rows_empty, vec![]);
+    /// Type check failed
+    #[error("Type check failed: {0}")]
+    TypeCheckFailed(#[from] TypeCheckError),

-        let rows0: Vec<(i32,)> = make_rows_query_result(0)
-            .rows_typed_or_empty::<(i32,)>()
-            .map(|r| r.unwrap())
-            .collect();
+    /// Deserialization failed
+    #[error("Deserialization failed: {0}")]
+    DeserializationFailed(#[from] DeserializationError),
+}
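
Illustrative sketch, not part of the patch: unlike the legacy enums they replace, these error types separate metadata type-check failures from per-row deserialization failures, so callers can match on them individually. The `exactly_one` helper and the `(i32,)` row type are assumptions for this example.

```rust
use scylla::transport::query_result::{QueryResult, SingleRowError};

fn exactly_one(result: &QueryResult) -> Result<Option<(i32,)>, Box<dyn std::error::Error>> {
    let Some(rows_result) = result.rows_deserializer()? else {
        return Ok(None); // Not a Result:Rows response at all.
    };
    match rows_result.single_row::<(i32,)>() {
        Ok(row) => Ok(Some(row)),
        // Zero rows or more than one row; treated as recoverable here.
        Err(SingleRowError::UnexpectedRowCount(_)) => Ok(None),
        // Type-check and deserialization failures are propagated.
        Err(err) => Err(err.into()),
    }
}
```
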
+/// An error returned by [`QueryResult::result_not_rows`].
+///
+/// It indicates that the response to the request was, unexpectedly, of Rows kind.
+#[derive(Debug, Error)]
+#[error("The request response was, unexpectedly, of Rows kind")]
+pub struct ResultNotRowsError;

-        assert_eq!(rows0, vec![]);
+#[cfg(test)]
+mod tests {
+    use assert_matches::assert_matches;
+    use bytes::{Bytes, BytesMut};
+    use itertools::Itertools as _;
+    use scylla_cql::frame::response::result::ResultMetadata;
+    use scylla_cql::frame::types;

-        let rows1: Vec<(i32,)> = make_rows_query_result(1)
-            .rows_typed_or_empty::<(i32,)>()
-            .map(|r| r.unwrap())
-            .collect();
+    use super::*;

-        assert_eq!(rows1, vec![(0,)]);
+    const TABLE_SPEC: TableSpec<'static> = TableSpec::borrowed("ks", "tbl");

-        let rows2: Vec<(i32,)> = make_rows_query_result(2)
-            .rows_typed_or_empty::<(i32,)>()
-            .map(|r| r.unwrap())
-            .collect();
+    fn column_spec_infinite_iter() -> impl Iterator<Item = ColumnSpec<'static>> {
+        (0..).map(|k| {
+            ColumnSpec::owned(
+                format!("col_{}", k),
+                match k % 3 {
+                    0 => ColumnType::Ascii,
+                    1 => ColumnType::Boolean,
+                    2 => ColumnType::Float,
+                    _ => unreachable!(),
+                },
+                TABLE_SPEC,
+            )
+        })
+    }

-        assert_eq!(rows2, vec![(0,), (1,)]);
-    }
-
     #[test]
-    fn first_row_test() {
-        setup_tracing();
-        assert_eq!(
-            make_not_rows_query_result().first_row(),
-            Err(FirstRowError::RowsExpected(RowsExpectedError))
-        );
-        assert_eq!(
-            make_rows_query_result(0).first_row(),
-            Err(FirstRowError::RowsEmpty)
-        );
-        assert_eq!(
-            make_rows_query_result(1).first_row(),
-            Ok(make_rows(1).into_iter().next().unwrap())
-        );
-        assert_eq!(
-            make_rows_query_result(2).first_row(),
-            Ok(make_rows(2).into_iter().next().unwrap())
-        );
-        assert_eq!(
-            make_rows_query_result(3).first_row(),
-            Ok(make_rows(3).into_iter().next().unwrap())
-        );
-    }
+    fn test_query_result() {
+        fn serialize_cells(cells: impl IntoIterator<Item = Option<impl AsRef<[u8]>>>) -> Bytes {
+            let mut bytes = BytesMut::new();
+            for cell in cells {
+                types::write_bytes_opt(cell, &mut bytes).unwrap();
+            }
+            bytes.freeze()
+        }

-    #[test]
-    fn first_row_typed_test() {
-        setup_tracing();
-        assert_eq!(
-            make_not_rows_query_result().first_row_typed::<(i32,)>(),
-            Err(FirstRowTypedError::RowsExpected(RowsExpectedError))
-        );
-        assert_eq!(
-            make_rows_query_result(0).first_row_typed::<(i32,)>(),
-            Err(FirstRowTypedError::RowsEmpty)
-        );
-        assert_eq!(
-            make_rows_query_result(1).first_row_typed::<(i32,)>(),
-            Ok((0,))
-        );
-        assert_eq!(
-            make_rows_query_result(2).first_row_typed::<(i32,)>(),
-            Ok((0,))
-        );
-        assert_eq!(
-            make_rows_query_result(3).first_row_typed::<(i32,)>(),
-            Ok((0,))
-        );
-
-        assert_matches!(
-            make_string_rows_query_result(2).first_row_typed::<(i32,)>(),
-            Err(FirstRowTypedError::FromRowError(_))
-        );
-    }
+        fn sample_result_metadata(cols: usize) -> ResultMetadata<'static> {
+            ResultMetadata::new_for_test(cols, column_spec_infinite_iter().take(cols).collect())
+        }

-    #[test]
-    fn maybe_first_row_test() {
-        setup_tracing();
-        assert_eq!(
-            make_not_rows_query_result().maybe_first_row(),
-            Err(RowsExpectedError)
-        );
-        assert_eq!(make_rows_query_result(0).maybe_first_row(), Ok(None));
-        assert_eq!(
-            make_rows_query_result(1).maybe_first_row(),
-            Ok(Some(make_rows(1).into_iter().next().unwrap()))
-        );
-        assert_eq!(
-            make_rows_query_result(2).maybe_first_row(),
-            Ok(Some(make_rows(2).into_iter().next().unwrap()))
-        );
-        assert_eq!(
-            make_rows_query_result(3).maybe_first_row(),
-            Ok(Some(make_rows(3).into_iter().next().unwrap()))
-        );
-    }
+        fn sample_raw_rows(cols: usize, rows: usize) -> RawRows {
+            let metadata = sample_result_metadata(cols);
+
+            static STRING: &[u8] = "MOCK".as_bytes();
+            static BOOLEAN: &[u8] = &(true as i8).to_be_bytes();
+            static FLOAT: &[u8] = &12341_i32.to_be_bytes();
+            let cells = metadata.col_specs().iter().map(|spec| match spec.typ() {
+                ColumnType::Ascii => STRING,
+                ColumnType::Boolean => BOOLEAN,
+                ColumnType::Float => FLOAT,
+                _ => unreachable!(),
+            });
+            let bytes = serialize_cells(cells.map(Some));
+            RawRows::new_for_test(None, Some(metadata), false, rows, &bytes).unwrap()
+        }

-    #[test]
-    fn maybe_first_row_typed_test() {
-        setup_tracing();
-        assert_eq!(
-            make_not_rows_query_result().maybe_first_row_typed::<(i32,)>(),
-            Err(MaybeFirstRowTypedError::RowsExpected(RowsExpectedError))
-        );
-
-        assert_eq!(
-            make_rows_query_result(0).maybe_first_row_typed::<(i32,)>(),
-            Ok(None)
-        );
-
-        assert_eq!(
-            make_rows_query_result(1).maybe_first_row_typed::<(i32,)>(),
-            Ok(Some((0,)))
-        );
-
-        assert_eq!(
-            make_rows_query_result(2).maybe_first_row_typed::<(i32,)>(),
-            Ok(Some((0,)))
-        );
-
-        assert_eq!(
-            make_rows_query_result(3).maybe_first_row_typed::<(i32,)>(),
-            Ok(Some((0,)))
-        );
-
-        assert_matches!(
-            make_string_rows_query_result(1).maybe_first_row_typed::<(i32,)>(),
-            Err(MaybeFirstRowTypedError::FromRowError(_))
-        )
-    }
+        // Used to trigger DeserializationError.
+        fn sample_raw_rows_invalid_bytes(cols: usize, rows: usize) -> RawRows {
+            let metadata = sample_result_metadata(cols);

-    #[test]
-    fn single_row_test() {
-        setup_tracing();
-        assert_eq!(
-            make_not_rows_query_result().single_row(),
-            Err(SingleRowError::RowsExpected(RowsExpectedError))
-        );
-        assert_eq!(
-            make_rows_query_result(0).single_row(),
-            Err(SingleRowError::BadNumberOfRows(0))
-        );
-        assert_eq!(
-            make_rows_query_result(1).single_row(),
-            Ok(make_rows(1).into_iter().next().unwrap())
-        );
-        assert_eq!(
-            make_rows_query_result(2).single_row(),
-            Err(SingleRowError::BadNumberOfRows(2))
-        );
-        assert_eq!(
-            make_rows_query_result(3).single_row(),
-            Err(SingleRowError::BadNumberOfRows(3))
-        );
-    }
+            RawRows::new_for_test(None, Some(metadata), false, rows, &[]).unwrap()
+        }

-    #[test]
-    fn single_row_typed_test() {
-        setup_tracing();
-        assert_eq!(
-            make_not_rows_query_result().single_row_typed::<(i32,)>(),
-            Err(SingleRowTypedError::RowsExpected(RowsExpectedError))
-        );
-        assert_eq!(
-            make_rows_query_result(0).single_row_typed::<(i32,)>(),
-            Err(SingleRowTypedError::BadNumberOfRows(0))
-        );
-        assert_eq!(
-            make_rows_query_result(1).single_row_typed::<(i32,)>(),
-            Ok((0,))
-        );
-        assert_eq!(
-            make_rows_query_result(2).single_row_typed::<(i32,)>(),
-            Err(SingleRowTypedError::BadNumberOfRows(2))
-        );
-        assert_eq!(
-            make_rows_query_result(3).single_row_typed::<(i32,)>(),
-            Err(SingleRowTypedError::BadNumberOfRows(3))
-        );
-
-        assert_matches!(
-            make_string_rows_query_result(1).single_row_typed::<(i32,)>(),
-            Err(SingleRowTypedError::FromRowError(_))
-        );
-    }
-}
+        // Check tracing ID
+        for tracing_id in [None, Some(Uuid::from_u128(0x_feed_dead))] {
+            for raw_rows in [None, Some(sample_raw_rows(7, 6))] {
+                let qr = QueryResult::new(raw_rows, tracing_id, vec![]);
+                assert_eq!(qr.tracing_id(), tracing_id);
+            }
+        }
+
+        // Check warnings
+        for raw_rows in [None, Some(sample_raw_rows(7, 6))] {
+            let warnings = vec!["Ooops", "Meltdown..."];
+            let qr = QueryResult::new(
+                raw_rows,
+                None,
+                warnings.iter().copied().map(String::from).collect(),
+            );
+            assert_eq!(qr.warnings().collect_vec(), warnings);
+        }
+
+        // Check col specs
+        {
+            // Not RESULT::Rows response -> no column specs
+            {
+                let rqr = QueryResult::new(None, None, Vec::new());
+                let qr = rqr.rows_deserializer().unwrap();
+                assert_matches!(qr, None);
+            }
+
+            // RESULT::Rows response -> some column specs
+            {
+                let n = 5;
+                let metadata = sample_result_metadata(n);
+                let rr = RawRows::new_for_test(None, Some(metadata), false, 0, &[]).unwrap();
+                let rqr = QueryResult::new(Some(rr), None, Vec::new());
+                let qr = rqr.rows_deserializer().unwrap().unwrap();
+                let column_specs = qr.column_specs();
+                assert_eq!(column_specs.len(), n);
+
+                // By index
+                {
+                    for (i, expected_col_spec) in column_spec_infinite_iter().enumerate().take(n) {
+                        let expected_view =
+                            ColumnSpecView::new_from_column_spec(&expected_col_spec);
+                        assert_eq!(column_specs.get_by_index(i), Some(expected_view));
+                    }
+
+                    assert_matches!(column_specs.get_by_index(n), None);
+                }
+
+                // By name
+                {
+                    for (idx, expected_col_spec) in column_spec_infinite_iter().enumerate().take(n)
+                    {
+                        let name = expected_col_spec.name();
+                        let expected_view =
+                            ColumnSpecView::new_from_column_spec(&expected_col_spec);
+                        assert_eq!(column_specs.get_by_name(name), Some((idx, expected_view)));
+                    }
+
+                    assert_matches!(column_specs.get_by_name("ala ma kota"), None);
+                }
+
+                // By iter
+                {
+                    for (got_view, expected_col_spec) in
+                        column_specs.iter().zip(column_spec_infinite_iter())
+                    {
+                        let expected_view =
+                            ColumnSpecView::new_from_column_spec(&expected_col_spec);
+                        assert_eq!(got_view, expected_view);
+                    }
+                }
+            }
+        }
+
+        // rows(), result_not_rows(), first_row(), maybe_first_row(), single_row()
+        // All errors are checked.
+        {
+            // Not RESULT::Rows
+            {
+                let rqr = QueryResult::new(None, None, Vec::new());
+                let qr = rqr.rows_deserializer().unwrap();
+                assert_matches!(qr, None);
+            }
+
+            // RESULT::Rows with 0 rows
+            {
+                let rr = sample_raw_rows(1, 0);
+                let rqr = QueryResult::new(Some(rr), None, Vec::new());
+                let qr = rqr.rows_deserializer().unwrap().unwrap();
+
+                assert_matches!(rqr.result_not_rows(), Err(ResultNotRowsError));
+
+                // Type check error
+                {
+                    assert_matches!(qr.rows::<(i32,)>(), Err(RowsError::TypeCheckFailed(_)));
+
+                    assert_matches!(
+                        qr.first_row::<(i32,)>(),
+                        Err(FirstRowError::TypeCheckFailed(_))
+                    );
+                    assert_matches!(
+                        qr.maybe_first_row::<(i32,)>(),
+                        Err(MaybeFirstRowError::TypeCheckFailed(_))
+                    );
+
+                    assert_matches!(
+                        qr.single_row::<(i32,)>(),
+                        Err(SingleRowError::TypeCheckFailed(_))
+                    );
+                }
+
+                // Correct type
+                {
+                    assert_matches!(qr.rows::<(&str,)>(), Ok(_));
+
+                    assert_matches!(qr.first_row::<(&str,)>(), Err(FirstRowError::RowsEmpty));
+                    assert_matches!(qr.maybe_first_row::<(&str,)>(), Ok(None));
+
+                    assert_matches!(
+                        qr.single_row::<(&str,)>(),
+                        Err(SingleRowError::UnexpectedRowCount(0))
+                    );
+                }
+            }
+
+            // RESULT::Rows with 1 row
+            {
+                let rr_good_data = sample_raw_rows(2, 1);
+                let rr_bad_data = sample_raw_rows_invalid_bytes(2, 1);
+                let rqr_good_data = QueryResult::new(Some(rr_good_data), None, Vec::new());
+                let qr_good_data = rqr_good_data.rows_deserializer().unwrap().unwrap();
+                let rqr_bad_data = QueryResult::new(Some(rr_bad_data), None, Vec::new());
+                let qr_bad_data = rqr_bad_data.rows_deserializer().unwrap().unwrap();
+
+                for rqr in [&rqr_good_data, &rqr_bad_data] {
+                    assert_matches!(rqr.result_not_rows(), Err(ResultNotRowsError));
+                }
+
+                for qr in [&qr_good_data, &qr_bad_data] {
+                    // Type check error
+                    {
+                        assert_matches!(
+                            qr.rows::<(i32, i32)>(),
+                            Err(RowsError::TypeCheckFailed(_))
+                        );
+
+                        assert_matches!(
+                            qr.first_row::<(i32, i32)>(),
+                            Err(FirstRowError::TypeCheckFailed(_))
+                        );
+                        assert_matches!(
+                            qr.maybe_first_row::<(i32, i32)>(),
+                            Err(MaybeFirstRowError::TypeCheckFailed(_))
+                        );
+
+                        assert_matches!(
+                            qr.single_row::<(i32, i32)>(),
+                            Err(SingleRowError::TypeCheckFailed(_))
+                        );
+                    }
+                }
+
+                // Correct type
+                {
+                    assert_matches!(qr_good_data.rows::<(&str, bool)>(), Ok(_));
+                    assert_matches!(qr_bad_data.rows::<(&str, bool)>(), Ok(_));
+
+                    assert_matches!(qr_good_data.first_row::<(&str, bool)>(), Ok(_));
+                    assert_matches!(
+                        qr_bad_data.first_row::<(&str, bool)>(),
+                        Err(FirstRowError::DeserializationFailed(_))
+                    );
+                    assert_matches!(qr_good_data.maybe_first_row::<(&str, bool)>(), Ok(_));
+                    assert_matches!(
+                        qr_bad_data.maybe_first_row::<(&str, bool)>(),
+                        Err(MaybeFirstRowError::DeserializationFailed(_))
+                    );
+
+                    assert_matches!(qr_good_data.single_row::<(&str, bool)>(), Ok(_));
+                    assert_matches!(
+                        qr_bad_data.single_row::<(&str, bool)>(),
+                        Err(SingleRowError::DeserializationFailed(_))
+                    );
+                }
+            }
+
+            // RESULT::Rows with 2 rows
+            {
+                let rr = sample_raw_rows(2, 2);
+                let rqr = QueryResult::new(Some(rr), None, Vec::new());
+                let qr = rqr.rows_deserializer().unwrap().unwrap();
+
+                assert_matches!(rqr.result_not_rows(), Err(ResultNotRowsError));
+
+                // Type check error
+                {
+                    assert_matches!(qr.rows::<(i32, i32)>(), Err(RowsError::TypeCheckFailed(_)));
+
+                    assert_matches!(
+                        qr.first_row::<(i32, i32)>(),
+                        Err(FirstRowError::TypeCheckFailed(_))
+                    );
+                    assert_matches!(
+                        qr.maybe_first_row::<(i32, i32)>(),
+                        Err(MaybeFirstRowError::TypeCheckFailed(_))
+                    );
+
+                    assert_matches!(
+                        qr.single_row::<(i32, i32)>(),
+                        Err(SingleRowError::TypeCheckFailed(_))
+                    );
+                }
+
+                // Correct type
+                {
+                    assert_matches!(qr.rows::<(&str, bool)>(), Ok(_));
+
+                    assert_matches!(qr.first_row::<(&str, bool)>(), Ok(_));
+                    assert_matches!(qr.maybe_first_row::<(&str, bool)>(), Ok(_));
+
+                    assert_matches!(
+                        qr.single_row::<(&str, bool)>(),
+                        Err(SingleRowError::UnexpectedRowCount(2))
+                    );
+                }
+            }
+        }
+    }
 }
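
Illustrative sketch, not part of the patch: the session.rs hunks below keep the public `Session` surface on the legacy type by converting at the boundary (`into_legacy_result`, `RawIterator::into_legacy`). The `fetch` helper, the table `ks.tab`, and the error boxing are assumptions; `query_unpaged` with `&[]` values follows the examples elsewhere in this patch.

```rust
use scylla::{LegacyQueryResult, Session};

async fn fetch(session: &Session) -> Result<(), Box<dyn std::error::Error>> {
    // Still returns the legacy result type; the new QueryResult is converted
    // internally via into_legacy_result().
    let result: LegacyQueryResult = session
        .query_unpaged("SELECT a FROM ks.tab", &[])
        .await?;
    println!("tracing id: {:?}", result.tracing_id);
    Ok(())
}
```
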
diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs
index c1cc7415fd..4315909a12 100644
--- a/scylla/src/transport/session.rs
+++ b/scylla/src/transport/session.rs
@@ -4,6 +4,7 @@
 use crate::batch::batch_values;
 #[cfg(feature = "cloud")]
 use crate::cloud::CloudConfig;
+use crate::LegacyQueryResult;

 use crate::history;
 use crate::history::HistoryListener;
@@ -17,7 +18,7 @@
 use async_trait::async_trait;
 use futures::future::join_all;
 use futures::future::try_join_all;
 use itertools::{Either, Itertools};
-use scylla_cql::frame::response::result::{deser_cql_value, ColumnSpec, Rows};
+use scylla_cql::frame::response::result::{deser_cql_value, ColumnSpec};
 use scylla_cql::frame::response::NonErrorResponse;
 use scylla_cql::types::serialize::batch::BatchValues;
 use scylla_cql::types::serialize::row::{SerializeRow, SerializedValues};
@@ -42,11 +43,12 @@
 use super::connection::QueryResponse;
 use super::connection::SslConfig;
 use super::errors::TracingProtocolError;
 use super::execution_profile::{ExecutionProfile, ExecutionProfileHandle, ExecutionProfileInner};
+use super::iterator::RawIterator;
+use super::legacy_query_result::MaybeFirstRowTypedError;
 #[cfg(feature = "cloud")]
 use super::node::CloudEndpoint;
 use super::node::KnownNode;
 use super::partitioner::PartitionerName;
-use super::query_result::MaybeFirstRowTypedError;
 use super::topology::UntranslatedPeer;
 use super::{NodeRef, SelfIdentity};
 use crate::cql_to_rust::FromRow;
@@ -61,7 +63,7 @@
 use crate::transport::cluster::{Cluster, ClusterData, ClusterNeatDebug};
 use crate::transport::connection::{Connection, ConnectionConfig, VerifiedKeyspaceName};
 use crate::transport::connection_pool::PoolConfig;
 use crate::transport::host_filter::HostFilter;
-use crate::transport::iterator::{PreparedIteratorConfig, RowIterator};
+use crate::transport::iterator::{LegacyRowIterator, PreparedIteratorConfig};
 use crate::transport::load_balancing::{self, RoutingInfo};
 use crate::transport::metrics::Metrics;
 use crate::transport::node::Node;
@@ -641,7 +643,7 @@ impl Session
         &self,
         query: impl Into<Query>,
         values: impl SerializeRow,
-    ) -> Result<QueryResult, QueryError> {
+    ) -> Result<LegacyQueryResult, QueryError> {
         let query = query.into();
         let (result, paging_state_response) = self
             .query(&query, values, None, PagingState::start())
             .await?;
@@ -706,7 +708,7 @@
         query: impl Into<Query>,
         values: impl SerializeRow,
         paging_state: PagingState,
-    ) -> Result<(QueryResult, PagingStateResponse), QueryError> {
+    ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> {
         let query = query.into();
         self.query(
             &query,
@@ -734,7 +736,7 @@
         values: impl SerializeRow,
         page_size: Option<PageSize>,
         paging_state: PagingState,
-    ) -> Result<(QueryResult, PagingStateResponse), QueryError> {
+    ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> {
         let execution_profile = query
             .get_execution_profile_handle()
             .unwrap_or_else(|| self.get_default_execution_profile_handle())
             .access();
@@ -821,7 +823,7 @@
         self.handle_auto_await_schema_agreement(&response).await?;

         let (result, paging_state) = response.into_query_result_and_paging_state()?;
-        span.record_result_fields(&result);
+        let result = result.into_legacy_result()?;
         Ok((result, paging_state))
     }
@@ -901,7 +903,7 @@
         &self,
         query: impl Into<Query>,
         values: impl SerializeRow,
-    ) -> Result<RowIterator, QueryError> {
+    ) -> Result<LegacyRowIterator, QueryError> {
         let query: Query = query.into();

         let execution_profile = query
             .get_execution_profile_handle()
             .unwrap_or_else(|| self.get_default_execution_profile_handle())
             .access();

         if values.is_empty() {
-            RowIterator::new_for_query(
+            RawIterator::new_for_query(
                 query,
                 execution_profile,
                 self.cluster.get_data(),
                 self.metrics.clone(),
             )
             .await
+            .map(RawIterator::into_legacy)
         } else {
-            // Making RowIterator::new_for_query work with values is too hard (if even possible)
+            // Making RawIterator::new_for_query work with values is too hard (if even possible)
             // so instead of sending one prepare to a specific connection on each iterator query,
             // we fully prepare a statement beforehand.
             let prepared = self.prepare(query).await?;
             let values = prepared.serialize_values(&values)?;
-            RowIterator::new_for_prepared_statement(PreparedIteratorConfig {
+            RawIterator::new_for_prepared_statement(PreparedIteratorConfig {
                 prepared,
                 values,
                 execution_profile,
@@ -931,6 +934,7 @@
                 metrics: self.metrics.clone(),
             })
             .await
+            .map(RawIterator::into_legacy)
         }
     }
@@ -1073,7 +1077,7 @@
         &self,
         prepared: &PreparedStatement,
         values: impl SerializeRow,
-    ) -> Result<QueryResult, QueryError> {
+    ) -> Result<LegacyQueryResult, QueryError> {
         let serialized_values = prepared.serialize_values(&values)?;
         let (result, paging_state) = self
             .execute(prepared, &serialized_values, None, PagingState::start())
             .await?;
@@ -1143,7 +1147,7 @@
         prepared: &PreparedStatement,
         values: impl SerializeRow,
         paging_state: PagingState,
-    ) -> Result<(QueryResult, PagingStateResponse), QueryError> {
+    ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> {
         let serialized_values = prepared.serialize_values(&values)?;
         let page_size = prepared.get_validated_page_size();
         self.execute(prepared, &serialized_values, Some(page_size), paging_state)
@@ -1166,7 +1170,7 @@
         serialized_values: &SerializedValues,
         page_size: Option<PageSize>,
         paging_state: PagingState,
-    ) -> Result<(QueryResult, PagingStateResponse), QueryError> {
+    ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> {
         let values_ref = &serialized_values;
         let paging_state_ref = &paging_state;
@@ -1256,7 +1260,7 @@
         self.handle_auto_await_schema_agreement(&response).await?;

         let (result, paging_state) = response.into_query_result_and_paging_state()?;
-        span.record_result_fields(&result);
+        let result = result.into_legacy_result()?;
         Ok((result, paging_state))
     }
@@ -1304,7 +1308,7 @@
         &self,
         prepared: impl Into<PreparedStatement>,
         values: impl SerializeRow,
-    ) -> Result<RowIterator, QueryError> {
+    ) -> Result<LegacyRowIterator, QueryError> {
         let prepared = prepared.into();
         let serialized_values = prepared.serialize_values(&values)?;

         let execution_profile = prepared
             .get_execution_profile_handle()
             .unwrap_or_else(|| self.get_default_execution_profile_handle())
             .access();

-        RowIterator::new_for_prepared_statement(PreparedIteratorConfig {
+        RawIterator::new_for_prepared_statement(PreparedIteratorConfig {
             prepared,
             values: serialized_values,
             execution_profile,
@@ -1321,6 +1325,7 @@
             metrics: self.metrics.clone(),
         })
         .await
+        .map(RawIterator::into_legacy)
     }

     /// Perform a batch request.\
@@ -1372,7 +1377,7 @@
         &self,
         batch: &Batch,
         values: impl BatchValues,
-    ) -> Result<QueryResult, QueryError> {
+    ) -> Result<LegacyQueryResult, QueryError> {
         // Shard-awareness behavior for batch will be to pick shard based on first batch statement's shard
         // If users batch statements by shard, they will be rewarded with full shard awareness
@@ -1449,10 +1454,9 @@
             .await?;

         let result = match run_query_result {
-            RunQueryResult::IgnoredWriteError => QueryResult::mock_empty(),
-            RunQueryResult::Completed(response) => response,
+            RunQueryResult::IgnoredWriteError => LegacyQueryResult::mock_empty(),
+            RunQueryResult::Completed(response) => response.into_legacy_result()?,
         };
-        span.record_result_fields(&result);

         Ok(result)
     }
@@ -2171,18 +2175,6 @@
         }
     }

-    pub(crate) fn record_result_fields(&self, result: &QueryResult) {
-        self.span.record("result_size", result.serialized_size);
-        if let Some(rows) = result.rows.as_ref() {
-            self.span.record("result_rows", rows.len());
-        }
-    }
-
-    pub(crate) fn record_rows_fields(&self, rows: &Rows) {
-        self.span.record("result_size", rows.serialized_size);
-        self.span.record("result_rows", rows.rows.len());
-    }
-
     pub(crate) fn record_replicas<'a>(&'a self, replicas: &'a [(impl Borrow<Arc<Node>>, Shard)]) {
         struct ReplicaIps<'a, N>(&'a [(N, Shard)]);
         impl<'a, N> Display for ReplicaIps<'a, N>
diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs
index ff2dc1e64b..aa1b8e2987 100644
--- a/scylla/src/transport/session_test.rs
+++ b/scylla/src/transport/session_test.rs
@@ -22,7 +22,7 @@
 use crate::utils::test_utils::{
 };
 use crate::CachingSession;
 use crate::ExecutionProfile;
-use crate::QueryResult;
+use crate::LegacyQueryResult;
 use crate::{Session, SessionBuilder};
 use assert_matches::assert_matches;
 use futures::{FutureExt, StreamExt, TryStreamExt};
@@ -960,7 +960,7 @@ async fn test_tracing()
 async fn test_tracing_query(session: &Session, ks: String) {
     // A query without tracing enabled has no tracing uuid in result
     let untraced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks));
-    let untraced_query_result: QueryResult =
+    let untraced_query_result: LegacyQueryResult =
         session.query_unpaged(untraced_query, &[]).await.unwrap();

     assert!(untraced_query_result.tracing_id.is_none());
@@ -969,7 +969,8 @@
     let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks));
     traced_query.config.tracing = true;

-    let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap();
+    let traced_query_result: LegacyQueryResult =
+        session.query_unpaged(traced_query, &[]).await.unwrap();
     assert!(traced_query_result.tracing_id.is_some());

     // Querying this uuid from tracing table gives some results
@@ -983,7 +984,7 @@ async fn test_tracing_execute(session: &Session, ks: String)
         .await
         .unwrap();

-    let untraced_prepared_result: QueryResult = session
+    let untraced_prepared_result: LegacyQueryResult = session
         .execute_unpaged(&untraced_prepared, &[])
         .await
         .unwrap();
@@ -998,7 +999,7 @@

     traced_prepared.config.tracing = true;

-    let traced_prepared_result: QueryResult = session
+    let traced_prepared_result: LegacyQueryResult = session
         .execute_unpaged(&traced_prepared, &[])
         .await
         .unwrap();
@@ -1035,7 +1036,8 @@ async fn test_get_tracing_info(session: &Session, ks: String)
     let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks));
     traced_query.config.tracing = true;

-    let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap();
+    let traced_query_result: LegacyQueryResult =
+        session.query_unpaged(traced_query, &[]).await.unwrap();
     let tracing_id: Uuid = traced_query_result.tracing_id.unwrap();

     // Getting tracing info from session using this uuid works
@@ -1125,7 +1127,8 @@ async fn test_tracing_batch(session: &Session, ks: String)
     let mut untraced_batch: Batch = Default::default();
     untraced_batch.append_statement(&format!("INSERT INTO {}.tab (a) VALUES('a')", ks)[..]);

-    let untraced_batch_result: QueryResult = session.batch(&untraced_batch, ((),)).await.unwrap();
+    let untraced_batch_result: LegacyQueryResult =
+        session.batch(&untraced_batch, ((),)).await.unwrap();
     assert!(untraced_batch_result.tracing_id.is_none());

     // Batch with tracing enabled has a tracing uuid in result
     let mut traced_batch: Batch = Default::default();
     traced_batch.append_statement(&format!("INSERT INTO {}.tab (a) VALUES('a')", ks)[..]);
     traced_batch.config.tracing = true;

-    let traced_batch_result: QueryResult = session.batch(&traced_batch, ((),)).await.unwrap();
+    let traced_batch_result: LegacyQueryResult = session.batch(&traced_batch, ((),)).await.unwrap();
     assert!(traced_batch_result.tracing_id.is_some());

     assert_in_tracing_table(session, traced_batch_result.tracing_id.unwrap()).await;
@@ -2567,7 +2570,7 @@ async fn test_batch_lwts()
     batch.append_statement("INSERT INTO tab (p1, c1, r1, r2) VALUES (0, 123, 321, 312)");
     batch.append_statement("UPDATE tab SET r1 = 1 WHERE p1 = 0 AND c1 = 0 IF r2 = 0");

-    let batch_res: QueryResult = session.batch(&batch, ((), (), ())).await.unwrap();
+    let batch_res: LegacyQueryResult = session.batch(&batch, ((), (), ())).await.unwrap();

     // Scylla returns 5 columns, but Cassandra returns only 1
     let is_scylla: bool = batch_res.col_specs().len() == 5;
@@ -2579,7 +2582,11 @@
     }
 }

-async fn test_batch_lwts_for_scylla(session: &Session, batch: &Batch, batch_res: QueryResult) {
+async fn test_batch_lwts_for_scylla(
+    session: &Session,
+    batch: &Batch,
+    batch_res: LegacyQueryResult,
+) {
     // Alias required by clippy
     type IntOrNull = Option<i32>;
@@ -2600,7 +2607,7 @@
     assert_eq!(batch_res_rows, expected_batch_res_rows);

     let prepared_batch: Batch = session.prepare_batch(batch).await.unwrap();
-    let prepared_batch_res: QueryResult =
+    let prepared_batch_res: LegacyQueryResult =
         session.batch(&prepared_batch, ((), (), ())).await.unwrap();

     let prepared_batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> =
@@ -2619,7 +2626,11 @@
     assert_eq!(prepared_batch_res_rows, expected_prepared_batch_res_rows);
 }

-async fn test_batch_lwts_for_cassandra(session: &Session, batch: &Batch, batch_res: QueryResult) {
+async fn test_batch_lwts_for_cassandra(
+    session: &Session,
+    batch: &Batch,
+    batch_res: LegacyQueryResult,
+) {
     // Alias required by clippy
     type IntOrNull = Option<i32>;
@@ -2636,7 +2647,7 @@
     assert_eq!(batch_res_rows, expected_batch_res_rows);

     let prepared_batch: Batch = session.prepare_batch(batch).await.unwrap();
-    let prepared_batch_res: QueryResult =
+    let prepared_batch_res: LegacyQueryResult =
         session.batch(&prepared_batch, ((), (), ())).await.unwrap();

     // Returned columns are:
diff --git a/scylla/tests/integration/tablets.rs b/scylla/tests/integration/tablets.rs
index 3f1356840f..2bdf969877 100644
--- a/scylla/tests/integration/tablets.rs
+++ b/scylla/tests/integration/tablets.rs
@@ -17,7 +17,7 @@
 use scylla::transport::ClusterData;
 use scylla::transport::Node;
 use scylla::transport::NodeRef;
 use scylla::ExecutionProfile;
-use scylla::QueryResult;
+use scylla::LegacyQueryResult;
 use scylla::Session;

 use scylla::transport::errors::QueryError;
@@ -185,7 +185,7 @@
     cluster: &ClusterData,
     statement: &PreparedStatement,
     values: &dyn SerializeRow,
-) -> Result<Vec<QueryResult>, QueryError> {
+) -> Result<Vec<LegacyQueryResult>, QueryError> {
     let tasks = cluster.get_nodes_info().iter().flat_map(|node| {
         let shard_count: u16 = node.sharder().unwrap().nr_shards.into();
         (0..shard_count).map(|shard| {
@@ -210,7 +210,7 @@
 async fn send_unprepared_query_everywhere(
     session: &Session,
     cluster: &ClusterData,
     query: &Query,
-) -> Result<Vec<QueryResult>, QueryError> {
+) -> Result<Vec<LegacyQueryResult>, QueryError> {
     let tasks = cluster.get_nodes_info().iter().flat_map(|node| {
         let shard_count: u16 = node.sharder().unwrap().nr_shards.into();
         (0..shard_count).map(|shard| {