odbc_api/buffers/columnar.rs
use std::{
    collections::HashSet,
    num::NonZeroUsize,
    str::{from_utf8, Utf8Error},
};

use crate::{
    columnar_bulk_inserter::BoundInputSlice,
    cursor::TruncationInfo,
    fixed_sized::Pod,
    handles::{CDataMut, Statement, StatementRef},
    parameter::WithDataType,
    result_set_metadata::utf8_display_sizes,
    Error, ResultSetMetadata, RowSetBuffer,
};

use super::{Indicator, TextColumn};

impl<C: ColumnBuffer> ColumnarBuffer<C> {
    /// Create a new instance from columns with unique indices. Capacity of the buffer will be the
    /// minimum capacity of the columns. The constructed buffer is always empty (i.e. the number of
    /// valid rows is considered to be zero).
    ///
    /// You do not want to call this constructor directly unless you want to provide your own
    /// buffer implementation. Most users of this crate may want to use constructors like
    /// [`crate::buffers::ColumnarAnyBuffer::from_descs`] or
    /// [`crate::buffers::TextRowSet::from_max_str_lens`] instead.
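    ///
    /// # Example
    ///
    /// A minimal sketch using plain `Vec<i32>` columns, which implement [`ColumnBuffer`] because
    /// `i32` is `Pod`:
    ///
    /// ```
    /// use odbc_api::buffers::ColumnarBuffer;
    ///
    /// // Two `i32` columns bound to buffer indices 1 and 2, with room for 10 rows each.
    /// let buffer = ColumnarBuffer::new(vec![(1u16, vec![0i32; 10]), (2, vec![0i32; 10])]);
    /// assert_eq!(0, buffer.num_rows());
    /// assert_eq!(2, buffer.num_cols());
    /// ```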
    pub fn new(columns: Vec<(u16, C)>) -> Self {
        // Assert capacity
        let capacity = columns
            .iter()
            .map(|(_, col)| col.capacity())
            .min()
            .unwrap_or(0);

        // Assert uniqueness of indices
        let mut indices = HashSet::new();
        if columns
            .iter()
            .any(move |&(col_index, _)| !indices.insert(col_index))
        {
            panic!("Column indices must be unique.")
        }

        unsafe { Self::new_unchecked(capacity, columns) }
    }

    /// # Safety
    ///
    /// * Indices must be unique
    /// * Columns all must have enough `capacity`.
    pub unsafe fn new_unchecked(capacity: usize, columns: Vec<(u16, C)>) -> Self {
        ColumnarBuffer {
            num_rows: Box::new(0),
            row_capacity: capacity,
            columns,
        }
    }

    /// Number of valid rows in the buffer.
    pub fn num_rows(&self) -> usize {
        *self.num_rows
    }

    /// Return the number of columns in the row set.
    pub fn num_cols(&self) -> usize {
        self.columns.len()
    }

    /// Use this method to gain read access to the actual column data.
    ///
    /// # Parameters
    ///
    /// * `buffer_index`: Please note that the buffer index is not identical to the ODBC column
    ///   index. For one it is zero based. It also indexes the buffers bound to the cursor, not the
    ///   columns of the output result set. This is important, because not every column needs to be
    ///   bound. Some columns may simply be ignored. That being said, if every column of the output
    ///   is bound in the buffer, in the same order in which they are enumerated in the result set,
    ///   the relationship between column index and buffer index is
    ///   `buffer_index = column_index - 1`.
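    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a buffer with a single `Vec<i32>` column bound to ODBC column 1:
    ///
    /// ```
    /// use odbc_api::buffers::ColumnarBuffer;
    ///
    /// let buffer = ColumnarBuffer::new(vec![(1u16, vec![0i32; 10])]);
    /// // No rows have been fetched into the buffer yet, so the view is empty.
    /// let view: &[i32] = buffer.column(0);
    /// assert!(view.is_empty());
    /// ```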
    pub fn column(&self, buffer_index: usize) -> C::View<'_> {
        self.columns[buffer_index].1.view(*self.num_rows)
    }
}

unsafe impl<C> RowSetBuffer for ColumnarBuffer<C>
where
    C: ColumnBuffer,
{
    fn bind_type(&self) -> usize {
        0 // Specify columnar binding
    }

    fn row_array_size(&self) -> usize {
        self.row_capacity
    }

    fn mut_num_fetch_rows(&mut self) -> &mut usize {
        self.num_rows.as_mut()
    }

    unsafe fn bind_colmuns_to_cursor(&mut self, mut cursor: StatementRef<'_>) -> Result<(), Error> {
        for (col_number, column) in &mut self.columns {
            cursor.bind_col(*col_number, column).into_result(&cursor)?;
        }
        Ok(())
    }

    fn find_truncation(&self) -> Option<TruncationInfo> {
        self.columns
            .iter()
            .enumerate()
            .find_map(|(buffer_index, (_col_index, col_buffer))| {
                col_buffer
                    .has_truncated_values(*self.num_rows)
                    .map(|indicator| TruncationInfo {
                        indicator: indicator.length(),
                        buffer_index,
                    })
            })
    }
}

/// A columnar buffer intended to be bound with [`crate::Cursor::bind_buffer`] in order to obtain
/// results from a cursor.
///
/// Binds to the result set column wise. This is usually helpful in data engineering or data
/// science tasks. This buffer type can be used in situations where the schema of the queried data
/// is known at compile time, as well as for generic applications which work with a wide range of
/// different data.
///
/// # Example: Fetching results column wise with `ColumnarBuffer`.
///
/// Consider querying a table with two columns `year` and `name`.
///
/// ```no_run
/// use odbc_api::{
///     Environment, Cursor, ConnectionOptions,
///     buffers::{AnySlice, BufferDesc, Item, ColumnarAnyBuffer},
/// };
///
/// let env = Environment::new()?;
///
/// let batch_size = 1000; // Maximum number of rows in each row set
/// let buffer_description = [
///     // We know year to be a Nullable SMALLINT
///     BufferDesc::I16 { nullable: true },
///     // and name to be a required VARCHAR
///     BufferDesc::Text { max_str_len: 255 },
/// ];
///
/// // Creates a columnar buffer fitting the buffer description with the capacity of `batch_size`.
/// let mut buffer = ColumnarAnyBuffer::from_descs(batch_size, buffer_description);
///
/// let mut conn = env.connect(
///     "YourDatabase", "SA", "My@Test@Password1",
///     ConnectionOptions::default(),
/// )?;
/// let query = "SELECT year, name FROM Birthdays;";
/// let params = ();
/// let timeout_sec = None;
/// if let Some(cursor) = conn.execute(query, params, timeout_sec)? {
///     // Bind buffer to cursor. We bind the buffer as a mutable reference here, which makes it
///     // easier to reuse for other queries, but we could have taken ownership.
///     let mut row_set_cursor = cursor.bind_buffer(&mut buffer)?;
///     // Loop over row sets
///     while let Some(row_set) = row_set_cursor.fetch()? {
///         // Process years in row set
///         let year_col = row_set.column(0);
///         for year in i16::as_nullable_slice(year_col)
///             .expect("Year column buffer expected to be nullable Int")
///         {
///             // Iterate over `Option<i16>` with it ..
///         }
///         // Process names in row set
///         let name_col = row_set.column(1);
///         for name in name_col
///             .as_text_view()
///             .expect("Name column buffer expected to be text")
///             .iter()
///         {
///             // Iterate over `Option<&CStr>` ..
///         }
///     }
/// }
/// # Ok::<(), odbc_api::Error>(())
/// ```
///
/// This second example changes two things: we do not know the schema in advance and use the
/// SQL DataType to determine the best fit for the buffers. Also, we want to do everything in a
/// function and return a `Cursor` with an already bound buffer. This approach is best if you have
/// few but very long queries, so the overhead of allocating buffers is negligible and you want to
/// have an easier time with the borrow checker.
///
/// ```no_run
/// use odbc_api::{
///     Connection, BlockCursor, Error, Cursor, Nullability, ResultSetMetadata,
///     buffers::{AnyBuffer, BufferDesc, ColumnarAnyBuffer, ColumnarBuffer},
/// };
///
/// fn get_birthdays<'a>(conn: &'a mut Connection)
///     -> Result<BlockCursor<impl Cursor + 'a, ColumnarAnyBuffer>, Error>
/// {
///     let query = "SELECT year, name FROM Birthdays;";
///     let params = ();
///     let timeout_sec = None;
///     let mut cursor = conn.execute(query, params, timeout_sec)?.unwrap();
///     let mut column_description = Default::default();
///     let buffer_description: Vec<_> = (0..cursor.num_result_cols()?).map(|index| {
///         cursor.describe_col(index as u16 + 1, &mut column_description)?;
///         let nullable = matches!(
///             column_description.nullability,
///             Nullability::Unknown | Nullability::Nullable
///         );
///         let desc = BufferDesc::from_data_type(
///             column_description.data_type,
///             nullable
///         ).unwrap_or(BufferDesc::Text { max_str_len: 255 });
///         Ok(desc)
///     }).collect::<Result<_, Error>>()?;
///
///     // Row set size of 5000 rows.
///     let buffer = ColumnarAnyBuffer::from_descs(5000, buffer_description);
///     // Bind buffer and take ownership over it.
///     cursor.bind_buffer(buffer)
/// }
/// ```
pub struct ColumnarBuffer<C> {
    /// A mutable pointer to num_rows_fetched is passed to the C-API. It is used to write back the
    /// number of fetched rows. `num_rows` is heap allocated, so the pointer is not invalidated,
    /// even if the `ColumnarBuffer` instance is moved in memory.
    num_rows: Box<usize>,
    /// aka: batch size, row array size
    row_capacity: usize,
    /// Column index and bound buffer
    columns: Vec<(u16, C)>,
}

/// A buffer for a single column intended to be used together with [`ColumnarBuffer`].
///
/// # Safety
///
/// Views must not allow access to uninitialized / invalid rows.
pub unsafe trait ColumnBuffer: CDataMut {
    /// Immutable view on the column data. Used in safe abstractions. User must not be able to
    /// access uninitialized or invalid memory of the buffer through this interface.
    type View<'a>
    where
        Self: 'a;

    /// `valid_rows` may not exceed the actual number of valid rows filled by the ODBC API. The
    /// column buffer does not know how many elements were in the last row group, and therefore can
    /// not guarantee the accessed element to be valid and in a defined state. It also can not
    /// panic on accessing an undefined element.
    fn view(&self, valid_rows: usize) -> Self::View<'_>;

    /// Fills the column with the default representation of values, between `from` and `to` index.
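    ///
    /// A minimal sketch using a plain `Vec<i32>` column buffer (assuming the [`ColumnBuffer`]
    /// trait is in scope):
    ///
    /// ```
    /// use odbc_api::buffers::ColumnBuffer;
    ///
    /// let mut col = vec![1i32, 2, 3, 4];
    /// // Reset the elements at indices 1 and 2 to their default value (`0`).
    /// col.fill_default(1, 3);
    /// assert_eq!(vec![1, 0, 0, 4], col);
    /// ```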
    fn fill_default(&mut self, from: usize, to: usize);

    /// Current capacity of the column
    fn capacity(&self) -> usize;

    /// `Some` if any value is truncated in the range [0, num_rows).
    ///
    /// After fetching data we may want to know if any value has been truncated due to the buffer
    /// not being able to hold elements of that size. This method checks the indicator buffer
    /// element wise.
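    ///
    /// A minimal sketch: plain `Vec<T>` buffers carry no indicators, so they can never report a
    /// truncation (assuming the [`ColumnBuffer`] trait is in scope):
    ///
    /// ```
    /// use odbc_api::buffers::ColumnBuffer;
    ///
    /// let col = vec![0i32; 5];
    /// assert!(col.has_truncated_values(5).is_none());
    /// ```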
    fn has_truncated_values(&self, num_rows: usize) -> Option<Indicator>;
}

unsafe impl<T> ColumnBuffer for WithDataType<T>
where
    T: ColumnBuffer,
{
    type View<'a>
        = T::View<'a>
    where
        T: 'a;

    fn view(&self, valid_rows: usize) -> T::View<'_> {
        self.value.view(valid_rows)
    }

    fn fill_default(&mut self, from: usize, to: usize) {
        self.value.fill_default(from, to)
    }

    fn capacity(&self) -> usize {
        self.value.capacity()
    }

    fn has_truncated_values(&self, num_rows: usize) -> Option<Indicator> {
        self.value.has_truncated_values(num_rows)
    }
}

unsafe impl<'a, T> BoundInputSlice<'a> for WithDataType<T>
where
    T: BoundInputSlice<'a>,
{
    type SliceMut = T::SliceMut;

    unsafe fn as_view_mut(
        &'a mut self,
        parameter_index: u16,
        stmt: StatementRef<'a>,
    ) -> Self::SliceMut {
        self.value.as_view_mut(parameter_index, stmt)
    }
}

/// This row set binds a string buffer to each column, which is large enough to hold the maximum
/// length string representation for each element in the row set at once.
///
/// # Example
///
/// ```no_run
/// //! A program executing a query and printing the result as csv to standard out. Requires
/// //! `anyhow` and `csv` crate.
///
/// use anyhow::Error;
/// use odbc_api::{buffers::TextRowSet, Cursor, Environment, ConnectionOptions, ResultSetMetadata};
/// use std::{
///     ffi::CStr,
///     io::{stdout, Write},
///     path::PathBuf,
/// };
///
/// /// Maximum number of rows fetched with one row set. Fetching batches of rows is usually much
/// /// faster than fetching individual rows.
/// const BATCH_SIZE: usize = 5000;
///
/// fn main() -> Result<(), Error> {
///     // Write csv to standard out
///     let out = stdout();
///     let mut writer = csv::Writer::from_writer(out);
///
///     // There should only be one ODBC environment in the entire process.
///     let environment = Environment::new()?;
///
///     // Connect using a DSN. Alternatively we could have used a connection string
///     let mut connection = environment.connect(
///         "DataSourceName",
///         "Username",
///         "Password",
///         ConnectionOptions::default(),
///     )?;
///
///     // Execute a one-off query without any parameters.
///     let query = "SELECT * FROM TableName";
///     let params = ();
///     let timeout_sec = None;
///     match connection.execute(query, params, timeout_sec)? {
///         Some(mut cursor) => {
///             // Write the column names to stdout
///             let mut headline: Vec<String> = cursor.column_names()?.collect::<Result<_, _>>()?;
///             writer.write_record(headline)?;
///
///             // Use schema in cursor to initialize a text buffer large enough to hold the largest
///             // possible strings for each column up to an upper limit of 4KiB
///             let mut buffers = TextRowSet::for_cursor(BATCH_SIZE, &mut cursor, Some(4096))?;
///             // Bind the buffer to the cursor. It is now being filled with every call to fetch.
///             let mut row_set_cursor = cursor.bind_buffer(&mut buffers)?;
///
///             // Iterate over batches
///             while let Some(batch) = row_set_cursor.fetch()? {
///                 // Within a batch, iterate over every row
///                 for row_index in 0..batch.num_rows() {
///                     // Within a row iterate over every column
///                     let record = (0..batch.num_cols()).map(|col_index| {
///                         batch
///                             .at(col_index, row_index)
///                             .unwrap_or(&[])
///                     });
///                     // Writes row as csv
///                     writer.write_record(record)?;
///                 }
///             }
///         }
///         None => {
///             eprintln!(
///                 "Query came back empty. No output has been created."
///             );
///         }
///     }
///
///     Ok(())
/// }
/// ```
pub type TextRowSet = ColumnarBuffer<TextColumn<u8>>;

impl TextRowSet {
    /// The resulting text buffer is not in any way tied to the cursor, other than that its buffer
    /// sizes are tailor fitted to the result set the cursor is iterating over.
    ///
    /// This method performs fallible buffer allocations if no upper bound is set, so you may see a
    /// speedup by setting an upper bound using `max_str_limit` (see the example below).
    ///
    /// # Parameters
    ///
    /// * `batch_size`: The maximum number of rows the buffer is able to hold.
    /// * `cursor`: Used to query the display size for each column of the row set. For character
    ///   data the length in characters is multiplied by 4 in order to have enough space for 4 byte
    ///   utf-8 characters. This is a pessimization for some data sources (e.g. SQLite 3) which
    ///   interpret the size of a `VARCHAR(5)` column as 5 bytes rather than 5 characters.
    /// * `max_str_limit`: Some queries make it hard to estimate a sensible upper bound and
    ///   sometimes drivers are just not that good at it. This argument allows you to specify an
    ///   upper bound for the length of character data. Any size reported by the driver is capped
    ///   to this value. In case the upper bound can not be inferred from the metadata reported by
    ///   the driver, the element size is set to this upper bound, too.
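    ///
    /// # Example
    ///
    /// A minimal sketch; `cursor` stands in for any value implementing [`ResultSetMetadata`]:
    ///
    /// ```no_run
    /// # fn example(cursor: &mut impl odbc_api::ResultSetMetadata) -> Result<(), odbc_api::Error> {
    /// use odbc_api::buffers::TextRowSet;
    ///
    /// // Up to 5000 rows per batch, each text value capped at 4 KiB.
    /// let buffer = TextRowSet::for_cursor(5000, cursor, Some(4096))?;
    /// # Ok(())
    /// # }
    /// ```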
    pub fn for_cursor(
        batch_size: usize,
        cursor: &mut impl ResultSetMetadata,
        max_str_limit: Option<usize>,
    ) -> Result<TextRowSet, Error> {
        let buffers = utf8_display_sizes(cursor)?
            .enumerate()
            .map(|(buffer_index, reported_len)| {
                let buffer_index = buffer_index as u16;
                let col_index = buffer_index + 1;
                let max_str_len = reported_len?;
                let buffer = if let Some(upper_bound) = max_str_limit {
                    let max_str_len = max_str_len
                        .map(NonZeroUsize::get)
                        .unwrap_or(upper_bound)
                        .min(upper_bound);
                    TextColumn::new(batch_size, max_str_len)
                } else {
                    let max_str_len = max_str_len.map(NonZeroUsize::get).ok_or(
                        Error::TooLargeColumnBufferSize {
                            buffer_index,
                            num_elements: batch_size,
                            element_size: usize::MAX,
                        },
                    )?;
                    TextColumn::try_new(batch_size, max_str_len).map_err(|source| {
                        Error::TooLargeColumnBufferSize {
                            buffer_index,
                            num_elements: source.num_elements,
                            element_size: source.element_size,
                        }
                    })?
                };

                Ok::<_, Error>((col_index, buffer))
            })
            .collect::<Result<_, _>>()?;
        Ok(TextRowSet {
            row_capacity: batch_size,
            num_rows: Box::new(0),
            columns: buffers,
        })
    }

    /// Creates a text buffer large enough to hold `row_capacity` rows with one column for each
    /// item in `max_str_lengths`, sized to the respective maximum string length.
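    ///
    /// A minimal sketch: three text columns holding values of at most 10, 255 and 512 bytes, with
    /// room for 1000 rows each.
    ///
    /// ```
    /// use odbc_api::buffers::TextRowSet;
    ///
    /// let buffer = TextRowSet::from_max_str_lens(1000, [10, 255, 512]).unwrap();
    /// assert_eq!(3, buffer.num_cols());
    /// assert_eq!(0, buffer.num_rows());
    /// ```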
    pub fn from_max_str_lens(
        row_capacity: usize,
        max_str_lengths: impl IntoIterator<Item = usize>,
    ) -> Result<Self, Error> {
        let buffers = max_str_lengths
            .into_iter()
            .enumerate()
            .map(|(index, max_str_len)| {
                Ok::<_, Error>((
                    (index + 1).try_into().unwrap(),
                    TextColumn::try_new(row_capacity, max_str_len)
                        .map_err(|source| source.add_context(index.try_into().unwrap()))?,
                ))
            })
            .collect::<Result<_, _>>()?;
        Ok(TextRowSet {
            row_capacity,
            num_rows: Box::new(0),
            columns: buffers,
        })
    }

    /// Access the element at the specified position in the row set.
    pub fn at(&self, buffer_index: usize, row_index: usize) -> Option<&[u8]> {
        assert!(row_index < *self.num_rows);
        self.columns[buffer_index].1.value_at(row_index)
    }

    /// Access the element at the specified position in the row set, interpreted as a UTF-8
    /// string.
    pub fn at_as_str(&self, col_index: usize, row_index: usize) -> Result<Option<&str>, Utf8Error> {
        self.at(col_index, row_index).map(from_utf8).transpose()
    }

    /// Indicator value at the specified position. Useful to detect truncation of data.
    ///
    /// # Example
    ///
    /// ```
    /// use odbc_api::buffers::{Indicator, TextRowSet};
    ///
    /// fn is_truncated(buffer: &TextRowSet, col_index: usize, row_index: usize) -> bool {
    ///     match buffer.indicator_at(col_index, row_index) {
    ///         // There is no value, therefore there is no value not fitting in the column buffer.
    ///         Indicator::Null => false,
    ///         // The value did not fit into the column buffer, we do not even know by how much.
    ///         Indicator::NoTotal => true,
    ///         Indicator::Length(total_length) => {
    ///             // If the maximum string length is shorter than the value's total length, the
    ///             // value has been truncated to fit into the buffer.
    ///             buffer.max_len(col_index) < total_length
    ///         }
    ///     }
    /// }
    /// ```
    pub fn indicator_at(&self, buf_index: usize, row_index: usize) -> Indicator {
        assert!(row_index < *self.num_rows);
        self.columns[buf_index].1.indicator_at(row_index)
    }

    /// Maximum length in bytes of elements in a column.
    pub fn max_len(&self, buf_index: usize) -> usize {
        self.columns[buf_index].1.max_len()
    }
}

unsafe impl<T> ColumnBuffer for Vec<T>
where
    T: Pod,
{
    type View<'a> = &'a [T];

    fn view(&self, valid_rows: usize) -> &[T] {
        &self[..valid_rows]
    }

    fn fill_default(&mut self, from: usize, to: usize) {
        for item in &mut self[from..to] {
            *item = Default::default();
        }
    }

    fn capacity(&self) -> usize {
        self.len()
    }

    fn has_truncated_values(&self, _num_rows: usize) -> Option<Indicator> {
        None
    }
}

#[cfg(test)]
mod tests {

    use crate::buffers::{BufferDesc, ColumnarAnyBuffer};

    #[test]
    #[should_panic(expected = "Column indices must be unique.")]
    fn assert_unique_column_indices() {
        let bd = BufferDesc::I32 { nullable: false };
        ColumnarAnyBuffer::from_descs_and_indices(1, [(1, bd), (2, bd), (1, bd)].iter().cloned());
    }
}
559}