odbc_api/cursor/block_cursor.rs
use std::{mem::MaybeUninit, ptr, thread::panicking};

use crate::{
    handles::{AsStatementRef, Statement as _},
    Error,
};

use super::{error_handling_for_fetch, unbind_buffer_from_cursor, Cursor, RowSetBuffer};

/// In order to save on network overhead, it is recommended to use block cursors instead of
/// fetching values individually. This can greatly reduce the time applications need to fetch data.
/// You can create a block cursor by binding preallocated memory to a cursor using
/// [`Cursor::bind_buffer`]. A block cursor saves a lot of IO overhead by fetching an entire set of
/// rows (called a *rowset*) at once into the buffer bound to it. Reusing the same buffer for each
/// rowset also saves on allocations. A challenge with using block cursors can be database schemas
/// with columns whose individual fields can be very large. In these cases developers can choose
/// to:
///
/// 1. Reserve less memory for each individual field than the schema indicates and decide on a
///    sensible upper bound themselves. This risks truncation of values if they are larger than
///    the upper bound. By using [`BlockCursor::fetch_with_truncation_check`] instead of
///    [`BlockCursor::fetch`], your application can detect these truncations. This is usually the
///    best choice, since individual fields in a table rarely actually take up several GiB of
///    memory.
/// 2. Calculate the number of rows dynamically based on the maximum expected row size.
///    [`crate::buffers::BufferDesc::bytes_per_row`] can be helpful with this task. See the sketch
///    after this list.
/// 3. Not use block cursors and fetch rows slowly with high IO overhead, calling
///    [`CursorRow::get_data`] and [`CursorRow::get_text`] to fetch large individual values.
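///
/// A minimal sketch of option 2 could look like the following. The memory budget, the helper name
/// and the use of [`crate::buffers::ColumnarAnyBuffer`] are assumptions made for this example, not
/// a recommendation.
///
/// ```
/// use odbc_api::buffers::{BufferDesc, ColumnarAnyBuffer};
///
/// fn buffer_for_memory_budget(descs: &[BufferDesc]) -> ColumnarAnyBuffer {
///     // Upper bound for the memory the row set buffer may occupy. Example value: 512 MiB.
///     let memory_budget_bytes = 512 * 1024 * 1024;
///     // Size of a single row, summed over all columns.
///     let row_size_bytes: usize = descs.iter().map(|desc| desc.bytes_per_row()).sum();
///     // Fetch as many rows per batch as fit into the budget, but at least one.
///     let batch_size = (memory_budget_bytes / row_size_bytes.max(1)).max(1);
///     ColumnarAnyBuffer::from_descs(batch_size, descs.iter().copied())
/// }
/// ```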
///
/// See: <https://learn.microsoft.com/en-us/sql/odbc/reference/develop-app/block-cursors>
pub struct BlockCursor<C: AsStatementRef, B> {
    buffer: B,
    cursor: C,
}

impl<C, B> BlockCursor<C, B>
where
    C: Cursor,
{
    pub(crate) fn new(buffer: B, cursor: C) -> Self {
        Self { buffer, cursor }
    }

    /// Fills the bound buffer with the next row set.
    ///
    /// # Return
    ///
    /// `None` if the result set is empty and all row sets have been extracted. `Some` with a
    /// reference to the internal buffer otherwise.
    ///
    /// ```
    /// use odbc_api::{buffers::TextRowSet, Cursor};
    ///
    /// fn print_all_values(mut cursor: impl Cursor) {
    ///     let batch_size = 100;
    ///     let max_string_len = 4000;
    ///     let buffer = TextRowSet::for_cursor(batch_size, &mut cursor, Some(max_string_len))
    ///         .unwrap();
    ///     let mut cursor = cursor.bind_buffer(buffer).unwrap();
    ///     // Iterate over batches
    ///     while let Some(batch) = cursor.fetch().unwrap() {
    ///         // ... print values in batch ...
    ///     }
    /// }
    /// ```
    pub fn fetch(&mut self) -> Result<Option<&B>, Error>
    where
        B: RowSetBuffer,
    {
        self.fetch_with_truncation_check(false)
    }

    /// Fills the bound buffer with the next row set. Should `error_for_truncation` be `true` and
    /// any diagnostic indicate truncation of a value, an error is returned.
    ///
    /// # Return
    ///
    /// `None` if the result set is empty and all row sets have been extracted. `Some` with a
    /// reference to the internal buffer otherwise.
    ///
    /// Call this method to find out whether there are any truncated values in the batch, without
    /// inspecting all its rows and columns.
    ///
    /// ```
    /// use odbc_api::{buffers::TextRowSet, Cursor};
    ///
    /// fn print_all_values(mut cursor: impl Cursor) {
    ///     let batch_size = 100;
    ///     let max_string_len = 4000;
    ///     let buffer = TextRowSet::for_cursor(batch_size, &mut cursor, Some(max_string_len))
    ///         .unwrap();
    ///     let mut cursor = cursor.bind_buffer(buffer).unwrap();
    ///     // Iterate over batches
    ///     while let Some(batch) = cursor.fetch_with_truncation_check(true).unwrap() {
    ///         // ... print values in batch ...
    ///     }
    /// }
    /// ```
    pub fn fetch_with_truncation_check(
        &mut self,
        error_for_truncation: bool,
    ) -> Result<Option<&B>, Error>
    where
        B: RowSetBuffer,
    {
        let mut stmt = self.cursor.as_stmt_ref();
        unsafe {
            let result = stmt.fetch();
            let has_row =
                error_handling_for_fetch(result, stmt, &self.buffer, error_for_truncation)?;
            Ok(has_row.then_some(&self.buffer))
        }
    }

    /// Unbinds the buffer from the underlying statement handle. Potential use cases for this
    /// function include:
    ///
    /// 1. Binding a different buffer to the "same" cursor after letting it point to the next
    ///    result set obtained with [`Cursor::more_results`]. See the sketch below.
    /// 2. Reusing the same buffer with a different statement.
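    ///
    /// A sketch of use case 1 could look like the following. It assumes that
    /// [`Cursor::more_results`] consumes the cursor and yields `Some` cursor for every additional
    /// result set; batch size and string length limit are arbitrary example values.
    ///
    /// ```
    /// use odbc_api::{buffers::TextRowSet, Cursor, Error};
    ///
    /// fn print_all_result_sets(cursor: impl Cursor) -> Result<(), Error> {
    ///     let mut maybe_cursor = Some(cursor);
    ///     while let Some(mut cursor) = maybe_cursor {
    ///         // Each result set may have a different schema, so we build a fresh buffer for it.
    ///         let buffer = TextRowSet::for_cursor(100, &mut cursor, Some(4000))?;
    ///         let mut block_cursor = cursor.bind_buffer(buffer)?;
    ///         while let Some(batch) = block_cursor.fetch()? {
    ///             // ... print values in batch ...
    ///         }
    ///         // Unbind to get the plain cursor back before moving on to the next result set.
    ///         let (cursor, _buffer) = block_cursor.unbind()?;
    ///         maybe_cursor = cursor.more_results()?;
    ///     }
    ///     Ok(())
    /// }
    /// ```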
    pub fn unbind(self) -> Result<(C, B), Error> {
        // In this method we want to deconstruct self and move cursor out of it. We need to
        // negotiate with the compiler a little bit though, since BlockCursor does implement
        // `Drop`.

        // We want to move `cursor` out of self, which would make self partially uninitialized.
        let dont_drop_me = MaybeUninit::new(self);
        let self_ptr = dont_drop_me.as_ptr();

        // Safety: We know `dont_drop_me` is valid at this point so reading the ptr is okay
        let mut cursor = unsafe { ptr::read(&(*self_ptr).cursor) };
        let buffer = unsafe { ptr::read(&(*self_ptr).buffer) };

        // Now that we have cursor out of block cursor, we need to unbind the buffer.
        unbind_buffer_from_cursor(&mut cursor)?;

        Ok((cursor, buffer))
    }
}

impl<C, B> BlockCursor<C, B>
where
    B: RowSetBuffer,
    C: AsStatementRef,
{
    /// Maximum number of rows fetched from the database in the next call to fetch.
    pub fn row_array_size(&self) -> usize {
        self.buffer.row_array_size()
    }
}

impl<C, B> Drop for BlockCursor<C, B>
where
    C: AsStatementRef,
{
    fn drop(&mut self) {
        if let Err(e) = unbind_buffer_from_cursor(&mut self.cursor) {
            // Avoid panicking if we are already panicking. We do not want to mask the original
            // error.
            if !panicking() {
                panic!("Unexpected error unbinding columns: {e:?}")
            }
        }
    }
}