// datafusion_catalog/table.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::any::Any;
use std::borrow::Cow;
use std::fmt::Debug;
use std::sync::Arc;
use crate::session::Session;
use arrow_schema::SchemaRef;
use async_trait::async_trait;
use datafusion_common::Result;
use datafusion_common::{not_impl_err, Constraints, Statistics};
use datafusion_expr::dml::InsertOp;
use datafusion_expr::{
CreateExternalTable, Expr, LogicalPlan, TableProviderFilterPushDown, TableType,
};
use datafusion_physical_plan::ExecutionPlan;
/// A table whose data can be scanned (read) and optionally written to.
///
/// A `TableProvider` is the abstraction DataFusion uses to plan reads
/// (via [`Self::scan`]) and, for providers that support it, writes
/// (via [`Self::insert_into`]) against a named table.
#[async_trait]
pub trait TableProvider: Debug + Sync + Send {
    /// Returns the table provider as [`Any`](std::any::Any) so that it can be
    /// downcast to a specific implementation.
    fn as_any(&self) -> &dyn Any;

    /// Get a reference to the schema for this table
    fn schema(&self) -> SchemaRef;

    /// Get a reference to the constraints of the table.
    ///
    /// Returns:
    /// - `None` for tables that do not support constraints.
    /// - `Some(&Constraints)` for tables supporting constraints.
    ///
    /// Therefore, a `Some(&Constraints::empty())` return value indicates that
    /// this table supports constraints, but there are no constraints.
    fn constraints(&self) -> Option<&Constraints> {
        // Default: constraints are not supported.
        None
    }

    /// Get the type of this table for metadata/catalog purposes.
    fn table_type(&self) -> TableType;

    /// Get the create statement used to create this table, if available.
    fn get_table_definition(&self) -> Option<&str> {
        None
    }

    /// Get the [`LogicalPlan`] of this table, if available.
    fn get_logical_plan(&self) -> Option<Cow<LogicalPlan>> {
        None
    }

    /// Get the default value for a column, if available.
    fn get_column_default(&self, _column: &str) -> Option<&Expr> {
        None
    }

    /// Create an [`ExecutionPlan`] for scanning the table with optionally
    /// specified `projection`, `filter` and `limit`, described below.
    ///
    /// The `ExecutionPlan` is responsible scanning the datasource's
    /// partitions in a streaming, parallelized fashion.
    ///
    /// # Projection
    ///
    /// If specified, only a subset of columns should be returned, in the order
    /// specified. The projection is a set of indexes of the fields in
    /// [`Self::schema`].
    ///
    /// DataFusion provides the projection to scan only the columns actually
    /// used in the query to improve performance, an optimization called
    /// "Projection Pushdown". Some datasources, such as Parquet, can use this
    /// information to go significantly faster when only a subset of columns is
    /// required.
    ///
    /// # Filters
    ///
    /// A list of boolean filter [`Expr`]s to evaluate *during* the scan, in the
    /// manner specified by [`Self::supports_filters_pushdown`]. Only rows for
    /// which *all* of the `Expr`s evaluate to `true` must be returned (aka the
    /// expressions are `AND`ed together).
    ///
    /// To enable filter pushdown you must override
    /// [`Self::supports_filters_pushdown`] as the default implementation does
    /// not and `filters` will be empty.
    ///
    /// DataFusion pushes filtering into the scans whenever possible
    /// ("Filter Pushdown"), and depending on the format and the
    /// implementation of the format, evaluating the predicate during the scan
    /// can increase performance significantly.
    ///
    /// ## Note: Some columns may appear *only* in Filters
    ///
    /// In certain cases, a query may only use a certain column in a Filter that
    /// has been completely pushed down to the scan. In this case, the
    /// projection will not contain all the columns found in the filter
    /// expressions.
    ///
    /// For example, given the query `SELECT t.a FROM t WHERE t.b > 5`,
    /// the plan evolves as follows:
    ///
    /// ```text
    /// Initial plan:
    ///
    ///   ┌────────────────────┐
    ///   │  Projection(t.a)   │
    ///   └────────────────────┘
    ///              ▲
    ///              │
    ///   ┌────────────────────┐
    ///   │  Filter(t.b > 5)   │
    ///   └────────────────────┘
    ///              ▲
    ///              │
    ///   ┌────────────────────┐
    ///   │        Scan        │
    ///   └────────────────────┘
    ///
    /// After Filter Pushdown (if `TableProviderFilterPushDown` returns true,
    /// the filter is pushed into the scan):
    ///
    ///   ┌────────────────────┐
    ///   │  Projection(t.a)   │
    ///   └────────────────────┘
    ///              ▲
    ///              │
    ///   ┌────────────────────┐
    ///   │        Scan        │
    ///   │  filter=(t.b > 5)  │
    ///   └────────────────────┘
    ///
    /// After Projection Pushdown (the scan only needs to *return* t.a, BUT
    /// internally evaluating the predicate still requires reading t.b):
    ///
    ///   ┌────────────────────┐
    ///   │        Scan        │
    ///   │  filter=(t.b > 5)  │
    ///   │  projection=(t.a)  │
    ///   └────────────────────┘
    /// ```
    ///
    /// # Limit
    ///
    /// If `limit` is specified, must only produce *at least* this many rows,
    /// (though it may return more). Like Projection Pushdown and Filter
    /// Pushdown, DataFusion pushes `LIMIT`s as far down in the plan as
    /// possible, called "Limit Pushdown" as some sources can use this
    /// information to improve their performance. Note that if there are any
    /// Inexact filters pushed down, the LIMIT cannot be pushed down. This is
    /// because inexact filters do not guarantee that every filtered row is
    /// removed, so applying the limit could lead to too few rows being available
    /// to return as a final result.
    async fn scan(
        &self,
        state: &dyn Session,
        projection: Option<&Vec<usize>>,
        filters: &[Expr],
        limit: Option<usize>,
    ) -> Result<Arc<dyn ExecutionPlan>>;

    /// Specify if DataFusion should provide filter expressions to the
    /// TableProvider to apply *during* the scan.
    ///
    /// Some TableProviders can evaluate filters more efficiently than the
    /// `Filter` operator in DataFusion, for example by using an index.
    ///
    /// # Parameters and Return Value
    ///
    /// The return `Vec` must have one element for each element of the `filters`
    /// argument. The value of each element indicates if the TableProvider can
    /// apply the corresponding filter during the scan. The position in the return
    /// value corresponds to the expression in the `filters` parameter.
    ///
    /// If the length of the resulting `Vec` does not match the `filters` input
    /// an error will be thrown.
    ///
    /// Each element in the resulting `Vec` is one of the following:
    /// * [`Exact`] or [`Inexact`]: The TableProvider can apply the filter
    ///   during scan
    /// * [`Unsupported`]: The TableProvider cannot apply the filter during scan
    ///
    /// By default, this function returns [`Unsupported`] for all filters,
    /// meaning no filters will be provided to [`Self::scan`].
    ///
    /// [`Unsupported`]: TableProviderFilterPushDown::Unsupported
    /// [`Exact`]: TableProviderFilterPushDown::Exact
    /// [`Inexact`]: TableProviderFilterPushDown::Inexact
    ///
    /// # Example
    ///
    /// ```rust
    /// # use std::any::Any;
    /// # use std::sync::Arc;
    /// # use arrow_schema::SchemaRef;
    /// # use async_trait::async_trait;
    /// # use datafusion_catalog::{TableProvider, Session};
    /// # use datafusion_common::Result;
    /// # use datafusion_expr::{Expr, TableProviderFilterPushDown, TableType};
    /// # use datafusion_physical_plan::ExecutionPlan;
    /// // Define a struct that implements the TableProvider trait
    /// #[derive(Debug)]
    /// struct TestDataSource {}
    ///
    /// #[async_trait]
    /// impl TableProvider for TestDataSource {
    /// # fn as_any(&self) -> &dyn Any { todo!() }
    /// # fn schema(&self) -> SchemaRef { todo!() }
    /// # fn table_type(&self) -> TableType { todo!() }
    /// # async fn scan(&self, s: &dyn Session, p: Option<&Vec<usize>>, f: &[Expr], l: Option<usize>) -> Result<Arc<dyn ExecutionPlan>> {
    ///         todo!()
    /// # }
    ///     // Override the supports_filters_pushdown to evaluate which expressions
    ///     // to accept as pushdown predicates.
    ///     fn supports_filters_pushdown(&self, filters: &[&Expr]) -> Result<Vec<TableProviderFilterPushDown>> {
    ///         // Process each filter
    ///         let support: Vec<_> = filters.iter().map(|expr| {
    ///             match expr {
    ///                 // This example only supports a between expr with a single column named "c1".
    ///                 Expr::Between(between_expr) => {
    ///                     between_expr.expr
    ///                         .try_as_col()
    ///                         .map(|column| {
    ///                             if column.name == "c1" {
    ///                                 TableProviderFilterPushDown::Exact
    ///                             } else {
    ///                                 TableProviderFilterPushDown::Unsupported
    ///                             }
    ///                         })
    ///                         // If there is no column in the expr set the filter to unsupported.
    ///                         .unwrap_or(TableProviderFilterPushDown::Unsupported)
    ///                 }
    ///                 _ => {
    ///                     // For all other cases return Unsupported.
    ///                     TableProviderFilterPushDown::Unsupported
    ///                 }
    ///             }
    ///         }).collect();
    ///         Ok(support)
    ///     }
    /// }
    /// ```
    fn supports_filters_pushdown(
        &self,
        filters: &[&Expr],
    ) -> Result<Vec<TableProviderFilterPushDown>> {
        // Default: decline pushdown for every filter, one entry per input.
        Ok(vec![
            TableProviderFilterPushDown::Unsupported;
            filters.len()
        ])
    }

    /// Get statistics for this table, if available
    fn statistics(&self) -> Option<Statistics> {
        None
    }

    /// Return an [`ExecutionPlan`] to insert data into this table, if
    /// supported.
    ///
    /// The returned plan should return a single row in a UInt64
    /// column called "count" such as the following
    ///
    /// ```text
    /// +-------+
    /// | count |
    /// +-------+
    /// | 6     |
    /// +-------+
    /// ```
    ///
    /// # See Also
    ///
    /// See [`DataSinkExec`] for the common pattern of inserting a
    /// streams of `RecordBatch`es as files to an ObjectStore.
    ///
    /// [`DataSinkExec`]: datafusion_physical_plan::insert::DataSinkExec
    async fn insert_into(
        &self,
        _state: &dyn Session,
        _input: Arc<dyn ExecutionPlan>,
        _insert_op: InsertOp,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        // Default: writes are not supported.
        not_impl_err!("Insert into not implemented for this table")
    }
}
/// A factory which creates [`TableProvider`]s at runtime given a URL.
///
/// For example, this can be used to create a table "on the fly"
/// from a directory of files only when that name is referenced.
#[async_trait]
pub trait TableProviderFactory: Debug + Sync + Send {
    /// Create a TableProvider with the given url
    ///
    /// # Parameters
    /// * `state`: the session that issued the `CREATE EXTERNAL TABLE`
    /// * `cmd`: the parsed `CREATE EXTERNAL TABLE` statement, including the
    ///   table's location URL
    ///
    /// # Errors
    /// NOTE(review): error conditions are implementation-defined — this trait
    /// places no requirements on when `create` may fail.
    async fn create(
        &self,
        state: &dyn Session,
        cmd: &CreateExternalTable,
    ) -> Result<Arc<dyn TableProvider>>;
}