use arrow::array::Float64Array;
use arrow::{
array::{ArrayRef, UInt64Array},
compute::cast,
datatypes::DataType,
datatypes::Field,
};
use datafusion_common::{downcast_value, plan_err, unwrap_or_internal_err, ScalarValue};
use datafusion_common::{DataFusionError, Result};
use datafusion_expr::aggregate_doc_sections::DOC_SECTION_STATISTICAL;
use datafusion_expr::function::{AccumulatorArgs, StateFieldsArgs};
use datafusion_expr::type_coercion::aggregates::NUMERICS;
use datafusion_expr::utils::format_state_name;
use datafusion_expr::{
Accumulator, AggregateUDFImpl, Documentation, Signature, Volatility,
};
use std::any::Any;
use std::collections::HashMap;
use std::fmt::Debug;
use std::mem::size_of_val;
use std::sync::OnceLock;
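// For each regression variant this macro generates the fluent expression
// function (e.g. `regr_slope(y, x)`) and the singleton `AggregateUDF`
// accessor (e.g. `regr_slope_udaf()`).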
macro_rules! make_regr_udaf_expr_and_func {
($EXPR_FN:ident, $AGGREGATE_UDF_FN:ident, $REGR_TYPE:expr) => {
make_udaf_expr!($EXPR_FN, expr_y expr_x, concat!("Compute a linear regression of type [", stringify!($REGR_TYPE), "]"), $AGGREGATE_UDF_FN);
create_func!($EXPR_FN, $AGGREGATE_UDF_FN, Regr::new($REGR_TYPE, stringify!($EXPR_FN)));
}
}
make_regr_udaf_expr_and_func!(regr_slope, regr_slope_udaf, RegrType::Slope);
make_regr_udaf_expr_and_func!(regr_intercept, regr_intercept_udaf, RegrType::Intercept);
make_regr_udaf_expr_and_func!(regr_count, regr_count_udaf, RegrType::Count);
make_regr_udaf_expr_and_func!(regr_r2, regr_r2_udaf, RegrType::R2);
make_regr_udaf_expr_and_func!(regr_avgx, regr_avgx_udaf, RegrType::AvgX);
make_regr_udaf_expr_and_func!(regr_avgy, regr_avgy_udaf, RegrType::AvgY);
make_regr_udaf_expr_and_func!(regr_sxx, regr_sxx_udaf, RegrType::SXX);
make_regr_udaf_expr_and_func!(regr_syy, regr_syy_udaf, RegrType::SYY);
make_regr_udaf_expr_and_func!(regr_sxy, regr_sxy_udaf, RegrType::SXY);
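/// Implements the family of linear-regression aggregate functions
/// (`regr_slope`, `regr_intercept`, `regr_count`, ...). The statistic that is
/// returned is selected by `regr_type`.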
pub struct Regr {
signature: Signature,
regr_type: RegrType,
func_name: &'static str,
}
impl Debug for Regr {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("regr")
.field("name", &self.name())
.field("signature", &self.signature)
.finish()
}
}
impl Regr {
pub fn new(regr_type: RegrType, func_name: &'static str) -> Self {
Self {
signature: Signature::uniform(2, NUMERICS.to_vec(), Volatility::Immutable),
regr_type,
func_name,
}
}
}
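/// The statistic that a [`Regr`] instance computes.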
#[derive(Debug, Clone, PartialEq, Hash, Eq)]
#[allow(clippy::upper_case_acronyms)]
pub enum RegrType {
    /// Slope of the least-squares line (`regr_slope`).
    Slope,
    /// Y-intercept of the least-squares line (`regr_intercept`).
    Intercept,
    /// Number of non-null (y, x) pairs (`regr_count`).
    Count,
    /// Square of the correlation coefficient (`regr_r2`).
    R2,
    /// Average of the independent variable (`regr_avgx`).
    AvgX,
    /// Average of the dependent variable (`regr_avgy`).
    AvgY,
    /// Sum of squared deviations of x from its mean (`regr_sxx`).
    SXX,
    /// Sum of squared deviations of y from its mean (`regr_syy`).
    SYY,
    /// Sum of products of the deviations of x and y from their means (`regr_sxy`).
    SXY,
}
impl RegrType {
fn documentation(&self) -> Option<&Documentation> {
get_regr_docs().get(self)
}
}
static DOCUMENTATION: OnceLock<HashMap<RegrType, Documentation>> = OnceLock::new();
fn get_regr_docs() -> &'static HashMap<RegrType, Documentation> {
DOCUMENTATION.get_or_init(|| {
let mut hash_map = HashMap::new();
hash_map.insert(
RegrType::Slope,
Documentation::builder()
.with_doc_section(DOC_SECTION_STATISTICAL)
.with_description(
"Returns the slope of the linear regression line for non-null pairs in aggregate columns. \
Given input column Y and X: regr_slope(Y, X) returns the slope (k in Y = k*X + b) using minimal RSS fitting.",
)
.with_syntax_example("regr_slope(expression_y, expression_x)")
.with_standard_argument("expression_y", Some("Dependent variable"))
.with_standard_argument("expression_x", Some("Independent variable"))
.build()
.unwrap()
);
hash_map.insert(
RegrType::Intercept,
Documentation::builder()
.with_doc_section(DOC_SECTION_STATISTICAL)
.with_description(
"Computes the y-intercept of the linear regression line. For the equation (y = kx + b), \
this function returns b.",
)
.with_syntax_example("regr_intercept(expression_y, expression_x)")
.with_standard_argument("expression_y", Some("Dependent variable"))
.with_standard_argument("expression_x", Some("Independent variable"))
.build()
.unwrap()
);
hash_map.insert(
RegrType::Count,
Documentation::builder()
.with_doc_section(DOC_SECTION_STATISTICAL)
.with_description(
"Counts the number of non-null paired data points.",
)
.with_syntax_example("regr_count(expression_y, expression_x)")
.with_standard_argument("expression_y", Some("Dependent variable"))
.with_standard_argument("expression_x", Some("Independent variable"))
.build()
.unwrap()
);
hash_map.insert(
RegrType::R2,
Documentation::builder()
.with_doc_section(DOC_SECTION_STATISTICAL)
.with_description(
"Computes the square of the correlation coefficient between the independent and dependent variables.",
)
.with_syntax_example("regr_r2(expression_y, expression_x)")
.with_standard_argument("expression_y", Some("Dependent variable"))
.with_standard_argument("expression_x", Some("Independent variable"))
.build()
.unwrap()
);
hash_map.insert(
RegrType::AvgX,
Documentation::builder()
.with_doc_section(DOC_SECTION_STATISTICAL)
.with_description(
"Computes the average of the independent variable (input) expression_x for the non-null paired data points.",
)
.with_syntax_example("regr_avgx(expression_y, expression_x)")
.with_standard_argument("expression_y", Some("Dependent variable"))
.with_standard_argument("expression_x", Some("Independent variable"))
.build()
.unwrap()
);
hash_map.insert(
RegrType::AvgY,
Documentation::builder()
.with_doc_section(DOC_SECTION_STATISTICAL)
.with_description(
"Computes the average of the dependent variable (output) expression_y for the non-null paired data points.",
)
.with_syntax_example("regr_avgy(expression_y, expression_x)")
.with_standard_argument("expression_y", Some("Dependent variable"))
.with_standard_argument("expression_x", Some("Independent variable"))
.build()
.unwrap()
);
hash_map.insert(
RegrType::SXX,
Documentation::builder()
.with_doc_section(DOC_SECTION_STATISTICAL)
.with_description(
"Computes the sum of squares of the independent variable.",
)
.with_syntax_example("regr_sxx(expression_y, expression_x)")
.with_standard_argument("expression_y", Some("Dependent variable"))
.with_standard_argument("expression_x", Some("Independent variable"))
.build()
.unwrap()
);
hash_map.insert(
RegrType::SYY,
Documentation::builder()
.with_doc_section(DOC_SECTION_STATISTICAL)
.with_description(
"Computes the sum of squares of the dependent variable.",
)
.with_syntax_example("regr_syy(expression_y, expression_x)")
.with_standard_argument("expression_y", Some("Dependent variable"))
.with_standard_argument("expression_x", Some("Independent variable"))
.build()
.unwrap()
);
hash_map.insert(
RegrType::SXY,
Documentation::builder()
.with_doc_section(DOC_SECTION_STATISTICAL)
.with_description(
"Computes the sum of products of paired data points.",
)
.with_syntax_example("regr_sxy(expression_y, expression_x)")
.with_standard_argument("expression_y", Some("Dependent variable"))
.with_standard_argument("expression_x", Some("Independent variable"))
.build()
.unwrap()
);
hash_map
})
}
impl AggregateUDFImpl for Regr {
fn as_any(&self) -> &dyn Any {
self
}
fn name(&self) -> &str {
self.func_name
}
fn signature(&self) -> &Signature {
&self.signature
}
fn return_type(&self, arg_types: &[DataType]) -> Result<DataType> {
if !arg_types[0].is_numeric() {
return plan_err!("Covariance requires numeric input types");
}
if matches!(self.regr_type, RegrType::Count) {
Ok(DataType::UInt64)
} else {
Ok(DataType::Float64)
}
}
fn accumulator(&self, _acc_args: AccumulatorArgs) -> Result<Box<dyn Accumulator>> {
Ok(Box::new(RegrAccumulator::try_new(&self.regr_type)?))
}
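    // The intermediate aggregate state mirrors the fields of `RegrAccumulator`
    // so that partial aggregates can be combined in `Accumulator::merge_batch`.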
fn state_fields(&self, args: StateFieldsArgs) -> Result<Vec<Field>> {
Ok(vec![
Field::new(
format_state_name(args.name, "count"),
DataType::UInt64,
true,
),
Field::new(
format_state_name(args.name, "mean_x"),
DataType::Float64,
true,
),
Field::new(
format_state_name(args.name, "mean_y"),
DataType::Float64,
true,
),
Field::new(
format_state_name(args.name, "m2_x"),
DataType::Float64,
true,
),
Field::new(
format_state_name(args.name, "m2_y"),
DataType::Float64,
true,
),
Field::new(
format_state_name(args.name, "algo_const"),
DataType::Float64,
true,
),
])
}
fn documentation(&self) -> Option<&Documentation> {
self.regr_type.documentation()
}
}
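/// Accumulates the running moments needed by every `regr_*` variant in a
/// single pass, using Welford-style online updates:
/// * `count`      - number of non-null (y, x) pairs seen so far
/// * `mean_x`     - running mean of the independent variable x
/// * `mean_y`     - running mean of the dependent variable y
/// * `m2_x`       - sum of squared deviations of x from `mean_x`
/// * `m2_y`       - sum of squared deviations of y from `mean_y`
/// * `algo_const` - sum of products of deviations, (x - mean_x) * (y - mean_y)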
#[derive(Debug)]
pub struct RegrAccumulator {
count: u64,
mean_x: f64,
mean_y: f64,
m2_x: f64,
m2_y: f64,
algo_const: f64,
regr_type: RegrType,
}
impl RegrAccumulator {
pub fn try_new(regr_type: &RegrType) -> Result<Self> {
Ok(Self {
count: 0_u64,
mean_x: 0_f64,
mean_y: 0_f64,
m2_x: 0_f64,
m2_y: 0_f64,
algo_const: 0_f64,
regr_type: regr_type.clone(),
})
}
}
impl Accumulator for RegrAccumulator {
fn state(&mut self) -> Result<Vec<ScalarValue>> {
Ok(vec![
ScalarValue::from(self.count),
ScalarValue::from(self.mean_x),
ScalarValue::from(self.mean_y),
ScalarValue::from(self.m2_x),
ScalarValue::from(self.m2_y),
ScalarValue::from(self.algo_const),
])
}
fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
let values_y = &cast(&values[0], &DataType::Float64)?;
let values_x = &cast(&values[1], &DataType::Float64)?;
let mut arr_y = downcast_value!(values_y, Float64Array).iter().flatten();
let mut arr_x = downcast_value!(values_x, Float64Array).iter().flatten();
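        // Walk both columns positionally so that a row where either value is
        // null is skipped as a pair; zipping the flattened (non-null) iterators
        // alone would silently misalign the two columns.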
for i in 0..values_y.len() {
let value_y = if values_y.is_valid(i) {
arr_y.next()
} else {
None
};
let value_x = if values_x.is_valid(i) {
arr_x.next()
} else {
None
};
if value_y.is_none() || value_x.is_none() {
continue;
}
let value_y = unwrap_or_internal_err!(value_y);
let value_x = unwrap_or_internal_err!(value_x);
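            // Welford-style online update of the means, the second central
            // moments and the co-moment.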
self.count += 1;
let delta_x = value_x - self.mean_x;
let delta_y = value_y - self.mean_y;
self.mean_x += delta_x / self.count as f64;
self.mean_y += delta_y / self.count as f64;
let delta_x_2 = value_x - self.mean_x;
let delta_y_2 = value_y - self.mean_y;
self.m2_x += delta_x * delta_x_2;
self.m2_y += delta_y * delta_y_2;
self.algo_const += delta_x * (value_y - self.mean_y);
}
Ok(())
}
fn supports_retract_batch(&self) -> bool {
true
}
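    // Removes rows from the running state (used for sliding window frames) by
    // applying the online update equations in reverse.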
fn retract_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
let values_y = &cast(&values[0], &DataType::Float64)?;
let values_x = &cast(&values[1], &DataType::Float64)?;
let mut arr_y = downcast_value!(values_y, Float64Array).iter().flatten();
let mut arr_x = downcast_value!(values_x, Float64Array).iter().flatten();
for i in 0..values_y.len() {
let value_y = if values_y.is_valid(i) {
arr_y.next()
} else {
None
};
let value_x = if values_x.is_valid(i) {
arr_x.next()
} else {
None
};
if value_y.is_none() || value_x.is_none() {
continue;
}
let value_y = unwrap_or_internal_err!(value_y);
let value_x = unwrap_or_internal_err!(value_x);
if self.count > 1 {
self.count -= 1;
let delta_x = value_x - self.mean_x;
let delta_y = value_y - self.mean_y;
self.mean_x -= delta_x / self.count as f64;
self.mean_y -= delta_y / self.count as f64;
let delta_x_2 = value_x - self.mean_x;
let delta_y_2 = value_y - self.mean_y;
self.m2_x -= delta_x * delta_x_2;
self.m2_y -= delta_y * delta_y_2;
self.algo_const -= delta_x * (value_y - self.mean_y);
} else {
self.count = 0;
self.mean_x = 0.0;
self.m2_x = 0.0;
self.m2_y = 0.0;
self.mean_y = 0.0;
self.algo_const = 0.0;
}
}
Ok(())
}
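    // Combines a partial state produced by another partition with this one,
    // using the pairwise merge formulas for means, second central moments and
    // the co-moment.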
fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
let count_arr = downcast_value!(states[0], UInt64Array);
let mean_x_arr = downcast_value!(states[1], Float64Array);
let mean_y_arr = downcast_value!(states[2], Float64Array);
let m2_x_arr = downcast_value!(states[3], Float64Array);
let m2_y_arr = downcast_value!(states[4], Float64Array);
let algo_const_arr = downcast_value!(states[5], Float64Array);
for i in 0..count_arr.len() {
let count_b = count_arr.value(i);
if count_b == 0_u64 {
continue;
}
let (count_a, mean_x_a, mean_y_a, m2_x_a, m2_y_a, algo_const_a) = (
self.count,
self.mean_x,
self.mean_y,
self.m2_x,
self.m2_y,
self.algo_const,
);
let (count_b, mean_x_b, mean_y_b, m2_x_b, m2_y_b, algo_const_b) = (
count_b,
mean_x_arr.value(i),
mean_y_arr.value(i),
m2_x_arr.value(i),
m2_y_arr.value(i),
algo_const_arr.value(i),
);
let count_ab = count_a + count_b;
let (count_a, count_b) = (count_a as f64, count_b as f64);
let d_x = mean_x_b - mean_x_a;
let d_y = mean_y_b - mean_y_a;
let mean_x_ab = mean_x_a + d_x * count_b / count_ab as f64;
let mean_y_ab = mean_y_a + d_y * count_b / count_ab as f64;
let m2_x_ab =
m2_x_a + m2_x_b + d_x * d_x * count_a * count_b / count_ab as f64;
let m2_y_ab =
m2_y_a + m2_y_b + d_y * d_y * count_a * count_b / count_ab as f64;
let algo_const_ab = algo_const_a
+ algo_const_b
+ d_x * d_y * count_a * count_b / count_ab as f64;
self.count = count_ab;
self.mean_x = mean_x_ab;
self.mean_y = mean_y_ab;
self.m2_x = m2_x_ab;
self.m2_y = m2_y_ab;
self.algo_const = algo_const_ab;
}
Ok(())
}
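    // Derives the requested statistic from the accumulated moments:
    // slope = cov_pop(x, y) / var_pop(x), intercept = mean_y - slope * mean_x,
    // r2 = cov_pop(x, y)^2 / (var_pop(x) * var_pop(y)).
    // Slope, intercept and r2 are NULL with fewer than two pairs or when the
    // relevant variance is zero; the averages and sums are NULL only when no
    // pairs were seen, and regr_count is never NULL.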
fn evaluate(&mut self) -> Result<ScalarValue> {
let cov_pop_x_y = self.algo_const / self.count as f64;
let var_pop_x = self.m2_x / self.count as f64;
let var_pop_y = self.m2_y / self.count as f64;
let nullif_or_stat = |cond: bool, stat: f64| {
if cond {
Ok(ScalarValue::Float64(None))
} else {
Ok(ScalarValue::Float64(Some(stat)))
}
};
match self.regr_type {
RegrType::Slope => {
let nullif_cond = self.count <= 1 || var_pop_x == 0.0;
nullif_or_stat(nullif_cond, cov_pop_x_y / var_pop_x)
}
RegrType::Intercept => {
let slope = cov_pop_x_y / var_pop_x;
let nullif_cond = self.count <= 1 || var_pop_x == 0.0;
nullif_or_stat(nullif_cond, self.mean_y - slope * self.mean_x)
}
RegrType::Count => Ok(ScalarValue::UInt64(Some(self.count))),
RegrType::R2 => {
let nullif_cond = self.count <= 1 || var_pop_x == 0.0 || var_pop_y == 0.0;
nullif_or_stat(
nullif_cond,
(cov_pop_x_y * cov_pop_x_y) / (var_pop_x * var_pop_y),
)
}
RegrType::AvgX => nullif_or_stat(self.count < 1, self.mean_x),
RegrType::AvgY => nullif_or_stat(self.count < 1, self.mean_y),
RegrType::SXX => nullif_or_stat(self.count < 1, self.m2_x),
RegrType::SYY => nullif_or_stat(self.count < 1, self.m2_y),
RegrType::SXY => nullif_or_stat(self.count < 1, self.algo_const),
}
}
fn size(&self) -> usize {
size_of_val(self)
}
}
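// A minimal sketch (not part of the original source) of how `RegrAccumulator`
// can be exercised directly; the module name, test name and tolerance are
// illustrative assumptions.
#[cfg(test)]
mod regr_accumulator_sketch_tests {
    use super::*;
    use std::sync::Arc;

    #[test]
    fn regr_slope_recovers_exact_linear_relationship() -> Result<()> {
        let mut acc = RegrAccumulator::try_new(&RegrType::Slope)?;
        // y = 2x exactly, so the fitted slope should be 2.0.
        let y: ArrayRef = Arc::new(Float64Array::from(vec![2.0, 4.0, 6.0]));
        let x: ArrayRef = Arc::new(Float64Array::from(vec![1.0, 2.0, 3.0]));
        acc.update_batch(&[y, x])?;
        match acc.evaluate()? {
            ScalarValue::Float64(Some(slope)) => assert!((slope - 2.0).abs() < 1e-10),
            other => panic!("expected a non-null Float64 slope, got {:?}", other),
        }
        Ok(())
    }
}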