use std::any::Any;
use std::sync::Arc;
use crate::aggregate::stats::StatsType;
use crate::aggregate::utils::down_cast_any_ref;
use crate::expressions::format_state_name;
use crate::{AggregateExpr, PhysicalExpr};
use arrow::array::Float64Array;
use arrow::{
array::{ArrayRef, UInt64Array},
compute::cast,
datatypes::DataType,
datatypes::Field,
};
use datafusion_common::downcast_value;
use datafusion_common::{DataFusionError, Result, ScalarValue};
use datafusion_expr::Accumulator;
/// Physical expression for the sample variance aggregate (`VAR`/`VAR_SAMP`).
///
/// The output field is always `Float64` (see `field`); the incremental math
/// is delegated to [`VarianceAccumulator`] created with `StatsType::Sample`.
#[derive(Debug)]
pub struct Variance {
// Output column name, also used to derive state-field names.
name: String,
// Input expression whose values are aggregated.
expr: Arc<dyn PhysicalExpr>,
}
/// Physical expression for the population variance aggregate (`VAR_POP`).
///
/// Identical to [`Variance`] except the accumulator is created with
/// `StatsType::Population` (divisor `n` instead of `n - 1`).
#[derive(Debug)]
pub struct VariancePop {
// Output column name, also used to derive state-field names.
name: String,
// Input expression whose values are aggregated.
expr: Arc<dyn PhysicalExpr>,
}
impl Variance {
    /// Create a new sample-variance aggregate expression.
    ///
    /// `data_type` must be `DataType::Float64`; passing anything else is a
    /// planner bug and triggers the assertion below.
    pub fn new(
        expr: Arc<dyn PhysicalExpr>,
        name: impl Into<String>,
        data_type: DataType,
    ) -> Self {
        // The accumulator only ever produces Float64, so reject other types
        // up front rather than failing later.
        assert!(matches!(data_type, DataType::Float64));
        let name = name.into();
        Self { name, expr }
    }
}
impl AggregateExpr for Variance {
    /// Expose `self` as `Any` so callers can downcast to the concrete type.
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// The output field: nullable `Float64` named after this expression.
    fn field(&self) -> Result<Field> {
        Ok(Field::new(&self.name, DataType::Float64, true))
    }

    /// Accumulator for regular (non-windowed) aggregation.
    fn create_accumulator(&self) -> Result<Box<dyn Accumulator>> {
        let acc = VarianceAccumulator::try_new(StatsType::Sample)?;
        Ok(Box::new(acc))
    }

    /// Accumulator for sliding-window frames; the same type supports
    /// retraction, so it is reused here.
    fn create_sliding_accumulator(&self) -> Result<Box<dyn Accumulator>> {
        let acc = VarianceAccumulator::try_new(StatsType::Sample)?;
        Ok(Box::new(acc))
    }

    /// Intermediate state layout: `count` (UInt64), `mean` and `m2`
    /// (Float64). Must stay in sync with `VarianceAccumulator::state`.
    fn state_fields(&self) -> Result<Vec<Field>> {
        let count = Field::new(
            format_state_name(&self.name, "count"),
            DataType::UInt64,
            true,
        );
        let mean = Field::new(
            format_state_name(&self.name, "mean"),
            DataType::Float64,
            true,
        );
        let m2 = Field::new(format_state_name(&self.name, "m2"), DataType::Float64, true);
        Ok(vec![count, mean, m2])
    }

    /// The single input expression being aggregated.
    fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
        vec![Arc::clone(&self.expr)]
    }

    fn name(&self) -> &str {
        self.name.as_str()
    }
}
impl PartialEq<dyn Any> for Variance {
    /// Two `Variance` expressions are equal when both the output name and
    /// the input expression match; any other `Any` compares unequal.
    fn eq(&self, other: &dyn Any) -> bool {
        match down_cast_any_ref(other).downcast_ref::<Self>() {
            Some(o) => self.name == o.name && self.expr.eq(&o.expr),
            None => false,
        }
    }
}
impl VariancePop {
    /// Create a new population-variance aggregate expression.
    ///
    /// `data_type` must be `DataType::Float64`; passing anything else is a
    /// planner bug and triggers the assertion below.
    pub fn new(
        expr: Arc<dyn PhysicalExpr>,
        name: impl Into<String>,
        data_type: DataType,
    ) -> Self {
        // The accumulator only ever produces Float64, so reject other types
        // up front rather than failing later.
        assert!(matches!(data_type, DataType::Float64));
        let name = name.into();
        Self { name, expr }
    }
}
impl AggregateExpr for VariancePop {
    /// Expose `self` as `Any` so callers can downcast to the concrete type.
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// The output field: nullable `Float64` named after this expression.
    fn field(&self) -> Result<Field> {
        Ok(Field::new(&self.name, DataType::Float64, true))
    }

    /// Accumulator for regular (non-windowed) aggregation.
    fn create_accumulator(&self) -> Result<Box<dyn Accumulator>> {
        let acc = VarianceAccumulator::try_new(StatsType::Population)?;
        Ok(Box::new(acc))
    }

    /// Accumulator for sliding-window frames; the same type supports
    /// retraction, so it is reused here.
    fn create_sliding_accumulator(&self) -> Result<Box<dyn Accumulator>> {
        let acc = VarianceAccumulator::try_new(StatsType::Population)?;
        Ok(Box::new(acc))
    }

    /// Intermediate state layout: `count` (UInt64), `mean` and `m2`
    /// (Float64). Must stay in sync with `VarianceAccumulator::state`.
    fn state_fields(&self) -> Result<Vec<Field>> {
        let count = Field::new(
            format_state_name(&self.name, "count"),
            DataType::UInt64,
            true,
        );
        let mean = Field::new(
            format_state_name(&self.name, "mean"),
            DataType::Float64,
            true,
        );
        let m2 = Field::new(format_state_name(&self.name, "m2"), DataType::Float64, true);
        Ok(vec![count, mean, m2])
    }

    /// The single input expression being aggregated.
    fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
        vec![Arc::clone(&self.expr)]
    }

    fn name(&self) -> &str {
        self.name.as_str()
    }
}
impl PartialEq<dyn Any> for VariancePop {
    /// Two `VariancePop` expressions are equal when both the output name and
    /// the input expression match; any other `Any` compares unequal.
    fn eq(&self, other: &dyn Any) -> bool {
        down_cast_any_ref(other)
            .downcast_ref::<Self>()
            .map_or(false, |o| self.name == o.name && self.expr.eq(&o.expr))
    }
}
/// Incremental (Welford-style) variance state shared by `Variance` and
/// `VariancePop`; the two differ only in `stats_type` at evaluation time.
#[derive(Debug)]
pub struct VarianceAccumulator {
// Running sum of squared deviations from the mean (Welford's M2 term).
m2: f64,
// Running mean of all non-null values seen so far.
mean: f64,
// Number of non-null values accumulated.
count: u64,
// Sample vs. population: selects the divisor (n-1 vs. n) in `evaluate`.
stats_type: StatsType,
}
impl VarianceAccumulator {
    /// Create an empty accumulator for the given statistic type (sample or
    /// population). Currently infallible; the `Result` return is kept for
    /// API compatibility.
    pub fn try_new(s_type: StatsType) -> Result<Self> {
        Ok(Self {
            stats_type: s_type,
            count: 0,
            mean: 0.0,
            m2: 0.0,
        })
    }

    /// Number of non-null values accumulated so far.
    pub fn get_count(&self) -> u64 {
        self.count
    }

    /// Running mean of the accumulated values.
    pub fn get_mean(&self) -> f64 {
        self.mean
    }

    /// Running sum of squared deviations from the mean (Welford's M2).
    pub fn get_m2(&self) -> f64 {
        self.m2
    }
}
impl Accumulator for VarianceAccumulator {
    /// Serialize the intermediate state as `[count, mean, m2]`, matching
    /// the layout declared by the `state_fields` of the owning expression.
    fn state(&self) -> Result<Vec<ScalarValue>> {
        Ok(vec![
            ScalarValue::from(self.count),
            ScalarValue::from(self.mean),
            ScalarValue::from(self.m2),
        ])
    }

    /// Fold a batch of values in using Welford's online update.
    ///
    /// The input is cast to `Float64` and nulls are skipped (`flatten`),
    /// so `count` tracks non-null values only.
    fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
        let values = &cast(&values[0], &DataType::Float64)?;
        let arr = downcast_value!(values, Float64Array).iter().flatten();
        for value in arr {
            let new_count = self.count + 1;
            let delta1 = value - self.mean;
            let new_mean = delta1 / new_count as f64 + self.mean;
            let delta2 = value - new_mean;
            let new_m2 = self.m2 + delta1 * delta2;
            self.count += 1;
            self.mean = new_mean;
            self.m2 = new_m2;
        }
        Ok(())
    }

    /// Remove a batch of previously-added values (used by sliding window
    /// frames), inverting the Welford update.
    ///
    /// Callers must only retract values that were previously added; a
    /// retract on an empty accumulator underflows `count`.
    fn retract_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
        let values = &cast(&values[0], &DataType::Float64)?;
        let arr = downcast_value!(values, Float64Array).iter().flatten();
        for value in arr {
            let new_count = self.count - 1;
            if new_count == 0 {
                // Bug fix: retracting the last remaining value used to divide
                // by zero below, leaving `mean` as NaN/inf and poisoning every
                // subsequent update or merge. Reset to the empty state instead
                // (evaluate() already yields NULL for count == 0).
                self.count = 0;
                self.mean = 0.0;
                self.m2 = 0.0;
                continue;
            }
            let delta1 = self.mean - value;
            let new_mean = delta1 / new_count as f64 + self.mean;
            let delta2 = new_mean - value;
            let new_m2 = self.m2 - delta1 * delta2;
            self.count -= 1;
            self.mean = new_mean;
            self.m2 = new_m2;
        }
        Ok(())
    }

    /// Combine partial states `[count, mean, m2]` produced by other
    /// accumulators, using the pairwise (Chan et al.) merge formula.
    /// Empty partial states (count == 0) are skipped.
    fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
        let counts = downcast_value!(states[0], UInt64Array);
        let means = downcast_value!(states[1], Float64Array);
        let m2s = downcast_value!(states[2], Float64Array);
        for i in 0..counts.len() {
            let c = counts.value(i);
            if c == 0_u64 {
                continue;
            }
            let new_count = self.count + c;
            // Weighted combination of the two means.
            let new_mean = self.mean * self.count as f64 / new_count as f64
                + means.value(i) * c as f64 / new_count as f64;
            let delta = self.mean - means.value(i);
            // m2_total = m2_a + m2_b + delta^2 * n_a * n_b / (n_a + n_b)
            let new_m2 = self.m2
                + m2s.value(i)
                + delta * delta * self.count as f64 * c as f64 / new_count as f64;
            self.count = new_count;
            self.mean = new_mean;
            self.m2 = new_m2;
        }
        Ok(())
    }

    /// Final result: `m2 / n` (population) or `m2 / (n - 1)` (sample).
    ///
    /// Returns NULL for zero inputs; for a single input, population variance
    /// is 0 while sample variance is undefined (NULL).
    fn evaluate(&self) -> Result<ScalarValue> {
        let count = match self.stats_type {
            StatsType::Population => self.count,
            StatsType::Sample => {
                if self.count > 0 {
                    self.count - 1
                } else {
                    self.count
                }
            }
        };
        Ok(ScalarValue::Float64(match self.count {
            0 => None,
            1 => {
                if let StatsType::Population = self.stats_type {
                    Some(0.0)
                } else {
                    None
                }
            }
            _ => Some(self.m2 / count as f64),
        }))
    }

    /// In-memory size of this accumulator (all fields are inline).
    fn size(&self) -> usize {
        std::mem::size_of_val(self)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::aggregate::utils::get_accum_scalar_values_as_arrays;
use crate::expressions::col;
use crate::expressions::tests::aggregate;
use crate::generic_test_op;
use arrow::record_batch::RecordBatch;
use arrow::{array::*, datatypes::*};
use datafusion_common::Result;
// VAR_POP([1, 2]) = 0.25: mean 1.5, each squared deviation 0.25, divisor 2.
#[test]
fn variance_f64_1() -> Result<()> {
let a: ArrayRef = Arc::new(Float64Array::from(vec![1_f64, 2_f64]));
generic_test_op!(
a,
DataType::Float64,
VariancePop,
ScalarValue::from(0.25_f64)
)
}
// VAR_POP([1..5]) = 2: population variance divides by n.
#[test]
fn variance_f64_2() -> Result<()> {
let a: ArrayRef =
Arc::new(Float64Array::from(vec![1_f64, 2_f64, 3_f64, 4_f64, 5_f64]));
generic_test_op!(a, DataType::Float64, VariancePop, ScalarValue::from(2_f64))
}
// VAR_SAMP([1..5]) = 2.5: sample variance divides by n - 1.
#[test]
fn variance_f64_3() -> Result<()> {
let a: ArrayRef =
Arc::new(Float64Array::from(vec![1_f64, 2_f64, 3_f64, 4_f64, 5_f64]));
generic_test_op!(a, DataType::Float64, Variance, ScalarValue::from(2.5_f64))
}
// Non-integral inputs; expected value includes float rounding.
#[test]
fn variance_f64_4() -> Result<()> {
let a: ArrayRef = Arc::new(Float64Array::from(vec![1.1_f64, 2_f64, 3_f64]));
generic_test_op!(
a,
DataType::Float64,
Variance,
ScalarValue::from(0.9033333333333333_f64)
)
}
// Int32 input is cast to Float64 by the accumulator; same result as f64.
#[test]
fn variance_i32() -> Result<()> {
let a: ArrayRef = Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5]));
generic_test_op!(a, DataType::Int32, VariancePop, ScalarValue::from(2_f64))
}
// UInt32 input is likewise cast to Float64.
#[test]
fn variance_u32() -> Result<()> {
let a: ArrayRef =
Arc::new(UInt32Array::from(vec![1_u32, 2_u32, 3_u32, 4_u32, 5_u32]));
generic_test_op!(a, DataType::UInt32, VariancePop, ScalarValue::from(2.0f64))
}
// Float32 input is likewise cast to Float64.
#[test]
fn variance_f32() -> Result<()> {
let a: ArrayRef =
Arc::new(Float32Array::from(vec![1_f32, 2_f32, 3_f32, 4_f32, 5_f32]));
generic_test_op!(a, DataType::Float32, VariancePop, ScalarValue::from(2_f64))
}
// Sample variance of a single value is undefined -> NULL.
#[test]
fn test_variance_1_input() -> Result<()> {
let a: ArrayRef = Arc::new(Float64Array::from(vec![1_f64]));
let schema = Schema::new(vec![Field::new("a", DataType::Float64, false)]);
let batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![a])?;
let agg = Arc::new(Variance::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let actual = aggregate(&batch, agg).unwrap();
assert_eq!(actual, ScalarValue::Float64(None));
Ok(())
}
// Nulls are skipped: VAR_POP over the 4 non-null values [1,3,4,5] = 2.1875.
#[test]
fn variance_i32_with_nulls() -> Result<()> {
let a: ArrayRef = Arc::new(Int32Array::from(vec![
Some(1),
None,
Some(3),
Some(4),
Some(5),
]));
generic_test_op!(
a,
DataType::Int32,
VariancePop,
ScalarValue::from(2.1875_f64)
)
}
// All-null input leaves count at 0, so the result is NULL.
#[test]
fn variance_i32_all_nulls() -> Result<()> {
let a: ArrayRef = Arc::new(Int32Array::from(vec![None, None]));
let schema = Schema::new(vec![Field::new("a", DataType::Int32, true)]);
let batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![a])?;
let agg = Arc::new(Variance::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let actual = aggregate(&batch, agg).unwrap();
assert_eq!(actual, ScalarValue::Float64(None));
Ok(())
}
// Merging partial states of [1,2,3] and [4,5] must equal VAR_POP([1..5]) = 2.
#[test]
fn variance_f64_merge_1() -> Result<()> {
let a = Arc::new(Float64Array::from(vec![1_f64, 2_f64, 3_f64]));
let b = Arc::new(Float64Array::from(vec![4_f64, 5_f64]));
let schema = Schema::new(vec![Field::new("a", DataType::Float64, false)]);
let batch1 = RecordBatch::try_new(Arc::new(schema.clone()), vec![a])?;
let batch2 = RecordBatch::try_new(Arc::new(schema.clone()), vec![b])?;
let agg1 = Arc::new(VariancePop::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let agg2 = Arc::new(VariancePop::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let actual = merge(&batch1, &batch2, agg1, agg2)?;
assert!(actual == ScalarValue::from(2_f64));
Ok(())
}
// Merging an all-null (empty-count) partial state must not change the result.
#[test]
fn variance_f64_merge_2() -> Result<()> {
let a = Arc::new(Float64Array::from(vec![1_f64, 2_f64, 3_f64, 4_f64, 5_f64]));
let b = Arc::new(Float64Array::from(vec![None]));
let schema = Schema::new(vec![Field::new("a", DataType::Float64, true)]);
let batch1 = RecordBatch::try_new(Arc::new(schema.clone()), vec![a])?;
let batch2 = RecordBatch::try_new(Arc::new(schema.clone()), vec![b])?;
let agg1 = Arc::new(VariancePop::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let agg2 = Arc::new(VariancePop::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let actual = merge(&batch1, &batch2, agg1, agg2)?;
assert!(actual == ScalarValue::from(2_f64));
Ok(())
}
/// Helper: update `agg1` from `batch1` and `agg2` from `batch2`, then merge
/// `agg2`'s serialized state into `agg1` and evaluate the combined result.
fn merge(
batch1: &RecordBatch,
batch2: &RecordBatch,
agg1: Arc<dyn AggregateExpr>,
agg2: Arc<dyn AggregateExpr>,
) -> Result<ScalarValue> {
let mut accum1 = agg1.create_accumulator()?;
let mut accum2 = agg2.create_accumulator()?;
let expr1 = agg1.expressions();
let expr2 = agg2.expressions();
let values1 = expr1
.iter()
.map(|e| e.evaluate(batch1))
.map(|r| r.map(|v| v.into_array(batch1.num_rows())))
.collect::<Result<Vec<_>>>()?;
let values2 = expr2
.iter()
.map(|e| e.evaluate(batch2))
.map(|r| r.map(|v| v.into_array(batch2.num_rows())))
.collect::<Result<Vec<_>>>()?;
accum1.update_batch(&values1)?;
accum2.update_batch(&values2)?;
let state2 = get_accum_scalar_values_as_arrays(accum2.as_ref())?;
accum1.merge_batch(&state2)?;
accum1.evaluate()
}
}