Struct polars::frame::DataFrame
pub struct DataFrame { /* fields omitted */ }
Implementations
pub fn to_ndarray<N>(
&self
) -> Result<ArrayBase<OwnedRepr<<N as PolarsNumericType>::Native>, Dim<[usize; 2]>>, PolarsError> where
N: PolarsNumericType,
Create a 2D ndarray::Array from this DataFrame. This requires all columns in the DataFrame to be non-null and numeric. They will be cast to the same data type (if they aren't already).
use polars_core::prelude::*;
let a = UInt32Chunked::new_from_slice("a", &[1, 2, 3]).into_series();
let b = Float64Chunked::new_from_slice("b", &[10., 8., 6.]).into_series();
let df = DataFrame::new(vec![a, b]).unwrap();
let ndarray = df.to_ndarray::<Float64Type>().unwrap();
println!("{:?}", ndarray);
Outputs:
[[1.0, 10.0],
[2.0, 8.0],
[3.0, 6.0]], shape=[3, 2], strides=[2, 1], layout=C (0x1), const ndim=2
Sample n datapoints from this DataFrame.
pub fn sample_frac(
&self,
frac: f64,
with_replacement: bool
) -> Result<DataFrame, PolarsError>
Sample a fraction between 0.0 and 1.0 of this DataFrame.
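Example
A minimal sketch based on the signature above; the 0.1 fraction and the no-replacement flag are illustrative choices:
use polars_core::prelude::*;
fn sample_example(df: &DataFrame) -> Result<DataFrame> {
    // Sample 10% of the rows without replacement.
    df.sample_frac(0.1, false)
}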
pub fn join_asof_by<'a, S, J>(
&self,
other: &DataFrame,
left_on: &str,
right_on: &str,
left_by: S,
right_by: S
) -> Result<DataFrame, PolarsError> where
S: Selection<'a, J>,
This is similar to a left join except that we match on the nearest key rather than equal keys. The keys must be sorted to perform an asof join. This is a special implementation of an asof join that searches for the nearest keys within a subgroup set by the by columns (left_by / right_by).
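Example
A minimal sketch based on the signature above; the column names ("time", "group") are hypothetical:
use polars_core::prelude::*;
fn asof_join_example(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
    // Join on the nearest "time" key, but only between rows that share the same "group" value.
    left.join_asof_by(right, "time", "time", "group", "group")
}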
Creates the cartesian product of both frames, preserving the order of the left keys.
Explode the DataFrame to long format by exploding a column with Lists.
Example
use polars_core::prelude::*;
let s0 = Series::new("a", &[1i64, 2, 3]);
let s1 = Series::new("b", &[1i64, 1, 1]);
let s2 = Series::new("c", &[2i64, 2, 2]);
let list = Series::new("foo", &[s0, s1, s2]);
let s0 = Series::new("B", [1, 2, 3]);
let s1 = Series::new("C", [1, 1, 1]);
let df = DataFrame::new(vec![list, s0, s1]).unwrap();
let exploded = df.explode("foo").unwrap();
println!("{:?}", df);
println!("{:?}", exploded);
Outputs:
+-------------+-----+-----+
| foo | B | C |
| --- | --- | --- |
| list [i64] | i32 | i32 |
+=============+=====+=====+
| "[1, 2, 3]" | 1 | 1 |
+-------------+-----+-----+
| "[1, 1, 1]" | 2 | 1 |
+-------------+-----+-----+
| "[2, 2, 2]" | 3 | 1 |
+-------------+-----+-----+
+-----+-----+-----+
| foo | B | C |
| --- | --- | --- |
| i64 | i32 | i32 |
+=====+=====+=====+
| 1 | 1 | 1 |
+-----+-----+-----+
| 2 | 1 | 1 |
+-----+-----+-----+
| 3 | 1 | 1 |
+-----+-----+-----+
| 1 | 2 | 1 |
+-----+-----+-----+
| 1 | 2 | 1 |
+-----+-----+-----+
| 1 | 2 | 1 |
+-----+-----+-----+
| 2 | 3 | 1 |
+-----+-----+-----+
| 2 | 3 | 1 |
+-----+-----+-----+
| 2 | 3 | 1 |
+-----+-----+-----+
Unpivot a DataFrame from wide to long format.
Arguments
id_vars - String slice that represents the columns to use as id variables.
value_vars - String slice that represents the columns to use as value variables.
Example
use polars_core::prelude::*;
let df = df!("A" => &["a", "b", "a"],
"B" => &[1, 3, 5],
"C" => &[10, 11, 12],
"D" => &[2, 4, 6]
)
.unwrap();
let melted = df.melt(&["A", "B"], &["C", "D"]).unwrap();
println!("{:?}", df);
println!("{:?}", melted);
Outputs:
+-----+-----+-----+-----+
| A | B | C | D |
| --- | --- | --- | --- |
| str | i32 | i32 | i32 |
+=====+=====+=====+=====+
| "a" | 1 | 10 | 2 |
+-----+-----+-----+-----+
| "b" | 3 | 11 | 4 |
+-----+-----+-----+-----+
| "a" | 5 | 12 | 6 |
+-----+-----+-----+-----+
+-----+-----+----------+-------+
| A | B | variable | value |
| --- | --- | --- | --- |
| str | i32 | str | i32 |
+=====+=====+==========+=======+
| "a" | 1 | "C" | 10 |
+-----+-----+----------+-------+
| "b" | 3 | "C" | 11 |
+-----+-----+----------+-------+
| "a" | 5 | "C" | 12 |
+-----+-----+----------+-------+
| "a" | 1 | "D" | 2 |
+-----+-----+----------+-------+
| "b" | 3 | "D" | 4 |
+-----+-----+----------+-------+
| "a" | 5 | "D" | 6 |
+-----+-----+----------+-------+
Downsample a temporal column by some frequency/rule.
Examples
Consider the following input DataFrame:
╭─────────────────────┬─────╮
│ ms ┆ i │
│ --- ┆ --- │
│ datetime(ms) ┆ u8 │
╞═════════════════════╪═════╡
│ 2000-01-01 00:00:00 ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2000-01-01 00:01:00 ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2000-01-01 00:02:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2000-01-01 00:03:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ ... ┆ ... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2000-01-01 00:15:00 ┆ 15 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2000-01-01 00:16:00 ┆ 16 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2000-01-01 00:17:00 ┆ 17 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2000-01-01 00:18:00 ┆ 18 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2000-01-01 00:19:00 ┆ 19 │
╰─────────────────────┴─────╯
use polars_core::prelude::*;
use polars_core::frame::groupby::resample::SampleRule;
fn example(df: &DataFrame) -> Result<DataFrame> {
df.downsample("datetime", SampleRule::Minute(5))?
.first()?
.sort("datetime", false)
}
Outputs:
╭─────────────────────┬─────────╮
│ ms ┆ i_first │
│ --- ┆ --- │
│ datetime(ms) ┆ u8 │
╞═════════════════════╪═════════╡
│ 2000-01-01 00:00:00 ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┤
│ 2000-01-01 00:05:00 ┆ 5 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┤
│ 2000-01-01 00:10:00 ┆ 10 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┤
│ 2000-01-01 00:15:00 ┆ 15 │
╰─────────────────────┴─────────╯
pub fn downsample_with_series(
&self,
key: &Series,
rule: SampleRule
) -> Result<GroupBy<'_, '_>, PolarsError>
See downsample.
pub fn groupby_with_series(
&self,
by: Vec<Series, Global>,
multithreaded: bool
) -> Result<GroupBy<'_, '_>, PolarsError>
Group DataFrame using a Series column.
Example
use polars_core::prelude::*;
fn groupby_sum(df: &DataFrame) -> Result<DataFrame> {
df.groupby("column_name")?
.select("agg_column_name")
.sum()
}
pub fn groupby_stable<'g, J, S>(
&self,
by: S
) -> Result<GroupBy<'_, '_>, PolarsError> where
S: Selection<'g, J>,
Group DataFrame using a Series column. The groups are ordered by their smallest row index.
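Example
A minimal sketch mirroring the groupby example above; the column names are hypothetical:
use polars_core::prelude::*;
fn groupby_stable_sum(df: &DataFrame) -> Result<DataFrame> {
    // Like groupby, but the output groups keep the order of their smallest row index.
    df.groupby_stable("column_name")?
        .select("agg_column_name")
        .sum()
}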
Generic join method. Can be used to join on multiple columns.
pub fn inner_join(
&self,
other: &DataFrame,
left_on: &str,
right_on: &str
) -> Result<DataFrame, PolarsError>
Perform an inner join on two DataFrames.
Example
use polars_core::prelude::*;
fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
left.inner_join(right, "join_column_left", "join_column_right")
}
Perform a left join on two DataFrames
Example
use polars_core::prelude::*;
fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
left.left_join(right, "join_column_left", "join_column_right")
}
pub fn outer_join(
&self,
other: &DataFrame,
left_on: &str,
right_on: &str
) -> Result<DataFrame, PolarsError>
Perform an outer join on two DataFrames
Example
use polars_core::prelude::*;
fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
left.outer_join(right, "join_column_left", "join_column_right")
}
Get a row from a DataFrame. Use of this is discouraged as it will likely be slow.
Amortize allocations by reusing a row. The caller is responsible for making sure that the row has at least the capacity for the number of columns in the DataFrame.
Safety
Does not do any bounds checking.
pub fn from_rows_and_schema(
rows: &[Row<'_>],
schema: &Schema
) -> Result<DataFrame, PolarsError>
Create a new DataFrame from rows. This should only be used when you have row-wise data, as this is a lot slower than creating the Series in a columnar fashion.
Transpose a DataFrame. This is a very expensive operation.
Create a DataFrame from a Vector of Series.
Example
use polars_core::prelude::*;
let s0 = Series::new("days", [0, 1, 2].as_ref());
let s1 = Series::new("temp", [22.1, 19.9, 7.].as_ref());
let df = DataFrame::new(vec![s0, s1]).unwrap();
Add a new column at index 0 that counts the rows.
Create a new DataFrame but does not check the length or duplicate occurrence of the Series.
It is advised to use DataFrame::new in favor of this method.
Panic
It is the caller's responsibility to uphold the contract of all Series having an equal length; if not, this may panic down the line.
Aggregate all chunks to contiguous memory.
Shrink the capacity of this DataFrame to fit its length.
Aggregate all the chunks in the DataFrame to a single chunk.
Aggregate all the chunks in the DataFrame to a single chunk in parallel. This may lead to more peak memory consumption.
Get a reference to the DataFrame columns.
pub fn set_column_names<S>(&mut self, names: &[S]) -> Result<(), PolarsError> where
S: AsRef<str>,
Set the column names.
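Example
A minimal sketch; the number of names must match the number of columns, so a two-column DataFrame is assumed here:
use polars_core::prelude::*;
fn rename_all_columns(df: &mut DataFrame) -> Result<()> {
    // Replace every column name at once.
    df.set_column_names(&["a", "b"])
}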
Get the data types of the columns in the DataFrame.
The number of chunks per column
Get a reference to the schema fields of the DataFrame.
Get (width x height)
Example
use polars_core::prelude::*;
fn assert_shape(df: &DataFrame, shape: (usize, usize)) {
assert_eq!(df.shape(), shape)
}
Get width of DataFrame
Example
use polars_core::prelude::*;
fn assert_width(df: &DataFrame, width: usize) {
assert_eq!(df.width(), width)
}
Get height of DataFrame
Example
use polars_core::prelude::*;
fn assert_height(df: &DataFrame, height: usize) {
assert_eq!(df.height(), height)
}
Add multiple Series to a DataFrame. The added Series are required to have the same length.
Example
use polars_core::prelude::*;
fn stack(df: &mut DataFrame, columns: &[Series]) {
df.hstack_mut(columns);
}
Add multiple Series to a DataFrame. The added Series are required to have the same length.
Concatenate a DataFrame to this DataFrame and return the result as a newly allocated DataFrame.
Concatenate a DataFrame to this DataFrame
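Example
Assuming these two descriptions correspond to vstack and vstack_mut (the method names are not shown above), an in-place concatenation might look like this sketch:
use polars_core::prelude::*;
fn append_rows(df: &mut DataFrame, other: &DataFrame) -> Result<&mut DataFrame> {
    // Append the rows of `other` below the rows of `df`, in place.
    df.vstack_mut(other)
}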
Remove column by name
Example
use polars_core::prelude::*;
fn drop_column(df: &mut DataFrame, name: &str) -> Result<Series> {
df.drop_in_place(name)
}
Return a new DataFrame where all null values are dropped
Drop a column by name. This is a pure method and will return a new DataFrame instead of modifying the current one in place.
pub fn insert_at_idx<S>(
&mut self,
index: usize,
column: S
) -> Result<&mut DataFrame, PolarsError> where
S: IntoSeries,
Insert a new column at a given index
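Example
A minimal sketch based on the signature above:
use polars_core::prelude::*;
fn insert_first_column(df: &mut DataFrame, s: Series) -> Result<&mut DataFrame> {
    // Insert `s` as the new first column; its length must match the DataFrame height.
    df.insert_at_idx(0, s)
}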
pub fn with_column<S>(
&mut self,
column: S
) -> Result<&mut DataFrame, PolarsError> where
S: IntoSeries,
Add a new column to this DataFrame or replace an existing one.
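Example
A minimal sketch; the column name is hypothetical and the new Series must match the DataFrame height (three rows assumed here):
use polars_core::prelude::*;
fn add_flag_column(df: &mut DataFrame) -> Result<&mut DataFrame> {
    // Adds a "flag" column, or replaces it if one already exists.
    let s = Series::new("flag", &[1, 1, 1]);
    df.with_column(s)
}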
Get a row in the DataFrame. Beware, this is slow.
Example
use polars_core::prelude::*;
fn example(df: &mut DataFrame, idx: usize) -> Option<Vec<AnyValue>> {
df.get(idx)
}
Select a series by index.
Get column index of a series by name.
Select a single column by name.
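A minimal sketch of selecting a single column by name, as also used in the filter example further below; the column name is hypothetical:
use polars_core::prelude::*;
fn get_column(df: &DataFrame) -> Result<&Series> {
    df.column("my-column")
}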
pub fn columns<I, S>(
&self,
names: I
) -> Result<Vec<&Series, Global>, PolarsError> where
I: IntoIterator<Item = S>,
S: AsRef<str>,
Select multiple columns by name.
Select column(s) from this DataFrame and return a new DataFrame.
Examples
use polars_core::prelude::*;
fn example(df: &DataFrame, possible: &str) -> Result<DataFrame> {
match possible {
"by_str" => df.select("my-column"),
"by_tuple" => df.select(("col_1", "col_2")),
"by_vec" => df.select(vec!["col_a", "col_b"]),
_ => unimplemented!()
}
}
pub fn select_series<'a, S, J>(
&self,
selection: S
) -> Result<Vec<Series, Global>, PolarsError> where
S: Selection<'a, J>,
Select column(s) from this DataFrame and return them as a Vec.
Take DataFrame rows by a boolean mask.
Example
use polars_core::prelude::*;
fn example(df: &DataFrame) -> Result<DataFrame> {
let mask = df.column("sepal.width")?.is_not_null();
df.filter(&mask)
}
Take DataFrame values by indexes from an iterator.
Example
use polars_core::prelude::*;
fn example(df: &DataFrame) -> Result<DataFrame> {
let iterator = (0..9).into_iter();
df.take_iter(iterator)
}
Take DataFrame values by indexes from an iterator.
Safety
This doesn't do any bounds checking but checks null validity.
Take DataFrame values by indexes from an iterator that may contain None values.
Safety
This doesn't do any bounds checking. Out-of-bounds indexes may access uninitialized memory. Null validity is checked.
Take DataFrame rows by index values.
Example
use polars_core::prelude::*;
fn example(df: &DataFrame) -> Result<DataFrame> {
let idx = UInt32Chunked::new_from_slice("idx", &[0, 1, 9]);
df.take(&idx)
}
Rename a column in the DataFrame
Example
use polars_core::prelude::*;
fn example(df: &mut DataFrame) -> Result<&mut DataFrame> {
let original_name = "foo";
let new_name = "bar";
df.rename(original_name, new_name)
}
pub fn sort_in_place(
&mut self,
by_column: &str,
reverse: bool
) -> Result<&mut DataFrame, PolarsError>
Sort DataFrame in place by a column.
Return a sorted clone of this DataFrame.
Example
use polars_core::prelude::*;
fn sort_example(df: &DataFrame, reverse: bool) -> Result<DataFrame> {
df.sort("a", reverse)
}
fn sort_by_multiple_columns_example(df: &DataFrame) -> Result<DataFrame> {
df.sort(&["a", "b"], vec![false, true])
}
pub fn replace<S>(
&mut self,
column: &str,
new_col: S
) -> Result<&mut DataFrame, PolarsError> where
S: IntoSeries,
Replace a column with a series.
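Example
A minimal sketch based on the signature above; the column name and values are hypothetical and the new Series must have the same length as the existing column:
use polars_core::prelude::*;
fn replace_values_column(df: &mut DataFrame) -> Result<&mut DataFrame> {
    // Replace the existing "values" column with a new Series.
    let new = Series::new("values", &[10, 20, 30]);
    df.replace("values", new)
}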
pub fn replace_or_add<S>(
&mut self,
column: &str,
new_col: S
) -> Result<&mut DataFrame, PolarsError> where
S: IntoSeries,
Replace or update a column. The difference between this method and DataFrame::with_column is that the value of column: &str determines the name of the column, not the name of the Series passed to this method.
pub fn replace_at_idx<S>(
&mut self,
idx: usize,
new_col: S
) -> Result<&mut DataFrame, PolarsError> where
S: IntoSeries,
Replace column at index idx with a series.
Example
use polars_core::prelude::*;
let s0 = Series::new("foo", &["ham", "spam", "egg"]);
let s1 = Series::new("ascii", &[70, 79, 79]);
let mut df = DataFrame::new(vec![s0, s1]).unwrap();
// Add 32 to get lowercase ascii values
df.replace_at_idx(1, df.select_at_idx(1).unwrap() + 32);
pub fn apply<F, S>(
&mut self,
column: &str,
f: F
) -> Result<&mut DataFrame, PolarsError> where
F: FnOnce(&Series) -> S,
S: IntoSeries,
Apply a closure to a column. This is the recommended way to do in place modification.
Example
use polars_core::prelude::*;
let s0 = Series::new("foo", &["ham", "spam", "egg"]);
let s1 = Series::new("names", &["Jean", "Claude", "van"]);
let mut df = DataFrame::new(vec![s0, s1]).unwrap();
fn str_to_len(str_val: &Series) -> Series {
str_val.utf8()
.unwrap()
.into_iter()
.map(|opt_name: Option<&str>| {
opt_name.map(|name: &str| name.len() as u32)
})
.collect::<UInt32Chunked>()
.into_series()
}
// Replace the names column by the length of the names.
df.apply("names", str_to_len);
Results in:
+--------+-------+
| foo | names |
| --- | --- |
| str | u32 |
+========+=======+
| "ham" | 4 |
+--------+-------+
| "spam" | 6 |
+--------+-------+
| "egg" | 3 |
+--------+-------+
pub fn apply_at_idx<F, S>(
&mut self,
idx: usize,
f: F
) -> Result<&mut DataFrame, PolarsError> where
F: FnOnce(&Series) -> S,
S: IntoSeries,
Apply a closure to a column at index idx. This is the recommended way to do in place modification.
Example
use polars_core::prelude::*;
let s0 = Series::new("foo", &["ham", "spam", "egg"]);
let s1 = Series::new("ascii", &[70, 79, 79]);
let mut df = DataFrame::new(vec![s0, s1]).unwrap();
// Add 32 to get lowercase ascii values
df.apply_at_idx(1, |s| s + 32);
Results in:
+--------+-------+
| foo | ascii |
| --- | --- |
| str | i32 |
+========+=======+
| "ham" | 102 |
+--------+-------+
| "spam" | 111 |
+--------+-------+
| "egg" | 111 |
+--------+-------+
pub fn may_apply_at_idx<F, S>(
&mut self,
idx: usize,
f: F
) -> Result<&mut DataFrame, PolarsError> where
F: FnOnce(&Series) -> Result<S, PolarsError>,
S: IntoSeries,
Apply a closure that may fail to a column at index idx. This is the recommended way to do in place modification.
Example
This is the idiomatic way to replace some values in a column of a DataFrame given a range of indexes.
let s0 = Series::new("foo", &["ham", "spam", "egg", "bacon", "quack"]);
let s1 = Series::new("values", &[1, 2, 3, 4, 5]);
let mut df = DataFrame::new(vec![s0, s1]).unwrap();
let idx = vec![0, 1, 4];
df.may_apply("foo", |s| {
s.utf8()?
.set_at_idx_with(idx, |opt_val| opt_val.map(|string| format!("{}-is-modified", string)))
});
Results in:
+---------------------+--------+
| foo | values |
| --- | --- |
| str | i32 |
+=====================+========+
| "ham-is-modified" | 1 |
+---------------------+--------+
| "spam-is-modified" | 2 |
+---------------------+--------+
| "egg" | 3 |
+---------------------+--------+
| "bacon" | 4 |
+---------------------+--------+
| "quack-is-modified" | 5 |
+---------------------+--------+
pub fn may_apply<F, S>(
&mut self,
column: &str,
f: F
) -> Result<&mut DataFrame, PolarsError> where
F: FnOnce(&Series) -> Result<S, PolarsError>,
S: IntoSeries,
Apply a closure that may fail to a column. This is the recommended way to do in place modification.
Example
This is the idiomatic way to replace some values in a column of a DataFrame given a boolean mask.
let s0 = Series::new("foo", &["ham", "spam", "egg", "bacon", "quack"]);
let s1 = Series::new("values", &[1, 2, 3, 4, 5]);
let mut df = DataFrame::new(vec![s0, s1]).unwrap();
// create a mask
let values = df.column("values").unwrap();
let mask = values.lt_eq(1) | values.gt_eq(5);
df.may_apply("foo", |s| {
s.utf8()?
.set(&mask, Some("not_within_bounds"))
});
Results in:
+---------------------+--------+
| foo | values |
| --- | --- |
| str | i32 |
+=====================+========+
| "not_within_bounds" | 1 |
+---------------------+--------+
| "spam" | 2 |
+---------------------+--------+
| "egg" | 3 |
+---------------------+--------+
| "bacon" | 4 |
+---------------------+--------+
| "not_within_bounds" | 5 |
+---------------------+--------+
Slice the DataFrame along the rows.
Transform the underlying chunks in the DataFrame to Arrow RecordBatches
Iterator over the rows in this DataFrame as Arrow RecordBatches.
Shift the values by a given period and fill the parts that will be empty due to this operation with Nones. See the method on Series for more info on the shift operation.
Replace None values with one of the following strategies:
- Forward fill (replace None with the previous value)
- Backward fill (replace None with the next value)
- Mean fill (replace None with the mean of the whole array)
- Min fill (replace None with the minimum of the whole array)
- Max fill (replace None with the maximum of the whole array)
See the method on Series for more info on the fill_null operation.
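Example
A minimal sketch, assuming the strategies listed above map to a FillNullStrategy enum with a Forward variant, as in the Series fill_null API:
use polars_core::prelude::*;
fn forward_fill(df: &DataFrame) -> Result<DataFrame> {
    // Replace every None with the previous non-null value in its column (forward fill).
    df.fill_null(FillNullStrategy::Forward)
}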
Aggregate the columns to their quantile values.
Aggregate the columns horizontally to their min values.
Aggregate the columns horizontally to their max values.
Aggregate the columns horizontally to their sum values.
Aggregate the columns horizontally to their mean values.
pub fn pipe<F, B>(self, f: F) -> Result<B, PolarsError> where
F: Fn(DataFrame) -> Result<B, PolarsError>,
Pipe different functions/closure operations that work on a DataFrame together.
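Example
A minimal sketch that chains an operation documented elsewhere on this page through pipe:
use polars_core::prelude::*;
fn pipeline(df: DataFrame) -> Result<DataFrame> {
    // `pipe` consumes the DataFrame and hands it to the closure.
    df.pipe(|df| df.drop_duplicates(true, None))
}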
pub fn pipe_mut<F, B>(&mut self, f: F) -> Result<B, PolarsError> where
F: Fn(&mut DataFrame) -> Result<B, PolarsError>,
Pipe different functions/closure operations that work on a DataFrame together.
pub fn pipe_with_args<F, B, Args>(
self,
f: F,
args: Args
) -> Result<B, PolarsError> where
F: Fn(DataFrame, Args) -> Result<B, PolarsError>,
Pipe different functions/closure operations that work on a DataFrame together.
Create dummy variables.
Example
use polars_core::prelude::*;
let df = df! {
"id" => &[1, 2, 3, 1, 2, 3, 1, 1],
"type" => &["A", "B", "B", "B", "C", "C", "C", "B"],
"code" => &["X1", "X2", "X3", "X3", "X2", "X2", "X1", "X1"]
}.unwrap();
let dummies = df.to_dummies().unwrap();
dbg!(dummies);
Outputs:
+------+------+------+--------+--------+--------+---------+---------+---------+
| id_1 | id_3 | id_2 | type_A | type_B | type_C | code_X1 | code_X2 | code_X3 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| u8 | u8 | u8 | u8 | u8 | u8 | u8 | u8 | u8 |
+======+======+======+========+========+========+=========+=========+=========+
| 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 |
+------+------+------+--------+--------+--------+---------+---------+---------+
| 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 |
+------+------+------+--------+--------+--------+---------+---------+---------+
| 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
+------+------+------+--------+--------+--------+---------+---------+---------+
| 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
+------+------+------+--------+--------+--------+---------+---------+---------+
| 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 |
+------+------+------+--------+--------+--------+---------+---------+---------+
| 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 |
+------+------+------+--------+--------+--------+---------+---------+---------+
| 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 |
+------+------+------+--------+--------+--------+---------+---------+---------+
| 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 |
+------+------+------+--------+--------+--------+---------+---------+---------+
pub fn drop_duplicates(
&self,
maintain_order: bool,
subset: Option<&[String]>
) -> Result<DataFrame, PolarsError>
Drop duplicate rows from a DataFrame. This fails when there is a column of type List in the DataFrame.
Example
use polars_core::prelude::*;
fn example() -> Result<DataFrame> {
let df = df! {
"flt" => [1., 1., 2., 2., 3., 3.],
"int" => [1, 1, 2, 2, 3, 3, ],
"str" => ["a", "a", "b", "b", "c", "c"]
}?;
df.drop_duplicates(true, None)
}
Returns
+-----+-----+-----+
| flt | int | str |
| --- | --- | --- |
| f64 | i32 | str |
+=====+=====+=====+
| 1 | 1 | "a" |
+-----+-----+-----+
| 2 | 2 | "b" |
+-----+-----+-----+
| 3 | 3 | "c" |
+-----+-----+-----+
Get a mask of all the unique rows in the DataFrame.
Get a mask of all the duplicated rows in the DataFrame.
Create a new DataFrame that shows the null counts per column.
Check if DataFrames are equal. Note that None == None evaluates to false.
Check if all values in DataFrames are equal where None == None evaluates to true.
Trait Implementations
Conversion from Vec
type Error = PolarsError
The type returned in the event of a conversion error.
Performs the conversion.
Conversion from Vec
If the batch size is small it might be advisable to call rechunk to ensure predictable performance.
type Error = PolarsError
The type returned in the event of a conversion error.
Performs the conversion.
Auto Trait Implementations
impl !RefUnwindSafe for DataFrame
impl !UnwindSafe for DataFrame
Blanket Implementations
Mutably borrows from an owned value.
Decode u8 slices as UTF-8 and iterate over the codepoints as Utf8Chars.