nabla_ml::nab_optimizers

Struct NablaOptimizer

Source
pub struct NablaOptimizer;

Implementations§

Source§

impl NablaOptimizer

Source

pub fn sgd_update(weights: &mut NDArray, gradient: &NDArray, learning_rate: f64)

Performs Stochastic Gradient Descent (SGD) update

w = w - learning_rate * gradient

§Arguments
  • weights - NDArray of current weights to update
  • gradient - NDArray of gradients for the weights
  • learning_rate - Learning rate for the update
§Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_optimizers::NablaOptimizer;

let mut weights = NDArray::from_vec(vec![1.0, 2.0, 3.0]);
let gradients = NDArray::from_vec(vec![0.1, 0.2, 0.3]);
let learning_rate = 0.1;

NablaOptimizer::sgd_update(&mut weights, &gradients, learning_rate);
Source

pub fn sgd_momentum_update( weights: &mut NDArray, gradient: &NDArray, velocity: &mut NDArray, learning_rate: f64, momentum: f64, )

Performs SGD update with momentum

v = momentum * v - learning_rate * gradient
w = w + v

§Arguments
  • weights - NDArray of current weights to update
  • gradient - NDArray of gradients for the weights
  • velocity - Mutable reference to momentum velocity
  • learning_rate - Learning rate for the update
  • momentum - Momentum coefficient (default: 0.9)
Source

pub fn rmsprop_update( weights: &mut NDArray, gradient: &NDArray, cache: &mut NDArray, learning_rate: f64, decay_rate: f64, epsilon: f64, )

Performs RMSprop update

cache = decay_rate * cache + (1 - decay_rate) * gradient^2
w = w - learning_rate * gradient / (sqrt(cache) + epsilon)

§Arguments
  • weights - NDArray of current weights to update
  • gradient - NDArray of gradients for the weights
  • cache - Running average of squared gradients
  • learning_rate - Learning rate for the update
  • decay_rate - Decay rate for running average (default: 0.9)
  • epsilon - Small value for numerical stability (default: 1e-8)
§Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_optimizers::NablaOptimizer;

let mut weights = NDArray::from_vec(vec![1.0, 2.0, 3.0]);
let gradients = NDArray::from_vec(vec![0.1, 0.2, 0.3]);
let mut cache = NDArray::zeros(vec![3]);
let learning_rate = 0.01;
let decay_rate = 0.9;
let epsilon = 1e-8;

NablaOptimizer::rmsprop_update(
    &mut weights, 
    &gradients, 
    &mut cache,
    learning_rate,
    decay_rate,
    epsilon
);
Source

pub fn adam_update( weights: &mut NDArray, gradient: &NDArray, m: &mut NDArray, v: &mut NDArray, t: usize, learning_rate: f64, beta1: f64, beta2: f64, epsilon: f64, )

Performs Adam (Adaptive Moment Estimation) update

m = beta1 * m + (1 - beta1) * gradient      // Update first moment
v = beta2 * v + (1 - beta2) * gradient^2    // Update second moment
m_hat = m / (1 - beta1^t)                   // Bias correction
v_hat = v / (1 - beta2^t)                   // Bias correction
w = w - learning_rate * m_hat / (sqrt(v_hat) + epsilon)

§Arguments
  • weights - NDArray of current weights to update
  • gradient - NDArray of gradients for the weights
  • m - First moment vector (momentum)
  • v - Second moment vector (uncentered variance)
  • t - Current timestep (starting from 1)
  • learning_rate - Learning rate for the update
  • beta1 - Exponential decay rate for first moment (default: 0.9)
  • beta2 - Exponential decay rate for second moment (default: 0.999)
  • epsilon - Small value for numerical stability (default: 1e-8)
§Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_optimizers::NablaOptimizer;

let mut weights = NDArray::from_vec(vec![1.0, 2.0, 3.0]);
let gradients = NDArray::from_vec(vec![0.1, 0.2, 0.3]);
let mut m = NDArray::zeros(vec![3]);
let mut v = NDArray::zeros(vec![3]);
let t = 1;
let learning_rate = 0.001;
let beta1 = 0.9;
let beta2 = 0.999;
let epsilon = 1e-8;

NablaOptimizer::adam_update(
    &mut weights,
    &gradients,
    &mut m,
    &mut v,
    t,
    learning_rate,
    beta1,
    beta2,
    epsilon
);

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V