nabla_ml::nab_activations

Struct NablaActivation

pub struct NablaActivation;

Implementations§

impl NablaActivation

pub fn relu_forward(x: &NDArray) -> NDArray

Applies the Rectified Linear Unit (ReLU) activation function in the forward pass

ReLU(x) = max(0, x)

§Arguments
  • x - Input NDArray
§Returns

NDArray with ReLU activation applied element-wise

§Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let x = NDArray::from_vec(vec![-1.0, 0.0, 2.0]);
let output = NablaActivation::relu_forward(&x);
assert_eq!(output.data(), &[0.0, 0.0, 2.0]);

pub fn relu_backward(gradient: &NDArray, x: &NDArray) -> NDArray

Computes the gradient for the ReLU activation in the backward pass

ReLU’(x) = 1 if x > 0, else 0

§Arguments
  • gradient - Gradient from the next layer
  • x - Original input to the ReLU function
§Returns

NDArray containing the gradients for backpropagation
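
§Example

A minimal sketch, assuming relu_backward scales the upstream gradient by 1 where x > 0 and by 0 elsewhere, per the formula above:

use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let x = NDArray::from_vec(vec![-1.0, 0.0, 2.0]);
let upstream = NDArray::from_vec(vec![0.5, 0.5, 0.5]);
let grad = NablaActivation::relu_backward(&upstream, &x);
// Gradient flows only where the original input was positive (assumed behavior)
assert_eq!(grad.data(), &[0.0, 0.0, 0.5]);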

pub fn softmax_forward(x: &NDArray, axis: Option<usize>) -> NDArray

Applies the Softmax activation function in the forward pass

Softmax(x)_i = exp(x_i) / sum(exp(x_j))

§Arguments
  • x - Input NDArray
  • axis - Optional axis along which to apply softmax
§Returns

NDArray with softmax probabilities that sum to 1

§Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let x = NDArray::from_vec(vec![1.0, 2.0, 3.0]);
let output = NablaActivation::softmax_forward(&x, None);
let sum: f64 = output.data().iter().sum();
assert!((sum - 1.0).abs() < 1e-6);
Source

pub fn softmax_backward(gradient: &NDArray, _output: &NDArray) -> NDArray

Computes the gradient for the Softmax activation in the backward pass

Note: For numerical stability, the actual softmax gradient computation is typically combined with the loss function gradient.

§Arguments
  • gradient - Gradient from the loss function
  • _output - Output from the softmax forward pass
§Returns

NDArray containing the gradients for backpropagation
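
§Example

A minimal sketch; per the note above, the real softmax gradient is usually folded into the loss gradient, so this example only checks that the call yields a gradient of the same length as the upstream gradient:

use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let logits = NDArray::from_vec(vec![1.0, 2.0, 3.0]);
let probs = NablaActivation::softmax_forward(&logits, None);
let upstream = NDArray::from_vec(vec![0.1, -0.2, 0.1]);
let grad = NablaActivation::softmax_backward(&upstream, &probs);
// One gradient entry per input element
assert_eq!(grad.data().len(), 3);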

pub fn sigmoid_forward(x: &NDArray) -> NDArray

Applies the Sigmoid activation function in the forward pass

sigmoid(x) = 1 / (1 + exp(-x))

§Arguments
  • x - Input NDArray
§Returns

NDArray with values squashed between 0 and 1

§Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let x = NDArray::from_vec(vec![-1.0, 0.0, 1.0]);
let output = NablaActivation::sigmoid_forward(&x);
// Values should be between 0 and 1
for &val in output.data() {
    assert!(val > 0.0 && val < 1.0);
}

pub fn sigmoid_backward(gradient: &NDArray, output: &NDArray) -> NDArray

Computes the gradient for the Sigmoid activation in the backward pass

sigmoid’(x) = sigmoid(x) * (1 - sigmoid(x))

§Arguments
  • gradient - Gradient from the next layer
  • output - Output from the sigmoid forward pass
§Returns

NDArray containing the gradients for backpropagation
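
§Example

A minimal sketch, assuming sigmoid_backward scales the upstream gradient by output * (1 - output), per the formula above:

use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let x = NDArray::from_vec(vec![0.0]);
let output = NablaActivation::sigmoid_forward(&x); // sigmoid(0) = 0.5
let upstream = NDArray::from_vec(vec![1.0]);
let grad = NablaActivation::sigmoid_backward(&upstream, &output);
// sigmoid'(0) = 0.5 * (1 - 0.5) = 0.25 (assumed behavior)
assert!((grad.data()[0] - 0.25).abs() < 1e-6);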

pub fn leaky_relu_forward(x: &NDArray, alpha: Option<f64>) -> NDArray

Applies the Leaky ReLU activation function in the forward pass

leaky_relu(x) = x if x > 0, else alpha * x

§Arguments
  • x - Input NDArray
  • alpha - Slope for negative values (default: 0.01)
§Returns

NDArray with Leaky ReLU activation applied element-wise

§Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let x = NDArray::from_vec(vec![-2.0, 0.0, 2.0]);
let output = NablaActivation::leaky_relu_forward(&x, Some(0.1));
// Negative values are scaled by alpha
assert_eq!(output.data()[0], -0.2);
// Positive values remain unchanged
assert_eq!(output.data()[2], 2.0);

pub fn leaky_relu_backward(gradient: &NDArray, x: &NDArray, alpha: Option<f64>) -> NDArray

Computes the gradient for the Leaky ReLU activation in the backward pass

leaky_relu’(x) = 1 if x > 0, else alpha

§Arguments
  • gradient - Gradient from the next layer
  • x - Original input to the Leaky ReLU function
  • alpha - Slope for negative values (default: 0.01)
§Returns

NDArray containing the gradients for backpropagation
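
§Example

A minimal sketch, assuming the upstream gradient is scaled by alpha for negative inputs and passed through unchanged for positive inputs, per the formula above:

use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let x = NDArray::from_vec(vec![-2.0, 3.0]);
let upstream = NDArray::from_vec(vec![1.0, 1.0]);
let grad = NablaActivation::leaky_relu_backward(&upstream, &x, Some(0.1));
// Negative input: gradient scaled by alpha; positive input: gradient unchanged (assumed behavior)
assert!((grad.data()[0] - 0.1).abs() < 1e-6);
assert!((grad.data()[1] - 1.0).abs() < 1e-6);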

pub fn tanh_forward(x: &NDArray) -> NDArray

Applies the Hyperbolic Tangent (tanh) activation function in the forward pass

tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))

§Arguments
  • x - Input NDArray
§Returns

NDArray with values squashed between -1 and 1

§Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let x = NDArray::from_vec(vec![-1.0, 0.0, 1.0]);
let output = NablaActivation::tanh_forward(&x);
// Values should be between -1 and 1
for &val in output.data() {
    assert!(val >= -1.0 && val <= 1.0);
}

pub fn tanh_backward(gradient: &NDArray, output: &NDArray) -> NDArray

Computes the gradient for the tanh activation in the backward pass

tanh’(x) = 1 - tanh²(x)

§Arguments
  • gradient - Gradient from the next layer
  • output - Output from the tanh forward pass
§Returns

NDArray containing the gradients for backpropagation
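
§Example

A minimal sketch, assuming tanh_backward scales the upstream gradient by 1 - tanh²(x), computed from the forward output, per the formula above:

use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;

let x = NDArray::from_vec(vec![0.0]);
let output = NablaActivation::tanh_forward(&x); // tanh(0) = 0
let upstream = NDArray::from_vec(vec![2.0]);
let grad = NablaActivation::tanh_backward(&upstream, &output);
// tanh'(0) = 1 - 0² = 1, so the upstream gradient passes through unchanged (assumed behavior)
assert!((grad.data()[0] - 2.0).abs() < 1e-6);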

Auto Trait Implementations§

Blanket Implementations§

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V