pub struct NablaActivation;
Implementations
impl NablaActivation
pub fn relu_forward(x: &NDArray) -> NDArray
Applies the Rectified Linear Unit (ReLU) activation function in the forward pass
ReLU(x) = max(0, x)
Arguments
- x: Input NDArray
Returns
NDArray with ReLU activation applied element-wise
Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;
let x = NDArray::from_vec(vec![-1.0, 0.0, 2.0]);
let output = NablaActivation::relu_forward(&x);
assert_eq!(output.data(), &[0.0, 0.0, 2.0]);
pub fn relu_backward(gradient: &NDArray, x: &NDArray) -> NDArray
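The standard ReLU derivative is 1 for x > 0 and 0 otherwise. A minimal usage sketch, assuming this backward pass zeroes the incoming gradient wherever the original input is non-positive:
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;
let x = NDArray::from_vec(vec![-1.0, 0.0, 2.0]);
let gradient = NDArray::from_vec(vec![1.0, 1.0, 1.0]);
let dx = NablaActivation::relu_backward(&gradient, &x);
// Under the assumption above, the gradient passes through only where x > 0,
// e.g. dx.data() == &[0.0, 0.0, 1.0]
assert_eq!(dx.data().len(), x.data().len());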
pub fn softmax_forward(x: &NDArray, axis: Option<usize>) -> NDArray
Applies the Softmax activation function in the forward pass
Softmax(x)_i = exp(x_i) / sum(exp(x_j))
Arguments
- x: Input NDArray
- axis: Optional axis along which to apply softmax
Returns
NDArray with softmax probabilities that sum to 1
Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;
let x = NDArray::from_vec(vec![1.0, 2.0, 3.0]);
let output = NablaActivation::softmax_forward(&x, None);
let sum: f64 = output.data().iter().sum();
assert!((sum - 1.0).abs() < 1e-6);
pub fn softmax_backward(gradient: &NDArray, _output: &NDArray) -> NDArray
Computes the gradient for Softmax activation in the backward pass
Note: For numerical stability, the actual softmax gradient computation is typically combined with the loss function gradient.
Arguments
- gradient: Gradient from the loss function
- output: Output from the softmax forward pass
Returns
NDArray containing the gradients for backpropagation
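A minimal usage sketch; per the note above, the exact values returned depend on how this backward pass is paired with the loss gradient, so only the shape is checked here:
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;
let x = NDArray::from_vec(vec![1.0, 2.0, 3.0]);
let probs = NablaActivation::softmax_forward(&x, None);
// Illustrative gradient arriving from the loss function
let gradient = NDArray::from_vec(vec![0.1, -0.2, 0.1]);
let dx = NablaActivation::softmax_backward(&gradient, &probs);
// The result has one gradient entry per input element
assert_eq!(dx.data().len(), x.data().len());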
pub fn sigmoid_forward(x: &NDArray) -> NDArray
Applies the Sigmoid activation function in the forward pass
sigmoid(x) = 1 / (1 + exp(-x))
Arguments
- x: Input NDArray
Returns
NDArray with values squashed between 0 and 1
Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;
let x = NDArray::from_vec(vec![-1.0, 0.0, 1.0]);
let output = NablaActivation::sigmoid_forward(&x);
// Values should be between 0 and 1
for &val in output.data() {
assert!(val > 0.0 && val < 1.0);
}
pub fn sigmoid_backward(gradient: &NDArray, output: &NDArray) -> NDArray
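The standard sigmoid derivative is sigmoid(x) * (1 - sigmoid(x)), which is presumably why this backward pass takes the forward output rather than the original input. A minimal usage sketch, assuming the incoming gradient is multiplied by output * (1 - output):
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;
let x = NDArray::from_vec(vec![0.0]);
let output = NablaActivation::sigmoid_forward(&x);
let gradient = NDArray::from_vec(vec![1.0]);
let dx = NablaActivation::sigmoid_backward(&gradient, &output);
// sigmoid(0) = 0.5, so under the assumption above the gradient is 0.5 * (1 - 0.5) = 0.25
assert!((dx.data()[0] - 0.25).abs() < 1e-6);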
pub fn leaky_relu_forward(x: &NDArray, alpha: Option<f64>) -> NDArray
Applies the Leaky ReLU activation function in the forward pass
leaky_relu(x) = x if x > 0, else alpha * x
Arguments
- x: Input NDArray
- alpha: Slope for negative values (default: 0.01)
Returns
NDArray with Leaky ReLU activation applied element-wise
Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;
let x = NDArray::from_vec(vec![-2.0, 0.0, 2.0]);
let output = NablaActivation::leaky_relu_forward(&x, Some(0.1));
// Negative values are scaled by alpha
assert_eq!(output.data()[0], -0.2);
// Positive values remain unchanged
assert_eq!(output.data()[2], 2.0);
pub fn leaky_relu_backward(gradient: &NDArray, x: &NDArray, alpha: Option<f64>) -> NDArray
Computes the gradient for Leaky ReLU activation in the backward pass
leaky_relu’(x) = 1 if x > 0, else alpha
Arguments
- gradient: Gradient from the next layer
- x: Original input to the Leaky ReLU function
- alpha: Slope for negative values (default: 0.01)
Returns
NDArray containing the gradients for backpropagation
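A minimal usage sketch, assuming the incoming gradient is scaled by 1 where x > 0 and by alpha elsewhere, per the derivative above:
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;
let x = NDArray::from_vec(vec![-2.0, 2.0]);
let gradient = NDArray::from_vec(vec![1.0, 1.0]);
let dx = NablaActivation::leaky_relu_backward(&gradient, &x, Some(0.1));
// Under the assumption above: dx.data() == &[0.1, 1.0]
assert_eq!(dx.data().len(), x.data().len());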
pub fn tanh_forward(x: &NDArray) -> NDArray
Applies the Hyperbolic Tangent (tanh) activation function in the forward pass
tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
Arguments
- x: Input NDArray
Returns
NDArray with values squashed between -1 and 1
Example
use nabla_ml::nab_array::NDArray;
use nabla_ml::nab_activations::NablaActivation;
let x = NDArray::from_vec(vec![-1.0, 0.0, 1.0]);
let output = NablaActivation::tanh_forward(&x);
// Values should be between -1 and 1
for &val in output.data() {
assert!(val >= -1.0 && val <= 1.0);
}