pub struct NabLayer {
pub weights: Option<NDArray>,
pub biases: Option<NDArray>,
pub weight_gradients: Option<NDArray>,
pub bias_gradients: Option<NDArray>,
pub node_index: Option<usize>,
/* private fields */
}
Expand description
Represents a layer’s configuration and state
Fields
weights: Option<NDArray>
Layer weights (if any)
biases: Option<NDArray>
Layer biases (if any)
weight_gradients: Option<NDArray>
Weight gradients for optimization
bias_gradients: Option<NDArray>
Bias gradients for optimization
node_index: Option<usize>
Index of this layer's node (presumably assigned when the layer is added to a model — description missing from the original docs; verify against the crate source)
Implementations
impl NabLayer
pub fn dense(
input_dim: usize,
units: usize,
activation: Option<&str>,
name: Option<&str>,
) -> Self
pub fn dense( input_dim: usize, units: usize, activation: Option<&str>, name: Option<&str>, ) -> Self
Creates a new Dense (fully connected) layer
§Arguments
- `input_dim` - Number of input features
- `units` - Number of output units
- `activation` - Optional activation function ("relu", "sigmoid", "tanh", etc.)
- `name` - Optional name for the layer
§Example
use nabla_ml::nab_layers::NabLayer;
// Dense layer with ReLU activation
let dense = NabLayer::dense(784, 128, Some("relu"), Some("hidden_1"));
pub fn activation(
activation_type: &str,
input_shape: Vec<usize>,
name: Option<&str>,
) -> Self
pub fn activation( activation_type: &str, input_shape: Vec<usize>, name: Option<&str>, ) -> Self
Creates a new Activation layer
§Arguments
- `activation_type` - Type of activation ("relu", "sigmoid", "tanh", etc.)
- `input_shape` - Shape of the input (excluding batch dimension)
- `name` - Optional name for the layer
§Example
use nabla_ml::nab_layers::NabLayer;
let relu = NabLayer::activation("relu", vec![128], Some("relu_1"));
assert_eq!(relu.get_output_shape(), &[128]);
pub fn flatten(input_shape: Vec<usize>, name: Option<&str>) -> Self
pub fn flatten(input_shape: Vec<usize>, name: Option<&str>) -> Self
Creates a new Flatten layer
Flattens the input while keeping the batch size. For example: (batch_size, height, width, channels) -> (batch_size, height * width * channels)
§Arguments
- `input_shape` - Shape of the input (excluding batch dimension)
- `name` - Optional name for the layer
§Example
use nabla_ml::nab_layers::NabLayer;
// Flatten a 28x28x1 image to 784 features
let flatten = NabLayer::flatten(vec![28, 28, 1], Some("flatten_1"));
assert_eq!(flatten.get_output_shape(), &[784]);
pub fn dropout(input_shape: Vec<usize>, rate: f64, name: Option<&str>) -> Self
pub fn dropout(input_shape: Vec<usize>, rate: f64, name: Option<&str>) -> Self
Creates a new Dropout layer
Randomly sets input units to 0 with a probability of rate during training. During inference (training=false), the layer behaves like an identity function.
§Arguments
- `input_shape` - Shape of the input (excluding batch dimension)
- `rate` - Dropout rate between 0 and 1 (e.g., 0.5 means 50% of units are dropped)
- `name` - Optional name for the layer
§Example
use nabla_ml::nab_layers::NabLayer;
// Dropout with 50% rate
let dropout = NabLayer::dropout(vec![128], 0.5, Some("dropout_1"));
assert_eq!(dropout.get_output_shape(), &[128]);
pub fn batch_norm(
input_shape: Vec<usize>,
epsilon: Option<f64>,
momentum: Option<f64>,
name: Option<&str>,
) -> Self
pub fn batch_norm( input_shape: Vec<usize>, epsilon: Option<f64>, momentum: Option<f64>, name: Option<&str>, ) -> Self
Creates a new BatchNormalization layer
Normalizes the activations of the previous layer for each batch. During training, uses batch statistics. During inference, uses running statistics.
§Arguments
- `input_shape` - Shape of the input (excluding batch dimension)
- `epsilon` - Small constant for numerical stability (default: 1e-5)
- `momentum` - Momentum for running statistics (default: 0.99)
- `name` - Optional name for the layer
§Example
use nabla_ml::nab_layers::NabLayer;
let bn = NabLayer::batch_norm(vec![128], None, None, Some("bn_1"));
assert_eq!(bn.get_output_shape(), &[128]);
pub fn forward(&mut self, input: &NDArray, training: bool) -> NDArray
pub fn forward(&mut self, input: &NDArray, training: bool) -> NDArray
Forward pass through the layer
pub fn get_output_shape(&self) -> &[usize]
pub fn get_output_shape(&self) -> &[usize]
Returns the output shape of the layer
pub fn is_trainable(&self) -> bool
pub fn is_trainable(&self) -> bool
Returns whether the layer is trainable
pub fn compute_output_shape(&self, input_shape: &[usize]) -> Vec<usize>
pub fn compute_output_shape(&self, input_shape: &[usize]) -> Vec<usize>
Computes output shape for a given input shape