Skip to content
Merged
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
171 changes: 144 additions & 27 deletions crates/lambda-rs/src/math/matrix.rs
Original file line number Diff line number Diff line change
Expand Up @@ -259,6 +259,62 @@ pub fn identity_matrix<
return result;
}

/// Compute determinant via Gaussian elimination on fixed-size stack storage.
///
/// This avoids heap allocations in hot fixed-size paths (for example 4x4
/// transform matrices).
///
/// # Arguments
///
/// - `data`: Square matrix values stored in stack-allocated fixed-size arrays.
///
/// # Returns
///
/// - Determinant of `data`.
/// - `0.0` when elimination encounters an exactly-zero pivot column, meaning
///   the matrix is singular.
fn determinant_gaussian_stack<const N: usize>(mut data: [[f32; N]; N]) -> f32 {
  let mut sign = 1.0_f32;
  for pivot in 0..N {
    // Partial pivoting: pick the row with the largest magnitude entry in the
    // current column to improve numerical stability.
    let mut pivot_row = pivot;
    let mut pivot_abs = data[pivot][pivot].abs();
    for (candidate, row) in data.iter().enumerate().skip(pivot + 1) {
      let candidate_abs = row[pivot].abs();
      if candidate_abs > pivot_abs {
        pivot_abs = candidate_abs;
        pivot_row = candidate;
      }
    }

    // An exactly-zero pivot column means the matrix is singular. An exact
    // comparison is used instead of an absolute epsilon threshold: comparing
    // against `f32::EPSILON` would incorrectly report 0.0 for well-formed
    // matrices whose entries are merely small in magnitude (for example a
    // diagonal matrix with entries around 1e-8 has a non-zero determinant).
    if pivot_abs == 0.0 {
      return 0.0;
    }

    if pivot_row != pivot {
      data.swap(pivot, pivot_row);
      // Swapping two rows negates the determinant.
      sign = -sign;
    }

    let pivot_value = data[pivot][pivot];
    // Copy the pivot row by value so `data` can be borrowed mutably below.
    let pivot_tail = data[pivot];
    for row in data.iter_mut().skip(pivot + 1) {
      let factor = row[pivot] / pivot_value;
      if factor == 0.0 {
        continue;
      }
      for (dst, src) in row[pivot..].iter_mut().zip(pivot_tail[pivot..].iter())
      {
        *dst -= factor * *src;
      }
    }
  }

  // The matrix is now upper triangular; its determinant is the product of
  // the diagonal, adjusted by the sign accumulated from row swaps.
  let mut det = sign;
  for (i, row) in data.iter().enumerate().take(N) {
    det *= row[i];
  }
  return det;
}

// -------------------------- ARRAY IMPLEMENTATION -----------------------------

/// Matrix implementations for arrays of f32 arrays. Including the trait Matrix into
Expand Down Expand Up @@ -325,7 +381,15 @@ where
todo!()
}

/// Computes the determinant of any square matrix using Laplace expansion.
/// Computes the determinant of any square matrix using Gaussian elimination
/// with partial pivoting.
///
/// # Behavior
///
/// - Uses stack-allocated elimination for 3x3 and 4x4 matrices to avoid heap
/// allocation on common transform sizes.
/// - Uses a generic dense fallback for larger matrices.
/// - Overall asymptotic runtime is `O(n^3)` for square `n x n` matrices.
fn determinant(&self) -> Result<f32, MathError> {
let rows = self.as_ref().len();
if rows == 0 {
Expand All @@ -341,34 +405,88 @@ where
return Err(MathError::NonSquareMatrix { rows, cols });
}

return match rows {
1 => Ok(self.as_ref()[0].as_ref()[0]),
2 => {
let a = self.at(0, 0);
let b = self.at(0, 1);
let c = self.at(1, 0);
let d = self.at(1, 1);
return Ok(a * d - b * c);
if rows == 1 {
return Ok(self.as_ref()[0].as_ref()[0]);
}

if rows == 2 {
let a = self.at(0, 0);
let b = self.at(0, 1);
let c = self.at(1, 0);
let d = self.at(1, 1);
return Ok(a * d - b * c);
}

if rows == 3 {
let mut data = [[0.0_f32; 3]; 3];
for (i, row) in data.iter_mut().enumerate() {
for (j, value) in row.iter_mut().enumerate() {
*value = self.at(i, j);
}
}
return Ok(determinant_gaussian_stack::<3>(data));
}

if rows == 4 {
let mut data = [[0.0_f32; 4]; 4];
for (i, row) in data.iter_mut().enumerate() {
for (j, value) in row.iter_mut().enumerate() {
*value = self.at(i, j);
}
}
return Ok(determinant_gaussian_stack::<4>(data));
}
Comment on lines +431 to +439
Copy link

Copilot AI Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

New 4x4 stack fast-path and the >4 generic elimination path aren’t covered by tests in this module (current determinant tests only exercise 2x2 and 3x3). Please add tests for at least one 4x4 determinant and one larger matrix (e.g., 5x5) to validate correctness and pivoting/sign behavior.

Copilot uses AI. Check for mistakes.

// Use a contiguous row-major buffer for larger matrices to keep the
// fallback cache-friendlier than `Vec<Vec<f32>>`.
let mut working = vec![0.0_f32; rows * rows];
for i in 0..rows {
for j in 0..rows {
working[i * rows + j] = self.at(i, j);
}
}

let mut sign = 1.0_f32;
for pivot in 0..rows {
// Partial pivoting improves numerical stability and avoids division by 0.
let mut pivot_row = pivot;
let mut pivot_abs = working[pivot * rows + pivot].abs();
for candidate in (pivot + 1)..rows {
let candidate_abs = working[candidate * rows + pivot].abs();
if candidate_abs > pivot_abs {
pivot_abs = candidate_abs;
pivot_row = candidate;
}
}

if pivot_abs <= f32::EPSILON {
return Ok(0.0);
}
Copy link

Copilot AI Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The fallback elimination path also treats pivot_abs <= f32::EPSILON as singular. This is an absolute threshold and can make determinant() return 0.0 for non-singular matrices with small-magnitude values. Use an exact-zero check or a relative tolerance derived from the matrix magnitude to avoid incorrect zero determinants.

Copilot uses AI. Check for mistakes.

if pivot_row != pivot {
for column in 0..rows {
working.swap(pivot * rows + column, pivot_row * rows + column);
}
sign = -sign;
}
_ => {
let mut result = 0.0;
for i in 0..rows {
let mut submatrix: Vec<Vec<f32>> = Vec::with_capacity(rows - 1);
for j in 1..rows {
let mut row = Vec::new();
for k in 0..rows {
if k != i {
row.push(self.at(j, k));
}
}
submatrix.push(row);
}
let sub_determinant = submatrix.determinant()?;
result += self.at(0, i) * sub_determinant * (-1.0_f32).powi(i as i32);

let pivot_value = working[pivot * rows + pivot];
for row in (pivot + 1)..rows {
let factor = working[row * rows + pivot] / pivot_value;
if factor == 0.0 {
continue;
}
for column in pivot..rows {
let row_idx = row * rows + column;
let pivot_idx = pivot * rows + column;
working[row_idx] -= factor * working[pivot_idx];
}
return Ok(result);
}
};
}

let diagonal_product =
(0..rows).map(|i| working[i * rows + i]).product::<f32>();
return Ok(sign * diagonal_product);
}

/// Return the size as a (rows, columns).
Expand All @@ -394,7 +512,6 @@ where

#[cfg(test)]
mod tests {

use super::{
filled_matrix,
identity_matrix,
Expand Down
Loading