From 598f4d0e5373bda3f648ade72514664b5c22ea6a Mon Sep 17 00:00:00 2001 From: Adam Getchell Date: Wed, 29 Apr 2026 12:20:03 -0700 Subject: [PATCH 1/5] ci: enable repo Semgrep rules for issue #338 - Enable project-owned Semgrep rules in local checks, CodeRabbit, and Codacy/OpenGrep scanning. - Harden Semgrep execution with strict mode, a higher timeout, and fixture coverage for hot-path hash collection rules. - Replace flagged diagnostics and silent numeric fallbacks with explicit tracing, expectations, and typed Hilbert quantization errors. - Centralize Delaunay triangulation cache invalidation through the existing repair-cache helper. --- .codacy.yml | 6 +- .coderabbit.yml | 7 +- .github/workflows/codacy.yml | 26 +-- .semgrep.yaml | 167 +++++++++++++++++- justfile | 12 +- src/core/algorithms/pl_manifold_repair.rs | 4 +- src/core/util/hilbert.rs | 147 ++++++++------- src/core/vertex.rs | 7 +- src/geometry/util/measures.rs | 40 ++--- src/triangulation/delaunay.rs | 3 +- .../algorithms/no_std_hash_collections.rs | 55 ++++++ 11 files changed, 354 insertions(+), 120 deletions(-) create mode 100644 tests/semgrep/src/core/algorithms/no_std_hash_collections.rs diff --git a/.codacy.yml b/.codacy.yml index 846bf3ca..0d3565bc 100644 --- a/.codacy.yml +++ b/.codacy.yml @@ -6,6 +6,7 @@ # # Recommended enabled tools: # - markdownlint: documentation linting +# - opengrep: repository-owned Rust Semgrep rules from .semgrep.yaml # - ruff: Python utility linting # - shellcheck: shell-script linting # - duplication: advisory duplicate-code metric @@ -13,8 +14,6 @@ # # Recommended disabled tools: # - bandit, prospector, pylintpython3: redundant with Ruff/ty for this repo -# - opengrep: keep broad/default rules disabled until repo-owned rules in -# .semgrep.yaml are cleaned up and enabled # - trivy: dependency vulnerability scanning is handled by cargo-audit and # Dependabot unless this repository adds containers or IaC # - jacksonlinter, spectral: not relevant to the current project 
surface @@ -50,6 +49,9 @@ engines: - "**/*.md" config: file: ".markdownlint.json" + opengrep: + include_paths: + - "src/**/*.rs" ruff: include_paths: - "scripts/**/*.py" diff --git a/.coderabbit.yml b/.coderabbit.yml index 1613e64b..f20f3850 100644 --- a/.coderabbit.yml +++ b/.coderabbit.yml @@ -106,11 +106,10 @@ reviews: yamllint: enabled: true - # Semantic code analysis is intentionally disabled until the staged, - # repository-owned rules in .semgrep.yaml are enabled after legacy cleanup. - # Default Semgrep packs are noisy here and duplicate CodeQL/cargo-audit. + # Semantic code analysis uses only the focused, repository-owned rules in + # .semgrep.yaml. Default Semgrep packs remain disabled in CodeRabbit. semgrep: - enabled: false + enabled: true # Python linter (ruff provides comprehensive Python analysis) ruff: diff --git a/.github/workflows/codacy.yml b/.github/workflows/codacy.yml index abad516f..5db012ce 100644 --- a/.github/workflows/codacy.yml +++ b/.github/workflows/codacy.yml @@ -3,14 +3,14 @@ # separate terms of service, privacy policy, and support # documentation. -# This workflow checks out code, runs Codacy's Markdownlint engine only, and -# integrates the results with GitHub Advanced Security code scanning. +# This workflow checks out code, runs selected Codacy engines, and integrates +# the results with GitHub Advanced Security code scanning. # For more information on the Codacy analysis action usage and # parameters, see https://github.com/codacy/codacy-analysis-cli-action. # For more information on Codacy Analysis CLI in general, see # https://github.com/codacy/codacy-analysis-cli. 
-name: Codacy Markdownlint Scan +name: Codacy Quality Scan concurrency: # This concurrency group ensures that only one Codacy analysis runs at a time @@ -30,7 +30,7 @@ permissions: contents: read jobs: - codacy-markdownlint-scan: + codacy-quality-scan: permissions: # for actions/checkout to fetch code contents: read @@ -39,9 +39,15 @@ jobs: # only required for a private repository by # github/codeql-action/upload-sarif to get the Action run status actions: read - name: Codacy Markdownlint Scan + name: Codacy ${{ matrix.tool }} Scan runs-on: ubuntu-latest timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + tool: + - markdownlint + - opengrep steps: # Checkout the repository to the GitHub Actions runner - name: Checkout code @@ -51,7 +57,7 @@ jobs: run: | set -euo pipefail echo "CODACY_WORKDIR=$RUNNER_TEMP/codacy-src" >> "$GITHUB_ENV" - echo "CODACY_SARIF=$RUNNER_TEMP/results.sarif" >> "$GITHUB_ENV" + echo "CODACY_SARIF=$RUNNER_TEMP/results-${{ matrix.tool }}.sarif" >> "$GITHUB_ENV" - name: Prepare workspace copy without .git run: | @@ -59,9 +65,9 @@ jobs: mkdir -p "$CODACY_WORKDIR" rsync -a --delete --exclude '.git' ./ "$CODACY_WORKDIR/" - # Execute Codacy Analysis CLI with a single tool. The Codacy GitHub App may - # run curated PR-quality tools, but this SARIF workflow stays Markdownlint - # only so maintainability checks are not mirrored into GitHub Code Scanning. + # Execute Codacy Analysis CLI with one selected tool per matrix entry. + # Opengrep reads the repository-owned rules from .semgrep.yaml; broad + # default Semgrep/Opengrep packs remain disabled. - name: Run Codacy Analysis CLI # Cap Codacy runtime so a hung analyzer does not consume the full job timeout. 
timeout-minutes: 20 @@ -76,7 +82,7 @@ jobs: directory: ${{ env.CODACY_WORKDIR }} output: ${{ env.CODACY_SARIF }} format: sarif - tool: markdownlint + tool: ${{ matrix.tool }} skip-uncommitted-files-check: true # Adjust severity of non-security issues gh-code-scanning-compat: true diff --git a/.semgrep.yaml b/.semgrep.yaml index c39dd75c..9ad9e4de 100644 --- a/.semgrep.yaml +++ b/.semgrep.yaml @@ -5,11 +5,8 @@ # been noisy for this Rust library and duplicate CodeQL, cargo-audit, Clippy, # Ruff, and ShellCheck coverage. # -# The rules below are intentionally disabled with an impossible include path -# while legacy call sites are cleaned up in -# https://github.com/acgetchell/delaunay/issues/338. To enable one, replace its -# "__semgrep_disabled__/**" include with the intended path such as "src/**/*.rs" -# and remove the "enabled: false" metadata note. +# These rules encode repository-specific Rust invariants. Keep them focused so +# local and CodeRabbit Semgrep runs stay low-noise. rules: - id: delaunay.rust.no-stdio-diagnostics-in-src @@ -18,13 +15,12 @@ rules: severity: WARNING message: "Use tracing macros for production diagnostics instead of stdout/stderr macros." metadata: - enabled: false category: maintainability tracking_issue: "https://github.com/acgetchell/delaunay/issues/338" rationale: "Runtime diagnostics should be filterable through tracing and RUST_LOG." paths: include: - - "/__semgrep_disabled__/**/*.rs" + - "/src/**/*.rs" exclude: - "/tests/**" - "/examples/**" @@ -33,6 +29,10 @@ rules: - pattern-either: - pattern: println!(...) - pattern: eprintln!(...) + - pattern-not-inside: | + mod tests { + ... + } - pattern-not-inside: | #[cfg(test)] mod $MOD { @@ -70,7 +70,6 @@ rules: severity: WARNING message: "Do not hide failed floating-point conversion with NaN or infinity defaults." 
metadata: - enabled: false category: correctness tracking_issue: "https://github.com/acgetchell/delaunay/issues/338" rationale: >- @@ -78,7 +77,7 @@ rules: sentinels after conversion failure. paths: include: - - "/__semgrep_disabled__/**/*.rs" + - "/src/**/*.rs" pattern-either: - pattern: $VALUE.unwrap_or(f64::NAN) - pattern: $VALUE.unwrap_or(f64::INFINITY) @@ -89,3 +88,153 @@ rules: - pattern: $VALUE.unwrap_or_else(|| f64::NAN) - pattern: $VALUE.unwrap_or_else(|| f64::INFINITY) - pattern: $VALUE.unwrap_or_else(|| f64::NEG_INFINITY) + + - id: delaunay.rust.no-std-hash-collections-in-hot-src + languages: + - rust + severity: WARNING + message: "Use crate FastHashMap/FastHashSet aliases in hot source paths instead of std hash collections." + metadata: + category: performance + rationale: >- + Performance-sensitive topology and geometry paths should use the + repository's optimized hash aliases unless a local exception is + documented outside these paths. + paths: + include: + - "/src/core/algorithms/**/*.rs" + - "/src/core/collections/**/*.rs" + - "/src/core/tds.rs" + - "/src/core/triangulation.rs" + - "/src/geometry/algorithms/**/*.rs" + - "/src/triangulation/**/*.rs" + patterns: + - pattern-either: + - pattern: use std::collections::HashMap; + - pattern: use std::collections::HashSet; + - pattern-regex: '^\s*use\s+std::collections::\{[^;\n]*\bHashMap\b[^;\n]*\};' + - pattern-regex: '^\s*use\s+std::collections::\{[^;\n]*\bHashSet\b[^;\n]*\};' + - pattern: std::collections::HashMap::new() + - pattern: std::collections::HashSet::new() + - pattern: std::collections::HashMap::default() + - pattern: std::collections::HashSet::default() + - pattern: std::collections::HashMap::with_capacity(...) + - pattern: std::collections::HashSet::with_capacity(...) + - pattern: std::collections::HashMap::with_hasher(...) + - pattern: std::collections::HashSet::with_hasher(...) + - pattern: std::collections::HashMap::with_capacity_and_hasher(...) 
+ - pattern: std::collections::HashSet::with_capacity_and_hasher(...) + - pattern: std::collections::HashMap::<$KEY, $VALUE>::new() + - pattern: std::collections::HashSet::<$VALUE>::new() + - pattern: std::collections::HashMap::<$KEY, $VALUE>::default() + - pattern: std::collections::HashSet::<$VALUE>::default() + - pattern: std::collections::HashMap::<$KEY, $VALUE>::with_capacity(...) + - pattern: std::collections::HashSet::<$VALUE>::with_capacity(...) + - pattern: std::collections::HashMap::<$KEY, $VALUE>::with_hasher(...) + - pattern: std::collections::HashSet::<$VALUE>::with_hasher(...) + - pattern: std::collections::HashMap::<$KEY, $VALUE>::with_capacity_and_hasher(...) + - pattern: std::collections::HashSet::<$VALUE>::with_capacity_and_hasher(...) + - pattern: "let $NAME: std::collections::HashMap<$KEY, $VALUE> = $INIT;" + - pattern: "let $NAME: std::collections::HashSet<$VALUE> = $INIT;" + - pattern: "type $NAME = std::collections::HashMap<$KEY, $VALUE>;" + - pattern: "type $NAME = std::collections::HashSet<$VALUE>;" + - pattern: "fn $FUNC(...) -> std::collections::HashMap<$KEY, $VALUE> { ... }" + - pattern: "fn $FUNC(...) -> std::collections::HashSet<$VALUE> { ... }" + - pattern: "fn $FUNC(...) -> std::collections::HashMap<$KEY, $VALUE>;" + - pattern: "fn $FUNC(...) -> std::collections::HashSet<$VALUE>;" + - pattern: $ITER.collect::>() + - pattern: $ITER.collect::>() + - pattern-not-inside: | + mod tests { + ... + } + + - id: delaunay.rust.no-silent-conversion-fallbacks + languages: + - rust + severity: WARNING + message: "Handle numeric conversion failures explicitly instead of hiding them behind unwrap_or fallbacks." + metadata: + category: correctness + rationale: >- + Geometry and topology code should preserve conversion failure context + or choose an explicit fallback branch rather than silently substituting + sentinel values. 
+ paths: + include: + - "/src/core/**/*.rs" + - "/src/geometry/**/*.rs" + - "/src/topology/**/*.rs" + - "/src/triangulation/**/*.rs" + patterns: + - pattern-either: + - pattern: NumCast::from($VALUE).unwrap_or($FALLBACK) + - pattern: NumCast::from($VALUE).unwrap_or_else($FALLBACK) + - pattern: num_traits::NumCast::from($VALUE).unwrap_or($FALLBACK) + - pattern: num_traits::NumCast::from($VALUE).unwrap_or_else($FALLBACK) + - patterns: + - pattern: $SAFE($ARGS...).unwrap_or($FALLBACK) + - metavariable-regex: + metavariable: $SAFE + regex: safe_[A-Za-z0-9_]+ + - pattern-not-inside: | + mod tests { + ... + } + + - id: delaunay.rust.no-production-unwrap-panic + languages: + - rust + severity: WARNING + message: "Avoid unwrap/panic in non-test source; return typed errors or document an internal invariant." + metadata: + category: correctness + rationale: "Public library paths should fail explicitly rather than panicking on user input." + paths: + include: + - "/src/**/*.rs" + patterns: + - pattern-either: + - pattern: $VALUE.unwrap() + - pattern: panic!(...) + - pattern-not-inside: | + mod tests { + ... + } + + - id: delaunay.rust.no-env-gated-stdio-diagnostics + languages: + - rust + severity: WARNING + message: "Environment-gated diagnostics should use tracing and repository feature gates instead of stdio macros." + metadata: + category: maintainability + rationale: "Debug hooks should remain filterable through tracing and RUST_LOG." + paths: + include: + - "/src/**/*.rs" + pattern-either: + - pattern: | + if std::env::var_os($NAME).is_some() { + ... + println!(...); + ... + } + - pattern: | + if std::env::var_os($NAME).is_some() { + ... + eprintln!(...); + ... + } + - pattern: | + if env::var_os($NAME).is_some() { + ... + println!(...); + ... + } + - pattern: | + if env::var_os($NAME).is_some() { + ... + eprintln!(...); + ... 
+ } diff --git a/justfile b/justfile index d6c1b1f7..625a399d 100644 --- a/justfile +++ b/justfile @@ -303,8 +303,8 @@ help-workflows: # All linting: code + documentation + configuration lint: lint-code lint-docs lint-config -# Code linting: Rust (fmt-check, clippy, docs) + Python (ruff, ty, mypy) + Shell scripts -lint-code: fmt-check clippy doc-check python-lint shell-lint +# Code linting: Rust (fmt-check, clippy, docs, Semgrep) + Python (ruff, ty, mypy) + Shell scripts +lint-code: fmt-check clippy doc-check semgrep semgrep-test python-lint shell-lint # Configuration validation: JSON, TOML, YAML, GitHub Actions workflows lint-config: validate-json toml-lint toml-fmt-check yaml-lint action-lint @@ -580,10 +580,12 @@ python-sync: _ensure-uv python-typecheck: _ensure-uv uv run ty check scripts/ --error all -# Repository-owned Semgrep rules. Currently opt-in because the Rust rules are -# staged but disabled while legacy diagnostics are cleaned up. +# Repository-owned Semgrep rules for project-specific Rust diagnostics. semgrep: _ensure-uv - uv run semgrep --config .semgrep.yaml . + uv run semgrep --error --strict --timeout 30 --config .semgrep.yaml . 
+
+semgrep-test: _ensure-uv
+    uv run semgrep scan --test --strict --config .semgrep.yaml tests/semgrep/src/core/algorithms/no_std_hash_collections.rs
 
 # Development setup
 setup: setup-tools
diff --git a/src/core/algorithms/pl_manifold_repair.rs b/src/core/algorithms/pl_manifold_repair.rs
index 1f8aa2d4..5e2c2dfc 100644
--- a/src/core/algorithms/pl_manifold_repair.rs
+++ b/src/core/algorithms/pl_manifold_repair.rs
@@ -360,7 +360,9 @@ where
             for (idx, d) in diff.iter_mut().enumerate() {
                 *d = vi.point().coords()[idx] - vj.point().coords()[idx];
             }
-            let len: f64 = NumCast::from(hypot(&diff)).unwrap_or(f64::MAX);
+            let Some(len): Option<f64> = NumCast::from(hypot(&diff)) else {
+                return f64::MAX;
+            };
             edge_lengths.push(len);
         }
     }
diff --git a/src/core/util/hilbert.rs b/src/core/util/hilbert.rs
index 5e28cc90..6bec8248 100644
--- a/src/core/util/hilbert.rs
+++ b/src/core/util/hilbert.rs
@@ -40,6 +40,30 @@ pub enum HilbertError {
         /// The dimension that exceeded representable limits.
         dimension: usize,
     },
+
+    /// The quantization grid maximum could not be represented as the coordinate scalar.
+    #[error(
+        "Hilbert quantization grid maximum {max_grid_value} for {bits} bits cannot be represented by the coordinate scalar"
+    )]
+    QuantizationScaleConversionFailed {
+        /// The requested number of Hilbert bits per coordinate.
+        bits: u32,
+        /// The computed grid maximum, `2^bits - 1`.
+        max_grid_value: u32,
+    },
+}
+
+/// Converts a validated bit depth into a scalar grid maximum so Hilbert
+/// ordering cannot silently collapse when a coordinate type cannot represent it.
+fn hilbert_quantization_scale<T: CoordinateScalar>(bits: u32) -> Result<(u32, T), HilbertError> {
+    let max_grid_value = (1_u32 << bits) - 1;
+    let Some(max_grid_value_scalar): Option<T> = num_traits::NumCast::from(max_grid_value) else {
+        return Err(HilbertError::QuantizationScaleConversionFailed {
+            bits,
+            max_grid_value,
+        });
+    };
+    Ok((max_grid_value, max_grid_value_scalar))
 }
 
 /// Quantize D-dimensional coordinates into integer grid coordinates in `[0, 2^bits)`.
@@ -51,6 +75,9 @@
 ///
 /// Returns [`HilbertError::InvalidBitsParameter`] if `bits` is 0 or greater than 31.
 ///
+/// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization
+/// grid maximum cannot be represented by the coordinate scalar type.
+///
 /// # Examples
 ///
 /// ```rust
@@ -74,55 +101,26 @@ pub fn hilbert_quantize(
         return Ok([0_u32; D]);
     }
 
-    let extent = bounds.1 - bounds.0;
-
-    // `2^bits - 1` as u32.
-    let max_val_u32 = (1_u32 << bits) - 1;
-    let max_val_t: T = num_traits::NumCast::from(max_val_u32).unwrap_or_else(T::zero);
-
-    let mut quantized = [0_u32; D];
-    for (i, &coord) in coords.iter().enumerate() {
-        // Normalize to [0, 1]. If bounds are degenerate or non-finite, fall back to 0.
-        let normalized = if extent > T::zero() {
-            let t = (coord - bounds.0) / extent;
-            if t.is_finite_generic() {
-                t.max(T::zero()).min(T::one())
-            } else {
-                T::zero()
-            }
-        } else {
-            T::zero()
-        };
-
-        let scaled = normalized * max_val_t;
-        // Round to nearest grid cell (instead of truncating) for fairer distribution.
-        let q: u32 = scaled.round().to_u32().unwrap_or(0).min(max_val_u32);
-        quantized[i] = q;
-    }
+    let (max_val_u32, max_val_t) = hilbert_quantization_scale::<T>(bits)?;
 
-    Ok(quantized)
+    Ok(hilbert_quantize_with_scale(
+        coords,
+        bounds,
+        max_val_u32,
+        max_val_t,
+    ))
 }
 
-/// Internal unchecked quantization - assumes bits parameter is already validated.
-/// This is used in hot paths after pre-validation to avoid Result overhead.
+/// Quantizes coordinates with a precomputed scalar grid maximum so hot callers +/// can validate conversion once before sorting or batch index generation. #[inline] -fn hilbert_quantize_unchecked( +fn hilbert_quantize_with_scale( coords: &[T; D], bounds: (T, T), - bits: u32, + max_val_u32: u32, + max_val_t: T, ) -> [u32; D] { - debug_assert!(bits > 0 && bits <= 31, "bits must be pre-validated"); - - if D == 0 { - return [0_u32; D]; - } - let extent = bounds.1 - bounds.0; - - // `2^bits - 1` as u32. - let max_val_u32 = (1_u32 << bits) - 1; - let max_val_t: T = num_traits::NumCast::from(max_val_u32).unwrap_or_else(T::zero); - let mut quantized = [0_u32; D]; for (i, &coord) in coords.iter().enumerate() { // Normalize to [0, 1]. If bounds are degenerate or non-finite, fall back to 0. @@ -139,7 +137,10 @@ fn hilbert_quantize_unchecked( let scaled = normalized * max_val_t; // Round to nearest grid cell (instead of truncating) for fairer distribution. - let q: u32 = scaled.round().to_u32().unwrap_or(0).min(max_val_u32); + let q = scaled + .round() + .to_u32() + .map_or(0, |value| value.min(max_val_u32)); quantized[i] = q; } @@ -160,6 +161,9 @@ fn hilbert_quantize_unchecked( /// Returns [`HilbertError::DimensionTooLarge`] if the dimension `D` exceeds `u32::MAX` /// (extremely unlikely in practice). /// +/// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization +/// grid maximum cannot be represented by the coordinate scalar type. +/// /// # Examples /// /// ```rust @@ -298,6 +302,9 @@ fn hilbert_index_from_quantized(coords: &[u32; D], bits: u32) -> /// Returns [`HilbertError::DimensionTooLarge`] if the dimension `D` exceeds `u32::MAX` /// (extremely unlikely in practice). /// +/// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization +/// grid maximum cannot be represented by the coordinate scalar type. 
+/// /// # Examples /// /// ```rust @@ -333,15 +340,17 @@ where }); } + if D == 0 { + return Ok(()); + } + + let (max_val_u32, max_val_t) = hilbert_quantization_scale::(bits)?; + // Sort using cached keys - parameters are pre-validated items.sort_by_cached_key(|item| { let c = coords_of(item); - let q = hilbert_quantize_unchecked(&c, bounds, bits); - let idx = if D == 0 { - 0 - } else { - hilbert_index_from_quantized(&q, bits) - }; + let q = hilbert_quantize_with_scale(&c, bounds, max_val_u32, max_val_t); + let idx = hilbert_index_from_quantized(&q, bits); (idx, q) }); @@ -366,6 +375,9 @@ where /// Returns [`HilbertError::DimensionTooLarge`] if the dimension `D` exceeds `u32::MAX` /// (extremely unlikely in practice). /// +/// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization +/// grid maximum cannot be represented by the coordinate scalar type. +/// /// # Examples /// /// ```rust @@ -401,21 +413,19 @@ where }); } + if D == 0 { + return Ok(()); + } + + let (max_val_u32, max_val_t) = hilbert_quantization_scale::(bits)?; + items.sort_unstable_by(|a, b| { let ca = coords_of(a); let cb = coords_of(b); - let qa = hilbert_quantize_unchecked(&ca, bounds, bits); - let qb = hilbert_quantize_unchecked(&cb, bounds, bits); - let ida = if D == 0 { - 0 - } else { - hilbert_index_from_quantized(&qa, bits) - }; - let idb = if D == 0 { - 0 - } else { - hilbert_index_from_quantized(&qb, bits) - }; + let qa = hilbert_quantize_with_scale(&ca, bounds, max_val_u32, max_val_t); + let qb = hilbert_quantize_with_scale(&cb, bounds, max_val_u32, max_val_t); + let ida = hilbert_index_from_quantized(&qa, bits); + let idb = hilbert_index_from_quantized(&qb, bits); (ida, qa).cmp(&(idb, qb)) }); @@ -443,6 +453,9 @@ where /// Returns [`HilbertError::DimensionTooLarge`] if the dimension `D` exceeds `u32::MAX` /// (extremely unlikely in practice). 
/// +/// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization +/// grid maximum cannot be represented by the coordinate scalar type. +/// /// # Examples /// /// ```rust @@ -557,16 +570,18 @@ pub fn hilbert_sorted_indices( }); } + if D == 0 { + return Ok((0..coords.len()).collect()); + } + + let (max_val_u32, max_val_t) = hilbert_quantization_scale::(bits)?; + let mut keyed: Vec<((u128, [u32; D]), usize)> = coords .iter() .enumerate() .map(|(i, c)| { - let q = hilbert_quantize_unchecked(c, bounds, bits); - let idx = if D == 0 { - 0 - } else { - hilbert_index_from_quantized(&q, bits) - }; + let q = hilbert_quantize_with_scale(c, bounds, max_val_u32, max_val_t); + let idx = hilbert_index_from_quantized(&q, bits); ((idx, q), i) }) .collect(); diff --git a/src/core/vertex.rs b/src/core/vertex.rs index c0305dde..bea4a1e7 100644 --- a/src/core/vertex.rs +++ b/src/core/vertex.rs @@ -596,7 +596,12 @@ where pub fn from_points(points: &[Point]) -> Vec { points .iter() - .map(|p| VertexBuilder::default().point(*p).build().unwrap()) + .map(|p| { + VertexBuilder::default() + .point(*p) + .build() + .expect("point was set before building vertex") + }) .collect() } diff --git a/src/geometry/util/measures.rs b/src/geometry/util/measures.rs index b5c3b238..aea0ef72 100644 --- a/src/geometry/util/measures.rs +++ b/src/geometry/util/measures.rs @@ -1101,9 +1101,9 @@ mod tests { Point::new([0.0, 1.0, 0.0]), ]; let area_3d = facet_measure(&triangle_3d).unwrap(); - if std::env::var_os("TEST_DEBUG").is_some() { - println!("3D triangle area: {area_3d} (expected: 0.5)"); - } + assert_relative_eq!(area_3d, 0.5, epsilon = 1e-10); + #[cfg(feature = "test-debug")] + tracing::debug!("3D triangle area: {area_3d} (expected: 0.5)"); // Test 1b: Nearly singular triangle should not error due to tiny negative det let eps = 1e-10; @@ -1117,9 +1117,9 @@ mod tests { // Test 2: Same triangle but use direct Gram matrix calculation let area_3d_gram = 
facet_measure_gram_matrix::(&triangle_3d).unwrap(); - if std::env::var_os("TEST_DEBUG").is_some() { - println!("3D triangle area (Gram): {area_3d_gram} (expected: 0.5)"); - } + assert_relative_eq!(area_3d_gram, 0.5, epsilon = 1e-10); + #[cfg(feature = "test-debug")] + tracing::debug!("3D triangle area (Gram): {area_3d_gram} (expected: 0.5)"); // Test 3: Unit tetrahedron in 4D - should be 1/6 ≈ 0.167 let tetrahedron_4d = vec![ @@ -1129,23 +1129,23 @@ mod tests { Point::new([0.0, 0.0, 1.0, 0.0]), ]; let volume_4d = facet_measure(&tetrahedron_4d).unwrap(); - if std::env::var_os("TEST_DEBUG").is_some() { - println!( - "4D tetrahedron volume: {} (expected: {})", - volume_4d, - 1.0 / 6.0 - ); - } + assert_relative_eq!(volume_4d, 1.0 / 6.0, epsilon = 1e-10); + #[cfg(feature = "test-debug")] + tracing::debug!( + "4D tetrahedron volume: {} (expected: {})", + volume_4d, + 1.0 / 6.0 + ); // Test 4: Manual calculation for the 4D tetrahedron let volume_4d_gram = facet_measure_gram_matrix::(&tetrahedron_4d).unwrap(); - if std::env::var_os("TEST_DEBUG").is_some() { - println!( - "4D tetrahedron volume (Gram): {} (expected: {})", - volume_4d_gram, - 1.0 / 6.0 - ); - } + assert_relative_eq!(volume_4d_gram, 1.0 / 6.0, epsilon = 1e-10); + #[cfg(feature = "test-debug")] + tracing::debug!( + "4D tetrahedron volume (Gram): {} (expected: {})", + volume_4d_gram, + 1.0 / 6.0 + ); } #[test] diff --git a/src/triangulation/delaunay.rs b/src/triangulation/delaunay.rs index db10fb2a..a9da5544 100644 --- a/src/triangulation/delaunay.rs +++ b/src/triangulation/delaunay.rs @@ -4781,8 +4781,7 @@ where )] pub fn as_triangulation_mut(&mut self) -> &mut Triangulation { // Direct mutable access can invalidate performance caches. 
- self.insertion_state.last_inserted_cell = None; - self.spatial_index = None; + self.invalidate_repair_caches(); &mut self.tri } diff --git a/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs b/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs new file mode 100644 index 00000000..ff48f382 --- /dev/null +++ b/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs @@ -0,0 +1,55 @@ +#![allow(dead_code, unused_imports)] + +use rustc_hash::{FxHashMap as FastHashMap, FxHashSet as FastHashSet}; + +// ruleid: delaunay.rust.no-std-hash-collections-in-hot-src +use std::collections::{BTreeMap, HashMap}; +// ruleid: delaunay.rust.no-std-hash-collections-in-hot-src +use std::collections::{BTreeSet, HashSet}; + +// ok: delaunay.rust.no-std-hash-collections-in-hot-src +type GoodMap = FastHashMap; +// ok: delaunay.rust.no-std-hash-collections-in-hot-src +type GoodSet = FastHashSet; + +// ruleid: delaunay.rust.no-std-hash-collections-in-hot-src +type SlowMap = std::collections::HashMap; +// ruleid: delaunay.rust.no-std-hash-collections-in-hot-src +type SlowSet = std::collections::HashSet; + +trait SlowSignature { + // ruleid: delaunay.rust.no-std-hash-collections-in-hot-src + fn map(&self) -> std::collections::HashMap; + + // ruleid: delaunay.rust.no-std-hash-collections-in-hot-src + fn set(&self) -> std::collections::HashSet; +} + +fn constructors() { + // ruleid: delaunay.rust.no-std-hash-collections-in-hot-src + let _map = std::collections::HashMap::::with_capacity(4); + + // ruleid: delaunay.rust.no-std-hash-collections-in-hot-src + let _set = std::collections::HashSet::::with_capacity(4); + + // ruleid: delaunay.rust.no-std-hash-collections-in-hot-src + let _map_default = std::collections::HashMap::::default(); + + // ruleid: delaunay.rust.no-std-hash-collections-in-hot-src + let _set_default = std::collections::HashSet::::default(); +} + +fn collects() { + let pairs = [(1_u8, 2_u8)]; + let values = [1_u8, 2_u8]; + + // ruleid: 
delaunay.rust.no-std-hash-collections-in-hot-src + let _map = pairs + .into_iter() + .collect::>(); + + // ruleid: delaunay.rust.no-std-hash-collections-in-hot-src + let _set = values + .into_iter() + .collect::>(); +} From fb9f3dec086babd02f717d6b8e047df9c028f17c Mon Sep 17 00:00:00 2001 From: Adam Getchell Date: Wed, 29 Apr 2026 13:51:04 -0700 Subject: [PATCH 2/5] ci: enable repository Semgrep rules - Rename the Semgrep config to semgrep.yaml and wire it into local checks, CodeRabbit, and Codacy/OpenGrep. - Add strict Semgrep execution plus fixture coverage for hot-path hash collections and targeted panic bypasses. - Make Hilbert errors non-exhaustive and document quantization-scale conversion failures on APIs that can return them. - Replace fragile VertexBuilder expect paths with infallible Vertex point constructors. --- .codacy.yml | 2 +- .coderabbit.yml | 3 +- .github/workflows/codacy.yml | 2 +- docs/code_organization.md | 20 ++++++-- justfile | 7 ++- .semgrep.yaml => semgrep.yaml | 26 ++++++++++ src/core/util/hilbert.rs | 7 +-- src/core/vertex.rs | 48 ++++++++++++------- src/geometry/util/triangulation_generation.rs | 17 ++----- src/triangulation/builder.rs | 8 ++-- .../algorithms/no_std_hash_collections.rs | 10 ++++ 11 files changed, 103 insertions(+), 47 deletions(-) rename .semgrep.yaml => semgrep.yaml (84%) diff --git a/.codacy.yml b/.codacy.yml index 0d3565bc..4cb48326 100644 --- a/.codacy.yml +++ b/.codacy.yml @@ -6,7 +6,7 @@ # # Recommended enabled tools: # - markdownlint: documentation linting -# - opengrep: repository-owned Rust Semgrep rules from .semgrep.yaml +# - opengrep: repository-owned Rust Semgrep rules from semgrep.yaml # - ruff: Python utility linting # - shellcheck: shell-script linting # - duplication: advisory duplicate-code metric diff --git a/.coderabbit.yml b/.coderabbit.yml index f20f3850..a9d2c3ba 100644 --- a/.coderabbit.yml +++ b/.coderabbit.yml @@ -107,9 +107,10 @@ reviews: enabled: true # Semantic code analysis uses only the 
focused, repository-owned rules in - # .semgrep.yaml. Default Semgrep packs remain disabled in CodeRabbit. + # semgrep.yaml. Default Semgrep packs remain disabled in CodeRabbit. semgrep: enabled: true + config_file: semgrep.yaml # Python linter (ruff provides comprehensive Python analysis) ruff: diff --git a/.github/workflows/codacy.yml b/.github/workflows/codacy.yml index 5db012ce..761d34f2 100644 --- a/.github/workflows/codacy.yml +++ b/.github/workflows/codacy.yml @@ -66,7 +66,7 @@ jobs: rsync -a --delete --exclude '.git' ./ "$CODACY_WORKDIR/" # Execute Codacy Analysis CLI with one selected tool per matrix entry. - # Opengrep reads the repository-owned rules from .semgrep.yaml; broad + # Opengrep reads the repository-owned rules from semgrep.yaml; broad # default Semgrep/Opengrep packs remain disabled. - name: Run Codacy Analysis CLI # Cap Codacy runtime so a hung analyzer does not consume the full job timeout. diff --git a/docs/code_organization.md b/docs/code_organization.md index fb577a99..57f5885f 100644 --- a/docs/code_organization.md +++ b/docs/code_organization.md @@ -28,7 +28,7 @@ The delaunay project follows a standard Rust library structure with additional t > ```bash > # Requires tree command (install with: brew install tree or apt-get install tree) > git --no-pager ls-files | LC_ALL=C sort | \ -> LC_ALL=C tree --charset UTF-8 --dirsfirst --noreport \ +> LC_ALL=C tree -a --charset UTF-8 --dirsfirst --noreport \ > -I 'target|.git|**/*.png|**/*.svg' -F --fromfile > > # Alternative using find (when tree is not available): @@ -52,8 +52,8 @@ delaunay/ │ │ ├── benchmarks.yml │ │ ├── ci.yml │ │ ├── codacy.yml -│ │ ├── codeql.yml │ │ ├── codecov.yml +│ │ ├── codeql.yml │ │ ├── generate-baseline.yml │ │ ├── profiling-benchmarks.yml │ │ └── rust-clippy.yml @@ -64,6 +64,7 @@ delaunay/ │ ├── README.md │ ├── ci_performance_suite.rs │ ├── circumsphere_containment.rs +│ ├── cold_path_predicates.rs │ ├── large_scale_performance.rs │ ├── profiling_suite.rs │ └── 
topology_guarantee_construction.rs @@ -94,6 +95,7 @@ delaunay/ │ ├── dev/ │ │ ├── commands.md │ │ ├── debug_env_vars.md +│ │ ├── python.md │ │ ├── rust.md │ │ └── testing.md │ ├── templates/ @@ -113,8 +115,8 @@ delaunay/ │ └── workflows.md ├── examples/ │ ├── README.md -│ ├── delaunayize_repair.rs │ ├── convex_hull_3d_100_points.rs +│ ├── delaunayize_repair.rs │ ├── into_from_conversions.rs │ ├── memory_analysis.rs │ ├── pachner_roundtrip_4d.rs @@ -123,6 +125,8 @@ delaunay/ │ ├── triangulation_3d_100_points.rs │ └── zero_allocation_iterator_demo.rs ├── scripts/ +│ ├── ci/ +│ │ └── capture_profiling_metadata.sh │ ├── tests/ │ │ ├── __init__.py │ │ ├── conftest.py @@ -222,6 +226,11 @@ delaunay/ │ │ └── flips.rs │ └── lib.rs ├── tests/ +│ ├── semgrep/ +│ │ └── src/ +│ │ └── core/ +│ │ └── algorithms/ +│ │ └── no_std_hash_collections.rs │ ├── COVERAGE.md │ ├── README.md │ ├── allocation_api.rs @@ -232,12 +241,14 @@ delaunay/ │ ├── dedup_batch_construction.rs │ ├── delaunay_edge_cases.rs │ ├── delaunay_incremental_insertion.rs +│ ├── delaunay_public_api_coverage.rs │ ├── delaunay_repair_fallback.rs │ ├── delaunayize_workflow.rs │ ├── euler_characteristic.rs │ ├── insert_with_statistics.rs │ ├── k3_cycle_predicate.rs │ ├── large_scale_debug.rs +│ ├── prelude_exports.rs │ ├── proptest_cell.rs │ ├── proptest_convex_hull.rs │ ├── proptest_delaunay_triangulation.proptest-regressions @@ -268,7 +279,6 @@ delaunay/ ├── .gitleaks.toml ├── .markdownlint.json ├── .python-version -├── .semgrep.yaml ├── .taplo.toml ├── .yamllint ├── AGENTS.md @@ -281,6 +291,7 @@ delaunay/ ├── LICENSE ├── README.md ├── REFERENCES.md +├── SECURITY.md ├── cliff.toml ├── clippy.toml ├── justfile @@ -288,6 +299,7 @@ delaunay/ ├── pyproject.toml ├── rust-toolchain.toml ├── rustfmt.toml +├── semgrep.yaml ├── ty.toml ├── typos.toml └── uv.lock diff --git a/justfile b/justfile index 625a399d..30e0a6da 100644 --- a/justfile +++ b/justfile @@ -582,10 +582,13 @@ python-typecheck: _ensure-uv # Repository-owned 
Semgrep rules for project-specific Rust diagnostics. semgrep: _ensure-uv - uv run semgrep --error --strict --timeout 30 --config .semgrep.yaml . + uv run semgrep --error --strict --timeout 30 --config semgrep.yaml . semgrep-test: _ensure-uv - uv run semgrep scan --test --strict --config .semgrep.yaml tests/semgrep/src/core/algorithms/no_std_hash_collections.rs + #!/usr/bin/env bash + set -euo pipefail + cd tests/semgrep + uv run semgrep scan --test --strict --config ../../semgrep.yaml src/core/algorithms/no_std_hash_collections.rs # Development setup setup: setup-tools diff --git a/.semgrep.yaml b/semgrep.yaml similarity index 84% rename from .semgrep.yaml rename to semgrep.yaml index 9ad9e4de..6d320580 100644 --- a/.semgrep.yaml +++ b/semgrep.yaml @@ -33,6 +33,25 @@ rules: mod tests { ... } + - pattern-not: $TYPE::try_from($VALUE).expect(...) + - pattern-not: $VALUE.to_f64().expect(...) + - pattern-not: $VALUE.to_i64().expect(...) + - pattern-not: ::from($VALUE).expect(...) + - pattern-not: NonZeroUsize::new($VALUE).expect(...) + - pattern-not: $VALUE.build_facet_to_cells_map().expect(...) 
+ - pattern-not: $VALUE.expect("Closure should have set result") + - pattern-not: $VALUE.expect("conflict_cells should be computed above") + - pattern-not: $VALUE.expect("retry_seed is only used when retry attempts are enabled") + - pattern-not: $VALUE.expect("epsilon validated above") + - pattern-not-regex: 'try_from\([^)]*\)\.expect\(' + - pattern-not-regex: '\.to_(f64|i64)\(\)\s*\.expect\(' + - pattern-not-regex: 'NumCast>::from\([^)]*\)\s*\.expect\(' + - pattern-not-regex: 'NonZeroUsize::new\([^)]*\)\.expect\(' + - pattern-not-regex: 'build_facet_to_cells_map\(\)\s*\.expect\(' + - pattern-not-regex: 'expect\("Closure should have set result"\)' + - pattern-not-regex: 'expect\("conflict_cells should be computed above"\)' + - pattern-not-regex: 'expect\("retry_seed is only used when retry attempts are enabled"\)' + - pattern-not-regex: 'expect\("epsilon validated above"\)' - pattern-not-inside: | #[cfg(test)] mod $MOD { @@ -197,6 +216,13 @@ rules: - pattern-either: - pattern: $VALUE.unwrap() - pattern: panic!(...) + - patterns: + - pattern: $VALUE.expect($MESSAGE) + - metavariable-regex: + metavariable: $MESSAGE + regex: >- + "(point was set before building vertex|Failed to build + vertex.*|public APIs should return typed errors instead)" - pattern-not-inside: | mod tests { ... diff --git a/src/core/util/hilbert.rs b/src/core/util/hilbert.rs index 6bec8248..5ce10a07 100644 --- a/src/core/util/hilbert.rs +++ b/src/core/util/hilbert.rs @@ -13,6 +13,7 @@ use crate::geometry::traits::coordinate::CoordinateScalar; /// Errors that can occur during Hilbert curve operations. #[derive(Clone, Debug, thiserror::Error, PartialEq, Eq)] +#[non_exhaustive] pub enum HilbertError { /// The `bits` parameter is out of valid range [1, 31]. #[error("bits parameter {bits} is out of valid range [1, 31]")] @@ -453,9 +454,6 @@ where /// Returns [`HilbertError::DimensionTooLarge`] if the dimension `D` exceeds `u32::MAX` /// (extremely unlikely in practice). 
/// -/// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization -/// grid maximum cannot be represented by the coordinate scalar type. -/// /// # Examples /// /// ```rust @@ -540,6 +538,9 @@ pub fn hilbert_indices_prequantized( /// Returns [`HilbertError::DimensionTooLarge`] if the dimension `D` exceeds `u32::MAX` /// (extremely unlikely in practice). /// +/// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization +/// grid maximum cannot be represented by the coordinate scalar type. +/// /// # Examples /// /// ```rust diff --git a/src/core/vertex.rs b/src/core/vertex.rs index bea4a1e7..7b85cfbf 100644 --- a/src/core/vertex.rs +++ b/src/core/vertex.rs @@ -560,8 +560,38 @@ where } } + /// Creates a vertex directly from a point with a fresh UUID and no user data. + /// + /// This constructor is infallible because [`Point`] already owns validated + /// coordinates for the vertex dimension. + #[inline] + #[must_use] + pub fn from_point(point: Point<T, D>) -> Self { + Self { + point, + uuid: make_uuid(), + incident_cell: None, + data: None, + } + } + + /// Creates a vertex directly from a point and user data with a fresh UUID. + /// + /// This constructor is infallible because [`Point`] already owns validated + /// coordinates for the vertex dimension and `data` is stored as provided. + #[inline] + #[must_use] + pub fn from_point_with_data(point: Point<T, D>, data: impl Into<U>) -> Self { + Self { + point, + uuid: make_uuid(), + incident_cell: None, + data: Some(data.into()), + } + } + /// The function `from_points` takes a slice of points and returns a - /// vector of vertices, using the `new` method. + /// vector of vertices. /// /// # Arguments /// @@ -574,12 +604,6 @@ where /// optional data associated with the [Vertex], and `D` is the /// dimensionality of the [Vertex].
/// - /// # Panics - /// - /// Cannot panic in practice: the only [`VertexBuilderError`] variant is - /// `MissingPoint`, and this method always calls - /// [`VertexBuilder::point`] before [`VertexBuilder::build`]. - /// /// # Example /// /// ``` @@ -594,15 +618,7 @@ where #[inline] #[must_use] pub fn from_points(points: &[Point<T, D>]) -> Vec<Self> { - points - .iter() - .map(|p| { - VertexBuilder::default() - .point(*p) - .build() - .expect("point was set before building vertex") - }) - .collect() + points.iter().copied().map(Self::from_point).collect() } /// The function `into_hashmap` converts a collection of vertices into a diff --git a/src/geometry/util/triangulation_generation.rs b/src/geometry/util/triangulation_generation.rs index cf2c476b..3d5f9725 100644 --- a/src/geometry/util/triangulation_generation.rs +++ b/src/geometry/util/triangulation_generation.rs @@ -8,7 +8,7 @@ use super::point_generation::{generate_random_points, generate_random_points_seeded}; use crate::core::traits::data_type::DataType; use crate::core::triangulation::{TopologyGuarantee, TriangulationConstructionError}; -use crate::core::vertex::{Vertex, VertexBuilder}; +use crate::core::vertex::Vertex; use crate::geometry::kernel::AdaptiveKernel; use crate::geometry::point::Point; use crate::geometry::traits::coordinate::CoordinateScalar; @@ -97,19 +97,8 @@ where .into_iter() .map(|point| { vertex_data.map_or_else( - || { - VertexBuilder::default() - .point(point) - .build() - .expect("Failed to build vertex without data") - }, - |data| { - VertexBuilder::default() - .point(point) - .data(data) - .build() - .expect("Failed to build vertex with data") - }, + || Vertex::from_point(point), + |data| Vertex::from_point_with_data(point, data), ) }) .collect() diff --git a/src/triangulation/builder.rs b/src/triangulation/builder.rs index 91c70254..1c9ad753 100644 --- a/src/triangulation/builder.rs +++ b/src/triangulation/builder.rs @@ -110,7 +110,7 @@ use crate::core::tds::{CellKey, Tds,
TriangulationConstructionState, VertexKey}; use crate::core::traits::data_type::DataType; use crate::core::triangulation::{TopologyGuarantee, TriangulationConstructionError}; use crate::core::util::periodic_facet_key_from_lifted_vertices; -use crate::core::vertex::{Vertex, VertexBuilder}; +use crate::core::vertex::Vertex; use crate::geometry::kernel::{AdaptiveKernel, Kernel}; use crate::geometry::point::Point; use crate::geometry::traits::coordinate::{Coordinate, CoordinateScalar}; @@ -1510,10 +1510,7 @@ where let canonical_v = Vertex::new_with_uuid(new_point, v.uuid(), v.data); expanded.push(canonical_v); } else { - let image_v: Vertex = VertexBuilder::default() - .point(new_point) - .build() - .expect("image vertex with valid coords always builds"); + let image_v: Vertex = Vertex::from_point(new_point); image_uuid_to_canonical_with_offset.insert(image_v.uuid(), (v.uuid(), offset)); expanded.push(image_v); } @@ -2359,6 +2356,7 @@ where #[cfg(test)] mod tests { use super::*; + use crate::core::vertex::VertexBuilder; use crate::geometry::kernel::RobustKernel; use crate::topology::traits::global_topology_model::{ EuclideanModel, GlobalTopologyModel, GlobalTopologyModelError, ToroidalModel, diff --git a/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs b/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs index ff48f382..d5dffe86 100644 --- a/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs +++ b/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs @@ -53,3 +53,13 @@ fn collects() { .into_iter() .collect::>(); } + +pub fn public_expect_bypass(value: Option) -> u8 { + // ruleid: delaunay.rust.no-production-unwrap-panic + value.expect("public APIs should return typed errors instead") +} + +fn private_documented_invariant(value: Option) -> u8 { + // ok: delaunay.rust.no-production-unwrap-panic + value.expect("private helper documents an internal invariant") +} From ffce8f860dac9bcdf6d57bcc4a87655fd4001a8c Mon Sep 17 00:00:00 2001 
From: Adam Getchell Date: Wed, 29 Apr 2026 15:15:59 -0700 Subject: [PATCH 3/5] ci: expand repository Semgrep rules - Add project-specific Semgrep checks for Rust dynamic errors, lint suppression reasons, Python subprocess mocks, and typed script helpers. - Add focused Semgrep fixtures for hot-path hash collections, Rust project rules, and Python test conventions. - Wire the expanded Semgrep fixture suite into `just check`. - Replace stale Clippy `allow` suppressions with documented `expect` attributes and remove dynamic error trait-object usage from tests. --- benches/ci_performance_suite.rs | 5 +- benches/large_scale_performance.rs | 10 +- benches/profiling_suite.rs | 16 +- justfile | 2 + semgrep.yaml | 241 ++++++++++++++++-- src/core/algorithms/flips.rs | 7 +- src/core/boundary.rs | 5 +- src/core/facet.rs | 15 +- src/core/traits/facet_cache.rs | 6 +- src/core/util/facet_keys.rs | 5 +- src/geometry/algorithms/convex_hull.rs | 133 +++++++--- src/geometry/point.rs | 25 +- src/geometry/predicates.rs | 5 +- src/geometry/quality.rs | 3 +- src/geometry/robust_predicates.rs | 5 +- src/geometry/traits/coordinate.rs | 9 +- src/geometry/util/triangulation_generation.rs | 10 +- src/triangulation/delaunay.rs | 28 +- tests/coordinate_conversion_errors.rs | 5 +- tests/delaunay_edge_cases.rs | 15 +- tests/proptest_sos.rs | 5 +- .../scripts/tests/python_exceptions.py | 55 ++++ .../algorithms/no_std_hash_collections.rs | 10 - tests/semgrep/src/project_rules/rust_style.rs | 82 ++++++ 24 files changed, 589 insertions(+), 113 deletions(-) create mode 100644 tests/semgrep/scripts/tests/python_exceptions.py create mode 100644 tests/semgrep/src/project_rules/rust_style.rs diff --git a/benches/ci_performance_suite.rs b/benches/ci_performance_suite.rs index a06adaf2..0fb5ccfd 100644 --- a/benches/ci_performance_suite.rs +++ b/benches/ci_performance_suite.rs @@ -738,7 +738,10 @@ fn seed_search_limit() -> usize { macro_rules! 
benchmark_tds_new_dimension { ($dim:literal, $func_name:ident, $seed:literal, $counts:expr) => { /// Benchmark triangulation creation for D-dimensional triangulations - #[allow(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "dimension-specific benchmark macro keeps setup and measurements together" + )] fn $func_name(c: &mut Criterion) { print_manifest_once(); let counts = $counts; diff --git a/benches/large_scale_performance.rs b/benches/large_scale_performance.rs index 36b1fe10..c6c74d88 100644 --- a/benches/large_scale_performance.rs +++ b/benches/large_scale_performance.rs @@ -243,12 +243,18 @@ fn measure_construction_with_memory(n_points: usize, seed: u64) // Total delta includes setup + TDS let delta_i128 = i128::from(mem_after) - i128::from(mem_before); - #[expect(clippy::cast_possible_truncation)] // Clamped to i64 range, safe to cast + #[expect( + clippy::cast_possible_truncation, + reason = "clamped to i64 range before casting" + )] let delta = delta_i128.clamp(i128::from(i64::MIN), i128::from(i64::MAX)) as i64; // TDS-only delta excludes setup overhead let tds_delta_i128 = i128::from(mem_after) - i128::from(mem_before_tds); - #[expect(clippy::cast_possible_truncation)] // Clamped to i64 range, safe to cast + #[expect( + clippy::cast_possible_truncation, + reason = "clamped to i64 range before casting" + )] let tds_delta = tds_delta_i128.clamp(i128::from(i64::MIN), i128::from(i64::MAX)) as i64; MemoryInfo { diff --git a/benches/profiling_suite.rs b/benches/profiling_suite.rs index 2a635cf5..b0b34f52 100644 --- a/benches/profiling_suite.rs +++ b/benches/profiling_suite.rs @@ -270,7 +270,11 @@ fn gen_points( // ============================================================================ /// Comprehensive triangulation scaling analysis across dimensions and distributions -#[expect(clippy::significant_drop_tightening, clippy::too_many_lines)] +#[expect( + clippy::significant_drop_tightening, + clippy::too_many_lines, + reason = 
"profiling setup keeps benchmark state lifetimes and measurement branches visible" +)] fn bench_scaling(c: &mut Criterion) { let counts = get_profiling_counts(); let distributions = [ @@ -505,7 +509,10 @@ fn calculate_percentile(values: &mut [u64], percentile: usize) -> u64 { /// Print memory allocation summary #[cfg(all(feature = "count-allocations", feature = "bench-logging"))] -#[expect(clippy::cast_precision_loss)] +#[expect( + clippy::cast_precision_loss, + reason = "allocation summary reports byte ratios where f64 precision is sufficient" +)] fn print_alloc_summary( info: &AllocationInfo, description: &str, @@ -542,7 +549,10 @@ fn print_alloc_summary( } #[cfg(all(feature = "count-allocations", feature = "bench-logging"))] -#[expect(clippy::cast_possible_wrap)] +#[expect( + clippy::cast_possible_wrap, + reason = "sample counts fit signed indexing for percentile diagnostics" +)] fn print_alloc_summary_from_samples( allocation_infos: &SmallBuffer, actual_point_counts: &SmallBuffer, diff --git a/justfile b/justfile index 30e0a6da..20f5651c 100644 --- a/justfile +++ b/justfile @@ -589,6 +589,8 @@ semgrep-test: _ensure-uv set -euo pipefail cd tests/semgrep uv run semgrep scan --test --strict --config ../../semgrep.yaml src/core/algorithms/no_std_hash_collections.rs + uv run semgrep scan --test --strict --config ../../semgrep.yaml src/project_rules/rust_style.rs + uv run semgrep scan --test --strict --config ../../semgrep.yaml scripts/tests/python_exceptions.py # Development setup setup: setup-tools diff --git a/semgrep.yaml b/semgrep.yaml index 6d320580..10874d00 100644 --- a/semgrep.yaml +++ b/semgrep.yaml @@ -33,25 +33,6 @@ rules: mod tests { ... } - - pattern-not: $TYPE::try_from($VALUE).expect(...) - - pattern-not: $VALUE.to_f64().expect(...) - - pattern-not: $VALUE.to_i64().expect(...) - - pattern-not: ::from($VALUE).expect(...) - - pattern-not: NonZeroUsize::new($VALUE).expect(...) - - pattern-not: $VALUE.build_facet_to_cells_map().expect(...) 
- - pattern-not: $VALUE.expect("Closure should have set result") - - pattern-not: $VALUE.expect("conflict_cells should be computed above") - - pattern-not: $VALUE.expect("retry_seed is only used when retry attempts are enabled") - - pattern-not: $VALUE.expect("epsilon validated above") - - pattern-not-regex: 'try_from\([^)]*\)\.expect\(' - - pattern-not-regex: '\.to_(f64|i64)\(\)\s*\.expect\(' - - pattern-not-regex: 'NumCast>::from\([^)]*\)\s*\.expect\(' - - pattern-not-regex: 'NonZeroUsize::new\([^)]*\)\.expect\(' - - pattern-not-regex: 'build_facet_to_cells_map\(\)\s*\.expect\(' - - pattern-not-regex: 'expect\("Closure should have set result"\)' - - pattern-not-regex: 'expect\("conflict_cells should be computed above"\)' - - pattern-not-regex: 'expect\("retry_seed is only used when retry attempts are enabled"\)' - - pattern-not-regex: 'expect\("epsilon validated above"\)' - pattern-not-inside: | #[cfg(test)] mod $MOD { @@ -192,7 +173,7 @@ rules: - pattern: num_traits::NumCast::from($VALUE).unwrap_or($FALLBACK) - pattern: num_traits::NumCast::from($VALUE).unwrap_or_else($FALLBACK) - patterns: - - pattern: $SAFE($ARGS...).unwrap_or($FALLBACK) + - pattern: $SAFE(...).unwrap_or($FALLBACK) - metavariable-regex: metavariable: $SAFE regex: safe_[A-Za-z0-9_]+ @@ -264,3 +245,223 @@ rules: eprintln!(...); ... } + + - id: delaunay.rust.no-box-dyn-error-in-src + languages: + - rust + severity: WARNING + message: "Use a typed error enum in production src/ instead of Box, &dyn Error, or anyhow::Error." + metadata: + category: maintainability + rationale: >- + Public and production fallible paths should preserve typed error + contracts rather than erasing error shape behind dynamic errors. 
+ paths: + include: + - "/src/**/*.rs" + exclude: + - "/tests/**" + - "/examples/**" + - "/benches/**" + - "/tests/semgrep/**" + patterns: + # Regex is deliberate here: Rust AST patterns over-match associated + # error types such as serde's `S::Error`; this rule is about exact + # dynamic-error spellings in production code. + - pattern-regex: >- + ^[ \t]*[^\n]*(\bBox\s*<\s*dyn\s+(std::error::)?Error\b|&\s*dyn\s+(std::error::)?Error\b|\banyhow::Error\b) + - pattern-not-regex: '^[ \t]*(//|///|/\*|\*)' + - pattern-not-inside: | + mod tests { + ... + } + - pattern-not-inside: | + #[cfg(test)] + mod $MOD { + ... + } + - pattern-not-inside: | + #[cfg(any(test, ...))] + mod $MOD { + ... + } + - pattern-not-inside: | + #[cfg(test)] + fn $FUNC(...) { + ... + } + - pattern-not-inside: | + #[cfg(any(test, ...))] + fn $FUNC(...) { + ... + } + - pattern-not-inside: | + #[cfg(test)] + impl $TYPE { + ... + } + - pattern-not-inside: | + #[cfg(any(test, ...))] + impl $TYPE { + ... + } + + - id: delaunay.rust.no-clippy-allow-lints + languages: + - rust + severity: WARNING + message: "Use #[expect(..., reason = ...)] instead of #[allow(...)] for Clippy suppressions." + metadata: + category: maintainability + rationale: >- + Clippy suppressions should be checked by the compiler and carry a + stable reason so stale suppressions are noticed during linting. + paths: + include: + - "/src/**/*.rs" + - "/tests/**/*.rs" + - "/benches/**/*.rs" + exclude: + - "/tests/semgrep/**" + pattern-regex: '^\s*#\s*\[\s*allow\s*\(\s*clippy::' + + - id: delaunay.rust.expect-requires-reason + languages: + - rust + severity: WARNING + message: "Add reason = \"...\" to #[expect(...)] so lint suppressions remain auditable." + metadata: + category: maintainability + rationale: >- + Repository guidance prefers documented lint expectations over + unexplained suppressions. 
+ paths: + include: + - "/src/**/*.rs" + - "/tests/**/*.rs" + - "/benches/**/*.rs" + exclude: + - "/tests/semgrep/**" + patterns: + # Regex keeps this intentionally scoped to one-line `#[expect(...)]` + # attributes; multiline attributes remain a review concern. + - pattern-regex: '^\s*#\s*\[\s*expect\s*\([^\]\n]*\)\s*\]' + - pattern-not-regex: '\breason\s*=' + + - id: delaunay.python.no-broad-exception + languages: + - python + severity: WARNING + message: "Catch a specific exception type instead of broad Exception." + metadata: + category: maintainability + rationale: >- + Python tooling should surface expected failure modes explicitly instead + of swallowing unrelated errors. + paths: + include: + - "/scripts/**/*.py" + exclude: + - "/tests/semgrep/**" + pattern-regex: '^\s*except\s+Exception\s*:' + + - id: delaunay.python.no-raw-exception-in-tests + languages: + - python + severity: WARNING + message: "Raise a specific exception type in Python tests instead of raw Exception." + metadata: + category: maintainability + rationale: "Typed exceptions make test failures and expected errors easier to diagnose." + paths: + include: + - "/scripts/tests/**/*.py" + exclude: + - "/tests/semgrep/**" + pattern: raise Exception(...) + + - id: delaunay.python.no-adhoc-completedprocess-mock + languages: + - python + severity: WARNING + message: "Use subprocess.CompletedProcess[str] instead of ad hoc Mock stdout/returncode objects." + metadata: + category: maintainability + rationale: >- + Command-wrapper tests should use the real subprocess result shape so + mocks stay aligned with production subprocess APIs. + paths: + include: + - "/scripts/tests/**/*.py" + exclude: + - "/tests/semgrep/**" + pattern-either: + - patterns: + - pattern-either: + - pattern: | + $PROC = Mock(...) + ... + $PROC.stdout = ... + - pattern: | + $PROC = unittest.mock.Mock(...) + ... + $PROC.stdout = ... + - pattern: | + $PROC = mock.Mock(...) + ... + $PROC.stdout = ... 
+ - pattern: | + $PROC = MagicMock(...) + ... + $PROC.stdout = ... + - pattern: | + $PROC = unittest.mock.MagicMock(...) + ... + $PROC.stdout = ... + - pattern: | + $PROC = mock.MagicMock(...) + ... + $PROC.stdout = ... + - patterns: + - pattern-either: + - pattern: | + $PROC = Mock(...) + ... + $PROC.returncode = ... + - pattern: | + $PROC = unittest.mock.Mock(...) + ... + $PROC.returncode = ... + - pattern: | + $PROC = mock.Mock(...) + ... + $PROC.returncode = ... + - pattern: | + $PROC = MagicMock(...) + ... + $PROC.returncode = ... + - pattern: | + $PROC = unittest.mock.MagicMock(...) + ... + $PROC.returncode = ... + - pattern: | + $PROC = mock.MagicMock(...) + ... + $PROC.returncode = ... + + - id: delaunay.python.no-untyped-defs-in-scripts + languages: + - python + severity: WARNING + message: "Add an explicit return annotation to script functions." + metadata: + category: maintainability + rationale: "Typed script helpers are easier for ty and reviewers to reason about." + paths: + include: + - "/scripts/**/*.py" + exclude: + - "/tests/semgrep/**" + # Regex catches obvious unannotated definitions while avoiding a custom + # Python parser rule that would duplicate ty. + pattern-regex: '^( {0,4}|\t)(async\s+)?def\s+[A-Za-z_][A-Za-z0-9_]*\([^#\n]*\)\s*:' diff --git a/src/core/algorithms/flips.rs b/src/core/algorithms/flips.rs index b6cee27a..71027f8e 100644 --- a/src/core/algorithms/flips.rs +++ b/src/core/algorithms/flips.rs @@ -2525,10 +2525,9 @@ where }) } -#[allow(clippy::too_many_lines)] #[expect( clippy::too_many_arguments, - reason = "Local predicate evaluation threads topology, source cells, and diagnostics explicitly" + reason = "local predicate evaluation threads topology, source cells, and diagnostics explicitly" )] /// Evaluate the k=2 facet flip predicate for a local Delaunay violation. 
fn delaunay_violation_k2_for_facet( @@ -4932,9 +4931,9 @@ impl RepairQueues { /// Seeds exactly the queues supported by the current dimension so repair and /// verification inspect the same local neighborhoods. -#[allow( +#[expect( clippy::too_many_lines, - reason = "Seeding logic mirrors runtime queues; keep as single flow for diagnostics" + reason = "seeding logic mirrors runtime queues and stays as one diagnostic flow" )] fn seed_repair_queues( tds: &Tds, diff --git a/src/core/boundary.rs b/src/core/boundary.rs index d957c3ec..13ebbc6d 100644 --- a/src/core/boundary.rs +++ b/src/core/boundary.rs @@ -235,7 +235,10 @@ mod tests { // SINGLE SIMPLEX TESTS // ============================================================================= - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "boundary regression test keeps topology setup and assertions together" + )] #[test] fn test_boundary_facets_single_simplices() { // Test boundary analysis for single simplices in different dimensions diff --git a/src/core/facet.rs b/src/core/facet.rs index 2e437909..745a6af8 100644 --- a/src/core/facet.rs +++ b/src/core/facet.rs @@ -680,8 +680,14 @@ where } } -#[expect(clippy::expl_impl_clone_on_copy)] -#[expect(clippy::non_canonical_clone_impl)] +#[expect( + clippy::expl_impl_clone_on_copy, + reason = "manual clone implementation documents facet identity semantics" +)] +#[expect( + clippy::non_canonical_clone_impl, + reason = "facet clone intentionally preserves cached view fields" +)] impl Clone for FacetView<'_, T, U, V, D> where T: CoordinateScalar, @@ -861,7 +867,10 @@ where ); // We collect here because we need an owned iterator to store in the struct // CellKey is just u64, so this is efficient - #[expect(clippy::needless_collect)] + #[expect( + clippy::needless_collect, + reason = "iterator owns a stable snapshot of cell keys before yielding facets" + )] let cell_keys: Vec = tds.cell_keys().collect(); Self { tds, diff --git 
a/src/core/traits/facet_cache.rs b/src/core/traits/facet_cache.rs index a567156c..efc20f87 100644 --- a/src/core/traits/facet_cache.rs +++ b/src/core/traits/facet_cache.rs @@ -123,8 +123,10 @@ where return Some(existing.clone()); } // Build the cache only once, even if RCU retries - #[expect(clippy::option_if_let_else)] - // Complex error handling doesn't benefit from map_or_else + #[expect( + clippy::option_if_let_else, + reason = "explicit match keeps cache-hit and cache-build errors readable" + )] match built.get_or_insert_with(|| tds.build_facet_to_cells_map().map(Arc::new)) { Ok(arc) => Some(arc.clone()), Err(_) => None, // Let the caller handle the error diff --git a/src/core/util/facet_keys.rs b/src/core/util/facet_keys.rs index ce3e972e..9bf9c311 100644 --- a/src/core/util/facet_keys.rs +++ b/src/core/util/facet_keys.rs @@ -411,7 +411,10 @@ mod tests { } #[test] - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "facet-key test keeps related canonicalization cases together" + )] fn test_checked_facet_key_from_vertex_keys_comprehensive() { println!("Testing checked_facet_key_from_vertex_keys comprehensively"); diff --git a/src/geometry/algorithms/convex_hull.rs b/src/geometry/algorithms/convex_hull.rs index 8566b8f4..4290eaca 100644 --- a/src/geometry/algorithms/convex_hull.rs +++ b/src/geometry/algorithms/convex_hull.rs @@ -901,7 +901,10 @@ where /// # Errors /// /// Returns a [`ConvexHullConstructionError`] if visibility checking fails. 
- #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "visibility check keeps predicate setup, cache lookup, and diagnostics together" + )] fn is_facet_visible_from_point_with_cache( &self, facet_handle: &FacetHandle, @@ -1898,7 +1901,11 @@ mod tests { /// Comprehensive test for visibility algorithms covering all dimensions and edge cases /// Consolidates: `test_visibility_algorithms_comprehensive`, `test_visibility_edge_cases`, /// `test_visibility_algorithm_coverage`, `test_edge_case_distance_calculations` - #[expect(clippy::too_many_lines, clippy::cognitive_complexity)] + #[expect( + clippy::too_many_lines, + clippy::cognitive_complexity, + reason = "test keeps dimension-specific visibility cases together" + )] #[test] fn test_visibility_algorithms_comprehensive() { println!("Testing comprehensive visibility algorithms in dimensions 2D-5D"); @@ -2184,7 +2191,10 @@ mod tests { // These tests target private methods to ensure thorough coverage of internal // ConvexHull functionality, particularly the fallback_visibility_test method. 
- #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "test keeps fallback visibility scenarios together" + )] #[test] fn test_fallback_visibility_comprehensive() { println!("Testing comprehensive fallback visibility algorithm"); @@ -2461,7 +2471,11 @@ mod tests { /// Consolidates: `test_convex_hull_validation_comprehensive`, `test_validate_method_comprehensive`, /// `test_validate_method_various_dimensions` #[test] - #[expect(clippy::cognitive_complexity, clippy::too_many_lines)] + #[expect( + clippy::cognitive_complexity, + clippy::too_many_lines, + reason = "test keeps hull validation scenarios and diagnostics together" + )] fn test_convex_hull_validation_comprehensive() { println!("Testing ConvexHull validation comprehensively"); @@ -3792,7 +3806,10 @@ mod tests { println!("✓ Numeric cast error handling tested successfully"); } - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "test keeps cache invalidation scenarios together" + )] #[test] fn test_cache_invalidation_behavior() { println!("Testing cache invalidation behavior in ConvexHull"); @@ -4838,7 +4855,11 @@ mod tests { // These tests provide comprehensive coverage of error conditions that can // occur during convex hull construction and operation. - #[expect(clippy::too_many_lines, clippy::cognitive_complexity)] + #[expect( + clippy::too_many_lines, + clippy::cognitive_complexity, + reason = "test keeps convex hull error variants and scenarios together" + )] #[test] fn test_convex_hull_error_handling_comprehensive() { println!("Testing comprehensive ConvexHull error handling"); @@ -4973,12 +4994,12 @@ mod tests { source: facet_error, }; - // Walk the error source chain - let mut current_error: &dyn Error = &chained_visibility_error; + // Walk the error source chain without erasing the root error type. 
+ let mut current_source = chained_visibility_error.source(); let mut depth = 0; - while let Some(source) = current_error.source() { + while let Some(source) = current_source { depth += 1; - current_error = source; + current_source = source.source(); } assert!( depth > 0, @@ -4991,27 +5012,56 @@ mod tests { // ======================================================================== println!(" Testing error message formatting consistency..."); - let test_errors: Vec> = vec![ - Box::new(ConvexHullValidationError::InvalidFacet { - facet_index: 0, - source: FacetError::InsufficientVertices { - expected: 3, - actual: 2, - dimension: 3, - }, - }), - Box::new(ConvexHullConstructionError::InvalidTriangulation { - message: "Test message".to_string(), - }), - Box::new(ConvexHullConstructionError::GeometricDegeneracy { - message: "Collinear points".to_string(), - }), + let test_errors = [ + ( + ConvexHullValidationError::InvalidFacet { + facet_index: 0, + source: FacetError::InsufficientVertices { + expected: 3, + actual: 2, + dimension: 3, + }, + } + .to_string(), + format!( + "{:?}", + ConvexHullValidationError::InvalidFacet { + facet_index: 0, + source: FacetError::InsufficientVertices { + expected: 3, + actual: 2, + dimension: 3, + }, + } + ), + ), + ( + ConvexHullConstructionError::InvalidTriangulation { + message: "Test message".to_string(), + } + .to_string(), + format!( + "{:?}", + ConvexHullConstructionError::InvalidTriangulation { + message: "Test message".to_string(), + } + ), + ), + ( + ConvexHullConstructionError::GeometricDegeneracy { + message: "Collinear points".to_string(), + } + .to_string(), + format!( + "{:?}", + ConvexHullConstructionError::GeometricDegeneracy { + message: "Collinear points".to_string(), + } + ), + ), ]; - for (i, error) in test_errors.iter().enumerate() { - let display_msg = format!("{error}"); - let debug_msg = format!("{error:?}"); - + for (i, (display_msg, debug_msg)) in test_errors.iter().enumerate() { assert!( 
!display_msg.is_empty(), "Error {i} display message should not be empty" @@ -5146,7 +5196,10 @@ mod tests { // under various degenerate and edge-case conditions. #[test] - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "test keeps degenerate facet visibility scenarios together" + )] fn test_fallback_visibility_with_degenerate_facets() { println!("Testing fallback visibility algorithm with degenerate facet geometries"); @@ -5346,7 +5399,10 @@ mod tests { } #[test] - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "test keeps threshold and heuristic visibility scenarios together" + )] fn test_fallback_visibility_threshold_behavior() { println!("Testing fallback visibility threshold and heuristic behavior"); @@ -5505,7 +5561,10 @@ mod tests { // geometric configurations that can cause numerical instability. #[test] - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "test keeps collinear hull edge cases together" + )] fn test_collinear_points_handling() { println!("Testing convex hull construction with collinear point configurations"); @@ -5634,7 +5693,10 @@ mod tests { } #[test] - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "test keeps coplanar higher-dimensional cases together" + )] fn test_coplanar_points_in_higher_dimensions() { println!("Testing convex hull construction with coplanar point configurations"); @@ -5875,7 +5937,10 @@ mod tests { // and with larger datasets to ensure robustness and performance. 
#[test] - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "test keeps high-dimensional hull cases together" + )] fn test_high_dimensional_convex_hulls() { println!("Testing convex hull construction in high dimensions (6D, 7D, 8D)"); diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 188f543f..ed20e0c7 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -1421,7 +1421,10 @@ mod tests { // Create different NaN values let nan1 = f64::NAN; - #[expect(clippy::zero_divided_by_zero)] + #[expect( + clippy::zero_divided_by_zero, + reason = "test deliberately constructs an alternate NaN bit pattern" + )] let nan2 = 0.0f64 / 0.0f64; // Another way to create NaN let nan3 = f64::INFINITY - f64::INFINITY; // Yet another way @@ -1441,7 +1444,10 @@ mod tests { // Test with f32 as well let f32_nan1 = f32::NAN; - #[expect(clippy::zero_divided_by_zero)] + #[expect( + clippy::zero_divided_by_zero, + reason = "test deliberately constructs an alternate NaN bit pattern" + )] let f32_nan2 = 0.0f32 / 0.0f32; let point_f32_1 = Point::new([f32_nan1]); @@ -1532,7 +1538,10 @@ mod tests { } #[test] - #[expect(clippy::cast_precision_loss)] + #[expect( + clippy::cast_precision_loss, + reason = "test inputs intentionally cover large integer-to-float conversions" + )] fn point_extreme_dimensions() { // Test with high dimensional points (limited by serde trait implementations) @@ -1609,7 +1618,10 @@ mod tests { let original = Point::new([1.0, 2.0, 3.0]); // Test explicit cloning - #[expect(clippy::clone_on_copy)] + #[expect( + clippy::clone_on_copy, + reason = "test asserts explicit clone behavior for copy coordinates" + )] let cloned = original.clone(); assert_relative_eq!(original.to_array().as_slice(), cloned.to_array().as_slice()); @@ -3324,7 +3336,10 @@ mod tests { assert_sync(point); // Test Clone and Copy - #[expect(clippy::clone_on_copy)] + #[expect( + clippy::clone_on_copy, + reason = "test asserts explicit clone behavior for 
copy coordinates" + )] let cloned = point.clone(); let copied = point; diff --git a/src/geometry/predicates.rs b/src/geometry/predicates.rs index 076c824e..38c8b69a 100644 --- a/src/geometry/predicates.rs +++ b/src/geometry/predicates.rs @@ -1198,7 +1198,10 @@ mod tests { } #[test] - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "orientation regression test keeps dimension-specific cases together" + )] fn test_simplex_orientation_comprehensive() { // Test 2D orientation - positive case let positive_2d = vec![ diff --git a/src/geometry/quality.rs b/src/geometry/quality.rs index a7a73453..20ffc752 100644 --- a/src/geometry/quality.rs +++ b/src/geometry/quality.rs @@ -1273,8 +1273,7 @@ let cell_key = dt.cells().next().unwrap().0; let err = QualityError::InvalidCell { message: "test".to_string(), }; - // Should be able to use as Error trait object - let _: &dyn std::error::Error = &err; + assert!(std::error::Error::source(&err).is_none()); assert!(format!("{err}").contains("test")); } diff --git a/src/geometry/robust_predicates.rs b/src/geometry/robust_predicates.rs index dffc06fa..ea5354a6 100644 --- a/src/geometry/robust_predicates.rs +++ b/src/geometry/robust_predicates.rs @@ -653,7 +653,10 @@ mod tests { assert!(result.is_ok()); } - #[expect(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "insphere consistency test keeps related robust predicate cases together" + )] #[test] fn test_verify_insphere_consistency_comprehensive() { let points = vec![ diff --git a/src/geometry/traits/coordinate.rs b/src/geometry/traits/coordinate.rs index 54c5b859..bd948138 100644 --- a/src/geometry/traits/coordinate.rs +++ b/src/geometry/traits/coordinate.rs @@ -1306,12 +1306,9 @@ mod tests { // Test source() method - should return None for this error type assert!(error.source().is_none()); - // Test that it can be converted to a boxed error - let _boxed_error: Box = Box::new(error.clone()); - - // Test error chain handling 
- let error_ref: &dyn Error = &error; - assert_eq!(error_ref.to_string(), error.to_string()); + // Test error trait handling without erasing the concrete type. + assert!(Error::source(&error).is_none()); + assert_eq!(format!("{error}"), error.to_string()); } #[test] diff --git a/src/geometry/util/triangulation_generation.rs b/src/geometry/util/triangulation_generation.rs index 3d5f9725..d9e967a0 100644 --- a/src/geometry/util/triangulation_generation.rs +++ b/src/geometry/util/triangulation_generation.rs @@ -209,14 +209,11 @@ where /// - Topology/Euler validation failure after robust fallback attempts /// /// **Other construction failures** (various variants): -/// - Vertex/cell construction errors +/// - Cell construction errors /// - Triangulation validation failures /// -/// # Panics -/// -/// This function can panic if: -/// - Vertex construction fails due to invalid data types or constraints -/// - This should not happen with valid inputs and supported data types +/// Vertex construction from generated points is infallible; failures are +/// returned from point generation, triangulation construction, or validation. /// /// # Examples /// @@ -346,7 +343,6 @@ where /// .unwrap(); /// assert_eq!(dt.dim(), 3); /// ``` -#[allow(clippy::too_many_lines)] pub fn generate_random_triangulation_with_topology_guarantee( n_points: usize, bounds: (T, T), diff --git a/src/triangulation/delaunay.rs b/src/triangulation/delaunay.rs index a9da5544..e6297d87 100644 --- a/src/triangulation/delaunay.rs +++ b/src/triangulation/delaunay.rs @@ -2628,7 +2628,10 @@ where /// Retries batch construction with deterministic shuffles so retryable /// degeneracies can be escaped reproducibly. 
- #[allow(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "construction retry flow keeps seed selection, validation, and diagnostics together" + )] fn build_with_shuffled_retries( kernel: &K, vertices: &[Vertex], @@ -2800,7 +2803,10 @@ where /// Mirrors shuffled retry construction while preserving per-attempt /// statistics for callers that need skip and retry diagnostics. - #[allow(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "statistics variant mirrors construction retry flow for comparable diagnostics" + )] #[expect( clippy::result_large_err, reason = "Internal helper propagates public by-value construction-statistics error type" @@ -3565,7 +3571,10 @@ where /// Inserts the non-simplex vertices under a fixed perturbation seed so bulk /// construction retries are reproducible. - #[allow(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "seeded insertion loop keeps cache repair and retry diagnostics in one flow" + )] fn insert_remaining_vertices_seeded( &mut self, vertices: &[Vertex], @@ -5006,7 +5015,13 @@ where } /// Enables test-only repair fallback paths without exposing a public knob. - #[allow(clippy::missing_const_for_fn)] + #[cfg_attr( + not(test), + expect( + clippy::missing_const_for_fn, + reason = "runtime feature and environment checks should remain ordinary functions" + ) + )] fn force_heuristic_rebuild_enabled() -> bool { #[cfg(test)] { @@ -5139,7 +5154,10 @@ where /// Rebuilds from the current vertex set with varied deterministic seeds when /// flip repair cannot converge directly. 
- #[allow(clippy::too_many_lines)] + #[expect( + clippy::too_many_lines, + reason = "heuristic rebuild keeps point extraction, reconstruction, and validation together" + )] fn rebuild_with_heuristic( &self, base_seeds: DelaunayRepairHeuristicSeeds, diff --git a/tests/coordinate_conversion_errors.rs b/tests/coordinate_conversion_errors.rs index eb70e0a0..1ee73735 100644 --- a/tests/coordinate_conversion_errors.rs +++ b/tests/coordinate_conversion_errors.rs @@ -342,7 +342,10 @@ fn test_zero_and_negative_zero() { } #[test] -#[expect(clippy::match_same_arms)] +#[expect( + clippy::match_same_arms, + reason = "test asserts each conversion error maps to the same public category" +)] fn test_very_large_finite_values() { // Test that very large but finite values are handled correctly // Use a large value that won't overflow when squared (f64::MAX would become infinity when squared) diff --git a/tests/delaunay_edge_cases.rs b/tests/delaunay_edge_cases.rs index b7b6e90b..113fbd09 100644 --- a/tests/delaunay_edge_cases.rs +++ b/tests/delaunay_edge_cases.rs @@ -131,9 +131,18 @@ test_regression_config!( ); #[test] -#[expect(clippy::collapsible_if)] -#[expect(clippy::too_many_lines)] -#[expect(clippy::unreadable_literal)] +#[expect( + clippy::collapsible_if, + reason = "test keeps nested invariant checks aligned with diagnostic messages" +)] +#[expect( + clippy::too_many_lines, + reason = "edge-case regression test keeps construction and validation assertions together" +)] +#[expect( + clippy::unreadable_literal, + reason = "large literal documents the exact stress-case coordinate" +)] fn debug_issue_120_empty_circumsphere_5d() { init_tracing(); let vertices = vec![ diff --git a/tests/proptest_sos.rs b/tests/proptest_sos.rs index f345c9fd..b752abe7 100644 --- a/tests/proptest_sos.rs +++ b/tests/proptest_sos.rs @@ -35,7 +35,10 @@ use proptest::prelude::*; /// /// Uses exact equality because coordinates are derived from integers /// via `f64::from()`, so no rounding occurs. 
-#[allow(clippy::float_cmp)] +#[expect( + clippy::float_cmp, + reason = "coordinates are integer-derived f64 values, so exact equality is intentional" +)] fn points_all_distinct(points: &[Point]) -> bool { (0..points.len()) .all(|i| ((i + 1)..points.len()).all(|j| points[i].coords() != points[j].coords())) diff --git a/tests/semgrep/scripts/tests/python_exceptions.py b/tests/semgrep/scripts/tests/python_exceptions.py new file mode 100644 index 00000000..2de63ad0 --- /dev/null +++ b/tests/semgrep/scripts/tests/python_exceptions.py @@ -0,0 +1,55 @@ +import subprocess +from unittest.mock import MagicMock, Mock + + +def catches_broad_exception() -> None: + try: + pass + # ruleid: delaunay.python.no-broad-exception + except Exception: + pass + + +def catches_specific_exception() -> None: + try: + pass + # ok: delaunay.python.no-broad-exception + except OSError: + pass + + +def raises_raw_exception() -> None: + # ruleid: delaunay.python.no-raw-exception-in-tests + raise Exception("too broad") + + +def raises_specific_exception() -> None: + # ok: delaunay.python.no-raw-exception-in-tests + raise RuntimeError("specific failure") + + +def adhoc_mock_stdout() -> None: + # ruleid: delaunay.python.no-adhoc-completedprocess-mock + result = Mock() + result.stdout = "ok" + + +def adhoc_mock_returncode() -> None: + # ruleid: delaunay.python.no-adhoc-completedprocess-mock + result = MagicMock() + result.returncode = 0 + + +def typed_completed_process() -> subprocess.CompletedProcess[str]: + # ok: delaunay.python.no-adhoc-completedprocess-mock + return subprocess.CompletedProcess(args=[], returncode=0, stdout="ok", stderr="") + + +# ruleid: delaunay.python.no-untyped-defs-in-scripts +def missing_return_annotation(): + return None + + +# ok: delaunay.python.no-untyped-defs-in-scripts +def explicit_return_annotation() -> None: + return None diff --git a/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs b/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs index 
d5dffe86..ff48f382 100644 --- a/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs +++ b/tests/semgrep/src/core/algorithms/no_std_hash_collections.rs @@ -53,13 +53,3 @@ fn collects() { .into_iter() .collect::>(); } - -pub fn public_expect_bypass(value: Option) -> u8 { - // ruleid: delaunay.rust.no-production-unwrap-panic - value.expect("public APIs should return typed errors instead") -} - -fn private_documented_invariant(value: Option) -> u8 { - // ok: delaunay.rust.no-production-unwrap-panic - value.expect("private helper documents an internal invariant") -} diff --git a/tests/semgrep/src/project_rules/rust_style.rs b/tests/semgrep/src/project_rules/rust_style.rs new file mode 100644 index 00000000..b0aa27c5 --- /dev/null +++ b/tests/semgrep/src/project_rules/rust_style.rs @@ -0,0 +1,82 @@ +#![allow(dead_code, unused_imports)] + +use num_traits::NumCast; + +pub fn production_stdio() { + // ruleid: delaunay.rust.no-stdio-diagnostics-in-src + println!("debug output"); + + // ruleid: delaunay.rust.no-stdio-diagnostics-in-src + eprintln!("debug output"); +} + +pub fn nonfinite_defaults(value: Option) -> f64 { + // ruleid: delaunay.rust.no-nonfinite-unwrap-defaults + value.unwrap_or(f64::NAN) +} + +pub fn silent_conversion_fallback(value: u64) -> f64 { + // ruleid: delaunay.rust.no-silent-conversion-fallbacks + NumCast::from(value).unwrap_or(0.0) +} + +fn safe_f64(_value: u64) -> Option { + Some(1.0) +} + +pub fn safe_conversion_fallback(value: u64) -> f64 { + // ruleid: delaunay.rust.no-silent-conversion-fallbacks + safe_f64(value).unwrap_or(0.0) +} + +pub fn public_unwrap_bypass(value: Option) -> u8 { + // ruleid: delaunay.rust.no-production-unwrap-panic + value.unwrap() +} + +pub fn public_expect_bypass(value: Option) -> u8 { + // ruleid: delaunay.rust.no-production-unwrap-panic + value.expect("public APIs should return typed errors instead") +} + +fn private_documented_invariant(value: Option) -> u8 { + // ok: delaunay.rust.no-production-unwrap-panic + 
value.expect("private helper documents an internal invariant") +} + +pub fn env_gated_stdio() { + // ruleid: delaunay.rust.no-env-gated-stdio-diagnostics + if std::env::var_os("DELAUNAY_DEBUG").is_some() { + // ruleid: delaunay.rust.no-stdio-diagnostics-in-src + println!("debug output"); + } +} + +// ruleid: delaunay.rust.no-clippy-allow-lints +#[allow(clippy::too_many_lines)] +fn clippy_allow_fixture() {} + +// ruleid: delaunay.rust.expect-requires-reason +#[expect(clippy::too_many_lines)] +fn expect_without_reason_fixture() {} + +// ok: delaunay.rust.expect-requires-reason +#[expect(clippy::too_many_lines, reason = "fixture documents the suppression")] +fn expect_with_reason_fixture() {} + +// ruleid: delaunay.rust.no-box-dyn-error-in-src +type ProductionBoxedError = Box; + +trait ProductionDynamicErrors { + // ruleid: delaunay.rust.no-box-dyn-error-in-src + fn boxed_error_result(&self) -> Result<(), Box>; + + // ruleid: delaunay.rust.no-box-dyn-error-in-src + fn borrowed_error(&self, error: &dyn std::error::Error); + + // ruleid: delaunay.rust.no-box-dyn-error-in-src + fn anyhow_error(&self, error: anyhow::Error); +} + +/// # Ok::<(), Box>(()) +fn doctest_style_error_is_ignored() {} From 1a5fd6a7e8682e3800a8aa622d992a0efaeb306a Mon Sep 17 00:00:00 2001 From: Adam Getchell Date: Wed, 29 Apr 2026 16:17:41 -0700 Subject: [PATCH 4/5] ci: refresh quality tooling and diagnostics - Pin GitHub workflow tool versions and update action SHAs for cache, artifact upload, install-action, and SARIF upload. - Exclude Semgrep fixtures from Codacy analysis so intentional rule-test violations do not surface as production issues. - Add a cargo-machete backed just unused-deps recipe for checking unused direct dependencies. - Gate convex hull test diagnostics behind test-debug tracing instead of unconditional stdout output. - Add Hilbert ordering and zero-dimensional sort coverage for Codecov patch gaps. 
--- .codacy.yml | 2 + .github/workflows/audit.yml | 10 +- .github/workflows/benchmarks.yml | 2 +- .github/workflows/ci.yml | 20 +- .github/workflows/codacy.yml | 2 +- .github/workflows/codecov.yml | 16 +- .github/workflows/codeql.yml | 6 +- .github/workflows/generate-baseline.yml | 2 +- .github/workflows/profiling-benchmarks.yml | 14 +- .github/workflows/rust-clippy.yml | 18 +- .gitignore | 4 - justfile | 13 + semgrep.yaml | 2 +- src/core/util/hilbert.rs | 27 + src/geometry/algorithms/convex_hull.rs | 849 +++++++++++---------- src/triangulation/delaunay.rs | 2 +- 16 files changed, 534 insertions(+), 455 deletions(-) diff --git a/.codacy.yml b/.codacy.yml index 4cb48326..ce1ed79f 100644 --- a/.codacy.yml +++ b/.codacy.yml @@ -64,3 +64,5 @@ exclude_paths: - "node_modules/**" - ".git/**" - "docs/archive/**" + # Semgrep fixtures intentionally contain bad examples for rule tests. + - "tests/semgrep/**" diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index 41d3872e..98fca937 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -30,6 +30,8 @@ permissions: jobs: audit: runs-on: ubuntu-latest + env: + CARGO_AUDIT_VERSION: "0.22.1" steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 @@ -40,14 +42,14 @@ jobs: # toolchain, components, etc. 
are specified in rust-toolchain.toml - name: Cache audit database - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 + uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5 with: path: ~/.cargo/advisory-db key: advisory-db-${{ github.ref_name }}-v1 restore-keys: advisory-db- - name: Install cargo-audit - run: cargo install --locked cargo-audit + run: cargo install --locked cargo-audit --version "${CARGO_AUDIT_VERSION}" - name: Run cargo audit run: | @@ -71,14 +73,14 @@ jobs: - name: Upload audit SARIF results if: always() - uses: github/codeql-action/upload-sarif@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4 + uses: github/codeql-action/upload-sarif@b25d0ebf40e5b63ee81e1bd6e5d2a12b7c2aeb61 # v4 with: sarif_file: audit-results.sarif category: "cargo-audit" wait-for-processing: true - name: Upload audit results - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 if: always() with: name: audit-results diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 51afba6a..5dc4ca86 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -302,7 +302,7 @@ jobs: - name: Upload regression test results if: env.BASELINE_EXISTS == 'true' && env.SKIP_BENCHMARKS == 'false' && always() - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: performance-regression-results-${{ github.run_number }} path: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 080f80d2..3bd653b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,10 +21,12 @@ on: env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 - ACTIONLINT_VERSION: "1.7.10" - MARKDOWNLINT_VERSION: "0.47.0" - SHFMT_VERSION: "3.12.0" - TYPOS_VERSION: "1.43.4" + 
ACTIONLINT_VERSION: "1.7.12" + JUST_VERSION: "1.50.0" + MARKDOWNLINT_VERSION: "0.48.0" + SHFMT_VERSION: "3.13.1" + TAPLO_VERSION: "0.10.0" + TYPOS_VERSION: "1.45.2" UV_VERSION: "0.11.8" jobs: @@ -57,9 +59,9 @@ jobs: - name: Install just if: matrix.os != 'windows-latest' - uses: taiki-e/install-action@cf525cb33f51aca27cd6fa02034117ab963ff9f1 # v2.75.22 + uses: taiki-e/install-action@b651345a718c8f44efa2460560b3dbf29cbd7ee1 # v2.75.26 with: - tool: just + tool: just@${{ env.JUST_VERSION }} - name: Install uv (for Python scripts and pytest) if: matrix.os != 'windows-latest' @@ -80,15 +82,15 @@ jobs: - name: Install typos-cli if: matrix.os != 'windows-latest' - uses: taiki-e/install-action@cf525cb33f51aca27cd6fa02034117ab963ff9f1 # v2.75.22 + uses: taiki-e/install-action@b651345a718c8f44efa2460560b3dbf29cbd7ee1 # v2.75.26 with: tool: typos-cli@${{ env.TYPOS_VERSION }} - name: Install taplo (for TOML formatting and linting) if: matrix.os != 'windows-latest' - uses: taiki-e/install-action@cf525cb33f51aca27cd6fa02034117ab963ff9f1 # v2.75.22 + uses: taiki-e/install-action@b651345a718c8f44efa2460560b3dbf29cbd7ee1 # v2.75.26 with: - tool: taplo-cli + tool: taplo-cli@${{ env.TAPLO_VERSION }} - name: Install actionlint (Linux/macOS) if: matrix.os != 'windows-latest' diff --git a/.github/workflows/codacy.yml b/.github/workflows/codacy.yml index 761d34f2..e11eca8f 100644 --- a/.github/workflows/codacy.yml +++ b/.github/workflows/codacy.yml @@ -154,7 +154,7 @@ jobs: PY - name: Upload split SARIF files - uses: github/codeql-action/upload-sarif@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4 + uses: github/codeql-action/upload-sarif@b25d0ebf40e5b63ee81e1bd6e5d2a12b7c2aeb61 # v4 with: sarif_file: ${{ env.CODACY_SPLIT_SARIF_DIR }} wait-for-processing: true diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index 0925eb2a..8f2eb563 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -22,6 +22,8 @@ jobs: runs-on: ubuntu-latest env: 
CARGO_LLVM_COV_VERSION: "0.8.5" + JUST_VERSION: "1.50.0" + NEXTEST_VERSION: "0.9.133" steps: - name: Checkout repository uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 @@ -38,7 +40,7 @@ jobs: run: rustup component add llvm-tools-preview - name: Cache cargo-llvm-cov - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 + uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5 with: path: ~/.cargo/bin/cargo-llvm-cov key: cargo-llvm-cov-${{ runner.os }}-${{ env.CARGO_LLVM_COV_VERSION }} @@ -58,14 +60,14 @@ jobs: fi - name: Install just - uses: taiki-e/install-action@cf525cb33f51aca27cd6fa02034117ab963ff9f1 # v2.75.22 + uses: taiki-e/install-action@b651345a718c8f44efa2460560b3dbf29cbd7ee1 # v2.75.26 with: - tool: just + tool: just@${{ env.JUST_VERSION }} - name: Install nextest - uses: taiki-e/install-action@cf525cb33f51aca27cd6fa02034117ab963ff9f1 # v2.75.22 + uses: taiki-e/install-action@b651345a718c8f44efa2460560b3dbf29cbd7ee1 # v2.75.26 with: - tool: nextest + tool: nextest@${{ env.NEXTEST_VERSION }} - name: Run tests with nextest (for JUnit XML) run: | @@ -202,14 +204,14 @@ jobs: continue-on-error: true - name: Archive coverage results - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 if: always() with: name: coverage-report path: coverage/ - name: Archive test results - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 if: always() with: name: test-results diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 5f1fb7fc..3811dccc 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,20 +41,20 @@ jobs: - name: Initialize CodeQL if: matrix.language != 'rust' - uses: github/codeql-action/init@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # 
v4 + uses: github/codeql-action/init@b25d0ebf40e5b63ee81e1bd6e5d2a12b7c2aeb61 # v4 with: languages: ${{ matrix.language }} queries: security-extended - name: Initialize CodeQL (Rust) if: matrix.language == 'rust' - uses: github/codeql-action/init@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4 + uses: github/codeql-action/init@b25d0ebf40e5b63ee81e1bd6e5d2a12b7c2aeb61 # v4 with: languages: ${{ matrix.language }} build-mode: none queries: security-extended - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4 + uses: github/codeql-action/analyze@b25d0ebf40e5b63ee81e1bd6e5d2a12b7c2aeb61 # v4 with: category: "/language:${{ matrix.language }}" diff --git a/.github/workflows/generate-baseline.yml b/.github/workflows/generate-baseline.yml index 7b49d2bc..e19b7f08 100644 --- a/.github/workflows/generate-baseline.yml +++ b/.github/workflows/generate-baseline.yml @@ -171,7 +171,7 @@ jobs: run: uv run benchmark-utils sanitize-artifact-name --tag "$TAG_NAME" - name: Upload baseline artifact - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: ${{ steps.safe_name.outputs.artifact_name }} path: bench-target/baseline-artifact/ diff --git a/.github/workflows/profiling-benchmarks.yml b/.github/workflows/profiling-benchmarks.yml index 7103c22b..c51df009 100644 --- a/.github/workflows/profiling-benchmarks.yml +++ b/.github/workflows/profiling-benchmarks.yml @@ -50,7 +50,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v6.0.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install Rust toolchain uses: actions-rust-lang/setup-rust-toolchain@2b1f5e9b395427c92ee4e3331786ca3c37afe2d7 # v1.16.0 @@ -59,7 +59,7 @@ jobs: rustflags: "" - name: Cache Cargo dependencies - uses: actions/cache@v5 + uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5 
with: path: | ~/.cargo/bin/ @@ -216,7 +216,7 @@ jobs: cp -r target/criterion profiling-results/criterion-baseline-${{ github.ref_name }} - name: Upload profiling results - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: profiling-results-${{ github.run_number }} path: profiling-results/ @@ -224,7 +224,7 @@ jobs: - name: Upload profiling baseline (for tagged releases) if: startsWith(github.ref, 'refs/tags/') - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: profiling-baseline-${{ github.ref_name }} path: | @@ -252,7 +252,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v6.0.2 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install Rust toolchain uses: actions-rust-lang/setup-rust-toolchain@2b1f5e9b395427c92ee4e3331786ca3c37afe2d7 # v1.16.0 @@ -261,7 +261,7 @@ jobs: rustflags: "" - name: Cache Cargo dependencies - uses: actions/cache@v5 + uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5 with: path: | ~/.cargo/bin/ @@ -298,7 +298,7 @@ jobs: echo "BENCH_MEASUREMENT_TIME=$BENCH_MEASUREMENT_TIME" cargo bench --profile perf --bench profiling_suite --features count-allocations -- memory_profiling - name: Upload memory test results - uses: actions/upload-artifact@v7 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: memory-stress-results-${{ github.run_number }} path: | diff --git a/.github/workflows/rust-clippy.yml b/.github/workflows/rust-clippy.yml index 45249889..17ca4ab0 100644 --- a/.github/workflows/rust-clippy.yml +++ b/.github/workflows/rust-clippy.yml @@ -23,6 +23,9 @@ jobs: clippy-sarif: name: Clippy SARIF Analysis runs-on: ubuntu-latest + env: + CLIPPY_SARIF_VERSION: "0.8.0" + SARIF_FMT_VERSION: "0.8.0" steps: - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd 
# v6.0.2 @@ -34,15 +37,20 @@ jobs: # toolchain, components, etc. are specified in rust-toolchain.toml - name: Cache clippy tools - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 + uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5 with: - path: ~/.cargo/bin/clippy-sarif - key: clippy-sarif-${{ runner.os }} + path: | + ~/.cargo/bin/clippy-sarif + ~/.cargo/bin/sarif-fmt + key: clippy-sarif-${{ runner.os }}-${{ env.CLIPPY_SARIF_VERSION }}-sarif-fmt-${{ env.SARIF_FMT_VERSION }} - name: Install clippy-sarif tools run: | if ! command -v clippy-sarif &> /dev/null; then - cargo install clippy-sarif sarif-fmt --locked + cargo install clippy-sarif --locked --version "${CLIPPY_SARIF_VERSION}" + fi + if ! command -v sarif-fmt &> /dev/null; then + cargo install sarif-fmt --locked --version "${SARIF_FMT_VERSION}" fi - name: Run clippy with SARIF output @@ -59,7 +67,7 @@ jobs: continue-on-error: true - name: Upload SARIF results - uses: github/codeql-action/upload-sarif@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4 + uses: github/codeql-action/upload-sarif@b25d0ebf40e5b63ee81e1bd6e5d2a12b7c2aeb61 # v4 with: sarif_file: rust-clippy-results.sarif category: "clippy" diff --git a/.gitignore b/.gitignore index cfb6c52d..646d1065 100644 --- a/.gitignore +++ b/.gitignore @@ -8,9 +8,6 @@ /*.profraw /cobertura.xml /logs -# Keep tarpaulin.toml but ignore generated files -!/tarpaulin.toml -/tarpaulin-report.* /benches/results /node_modules/ /.package-lock.json @@ -18,7 +15,6 @@ /package.json /__pycache__/ **/__pycache__/ -/coverage_report/tarpaulin-report.* # Python packaging artifacts *.egg-info/ diff --git a/justfile b/justfile index 20f5651c..d1c906a0 100644 --- a/justfile +++ b/justfile @@ -29,6 +29,15 @@ _ensure-cargo-llvm-cov: exit 1 fi +_ensure-cargo-machete: + #!/usr/bin/env bash + set -euo pipefail + if ! cargo machete --version >/dev/null 2>&1; then + echo "❌ 'cargo-machete' not found. 
Install with:" + echo " cargo install --locked cargo-machete" + exit 1 + fi + # Internal helpers: ensure external tooling is installed _ensure-git-cliff: #!/usr/bin/env bash @@ -899,6 +908,10 @@ toml-lint: _ensure-taplo echo "No TOML files found to lint." fi +# Check for unused direct Cargo dependencies. +unused-deps: _ensure-cargo-machete + cargo machete + # File validation validate-json: _ensure-jq #!/usr/bin/env bash diff --git a/semgrep.yaml b/semgrep.yaml index 10874d00..beb876dc 100644 --- a/semgrep.yaml +++ b/semgrep.yaml @@ -329,7 +329,7 @@ rules: languages: - rust severity: WARNING - message: "Add reason = \"...\" to #[expect(...)] so lint suppressions remain auditable." + message: 'Add reason = "..." to #[expect(...)] so lint suppressions remain auditable.' metadata: category: maintainability rationale: >- diff --git a/src/core/util/hilbert.rs b/src/core/util/hilbert.rs index 5ce10a07..e2c9fef9 100644 --- a/src/core/util/hilbert.rs +++ b/src/core/util/hilbert.rs @@ -633,6 +633,33 @@ mod tests { assert_eq!(payload, payload2); } + #[test] + fn test_hilbert_sort_by_unstable_orders_by_hilbert_key() { + let coords: Vec<[f64; 2]> = + vec![[0.9, 0.9], [0.1, 0.1], [0.5, 0.5], [0.1, 0.9], [0.9, 0.1]]; + let expected_order = hilbert_sorted_indices(&coords, (0.0, 1.0), 16).unwrap(); + + let mut payload: Vec = (0..coords.len()).collect(); + hilbert_sort_by_unstable(&mut payload, (0.0_f64, 1.0), 16, |&i| coords[i]).unwrap(); + + assert_eq!(payload, expected_order); + } + + #[test] + fn test_hilbert_zero_dimension_sort_helpers_are_noops() { + let coords: Vec<[f64; 0]> = vec![[], [], []]; + let order = hilbert_sorted_indices(&coords, (0.0, 1.0), 8).unwrap(); + assert_eq!(order, vec![0, 1, 2]); + + let mut stable_payload = vec![3, 2, 1]; + hilbert_sort_by_stable(&mut stable_payload, (0.0_f64, 1.0), 8, |_| []).unwrap(); + assert_eq!(stable_payload, vec![3, 2, 1]); + + let mut unstable_payload = vec![3, 2, 1]; + hilbert_sort_by_unstable(&mut unstable_payload, (0.0_f64, 
1.0), 8, |_| []).unwrap(); + assert_eq!(unstable_payload, vec![3, 2, 1]); + } + #[test] fn test_hilbert_curve_is_continuous_on_2d_grid() { // A defining property of the (discrete) Hilbert curve is continuity: diff --git a/src/geometry/algorithms/convex_hull.rs b/src/geometry/algorithms/convex_hull.rs index 4290eaca..77b68856 100644 --- a/src/geometry/algorithms/convex_hull.rs +++ b/src/geometry/algorithms/convex_hull.rs @@ -1612,6 +1612,20 @@ mod tests { use std::sync::atomic::Ordering; use std::thread; + #[cfg(feature = "test-debug")] + macro_rules! test_debug { + ($($arg:tt)*) => {{ + tracing::debug!($($arg)*); + }}; + } + + #[cfg(not(feature = "test-debug"))] + macro_rules! test_debug { + ($($arg:tt)*) => {{ + let _ = core::format_args!($($arg)*); + }}; + } + // ============================================================================= // HELPER FUNCTIONS // ============================================================================= @@ -1908,12 +1922,12 @@ mod tests { )] #[test] fn test_visibility_algorithms_comprehensive() { - println!("Testing comprehensive visibility algorithms in dimensions 2D-5D"); + test_debug!("Testing comprehensive visibility algorithms in dimensions 2D-5D"); // ======================================================================== // Test 1: 2D visibility (point-in-polygon and visible facets) // ======================================================================== - println!(" Testing 2D visibility algorithms..."); + test_debug!(" Testing 2D visibility algorithms..."); let vertices_2d = vec![ vertex!([0.0, 0.0]), vertex!([1.0, 0.0]), @@ -1960,7 +1974,7 @@ mod tests { // ======================================================================== // Test 2: 3D visibility (comprehensive testing) // ======================================================================== - println!(" Testing 3D visibility algorithms..."); + test_debug!(" Testing 3D visibility algorithms..."); let vertices_3d = vec![ vertex!([0.0, 0.0, 0.0]), 
vertex!([1.0, 0.0, 0.0]), @@ -2057,7 +2071,7 @@ mod tests { // ======================================================================== // Test 3: Edge cases with boundary and near-boundary points // ======================================================================== - println!(" Testing visibility edge cases..."); + test_debug!(" Testing visibility edge cases..."); let boundary_points = [ Point::new([0.5, 0.5, 0.0]), // On a face Point::new([0.3, 0.3, 0.3]), // Near centroid @@ -2092,7 +2106,7 @@ mod tests { // ======================================================================== // Test 4: Distance calculations with edge cases // ======================================================================== - println!(" Testing edge case distance calculations..."); + test_debug!(" Testing edge case distance calculations..."); // Test find_nearest_visible_facet with equidistant points let equidistant_point = Point::new([0.5, 0.5, 0.5]); @@ -2122,7 +2136,7 @@ mod tests { // ======================================================================== // Test 5: 4D visibility // ======================================================================== - println!(" Testing 4D visibility algorithms..."); + test_debug!(" Testing 4D visibility algorithms..."); let vertices_4d = vec![ vertex!([0.0, 0.0, 0.0, 0.0]), vertex!([1.0, 0.0, 0.0, 0.0]), @@ -2153,7 +2167,7 @@ mod tests { // ======================================================================== // Test 6: 5D visibility // ======================================================================== - println!(" Testing 5D visibility algorithms..."); + test_debug!(" Testing 5D visibility algorithms..."); let vertices_5d = vec![ vertex!([0.0, 0.0, 0.0, 0.0, 0.0]), vertex!([1.0, 0.0, 0.0, 0.0, 0.0]), @@ -2182,7 +2196,7 @@ mod tests { "5D outside point should be outside" ); - println!(" ✓ Visibility algorithms tested comprehensively across all dimensions"); + test_debug!(" ✓ Visibility algorithms tested comprehensively 
across all dimensions"); } // ============================================================================ @@ -2197,10 +2211,10 @@ mod tests { )] #[test] fn test_fallback_visibility_comprehensive() { - println!("Testing comprehensive fallback visibility algorithm"); + test_debug!("Testing comprehensive fallback visibility algorithm"); // Test distance-based heuristic with 3D tetrahedron - println!(" Testing distance-based heuristic..."); + test_debug!(" Testing distance-based heuristic..."); let vertices = vec![ vertex!([0.0, 0.0, 0.0]), vertex!([1.0, 0.0, 0.0]), @@ -2233,7 +2247,7 @@ mod tests { .unwrap(); visibility_results.push(is_visible); let coords = point.coords(); - println!(" Point {coords:?} ({description}) - Visible: {is_visible}"); + test_debug!(" Point {coords:?} ({description}) - Visible: {is_visible}"); } let visible_count = visibility_results.iter().filter(|&&v| v).count(); @@ -2243,7 +2257,7 @@ mod tests { ); // Test degenerate cases - println!(" Testing degenerate and edge cases..."); + test_debug!(" Testing degenerate and edge cases..."); let degenerate_points = vec![ (Point::new([0.0, 0.0, 0.0]), "Origin point"), ( @@ -2264,11 +2278,11 @@ mod tests { result.is_ok(), "{description} should not cause fallback to error" ); - println!(" {description} - Result: {:?}", result.unwrap()); + test_debug!(" {description} - Result: {:?}", result.unwrap()); } // Test consistency - same point multiple times should give same result - println!(" Testing consistency..."); + test_debug!(" Testing consistency..."); let consistency_point = Point::new([2.0, 2.0, 2.0]); let consistency_results: Vec = (0..5) .map(|_| { @@ -2287,14 +2301,14 @@ mod tests { .all(|&result| result == first_result), "Fallback visibility should be consistent for same point" ); - println!( + test_debug!( " Consistency test: all {} results were {}", consistency_results.len(), first_result ); // Test numerical precision with high-precision coordinates - println!(" Testing numerical 
precision..."); + test_debug!(" Testing numerical precision..."); let precise_points = vec![ Point::new([1e-15, 1e-15, 1e-15]), Point::new([ @@ -2321,13 +2335,13 @@ mod tests { "High precision coordinates should not cause errors" ); let coords = point.coords(); - println!( + test_debug!( " High precision Point {coords:?} - Visible: {:?}", result.unwrap() ); } - println!(" Testing in different dimensions..."); + test_debug!(" Testing in different dimensions..."); // Test 2D fallback let vertices_2d = vec![ @@ -2348,7 +2362,7 @@ mod tests { 2, >::fallback_visibility_test(&test_facet_2d_vertices, &test_point_2d); assert!(result_2d.is_ok(), "2D fallback should work"); - println!(" 2D fallback result: {:?}", result_2d.unwrap()); + test_debug!(" 2D fallback result: {:?}", result_2d.unwrap()); // Test 4D fallback let vertices_4d = vec![ @@ -2371,9 +2385,9 @@ mod tests { 4, >::fallback_visibility_test(&test_facet_4d_vertices, &test_point_4d); assert!(result_4d.is_ok(), "4D fallback should work"); - println!(" 4D fallback result: {:?}", result_4d.unwrap()); + test_debug!(" 4D fallback result: {:?}", result_4d.unwrap()); - println!(" ✓ Comprehensive fallback visibility algorithm tested successfully"); + test_debug!(" ✓ Comprehensive fallback visibility algorithm tested successfully"); } // ============================================================================ @@ -2477,12 +2491,12 @@ mod tests { reason = "test keeps hull validation scenarios and diagnostics together" )] fn test_convex_hull_validation_comprehensive() { - println!("Testing ConvexHull validation comprehensively"); + test_debug!("Testing ConvexHull validation comprehensively"); // ======================================================================== // Test 1: Valid hulls in different dimensions (1D-5D comprehensive coverage) // ======================================================================== - println!(" Testing valid hull validation in different dimensions..."); + test_debug!(" Testing valid 
hull validation in different dimensions..."); // Test 1D hull (empty hull validation) let dummy_vertices_1d = vec![vertex!([0.0]), vertex!([1.0])]; @@ -2492,7 +2506,7 @@ mod tests { hull_1d.validate(dummy_dt_1d.as_triangulation()).is_ok(), "1D empty hull should validate" ); - println!(" 1D empty hull validated"); + test_debug!(" 1D empty hull validated"); // Test 2D hull let vertices_2d = vec![ @@ -2518,7 +2532,7 @@ mod tests { let vertices = facet_view.vertices().unwrap().count(); assert_eq!(vertices, 2, "2D facet {i} should have exactly 2 vertices"); } - println!( + test_debug!( " 2D hull: {} facets validated", hull_2d.number_of_facets() ); @@ -2555,7 +2569,7 @@ mod tests { let vertices = facet_view.vertices().unwrap().count(); assert_eq!(vertices, 3, "3D facet {i} should have exactly 3 vertices"); } - println!( + test_debug!( " 3D hull: {} facets validated", hull_3d.number_of_facets() ); @@ -2593,7 +2607,7 @@ mod tests { let vertices = facet_view.vertices().unwrap().count(); assert_eq!(vertices, 4, "4D facet {i} should have exactly 4 vertices"); } - println!( + test_debug!( " 4D hull: {} facets validated", hull_4d.number_of_facets() ); @@ -2632,7 +2646,7 @@ mod tests { let vertices = facet_view.vertices().unwrap().count(); assert_eq!(vertices, 5, "5D facet {i} should have exactly 5 vertices"); } - println!( + test_debug!( " 5D hull: {} facets validated", hull_5d.number_of_facets() ); @@ -2644,13 +2658,13 @@ mod tests { "5D empty hull should validate successfully" ); - println!(" ✓ Empty hull validation passed for dimensions 1D-5D"); - println!(" ✓ Valid hull validation passed for all tested dimensions"); + test_debug!(" ✓ Empty hull validation passed for dimensions 1D-5D"); + test_debug!(" ✓ Valid hull validation passed for all tested dimensions"); // ======================================================================== // Test 2: Validation with different data types // ======================================================================== - println!(" 
Testing validation with different data types..."); + test_debug!(" Testing validation with different data types..."); // Test with integer vertex data let vertices_int = vec![ @@ -2687,12 +2701,12 @@ mod tests { hull_char.validate(dt_char.as_triangulation()).is_ok(), "Hull with character data should validate successfully" ); - println!(" ✓ Validation with different data types passed"); + test_debug!(" ✓ Validation with different data types passed"); // ======================================================================== // Test 4: Validation with extreme coordinate values // ======================================================================== - println!(" Testing validation with extreme coordinate values..."); + test_debug!(" Testing validation with extreme coordinate values..."); let extreme_vertices = vec![ // Large coordinates @@ -2748,7 +2762,7 @@ mod tests { // numerically unstable by the robust initial simplex search, or // Hilbert-sort dedup may collapse near-identical coordinates at // quantization resolution, leaving fewer than D+1 vertices. 
- println!( + test_debug!( " \x1b[33mWarning:\x1b[0m skipping {desc} extreme coordinate hull validation \ due to geometric degeneracy or insufficient vertices in DelaunayTriangulation::new", ); @@ -2758,12 +2772,12 @@ mod tests { } } } - println!(" ✓ Validation with extreme coordinate values passed"); + test_debug!(" ✓ Validation with extreme coordinate values passed"); // ======================================================================== // Test 5: Error type structure and formatting // ======================================================================== - println!(" Testing error type structure and formatting..."); + test_debug!(" Testing error type structure and formatting..."); // Test ConvexHullValidationError::InvalidFacet structure let invalid_facet_error = ConvexHullValidationError::InvalidFacet { @@ -2779,7 +2793,7 @@ mod tests { assert!(error_message.contains("Facet 42 validation failed")); assert!(error_message.contains("exactly 3 vertices")); assert!(error_message.contains("got 2")); - println!(" InvalidFacet error: {error_message}"); + test_debug!(" InvalidFacet error: {error_message}"); // Test ConvexHullValidationError::DuplicateVerticesInFacet structure let duplicate_vertices_error = ConvexHullValidationError::DuplicateVerticesInFacet { @@ -2790,7 +2804,7 @@ mod tests { let error_message = format!("{duplicate_vertices_error}"); assert!(error_message.contains("Facet 17 has duplicate vertices")); assert!(error_message.contains("[[0, 2], [1, 3, 5]]")); - println!(" DuplicateVertices error: {error_message}"); + test_debug!(" DuplicateVertices error: {error_message}"); // Test error equality and cloning let cloned_error = invalid_facet_error.clone(); @@ -2819,12 +2833,12 @@ mod tests { panic!("Expected InsufficientVertices error"); } } - println!(" ✓ Error type structure and formatting tests passed"); + test_debug!(" ✓ Error type structure and formatting tests passed"); // ======================================================================== 
// Test 6: Validation consistency and performance // ======================================================================== - println!(" Testing validation consistency and performance..."); + test_debug!(" Testing validation consistency and performance..."); // Test consistency across multiple calls let results: Vec> = (0..5) @@ -2870,10 +2884,10 @@ mod tests { "Validation should be fast (< {budget_ms} ms for 100 calls); took {elapsed:?}" ); - println!(" 100 validation calls completed in {elapsed:?}"); - println!(" ✓ Validation consistency and performance tests passed"); + test_debug!(" 100 validation calls completed in {elapsed:?}"); + test_debug!(" ✓ Validation consistency and performance tests passed"); - println!("✓ All comprehensive ConvexHull validation tests passed successfully!"); + test_debug!("✓ All comprehensive ConvexHull validation tests passed successfully!"); } #[test] @@ -3295,7 +3309,7 @@ mod tests { // This should either succeed or fail gracefully match result { Ok(_visibility) => (), // Success case - Err(e) => println!("Expected visibility error: {e}"), + Err(e) => test_debug!("Expected visibility error: {e}"), } } } @@ -3594,7 +3608,7 @@ mod tests { #[test] fn test_error_handling_paths() { - println!("Testing error handling paths in convex hull methods"); + test_debug!("Testing error handling paths in convex hull methods"); let vertices = vec![ vertex!([0.0, 0.0, 0.0]), @@ -3615,7 +3629,7 @@ mod tests { ); // The method should handle extreme coordinates gracefully - println!(" Fallback visibility result with extreme point: {result:?}"); + test_debug!(" Fallback visibility result with extreme point: {result:?}"); // Test normal visibility methods with edge case points let edge_points = vec![ @@ -3629,12 +3643,12 @@ mod tests { assert!(result.is_ok(), "Edge case visibility test should not error"); } - println!("✓ Error handling paths tested successfully"); + test_debug!("✓ Error handling paths tested successfully"); } #[test] fn 
test_degenerate_orientation_fallback() { - println!("Testing degenerate orientation fallback behavior"); + test_debug!("Testing degenerate orientation fallback behavior"); // Create a triangulation that might produce degenerate orientations // while still allowing the initial simplex search to succeed. @@ -3663,18 +3677,18 @@ mod tests { ); let coords = point.coords(); - println!( + test_debug!( " Degenerate point {coords:?} - Outside: {:?}", result.unwrap() ); } - println!("✓ Degenerate orientation fallback tested successfully"); + test_debug!("✓ Degenerate orientation fallback tested successfully"); } #[test] fn test_extreme_coordinate_precision() { - println!("Testing extreme coordinate precision handling"); + test_debug!("Testing extreme coordinate precision handling"); // Test with coordinates at the limits of f64 precision let vertices_extreme = vec![ @@ -3715,7 +3729,7 @@ mod tests { &facet_vertices, &test_point, ); - println!(" Extreme precision fallback result: {fallback_result:?}"); + test_debug!(" Extreme precision fallback result: {fallback_result:?}"); } Err(DelaunayTriangulationConstructionError::Triangulation( TriangulationConstructionError::GeometricDegeneracy { .. }, @@ -3725,7 +3739,7 @@ mod tests { // case, it's acceptable for DelaunayTriangulation::new to fail with geometric // degeneracy; later parts of this test still exercise max-scale // behavior. 
- println!( + test_debug!( " \x1b[33mWarning:\x1b[0m skipping MIN_POSITIVE extreme simplex due to geometric degeneracy", ); } @@ -3751,12 +3765,12 @@ mod tests { "Maximum finite coordinates should not crash" ); - println!("✓ Extreme coordinate precision tested successfully"); + test_debug!("✓ Extreme coordinate precision tested successfully"); } #[test] fn test_numeric_cast_error_handling() { - println!("Testing numeric cast error handling in find_nearest_visible_facet"); + test_debug!("Testing numeric cast error handling in find_nearest_visible_facet"); let vertices = vec![ vertex!([0.0, 0.0, 0.0]), @@ -3800,10 +3814,10 @@ mod tests { let coords = point.coords(); let result_val = result.unwrap(); - println!(" Edge point {coords:?} - Result: {result_val:?}"); + test_debug!(" Edge point {coords:?} - Result: {result_val:?}"); } - println!("✓ Numeric cast error handling tested successfully"); + test_debug!("✓ Numeric cast error handling tested successfully"); } #[expect( @@ -3812,7 +3826,7 @@ mod tests { )] #[test] fn test_cache_invalidation_behavior() { - println!("Testing cache invalidation behavior in ConvexHull"); + test_debug!("Testing cache invalidation behavior in ConvexHull"); // Create initial triangulation let vertices = vec![ @@ -3828,8 +3842,8 @@ mod tests { let initial_tds_generation = dt.as_triangulation().tds.generation(); let initial_hull_generation = hull.cached_generation().load(Ordering::Acquire); - println!(" Initial TDS generation: {initial_tds_generation}"); - println!(" Initial hull cached generation: {initial_hull_generation}"); + test_debug!(" Initial TDS generation: {initial_tds_generation}"); + test_debug!(" Initial hull cached generation: {initial_hull_generation}"); // ConvexHull keeps an independent snapshot for staleness detection // Since generation is now private, we can't compare pointers directly @@ -3845,7 +3859,7 @@ mod tests { let test_point = Point::new([2.0, 2.0, 2.0]); let facet = hull.get_facet(0).unwrap(); - println!(" 
Performing initial visibility test to build cache..."); + test_debug!(" Performing initial visibility test to build cache..."); let result1 = hull.is_facet_visible_from_point(facet, &test_point, dt.as_triangulation()); assert!(result1.is_ok(), "Initial visibility test should succeed"); @@ -3853,7 +3867,7 @@ mod tests { let post_cache_tds_gen = dt.as_triangulation().tds.generation(); let post_cache_hull_gen = hull.cached_generation().load(Ordering::Acquire); - println!( + test_debug!( " After cache build - TDS gen: {post_cache_tds_gen}, Hull gen: {post_cache_hull_gen}" ); assert_eq!( @@ -3868,17 +3882,17 @@ mod tests { "Cache should exist after first visibility test" ); - println!(" ✓ Cache successfully built and generations synchronized"); + test_debug!(" ✓ Cache successfully built and generations synchronized"); // Test validity checking before TDS modification - println!(" Testing validity checking..."); + test_debug!(" Testing validity checking..."); assert!( hull.is_valid_for_triangulation(dt.as_triangulation()), "Hull should be valid for initial TDS" ); // Test TDS modification by adding a new vertex - println!(" Testing TDS modification and hull invalidation..."); + test_debug!(" Testing TDS modification and hull invalidation..."); let old_generation = dt.as_triangulation().tds.generation(); let stale_hull_gen = hull.cached_generation().load(Ordering::Acquire); @@ -3888,9 +3902,9 @@ mod tests { .expect("Failed to insert vertex into DelaunayTriangulation"); let modified_tds_gen = dt.as_triangulation().tds.generation(); - println!(" After TDS modification (added vertex):"); - println!(" TDS generation: {modified_tds_gen}"); - println!(" Hull cached generation: {stale_hull_gen}"); + test_debug!(" After TDS modification (added vertex):"); + test_debug!(" TDS generation: {modified_tds_gen}"); + test_debug!(" Hull cached generation: {stale_hull_gen}"); // Hull snapshot is now stale relative to TDS assert!( @@ -3908,11 +3922,11 @@ mod tests { "Hull should be invalid 
for modified TDS" ); - println!(" ✓ Generation change correctly detected - hull is now invalid"); + test_debug!(" ✓ Generation change correctly detected - hull is now invalid"); // IMPORTANT: After TDS modification, the old hull's facet handles are invalid! // We must rebuild the hull to get fresh facet handles - println!(" Rebuilding hull after TDS modification..."); + test_debug!(" Rebuilding hull after TDS modification..."); let new_hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); // The new hull should be valid and have matching generation @@ -3921,7 +3935,7 @@ mod tests { "New hull should be valid for modified TDS" ); let new_hull_gen = new_hull.cached_generation().load(Ordering::Acquire); - println!(" New hull generation: {new_hull_gen}"); + test_debug!(" New hull generation: {new_hull_gen}"); assert_eq!( new_hull_gen, modified_tds_gen, "New hull should have same generation as modified TDS" @@ -3931,7 +3945,7 @@ mod tests { let new_facet = new_hull.get_facet(0).unwrap(); // Test visibility with the new hull and fresh facet handle - println!(" Testing visibility with rebuilt hull..."); + test_debug!(" Testing visibility with rebuilt hull..."); let result2 = new_hull.is_facet_visible_from_point(new_facet, &test_point, dt.as_triangulation()); assert!( @@ -3946,10 +3960,10 @@ mod tests { "Cache should exist after visibility test on new hull" ); - println!(" ✓ Hull rebuilt successfully after TDS modification"); + test_debug!(" ✓ Hull rebuilt successfully after TDS modification"); // Test manual cache invalidation on the new hull - println!(" Testing manual cache invalidation..."); + test_debug!(" Testing manual cache invalidation..."); // Store current generation let pre_invalidation_gen = new_hull.cached_generation().load(Ordering::Acquire); @@ -3971,8 +3985,8 @@ mod tests { "Generation should be reset to 0 after manual invalidation" ); - println!(" Generation before invalidation: {pre_invalidation_gen}"); - println!(" Generation after 
invalidation: {post_invalidation_gen}"); + test_debug!(" Generation before invalidation: {pre_invalidation_gen}"); + test_debug!(" Generation after invalidation: {post_invalidation_gen}"); // Next visibility test should rebuild cache let result3 = @@ -3997,10 +4011,10 @@ mod tests { "Hull generation should match TDS generation after cache rebuild" ); - println!(" Final TDS generation: {final_tds_gen}"); - println!(" Final hull generation: {final_hull_gen}"); + test_debug!(" Final TDS generation: {final_tds_gen}"); + test_debug!(" Final hull generation: {final_hull_gen}"); - println!(" ✓ Manual cache invalidation working correctly"); + test_debug!(" ✓ Manual cache invalidation working correctly"); // Note: We verified that: // 1. The initial hull works correctly with the initial TDS @@ -4008,7 +4022,7 @@ mod tests { // 3. A new hull can be built for the modified TDS // 4. The new hull works correctly with the modified TDS // 5. Manual cache invalidation works as expected - println!(" All tests verified correct hull and cache management"); + test_debug!(" All tests verified correct hull and cache management"); // Verify that all visibility tests succeeded assert!( @@ -4024,10 +4038,10 @@ mod tests { "Third visibility test (after cache invalidation) should succeed" ); - println!(" ✓ Hull rebuilding and cache management working correctly"); + test_debug!(" ✓ Hull rebuilding and cache management working correctly"); // Test concurrent access safety using the new hull - println!(" Testing thread safety of cache operations..."); + test_debug!(" Testing thread safety of cache operations..."); let test_results: Vec<_> = (0..10) .map(|i| { @@ -4045,14 +4059,14 @@ mod tests { ); } - println!(" ✓ Thread safety test passed"); + test_debug!(" ✓ Thread safety test passed"); - println!("✓ All cache invalidation behavior tests passed successfully!"); + test_debug!("✓ All cache invalidation behavior tests passed successfully!"); } #[test] fn test_try_get_or_build_facet_cache() { - 
println!("Testing try_get_or_build_facet_cache method"); + test_debug!("Testing try_get_or_build_facet_cache method"); // Create a triangulation let vertices = vec![ @@ -4069,7 +4083,7 @@ mod tests { assert!(initial_cache.is_none(), "Cache should be empty initially"); // First call should build the cache - println!(" Testing initial cache building..."); + test_debug!(" Testing initial cache building..."); let cache1 = hull .try_get_or_build_facet_cache(&dt.as_triangulation().tds) .expect("Failed to build cache"); @@ -4086,7 +4100,7 @@ mod tests { ); // Second call with same generation should reuse cache - println!(" Testing cache reuse with same generation..."); + test_debug!(" Testing cache reuse with same generation..."); let cache2 = hull .try_get_or_build_facet_cache(&dt.as_triangulation().tds) .expect("Failed to reuse cache"); @@ -4111,7 +4125,7 @@ mod tests { } // Modify triangulation by adding a vertex to trigger generation change - println!(" Testing cache invalidation with generation change..."); + test_debug!(" Testing cache invalidation with generation change..."); let old_generation = dt.as_triangulation().tds.generation(); // Add a new vertex to trigger generation bump @@ -4147,12 +4161,12 @@ mod tests { "Hull generation should match TDS generation after rebuild" ); - println!(" ✓ Cache building, reuse, and invalidation working correctly"); + test_debug!(" ✓ Cache building, reuse, and invalidation working correctly"); } #[test] fn test_helper_methods_integration() { - println!("Testing integration between helper methods"); + test_debug!("Testing integration between helper methods"); // Create a triangulation let vertices = vec![ @@ -4165,7 +4179,7 @@ mod tests { let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); // Test that cache contains keys derivable by the key derivation method - println!(" Testing cache-key derivation consistency..."); + test_debug!(" Testing cache-key derivation consistency..."); let cache = hull 
.try_get_or_build_facet_cache(&dt.as_triangulation().tds) .expect("Failed to build cache"); @@ -4194,19 +4208,19 @@ mod tests { if let Ok(derived_key) = derived_key_result { if cache.contains_key(&derived_key) { keys_found += 1; - println!(" Facet {i}: key {derived_key} found in cache ✓"); + test_debug!(" Facet {i}: key {derived_key} found in cache ✓"); } else { - println!(" Facet {i}: key {derived_key} NOT in cache (unexpected)"); + test_debug!(" Facet {i}: key {derived_key} NOT in cache (unexpected)"); } } else { - println!( + test_debug!( " Facet {i}: key derivation failed: {:?}", derived_key_result.err() ); } } - println!( + test_debug!( " Found {keys_found}/{} hull facet keys in cache", hull.number_of_facets() ); @@ -4223,7 +4237,7 @@ mod tests { ); // Test that helper methods work correctly together in visibility testing - println!(" Testing helper methods in visibility context..."); + test_debug!(" Testing helper methods in visibility context..."); let test_point = Point::new([2.0, 2.0, 2.0]); let test_facet = hull.get_facet(0).unwrap(); @@ -4234,13 +4248,13 @@ mod tests { "Visibility test using helper methods should succeed" ); - println!(" Visibility result: {}", visibility_result.unwrap()); - println!(" ✓ Integration between helper methods working correctly"); + test_debug!(" Visibility result: {}", visibility_result.unwrap()); + test_debug!(" ✓ Integration between helper methods working correctly"); } #[test] fn test_facet_cache_build_failed_error() { - println!("Testing FacetCacheBuildFailed error path"); + test_debug!("Testing FacetCacheBuildFailed error path"); // This test is challenging because we need to trigger a TdsError // during facet cache building. In practice, this is rare with valid TDS objects. 
@@ -4274,10 +4288,10 @@ mod tests { // Verify that the method returns ConvexHullConstructionError (not FacetError) // This ensures our error hierarchy changes are properly implemented match result { - Ok(_) => println!(" ✓ Normal case succeeded as expected"), + Ok(_) => test_debug!(" ✓ Normal case succeeded as expected"), Err(e) => { // If there is an error, verify it's the right type - println!(" Error type verification: {e:?}"); + test_debug!(" Error type verification: {e:?}"); // The fact that it compiles with ConvexHullConstructionError shows the type is correct } } @@ -4290,15 +4304,15 @@ mod tests { "find_visible_facets should succeed in normal case" ); - println!(" ✓ FacetCacheBuildFailed error path properly configured"); - println!( + test_debug!(" ✓ FacetCacheBuildFailed error path properly configured"); + test_debug!( " Note: Actual cache build failure requires corrupted TDS, which is hard to create in tests" ); } #[test] fn test_nearest_facet_equidistant_cases() { - println!("Testing find_nearest_visible_facet with equidistant facets"); + test_debug!("Testing find_nearest_visible_facet with equidistant facets"); // Create a symmetric triangulation where multiple facets might be equidistant let vertices = vec![ @@ -4310,7 +4324,7 @@ mod tests { let dt = create_triangulation(&vertices); let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Testing with point equidistant from multiple facets..."); + test_debug!(" Testing with point equidistant from multiple facets..."); // Point at (1,1,1) should be roughly equidistant from all facets let equidistant_point = Point::new([1.0, 1.0, 1.0]); @@ -4323,7 +4337,7 @@ mod tests { facet_index < hull.number_of_facets(), "Returned facet index should be valid" ); - println!(" ✓ Found nearest facet at index: {facet_index}"); + test_debug!(" ✓ Found nearest facet at index: {facet_index}"); // Verify the facet is actually visible let selected_facet = hull.get_facet(facet_index).unwrap(); @@ 
-4338,7 +4352,7 @@ mod tests { ); } Ok(None) => { - println!(" No visible facets found (point might be inside)"); + test_debug!(" No visible facets found (point might be inside)"); // Verify this is correct by checking if point is actually inside let is_outside = hull .is_point_outside(&equidistant_point, dt.as_triangulation()) @@ -4353,7 +4367,7 @@ mod tests { } } - println!(" Testing with point clearly outside..."); + test_debug!(" Testing with point clearly outside..."); // Point clearly outside should always find a nearest facet let far_point = Point::new([10.0, 10.0, 10.0]); @@ -4362,7 +4376,7 @@ mod tests { match far_result { Ok(Some(facet_index)) => { assert!(facet_index < hull.number_of_facets()); - println!(" ✓ Found nearest facet for far point at index: {facet_index}"); + test_debug!(" ✓ Found nearest facet for far point at index: {facet_index}"); } Ok(None) => { panic!("Far outside point should always see some facets"); @@ -4372,7 +4386,7 @@ mod tests { } } - println!(" Testing with point clearly inside..."); + test_debug!(" Testing with point clearly inside..."); // Point clearly inside should see no facets let inside_point = Point::new([0.1, 0.1, 0.1]); @@ -4380,11 +4394,11 @@ mod tests { match inside_result { Ok(None) => { - println!(" ✓ Inside point correctly sees no facets"); + test_debug!(" ✓ Inside point correctly sees no facets"); } Ok(Some(facet_index)) => { // This might happen due to numerical precision - verify it's reasonable - println!( + test_debug!( " Inside point unexpectedly sees facet {facet_index} (may be due to precision)" ); assert!(facet_index < hull.number_of_facets()); @@ -4394,12 +4408,12 @@ mod tests { } } - println!(" ✓ Equidistant facet selection working correctly"); + test_debug!(" ✓ Equidistant facet selection working correctly"); } #[test] fn test_concurrent_cache_access_patterns() { - println!("Testing concurrent cache access patterns"); + test_debug!("Testing concurrent cache access patterns"); // Create a triangulation 
let vertices = vec![ @@ -4415,7 +4429,7 @@ mod tests { let hull = Arc::new(hull); let dt = Arc::new(dt); - println!(" Testing concurrent cache building..."); + test_debug!(" Testing concurrent cache building..."); // Spawn multiple threads that will try to build/access the cache concurrently let mut handles = vec![]; @@ -4450,7 +4464,7 @@ mod tests { let result = handle.join().expect("Thread should not panic"); match result { Ok((facet_count, is_outside, thread_id)) => { - println!( + test_debug!( " Thread {thread_id}: {facet_count} visible facets, outside: {is_outside}" ); results.push((facet_count, is_outside, thread_id)); @@ -4476,7 +4490,7 @@ mod tests { ); } - println!(" Testing cache consistency after concurrent access..."); + test_debug!(" Testing cache consistency after concurrent access..."); // Verify cache is in a consistent state after concurrent access let cache = hull @@ -4495,13 +4509,15 @@ mod tests { "Operations should work normally after concurrent access" ); - println!(" ✓ Concurrent cache access working correctly"); - println!(" Note: This test verifies basic thread safety, not high-contention scenarios"); + test_debug!(" ✓ Concurrent cache access working correctly"); + test_debug!( + " Note: This test verifies basic thread safety, not high-contention scenarios" + ); } #[test] fn test_invalidate_cache_behavior() { - println!("Testing cache invalidation behavior"); + test_debug!("Testing cache invalidation behavior"); // Create a triangulation let vertices = vec![ @@ -4514,7 +4530,7 @@ mod tests { let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); // Build initial cache - println!(" Building initial cache..."); + test_debug!(" Building initial cache..."); let initial_cache = hull .try_get_or_build_facet_cache(&dt.as_triangulation().tds) .unwrap(); @@ -4536,7 +4552,7 @@ mod tests { ); // Manually invalidate cache - println!(" Manually invalidating cache..."); + test_debug!(" Manually invalidating cache..."); 
hull.invalidate_cache(); // Verify cache is cleared @@ -4554,7 +4570,7 @@ mod tests { ); // Rebuild cache after invalidation - println!(" Rebuilding cache after invalidation..."); + test_debug!(" Rebuilding cache after invalidation..."); let rebuilt_cache = hull .try_get_or_build_facet_cache(&dt.as_triangulation().tds) .unwrap(); @@ -4579,7 +4595,7 @@ mod tests { ); // Verify functionality still works after invalidation/rebuild cycle - println!(" Testing functionality after invalidation/rebuild..."); + test_debug!(" Testing functionality after invalidation/rebuild..."); let test_point = Point::new([2.0, 2.0, 2.0]); let visible_facets = hull.find_visible_facets(&test_point, dt.as_triangulation()); assert!( @@ -4593,12 +4609,12 @@ mod tests { "Outside point should see visible facets after rebuild" ); - println!(" ✓ Cache invalidation and rebuild working correctly"); + test_debug!(" ✓ Cache invalidation and rebuild working correctly"); } #[test] fn test_error_propagation_chain() { - println!("Testing complete error propagation chain"); + test_debug!("Testing complete error propagation chain"); // Create a valid setup let vertices = vec![ @@ -4610,7 +4626,7 @@ mod tests { let dt = create_triangulation(&vertices); let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Testing error types are properly propagated..."); + test_debug!(" Testing error types are properly propagated..."); let test_point = Point::new([2.0, 2.0, 2.0]); let test_facet = hull.get_facet(0).unwrap(); @@ -4620,20 +4636,20 @@ mod tests { hull.is_facet_visible_from_point(test_facet, &test_point, dt.as_triangulation()); match visibility_result { Ok(visible) => { - println!(" is_facet_visible_from_point: Ok({visible})"); + test_debug!(" is_facet_visible_from_point: Ok({visible})"); } Err(e) => { - println!(" is_facet_visible_from_point error: {e:?}"); + test_debug!(" is_facet_visible_from_point error: {e:?}"); // Verify it's the right error type by matching on variants 
match e { ConvexHullConstructionError::FacetCacheBuildFailed { source: _ } => { - println!(" ✓ FacetCacheBuildFailed variant present"); + test_debug!(" ✓ FacetCacheBuildFailed variant present"); } ConvexHullConstructionError::VisibilityCheckFailed { source: _ } => { - println!(" ✓ VisibilityCheckFailed variant present"); + test_debug!(" ✓ VisibilityCheckFailed variant present"); } _ => { - println!(" Unexpected error variant: {e:?}"); + test_debug!(" Unexpected error variant: {e:?}"); } } } @@ -4660,13 +4676,13 @@ mod tests { "find_nearest_visible_facet should succeed in normal case" ); - println!(" ✓ Error propagation chain correctly implemented"); - println!(" ✓ All methods return ConvexHullConstructionError as expected"); + test_debug!(" ✓ Error propagation chain correctly implemented"); + test_debug!(" ✓ All methods return ConvexHullConstructionError as expected"); } #[test] fn test_adjacent_cell_resolution_failed_error() { - println!("Testing AdjacentCellResolutionFailed error variant"); + test_debug!("Testing AdjacentCellResolutionFailed error variant"); // Create a simple triangulation to test with let vertices = vec![ @@ -4713,14 +4729,14 @@ mod tests { "AdjacentCellResolutionFailed should have a source error" ); - println!(" ✓ AdjacentCellResolutionFailed error variant properly implemented"); - println!(" ✓ Error preserves underlying TdsError as source"); - println!(" ✓ Error display format correct: {error_message}"); + test_debug!(" ✓ AdjacentCellResolutionFailed error variant properly implemented"); + test_debug!(" ✓ Error preserves underlying TdsError as source"); + test_debug!(" ✓ Error display format correct: {error_message}"); } #[test] fn test_enhanced_facet_key_error_information() { - println!("Testing enhanced facet key error information"); + test_debug!("Testing enhanced facet key error information"); // Create example UUIDs for testing let uuid1 = uuid::Uuid::new_v4(); @@ -4737,7 +4753,7 @@ mod tests { vertex_uuids: vertex_uuids.clone(), }; - 
println!(" Testing error message format..."); + test_debug!(" Testing error message format..."); let error_message = format!("{enhanced_error}"); // Verify the error message contains expected components @@ -4758,9 +4774,9 @@ mod tests { "Error message should mention key derivation mismatch: {error_message}" ); - println!(" Enhanced error message: {error_message}"); + test_debug!(" Enhanced error message: {error_message}"); - println!(" Testing error debug format..."); + test_debug!(" Testing error debug format..."); let debug_message = format!("{enhanced_error:?}"); assert!( debug_message.contains("FacetKeyNotFoundInCache"), @@ -4776,9 +4792,9 @@ mod tests { "Debug format should contain vertex UUIDs: {debug_message}" ); - println!(" Debug representation: {debug_message}"); + test_debug!(" Debug representation: {debug_message}"); - println!(" Testing error comparison and cloning..."); + test_debug!(" Testing error comparison and cloning..."); // Test Clone let cloned_error = enhanced_error.clone(); @@ -4798,7 +4814,7 @@ mod tests { "Different errors should not be equal" ); - println!(" Testing integration with ConvexHullConstructionError..."); + test_debug!(" Testing integration with ConvexHullConstructionError..."); // Wrap in the higher-level error let construction_error = ConvexHullConstructionError::VisibilityCheckFailed { @@ -4826,9 +4842,9 @@ mod tests { ); } - println!(" Construction error message: {construction_message}"); + test_debug!(" Construction error message: {construction_message}"); - println!(" Testing backward compatibility..."); + test_debug!(" Testing backward compatibility..."); // Verify the old error variant still exists and works let old_error = FacetError::FacetNotFoundInTriangulation; @@ -4838,15 +4854,15 @@ mod tests { "Old error variant should still work: {old_message}" ); - println!(" Old error message: {old_message}"); + test_debug!(" Old error message: {old_message}"); - println!(" ✓ Enhanced facet key error information working 
correctly"); - println!(" ✓ Error provides detailed diagnostic information including:"); - println!(" - Facet key in hex format for debugging"); - println!(" - Cache size for context"); - println!(" - Vertex UUIDs that generated the key"); - println!(" - Actionable error message suggesting possible causes"); - println!(" ✓ Backward compatibility maintained with existing error variants"); + test_debug!(" ✓ Enhanced facet key error information working correctly"); + test_debug!(" ✓ Error provides detailed diagnostic information including:"); + test_debug!(" - Facet key in hex format for debugging"); + test_debug!(" - Cache size for context"); + test_debug!(" - Vertex UUIDs that generated the key"); + test_debug!(" - Actionable error message suggesting possible causes"); + test_debug!(" ✓ Backward compatibility maintained with existing error variants"); } // ============================================================================ @@ -4862,12 +4878,12 @@ mod tests { )] #[test] fn test_convex_hull_error_handling_comprehensive() { - println!("Testing comprehensive ConvexHull error handling"); + test_debug!("Testing comprehensive ConvexHull error handling"); // ======================================================================== // Test 1: ConvexHullValidationError variants // ======================================================================== - println!(" Testing ConvexHullValidationError variants..."); + test_debug!(" Testing ConvexHullValidationError variants..."); let invalid_facet_error = ConvexHullValidationError::InvalidFacet { facet_index: 5, @@ -4899,13 +4915,13 @@ mod tests { assert_eq!(invalid_facet_error, cloned_invalid); assert_ne!(invalid_facet_error, duplicate_error); - println!(" InvalidFacet: {error_msg}"); - println!(" DuplicateVertices: {dup_msg}"); + test_debug!(" InvalidFacet: {error_msg}"); + test_debug!(" DuplicateVertices: {dup_msg}"); // ======================================================================== // Test 2: 
ConvexHullConstructionError variants // ======================================================================== - println!(" Testing ConvexHullConstructionError variants..."); + test_debug!(" Testing ConvexHullConstructionError variants..."); let boundary_error = ConvexHullConstructionError::BoundaryFacetExtractionFailed { source: TdsError::InconsistentDataStructure { @@ -4960,17 +4976,17 @@ mod tests { assert_eq!(boundary_error, cloned_boundary); assert_ne!(boundary_error, cast_error); - println!(" BoundaryExtraction: {boundary_msg}"); - println!(" VisibilityCheck: {visibility_msg}"); - println!(" InvalidTriangulation: {invalid_tri_msg}"); - println!(" GeometricDegeneracy: {degeneracy_msg}"); - println!(" NumericCast: {cast_msg}"); - println!(" CoordinateConversion: {coord_msg}"); + test_debug!(" BoundaryExtraction: {boundary_msg}"); + test_debug!(" VisibilityCheck: {visibility_msg}"); + test_debug!(" InvalidTriangulation: {invalid_tri_msg}"); + test_debug!(" GeometricDegeneracy: {degeneracy_msg}"); + test_debug!(" NumericCast: {cast_msg}"); + test_debug!(" CoordinateConversion: {coord_msg}"); // ======================================================================== // Test 3: Error propagation and source chains // ======================================================================== - println!(" Testing error propagation and source chains..."); + test_debug!(" Testing error propagation and source chains..."); // Test coordinate conversion error propagation let coord_conv_error = @@ -4981,7 +4997,7 @@ mod tests { let hull_error: ConvexHullConstructionError = coord_conv_error.into(); match hull_error { ConvexHullConstructionError::CoordinateConversion(_) => { - println!(" ✓ Coordinate conversion error properly wrapped"); + test_debug!(" ✓ Coordinate conversion error properly wrapped"); } _ => panic!("Coordinate conversion error not properly wrapped"), } @@ -5005,12 +5021,12 @@ mod tests { depth > 0, "Error chain should have at least one level of nesting" ); - 
println!(" ✓ Error source chain depth: {depth}"); + test_debug!(" ✓ Error source chain depth: {depth}"); // ======================================================================== // Test 4: Error message consistency and formatting // ======================================================================== - println!(" Testing error message formatting consistency..."); + test_debug!(" Testing error message formatting consistency..."); let test_errors = [ ( @@ -5079,7 +5095,7 @@ mod tests { // ======================================================================== // Test 5: Extreme coordinate error handling // ======================================================================== - println!(" Testing extreme coordinate error handling..."); + test_debug!(" Testing extreme coordinate error handling..."); // Test with very large coordinates (may cause numeric issues) let large_vertices = vec![ @@ -5103,25 +5119,27 @@ mod tests { visibility_result.is_ok(), "Visibility test should handle large coordinates" ); - println!(" ✓ Large coordinates handled successfully"); + test_debug!(" ✓ Large coordinates handled successfully"); } Err(e) => { - println!(" Large coordinate hull construction failed (acceptable): {e}"); + test_debug!( + " Large coordinate hull construction failed (acceptable): {e}" + ); // Verify appropriate error types for numeric issues match e { ConvexHullConstructionError::CoordinateConversion(_) | ConvexHullConstructionError::NumericCastFailed { .. } | ConvexHullConstructionError::GeometricDegeneracy { .. 
} => { - println!(" ✓ Appropriate error type for numeric issues"); + test_debug!(" ✓ Appropriate error type for numeric issues"); } - _ => println!( + _ => test_debug!( " Note: Unexpected error type but may be acceptable: {e:?}" ), } } } } - Err(e) => println!( + Err(e) => test_debug!( " Large coordinate DelaunayTriangulation construction failed (acceptable): {e}" ), } @@ -5138,13 +5156,13 @@ mod tests { Ok(small_dt) => match ConvexHull::from_triangulation(small_dt.as_triangulation()) { Ok(small_hull) => { assert!(small_hull.validate(small_dt.as_triangulation()).is_ok()); - println!(" ✓ Small coordinates handled successfully"); + test_debug!(" ✓ Small coordinates handled successfully"); } Err(e) => { - println!(" Small coordinate hull construction failed (acceptable): {e}"); + test_debug!(" Small coordinate hull construction failed (acceptable): {e}"); } }, - Err(e) => println!( + Err(e) => test_debug!( " Small coordinate DelaunayTriangulation construction failed (acceptable): {e}" ), } @@ -5178,15 +5196,15 @@ mod tests { ); match fallback_result { Ok(is_visible) => { - println!(" Extreme point {i}: fallback visibility = {is_visible}"); + test_debug!(" Extreme point {i}: fallback visibility = {is_visible}"); } Err(e) => { - println!(" Extreme point {i}: fallback failed (acceptable): {e}"); + test_debug!(" Extreme point {i}: fallback failed (acceptable): {e}"); } } } - println!("✓ All comprehensive ConvexHull error handling tests passed successfully!"); + test_debug!("✓ All comprehensive ConvexHull error handling tests passed successfully!"); } // ============================================================================ @@ -5201,7 +5219,7 @@ mod tests { reason = "test keeps degenerate facet visibility scenarios together" )] fn test_fallback_visibility_with_degenerate_facets() { - println!("Testing fallback visibility algorithm with degenerate facet geometries"); + test_debug!("Testing fallback visibility algorithm with degenerate facet geometries"); // Create 
basic triangulation to get valid facet structure let vertices = vec![ @@ -5213,7 +5231,7 @@ mod tests { let dt = create_triangulation(&vertices); let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Testing fallback with points at various distances..."); + test_debug!(" Testing fallback with points at various distances..."); let facet_handle = hull.get_facet(0).unwrap(); let facet_view = FacetView::new( @@ -5253,37 +5271,37 @@ mod tests { match fallback_result { Ok(is_visible) => { - println!(" {description} ({category}): visible = {is_visible}"); + test_debug!(" {description} ({category}): visible = {is_visible}"); // Validate that the result makes geometric sense match category { "zero_distance" | "very_close" => { // Very close points might be visible or not due to precision - println!( + test_debug!( " Close point visibility: {is_visible} (precision-dependent)" ); } "very_far" => { // Very far points should typically be visible if !is_visible { - println!( + test_debug!( " Warning: Very far point unexpectedly not visible (may indicate threshold issues)" ); } } _ => { // Middle-range points - no strong expectations - println!(" Medium distance point visibility: {is_visible}"); + test_debug!(" Medium distance point visibility: {is_visible}"); } } } Err(e) => { - println!(" {description} ({category}): error = {e:?}"); + test_debug!(" {description} ({category}): error = {e:?}"); // Errors should only occur for coordinate conversion issues match e { ConvexHullConstructionError::CoordinateConversion(_) => { - println!(" ✓ Acceptable coordinate conversion error"); + test_debug!(" ✓ Acceptable coordinate conversion error"); } _ => { panic!("Unexpected error type for fallback visibility: {e:?}"); @@ -5293,7 +5311,7 @@ mod tests { } } - println!(" Testing fallback with collinear facet vertices (degenerate geometry)..."); + test_debug!(" Testing fallback with collinear facet vertices (degenerate geometry)..."); // Create a triangulation 
with near-collinear points let near_collinear_vertices = vec![ @@ -5307,7 +5325,7 @@ mod tests { Ok(collinear_dt) => { match ConvexHull::from_triangulation(collinear_dt.as_triangulation()) { Ok(collinear_hull) => { - println!(" ✓ Near-collinear triangulation created successfully"); + test_debug!(" ✓ Near-collinear triangulation created successfully"); let collinear_facet_vertices = extract_facet_vertices( collinear_hull.get_facet(0).unwrap(), @@ -5324,27 +5342,27 @@ mod tests { match collinear_result { Ok(is_visible) => { - println!(" Near-collinear facet visibility: {is_visible}"); + test_debug!(" Near-collinear facet visibility: {is_visible}"); } Err(e) => { - println!(" Near-collinear facet test failed: {e}"); + test_debug!(" Near-collinear facet test failed: {e}"); // This is acceptable for degenerate geometry } } } Err(e) => { - println!(" Near-collinear hull construction failed (expected): {e}"); + test_debug!(" Near-collinear hull construction failed (expected): {e}"); } } } Err(e) => { - println!( + test_debug!( " Near-collinear DelaunayTriangulation construction failed (expected): {e}" ); } } - println!(" Testing fallback with zero-area configurations..."); + test_debug!(" Testing fallback with zero-area configurations..."); // Create a triangulation where facets might have very small areas let tiny_area_vertices = vec![ @@ -5358,7 +5376,7 @@ mod tests { Ok(tiny_dt) => { match ConvexHull::from_triangulation(tiny_dt.as_triangulation()) { Ok(tiny_hull) => { - println!(" ✓ Tiny area triangulation created successfully"); + test_debug!(" ✓ Tiny area triangulation created successfully"); let tiny_facet_vertices = extract_facet_vertices( tiny_hull.get_facet(0).unwrap(), @@ -5375,27 +5393,27 @@ mod tests { match tiny_result { Ok(is_visible) => { - println!(" Tiny area facet visibility: {is_visible}"); + test_debug!(" Tiny area facet visibility: {is_visible}"); } Err(e) => { - println!(" Tiny area facet test failed: {e}"); + test_debug!(" Tiny area facet test 
failed: {e}"); // This might fail due to precision issues } } } Err(e) => { - println!(" Tiny area hull construction failed (acceptable): {e}"); + test_debug!(" Tiny area hull construction failed (acceptable): {e}"); } } } Err(e) => { - println!( + test_debug!( " Tiny area DelaunayTriangulation construction failed (acceptable): {e}" ); } } - println!(" ✓ Fallback visibility algorithm tested with degenerate facets"); + test_debug!(" ✓ Fallback visibility algorithm tested with degenerate facets"); } #[test] @@ -5404,7 +5422,7 @@ mod tests { reason = "test keeps threshold and heuristic visibility scenarios together" )] fn test_fallback_visibility_threshold_behavior() { - println!("Testing fallback visibility threshold and heuristic behavior"); + test_debug!("Testing fallback visibility threshold and heuristic behavior"); // Create a well-defined triangulation let vertices = vec![ @@ -5419,7 +5437,7 @@ mod tests { let test_facet_vertices = extract_facet_vertices(hull.get_facet(0).unwrap(), dt.as_triangulation()).unwrap(); - println!(" Testing threshold behavior with systematic point placement..."); + test_debug!(" Testing threshold behavior with systematic point placement..."); // Test points at increasing distances to understand threshold behavior let base_distance = 0.1; @@ -5439,18 +5457,20 @@ mod tests { match result { Ok(is_visible) => { visibility_results.push((multiplier, distance, is_visible)); - println!( + test_debug!( " Distance {distance:.6} (multiplier {multiplier}): visible = {is_visible}" ); } Err(e) => { - println!(" Distance {distance:.6} (multiplier {multiplier}): error = {e:?}"); + test_debug!( + " Distance {distance:.6} (multiplier {multiplier}): error = {e:?}" + ); visibility_results.push((multiplier, distance, false)); // Treat error as not visible } } } - println!(" Analyzing threshold behavior patterns..."); + test_debug!(" Analyzing threshold behavior patterns..."); // Look for patterns in visibility results let visible_count = 
visibility_results @@ -5459,11 +5479,11 @@ mod tests { .count(); let not_visible_count = visibility_results.len() - visible_count; - println!( + test_debug!( " Visible results: {visible_count}/{}", visibility_results.len() ); - println!( + test_debug!( " Not visible results: {not_visible_count}/{}", visibility_results.len() ); @@ -5474,7 +5494,7 @@ mod tests { for (multiplier, distance, visible) in &visibility_results { if !last_visible && *visible { - println!( + test_debug!( " ✓ Visibility transition found at distance {distance:.6} (multiplier {multiplier})" ); transition_found = true; @@ -5483,12 +5503,12 @@ mod tests { } if !transition_found && visible_count > 0 { - println!(" No clear transition, but some points are visible"); + test_debug!(" No clear transition, but some points are visible"); } else if visible_count == 0 { - println!(" Warning: No points were deemed visible (possible threshold issue)"); + test_debug!(" Warning: No points were deemed visible (possible threshold issue)"); } - println!(" Testing edge case geometries for threshold calculation..."); + test_debug!(" Testing edge case geometries for threshold calculation..."); // Test with facets that have different edge length distributions let edge_test_cases = vec![ @@ -5514,7 +5534,7 @@ mod tests { ]; for (description, vertices) in edge_test_cases { - println!(" Testing {description}..."); + test_debug!(" Testing {description}..."); match DelaunayTriangulation::new(&vertices) { Ok(edge_dt) => match ConvexHull::from_triangulation(edge_dt.as_triangulation()) { @@ -5534,24 +5554,26 @@ mod tests { match edge_result { Ok(is_visible) => { - println!(" {description} visibility: {is_visible}"); + test_debug!(" {description} visibility: {is_visible}"); } Err(e) => { - println!(" {description} test failed: {e}"); + test_debug!(" {description} test failed: {e}"); } } } Err(e) => { - println!(" {description} hull construction failed: {e}"); + test_debug!(" {description} hull construction failed: {e}"); } }, 
Err(e) => { - println!(" {description} DelaunayTriangulation construction failed: {e}"); + test_debug!( + " {description} DelaunayTriangulation construction failed: {e}" + ); } } } - println!(" ✓ Fallback visibility threshold behavior thoroughly tested"); + test_debug!(" ✓ Fallback visibility threshold behavior thoroughly tested"); } // ============================================================================ @@ -5566,9 +5588,9 @@ mod tests { reason = "test keeps collinear hull edge cases together" )] fn test_collinear_points_handling() { - println!("Testing convex hull construction with collinear point configurations"); + test_debug!("Testing convex hull construction with collinear point configurations"); - println!(" Testing perfectly collinear points in 2D..."); + test_debug!(" Testing perfectly collinear points in 2D..."); let collinear_2d_vertices = vec![ vertex!([0.0, 0.0]), @@ -5580,13 +5602,13 @@ mod tests { Ok(collinear_dt) => { match ConvexHull::from_triangulation(collinear_dt.as_triangulation()) { Ok(collinear_hull) => { - println!(" ✓ Collinear 2D hull constructed successfully"); + test_debug!(" ✓ Collinear 2D hull constructed successfully"); assert!( collinear_hull .validate(collinear_dt.as_triangulation()) .is_ok() ); - println!(" Facet count: {}", collinear_hull.number_of_facets()); + test_debug!(" Facet count: {}", collinear_hull.number_of_facets()); // Test operations on collinear hull let test_point = Point::new([0.5, 1.0]); @@ -5594,40 +5616,40 @@ mod tests { .is_point_outside(&test_point, collinear_dt.as_triangulation()); match visibility_result { Ok(is_outside) => { - println!(" Point outside test: {is_outside}"); + test_debug!(" Point outside test: {is_outside}"); } Err(e) => { - println!( + test_debug!( " Point outside test failed (acceptable for degenerate case): {e}" ); } } } Err(e) => { - println!(" Collinear 2D hull construction failed: {e}"); + test_debug!(" Collinear 2D hull construction failed: {e}"); match e { 
ConvexHullConstructionError::GeometricDegeneracy { .. } | ConvexHullConstructionError::InvalidTriangulation { .. } | ConvexHullConstructionError::BoundaryFacetExtractionFailed { .. } => { - println!(" ✓ Appropriate error type for collinear points"); + test_debug!(" ✓ Appropriate error type for collinear points"); } _ => { - println!(" Unexpected error type: {e:?}"); + test_debug!(" Unexpected error type: {e:?}"); } } } } } Err(e) => { - println!( + test_debug!( " Collinear 2D DelaunayTriangulation construction failed (expected): {e}" ); } } - println!(" Testing nearly collinear points with small perturbations..."); + test_debug!(" Testing nearly collinear points with small perturbations..."); let nearly_collinear_vertices = vec![ vertex!([0.0, 0.0]), @@ -5639,7 +5661,7 @@ mod tests { Ok(nearly_dt) => { match ConvexHull::from_triangulation(nearly_dt.as_triangulation()) { Ok(nearly_hull) => { - println!(" ✓ Nearly collinear 2D hull constructed successfully"); + test_debug!(" ✓ Nearly collinear 2D hull constructed successfully"); assert!(nearly_hull.validate(nearly_dt.as_triangulation()).is_ok()); // Test that operations handle numerical precision gracefully @@ -5648,26 +5670,26 @@ mod tests { .is_point_outside(&precision_test_point, nearly_dt.as_triangulation()); match precision_result { Ok(is_outside) => { - println!( + test_debug!( " Precision test successful: point outside = {is_outside}" ); } Err(e) => { - println!(" Precision test failed (may be acceptable): {e}"); + test_debug!(" Precision test failed (may be acceptable): {e}"); } } } Err(e) => { - println!(" Nearly collinear hull construction failed: {e}"); + test_debug!(" Nearly collinear hull construction failed: {e}"); } } } Err(e) => { - println!(" Nearly collinear DelaunayTriangulation construction failed: {e}"); + test_debug!(" Nearly collinear DelaunayTriangulation construction failed: {e}"); } } - println!(" Testing collinear points in 3D (degenerate configuration)..."); + test_debug!(" Testing 
collinear points in 3D (degenerate configuration)..."); let collinear_3d_vertices = vec![ vertex!([0.0, 0.0, 0.0]), @@ -5678,18 +5700,18 @@ mod tests { match DelaunayTriangulation::new(&collinear_3d_vertices) { Ok(_) => { - println!( + test_debug!( " Warning: 3D collinear DelaunayTriangulation constructed (unexpected but handled)" ); } Err(e) => { - println!( + test_debug!( " 3D collinear DelaunayTriangulation construction failed (expected): {e}" ); } } - println!(" ✓ Collinear point configurations tested"); + test_debug!(" ✓ Collinear point configurations tested"); } #[test] @@ -5698,9 +5720,9 @@ mod tests { reason = "test keeps coplanar higher-dimensional cases together" )] fn test_coplanar_points_in_higher_dimensions() { - println!("Testing convex hull construction with coplanar point configurations"); + test_debug!("Testing convex hull construction with coplanar point configurations"); - println!(" Testing coplanar points in 3D..."); + test_debug!(" Testing coplanar points in 3D..."); // Four points in the same plane (z=0) let coplanar_3d_vertices = vec![ @@ -5712,18 +5734,18 @@ mod tests { match DelaunayTriangulation::new(&coplanar_3d_vertices) { Ok(_) => { - println!( + test_debug!( " Warning: Coplanar 3D DelaunayTriangulation constructed (may indicate insufficient degeneracy detection)" ); } Err(e) => { - println!( + test_debug!( " Coplanar 3D DelaunayTriangulation construction failed (expected): {e}" ); } } - println!(" Testing nearly coplanar points with small z-perturbations..."); + test_debug!(" Testing nearly coplanar points with small z-perturbations..."); let nearly_coplanar_vertices = vec![ vertex!([0.0, 0.0, 0.0]), @@ -5736,16 +5758,16 @@ mod tests { Ok(nearly_coplanar_dt) => { match ConvexHull::from_triangulation(nearly_coplanar_dt.as_triangulation()) { Ok(nearly_coplanar_hull) => { - println!(" ✓ Nearly coplanar 3D hull constructed successfully"); + test_debug!(" ✓ Nearly coplanar 3D hull constructed successfully"); let validation_result = 
nearly_coplanar_hull.validate(nearly_coplanar_dt.as_triangulation()); match validation_result { Ok(()) => { - println!(" Hull validation successful"); + test_debug!(" Hull validation successful"); } Err(e) => { - println!( + test_debug!( " Hull validation failed (may be due to degeneracy): {e}" ); } @@ -5757,12 +5779,12 @@ mod tests { .is_point_outside(&test_point, nearly_coplanar_dt.as_triangulation()); match visibility_result { Ok(is_outside) => { - println!( + test_debug!( " Nearly coplanar visibility test: point outside = {is_outside}" ); } Err(e) => { - println!(" Nearly coplanar visibility test failed: {e}"); + test_debug!(" Nearly coplanar visibility test failed: {e}"); match e { ConvexHullConstructionError::VisibilityCheckFailed { .. @@ -5771,39 +5793,39 @@ mod tests { | ConvexHullConstructionError::FacetCacheBuildFailed { .. } => { - println!( + test_debug!( " ✓ Appropriate error for nearly degenerate geometry" ); } _ => { - println!(" Unexpected error type: {e:?}"); + test_debug!(" Unexpected error type: {e:?}"); } } } } } Err(e) => { - println!(" Nearly coplanar hull construction failed: {e}"); + test_debug!(" Nearly coplanar hull construction failed: {e}"); match e { ConvexHullConstructionError::GeometricDegeneracy { .. } | ConvexHullConstructionError::BoundaryFacetExtractionFailed { .. 
} => { - println!(" ✓ Appropriate error for nearly coplanar points"); + test_debug!(" ✓ Appropriate error for nearly coplanar points"); } _ => { - println!(" Unexpected error type: {e:?}"); + test_debug!(" Unexpected error type: {e:?}"); } } } } } Err(e) => { - println!(" Nearly coplanar DelaunayTriangulation construction failed: {e}"); + test_debug!(" Nearly coplanar DelaunayTriangulation construction failed: {e}"); } } - println!(" Testing coplanar points in 4D..."); + test_debug!(" Testing coplanar points in 4D..."); // Five points in the same 3D hyperplane (w=0) let coplanar_4d_vertices = vec![ @@ -5816,25 +5838,25 @@ mod tests { match DelaunayTriangulation::new(&coplanar_4d_vertices) { Ok(_) => { - println!( + test_debug!( " Warning: Coplanar 4D DelaunayTriangulation constructed (may indicate insufficient degeneracy detection)" ); } Err(e) => { - println!( + test_debug!( " Coplanar 4D DelaunayTriangulation construction failed (expected): {e}" ); } } - println!(" ✓ Coplanar point configurations tested"); + test_debug!(" ✓ Coplanar point configurations tested"); } #[test] fn test_duplicate_and_coincident_vertices() { - println!("Testing convex hull construction with duplicate and coincident vertices"); + test_debug!("Testing convex hull construction with duplicate and coincident vertices"); - println!(" Testing exact duplicate vertices..."); + test_debug!(" Testing exact duplicate vertices..."); let duplicate_vertices = vec![ vertex!([0.0, 0.0, 0.0]), @@ -5846,43 +5868,43 @@ mod tests { match DelaunayTriangulation::new(&duplicate_vertices) { Ok(dup_dt) => { - println!(" Duplicate vertices DelaunayTriangulation constructed"); + test_debug!(" Duplicate vertices DelaunayTriangulation constructed"); match ConvexHull::from_triangulation(dup_dt.as_triangulation()) { Ok(dup_hull) => { - println!(" ✓ Hull with duplicate vertices constructed"); + test_debug!(" ✓ Hull with duplicate vertices constructed"); // Test validation - should catch duplicate vertices in facets let 
validation_result = dup_hull.validate(dup_dt.as_triangulation()); match validation_result { Ok(()) => { - println!( + test_debug!( " Hull validation passed (duplicates may have been handled)" ); } Err(ConvexHullValidationError::DuplicateVerticesInFacet { .. }) => { - println!( + test_debug!( " ✓ Validation correctly detected duplicate vertices in facets" ); } Err(e) => { - println!(" Hull validation failed with different error: {e}"); + test_debug!(" Hull validation failed with different error: {e}"); } } } Err(e) => { - println!(" Hull construction with duplicates failed: {e}"); + test_debug!(" Hull construction with duplicates failed: {e}"); // This might be expected depending on how the TDS handles duplicates } } } Err(e) => { - println!( + test_debug!( " Duplicate vertices DelaunayTriangulation construction failed (may be expected): {e}" ); } } - println!(" Testing nearly coincident vertices (within floating-point precision)..."); + test_debug!(" Testing nearly coincident vertices (within floating-point precision)..."); let nearly_coincident_vertices = vec![ vertex!([0.0, 0.0, 0.0]), @@ -5896,7 +5918,7 @@ mod tests { Ok(nearly_coin_dt) => { match ConvexHull::from_triangulation(nearly_coin_dt.as_triangulation()) { Ok(nearly_coin_hull) => { - println!(" ✓ Hull with nearly coincident vertices constructed"); + test_debug!(" ✓ Hull with nearly coincident vertices constructed"); // Test operations to see how they handle near-duplicates let test_point = Point::new([2.0, 2.0, 2.0]); @@ -5904,30 +5926,32 @@ mod tests { .is_point_outside(&test_point, nearly_coin_dt.as_triangulation()); match visibility_result { Ok(is_outside) => { - println!( + test_debug!( " Nearly coincident hull visibility test: {is_outside}" ); } Err(e) => { - println!(" Nearly coincident hull visibility test failed: {e}"); + test_debug!( + " Nearly coincident hull visibility test failed: {e}" + ); } } } Err(e) => { - println!( + test_debug!( " Hull construction with nearly coincident vertices 
failed: {e}" ); } } } Err(e) => { - println!( + test_debug!( " Nearly coincident vertices DelaunayTriangulation construction failed: {e}" ); } } - println!(" ✓ Duplicate and coincident vertex configurations tested"); + test_debug!(" ✓ Duplicate and coincident vertex configurations tested"); } // ============================================================================ @@ -5942,9 +5966,9 @@ mod tests { reason = "test keeps high-dimensional hull cases together" )] fn test_high_dimensional_convex_hulls() { - println!("Testing convex hull construction in high dimensions (6D, 7D, 8D)"); + test_debug!("Testing convex hull construction in high dimensions (6D, 7D, 8D)"); - println!(" Testing 6D convex hull..."); + test_debug!(" Testing 6D convex hull..."); // Create a 6D simplex (7 vertices) let vertices_6d = vec![ @@ -5961,18 +5985,18 @@ mod tests { Ok(dt_6d) => { match ConvexHull::from_triangulation(dt_6d.as_triangulation()) { Ok(hull_6d) => { - println!(" ✓ 6D hull constructed successfully"); - println!(" 6D hull facet count: {}", hull_6d.number_of_facets()); + test_debug!(" ✓ 6D hull constructed successfully"); + test_debug!(" 6D hull facet count: {}", hull_6d.number_of_facets()); assert_eq!(hull_6d.dimension(), 6); // Test validation in 6D let validation_result = hull_6d.validate(dt_6d.as_triangulation()); match validation_result { Ok(()) => { - println!(" 6D hull validation successful"); + test_debug!(" 6D hull validation successful"); } Err(e) => { - println!(" 6D hull validation failed: {e}"); + test_debug!(" 6D hull validation failed: {e}"); } } @@ -5982,26 +6006,26 @@ mod tests { hull_6d.is_point_outside(&test_point_6d, dt_6d.as_triangulation()); match visibility_result { Ok(is_outside) => { - println!(" 6D point outside test: {is_outside}"); + test_debug!(" 6D point outside test: {is_outside}"); } Err(e) => { - println!(" 6D point outside test failed: {e}"); + test_debug!(" 6D point outside test failed: {e}"); // High dimensional operations might fail due to 
complexity } } } Err(e) => { - println!(" 6D hull construction failed: {e}"); + test_debug!(" 6D hull construction failed: {e}"); // This might be expected for high-dimensional cases } } } Err(e) => { - println!(" 6D DelaunayTriangulation construction failed: {e}"); + test_debug!(" 6D DelaunayTriangulation construction failed: {e}"); } } - println!(" Testing 7D convex hull..."); + test_debug!(" Testing 7D convex hull..."); let vertices_7d = vec![ vertex!([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), @@ -6017,21 +6041,21 @@ mod tests { match DelaunayTriangulation::new(&vertices_7d) { Ok(dt_7d) => match ConvexHull::from_triangulation(dt_7d.as_triangulation()) { Ok(hull_7d) => { - println!(" ✓ 7D hull constructed successfully"); - println!(" 7D hull facet count: {}", hull_7d.number_of_facets()); + test_debug!(" ✓ 7D hull constructed successfully"); + test_debug!(" 7D hull facet count: {}", hull_7d.number_of_facets()); assert_eq!(hull_7d.dimension(), 7); assert!(!hull_7d.is_empty()); } Err(e) => { - println!(" 7D hull construction failed: {e}"); + test_debug!(" 7D hull construction failed: {e}"); } }, Err(e) => { - println!(" 7D DelaunayTriangulation construction failed: {e}"); + test_debug!(" 7D DelaunayTriangulation construction failed: {e}"); } } - println!(" Testing 8D convex hull (stress test)..."); + test_debug!(" Testing 8D convex hull (stress test)..."); let vertices_8d = vec![ vertex!([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), @@ -6049,27 +6073,27 @@ mod tests { Ok(dt_8d) => { match ConvexHull::from_triangulation(dt_8d.as_triangulation()) { Ok(hull_8d) => { - println!(" ✓ 8D hull constructed successfully (impressive!)"); - println!(" 8D hull facet count: {}", hull_8d.number_of_facets()); + test_debug!(" ✓ 8D hull constructed successfully (impressive!)"); + test_debug!(" 8D hull facet count: {}", hull_8d.number_of_facets()); assert_eq!(hull_8d.dimension(), 8); // Stress test operations on 8D hull let clear_ops_start = std::time::Instant::now(); 
assert!(!hull_8d.is_empty()); let basic_ops_duration = clear_ops_start.elapsed(); - println!(" 8D basic operations took: {basic_ops_duration:?}"); + test_debug!(" 8D basic operations took: {basic_ops_duration:?}"); } Err(e) => { - println!(" 8D hull construction failed (acceptable): {e}"); + test_debug!(" 8D hull construction failed (acceptable): {e}"); } } } Err(e) => { - println!(" 8D DelaunayTriangulation construction failed (acceptable): {e}"); + test_debug!(" 8D DelaunayTriangulation construction failed (acceptable): {e}"); } } - println!(" ✓ High-dimensional convex hull tests completed"); + test_debug!(" ✓ High-dimensional convex hull tests completed"); } #[test] @@ -6078,9 +6102,9 @@ mod tests { reason = "performance test covers 3D and 2D cases" )] fn test_large_dataset_performance() { - println!("Testing convex hull performance with larger datasets"); + test_debug!("Testing convex hull performance with larger datasets"); - println!(" Testing 3D hull with many vertices..."); + test_debug!(" Testing 3D hull with many vertices..."); // Generate a larger set of 3D points *near* (but not on) a sphere. // Varying the radius per point breaks exact cosphericity so the f64 // fast filter resolves most insphere queries without exact Bareiss. 
@@ -6099,7 +6123,7 @@ mod tests { large_vertices.push(vertex!([x, y, z])); } - println!(" Generated {num_vertices} vertices"); + test_debug!(" Generated {num_vertices} vertices"); let start_time = std::time::Instant::now(); @@ -6111,15 +6135,17 @@ mod tests { ) { Ok(large_dt) => { let dt_construction_time = start_time.elapsed(); - println!(" DelaunayTriangulation construction took: {dt_construction_time:?}"); + test_debug!( + " DelaunayTriangulation construction took: {dt_construction_time:?}" + ); let hull_start = std::time::Instant::now(); match ConvexHull::from_triangulation(large_dt.as_triangulation()) { Ok(large_hull) => { let hull_construction_time = hull_start.elapsed(); - println!(" ✓ Large 3D hull constructed successfully"); - println!(" Hull construction took: {hull_construction_time:?}"); - println!( + test_debug!(" ✓ Large 3D hull constructed successfully"); + test_debug!(" Hull construction took: {hull_construction_time:?}"); + test_debug!( " Large hull facet count: {}", large_hull.number_of_facets() ); @@ -6145,29 +6171,29 @@ mod tests { ); let ops_duration = ops_start.elapsed(); - println!(" Operations on large hull took: {ops_duration:?}"); + test_debug!(" Operations on large hull took: {ops_duration:?}"); // Performance expectations (loose bounds) if hull_construction_time.as_millis() > 1000 { - println!(" Warning: Hull construction took longer than expected"); + test_debug!(" Warning: Hull construction took longer than expected"); } if ops_duration.as_millis() > 100 { - println!(" Warning: Operations took longer than expected"); + test_debug!(" Warning: Operations took longer than expected"); } } Err(e) => { - println!(" Large hull construction failed: {e}"); + test_debug!(" Large hull construction failed: {e}"); // This might be acceptable for very large datasets } } } Err(e) => { - println!(" Large DelaunayTriangulation construction failed: {e}"); + test_debug!(" Large DelaunayTriangulation construction failed: {e}"); } } - println!(" Testing 2D 
hull with many vertices..."); + test_debug!(" Testing 2D hull with many vertices..."); // Generate points near (but not exactly on) a 2D circle. let num_2d_vertices = 40; @@ -6194,8 +6220,8 @@ mod tests { { Ok(large_2d_hull) => { let construction_2d_time = start_2d.elapsed(); - println!(" ✓ Large 2D hull constructed in {construction_2d_time:?}"); - println!( + test_debug!(" ✓ Large 2D hull constructed in {construction_2d_time:?}"); + test_debug!( " 2D hull facet count: {} (should be ~{})", large_2d_hull.number_of_facets(), num_2d_vertices @@ -6208,15 +6234,15 @@ mod tests { ); } Err(e) => { - println!(" Large 2D hull construction failed: {e}"); + test_debug!(" Large 2D hull construction failed: {e}"); } }, Err(e) => { - println!(" Large 2D DelaunayTriangulation construction failed: {e}"); + test_debug!(" Large 2D DelaunayTriangulation construction failed: {e}"); } } - println!(" ✓ Large dataset performance tests completed"); + test_debug!(" ✓ Large dataset performance tests completed"); } // ============================================================================ @@ -6227,7 +6253,7 @@ mod tests { #[test] fn test_generation_counter_edge_cases() { - println!("Testing generation counter edge cases and overflow scenarios"); + test_debug!("Testing generation counter edge cases and overflow scenarios"); // Create a basic triangulation for testing let vertices = vec![ @@ -6239,26 +6265,26 @@ mod tests { let mut dt = create_triangulation(&vertices); let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Testing rapid generation changes..."); + test_debug!(" Testing rapid generation changes..."); // Record initial generation let initial_generation = dt.as_triangulation().tds.generation(); let initial_hull_generation = hull.cached_generation().load(Ordering::Acquire); - println!(" Initial TDS generation: {initial_generation}"); - println!(" Initial hull generation: {initial_hull_generation}"); + test_debug!(" Initial TDS generation: 
{initial_generation}"); + test_debug!(" Initial hull generation: {initial_hull_generation}"); // Rapidly modify the TDS to increment generation many times for i in 0..20 { let new_vertex = vertex!([>::from(i).mul_add(0.01, 0.1), 0.1, 0.1]); if dt.insert(new_vertex).is_ok() { let current_gen = dt.as_triangulation().tds.generation(); - println!(" After modification {i}: TDS generation = {current_gen}"); + test_debug!(" After modification {i}: TDS generation = {current_gen}"); // Test that hull detects staleness let hull_gen = hull.cached_generation().load(Ordering::Acquire); if hull_gen > 0 && hull_gen < current_gen { - println!( + test_debug!( " ✓ Hull generation ({hull_gen}) correctly behind TDS ({current_gen})" ); } @@ -6267,10 +6293,10 @@ mod tests { } } - println!(" Testing cache rebuild with high generation values..."); + test_debug!(" Testing cache rebuild with high generation values..."); let final_generation = dt.as_triangulation().tds.generation(); - println!(" Final TDS generation: {final_generation}"); + test_debug!(" Final TDS generation: {final_generation}"); // Force cache rebuild let test_point = Point::new([2.0, 2.0, 2.0]); @@ -6278,21 +6304,21 @@ mod tests { match visibility_result { Ok(is_outside) => { let updated_hull_gen = hull.cached_generation().load(Ordering::Acquire); - println!(" After cache rebuild: hull generation = {updated_hull_gen}"); + test_debug!(" After cache rebuild: hull generation = {updated_hull_gen}"); assert_eq!( updated_hull_gen, final_generation, "Hull generation should match TDS after rebuild" ); - println!( + test_debug!( " ✓ Cache rebuild with high generation successful: point outside = {is_outside}" ); } Err(e) => { - println!(" Cache rebuild failed: {e}"); + test_debug!(" Cache rebuild failed: {e}"); } } - println!(" Testing manual invalidation with high generation values..."); + test_debug!(" Testing manual invalidation with high generation values..."); // Test manual invalidation hull.invalidate_cache(); @@ -6301,7 
+6327,7 @@ mod tests { invalidated_gen, 0, "Generation should be reset to 0 after manual invalidation" ); - println!(" ✓ Manual invalidation correctly reset generation to 0"); + test_debug!(" ✓ Manual invalidation correctly reset generation to 0"); // Test rebuild after manual invalidation let rebuild_result = hull.is_point_outside(&test_point, dt.as_triangulation()); @@ -6312,19 +6338,19 @@ mod tests { rebuilt_gen, final_generation, "Generation should match TDS after rebuild from manual invalidation" ); - println!(" ✓ Rebuild after manual invalidation successful"); + test_debug!(" ✓ Rebuild after manual invalidation successful"); } Err(e) => { - println!(" Rebuild after manual invalidation failed: {e}"); + test_debug!(" Rebuild after manual invalidation failed: {e}"); } } - println!(" ✓ Generation counter edge cases tested"); + test_debug!(" ✓ Generation counter edge cases tested"); } #[test] fn test_cache_consistency_under_stress() { - println!("Testing cache consistency under rapid operations"); + test_debug!("Testing cache consistency under rapid operations"); let vertices = vec![ vertex!([0.0, 0.0, 0.0]), @@ -6335,7 +6361,7 @@ mod tests { let dt = create_triangulation(&vertices); let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Testing rapid cache invalidation and rebuild cycles..."); + test_debug!(" Testing rapid cache invalidation and rebuild cycles..."); let num_cycles = 50; let test_points = [ @@ -6383,7 +6409,7 @@ mod tests { ); } - println!(" Cycle {cycle}, Point {i}: outside = {is_outside}"); + test_debug!(" Cycle {cycle}, Point {i}: outside = {is_outside}"); } Err(e) => { panic!("Visibility test failed in cycle {cycle}, point {i}: {e:?}"); @@ -6403,13 +6429,13 @@ mod tests { ); if (cycle + 1) % 10 == 0 { - println!(" Completed {cycle} invalidation/rebuild cycles"); + test_debug!(" Completed {cycle} invalidation/rebuild cycles"); } } - println!(" ✓ Completed {num_cycles} invalidation/rebuild cycles successfully"); 
+ test_debug!(" ✓ Completed {num_cycles} invalidation/rebuild cycles successfully"); - println!(" Testing cache reuse efficiency..."); + test_debug!(" Testing cache reuse efficiency..."); // Test that cache is reused when generation hasn't changed let cache_before = hull.facet_cache().load(); @@ -6436,14 +6462,14 @@ mod tests { if let (Some(before_arc), Some(after_arc)) = (&*cache_before, &*cache_after) { // Compare Arc pointer equality for reuse detection let cache_reused = Arc::ptr_eq(before_arc, after_arc); - println!(" Cache reused: {cache_reused}"); + test_debug!(" Cache reused: {cache_reused}"); // For this test, we expect cache to be reused since generation didn't change assert!( cache_reused, "Cache Arc should be reused when generation unchanged" ); - println!(" ✓ Cache efficiently reused across multiple operations"); + test_debug!(" ✓ Cache efficiently reused across multiple operations"); } else { panic!("Cache should exist both before and after operations"); } @@ -6453,14 +6479,14 @@ mod tests { "Generation should remain unchanged" ); - println!(" ✓ Cache consistency under stress tested"); + test_debug!(" ✓ Cache consistency under stress tested"); } #[test] fn test_memory_usage_and_cleanup() { - println!("Testing memory usage patterns and cleanup behavior"); + test_debug!("Testing memory usage patterns and cleanup behavior"); - println!(" Testing cache memory cleanup..."); + test_debug!(" Testing cache memory cleanup..."); let vertices = vec![ vertex!([0.0, 0.0, 0.0]), @@ -6498,9 +6524,9 @@ mod tests { "Cache should be rebuilt after invalidation" ); - println!(" ✓ Cache memory cleanup behavior correct"); + test_debug!(" ✓ Cache memory cleanup behavior correct"); - println!(" Testing multiple hull instances with shared TDS..."); + test_debug!(" Testing multiple hull instances with shared TDS..."); // Create multiple hull instances from the same triangulation let hull1: ConvexHull, (), (), 3> = @@ -6518,8 +6544,8 @@ mod tests { if let (Some(cache1_arc), 
Some(cache2_arc)) = (&*cache1, &*cache2) { // Caches should be independent (different Arc instances) // but might have the same content - println!(" Hull1 cache size: {}", cache1_arc.len()); - println!(" Hull2 cache size: {}", cache2_arc.len()); + test_debug!(" Hull1 cache size: {}", cache1_arc.len()); + test_debug!(" Hull2 cache size: {}", cache2_arc.len()); // They should have the same content but be independent instances assert_eq!( @@ -6527,7 +6553,7 @@ mod tests { cache2_arc.len(), "Cache sizes should be equal" ); - println!(" ✓ Multiple hull instances have independent but equivalent caches"); + test_debug!(" ✓ Multiple hull instances have independent but equivalent caches"); } else { panic!("Both hulls should have caches after operations"); } @@ -6547,9 +6573,9 @@ mod tests { "Hull2 cache should still exist" ); - println!(" ✓ Independent cache invalidation working correctly"); + test_debug!(" ✓ Independent cache invalidation working correctly"); - println!(" ✓ Memory usage and cleanup patterns tested"); + test_debug!(" ✓ Memory usage and cleanup patterns tested"); } /// Regression test for the bug where stale hulls could silently revalidate after `invalidate_cache()`. @@ -6563,7 +6589,7 @@ mod tests { /// Expected behavior: Hull remains invalid for the modified TDS even after cache invalidation. 
#[test] fn test_stale_hull_detection_after_invalidate_cache() { - println!("Testing stale hull detection after invalidate_cache (regression test)"); + test_debug!("Testing stale hull detection after invalidate_cache (regression test)"); // Step 1: Create hull from initial DelaunayTriangulation let mut dt = DelaunayTriangulation::new(&[ @@ -6581,7 +6607,7 @@ mod tests { hull.is_valid_for_triangulation(dt.as_triangulation()), "Hull should be valid for initial triangulation" ); - println!(" ✓ Hull created with generation {initial_gen}"); + test_debug!(" ✓ Hull created with generation {initial_gen}"); // Step 2: Mutate triangulation (increases generation) dt.insert(vertex!([0.5, 0.5, 0.5])).unwrap(); @@ -6590,18 +6616,18 @@ mod tests { initial_gen, new_gen, "Triangulation generation should increase after mutation" ); - println!(" ✓ Triangulation mutated, generation increased to {new_gen}"); + test_debug!(" ✓ Triangulation mutated, generation increased to {new_gen}"); // Verify hull is now invalid (stale) assert!( !hull.is_valid_for_triangulation(dt.as_triangulation()), "Hull should be invalid after triangulation modification" ); - println!(" ✓ Hull correctly detected as stale"); + test_debug!(" ✓ Hull correctly detected as stale"); // Step 3: Call invalidate_cache() (the critical operation that triggered the bug) hull.invalidate_cache(); - println!(" ✓ Called invalidate_cache()"); + test_debug!(" ✓ Called invalidate_cache()"); // Step 4: Try to use the hull (this would trigger cache rebuild in the old buggy code) let test_point = Point::new([2.0, 2.0, 2.0]); @@ -6626,7 +6652,7 @@ mod tests { tds_generation, new_gen, "Error should report current TDS generation" ); - println!(" ✓ Visibility operation correctly failed with StaleHull error"); + test_debug!(" ✓ Visibility operation correctly failed with StaleHull error"); } else { panic!("Expected StaleHull error, got: {visibility_result:?}"); } @@ -6637,7 +6663,7 @@ mod tests { 
!hull.is_valid_for_triangulation(dt.as_triangulation()), "BUG DETECTED: Hull should STILL be invalid for modified triangulation after invalidate_cache()" ); - println!( + test_debug!( " ✓ CRITICAL: is_valid_for_triangulation() correctly returns false after cache invalidation" ); @@ -6654,9 +6680,9 @@ mod tests { new_visibility_result.is_ok(), "Visibility operation should succeed on new hull" ); - println!(" ✓ New hull works correctly with modified TDS"); + test_debug!(" ✓ New hull works correctly with modified TDS"); - println!( + test_debug!( " ✓ Regression test passed: stale hull detection works correctly after invalidate_cache()" ); } @@ -6667,7 +6693,7 @@ mod tests { #[test] fn test_stale_hull_all_operations_fail() { - println!("Testing that all hull operations fail on stale hull"); + test_debug!("Testing that all hull operations fail on stale hull"); let mut dt = DelaunayTriangulation::new(&[ vertex!([0.0, 0.0, 0.0]), @@ -6684,7 +6710,7 @@ mod tests { let test_point = Point::new([2.0, 2.0, 2.0]); // Test that all operations detect staleness - println!(" Testing find_visible_facets..."); + test_debug!(" Testing find_visible_facets..."); let visible_result = hull.find_visible_facets(&test_point, dt.as_triangulation()); assert!( matches!( @@ -6694,7 +6720,7 @@ mod tests { "find_visible_facets should fail with StaleHull error" ); - println!(" Testing find_nearest_visible_facet..."); + test_debug!(" Testing find_nearest_visible_facet..."); let nearest_result = hull.find_nearest_visible_facet(&test_point, dt.as_triangulation()); assert!( matches!( @@ -6704,7 +6730,7 @@ mod tests { "find_nearest_visible_facet should fail with StaleHull error" ); - println!(" Testing is_point_outside..."); + test_debug!(" Testing is_point_outside..."); let outside_result = hull.is_point_outside(&test_point, dt.as_triangulation()); assert!( matches!( @@ -6714,7 +6740,7 @@ mod tests { "is_point_outside should fail with StaleHull error" ); - println!(" Testing validate..."); + 
test_debug!(" Testing validate..."); let validate_result = hull.validate(dt.as_triangulation()); // validate() should fail with explicit StaleHull error assert!( @@ -6725,7 +6751,7 @@ mod tests { "validate should fail on stale hull with StaleHull error, got: {validate_result:?}" ); - println!(" ✓ All operations correctly detect stale hull"); + test_debug!(" ✓ All operations correctly detect stale hull"); } // ============================================================================ @@ -6734,7 +6760,7 @@ mod tests { #[test] fn test_fallback_visibility_degenerate_facets() { - println!("Testing fallback visibility check for degenerate facets"); + test_debug!("Testing fallback visibility check for degenerate facets"); // Create hull with nearly coplanar points (z << x,y) but still valid simplex let dt = DelaunayTriangulation::new(&[ @@ -6746,7 +6772,7 @@ mod tests { .unwrap(); let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Testing point visibility with degenerate geometry..."); + test_debug!(" Testing point visibility with degenerate geometry..."); let test_point = Point::new([0.5, 0.5, 1.0]); let result = hull.is_point_outside(&test_point, dt.as_triangulation()); @@ -6755,12 +6781,12 @@ mod tests { "Visibility test should handle degenerate geometry: {:?}", result.err() ); - println!(" ✓ Degenerate geometry handled gracefully"); + test_debug!(" ✓ Degenerate geometry handled gracefully"); } #[test] fn test_extreme_aspect_ratio_facets() { - println!("Testing convex hull with extreme aspect ratio facets"); + test_debug!("Testing convex hull with extreme aspect ratio facets"); // Create a very flat triangulation (extreme aspect ratio) let dt = DelaunayTriangulation::new(&[ @@ -6772,7 +6798,7 @@ mod tests { .unwrap(); let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Testing operations with extreme aspect ratio..."); + test_debug!(" Testing operations with extreme aspect ratio..."); let 
test_point = Point::new([500.0, 10.0, 10.0]); let result = hull.is_point_outside(&test_point, dt.as_triangulation()); @@ -6781,7 +6807,7 @@ mod tests { "Should handle extreme aspect ratios: {:?}", result.err() ); - println!(" ✓ Extreme aspect ratio handled correctly"); + test_debug!(" ✓ Extreme aspect ratio handled correctly"); } // ============================================================================ @@ -6790,7 +6816,7 @@ mod tests { #[test] fn test_point_exactly_on_hull_surface() { - println!("Testing points exactly on hull surface"); + test_debug!("Testing points exactly on hull surface"); let dt = create_triangulation(&[ vertex!([0.0, 0.0, 0.0]), @@ -6800,32 +6826,32 @@ mod tests { ]); let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Testing point on facet plane..."); + test_debug!(" Testing point on facet plane..."); // Point on the plane defined by (1,0,0), (0,1,0), (0,0,0) let on_facet = Point::new([0.5, 0.5, 0.0]); let result = hull.is_point_outside(&on_facet, dt.as_triangulation()); // Should handle boundary case without panic assert!(result.is_ok(), "Point on facet should be handled"); - println!(" ✓ Point on facet handled: inside={}", !result.unwrap()); + test_debug!(" ✓ Point on facet handled: inside={}", !result.unwrap()); - println!(" Testing point at vertex..."); + test_debug!(" Testing point at vertex..."); let at_vertex = Point::new([1.0, 0.0, 0.0]); let result = hull.is_point_outside(&at_vertex, dt.as_triangulation()); assert!(result.is_ok(), "Point at vertex should be handled"); // Point at vertex should be inside hull assert!(!result.unwrap(), "Point at vertex should be inside"); - println!(" ✓ Point at vertex correctly identified as inside"); + test_debug!(" ✓ Point at vertex correctly identified as inside"); - println!(" Testing point on edge..."); + test_debug!(" Testing point on edge..."); let on_edge = Point::new([0.5, 0.0, 0.0]); let result = hull.is_point_outside(&on_edge, 
dt.as_triangulation()); assert!(result.is_ok(), "Point on edge should be handled"); - println!(" ✓ Point on edge handled"); + test_debug!(" ✓ Point on edge handled"); } #[test] fn test_point_very_close_to_surface() { - println!("Testing points very close to hull surface (epsilon distance)"); + test_debug!("Testing points very close to hull surface (epsilon distance)"); let dt = create_triangulation(&[ vertex!([0.0, 0.0, 0.0]), @@ -6835,17 +6861,17 @@ mod tests { ]); let hull = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Testing point just outside (epsilon away)..."); + test_debug!(" Testing point just outside (epsilon away)..."); let just_outside = Point::new([0.5, 0.5, f64::EPSILON * 10.0]); let result = hull.is_point_outside(&just_outside, dt.as_triangulation()); assert!(result.is_ok(), "Near-surface point should be handled"); - println!(" ✓ Epsilon-distance point handled"); + test_debug!(" ✓ Epsilon-distance point handled"); - println!(" Testing point just inside (negative epsilon)..."); + test_debug!(" Testing point just inside (negative epsilon)..."); let just_inside = Point::new([0.25, 0.25, -f64::EPSILON * 10.0]); let result = hull.is_point_outside(&just_inside, dt.as_triangulation()); assert!(result.is_ok(), "Near-surface point should be handled"); - println!(" ✓ Negative epsilon-distance point handled"); + test_debug!(" ✓ Negative epsilon-distance point handled"); } // ============================================================================ @@ -6867,7 +6893,7 @@ mod tests { }}; } - println!("Testing {}D convex hull construction and operations", $dim); + test_debug!("Testing {}D convex hull construction and operations", $dim); // Generate simplex vertices for dimension D let mut vertices_vec = Vec::new(); @@ -6880,7 +6906,7 @@ mod tests { vertices_vec.push(make_vertex!(coords)); } - println!( + test_debug!( " Creating {}D triangulation with {} vertices...", $dim, vertices_vec.len() @@ -6888,7 +6914,7 @@ mod tests { let 
dt = DelaunayTriangulation::<_, (), (), $dim>::new(&vertices_vec) .expect(&format!("Failed to create {}D DelaunayTriangulation", $dim)); - println!(" Constructing {}D convex hull...", $dim); + test_debug!(" Constructing {}D convex hull...", $dim); let hull = ConvexHull::from_triangulation(dt.as_triangulation()) .expect(&format!("Failed to create {}D hull", $dim)); @@ -6901,13 +6927,14 @@ mod tests { $dim, expected_facets ); - println!( + test_debug!( " ✓ {}D hull has correct facet count: {}", - $dim, expected_facets + $dim, + expected_facets ); // Test point outside - println!(" Testing point outside {}D hull...", $dim); + test_debug!(" Testing point outside {}D hull...", $dim); let outside_coords = [2.0; $dim]; let outside_point = Point::new(outside_coords); let is_outside = hull @@ -6918,10 +6945,10 @@ mod tests { "Point at {:?} should be outside {}D hull", outside_coords, $dim ); - println!(" ✓ Outside point correctly identified in {}D", $dim); + test_debug!(" ✓ Outside point correctly identified in {}D", $dim); // Test point inside (centroid) - println!(" Testing point inside {}D hull...", $dim); + test_debug!(" Testing point inside {}D hull...", $dim); let mut inside_coords = [0.0; $dim]; for i in 0..$dim { inside_coords[i] = 1.0 / >::from($dim + 1); @@ -6931,10 +6958,10 @@ mod tests { .is_point_outside(&inside_point, dt.as_triangulation()) .expect(&format!("Failed to test inside point in {}D", $dim)); assert!(!is_outside, "Centroid should be inside {}D hull", $dim); - println!(" ✓ Inside point correctly identified in {}D", $dim); + test_debug!(" ✓ Inside point correctly identified in {}D", $dim); // Test visibility - println!(" Testing visibility in {}D...", $dim); + test_debug!(" Testing visibility in {}D...", $dim); let visible_facets = hull .find_visible_facets(&outside_point, dt.as_triangulation()) .expect(&format!("Failed to find visible facets in {}D", $dim)); @@ -6943,19 +6970,19 @@ mod tests { "Outside point should see some facets in {}D", $dim ); - 
println!( + test_debug!( " ✓ Visibility test passed in {}D: {} visible facets", $dim, visible_facets.len() ); // Test validation - println!(" Validating {}D hull...", $dim); + test_debug!(" Validating {}D hull...", $dim); hull.validate(dt.as_triangulation()) .expect(&format!("{}D hull validation failed", $dim)); - println!(" ✓ {}D hull validation passed", $dim); + test_debug!(" ✓ {}D hull validation passed", $dim); - println!(" ✓ All {}D convex hull tests passed", $dim); + test_debug!(" ✓ All {}D convex hull tests passed", $dim); } }; } @@ -6972,7 +6999,7 @@ mod tests { #[test] fn test_cache_lifecycle_multiple_operations() { - println!("Testing cache lifecycle through multiple operations"); + test_debug!("Testing cache lifecycle through multiple operations"); let dt = create_triangulation(&[ vertex!([0.0, 0.0, 0.0]), @@ -6984,14 +7011,14 @@ mod tests { let test_point = Point::new([2.0, 2.0, 2.0]); - println!(" Initial state: cache already built during construction"); + test_debug!(" Initial state: cache already built during construction"); let initial_gen = hull.cached_generation().load(Ordering::Acquire); assert!( initial_gen > 0, "Generation should be set after construction" ); - println!(" First operation: should build cache"); + test_debug!(" First operation: should build cache"); hull.find_visible_facets(&test_point, dt.as_triangulation()) .unwrap(); assert!( @@ -7004,7 +7031,7 @@ mod tests { "Generation should be updated after build" ); - println!(" Second operation: should reuse cache"); + test_debug!(" Second operation: should reuse cache"); let gen_before_reuse = hull.cached_generation().load(Ordering::Acquire); hull.find_visible_facets(&test_point, dt.as_triangulation()) .unwrap(); @@ -7014,7 +7041,7 @@ mod tests { "Generation should not change when reusing cache" ); - println!(" Manual invalidation: should clear cache and reset generation"); + test_debug!(" Manual invalidation: should clear cache and reset generation"); hull.invalidate_cache(); assert!( 
hull.facet_cache().load().is_none(), @@ -7032,7 +7059,7 @@ mod tests { "Should have had cache before invalidation" ); - println!(" Operation after invalidation: should rebuild cache"); + test_debug!(" Operation after invalidation: should rebuild cache"); hull.find_visible_facets(&test_point, dt.as_triangulation()) .unwrap(); assert!( @@ -7045,12 +7072,12 @@ mod tests { "Generation should be updated after rebuild" ); - println!(" ✓ Cache lifecycle behaves correctly through all operations"); + test_debug!(" ✓ Cache lifecycle behaves correctly through all operations"); } #[test] fn test_cache_independence_multiple_hulls() { - println!("Testing cache independence between multiple hull instances"); + test_debug!("Testing cache independence between multiple hull instances"); let dt = create_triangulation(&[ vertex!([0.0, 0.0, 0.0]), @@ -7059,7 +7086,7 @@ mod tests { vertex!([0.0, 0.0, 1.0]), ]); - println!(" Creating first hull and building cache..."); + test_debug!(" Creating first hull and building cache..."); let hull1: ConvexHull, (), (), 3> = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); let test_point = Point::new([2.0, 2.0, 2.0]); @@ -7067,18 +7094,18 @@ mod tests { .find_visible_facets(&test_point, dt.as_triangulation()) .unwrap(); - println!(" Creating second hull from same triangulation..."); + test_debug!(" Creating second hull from same triangulation..."); let hull2: ConvexHull, (), (), 3> = ConvexHull::from_triangulation(dt.as_triangulation()).unwrap(); - println!(" Second hull also has cache built during construction..."); + test_debug!(" Second hull also has cache built during construction..."); let hull2_gen_initial = hull2.cached_generation().load(Ordering::Acquire); assert!( hull2_gen_initial > 0, "Hull2 should have cache built during construction" ); - println!(" Building cache in second hull..."); + test_debug!(" Building cache in second hull..."); hull2 .find_visible_facets(&test_point, dt.as_triangulation()) .unwrap(); @@ -7095,7 
+7122,7 @@ mod tests { "Hull2 should have cache" ); - println!(" Invalidating first hull cache..."); + test_debug!(" Invalidating first hull cache..."); hull1.invalidate_cache(); // Hull1 should be cleared @@ -7110,6 +7137,6 @@ mod tests { "Hull2 cache should be independent" ); - println!(" ✓ Cache independence between multiple hull instances verified"); + test_debug!(" ✓ Cache independence between multiple hull instances verified"); } } diff --git a/src/triangulation/delaunay.rs b/src/triangulation/delaunay.rs index e6297d87..853d583b 100644 --- a/src/triangulation/delaunay.rs +++ b/src/triangulation/delaunay.rs @@ -9604,7 +9604,7 @@ mod tests { } // ========================================================================= - // Coverage-oriented tests (tarpaulin) + // Coverage-oriented tests // ========================================================================= #[test] From fb99dbbc547738a5d0849004a0c2bde3e6fb25cf Mon Sep 17 00:00:00 2001 From: Adam Getchell Date: Wed, 29 Apr 2026 17:26:06 -0700 Subject: [PATCH 5/5] fix: harden Hilbert ordering errors and prelude checks (#338) - Return typed Hilbert errors for non-finite quantization inputs and failed u32 coordinate conversions instead of silently collapsing values. - Preserve item order when Hilbert sort key construction fails, and add regression coverage for the new error paths. - Add the focused ordering prelude and update doctests, examples, benchmarks, and integration tests to use orthogonal prelude imports. - Add a Semgrep rule and fixture coverage for examples and benchmarks that bypass focused preludes. - Verify pinned shfmt binaries in CI with explicit SHA256 values instead of downloading a missing upstream checksum file. 
--- .github/workflows/ci.yml | 22 +- benches/ci_performance_suite.rs | 5 +- benches/circumsphere_containment.rs | 2 +- benches/cold_path_predicates.rs | 2 +- benches/large_scale_performance.rs | 9 +- benches/profiling_suite.rs | 7 +- benches/topology_guarantee_construction.rs | 2 +- examples/delaunayize_repair.rs | 2 +- examples/topology_editing_2d_3d.rs | 9 +- examples/triangulation_3d_100_points.rs | 6 +- semgrep.yaml | 19 + src/core/util/hilbert.rs | 482 ++++++++++++------ src/core/vertex.rs | 25 + src/lib.rs | 26 + tests/prelude_exports.rs | 32 ++ tests/regressions.rs | 2 +- tests/semgrep/src/project_rules/rust_style.rs | 5 + 17 files changed, 468 insertions(+), 189 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3bd653b1..fd9fe043 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,6 +25,9 @@ env: JUST_VERSION: "1.50.0" MARKDOWNLINT_VERSION: "0.48.0" SHFMT_VERSION: "3.13.1" + SHFMT_SHA256_DARWIN_AMD64: "6feedafc72915794163114f512348e2437d080d0047ef8b8fa2ec63b575f12af" + SHFMT_SHA256_DARWIN_ARM64: "9680526be4a66ea1ffe988ed08af58e1400fe1e4f4aef5bd88b20bb9b3da33f8" + SHFMT_SHA256_LINUX_AMD64: "fb096c5d1ac6beabbdbaa2874d025badb03ee07929f0c9ff67563ce8c75398b1" TAPLO_VERSION: "0.10.0" TYPOS_VERSION: "1.45.2" UV_VERSION: "0.11.8" @@ -171,6 +174,7 @@ jobs: # Install shfmt (pinned for CI consistency) SHFMT_ASSET="shfmt_v${SHFMT_VERSION}_linux_amd64" SHFMT_BASE_URL="https://github.com/mvdan/sh/releases/download/v${SHFMT_VERSION}" + SHFMT_SHA256="${SHFMT_SHA256_LINUX_AMD64}" tmpdir="$(mktemp -d)" trap 'rm -rf "$tmpdir"' EXIT @@ -178,13 +182,10 @@ jobs: curl -fsSL \ "${SHFMT_BASE_URL}/${SHFMT_ASSET}" \ -o "$tmpdir/${SHFMT_ASSET}" - curl -fsSL \ - "${SHFMT_BASE_URL}/sha256sums.txt" \ - -o "$tmpdir/sha256sums.txt" ( cd "$tmpdir" - grep -F " ${SHFMT_ASSET}" sha256sums.txt > checksum.txt + printf '%s %s\n' "$SHFMT_SHA256" "$SHFMT_ASSET" > checksum.txt sha256sum -c checksum.txt ) @@ -204,6 +205,14 @@ jobs: 
SHFMT_ASSET="shfmt_v${SHFMT_VERSION}_darwin_${SHFMT_ARCH}" SHFMT_BASE_URL="https://github.com/mvdan/sh/releases/download/v${SHFMT_VERSION}" + case "$SHFMT_ARCH" in + amd64) SHFMT_SHA256="${SHFMT_SHA256_DARWIN_AMD64}" ;; + arm64) SHFMT_SHA256="${SHFMT_SHA256_DARWIN_ARM64}" ;; + *) + echo "Unsupported shfmt architecture: $SHFMT_ARCH" >&2 + exit 1 + ;; + esac tmpdir="$(mktemp -d)" trap 'rm -rf "$tmpdir"' EXIT @@ -211,13 +220,10 @@ jobs: curl -fsSL \ "${SHFMT_BASE_URL}/${SHFMT_ASSET}" \ -o "$tmpdir/${SHFMT_ASSET}" - curl -fsSL \ - "${SHFMT_BASE_URL}/sha256sums.txt" \ - -o "$tmpdir/sha256sums.txt" ( cd "$tmpdir" - grep -F " ${SHFMT_ASSET}" sha256sums.txt > checksum.txt + printf '%s %s\n' "$SHFMT_SHA256" "$SHFMT_ASSET" > checksum.txt shasum -a 256 -c checksum.txt ) diff --git a/benches/ci_performance_suite.rs b/benches/ci_performance_suite.rs index 0fb5ccfd..6e5ddd0d 100644 --- a/benches/ci_performance_suite.rs +++ b/benches/ci_performance_suite.rs @@ -31,9 +31,10 @@ use criterion::measurement::WallTime; use criterion::{ BatchSize, BenchmarkGroup, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main, }; -use delaunay::geometry::util::simplex_volume; use delaunay::prelude::generators::generate_random_points_seeded; -use delaunay::prelude::geometry::{AdaptiveKernel, Coordinate, Point, RobustKernel}; +use delaunay::prelude::geometry::{ + AdaptiveKernel, Coordinate, Point, RobustKernel, simplex_volume, +}; use delaunay::prelude::query::ConvexHull; use delaunay::prelude::triangulation::flips::{ BistellarFlips, CellKey, EdgeKey, FacetHandle, RidgeHandle, TopologyGuarantee, TriangleHandle, diff --git a/benches/circumsphere_containment.rs b/benches/circumsphere_containment.rs index c1512474..9b35e58c 100644 --- a/benches/circumsphere_containment.rs +++ b/benches/circumsphere_containment.rs @@ -14,7 +14,7 @@ //! 
- Numerical consistency validation between all three algorithms use criterion::{Criterion, criterion_group, criterion_main}; -use delaunay::geometry::util::generate_random_points_seeded; +use delaunay::prelude::generators::generate_random_points_seeded; use delaunay::prelude::query::*; use std::hint::black_box; diff --git a/benches/cold_path_predicates.rs b/benches/cold_path_predicates.rs index ec6d5392..f52d0ee1 100644 --- a/benches/cold_path_predicates.rs +++ b/benches/cold_path_predicates.rs @@ -33,7 +33,7 @@ //! ``` use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; -use delaunay::geometry::util::generate_random_points_seeded; +use delaunay::prelude::generators::generate_random_points_seeded; use delaunay::prelude::query::*; use std::hint::black_box; diff --git a/benches/large_scale_performance.rs b/benches/large_scale_performance.rs index c6c74d88..fd643c83 100644 --- a/benches/large_scale_performance.rs +++ b/benches/large_scale_performance.rs @@ -77,10 +77,11 @@ //! 
**Query performance:** Directly measures `SlotMap` iteration efficiency use criterion::{BatchSize, Criterion, Throughput, criterion_group, criterion_main}; -use delaunay::core::vertex::Vertex; -use delaunay::geometry::kernel::AdaptiveKernel; -use delaunay::geometry::util::generate_random_points_seeded; -use delaunay::triangulation::delaunay::{ConstructionOptions, DelaunayTriangulation, RetryPolicy}; +use delaunay::prelude::generators::generate_random_points_seeded; +use delaunay::prelude::geometry::AdaptiveKernel; +use delaunay::prelude::triangulation::{ + ConstructionOptions, DelaunayTriangulation, RetryPolicy, Vertex, +}; use delaunay::vertex; use std::hint::black_box; use std::num::NonZeroUsize; diff --git a/benches/profiling_suite.rs b/benches/profiling_suite.rs index b0b34f52..5b691cb8 100644 --- a/benches/profiling_suite.rs +++ b/benches/profiling_suite.rs @@ -58,12 +58,11 @@ use criterion::measurement::WallTime; use criterion::{ BatchSize, BenchmarkGroup, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main, }; -use delaunay::core::collections::SmallBuffer; -use delaunay::geometry::traits::coordinate::Coordinate; -use delaunay::geometry::util::{ +use delaunay::prelude::collections::SmallBuffer; +use delaunay::prelude::generators::{ generate_grid_points, generate_poisson_points, generate_random_points_seeded, - safe_usize_to_scalar, }; +use delaunay::prelude::geometry::{Coordinate, safe_usize_to_scalar}; use delaunay::prelude::query::*; use delaunay::prelude::triangulation::{ ConstructionOptions, DelaunayTriangulationBuilder, RetryPolicy, diff --git a/benches/topology_guarantee_construction.rs b/benches/topology_guarantee_construction.rs index 38153b31..fe925d80 100644 --- a/benches/topology_guarantee_construction.rs +++ b/benches/topology_guarantee_construction.rs @@ -13,7 +13,7 @@ //! 
``` use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; -use delaunay::geometry::util::generate_random_points_seeded; +use delaunay::prelude::generators::generate_random_points_seeded; use delaunay::prelude::{ DelaunayRepairPolicy, DelaunayTriangulation, TopologyGuarantee, ValidationPolicy, }; diff --git a/examples/delaunayize_repair.rs b/examples/delaunayize_repair.rs index cd075d69..cc3894e6 100644 --- a/examples/delaunayize_repair.rs +++ b/examples/delaunayize_repair.rs @@ -22,8 +22,8 @@ use delaunay::prelude::triangulation::delaunayize::*; use delaunay::prelude::triangulation::flips::*; // For the generic print_outcome helper. -use delaunay::geometry::traits::coordinate::CoordinateScalar; use delaunay::prelude::DataType; +use delaunay::prelude::geometry::CoordinateScalar; fn main() { println!("============================================================"); diff --git a/examples/topology_editing_2d_3d.rs b/examples/topology_editing_2d_3d.rs index cc740fbe..8fb13255 100644 --- a/examples/topology_editing_2d_3d.rs +++ b/examples/topology_editing_2d_3d.rs @@ -18,12 +18,9 @@ //! cargo run --example topology_editing_2d_3d //! ``` -use delaunay::core::edge::EdgeKey; -use delaunay::geometry::Coordinate; -use delaunay::geometry::kernel::Kernel; -use delaunay::geometry::point::Point; -use delaunay::geometry::traits::coordinate::CoordinateScalar; -use delaunay::geometry::util::{circumcenter, hypot}; +use delaunay::prelude::geometry::{ + Coordinate, CoordinateScalar, Kernel, Point, circumcenter, hypot, +}; use delaunay::prelude::triangulation::flips::*; use delaunay::prelude::triangulation::*; diff --git a/examples/triangulation_3d_100_points.rs b/examples/triangulation_3d_100_points.rs index 9d172c8a..6d6fd4c2 100644 --- a/examples/triangulation_3d_100_points.rs +++ b/examples/triangulation_3d_100_points.rs @@ -25,10 +25,10 @@ //! - Boundary analysis //! 
- Performance metrics -use delaunay::geometry::traits::coordinate::CoordinateScalar; -use delaunay::geometry::util::generate_random_triangulation; +use delaunay::prelude::generators::generate_random_triangulation; +use delaunay::prelude::geometry::CoordinateScalar; use delaunay::prelude::query::*; -use delaunay::topology::characteristics::validation as topology_validation; +use delaunay::prelude::topology::validation as topology_validation; use num_traits::NumCast; use num_traits::cast::cast; use std::time::Instant; diff --git a/semgrep.yaml b/semgrep.yaml index beb876dc..26e2dca7 100644 --- a/semgrep.yaml +++ b/semgrep.yaml @@ -348,6 +348,25 @@ rules: - pattern-regex: '^\s*#\s*\[\s*expect\s*\([^\]\n]*\)\s*\]' - pattern-not-regex: '\breason\s*=' + - id: delaunay.rust.prefer-prelude-imports-in-examples-benches + languages: + - rust + severity: WARNING + message: "Use focused delaunay::prelude imports in examples and benchmarks instead of deep crate module paths." + metadata: + category: maintainability + rationale: >- + Public examples and benchmarks should model the small, orthogonal + prelude import surfaces that users can copy into their own code. + paths: + include: + - "/examples/**/*.rs" + - "/benches/**/*.rs" + - "/src/project_rules/**/*.rs" + patterns: + - pattern-regex: '^\s*use\s+delaunay::(core|geometry|triangulation|topology)::' + - pattern-not-regex: '^\s*use\s+delaunay::prelude::' + - id: delaunay.python.no-broad-exception languages: - python diff --git a/src/core/util/hilbert.rs b/src/core/util/hilbert.rs index e2c9fef9..7a439970 100644 --- a/src/core/util/hilbert.rs +++ b/src/core/util/hilbert.rs @@ -10,6 +10,7 @@ #![forbid(unsafe_code)] use crate::geometry::traits::coordinate::CoordinateScalar; +use num_traits::cast; /// Errors that can occur during Hilbert curve operations. #[derive(Clone, Debug, thiserror::Error, PartialEq, Eq)] @@ -52,13 +53,57 @@ pub enum HilbertError { /// The computed grid maximum, `2^bits - 1`. 
max_grid_value: u32, }, + + /// A Hilbert quantization bound was non-finite. + #[error( + "Hilbert quantization bounds must be finite: lower bound finite = {lower_bound_finite}, upper bound finite = {upper_bound_finite}" + )] + NonFiniteBounds { + /// Whether the lower quantization bound is finite. + lower_bound_finite: bool, + /// Whether the upper quantization bound is finite. + upper_bound_finite: bool, + }, + + /// Finite bounds produced a non-finite quantization extent. + #[error("Hilbert quantization bounds produced a non-finite extent")] + NonFiniteBoundsExtent {}, + + /// A coordinate to quantize was non-finite. + #[error("Hilbert coordinate at index {coordinate_index} must be finite")] + NonFiniteCoordinate { + /// The coordinate index whose value was non-finite. + coordinate_index: usize, + }, + + /// A finite coordinate and finite bounds produced a non-finite normalized value. + #[error( + "Hilbert coordinate at index {coordinate_index} produced a non-finite normalized value" + )] + NonFiniteNormalizedCoordinate { + /// The coordinate index whose normalized value was non-finite. + coordinate_index: usize, + }, + + /// A rounded, scaled coordinate could not be represented as a `u32`. + #[error( + "Hilbert quantized coordinate at index {coordinate_index} for {bits} bits and grid maximum {max_grid_value} cannot be represented as u32" + )] + QuantizedCoordinateConversionFailed { + /// The requested number of Hilbert bits per coordinate. + bits: u32, + /// The computed grid maximum, `2^bits - 1`. + max_grid_value: u32, + /// The coordinate index whose rounded scaled value could not be converted. + coordinate_index: usize, + }, } /// Converts a validated bit depth into a scalar grid maximum so Hilbert /// ordering cannot silently collapse when a coordinate type cannot represent it. 
-fn hilbert_quantization_scale(bits: u32) -> Result<(u32, T), HilbertError> { +fn quantization_scale(bits: u32) -> Result<(u32, T), HilbertError> { let max_grid_value = (1_u32 << bits) - 1; - let Some(max_grid_value_scalar): Option = num_traits::NumCast::from(max_grid_value) else { + let Some(max_grid_value_scalar): Option = cast(max_grid_value) else { return Err(HilbertError::QuantizationScaleConversionFailed { bits, max_grid_value, @@ -67,6 +112,35 @@ fn hilbert_quantization_scale(bits: u32) -> Result<(u32, T) Ok((max_grid_value, max_grid_value_scalar)) } +/// Keeps every Hilbert entry point on the same accepted bit-depth range. +const fn validate_bits(bits: u32) -> Result<(), HilbertError> { + if bits == 0 || bits > 31 { + return Err(HilbertError::InvalidBitsParameter { bits }); + } + Ok(()) +} + +/// Computes encoded index width once so overflow errors report consistent context. +fn total_bits(bits: u32) -> Result { + let d_u32 = u32::try_from(D).map_err(|_| HilbertError::DimensionTooLarge { dimension: D })?; + Ok(u128::from(d_u32) * u128::from(bits)) +} + +/// Centralizes index-width validation shared by indexing and ordering APIs. +fn validate_index_params(bits: u32) -> Result<(), HilbertError> { + validate_bits(bits)?; + + let total_bits = total_bits::(bits)?; + if total_bits > 128 { + return Err(HilbertError::IndexOverflow { + dimension: D, + bits, + total_bits, + }); + } + Ok(()) +} + /// Quantize D-dimensional coordinates into integer grid coordinates in `[0, 2^bits)`. /// /// The coordinates are normalized using a scalar `(min, max)` bound applied to every @@ -79,58 +153,80 @@ fn hilbert_quantization_scale(bits: u32) -> Result<(u32, T) /// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization /// grid maximum cannot be represented by the coordinate scalar type. 
/// +/// Returns [`HilbertError::NonFiniteBounds`], +/// [`HilbertError::NonFiniteBoundsExtent`], +/// [`HilbertError::NonFiniteCoordinate`], or +/// [`HilbertError::NonFiniteNormalizedCoordinate`] if quantization input or +/// normalization arithmetic is non-finite. +/// +/// Returns [`HilbertError::QuantizedCoordinateConversionFailed`] if a rounded +/// scaled coordinate cannot be represented as `u32`. +/// /// # Examples /// /// ```rust -/// use delaunay::core::util::hilbert::hilbert_quantize; +/// use delaunay::prelude::ordering::{hilbert_quantize, HilbertError}; /// /// let coords = [0.5_f64, 0.25]; /// let q = hilbert_quantize(&coords, (0.0, 1.0), 2)?; /// assert!(q[0] <= 3 && q[1] <= 3); -/// # Ok::<(), delaunay::core::util::hilbert::HilbertError>(()) +/// # Ok::<(), HilbertError>(()) /// ``` pub fn hilbert_quantize( coords: &[T; D], bounds: (T, T), bits: u32, ) -> Result<[u32; D], HilbertError> { - if bits == 0 || bits > 31 { - return Err(HilbertError::InvalidBitsParameter { bits }); - } + validate_bits(bits)?; if D == 0 { return Ok([0_u32; D]); } - let (max_val_u32, max_val_t) = hilbert_quantization_scale::(bits)?; + let (max_val_u32, max_val_t) = quantization_scale::(bits)?; - Ok(hilbert_quantize_with_scale( - coords, - bounds, - max_val_u32, - max_val_t, - )) + quantize_with_scale(coords, bounds, bits, max_val_u32, max_val_t) } /// Quantizes coordinates with a precomputed scalar grid maximum so hot callers /// can validate conversion once before sorting or batch index generation. 
#[inline] -fn hilbert_quantize_with_scale( +fn quantize_with_scale( coords: &[T; D], bounds: (T, T), + bits: u32, max_val_u32: u32, max_val_t: T, -) -> [u32; D] { +) -> Result<[u32; D], HilbertError> { + if !bounds.0.is_finite_generic() || !bounds.1.is_finite_generic() { + return Err(HilbertError::NonFiniteBounds { + lower_bound_finite: bounds.0.is_finite_generic(), + upper_bound_finite: bounds.1.is_finite_generic(), + }); + } + let extent = bounds.1 - bounds.0; + if !extent.is_finite_generic() { + return Err(HilbertError::NonFiniteBoundsExtent {}); + } + let mut quantized = [0_u32; D]; for (i, &coord) in coords.iter().enumerate() { - // Normalize to [0, 1]. If bounds are degenerate or non-finite, fall back to 0. + if !coord.is_finite_generic() { + return Err(HilbertError::NonFiniteCoordinate { + coordinate_index: i, + }); + } + + // Normalize to [0, 1]. Degenerate or reversed bounds deliberately collapse to 0. let normalized = if extent > T::zero() { let t = (coord - bounds.0) / extent; if t.is_finite_generic() { t.max(T::zero()).min(T::one()) } else { - T::zero() + return Err(HilbertError::NonFiniteNormalizedCoordinate { + coordinate_index: i, + }); } } else { T::zero() @@ -138,14 +234,37 @@ fn hilbert_quantize_with_scale( let scaled = normalized * max_val_t; // Round to nearest grid cell (instead of truncating) for fairer distribution. - let q = scaled - .round() - .to_u32() - .map_or(0, |value| value.min(max_val_u32)); + let Some(value) = scaled.round().to_u32() else { + return Err(HilbertError::QuantizedCoordinateConversionFailed { + bits, + max_grid_value: max_val_u32, + coordinate_index: i, + }); + }; + let q = value.min(max_val_u32); quantized[i] = q; } - quantized + Ok(quantized) +} + +/// Applies a prevalidated permutation after key construction succeeds so sort +/// helpers never partially reorder items before returning a Hilbert error. 
+fn apply_order(items: &mut [Item], order: Vec) { + debug_assert_eq!(items.len(), order.len()); + + let mut ranks = vec![0_usize; order.len()]; + for (new_index, old_index) in order.into_iter().enumerate() { + ranks[old_index] = new_index; + } + + for index in 0..items.len() { + while ranks[index] != index { + let target = ranks[index]; + items.swap(index, target); + ranks.swap(index, target); + } + } } /// Compute the Hilbert curve index for a point in D-dimensional space. @@ -165,44 +284,37 @@ fn hilbert_quantize_with_scale( /// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization /// grid maximum cannot be represented by the coordinate scalar type. /// +/// Returns [`HilbertError::NonFiniteBounds`], +/// [`HilbertError::NonFiniteBoundsExtent`], +/// [`HilbertError::NonFiniteCoordinate`], or +/// [`HilbertError::NonFiniteNormalizedCoordinate`] if quantization input or +/// normalization arithmetic is non-finite. +/// +/// Returns [`HilbertError::QuantizedCoordinateConversionFailed`] if a rounded +/// scaled coordinate cannot be represented as `u32`. 
+/// /// # Examples /// /// ```rust -/// use delaunay::core::util::hilbert::hilbert_index; +/// use delaunay::prelude::ordering::{hilbert_index, HilbertError}; /// /// let idx = hilbert_index(&[0.0_f64, 0.0], (0.0, 1.0), 4)?; /// assert_eq!(idx, 0); -/// # Ok::<(), delaunay::core::util::hilbert::HilbertError>(()) +/// # Ok::<(), HilbertError>(()) /// ``` pub fn hilbert_index( coords: &[T; D], bounds: (T, T), bits: u32, ) -> Result { - // Validate bits parameter (same as hilbert_quantize) - if bits == 0 || bits > 31 { - return Err(HilbertError::InvalidBitsParameter { bits }); - } - - // Validate dimension fits in u32 - let d_u32 = u32::try_from(D).map_err(|_| HilbertError::DimensionTooLarge { dimension: D })?; - - // Validate overflow - let total_bits = u128::from(d_u32) * u128::from(bits); - if total_bits > 128 { - return Err(HilbertError::IndexOverflow { - dimension: D, - bits, - total_bits, - }); - } + validate_index_params::(bits)?; if D == 0 { return Ok(0); } let q = hilbert_quantize(coords, bounds, bits)?; - Ok(hilbert_index_from_quantized(&q, bits)) + Ok(index_from_quantized(&q, bits)) } /// Compute Hilbert index from pre-quantized integer coordinates. @@ -213,7 +325,7 @@ pub fn hilbert_index( /// The resulting ordering is continuous on the integer grid (successive indices move to /// adjacent cells). #[must_use] -fn hilbert_index_from_quantized(coords: &[u32; D], bits: u32) -> u128 { +fn index_from_quantized(coords: &[u32; D], bits: u32) -> u128 { debug_assert!(D > 0, "caller should handle D==0"); debug_assert!( bits > 0 && bits <= 31, @@ -306,63 +418,61 @@ fn hilbert_index_from_quantized(coords: &[u32; D], bits: u32) -> /// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization /// grid maximum cannot be represented by the coordinate scalar type. 
/// +/// Returns [`HilbertError::NonFiniteBounds`], +/// [`HilbertError::NonFiniteBoundsExtent`], +/// [`HilbertError::NonFiniteCoordinate`], or +/// [`HilbertError::NonFiniteNormalizedCoordinate`] if quantization input or +/// normalization arithmetic is non-finite. +/// +/// Returns [`HilbertError::QuantizedCoordinateConversionFailed`] if a rounded +/// scaled coordinate cannot be represented as `u32`. +/// /// # Examples /// /// ```rust -/// use delaunay::core::util::hilbert::hilbert_sort_by_stable; +/// use delaunay::prelude::ordering::{hilbert_sort_by_stable, HilbertError}; /// /// let mut points = vec![[0.9_f64, 0.9], [0.1, 0.1], [0.5, 0.5]]; /// hilbert_sort_by_stable(&mut points, (0.0, 1.0), 8, |p| *p)?; /// assert_eq!(points[0], [0.1, 0.1]); -/// # Ok::<(), delaunay::core::util::hilbert::HilbertError>(()) +/// # Ok::<(), HilbertError>(()) /// ``` -pub fn hilbert_sort_by_stable( +pub fn hilbert_sort_by_stable( items: &mut [Item], bounds: (T, T), bits: u32, - coords_of: F, -) -> Result<(), HilbertError> -where - T: CoordinateScalar, - F: Fn(&Item) -> [T; D], -{ - // Pre-validate parameters once before sorting - if bits == 0 || bits > 31 { - return Err(HilbertError::InvalidBitsParameter { bits }); - } - - let d_u32 = u32::try_from(D).map_err(|_| HilbertError::DimensionTooLarge { dimension: D })?; - let total_bits = u128::from(d_u32) * u128::from(bits); - if total_bits > 128 { - return Err(HilbertError::IndexOverflow { - dimension: D, - bits, - total_bits, - }); - } + coords_of: impl Fn(&Item) -> [T; D], +) -> Result<(), HilbertError> { + validate_index_params::(bits)?; if D == 0 { return Ok(()); } - let (max_val_u32, max_val_t) = hilbert_quantization_scale::(bits)?; + let (max_val_u32, max_val_t) = quantization_scale::(bits)?; + + let mut keyed: Vec<((u128, [u32; D]), usize)> = items + .iter() + .enumerate() + .map(|(i, item)| { + let c = coords_of(item); + let q = quantize_with_scale(&c, bounds, bits, max_val_u32, max_val_t)?; + let idx = 
index_from_quantized(&q, bits); + Ok(((idx, q), i)) + }) + .collect::>()?; - // Sort using cached keys - parameters are pre-validated - items.sort_by_cached_key(|item| { - let c = coords_of(item); - let q = hilbert_quantize_with_scale(&c, bounds, max_val_u32, max_val_t); - let idx = hilbert_index_from_quantized(&q, bits); - (idx, q) - }); + keyed.sort_by_key(|(key, _)| *key); + apply_order(items, keyed.into_iter().map(|(_, i)| i).collect()); Ok(()) } /// Unstable sort helper: sort items by Hilbert index + quantized-coordinate tie-break. /// -/// This avoids allocations beyond what the sort implementation may use internally, -/// but recomputes indices during comparisons. Prefer [`hilbert_sort_by_stable`] unless -/// memory pressure is critical, as the unstable variant recomputes keys O(n log n) times. +/// This precomputes fallible Hilbert keys once, then applies an unstable ordering. +/// Prefer [`hilbert_sort_by_stable`] when equal-key items must preserve their +/// original relative order. /// /// When `D == 0`, all items are considered equivalent (index 0) and the sort order is /// implementation-defined. @@ -379,56 +489,52 @@ where /// Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization /// grid maximum cannot be represented by the coordinate scalar type. /// +/// Returns [`HilbertError::NonFiniteBounds`], +/// [`HilbertError::NonFiniteBoundsExtent`], +/// [`HilbertError::NonFiniteCoordinate`], or +/// [`HilbertError::NonFiniteNormalizedCoordinate`] if quantization input or +/// normalization arithmetic is non-finite. +/// +/// Returns [`HilbertError::QuantizedCoordinateConversionFailed`] if a rounded +/// scaled coordinate cannot be represented as `u32`. 
+/// /// # Examples /// /// ```rust -/// use delaunay::core::util::hilbert::hilbert_sort_by_unstable; +/// use delaunay::prelude::ordering::{hilbert_sort_by_unstable, HilbertError}; /// /// let mut points = vec![[0.9_f64, 0.9], [0.1, 0.1], [0.5, 0.5]]; /// hilbert_sort_by_unstable(&mut points, (0.0, 1.0), 8, |p| *p)?; /// assert_eq!(points[0], [0.1, 0.1]); -/// # Ok::<(), delaunay::core::util::hilbert::HilbertError>(()) +/// # Ok::<(), HilbertError>(()) /// ``` -pub fn hilbert_sort_by_unstable( +pub fn hilbert_sort_by_unstable( items: &mut [Item], bounds: (T, T), bits: u32, - coords_of: F, -) -> Result<(), HilbertError> -where - T: CoordinateScalar, - F: Fn(&Item) -> [T; D], -{ - // Pre-validate parameters once before sorting - if bits == 0 || bits > 31 { - return Err(HilbertError::InvalidBitsParameter { bits }); - } - - let d_u32 = u32::try_from(D).map_err(|_| HilbertError::DimensionTooLarge { dimension: D })?; - let total_bits = u128::from(d_u32) * u128::from(bits); - if total_bits > 128 { - return Err(HilbertError::IndexOverflow { - dimension: D, - bits, - total_bits, - }); - } + coords_of: impl Fn(&Item) -> [T; D], +) -> Result<(), HilbertError> { + validate_index_params::(bits)?; if D == 0 { return Ok(()); } - let (max_val_u32, max_val_t) = hilbert_quantization_scale::(bits)?; + let (max_val_u32, max_val_t) = quantization_scale::(bits)?; + + let mut keyed: Vec<((u128, [u32; D]), usize)> = items + .iter() + .enumerate() + .map(|(i, item)| { + let c = coords_of(item); + let q = quantize_with_scale(&c, bounds, bits, max_val_u32, max_val_t)?; + let idx = index_from_quantized(&q, bits); + Ok(((idx, q), i)) + }) + .collect::>()?; - items.sort_unstable_by(|a, b| { - let ca = coords_of(a); - let cb = coords_of(b); - let qa = hilbert_quantize_with_scale(&ca, bounds, max_val_u32, max_val_t); - let qb = hilbert_quantize_with_scale(&cb, bounds, max_val_u32, max_val_t); - let ida = hilbert_index_from_quantized(&qa, bits); - let idb = hilbert_index_from_quantized(&qb, 
bits); - (ida, qa).cmp(&(idb, qb)) - }); + keyed.sort_unstable_by_key(|(key, _)| *key); + apply_order(items, keyed.into_iter().map(|(_, i)| i).collect()); Ok(()) } @@ -457,7 +563,7 @@ where /// # Examples /// /// ```rust -/// use delaunay::core::util::hilbert::{hilbert_quantize, hilbert_indices_prequantized}; +/// use delaunay::prelude::ordering::{hilbert_indices_prequantized, hilbert_quantize, HilbertError}; /// /// let coords = vec![[0.1_f64, 0.2], [0.5, 0.5], [0.9, 0.8]]; /// let bounds = (0.0, 1.0); @@ -472,13 +578,13 @@ where /// // Compute all indices in bulk /// let indices = hilbert_indices_prequantized(&quantized, bits)?; /// assert_eq!(indices.len(), coords.len()); -/// # Ok::<(), delaunay::core::util::hilbert::HilbertError>(()) +/// # Ok::<(), HilbertError>(()) /// ``` /// /// Error handling: /// /// ```rust -/// use delaunay::core::util::hilbert::{hilbert_indices_prequantized, HilbertError}; +/// use delaunay::prelude::ordering::{hilbert_indices_prequantized, HilbertError}; /// /// let quantized = vec![[1_u32, 2]]; /// @@ -495,23 +601,7 @@ pub fn hilbert_indices_prequantized( quantized: &[[u32; D]], bits: u32, ) -> Result, HilbertError> { - // Validate bits parameter - if bits == 0 || bits > 31 { - return Err(HilbertError::InvalidBitsParameter { bits }); - } - - // Validate dimension fits in u32 - let d_u32 = u32::try_from(D).map_err(|_| HilbertError::DimensionTooLarge { dimension: D })?; - - // Validate overflow - let total_bits = u128::from(d_u32) * u128::from(bits); - if total_bits > 128 { - return Err(HilbertError::IndexOverflow { - dimension: D, - bits, - total_bits, - }); - } + validate_index_params::(bits)?; // Handle D == 0 case: zero-dimensional space has only one point, all map to index 0 if D == 0 { @@ -520,7 +610,7 @@ pub fn hilbert_indices_prequantized( Ok(quantized .iter() - .map(|q| hilbert_index_from_quantized(q, bits)) + .map(|q| index_from_quantized(q, bits)) .collect()) } @@ -541,51 +631,47 @@ pub fn hilbert_indices_prequantized( /// 
Returns [`HilbertError::QuantizationScaleConversionFailed`] if the quantization /// grid maximum cannot be represented by the coordinate scalar type. /// +/// Returns [`HilbertError::NonFiniteBounds`], +/// [`HilbertError::NonFiniteBoundsExtent`], +/// [`HilbertError::NonFiniteCoordinate`], or +/// [`HilbertError::NonFiniteNormalizedCoordinate`] if quantization input or +/// normalization arithmetic is non-finite. +/// +/// Returns [`HilbertError::QuantizedCoordinateConversionFailed`] if a rounded +/// scaled coordinate cannot be represented as `u32`. +/// /// # Examples /// /// ```rust -/// use delaunay::core::util::hilbert::hilbert_sorted_indices; +/// use delaunay::prelude::ordering::{hilbert_sorted_indices, HilbertError}; /// /// let coords = vec![[0.9_f64, 0.9], [0.1, 0.1], [0.5, 0.5]]; /// let order = hilbert_sorted_indices(&coords, (0.0, 1.0), 8)?; /// assert_eq!(order.len(), coords.len()); -/// # Ok::<(), delaunay::core::util::hilbert::HilbertError>(()) +/// # Ok::<(), HilbertError>(()) /// ``` pub fn hilbert_sorted_indices( coords: &[[T; D]], bounds: (T, T), bits: u32, ) -> Result, HilbertError> { - // Pre-validate parameters once - if bits == 0 || bits > 31 { - return Err(HilbertError::InvalidBitsParameter { bits }); - } - - let d_u32 = u32::try_from(D).map_err(|_| HilbertError::DimensionTooLarge { dimension: D })?; - let total_bits = u128::from(d_u32) * u128::from(bits); - if total_bits > 128 { - return Err(HilbertError::IndexOverflow { - dimension: D, - bits, - total_bits, - }); - } + validate_index_params::(bits)?; if D == 0 { return Ok((0..coords.len()).collect()); } - let (max_val_u32, max_val_t) = hilbert_quantization_scale::(bits)?; + let (max_val_u32, max_val_t) = quantization_scale::(bits)?; let mut keyed: Vec<((u128, [u32; D]), usize)> = coords .iter() .enumerate() .map(|(i, c)| { - let q = hilbert_quantize_with_scale(c, bounds, max_val_u32, max_val_t); - let idx = hilbert_index_from_quantized(&q, bits); - ((idx, q), i) + let q = 
quantize_with_scale(c, bounds, bits, max_val_u32, max_val_t)?; + let idx = index_from_quantized(&q, bits); + Ok(((idx, q), i)) }) - .collect(); + .collect::>()?; keyed.sort_by(|(ka, ia), (kb, ib)| ka.cmp(kb).then_with(|| ia.cmp(ib))); Ok(keyed.into_iter().map(|(_, i)| i).collect()) @@ -630,11 +716,12 @@ mod tests { // Sorting by stable helper should be deterministic. let mut payload2: Vec = (0..coords.len()).collect(); hilbert_sort_by_stable(&mut payload2, (0.0_f64, 1.0), 16, |&i| coords[i]).unwrap(); + assert_eq!(payload, order); assert_eq!(payload, payload2); } #[test] - fn test_hilbert_sort_by_unstable_orders_by_hilbert_key() { + fn test_unstable_sort_orders_by_key() { let coords: Vec<[f64; 2]> = vec![[0.9, 0.9], [0.1, 0.1], [0.5, 0.5], [0.1, 0.9], [0.9, 0.1]]; let expected_order = hilbert_sorted_indices(&coords, (0.0, 1.0), 16).unwrap(); @@ -646,7 +733,7 @@ mod tests { } #[test] - fn test_hilbert_zero_dimension_sort_helpers_are_noops() { + fn test_zero_dim_sort_helpers_noop() { let coords: Vec<[f64; 0]> = vec![[], [], []]; let order = hilbert_sorted_indices(&coords, (0.0, 1.0), 8).unwrap(); assert_eq!(order, vec![0, 1, 2]); @@ -660,6 +747,87 @@ mod tests { assert_eq!(unstable_payload, vec![3, 2, 1]); } + #[test] + fn test_scaled_quantize_reports_conversion_error() { + let result = quantize_with_scale(&[1.0_f64], (0.0, 1.0), 31, u32::MAX, f64::INFINITY); + + assert!(matches!( + result, + Err(HilbertError::QuantizedCoordinateConversionFailed { + bits: 31, + max_grid_value: u32::MAX, + coordinate_index: 0 + }) + )); + } + + #[test] + fn test_quantize_rejects_nonfinite_bounds() { + let result = hilbert_quantize(&[0.5_f64], (f64::NAN, 1.0), 8); + + assert_eq!( + result, + Err(HilbertError::NonFiniteBounds { + lower_bound_finite: false, + upper_bound_finite: true + }) + ); + } + + #[test] + fn test_quantize_rejects_nonfinite_extent() { + let result = hilbert_quantize(&[0.0_f64], (-f64::MAX, f64::MAX), 8); + + assert_eq!(result, Err(HilbertError::NonFiniteBoundsExtent 
{})); + } + + #[test] + fn test_quantize_rejects_nonfinite_coordinate() { + let result = hilbert_quantize(&[0.25_f64, f64::INFINITY], (0.0, 1.0), 8); + + assert_eq!( + result, + Err(HilbertError::NonFiniteCoordinate { + coordinate_index: 1 + }) + ); + } + + #[test] + fn test_quantize_rejects_nonfinite_normalized() { + let result = hilbert_quantize(&[f64::MAX], (-f64::MAX / 2.0, f64::MAX / 2.0), 8); + + assert_eq!( + result, + Err(HilbertError::NonFiniteNormalizedCoordinate { + coordinate_index: 0 + }) + ); + } + + #[test] + fn test_sort_error_keeps_order() { + let coords = [[0.5_f64], [f64::NAN], [0.25]]; + let mut payload = vec![0_usize, 1, 2]; + + let result = hilbert_sort_by_stable(&mut payload, (0.0, 1.0), 8, |&i| coords[i]); + + assert_eq!( + result, + Err(HilbertError::NonFiniteCoordinate { + coordinate_index: 0 + }) + ); + assert_eq!(payload, vec![0, 1, 2]); + } + + #[test] + fn test_quantize_clamps_f32_endpoint() { + let q = hilbert_quantize(&[1.0_f32], (0.0, 1.0), 31).unwrap(); + + assert_eq!(q, [(1_u32 << 31) - 1]); + } + #[test] fn test_hilbert_curve_is_continuous_on_2d_grid() { // A defining property of the (discrete) Hilbert curve is continuity: @@ -671,7 +839,7 @@ mod tests { for x in 0..n { for y in 0..n { let q = [x, y]; - let idx = hilbert_index_from_quantized(&q, bits); + let idx = index_from_quantized(&q, bits); points.push((q, idx)); } } @@ -707,7 +875,7 @@ mod tests { for z in 0..n { for w in 0..n { let q = [x, y, z, w]; - let idx = hilbert_index_from_quantized(&q, bits); + let idx = index_from_quantized(&q, bits); points.push((q, idx)); } } diff --git a/src/core/vertex.rs b/src/core/vertex.rs index 7b85cfbf..3bc42bd4 100644 --- a/src/core/vertex.rs +++ b/src/core/vertex.rs @@ -564,6 +564,19 @@ where /// /// This constructor is infallible because [`Point`] already owns validated /// coordinates for the vertex dimension. 
+ /// + /// # Examples + /// + /// ```rust + /// use delaunay::prelude::geometry::{Coordinate, Point}; + /// use delaunay::prelude::triangulation::Vertex; + /// + /// let point = Point::new([1.0, 2.0]); + /// let vertex = Vertex::::from_point(point); + /// + /// assert_eq!(vertex.point().coords(), &[1.0, 2.0]); + /// assert!(vertex.data().is_none()); + /// ``` #[inline] #[must_use] pub fn from_point(point: Point) -> Self { @@ -579,6 +592,18 @@ where /// /// This constructor is infallible because [`Point`] already owns validated /// coordinates for the vertex dimension and `data` is stored as provided. + /// + /// # Examples + /// + /// ```rust + /// use delaunay::prelude::geometry::{Coordinate, Point}; + /// use delaunay::prelude::triangulation::Vertex; + /// + /// let point = Point::new([1.0, 2.0]); + /// let vertex = Vertex::::from_point_with_data(point, 7); + /// + /// assert_eq!(vertex.data(), Some(&7)); + /// ``` #[inline] #[must_use] pub fn from_point_with_data(point: Point, data: impl Into) -> Self { diff --git a/src/lib.rs b/src/lib.rs index ad51e40d..b5ef702f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -48,6 +48,7 @@ //! | Read-only queries, traversal, convex hull | `use delaunay::prelude::query::*` | //! | Geometry helpers, predicates, points | `use delaunay::prelude::geometry::*` | //! | Random points / triangulations for examples and tests | `use delaunay::prelude::generators::*` | +//! | Hilbert ordering and quantization utilities | `use delaunay::prelude::ordering::*` | //! | Bistellar flips (Pachner moves) | `use delaunay::prelude::triangulation::flips::*` | //! | Delaunay repair and flip-based Level 4 validation | `use delaunay::prelude::triangulation::repair::*` | //! | Delaunayize workflow (repair + flip) | `use delaunay::prelude::triangulation::delaunayize::*` | @@ -1059,6 +1060,31 @@ pub mod prelude { generate_random_triangulation_with_topology_guarantee, }; } + + /// Focused exports for Hilbert ordering and quantization utilities. 
+ /// + /// These helpers are useful in doctests, integration tests, examples, and + /// benchmarks that need deterministic space-filling-curve ordering without + /// importing the broader triangulation or geometry preludes. + /// + /// # Examples + /// + /// ```rust + /// use delaunay::prelude::ordering::{hilbert_sorted_indices, HilbertError}; + /// + /// let coords = [[0.9_f64, 0.9], [0.1, 0.1], [0.5, 0.5]]; + /// let order = hilbert_sorted_indices(&coords, (0.0, 1.0), 8)?; + /// + /// assert_eq!(order.len(), coords.len()); + /// # Ok::<(), HilbertError>(()) + /// ``` + pub mod ordering { + pub use crate::core::util::{ + HilbertError, hilbert_index, hilbert_indices_prequantized, hilbert_quantize, + hilbert_sort_by_stable, hilbert_sort_by_unstable, hilbert_sorted_indices, + }; + } + /// Topology validation & analysis utilities. pub mod topology { /// Topology validation utilities. diff --git a/tests/prelude_exports.rs b/tests/prelude_exports.rs index 45a25bfa..b8bb7be2 100644 --- a/tests/prelude_exports.rs +++ b/tests/prelude_exports.rs @@ -6,6 +6,10 @@ use delaunay::prelude::generators::generate_random_points_seeded; use delaunay::prelude::geometry::{AdaptiveKernel, Point}; +use delaunay::prelude::ordering::{ + HilbertError, hilbert_index, hilbert_indices_prequantized, hilbert_quantize, + hilbert_sort_by_stable, hilbert_sort_by_unstable, hilbert_sorted_indices, +}; use delaunay::prelude::query::ConvexHull; use delaunay::prelude::triangulation::delaunayize::{ DelaunayizeConfig, DelaunayizeError, DelaunayizeOutcome, delaunayize_by_flips, @@ -22,6 +26,7 @@ use delaunay::prelude::triangulation::{ }; use delaunay::vertex; +/// Proves the focused flips prelude exports the trait bound expected by benchmarks. 
const fn assert_bistellar_flips(_: &impl BistellarFlips, (), (), 3>) {} #[test] @@ -98,3 +103,30 @@ fn diagnostic_preludes_cover_repair_apis() { let _typed_outcome: DelaunayizeOutcome = outcome; let _typed_error: Option = None; } + +#[test] +fn ordering_prelude_covers_hilbert_apis() -> Result<(), HilbertError> { + let coords = [[0.9_f64, 0.9], [0.1, 0.1], [0.5, 0.5]]; + let order = hilbert_sorted_indices(&coords, (0.0, 1.0), 8)?; + assert_eq!(order.len(), coords.len()); + + let quantized: Vec<[u32; 2]> = coords + .iter() + .map(|coord| hilbert_quantize(coord, (0.0, 1.0), 8)) + .collect::>()?; + let indices = hilbert_indices_prequantized(&quantized, 8)?; + assert_eq!(indices.len(), coords.len()); + + let index = hilbert_index(&coords[0], (0.0, 1.0), 8)?; + assert_eq!(index, indices[0]); + + let mut stable_payload = vec![0_usize, 1, 2]; + hilbert_sort_by_stable(&mut stable_payload, (0.0, 1.0), 8, |&i| coords[i])?; + assert_eq!(stable_payload, order); + + let mut unstable_payload = vec![0_usize, 1, 2]; + hilbert_sort_by_unstable(&mut unstable_payload, (0.0, 1.0), 8, |&i| coords[i])?; + assert_eq!(unstable_payload, order); + + Ok(()) +} diff --git a/tests/regressions.rs b/tests/regressions.rs index 1d8b4c52..2693047c 100644 --- a/tests/regressions.rs +++ b/tests/regressions.rs @@ -6,10 +6,10 @@ #[cfg(debug_assertions)] use delaunay::core::util::debug_print_first_delaunay_violation; -use delaunay::core::util::{hilbert_indices_prequantized, hilbert_quantize}; use delaunay::geometry::kernel::RobustKernel; use delaunay::geometry::point::Point; use delaunay::geometry::util::generate_random_points_in_ball_seeded; +use delaunay::prelude::ordering::{hilbert_indices_prequantized, hilbert_quantize}; use delaunay::prelude::triangulation::*; use delaunay::triangulation::delaunay::{ConstructionOptions, InsertionOrderStrategy, RetryPolicy}; diff --git a/tests/semgrep/src/project_rules/rust_style.rs b/tests/semgrep/src/project_rules/rust_style.rs index b0aa27c5..8895d1f7 100644 --- 
a/tests/semgrep/src/project_rules/rust_style.rs +++ b/tests/semgrep/src/project_rules/rust_style.rs @@ -2,6 +2,11 @@ use num_traits::NumCast; +// ruleid: delaunay.rust.prefer-prelude-imports-in-examples-benches +use delaunay::core::vertex::Vertex as DeepVertex; +// ok: delaunay.rust.prefer-prelude-imports-in-examples-benches +use delaunay::prelude::triangulation::Vertex as PreludeVertex; + pub fn production_stdio() { // ruleid: delaunay.rust.no-stdio-diagnostics-in-src println!("debug output");