diff --git a/Cargo.lock b/Cargo.lock index 54c632a7a..c0bcc4594 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -745,6 +745,34 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +[[package]] +name = "blockfrost" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3f18d436df2ac619d7cfa04f883b5f241619d3ef43c893d3c035012a271f81" +dependencies = [ + "blockfrost-openapi", + "futures", + "futures-timer", + "reqwest 0.12.24", + "serde", + "serde_json", + "thiserror 2.0.17", + "url", +] + +[[package]] +name = "blockfrost-openapi" +version = "0.1.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43e88a3c131d5e95b82a761d9b4ae0d100b67b138a9fd1d880742b50b26318a5" +dependencies = [ + "serde", + "serde_json", + "serde_with", + "uuid", +] + [[package]] name = "blocking" version = "1.6.2" @@ -2026,6 +2054,21 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.2" @@ -2721,6 +2764,22 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.7.0", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.17" @@ -2740,9 +2799,11 @@ dependencies = [ "percent-encoding", "pin-project-lite", "socket2 0.6.0", + "system-configuration 0.6.1", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -4087,6 +4148,16 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "mime_guess" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -4130,6 +4201,23 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "685a9ac4b61f4e728e1d2c6a7844609c16527aeb5e6c865915c08e619c16410f" +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework 2.11.1", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nix" version = "0.29.0" @@ -4345,12 +4433,50 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote 1.0.41", + "syn 2.0.106", +] + [[package]] name = "openssl-probe" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "opentelemetry" version = "0.31.0" @@ -5245,7 +5371,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 0.1.2", - "system-configuration", + "system-configuration 0.5.1", "tokio", "tokio-rustls 0.24.1", "tower-service", @@ -5265,17 +5391,23 @@ checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ "base64 0.22.1", "bytes", + "encoding_rs", "futures-channel", "futures-core", "futures-util", + "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "http-body-util", "hyper 1.7.0", "hyper-rustls 0.27.7", + "hyper-tls", "hyper-util", "js-sys", "log", + "mime", + "mime_guess", + "native-tls", "percent-encoding", "pin-project-lite", "quinn", @@ -5287,6 +5419,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.2", "tokio", + "tokio-native-tls", "tokio-rustls 0.26.4", "tokio-util", "tower", @@ -6389,6 +6522,73 @@ dependencies = [ "der", ] +[[package]] +name = "spo-api" +version = "3.0.0" +dependencies = [ + "anyhow", + "async-graphql", + "async-graphql-axum", + "axum", + "byte-unit-serde", + "clap", + "derive_more 2.0.1", + "fastrace", + "fastrace-axum", + "futures", + "indexer-common", + "indoc", + "log", + "metrics", + "once_cell", + "regex", + "serde", + "serde_with", + "sqlx", + "thiserror 2.0.17", + "tokio", + "tower", + "tower-http", + "trait-variant", + "uuid", +] + +[[package]] +name = "spo-indexer" +version = "3.0.0" +dependencies = [ + "anyhow", + "async-stream", + "blake2", + "blockfrost", + "byte-unit", + "clap", + "derive_more 2.0.1", + "fake", + "fastrace", + "futures", + "hex", + "humantime-serde", + "indexer-common", + "indoc", + "itertools 0.14.0", + "log", + "metrics", + "parity-scale-codec", + "parking_lot", + "paste", + "reqwest 0.12.24", + "secrecy", + "serde", + "serde_json", + "serde_with", + "sqlx", + "subxt", + "thiserror 2.0.17", + "tokio", + "trait-variant", +] + [[package]] name = "sqlx" version = "0.8.6" @@ -6410,6 +6610,7 @@ checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6" dependencies = [ "base64 0.22.1", "bytes", + "chrono", "crc", "crossbeam-queue", "either", @@ -6489,6 +6690,7 @@ dependencies = [ "bitflags 2.9.4", "byteorder", "bytes", + "chrono", "crc", "digest 0.10.7", "dotenvy", @@ -6532,6 +6734,7 @@ dependencies = [ "base64 0.22.1", "bitflags 2.9.4", "byteorder", + "chrono", "crc", "dotenvy", "etcetera 0.8.0", @@ -6568,6 +6771,7 @@ source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea" dependencies = [ "atoi", + "chrono", "flume", "futures-channel", "futures-core", @@ -7021,7 +7225,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation 0.9.4", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.9.4", + "system-configuration-sys 0.6.0", ] [[package]] @@ -7034,6 +7249,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tap" version = "1.0.1" @@ -7241,6 +7466,16 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -7661,6 +7896,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + [[package]] name = "unicode-bidi" version = "0.3.18" @@ -8185,7 +8426,7 @@ checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ "windows-implement", "windows-interface", - "windows-result", + "windows-result 0.1.2", "windows-targets 0.52.6", ] @@ -8223,6 +8464,17 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-registry" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" +dependencies = [ + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings", +] + [[package]] name = "windows-result" version = "0.1.2" @@ -8232,6 +8484,24 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link 0.1.3", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link 0.1.3", +] + [[package]] name = "windows-sys" version = "0.45.0" diff --git a/Cargo.toml b/Cargo.toml index 3f3a4fe43..c958077a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,8 @@ members = [ "indexer-api", "indexer-standalone", "indexer-tests", + "spo-indexer", + "spo-api", ] [workspace.package] @@ -29,6 +31,8 @@ async-stream = { version = "0.3" } axum = { version = "0.8" } bech32 = { version = "0.11" } bip32 = { 
version = "0.5" } +blake2 = { version = "0.10.6" } +blockfrost = { version = "1.1.0" } byte-unit = { version = "5.1" } byte-unit-serde = { version = "0.1" } chacha20poly1305 = { version = "0.10" } @@ -45,6 +49,7 @@ figment = { version = "0.10" } fs_extra = { version = "1.3" } futures = { version = "0.3" } graphql_client = { version = "0.14" } +hex = { version = "0.4.3" } humantime-serde = { version = "1.1" } indoc = { version = "2.0" } itertools = { version = "0.14" } @@ -61,12 +66,15 @@ midnight-storage_v6 = { git = "https://github.com/midnightntwrk/midnigh midnight-transient-crypto_v6 = { git = "https://github.com/midnightntwrk/midnight-ledger", package = "midnight-transient-crypto", tag = "ledger-6.1.0-alpha.5" } midnight-zswap_v6 = { git = "https://github.com/midnightntwrk/midnight-ledger", package = "midnight-zswap", tag = "ledger-6.1.0-alpha.5" } nix = { version = "0.30" } +once_cell = { version = "1.19" } opentelemetry = { version = "0.31" } opentelemetry-otlp = { version = "0.31" } opentelemetry_sdk = { version = "0.31" } parity-scale-codec = { version = "3.7" } parking_lot = { version = "0.12" } +paste = { version = "1.0" } reqwest = { version = "0.12", default-features = false } +regex = { version = "1.11" } secrecy = { version = "0.10" } serde = { version = "1.0" } serde_json = { version = "1.0" } diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 000000000..4081a45a9 --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,673 @@ +# SPO Services Migration Documentation + +## Overview + +This document details the migration of SPO (Stake Pool Operator) services from the `midnight-indexer-spo-extension` repository into the main `midnight-indexer` repository (v3.0.0-alpha.9). + +**Migration Date**: November 17, 2025 +**Source Repository**: midnight-indexer-spo-extension (based on midnight-indexer from months ago) +**Target Repository**: midnight-indexer v3.0.0-alpha.9 +**Target Network**: Midnight Preview Network (`wss://rpc.preview.midnight.network`) + +## Background + +The SPO extension was originally developed on an older version of midnight-indexer. The main midnight-indexer repository has since been updated with 100+ commits, including critical changes for Preview network compatibility. The migration was necessary to: + +1. Get latest improvements and bug fixes from upstream +2. Support the Midnight Preview network (previous version only supported older dev networks) +3. 
Resolve NetworkId compatibility issues that prevented connection to Preview network + +### Key Version Changes + +| Component | Old Version (spo-extension) | New Version (midnight-indexer) | +|-----------|----------------------------|--------------------------------| +| midnight-ledger | alpha.2 | alpha.5 | +| async-nats | 0.42 | 0.45 | +| NetworkId | Enum-based | String-based wrapper | + +## Migration Strategy + +**Approach Selected**: Integrate SPO services into midnight-indexer (Option A) + +**Rationale**: +- Midnight-indexer is the canonical upstream repository +- Easier to maintain going forward +- Access to latest improvements and Preview network support +- NetworkId changes in midnight-indexer v3.0.0 required for Preview network + +## Phase 1: Repository Setup + +### 1.1 Branch Creation +```bash +git checkout -b feature/integrate-spo-services +``` + +### 1.2 Service Directories Copied +From `midnight-indexer-spo-extension` to `midnight-indexer`: +- `spo-indexer/` - Complete directory (38 files) +- `spo-api/` - Complete directory + +## Phase 2: Workspace Configuration + +### 2.1 Updated Root Cargo.toml + +**Added Workspace Members**: +```toml +members = [ + # ... existing members + "spo-indexer", + "spo-api", +] +``` + +**Added Workspace Dependencies**: +```toml +[workspace.dependencies] +blake2 = { version = "0.10.6" } +blockfrost = { version = "1.1.0" } +hex = { version = "0.4.3" } +once_cell = { version = "1.19" } +paste = { version = "1.0" } +regex = { version = "1.11" } +``` + +### 2.2 Updated Service Dependencies + +**Files Modified**: +- `spo-indexer/Cargo.toml` +- `spo-api/Cargo.toml` + +**Changes**: Updated dependencies to use workspace versions: +```toml +blake2 = { workspace = true } +blockfrost = { workspace = true } +hex = { workspace = true } +once_cell = { workspace = true } +paste = { workspace = true } +regex = { workspace = true } +``` + +## Phase 3: Database Migrations + +### 3.1 Migration Files Copied + +Copied from `spo-extension/indexer-common/migrations/postgres/` to `midnight-indexer/indexer-common/migrations/postgres/`: + +1. **002_spo_initial.sql** (84 lines) + - Creates `epochs` table + - Creates `pool_metadata_cache` table + - Creates `spo_identity` table + - Creates `committee_membership` table + - Creates `spo_epoch_performance` table + - Creates `spo_history` table + +2. **003_drop_stg_committee.sql** (4 lines) + - Drops staging table + +3. **004_spo_stake.sql** (17 lines) + - Creates `spo_stake_snapshot` table + +4. **005_spo_stake_history.sql** (29 lines) + - Creates `spo_stake_history` table + - Creates `spo_stake_refresh_state` table + +## Phase 4: Docker Configuration + +### 4.1 Updated docker-compose.yaml + +**Added Services**: + +```yaml +spo-indexer: + profiles: + - cloud + depends_on: + postgres: + condition: "service_healthy" + nats: + condition: "service_started" + build: + context: . 
+    dockerfile: spo-indexer/Dockerfile
+  image: "spo-indexer:local"
+  restart: "no"
+  environment:
+    RUST_LOG: "spo_indexer=debug,indexer_common=debug,fastrace_opentelemetry=off,info"
+    APP__APPLICATION__NETWORK_ID: "preview"
+    APP__INFRA__NODE__URL: "wss://rpc.preview.midnight.network"
+    APP__INFRA__NODE__BLOCKFROST_ID: $APP__INFRA__NODE__BLOCKFROST_ID
+    APP__INFRA__STORAGE__HOST: "postgres"
+    APP__INFRA__STORAGE__PASSWORD: $APP__INFRA__STORAGE__PASSWORD
+    APP__INFRA__PUB_SUB__URL: "nats:4222"
+    APP__INFRA__PUB_SUB__PASSWORD: $APP__INFRA__PUB_SUB__PASSWORD
+  healthcheck:
+    test: ["CMD-SHELL", "cat /var/run/spo-indexer/running || exit 0"]
+
+spo-api:
+  profiles:
+    - cloud
+  depends_on:
+    postgres:
+      condition: "service_healthy"
+    nats:
+      condition: "service_started"
+  build:
+    context: .
+    dockerfile: spo-api/Dockerfile
+  image: "spo-api:local"
+  restart: "no"
+  ports:
+    - "8090:8090"
+  environment:
+    RUST_LOG: "spo_api=debug,indexer_common=debug,fastrace_opentelemetry=off,info"
+    APP__APPLICATION__NETWORK_ID: "preview"
+    APP__INFRA__STORAGE__HOST: "postgres"
+    APP__INFRA__STORAGE__PASSWORD: $APP__INFRA__STORAGE__PASSWORD
+    APP__INFRA__API__PORT: "8090"
+    APP__INFRA__API__MAX_COMPLEXITY: "2000"
+    APP__INFRA__API__MAX_DEPTH: "50"
+  healthcheck:
+    test: ["CMD-SHELL", "cat /var/run/spo-api/running || exit 0"]
+```
+
+**Key Changes from Original**:
+- Changed from pulling pre-built images to local builds
+- Updated network configuration to use Preview network
+- Updated database user from "postgres" to "indexer" for consistency
+
+### 4.2 Environment Variables
+
+**Updated .envrc.local**:
+```bash
+export APP__INFRA__NODE__BLOCKFROST_ID="previewukkFxumNW31cXmsBtKI1JTnbxvcVCbCj"
+export APP__INFRA__STORAGE__PASSWORD="indexer"
+export APP__INFRA__PUB_SUB__PASSWORD="indexer"
+```
+
+## Phase 5: Configuration Updates
+
+### 5.1 SPO Indexer Configuration
+
+**File**: `spo-indexer/config.yaml`
+
+**Changes**:
+```yaml
+application:
+  network_id: "preview"  # Changed from "Undeployed"
+
+infra:
+  storage:
+    user: "indexer"  # Changed from "postgres"
+
+  node:
+    url: "wss://rpc.preview.midnight.network"  # Changed from dev network
+```
+
+### 5.2 SPO API Configuration
+
+**File**: `spo-api/config.yaml`
+
+**Changes**:
+```yaml
+application:
+  network_id: "preview"  # Changed from "Undeployed"
+```
+
+## Phase 6: Code Compatibility Fixes
+
+### 6.1 NetworkId Type Change
+
+**Issue**: NetworkId changed from `Copy` trait enum to String-based wrapper (non-Copy)
+
+**Files Modified**:
+
+1. **spo-api/src/application.rs:15**
+   ```rust
+   // BEFORE
+   #[derive(Debug, Clone, Copy, Deserialize)]
+   pub struct Config {
+       pub network_id: NetworkId,
+   }
+
+   // AFTER
+   #[derive(Debug, Clone, Deserialize)] // Removed Copy
+   pub struct Config {
+       pub network_id: NetworkId,
+   }
+   ```
+
+2. **spo-api/src/infra/api/mod.rs:191**
+   ```rust
+   // BEFORE
+   fn get_network_id(&self) -> NetworkId {
+       self.data::<NetworkId>()
+           .copied()
+           .expect("NetworkId is stored in Context")
+   }
+
+   // AFTER
+   fn get_network_id(&self) -> NetworkId {
+       self.data::<NetworkId>()
+           .cloned() // Changed from .copied()
+           .expect("NetworkId is stored in Context")
+   }
+   ```
+
+### 6.2 Preview Network API Compatibility
+
+**Issue**: Midnight Preview network RPC API changed between alpha.2 and alpha.5
+
+#### Change 1: `auraPubKey` Field Removed
+
+**Files Modified**:
+
+1. 
**spo-indexer/src/domain/rpc.rs:94-108**
+   ```rust
+   // BEFORE
+   pub struct CandidateRegistration {
+       pub sidechain_pub_key: String,
+       pub sidechain_account_id: String,
+       pub mainchain_pub_key: String,
+       pub cross_chain_pub_key: String,
+       pub aura_pub_key: String,
+       pub grandpa_pub_key: String,
+       // ... rest of fields
+   }
+
+   // AFTER
+   pub struct CandidateRegistration {
+       pub sidechain_pub_key: String,
+       pub sidechain_account_id: String,
+       pub mainchain_pub_key: String,
+       pub cross_chain_pub_key: String,
+       #[serde(default)]
+       pub aura_pub_key: Option<String>, // Made optional
+       #[serde(default)]
+       pub grandpa_pub_key: Option<String>, // Made optional
+       // ... rest of fields
+   }
+   ```
+
+2. **spo-indexer/src/domain/rpc.rs:120-126** (Display impl)
+   ```rust
+   // BEFORE
+   writeln!(f, " Aura Pub Key: {}", self.aura_pub_key)?;
+   writeln!(f, " Grandpa Pub Key: {}", self.grandpa_pub_key)?;
+
+   // AFTER
+   if let Some(aura_key) = &self.aura_pub_key {
+       writeln!(f, " Aura Pub Key: {}", aura_key)?;
+   }
+   if let Some(grandpa_key) = &self.grandpa_pub_key {
+       writeln!(f, " Grandpa Pub Key: {}", grandpa_key)?;
+   }
+   ```
+
+3. **spo-indexer/src/infra/subxt_node.rs:191-192**
+   ```rust
+   // BEFORE
+   aura_pub_key: remove_hex_prefix(reg.aura_pub_key),
+   grandpa_pub_key: remove_hex_prefix(reg.grandpa_pub_key),
+
+   // AFTER
+   aura_pub_key: reg.aura_pub_key.map(remove_hex_prefix),
+   grandpa_pub_key: reg.grandpa_pub_key.map(remove_hex_prefix),
+   ```
+
+4. **spo-indexer/src/application.rs:266**
+   ```rust
+   // BEFORE
+   let aura_pk = remove_hex_prefix(raw_spo.aura_pub_key.to_string());
+
+   // AFTER
+   let aura_pk = raw_spo.aura_pub_key.as_ref()
+       .map(|k| remove_hex_prefix(k.to_string()))
+       .unwrap_or_default();
+   ```
+
+## Phase 7: Build and Testing
+
+### 7.1 Compilation
+
+**Command**:
+```bash
+source .envrc.local && docker compose --profile cloud build spo-indexer spo-api
+```
+
+**Results**:
+- ✅ spo-indexer builds successfully
+- ✅ spo-api builds successfully
+- Build time: ~4 minutes (first build), ~3 minutes (incremental)
+
+### 7.2 Container Startup
+
+**Command**:
+```bash
+source .envrc.local && docker compose up -d postgres nats spo-indexer spo-api
+```
+
+**Results**:
+- ✅ postgres: Started and healthy
+- ✅ nats: Started
+- ✅ spo-api: Started and healthy (port 8090)
+- ✅ spo-indexer: Started successfully
+
+### 7.3 Initial Testing Results
+
+**Successful Operations**:
+1. ✅ Connected to Preview network RPC (`wss://rpc.preview.midnight.network`)
+2. ✅ Created database connection pool
+3. ✅ Applied database migrations
+4. ✅ Successfully processed epoch 979338
+5. ✅ Started processing epoch 979339
+
+**Sample Log Output**:
+```json
+{"timestamp":"2025-11-17T23:05:44.259759+00:00","level":"INFO","target":"spo_indexer","file":"spo-indexer/src/main.rs","line":52,"message":"starting"}
+{"timestamp":"2025-11-17T23:05:48.341566+00:00","level":"DEBUG","target":"indexer_common::infra::pool::postgres","file":"/build/indexer-common/src/infra/pool/postgres.rs","line":60,"message":"created pool"}
+processing epoch 979338
+processed epoch 979338
+processing epoch 979339
+```
+
+## Current Status
+
+### ✅ Completed
+
+1. Repository structure migrated
+2. Workspace configuration updated
+3. Database migrations integrated
+4. Docker configuration updated
+5. Configuration files updated for Preview network
+6. NetworkId compatibility fixes applied
+7. Preview network API compatibility fixes (aura_pub_key, grandpa_pub_key)
+8. Successful compilation of both services
+9. Successful connection to Preview network
+10. 
Successfully processing SPO registration data from Preview network
+
+### ⚠️ Known Issues
+
+#### Issue #1: Missing RPC Method `sidechain_getEpochCommittee`
+
+**Error**:
+```json
+{"timestamp":"2025-11-17T23:05:51.417863+00:00","level":"ERROR","target":"spo_indexer","file":"spo-indexer/src/main.rs","line":31,"message":"process exited with ERROR","kvs":{"backtrace":"disabled backtrace","error":"cannot make rpc call: sidechain_getEpochCommittee"}}
+```
+
+**Root Cause**: The `sidechain_getEpochCommittee` RPC method does not exist in the Midnight Preview network API (likely removed or renamed between alpha.2 and alpha.5).
+
+**Impact**:
+- spo-indexer processes a few epochs successfully
+- Crashes when it tries to fetch committee information
+- Prevents continuous operation
+
+**Location**: `spo-indexer/src/infra/subxt_node.rs:220`
+
+**Current Implementation**:
+```rust
+pub async fn get_committee(&self, epoch_number: u32) -> Result<Vec<CommitteeMember>, SPOClientError> {
+    let rpc_params = RawValue::from_string(format!("[{}]", epoch_number))?;
+
+    loop {
+        let raw_response = self
+            .rpc_client
+            .request(
+                "sidechain_getEpochCommittee".to_string(), // This method doesn't exist
+                Some(rpc_params.clone()),
+            )
+            .await
+            .map_err(|_| SPOClientError::RpcCall("sidechain_getEpochCommittee".to_string()))?;
+        // ...
+    }
+}
+```
+
+**Potential Solutions**:
+
+1. **Option A - Find Alternative RPC Method**:
+   - Research Midnight Preview network API documentation
+   - Find the new method name for fetching the epoch committee
+   - Update the RPC call
+
+2. **Option B - Derive from Alternative Data**:
+   - Check if committee information is available through the `sidechain_getAriadneParameters` response
+   - Extract the committee from candidate registrations if possible
+
+3. **Option C - Make Committee Optional**:
+   - Modify application logic to handle missing committee data
+   - Skip committee-related operations if the API is unavailable
+   - **Note**: This may impact functionality that depends on committee data
+
+**Recommended Next Steps**:
+1. Research Midnight Preview network RPC API documentation
+2. Check if there's an alternative method to get committee data
+3. Test if the application can function without committee data
+4. 
If committee data is optional, implement graceful degradation
+
+## Testing Checklist
+
+### Completed Tests
+- [x] Docker build succeeds for spo-indexer
+- [x] Docker build succeeds for spo-api
+- [x] Containers start without errors
+- [x] Database connection established
+- [x] Database migrations apply successfully
+- [x] Connection to Preview network RPC successful
+- [x] SPO registration data fetching works
+- [x] Epoch processing works (at least partially)
+
+### Pending Tests
+- [ ] Full epoch processing without errors
+- [ ] Committee data retrieval (blocked by missing RPC)
+- [ ] Pool metadata fetching from Blockfrost
+- [ ] spo-api GraphQL queries
+- [ ] Stake refresh functionality
+- [ ] End-to-end data flow from indexer to API
+
+## Environment Setup
+
+### Required Environment Variables
+
+```bash
+# Database
+export APP__INFRA__STORAGE__PASSWORD="indexer"
+
+# NATS
+export APP__INFRA__PUB_SUB__PASSWORD="indexer"
+export APP__INFRA__LEDGER_STATE_STORAGE__PASSWORD="indexer"
+
+# Blockfrost (for Cardano pool metadata)
+export APP__INFRA__NODE__BLOCKFROST_ID="previewukkFxumNW31cXmsBtKI1JTnbxvcVCbCj"
+
+# Optional: Encryption secret for wallet indexer
+export APP__INFRA__SECRET="303132333435363738393031323334353637383930313233343536373839303132"
+```
+
+### Running Services
+
+**Start all services**:
+```bash
+source .envrc.local && docker compose --profile cloud up -d
+```
+
+**Start only SPO services**:
+```bash
+source .envrc.local && docker compose up -d postgres nats spo-indexer spo-api
+```
+
+**View logs**:
+```bash
+# All logs
+docker compose logs -f
+
+# SPO Indexer only
+docker compose logs -f spo-indexer
+
+# SPO API only
+docker compose logs -f spo-api
+```
+
+**Rebuild after code changes**:
+```bash
+source .envrc.local && docker compose build spo-indexer spo-api
+source .envrc.local && docker compose up -d spo-indexer spo-api
+```
+
+## API Endpoints
+
+### SPO API
+- **GraphQL Endpoint**: http://localhost:8090/api/v1/graphql
+- **GraphQL Playground**: http://localhost:8090/api/v1/playground
+- **Health Check**: http://localhost:8090/ready
+
+### Indexer API (if running full stack)
+- **GraphQL Endpoint**: http://localhost:8088/api/v1/graphql
+- **Health Check**: http://localhost:8088/ready
+
+## Files Modified Summary
+
+### New Files
+- 4 migration SQL files in `indexer-common/migrations/postgres/` (copied from spo-extension; see Phase 3)
+
+### Modified Files
+
+| File | Lines Changed | Purpose |
+|------|---------------|---------|
+| `Cargo.toml` | +8 | Added workspace members and dependencies |
+| `docker-compose.yaml` | +60 | Added spo-indexer and spo-api services |
+| `.envrc.local` | +3 | Added Blockfrost ID and credentials |
+| `spo-indexer/config.yaml` | 3 | Updated network_id, user, RPC URL |
+| `spo-api/config.yaml` | 1 | Updated network_id |
+| `spo-indexer/Cargo.toml` | 3 | Updated to use workspace dependencies |
+| `spo-api/Cargo.toml` | 1 | Updated to use workspace dependencies |
+| `spo-api/src/application.rs` | 1 | Removed Copy trait from Config |
+| `spo-api/src/infra/api/mod.rs` | 1 | Changed .copied() to .cloned() |
+| `spo-indexer/src/domain/rpc.rs` | 8 | Made aura_pub_key and grandpa_pub_key optional |
+| `spo-indexer/src/infra/subxt_node.rs` | 2 | Handle optional aura/grandpa keys |
+| `spo-indexer/src/application.rs` | 1 | Handle optional aura_pub_key |
+
+**Total Files Modified**: 12
+**Total New Files**: 4 (migration SQL files)
+
+## Dependency Changes
+
+### Updated Dependencies
+
+| Dependency | Old Version | New Version | Reason |
+|------------|-------------|-------------|--------|
+| 
midnight-ledger | alpha.2 | alpha.5 | Preview network support | +| async-nats | 0.42 | 0.45 | Compatibility with midnight-ledger | +| blake2 | - | 0.10.6 | Added to workspace | +| blockfrost | - | 1.1.0 | Added to workspace | +| hex | - | 0.4.3 | Added to workspace | +| once_cell | - | 1.19 | Added to workspace | +| paste | - | 1.0 | Added to workspace | +| regex | - | 1.11 | Added to workspace | + +## Breaking Changes from alpha.2 to alpha.5 + +### 1. NetworkId Type System + +**Before (alpha.2)**: +```rust +#[derive(Copy, Clone)] +pub enum NetworkId { + Undeployed, + DevNet, + TestNet, + MainNet, +} +``` + +**After (alpha.5)**: +```rust +pub struct NetworkId(pub String); +``` + +**Impact**: +- NetworkId no longer implements Copy trait +- Supports arbitrary network names ("preview", "qanet", etc.) +- Configuration changed from enum variant to string value + +### 2. Midnight RPC API Changes + +**Removed Fields in `CandidateRegistration`**: +- `auraPubKey` - No longer returned by `sidechain_getAriadneParameters` +- `grandpaPubKey` - No longer returned by `sidechain_getAriadneParameters` + +**Missing RPC Methods**: +- `sidechain_getEpochCommittee` - Method not available in Preview network + +**Impact**: +- Code must handle optional consensus keys +- Committee data retrieval needs alternative approach + +## Recommendations for Future Work + +### Immediate Priority + +1. **Resolve Committee Data Issue**: + - Contact Midnight team for Preview network RPC documentation + - Identify correct method to fetch committee information + - Or implement graceful handling if committee data is not critical + +2. **End-to-End Testing**: + - Test full epoch processing cycle + - Verify data persists correctly to database + - Test GraphQL queries through spo-api + +### Medium Priority + +3. **Documentation Updates**: + - Update README with SPO services documentation + - Document GraphQL schema + - Add examples for common queries + +4. **Monitoring**: + - Add health metrics for SPO services + - Monitor epoch processing performance + - Track Blockfrost API usage + +### Low Priority + +5. **Optimization**: + - Review and optimize database queries + - Consider caching strategies for pool metadata + - Optimize Docker build times with better layer caching + +6. **Code Cleanup**: + - Remove dead code if any + - Consolidate duplicate logic + - Update comments to reflect Preview network specifics + +## Appendix + +### A. Network Configuration Comparison + +| Config Item | Old (Dev Network) | New (Preview Network) | +|-------------|-------------------|----------------------| +| network_id | "Undeployed" | "preview" | +| RPC URL | ws://node:9944 | wss://rpc.preview.midnight.network | +| Database User | postgres | indexer | +| Blockfrost Network | mainnet | preview | + +### B. Database Schema + +See migration files in `indexer-common/migrations/postgres/`: +- `002_spo_initial.sql` - Core SPO tables +- `003_drop_stg_committee.sql` - Cleanup +- `004_spo_stake.sql` - Stake tracking +- `005_spo_stake_history.sql` - Historical stake data + +### C. 
References + +- **Midnight Documentation**: https://docs.midnight.network/ +- **midnight-indexer Repository**: https://github.com/midnightntwrk/midnight-indexer +- **midnight-ledger Repository**: https://github.com/midnightntwrk/midnight-ledger +- **Blockfrost API**: https://docs.blockfrost.io/ + +--- + +**Document Version**: 1.0 +**Last Updated**: November 17, 2025 +**Authors**: Migration performed with assistance from Claude (Anthropic) diff --git a/docker-compose.yaml b/docker-compose.yaml index 1034ce9d3..384b0925c 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -69,6 +69,67 @@ services: timeout: "2s" retries: 2 + spo-indexer: + profiles: + - cloud + depends_on: + postgres: + condition: "service_healthy" + nats: + condition: "service_started" + build: + context: . + dockerfile: spo-indexer/Dockerfile + image: "spo-indexer:local" + restart: "no" + environment: + RUST_LOG: "spo_indexer=debug,indexer_common=debug,fastrace_opentelemetry=off,info" + APP__APPLICATION__NETWORK_ID: "preview" + APP__INFRA__NODE__URL: "wss://rpc.preview.midnight.network" + APP__INFRA__NODE__BLOCKFROST_ID: $APP__INFRA__NODE__BLOCKFROST_ID + APP__INFRA__STORAGE__HOST: "postgres" + APP__INFRA__STORAGE__PASSWORD: $APP__INFRA__STORAGE__PASSWORD + APP__INFRA__PUB_SUB__URL: "nats:4222" + APP__INFRA__PUB_SUB__PASSWORD: $APP__INFRA__PUB_SUB__PASSWORD + healthcheck: + test: ["CMD-SHELL", "cat /var/run/spo-indexer/running || exit 0"] + start_interval: "2s" + start_period: "30s" + interval: "5s" + timeout: "2s" + retries: 2 + + spo-api: + profiles: + - cloud + depends_on: + postgres: + condition: "service_healthy" + nats: + condition: "service_started" + build: + context: . + dockerfile: spo-api/Dockerfile + image: "spo-api:local" + restart: "no" + ports: + - "8090:8090" + environment: + RUST_LOG: "spo_api=debug,indexer_common=debug,fastrace_opentelemetry=off,info" + APP__APPLICATION__NETWORK_ID: "preview" + APP__INFRA__STORAGE__HOST: "postgres" + APP__INFRA__STORAGE__PASSWORD: $APP__INFRA__STORAGE__PASSWORD + APP__INFRA__API__PORT: "8090" + APP__INFRA__API__MAX_COMPLEXITY: "2000" + APP__INFRA__API__MAX_DEPTH: "50" + healthcheck: + test: ["CMD-SHELL", "cat /var/run/spo-api/running || exit 0"] + start_interval: "2s" + start_period: "30s" + interval: "5s" + timeout: "2s" + retries: 2 + indexer-api: profiles: - cloud diff --git a/indexer-common/migrations/postgres/002_spo_initial.sql b/indexer-common/migrations/postgres/002_spo_initial.sql new file mode 100644 index 000000000..c50a5cd17 --- /dev/null +++ b/indexer-common/migrations/postgres/002_spo_initial.sql @@ -0,0 +1,84 @@ +CREATE TABLE epochs ( + epoch_no BIGINT PRIMARY KEY, + starts_at TIMESTAMPTZ NOT NULL, + ends_at TIMESTAMPTZ NOT NULL +); + +CREATE TABLE pool_metadata_cache ( + pool_id VARCHAR PRIMARY KEY, + hex_id VARCHAR UNIQUE, + name TEXT, + ticker TEXT, + homepage_url TEXT, + updated_at TIMESTAMPTZ, + url TEXT +); + +CREATE TABLE spo_identity ( + spo_sk VARCHAR PRIMARY KEY, + sidechain_pubkey VARCHAR UNIQUE, + + pool_id VARCHAR REFERENCES pool_metadata_cache(pool_id), + mainchain_pubkey VARCHAR UNIQUE, + aura_pubkey VARCHAR UNIQUE +); + +CREATE TABLE stg_committee ( + epoch_no BIGINT NOT NULL, + position INT NOT NULL, + sidechain_pubkey VARCHAR NOT NULL, + arrived_at TIMESTAMPTZ NOT NULL +); + +CREATE TABLE committee_membership ( + spo_sk VARCHAR, + sidechain_pubkey VARCHAR, + + epoch_no BIGINT NOT NULL, + position INT NOT NULL, + expected_slots INT NOT NULL, + PRIMARY KEY (epoch_no, position) +); + +CREATE TABLE spo_epoch_performance ( + spo_sk 
VARCHAR REFERENCES spo_identity(spo_sk), + identity_label VARCHAR, + epoch_no BIGINT NOT NULL, + expected_blocks INT NOT NULL, + produced_blocks INT NOT NULL, + PRIMARY KEY (epoch_no, spo_sk) +); + +CREATE TABLE spo_history ( + spo_hist_sk BIGSERIAL PRIMARY KEY, + spo_sk VARCHAR REFERENCES spo_identity(spo_sk), + epoch_no BIGINT NOT NULL, + status TEXT NOT NULL, + valid_from BIGINT NOT NULL, + valid_to BIGINT NOT NULL, + UNIQUE (spo_sk, epoch_no) +); + +-- Update "updated_at" field each time the record is updated +CREATE OR REPLACE FUNCTION set_updated_at_timestamp() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER update_pool_metadata_cache_updated_at +BEFORE UPDATE ON pool_metadata_cache +FOR EACH ROW +EXECUTE FUNCTION set_updated_at_timestamp(); + +-- indexes +CREATE INDEX IF NOT EXISTS spo_identity_pk ON spo_identity (pool_id, sidechain_pubkey, aura_pubkey); + +CREATE INDEX IF NOT EXISTS spo_history_epoch_no_idx ON spo_history (epoch_no); + +CREATE INDEX IF NOT EXISTS committee_membership_epoch_no_idx ON committee_membership (epoch_no); + +CREATE INDEX IF NOT EXISTS spo_epoch_performance_identity_pk ON spo_epoch_performance (epoch_no, identity_label); +CREATE INDEX IF NOT EXISTS spo_epoch_performance_epoch_no_idx ON spo_epoch_performance (epoch_no); diff --git a/indexer-common/migrations/postgres/003_drop_stg_committee.sql b/indexer-common/migrations/postgres/003_drop_stg_committee.sql new file mode 100644 index 000000000..4031f2aa9 --- /dev/null +++ b/indexer-common/migrations/postgres/003_drop_stg_committee.sql @@ -0,0 +1,4 @@ +-- This migration drops the legacy staging table added in 002 to maintain checksum compatibility. +-- Safe to run even if the table doesn't exist. + +DROP TABLE IF EXISTS stg_committee; diff --git a/indexer-common/migrations/postgres/004_spo_stake.sql b/indexer-common/migrations/postgres/004_spo_stake.sql new file mode 100644 index 000000000..82cdd6091 --- /dev/null +++ b/indexer-common/migrations/postgres/004_spo_stake.sql @@ -0,0 +1,17 @@ +-- Stake snapshot per pool (latest values). This supports explorer stake distribution views. +-- Values are sourced from mainchain pool data (e.g., Blockfrost) and keyed by Cardano pool_id (56-hex string). 
+ +CREATE TABLE IF NOT EXISTS spo_stake_snapshot ( + pool_id VARCHAR PRIMARY KEY REFERENCES pool_metadata_cache(pool_id) ON DELETE CASCADE, + live_stake NUMERIC, -- current live stake (lovelace-like units) as big numeric + active_stake NUMERIC, -- current active stake + live_delegators INT, -- number of live delegators + live_saturation DOUBLE PRECISION, -- saturation ratio (0..1+) + declared_pledge NUMERIC, -- declared pledge + live_pledge NUMERIC, -- current pledge + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Helpful indexes for ordering/filtering +CREATE INDEX IF NOT EXISTS spo_stake_snapshot_updated_at_idx ON spo_stake_snapshot (updated_at DESC); +CREATE INDEX IF NOT EXISTS spo_stake_snapshot_live_stake_idx ON spo_stake_snapshot ((COALESCE(live_stake, 0)) DESC); diff --git a/indexer-common/migrations/postgres/005_spo_stake_history.sql b/indexer-common/migrations/postgres/005_spo_stake_history.sql new file mode 100644 index 000000000..cd2aaef56 --- /dev/null +++ b/indexer-common/migrations/postgres/005_spo_stake_history.sql @@ -0,0 +1,29 @@ +-- Stake history table and refresh state cursor + +CREATE TABLE IF NOT EXISTS spo_stake_history ( + id BIGSERIAL PRIMARY KEY, + pool_id VARCHAR NOT NULL REFERENCES pool_metadata_cache(pool_id) ON DELETE CASCADE, + recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + mainchain_epoch INTEGER, + + live_stake NUMERIC, + active_stake NUMERIC, + live_delegators INTEGER, + live_saturation DOUBLE PRECISION, + declared_pledge NUMERIC, + live_pledge NUMERIC +); + +CREATE INDEX IF NOT EXISTS spo_stake_history_pool_time_idx ON spo_stake_history (pool_id, recorded_at DESC); +CREATE INDEX IF NOT EXISTS spo_stake_history_epoch_idx ON spo_stake_history (mainchain_epoch); + +-- Single-row state table to track paging cursor for stake refresh +CREATE TABLE IF NOT EXISTS spo_stake_refresh_state ( + id BOOLEAN PRIMARY KEY DEFAULT TRUE, + last_pool_id VARCHAR, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +INSERT INTO spo_stake_refresh_state (id) +VALUES (TRUE) +ON CONFLICT (id) DO NOTHING; diff --git a/spo-api/ARCHITECTURE.md b/spo-api/ARCHITECTURE.md new file mode 100644 index 000000000..7247e35ff --- /dev/null +++ b/spo-api/ARCHITECTURE.md @@ -0,0 +1,175 @@ +# SPO API Architecture + +This document describes the architecture of the `spo-api` service: its purpose, components, data / request flow, dependencies, operational concerns, and planned evolution. It mirrors the structure used by `indexer-api`, adapted for a Stake Pool Operator (SPO) focused read-only API MVP. + +## Purpose & Scope + +Expose Stake Pool Operator (SPO) and per-epoch performance / identity data to Midnight Explorer (and other clients) over GraphQL (HTTP + WebSocket). For the current MVP: + +- Read-only queries backed by the existing Postgres database used by the indexer. +- No NATS dependency yet (live updates & catch-up tracking to be added later). +- Simple readiness: DB connectivity + (placeholder) caught-up flag (always `true` for now). + +Future iterations will introduce: + +- NATS subscription to indexer events for real caught-up gating and push updates. +- Subscriptions for live SPO / epoch performance changes. +- Repository layer with richer projections / aggregations. 
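+
+For a quick smoke test of the MVP surface, `serviceInfo` is the only query wired up today; assuming the default port 8090 from `config.yaml`, a request like this should answer:
+
+```sh
+# POST the MVP's single query to the v1 GraphQL endpoint.
+curl -s http://localhost:8090/api/v1/graphql \
+  -H 'Content-Type: application/json' \
+  -d '{"query":"{ serviceInfo { name version network } }"}'
+```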
+ +## High-level Data / Control Flow + +```text ++----------------+ +----------------------+ +----------------------+ +------------------+ +| Postgres (SQL) | SQL | spo-api Application | HTTP | Clients (GraphQL/WS) | Admin | /ready endpoint | ++----------------+ <------ +----------------------+ <----->+ Queries / Future WS | check | DB + caught_up | + | Axum + async-graphql +----------------------+ +------------------+ + | (v1 schema) + | State: { caught_up, db } + +---------------------------+ +``` + +Mermaid (future NATS integration indicated but not active yet): + +```mermaid +flowchart LR + DB[(Postgres)] --> API[SPO API] + API -->|HTTP| Client[GraphQL Clients] + subgraph Future + NATS[(NATS Events)] --> API + end + API -.-> Ready[/ready health/] +``` + +## Components & Responsibilities + +- Application (`src/application.rs`) + - Loads network settings, spawns API server. + - Maintains `caught_up: AtomicBool` (placeholder `true` until NATS integration). + - Handles SIGTERM for graceful shutdown. + +- Infra API (`src/infra/api/*`) + - `AxumApi`: builds the router and runs the server. + - `AppState`: composite state (caught_up + optional Db pool) with `FromRef` splits for Axum extractors. + - `/ready`: returns `503` if (future) not caught up or DB ping fails; else `200`. + - GraphQL v1 schema mounted at `/api/v1/graphql` (and future WS at `/api/v1/graphql/ws`). + - Middleware: tracing (`FastraceLayer`), CORS (permissive), request body size limiting + custom 400->413 remap. + +- Domain (`src/domain.rs` and future domain modules) + - Defines the `Api` trait consumed by application orchestration. + - Will host SPO-specific domain models (stake pool operator identity, performance snapshots, epoch aggregates). + +- Repository Layer (planned: `src/infra/repo.rs`) + - SQL query abstraction over Postgres using `sqlx`. + - Provide typed return models consumed by GraphQL resolvers. + - Encapsulate pagination, filtering, and performance queries. + +## Readiness & Health + +Current readiness logic: + +1. `caught_up` flag (always `true` in MVP). +2. Lightweight DB health ping `SELECT 1` inside `/ready`. + + +If either fails (in the future, actual catch-up check), returns `503 Service Unavailable`. + +## GraphQL Schema (v1 MVP) + +- `Query.serviceInfo`: returns service name, version, network. +- Future additions: + - `stakePoolOperators(limit, offset, filters)` + - `stakePoolOperator(id)` + - `epochPerformance(epochNumber)` + - Aggregations (top K by performance, delegation composition, historical trend) +- Subscriptions (deferred until NATS): live operator performance updates, epoch rollovers. + +## Data Model (Planned) + +Conceptual entities (not yet implemented): + +- StakePoolOperator { id, identityKey, displayName, createdAt, lastActiveAt, performanceScore, commissions, totalStake } +- EpochPerformance { epoch, operatorId, blocksProduced, blocksExpected, performanceRatio, stakeShare } +- DelegatorStake (optional for explorer pivot views) + +Indexes / queries to optimize: + +- By operator id +- By epoch + operator id +- Top N operators by performance for a given epoch + +## Error Handling + +- `/ready` returns targeted messages: "database not ready" vs. future "indexer not caught up". +- GraphQL resolvers (when added) will map domain errors into structured GraphQL errors with categories (e.g. NOT_FOUND, INTERNAL). + +## Telemetry & Metrics + +(Planned) + +- Request tracing via existing fastrace integration. 
+- Gauge for connected WS clients (already scaffolded; currently unused field in `Metrics`).
+- Counters for query types and DB latency histograms (to be added alongside repo layer).
+
+## Configuration
+
+`config.yaml` (MVP subset):
+
+- `infra.api`: address, port, body limits, complexity, depth.
+- `infra.storage`: Postgres connection pool config.
+- `application.network_id`: network identifier.
+- Telemetry config (tracing + metrics) reused from `indexer-common`.
+
+## Build & Run
+
+```sh
+# Build
+cargo build -p spo-api --features cloud
+
+# Run (ensure Postgres env / config is valid)
+cargo run -p spo-api --features cloud
+
+# Health
+curl -i http://localhost:8090/ready
+```
+
+## Evolution Roadmap
+
+| Milestone | Description | Status |
+|-----------|-------------|--------|
+| MVP scaffold | Service, config, basic GraphQL, readiness, DB pool | DONE |
+| Repo layer | Introduce `repo` module with first SPO queries | PENDING |
+| SPO domain models | Define core structs + mapping | PENDING |
+| GraphQL SPO queries | `stakePoolOperators`, `stakePoolOperator` | PENDING |
+| Performance endpoints | Epoch performance aggregates | PENDING |
+| NATS integration | Real catch-up + subscriptions | PENDING |
+| Subscriptions | Live operator performance stream | PENDING |
+| Metrics expansion | DB/query metrics, WS client gauge | PENDING |
+| Hardening | Auth (if required), pagination policies, limits | FUTURE |
+
+## Design Principles
+
+- Start minimal: add complexity (NATS, subscriptions) only when data feed is ready.
+- Keep GraphQL boundary stable; evolve underlying queries behind repository abstraction.
+- Prefer explicit typed models over ad-hoc JSON for performance data.
+- Systematically enforce limits (complexity, depth, pagination) for resilience; see the sketch after the open questions below.
+
+## Open Questions / Future Decisions
+
+- Exact schema for performance scoring (source of truth & calculation timing).
+- Need for caching layer (in-memory epoch aggregates) vs. pure SQL queries.
+- Security / auth requirements for future administrative fields (if any).
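+
+As a reference for the limits principle above, here is a minimal sketch of how the configured `max_complexity` and `max_depth` bounds can be applied when building the v1 schema with `async-graphql` (the `QueryRoot` and its resolver are illustrative; the real schema lives under `src/infra/api/v1`):
+
+```rust
+use async_graphql::{EmptyMutation, EmptySubscription, Object, Schema};
+
+struct QueryRoot;
+
+#[Object]
+impl QueryRoot {
+    /// Stand-in for the MVP `serviceInfo` resolver.
+    async fn service_name(&self) -> &'static str {
+        "spo-api"
+    }
+}
+
+fn build_schema(
+    max_complexity: usize,
+    max_depth: usize,
+) -> Schema<QueryRoot, EmptyMutation, EmptySubscription> {
+    Schema::build(QueryRoot, EmptyMutation, EmptySubscription)
+        // Reject queries whose estimated complexity exceeds the configured bound.
+        .limit_complexity(max_complexity)
+        // Reject queries nested deeper than the configured bound.
+        .limit_depth(max_depth)
+        .finish()
+}
+```
+
+Keeping these bounds in one builder function makes it straightforward to thread the values from `infra.api` config through to the schema.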
+ +## Status Summary (Current) + +- Server & routing: READY +- Readiness endpoint: READY (DB ping + placeholder caught_up) +- DB pool: READY +- GraphQL base schema: READY (serviceInfo) +- Repo layer: NOT IMPLEMENTED +- SPO domain models: NOT IMPLEMENTED +- Subscriptions: NOT IMPLEMENTED +- Metrics enrichment: NOT IMPLEMENTED +- NATS integration: NOT IMPLEMENTED + +--- +Last updated: 2025-09-18 diff --git a/spo-api/Cargo.toml b/spo-api/Cargo.toml new file mode 100644 index 000000000..5de3279e5 --- /dev/null +++ b/spo-api/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "spo-api" +description = "SPO API" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +readme = { workspace = true } +homepage = { workspace = true } +repository = { workspace = true } +documentation = { workspace = true } +publish = { workspace = true } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ "--cfg", "docsrs" ] + +[dependencies] +anyhow = { workspace = true } +async-graphql = { workspace = true, features = [ "uuid" ] } +async-graphql-axum = { workspace = true } +axum = { workspace = true, features = [ "http2" ] } +byte-unit-serde = { workspace = true } +clap = { workspace = true, features = [ "derive" ] } +derive_more = { workspace = true, features = [ "debug", "display", "from" ] } +fastrace = { workspace = true, features = [ "enable" ] } +fastrace-axum = { workspace = true } +futures = { workspace = true } +indexer-common = { path = "../indexer-common" } +indoc = { workspace = true } +log = { workspace = true, features = [ "kv" ] } +metrics = { workspace = true } +serde = { workspace = true, features = [ "derive" ] } +serde_with = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = [ "macros", "rt-multi-thread", "time", "signal" ] } +tower = { workspace = true } +tower-http = { workspace = true, features = [ "cors", "limit" ] } +trait-variant = { workspace = true } +uuid = { workspace = true, features = [ "v7" ], optional = true } +sqlx = { workspace = true, features = [ "runtime-tokio", "postgres" ] } +regex = { workspace = true } +once_cell = { workspace = true } + +[features] +cloud = [ "indexer-common/cloud", "uuid" ] + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = [ 'cfg(coverage_nightly)' ] } + +[package.metadata.cargo-shear] +ignored = [ "uuid" ] diff --git a/spo-api/Dockerfile b/spo-api/Dockerfile new file mode 100644 index 000000000..470273e5f --- /dev/null +++ b/spo-api/Dockerfile @@ -0,0 +1,33 @@ +ARG RUST_VERSION=1.89 +FROM rust:${RUST_VERSION}-bookworm AS chef +WORKDIR /build +RUN cargo install cargo-chef --version 0.1.72 + +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +SHELL ["/bin/bash", "-c"] +ARG PROFILE=release +RUN git config --global url."https://@github.com".insteadOf "ssh://git@github.com" +COPY --from=planner /build/recipe.json recipe.json +RUN --mount=type=secret,id=netrc,target=/root/.netrc \ + cargo chef cook --profile $PROFILE --recipe-path recipe.json +COPY . . 
+RUN --mount=type=secret,id=netrc,target=/root/.netrc \
+    cargo build -p spo-api --locked --features cloud --profile $PROFILE && \
+    mkdir -p /runtime/usr/local/bin /runtime/opt/spo-api && \
+    mv "./target/${PROFILE/dev/debug}/spo-api" /runtime/usr/local/bin/ && \
+    install -Dm755 spo-api/bin/entrypoint.sh /runtime/usr/local/bin/entrypoint.sh && \
+    install -Dm644 spo-api/config.yaml /runtime/opt/spo-api/config.yaml
+
+FROM debian:bookworm-slim@sha256:b1a741487078b369e78119849663d7f1a5341ef2768798f7b7406c4240f86aef AS runtime
+RUN adduser --disabled-password --gecos "" --home "/nonexistent" --shell "/sbin/nologin" --no-create-home --uid "10001" appuser && \
+    mkdir /var/run/spo-api && \
+    chown appuser:appuser /var/run/spo-api
+COPY --from=builder --chown=appuser:appuser /runtime /
+USER appuser
+WORKDIR /opt/spo-api
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
+EXPOSE 8090
diff --git a/spo-api/README.md b/spo-api/README.md
new file mode 100644
index 000000000..41fb3fd3f
--- /dev/null
+++ b/spo-api/README.md
@@ -0,0 +1,163 @@
+# SPO API
+
+GraphQL API exposing SPO identity, pool metadata, and per-epoch performance.
+
+- HTTP (GraphQL + GraphiQL UI): /api/v1/graphql
+- WebSocket (GraphQL WS): /api/v1/graphql/ws
+- Readiness: /ready
+
+Open GraphiQL at http://localhost:8090/api/v1/graphql
+
+## Quick start
+
+Option A — cargo (local)
+
+```bash
+# Build
+cargo build -p spo-api --features cloud
+
+# Run (requires Postgres and password)
+export APP__INFRA__STORAGE__PASSWORD=indexer
+CONFIG_FILE=spo-api/config.yaml cargo run -p spo-api --features cloud
+```
+
+Option B — Docker Compose
+
+```bash
+# Ensure .env contains APP__INFRA__STORAGE__PASSWORD
+echo APP__INFRA__STORAGE__PASSWORD=indexer >> .env
+
+# Bring up DB and API
+docker compose up -d postgres spo-api
+
+# Health
+curl -f http://localhost:8090/ready
+```
+
+## Handy queries
+
+```graphql
+query ServiceInfo {
+  serviceInfo { name version network }
+}
+
+query LatestPerformance {
+  spoPerformanceLatest(limit: 10, offset: 0) {
+    epochNo
+    spoSkHex
+    produced
+    expected
+    poolIdHex
+  }
+}
+
+query PerformanceBySPO($spoSk: String!) {
+  spoPerformanceBySpoSk(spoSkHex: $spoSk, limit: 5, offset: 0) {
+    epochNo
+    produced
+    expected
+    identityLabel
+  }
+}
+
+query EpochPerformance($epoch: Int!) {
+  epochPerformance(epoch: $epoch, limit: 20, offset: 0) {
+    spoSkHex
+    produced
+    expected
+    poolIdHex
+  }
+}
+
+query SpoByPoolId($poolId: String!) {
+  spoByPoolId(poolIdHex: $poolId) {
+    poolIdHex
+    sidechainPubkeyHex
+    name
+    ticker
+  }
+}
+
+query SpoList {
+  spoList(limit: 10, offset: 0) {
+    poolIdHex
+    sidechainPubkeyHex
+    name
+    ticker
+    homepageUrl
+    logoUrl
+  }
+}
+
+query CurrentEpochInfo {
+  currentEpochInfo {
+    epochNo
+    durationSeconds
+    elapsedSeconds
+  }
+}
+
+query EpochUtilization($epoch: Int!) {
+  epochUtilization(epoch: $epoch)
+}
+
+query SpoCount {
+  spoCount
+}
+```
+
+## Operation reference (v1)
+
+- serviceInfo: ServiceInfo!
+- spoIdentities(limit: Int = 50, offset: Int = 0): [SpoIdentity!]!
+- spoIdentityByPoolId(poolIdHex: String!): SpoIdentity
+- poolMetadata(poolIdHex: String!): PoolMetadata
+- poolMetadataList(limit: Int = 50, offset: Int = 0, withNameOnly: Boolean = false): [PoolMetadata!]!
+- spoList(limit: Int = 20, offset: Int = 0): [Spo!]!
+- spoByPoolId(poolIdHex: String!): Spo
+- spoCompositeByPoolId(poolIdHex: String!): SpoComposite
+- stakePoolOperators(limit: Int = 20): [String!]!
+- spoPerformanceLatest(limit: Int = 20, offset: Int = 0): [EpochPerf!]!
+- spoPerformanceBySpoSk(spoSkHex: String!, limit: Int = 100, offset: Int = 0): [EpochPerf!]!
+- epochPerformance(epoch: Int!, limit: Int = 100, offset: Int = 0): [EpochPerf!]! +- currentEpochInfo: EpochInfo +- epochUtilization(epoch: Int!): Float +- spoCount: BigInt + +Key return types (selected fields): + +- SpoIdentity: poolIdHex, mainchainPubkeyHex, sidechainPubkeyHex, auraPubkeyHex +- PoolMetadata: poolIdHex, hexId, name, ticker, homepageUrl, logoUrl +- Spo: poolIdHex, sidechainPubkeyHex, auraPubkeyHex, name, ticker, homepageUrl, logoUrl +- EpochPerf: epochNo, spoSkHex, produced, expected, identityLabel, poolIdHex +- EpochInfo: epochNo, durationSeconds, elapsedSeconds + +Notes + +- Identifiers are stored as plain strings (hex text), not BYTEA. Supply lowercase hex without 0x where possible. +- Performance joins use spo_sk (sidechain key) as the canonical identity. +- Subscriptions will be added later (NATS integration). + +## Configuration + +Excerpt (see spo-api/config.yaml): + +```yaml +infra: + storage: + host: localhost + port: 5432 + dbname: indexer + user: indexer + sslmode: prefer + max_connections: 10 + idle_timeout: 1m + max_lifetime: 5m + api: + address: 0.0.0.0 + port: 8090 + max_complexity: 2000 + max_depth: 50 +``` + +Provide the password via env: APP__INFRA__STORAGE__PASSWORD. diff --git a/spo-api/bin/entrypoint.sh b/spo-api/bin/entrypoint.sh new file mode 100644 index 000000000..2919086c3 --- /dev/null +++ b/spo-api/bin/entrypoint.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +trap 'rm /var/run/spo-api/running' EXIT +trap 'kill -SIGINT $PID' INT +trap 'kill -SIGTERM $PID' TERM + +touch /var/run/spo-api/running +spo-api & +PID=$! +wait $PID diff --git a/spo-api/config.yaml b/spo-api/config.yaml new file mode 100644 index 000000000..b6d398f9c --- /dev/null +++ b/spo-api/config.yaml @@ -0,0 +1,36 @@ +run_migrations: false + +application: + network_id: "preview" + +infra: + storage: + host: "localhost" + port: 5432 + dbname: "indexer" + user: "indexer" + sslmode: "prefer" + max_connections: 10 + idle_timeout: "1m" + max_lifetime: "5m" + + pub_sub: + url: "localhost:4222" + username: "indexer" + + api: + address: "0.0.0.0" + port: 8090 + request_body_limit: "1MiB" + max_complexity: 200 + max_depth: 15 + +telemetry: + tracing: + enabled: false + service_name: "spo-api" + otlp_exporter_endpoint: "http://localhost:4317" + metrics: + enabled: false + address: "0.0.0.0" + port: 9000 diff --git a/spo-api/src/application.rs b/spo-api/src/application.rs new file mode 100644 index 000000000..636351996 --- /dev/null +++ b/spo-api/src/application.rs @@ -0,0 +1,49 @@ +// This file is part of midnight-indexer. +// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::domain::Api; +use anyhow::Context as AnyhowContext; +use indexer_common::domain::{NetworkId, Subscriber}; +use log::warn; +use serde::Deserialize; +use serde_with::{DisplayFromStr, serde_as}; +use std::sync::{Arc, atomic::AtomicBool}; +use tokio::{select, signal::unix::Signal, task}; + +#[serde_as] +#[derive(Debug, Clone, Deserialize)] +pub struct Config { + #[serde_as(as = "DisplayFromStr")] + pub network_id: NetworkId, +} + +pub async fn run( + config: Config, + api: impl Api, + _subscriber: impl Subscriber, + mut sigterm: Signal, +) -> anyhow::Result<()> { + let Config { network_id } = config; + + // For now we don't track catch-up; expose ready immediately. We'll wire NATS later. + let caught_up = Arc::new(AtomicBool::new(true)); + + let serve_api_task = { + task::spawn(async move { + api.serve(network_id, caught_up) + .await + .context("serving SPO API") + }) + }; + + select! 
{
+        result = serve_api_task => result
+            .context("serve_api_task panicked")
+            .and_then(|r| r.context("serve_api_task failed")),
+        _ = sigterm.recv() => {
+            warn!("SIGTERM received");
+            Ok(())
+        }
+    }
+}
diff --git a/spo-api/src/config.rs b/spo-api/src/config.rs
new file mode 100644
index 000000000..0ffdcc47d
--- /dev/null
+++ b/spo-api/src/config.rs
@@ -0,0 +1,19 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{application, infra};
+#[cfg(feature = "cloud")]
+#[derive(Debug, Clone, serde::Deserialize)]
+pub struct Config {
+    pub run_migrations: bool,
+
+    #[serde(rename = "application")]
+    pub application_config: application::Config,
+
+    #[serde(rename = "infra")]
+    pub infra_config: infra::Config,
+
+    #[serde(rename = "telemetry")]
+    pub telemetry_config: indexer_common::telemetry::Config,
+}
diff --git a/spo-api/src/domain.rs b/spo-api/src/domain.rs
new file mode 100644
index 000000000..3d3f68271
--- /dev/null
+++ b/spo-api/src/domain.rs
@@ -0,0 +1,47 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use indexer_common::domain::NetworkId;
+use std::{
+    error::Error as StdError,
+    sync::{Arc, atomic::AtomicBool},
+};
+
+#[trait_variant::make(Send)]
+pub trait Api
+where
+    Self: 'static,
+{
+    type Error: StdError + Send + Sync + 'static;
+
+    async fn serve(
+        self,
+        network_id: NetworkId,
+        caught_up: Arc<AtomicBool>,
+    ) -> Result<(), Self::Error>;
+}
+
+// --- SPO domain types (initial draft) ---
+
+#[derive(Debug, Clone)]
+pub struct StakePoolOperator {
+    pub id: String,                   // canonical operator id (e.g. hash or bech32)
+    pub identity_key: Option<String>, // optional identity / metadata key
+    pub display_name: Option<String>,
+    pub created_at_epoch: Option<i64>,
+    pub last_active_epoch: Option<i64>,
+    pub performance_score: Option<f64>,
+    pub commission_rate: Option<f64>,
+    pub total_stake: Option<String>,  // string to avoid premature big-int choice
+}
+
+#[derive(Debug, Clone)]
+pub struct EpochPerformance {
+    pub epoch: i64,
+    pub operator_id: String,
+    pub blocks_produced: Option<i64>,
+    pub blocks_expected: Option<i64>,
+    pub performance_ratio: Option<f64>,
+    pub stake_share: Option<f64>,
+}
diff --git a/spo-api/src/infra.rs b/spo-api/src/infra.rs
new file mode 100644
index 000000000..3308ecd4d
--- /dev/null
+++ b/spo-api/src/infra.rs
@@ -0,0 +1,16 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod api;
+
+#[cfg_attr(docsrs, doc(cfg(feature = "cloud")))]
+#[cfg(feature = "cloud")]
+#[derive(Debug, Clone, serde::Deserialize)]
+pub struct Config {
+    #[serde(rename = "api")]
+    pub api_config: api::Config,
+
+    #[serde(rename = "storage")]
+    pub storage_config: indexer_common::infra::pool::postgres::Config,
+}
diff --git a/spo-api/src/infra/api/mod.rs b/spo-api/src/infra/api/mod.rs
new file mode 100644
index 000000000..b172fdff0
--- /dev/null
+++ b/spo-api/src/infra/api/mod.rs
@@ -0,0 +1,198 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod v1; + +use crate::domain::Api; +use async_graphql::Context; +use axum::{ + Router, + extract::{FromRef, State}, + http::StatusCode, + response::IntoResponse, + routing::get, +}; +use indexer_common::{domain::NetworkId, infra::pool::postgres::PostgresPool}; +use log::info; +use serde::Deserialize; +use std::{ + io, + net::IpAddr, + sync::{ + Arc, + atomic::{AtomicBool, Ordering}, + }, +}; +use thiserror::Error; +use tokio::signal::unix::{SignalKind, signal}; +use tower::ServiceBuilder; +use tower_http::{cors::CorsLayer, limit::RequestBodyLimitLayer}; + +#[derive(Clone)] +pub struct Db(pub PostgresPool); + +#[derive(Clone)] +pub struct AppState { + pub caught_up: Arc, + pub db: Option, +} + +impl FromRef for Arc { + fn from_ref(s: &AppState) -> Arc { + s.caught_up.clone() + } +} +impl FromRef for Option { + fn from_ref(s: &AppState) -> Option { + s.db.clone() + } +} + +pub struct AxumApi { + config: Config, + db: Option, +} + +impl AxumApi { + pub fn new(config: Config) -> Self { + Self { config, db: None } + } + pub fn with_db(mut self, db: Db) -> Self { + self.db = Some(db); + self + } +} + +impl Api for AxumApi { + type Error = AxumApiError; + + async fn serve( + self, + network_id: NetworkId, + caught_up: Arc, + ) -> Result<(), Self::Error> { + let Config { + address, + port, + request_body_limit, + max_complexity, + max_depth, + } = self.config; + + // In the current shape AxumApi doesn't own the pool; we keep readiness simple (caught_up only). + let app = make_app( + caught_up, + self.db, + network_id, + max_complexity, + max_depth, + request_body_limit as usize, + ); + + let listener = tokio::net::TcpListener::bind((address, port)) + .await + .map_err(AxumApiError::Bind)?; + info!(address:?, port; "listening to TCP connections"); + axum::serve(listener, app) + .with_graceful_shutdown(shutdown_signal()) + .await + .map_err(AxumApiError::Serve) + } +} + +#[derive(Debug, Clone, Deserialize)] +pub struct Config { + pub address: IpAddr, + pub port: u16, + #[serde(with = "byte_unit_serde")] + pub request_body_limit: u64, + pub max_complexity: usize, + pub max_depth: usize, +} + +#[derive(Debug, Error)] +pub enum AxumApiError { + #[error("cannot bind tcp listener")] + Bind(#[source] io::Error), + #[error("cannot serve API")] + Serve(#[source] io::Error), +} + +pub struct Metrics; +impl Default for Metrics { + fn default() -> Self { Self } +} + +#[allow(clippy::too_many_arguments)] +fn make_app( + caught_up: Arc, + db: Option, + network_id: NetworkId, + max_complexity: usize, + max_depth: usize, + request_body_limit: usize, +) -> Router { + let app_state = AppState { caught_up, db }; + let v1_app = v1::make_app(network_id, max_complexity, max_depth, app_state.db.clone()) + .with_state(app_state.clone()); + + Router::new() + .route("/ready", get(ready)) + .nest("/api/v1", v1_app) + .with_state(app_state) + .layer( + ServiceBuilder::new() + .layer(RequestBodyLimitLayer::new(request_body_limit)) + .layer(CorsLayer::permissive()), + ) +} + +async fn ready( + State(caught_up): State>, + State(db): State>, +) -> impl IntoResponse { + if !caught_up.load(Ordering::Acquire) { + ( + StatusCode::SERVICE_UNAVAILABLE, + "indexer has not yet caught up with the node", + ) + .into_response() + } else { + // if a DB is provided, try a lightweight ping + if let Some(Db(pool)) = db { + if let Err(_e) = sqlx::query_scalar::<_, i32>("SELECT 1") + .fetch_one(&*pool) + .await + { + return (StatusCode::SERVICE_UNAVAILABLE, 
"database not ready").into_response(); + } + } + StatusCode::OK.into_response() + } +} + +// removed custom 400->413 transform; default behavior is acceptable for MVP + +async fn shutdown_signal() { + signal(SignalKind::terminate()) + .expect("install SIGTERM handler") + .recv() + .await; +} + +pub trait ContextExt { + fn get_network_id(&self) -> NetworkId; + fn get_metrics(&self) -> &Metrics; +} +impl ContextExt for Context<'_> { + fn get_network_id(&self) -> NetworkId { + self.data::() + .cloned() + .expect("NetworkId is stored in Context") + } + fn get_metrics(&self) -> &Metrics { + self.data::() + .expect("Metrics is stored in Context") + } +} diff --git a/spo-api/src/infra/api/v1/mod.rs b/spo-api/src/infra/api/v1/mod.rs new file mode 100644 index 000000000..d506af29c --- /dev/null +++ b/spo-api/src/infra/api/v1/mod.rs @@ -0,0 +1,1543 @@ +// This file is part of midnight-indexer. +// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{AppState, ContextExt, Db, Metrics}; +use async_graphql::{Context, EmptyMutation, EmptySubscription, Object, Schema}; +use async_graphql_axum::{GraphQL, GraphQLSubscription}; +use axum::{ + Router, + response::IntoResponse, + routing::{get, post_service}, +}; +use indexer_common::domain::NetworkId; +use log::{info, warn}; +// no extra imports needed here +use regex::Regex; +// no rust_decimal to keep sqlx decoding simple; we parse numerics as strings when needed + +const DEFAULT_PERFORMANCE_LIMIT: i64 = 20; + +type EpochPerfRow = ( + i64, // epoch_no (BIGINT) + String, // spo_sk_hex + i32, // produced_blocks (INT) + i32, // expected_blocks (INT) + Option, // identity_label + Option, // stake_snapshot + Option, // pool_id_hex + Option, // validator_class +); + +pub fn make_app( + network_id: NetworkId, + max_complexity: usize, + max_depth: usize, + db: Option, +) -> Router { + let schema = Schema::build(Query::default(), EmptyMutation, EmptySubscription) + .limit_complexity(max_complexity) + .limit_depth(max_depth) + .data(network_id) + .data(Metrics::default()) + .data(db) + // Inject optional Db from AppState via Router state in handlers + .finish(); + + // Runtime confirmation that extended schema is present. + if schema.sdl().contains("spoCompositeByPoolId") { + info!("GraphQL schema includes spoCompositeByPoolId"); + } else { + warn!("spoCompositeByPoolId missing from schema – ensure service rebuilt without cache"); + } + + Router::new() + // Support both /graphql and /graphql/ to avoid 404 (empty body -> GraphiQL JSON parse error) + .route("/graphql", get(graphiql)) + .route("/graphql/", get(graphiql)) + .route("/graphql", post_service(GraphQL::new(schema.clone()))) + .route("/graphql/", post_service(GraphQL::new(schema.clone()))) + .route_service("/graphql/ws", GraphQLSubscription::new(schema.clone())) + .route_service("/graphql/ws/", GraphQLSubscription::new(schema)) +} + +#[derive(Default)] +pub struct Query; + +#[Object(rename_fields = "camelCase")] +impl Query { + async fn service_info(&self, cx: &Context<'_>) -> ServiceInfo { + let network = format!("{}", cx.get_network_id()); + ServiceInfo { + name: "spo-api".into(), + version: env!("CARGO_PKG_VERSION").into(), + network, + } + } + + /// Cumulative total of currently registered SPOs over an epoch range, using first-seen epochs. + /// + /// Semantics: + /// - Domain is limited to pools present in spo_stake_snapshot ("current" pools), so the final + /// value equals spo_count by construction. 
+ /// - First-seen epoch per pool is computed as the minimum epoch where that pool_id appears in any of: + /// spo_history (via spo_identity), committee_membership (via spo_identity), spo_epoch_performance (via spo_identity). + /// - If a current pool has no appearances in those sources, it is assigned first_seen_epoch = to_epoch + /// (it will enter at the end of the requested window so totals match spo_count). + async fn registered_totals_series( + &self, + cx: &Context<'_>, + from_epoch: i64, + to_epoch: i64, + ) -> Vec { + let start = from_epoch.min(to_epoch); + let end = to_epoch.max(from_epoch); + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + WITH rng AS ( + SELECT generate_series($1::BIGINT, $2::BIGINT) AS epoch_no + ), + cur AS ( + SELECT s.pool_id + FROM spo_stake_snapshot s + ), + union_firsts AS ( + SELECT si.pool_id AS pool_id, MIN(sh.epoch_no)::BIGINT AS first_seen_epoch + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE si.pool_id IS NOT NULL + GROUP BY si.pool_id + UNION ALL + SELECT si.pool_id AS pool_id, MIN(cm.epoch_no)::BIGINT AS first_seen_epoch + FROM committee_membership cm + LEFT JOIN spo_identity si ON si.sidechain_pubkey = cm.sidechain_pubkey + WHERE si.pool_id IS NOT NULL + GROUP BY si.pool_id + UNION ALL + SELECT si.pool_id AS pool_id, MIN(sep.epoch_no)::BIGINT AS first_seen_epoch + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + WHERE si.pool_id IS NOT NULL + GROUP BY si.pool_id + ), + firsts0 AS ( + SELECT pool_id, MIN(first_seen_epoch)::BIGINT AS first_seen_epoch + FROM union_firsts + GROUP BY pool_id + ), + firsts_cur AS ( + SELECT c.pool_id, + COALESCE(f0.first_seen_epoch, $2::BIGINT) AS first_seen_epoch + FROM cur c + LEFT JOIN firsts0 f0 ON f0.pool_id = c.pool_id + ), + agg AS ( + SELECT r.epoch_no, + COUNT(*) FILTER (WHERE fc.first_seen_epoch <= r.epoch_no) AS total_registered, + COUNT(*) FILTER (WHERE fc.first_seen_epoch = r.epoch_no) AS newly_registered + FROM rng r + CROSS JOIN firsts_cur fc + GROUP BY r.epoch_no + ) + SELECT epoch_no, total_registered, newly_registered + FROM agg + ORDER BY epoch_no + "#; + match sqlx::query_as::<_, (i64, i64, i64)>(sql) + .bind(start) + .bind(end) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows + .into_iter() + .map(|(epoch_no, total_registered, newly_registered)| RegisteredTotals { + epoch_no, + total_registered, + newly_registered, + }) + .collect(), + Err(e) => { + warn!("registered_totals_series query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + // ------------------------------------------------- + // Identity (no metadata) queries + // ------------------------------------------------- + async fn spo_identities( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + ) -> Vec { + let limit = limit.unwrap_or(50).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT pool_id AS pool_id_hex, + mainchain_pubkey AS mainchain_pubkey_hex, + sidechain_pubkey AS sidechain_pubkey_hex, + aura_pubkey AS aura_pubkey_hex, + 'UNKNOWN' AS validator_class + FROM spo_identity + WHERE pool_id IS NOT NULL + ORDER BY mainchain_pubkey + LIMIT $1 OFFSET $2 + "#; + match sqlx::query_as::<_, (String, String, String, Option, String)>(sql) + .bind(limit) + .bind(offset) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows + .into_iter() + .map( + |( + pool_id_hex, + 
mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + )| SpoIdentity { + pool_id_hex, + mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + }, + ) + .collect(), + Err(e) => { + warn!("spo_identities query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + async fn spo_identity_by_pool_id( + &self, + cx: &Context<'_>, + pool_id_hex: String, + ) -> Option { + let pool_id_hex = normalize_hex(&pool_id_hex)?; + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT pool_id AS pool_id_hex, + mainchain_pubkey AS mainchain_pubkey_hex, + sidechain_pubkey AS sidechain_pubkey_hex, + aura_pubkey AS aura_pubkey_hex, + 'UNKNOWN' AS validator_class + FROM spo_identity + WHERE pool_id = $1 + LIMIT 1 + "#; + match sqlx::query_as::<_, (String, String, String, Option, String)>(sql) + .bind(&pool_id_hex) + .fetch_optional(&**pool) + .await + { + Ok(Some(( + pool_id_hex, + mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + ))) => Some(SpoIdentity { + pool_id_hex, + mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + }), + Ok(None) => None, + Err(e) => { + warn!("spo_identity_by_pool_id query failed: {e}"); + None + } + } + } else { + None + } + } + + // ------------------------------------------------- + // Metadata queries + // ------------------------------------------------- + async fn pool_metadata(&self, cx: &Context<'_>, pool_id_hex: String) -> Option { + let pool_id_hex = normalize_hex(&pool_id_hex)?; + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT pool_id AS pool_id_hex, + hex_id AS hex_id, + name, ticker, homepage_url, url AS logo_url + FROM pool_metadata_cache + WHERE pool_id = $1 + LIMIT 1 + "#; + match sqlx::query_as::< + _, + ( + String, + Option, + Option, + Option, + Option, + Option, + ), + >(sql) + .bind(&pool_id_hex) + .fetch_optional(&**pool) + .await + { + Ok(Some((pool_id_hex, hex_id, name, ticker, homepage_url, logo_url))) => { + Some(PoolMetadata { + pool_id_hex, + hex_id, + name, + ticker, + homepage_url, + logo_url, + }) + } + Ok(None) => None, + Err(e) => { + warn!("pool_metadata query failed: {e}"); + None + } + } + } else { + None + } + } + + async fn pool_metadata_list( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + with_name_only: Option, + ) -> Vec { + let limit = limit.unwrap_or(50).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + let name_only = with_name_only.unwrap_or(false); + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = if name_only { + r#" + SELECT pool_id AS pool_id_hex, + hex_id AS hex_id, + name, ticker, homepage_url, url AS logo_url + FROM pool_metadata_cache + WHERE name IS NOT NULL OR ticker IS NOT NULL + ORDER BY pool_id + LIMIT $1 OFFSET $2 + "# + } else { + r#" + SELECT pool_id AS pool_id_hex, + hex_id AS hex_id, + name, ticker, homepage_url, url AS logo_url + FROM pool_metadata_cache + ORDER BY pool_id + LIMIT $1 OFFSET $2 + "# + }; + match sqlx::query_as::< + _, + ( + String, + Option, + Option, + Option, + Option, + Option, + ), + >(sql) + .bind(limit) + .bind(offset) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows + .into_iter() + .map( + |(pool_id_hex, hex_id, name, ticker, homepage_url, logo_url)| { + PoolMetadata { + pool_id_hex, + hex_id, + name, + ticker, + homepage_url, + logo_url, + } + }, + ) + .collect(), + Err(e) => { + 
warn!("pool_metadata_list query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + // ------------------------------------------------- + // Composite query + // ------------------------------------------------- + async fn spo_composite_by_pool_id( + &self, + cx: &Context<'_>, + pool_id_hex: String, + ) -> Option { + let pool_id_hex = normalize_hex(&pool_id_hex)?; + let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) else { + return None; + }; + + let identity_sql = r#" + SELECT pool_id AS pool_id_hex, + mainchain_pubkey AS mainchain_pubkey_hex, + sidechain_pubkey AS sidechain_pubkey_hex, + aura_pubkey AS aura_pubkey_hex, + 'UNKNOWN' AS validator_class + FROM spo_identity + WHERE pool_id = $1 + LIMIT 1 + "#; + let identity = match sqlx::query_as::< + _, + (String, String, String, Option, String), + >(identity_sql) + .bind(&pool_id_hex) + .fetch_optional(&**pool) + .await + { + Ok(Some(( + pool_id_hex, + mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + ))) => Some(SpoIdentity { + pool_id_hex, + mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + }), + Ok(None) => None, + Err(e) => { + warn!("spo_composite_by_pool_id identity query failed: {e}"); + None + } + }; + + let metadata_sql = r#" + SELECT pool_id AS pool_id_hex, + hex_id AS hex_id, + name, ticker, homepage_url, url AS logo_url + FROM pool_metadata_cache + WHERE pool_id = $1 + LIMIT 1 + "#; + let metadata = match sqlx::query_as::< + _, + ( + String, + Option, + Option, + Option, + Option, + Option, + ), + >(metadata_sql) + .bind(&pool_id_hex) + .fetch_optional(&**pool) + .await + { + Ok(Some((pool_id_hex, hex_id, name, ticker, homepage_url, logo_url))) => { + Some(PoolMetadata { + pool_id_hex, + hex_id, + name, + ticker, + homepage_url, + logo_url, + }) + } + Ok(None) => None, + Err(e) => { + warn!("spo_composite_by_pool_id metadata query failed: {e}"); + None + } + }; + + let performance = if let Some(identity_ref) = identity.as_ref() { + // Performance rows are keyed by sidechain_pubkey (sep.spo_sk). + let sk_hex = &identity_ref.sidechain_pubkey_hex; + let perf_sql = r#" + SELECT sep.epoch_no, + sep.spo_sk AS spo_sk_hex, + sep.produced_blocks, + sep.expected_blocks, + sep.identity_label, + NULL::TEXT AS stake_snapshot, + si.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + WHERE sep.spo_sk = $1 + ORDER BY sep.epoch_no DESC + LIMIT $2 + "#; + match sqlx::query_as::< + _, + ( + i64, + String, + i32, + i32, + Option, + Option, + Option, + Option, + ), + >(perf_sql) + .bind(sk_hex) + .bind(DEFAULT_PERFORMANCE_LIMIT) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows.into_iter().map(EpochPerf::from_tuple).collect(), + Err(e) => { + warn!("spo_composite_by_pool_id performance query failed: {e}"); + vec![] + } + } + } else { + vec![] + }; + + Some(SpoComposite { + identity, + metadata, + performance, + }) + } + + /// List stake pool operator identifiers (placeholder – returns empty if table missing / error). 
+ async fn stake_pool_operators(&self, cx: &Context<'_>, limit: Option) -> Vec { + let limit = limit.unwrap_or(20).clamp(1, 100) as i64; + // Access optional Db from Router state (AppState) + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT encode(sep.spo_sk,'hex') AS spo_sk_hex + FROM spo_epoch_performance sep + GROUP BY sep.spo_sk + ORDER BY MAX(sep.produced_blocks) DESC + LIMIT $1 + "#; + match sqlx::query_scalar::<_, String>(sql) + .bind(limit) + .fetch_all(&**pool) + .await + { + Ok(rows) => return rows, + Err(e) => { + warn!("stake_pool_operators query failed: {e}"); + return vec![]; + } + } + } + vec![] + } + + /// Latest SPO performance entries ordered by epoch (desc) and produced blocks (desc). + async fn spo_performance_latest( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + ) -> Vec { + let limit = limit + .unwrap_or(DEFAULT_PERFORMANCE_LIMIT as i32) + .clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT sep.epoch_no, + sep.spo_sk AS spo_sk_hex, + sep.produced_blocks, + sep.expected_blocks, + sep.identity_label, + NULL::TEXT AS stake_snapshot, + si.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + ORDER BY sep.epoch_no DESC, sep.produced_blocks DESC + LIMIT $1 OFFSET $2 + "#; + match sqlx::query_as::<_, EpochPerfRow>(sql) + .bind(limit) + .bind(offset) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows.into_iter().map(EpochPerf::from_tuple).collect(), + Err(e) => { + warn!("spo_performance_latest query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + /// Performance history for a single SPO (identified by its side/mainchain key hex representation). + async fn spo_performance_by_spo_sk( + &self, + cx: &Context<'_>, + spo_sk_hex: String, + limit: Option, + offset: Option, + ) -> Vec { + let spo_sk_hex = match normalize_hex(&spo_sk_hex) { + Some(hex) => hex, + None => return vec![], + }; + let limit = limit.unwrap_or(100).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT sep.epoch_no, + sep.spo_sk AS spo_sk_hex, + sep.produced_blocks, + sep.expected_blocks, + sep.identity_label, + NULL::TEXT AS stake_snapshot, + si.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + WHERE sep.spo_sk = $1 + ORDER BY sep.epoch_no DESC + LIMIT $2 OFFSET $3 + "#; + match sqlx::query_as::<_, EpochPerfRow>(sql) + .bind(&spo_sk_hex) + .bind(limit) + .bind(offset) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows.into_iter().map(EpochPerf::from_tuple).collect(), + Err(e) => { + warn!("spo_performance_by_spo_sk query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + /// Epoch performance for a given epoch, tolerant of missing identity records. 
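All of the list resolvers normalize paging the same way: take a default, clamp the limit into [1, max], floor the offset at zero. A tiny extraction of that pattern, purely illustrative (the resolvers above keep it inline), with the clamping behavior pinned by tests:

```rust
// Normalize GraphQL paging arguments into safe SQL bind values.
fn page_params(limit: Option<i32>, offset: Option<i32>, default: i32, max: i64) -> (i64, i64) {
    let limit = i64::from(limit.unwrap_or(default)).clamp(1, max);
    let offset = i64::from(offset.unwrap_or(0)).max(0);
    (limit, offset)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn clamps_and_floors() {
        assert_eq!(page_params(None, None, 100, 500), (100, 0));
        assert_eq!(page_params(Some(9999), Some(-3), 100, 500), (500, 0));
        assert_eq!(page_params(Some(0), Some(7), 100, 500), (1, 7));
    }
}
```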
+ async fn epoch_performance( + &self, + cx: &Context<'_>, + epoch: i64, + limit: Option, + offset: Option, + ) -> Vec { + let limit = limit.unwrap_or(100).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT sep.epoch_no, + sep.spo_sk AS spo_sk_hex, + sep.produced_blocks, + sep.expected_blocks, + sep.identity_label, + NULL::TEXT AS stake_snapshot, + si.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + WHERE sep.epoch_no = $1 + ORDER BY sep.produced_blocks DESC + LIMIT $2 OFFSET $3 + "#; + match sqlx::query_as::<_, EpochPerfRow>(sql) + .bind(epoch) + .bind(limit) + .bind(offset) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows.into_iter().map(EpochPerf::from_tuple).collect(), + Err(e) => { + warn!("epoch_performance query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + /// List SPOs with optional metadata, paginated. + async fn spo_list( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + search: Option, + ) -> Vec { + let limit = limit.unwrap_or(20).clamp(1, 200) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + let search = search.as_ref().and_then(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() { None } else { Some(trimmed.to_string()) } + }); + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + // Use spo_stake_snapshot as the canonical current set to align counts with spo_count. + let sql = if search.is_some() { + r#" + SELECT s.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class, + si.sidechain_pubkey AS sidechain_pubkey_hex, + si.aura_pubkey AS aura_pubkey_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url + FROM spo_stake_snapshot s + LEFT JOIN spo_identity si ON si.pool_id = s.pool_id + LEFT JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id + WHERE ( + pm.name ILIKE $3 OR pm.ticker ILIKE $3 OR pm.homepage_url ILIKE $3 OR s.pool_id ILIKE $4 + OR si.sidechain_pubkey ILIKE $4 OR si.aura_pubkey ILIKE $4 OR si.mainchain_pubkey ILIKE $4 + ) + ORDER BY COALESCE(si.mainchain_pubkey, s.pool_id) + LIMIT $1 OFFSET $2 + "# + } else { + r#" + SELECT s.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class, + si.sidechain_pubkey AS sidechain_pubkey_hex, + si.aura_pubkey AS aura_pubkey_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url + FROM spo_stake_snapshot s + LEFT JOIN spo_identity si ON si.pool_id = s.pool_id + LEFT JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id + ORDER BY COALESCE(si.mainchain_pubkey, s.pool_id) + LIMIT $1 OFFSET $2 + "# + }; + + // Build bind params + let mut q = sqlx::query_as::< + _, + ( + String, + String, + String, + Option, + Option, + Option, + Option, + Option, + ), + >(sql); + q = q.bind(limit).bind(offset); + if let Some(s) = search { + // For text fields use %term% ; for hex-like identifiers also use %term_no_0x% + let s_like = format!("%{}%", s); + let s_hex = normalize_hex(&s).unwrap_or_else(|| s.to_ascii_lowercase()); + let s_hex_like = format!("%{}%", s_hex); + q = q.bind(s_like).bind(s_hex_like); + } + + match q.fetch_all(&**pool).await { + Ok(rows) => rows + .into_iter() + .map( + |( + pool_id_hex, + validator_class, + sidechain_pubkey_hex, + aura_pubkey_hex, + name, + ticker, + homepage_url, + logo_url, + )| Spo { + pool_id_hex, + validator_class, + sidechain_pubkey_hex, + aura_pubkey_hex, + name, + ticker, + homepage_url, + logo_url, + }, + 
) + .collect(), + Err(e) => { + warn!("spo_list query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + /// Stake distribution for registered SPOs, based on spo_stake_snapshot (latest values). + async fn stake_distribution( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + search: Option, + order_by_stake_desc: Option, + ) -> Vec { + let limit = limit.unwrap_or(50).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + let search = search.as_ref().and_then(|s| { + let t = s.trim(); + if t.is_empty() { None } else { Some(t.to_string()) } + }); + + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + // Compute total across pools first for share calculation + let total_sql = r#" + SELECT COALESCE(SUM(s.live_stake), 0)::TEXT + FROM spo_stake_snapshot s + "#; + let total_live_str: String = match sqlx::query_scalar(total_sql) + .fetch_one(&**pool) + .await + { + Ok(v) => v, + Err(e) => { + warn!("stake_distribution total stake query failed: {e}"); + "0".to_string() + } + }; + let total_live_f64: f64 = total_live_str.parse::().unwrap_or(0.0); + + let order_desc = order_by_stake_desc.unwrap_or(true); + let base_select = if search.is_some() { + r#" + SELECT + pm.pool_id AS pool_id_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url, + (s.live_stake)::TEXT, (s.active_stake)::TEXT, s.live_delegators, s.live_saturation, + (s.declared_pledge)::TEXT, (s.live_pledge)::TEXT + FROM spo_stake_snapshot s + JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id + WHERE ( + pm.name ILIKE $3 OR pm.ticker ILIKE $3 OR pm.homepage_url ILIKE $3 OR pm.pool_id ILIKE $4 + ) + ORDER BY COALESCE(s.live_stake, 0) DESC, pm.pool_id + LIMIT $1 OFFSET $2 + "# + } else { + r#" + SELECT + pm.pool_id AS pool_id_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url, + (s.live_stake)::TEXT, (s.active_stake)::TEXT, s.live_delegators, s.live_saturation, + (s.declared_pledge)::TEXT, (s.live_pledge)::TEXT + FROM spo_stake_snapshot s + JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id + ORDER BY COALESCE(s.live_stake, 0) DESC, pm.pool_id + LIMIT $1 OFFSET $2 + "# + }; + + // optionally flip order if ascending requested + let sql = if order_desc { base_select.to_string() } else { base_select.replace("DESC", "ASC") }; + + let mut q = sqlx::query_as::< + _, + ( + String, // pool_id_hex + Option, // name + Option, // ticker + Option, // homepage_url + Option, // logo_url + Option, // live_stake (TEXT) + Option, // active_stake (TEXT) + Option, // live_delegators + Option, // live_saturation + Option, // declared_pledge (TEXT) + Option, // live_pledge (TEXT) + ), + >(&sql) + .bind(limit) + .bind(offset); + + if let Some(s) = search { + let s_like = format!("%{}%", s); + q = q.bind(s_like.clone()).bind(s_like); + } + + match q.fetch_all(&**pool).await { + Ok(rows) => rows + .into_iter() + .map(|(pool_id_hex, name, ticker, homepage_url, logo_url, live_stake, active_stake, live_delegators, live_saturation, declared_pledge, live_pledge)| { + // Compute share = live_stake / total_live + let share = { + let ls = live_stake.as_deref().unwrap_or("0"); + let lv = ls.parse::().unwrap_or(0.0); + if total_live_f64 > 0.0 { lv / total_live_f64 } else { 0.0 } + }; + let live_delegators_i64 = live_delegators.map(|v| v as i64); + StakeShare { + pool_id_hex, + name, + ticker, + homepage_url, + logo_url, + live_stake, + active_stake, + live_delegators: live_delegators_i64, + live_saturation, + declared_pledge, + live_pledge, + stake_share: Some(share), + } + }) + 
.collect(), + Err(e) => { + warn!("stake_distribution query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + /// Find single SPO by pool ID (hex string). + async fn spo_by_pool_id(&self, cx: &Context<'_>, pool_id_hex: String) -> Option { + let pool_id_hex = normalize_hex(&pool_id_hex)?; + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + // Accept hex string; decode on DB side. pool_id is BYTEA. + let query = r#" + SELECT si.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class, + si.sidechain_pubkey AS sidechain_pubkey_hex, + si.aura_pubkey AS aura_pubkey_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url + FROM spo_identity si + LEFT JOIN pool_metadata_cache pm ON pm.pool_id = si.pool_id + WHERE si.pool_id = $1 + LIMIT 1 + "#; + match sqlx::query_as::< + _, + ( + String, + String, + String, + Option, + Option, + Option, + Option, + Option, + ), + >(query) + .bind(&pool_id_hex) + .fetch_optional(&**pool) + .await + { + Ok(Some(( + pool_id_hex, + validator_class, + sidechain_pubkey_hex, + aura_pubkey_hex, + name, + ticker, + homepage_url, + logo_url, + ))) => Some(Spo { + pool_id_hex, + validator_class, + sidechain_pubkey_hex, + aura_pubkey_hex, + name, + ticker, + homepage_url, + logo_url, + }), + Err(e) => { + warn!("spo_by_pool_id query failed: {e}"); + None + } + Ok(None) => None, + } + } else { + None + } + } + + // ------------------------------------------------- + // KPI / Dashboard helpers + // ------------------------------------------------- + /// Current epoch info with duration and elapsed seconds. + async fn current_epoch_info(&self, cx: &Context<'_>) -> Option { + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + WITH last AS ( + SELECT + epoch_no, + EXTRACT(EPOCH FROM starts_at)::BIGINT AS starts_s, + EXTRACT(EPOCH FROM ends_at)::BIGINT AS ends_s, + EXTRACT(EPOCH FROM (ends_at - starts_at))::BIGINT AS dur_s, + EXTRACT(EPOCH FROM NOW())::BIGINT AS now_s + FROM epochs + ORDER BY epoch_no DESC + LIMIT 1 + ), calc AS ( + SELECT + epoch_no, starts_s, ends_s, dur_s, now_s, + CASE WHEN ends_s > now_s THEN 0 + ELSE ((now_s - ends_s) / dur_s)::BIGINT + 1 END AS n + FROM last + ), synth AS ( + SELECT + (epoch_no + n) AS epoch_no, + dur_s AS duration_seconds, + CASE WHEN n = 0 THEN LEAST(GREATEST(now_s - starts_s, 0), dur_s) + ELSE LEAST(GREATEST(now_s - (ends_s + (n - 1) * dur_s), 0), dur_s) + END AS elapsed_seconds + FROM calc + ) + SELECT epoch_no, duration_seconds, elapsed_seconds FROM synth + "#; + match sqlx::query_as::<_, (i64, i64, i64)>(sql).fetch_optional(&**pool).await { + Ok(Some((epoch_no, duration_seconds, elapsed_seconds))) => Some(EpochInfo { + epoch_no, + duration_seconds, + elapsed_seconds, + }), + Ok(None) => None, + Err(e) => { + warn!("current_epoch_info query failed: {e}"); + None + } + } + } else { + None + } + } + + /// Epoch-wide block utilization = sum(produced) / sum(expected) (0.0 if no data or expected == 0). 
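The `current_epoch_info` SQL above extrapolates past the last stored `epochs` row, and the nested CASE/LEAST/GREATEST logic is easier to sanity-check in plain Rust. A sketch of the same arithmetic, not part of the service; all inputs are Unix seconds:

```rust
// Mirror of the SQL: given the last stored epoch's bounds, project the
// current epoch number, its duration, and the elapsed seconds at `now`.
fn project_epoch(epoch_no: i64, starts_s: i64, ends_s: i64, now_s: i64) -> (i64, i64, i64) {
    let dur_s = ends_s - starts_s;
    // How many whole epochs have passed since the stored one ended?
    // Matches `((now_s - ends_s) / dur_s)::BIGINT + 1` (truncating division).
    let n = if ends_s > now_s { 0 } else { (now_s - ends_s) / dur_s + 1 };
    let elapsed = if n == 0 {
        (now_s - starts_s).clamp(0, dur_s)
    } else {
        (now_s - (ends_s + (n - 1) * dur_s)).clamp(0, dur_s)
    };
    (epoch_no + n, dur_s, elapsed)
}

fn main() {
    // Stored epoch 100 ran [1_000, 2_000); "now" is well past its end.
    let (epoch, dur, elapsed) = project_epoch(100, 1_000, 2_000, 4_500);
    assert_eq!((epoch, dur, elapsed), (103, 1_000, 500));
}
```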
+ async fn epoch_utilization(&self, cx: &Context<'_>, epoch: i32) -> Option { + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT COALESCE( + CASE WHEN SUM(expected_blocks) > 0 + THEN SUM(produced_blocks)::DOUBLE PRECISION / SUM(expected_blocks) + ELSE 0.0 END, + 0.0) AS utilization + FROM spo_epoch_performance + WHERE epoch_no = $1 + "#; + match sqlx::query_scalar::<_, Option>(sql) + .bind(epoch as i64) + .fetch_one(&**pool) + .await + { + Ok(v) => v.or(Some(0.0)), + Err(e) => { + warn!("epoch_utilization query failed: {e}"); + None + } + } + } else { + None + } + } + + /// Number of SPO identities (with a pool_id present). + async fn spo_count(&self, cx: &Context<'_>) -> Option { + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + // Single source of truth for current SPOs: spo_stake_snapshot + let sql = r#" + SELECT COUNT(1)::BIGINT FROM spo_stake_snapshot + "#; + match sqlx::query_scalar::<_, i64>(sql).fetch_one(&**pool).await { + Ok(count) => Some(count), + Err(e) => { + warn!("spo_count query failed: {e}"); + None + } + } + } else { + None + } + } + + /// Committee membership for an epoch (ordered by position), with identity enrichment when available. + async fn committee( + &self, + cx: &Context<'_>, + epoch: i64, + ) -> Vec { + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT + cm.epoch_no, + cm.position, + cm.sidechain_pubkey AS sidechain_pubkey_hex, + cm.expected_slots, + si.aura_pubkey AS aura_pubkey_hex, + si.pool_id AS pool_id_hex, + si.spo_sk AS spo_sk_hex + FROM committee_membership cm + LEFT JOIN spo_identity si ON si.sidechain_pubkey = cm.sidechain_pubkey + WHERE cm.epoch_no = $1 + ORDER BY cm.position + "#; + match sqlx::query_as::< + _, + ( + i64, // epoch_no + i32, // position + String, // sidechain_pubkey_hex + i32, // expected_slots + Option, // aura_pubkey_hex + Option, // pool_id_hex + Option, // spo_sk_hex + ), + >(sql) + .bind(epoch) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows + .into_iter() + .map(|(epoch_no, position, sidechain_pubkey_hex, expected_slots, aura_pubkey_hex, pool_id_hex, spo_sk_hex)| CommitteeMember { + epoch_no, + position, + sidechain_pubkey_hex, + expected_slots, + aura_pubkey_hex, + pool_id_hex, + spo_sk_hex, + }) + .collect(), + Err(e) => { + warn!("committee query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + /// Registration counts series for an epoch range. Uses DB when possible. + async fn registered_spo_series( + &self, + cx: &Context<'_>, + from_epoch: i64, + to_epoch: i64, + ) -> Vec { + let start = from_epoch.min(to_epoch); + let end = to_epoch.max(from_epoch); + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + // Simplified: return raw per-epoch counts directly from DB sources. 
+ // - federated_valid_count: distinct committee members with expected_slots > 0 + // - registered_valid_count: distinct VALID in spo_history per epoch + // - registered_invalid_count: distinct INVALID in spo_history per epoch + // - federated_invalid_count: 0 (not tracked) + // - dparam: same as registered_valid_count as DOUBLE PRECISION (frontend can derive other metrics) + let sql = r#" + WITH rng AS ( + SELECT generate_series($1::BIGINT, $2::BIGINT) AS epoch_no + ), + hist_valid AS ( + SELECT sh.epoch_no, + COUNT(DISTINCT si.pool_id) AS cnt + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE sh.status IN ('VALID','Valid') + AND sh.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + AND si.pool_id IS NOT NULL + GROUP BY sh.epoch_no + ), + hist_invalid AS ( + SELECT sh.epoch_no, + COUNT(DISTINCT si.pool_id) AS cnt + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE sh.status IN ('INVALID','Invalid') + AND sh.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + AND si.pool_id IS NOT NULL + GROUP BY sh.epoch_no + ), + fed AS ( + SELECT c.epoch_no, + COUNT(DISTINCT c.sidechain_pubkey) FILTER (WHERE c.expected_slots > 0) AS federated_valid_count, + 0::BIGINT AS federated_invalid_count + FROM committee_membership c + WHERE c.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + GROUP BY c.epoch_no + ) + SELECT r.epoch_no, + COALESCE(f.federated_valid_count, 0) AS federated_valid_count, + COALESCE(f.federated_invalid_count, 0) AS federated_invalid_count, + COALESCE(hv.cnt, 0) AS registered_valid_count, + COALESCE(hi.cnt, 0) AS registered_invalid_count, + COALESCE(hv.cnt, 0)::DOUBLE PRECISION AS dparam + FROM rng r + LEFT JOIN hist_valid hv ON hv.epoch_no = r.epoch_no + LEFT JOIN hist_invalid hi ON hi.epoch_no = r.epoch_no + LEFT JOIN fed f ON f.epoch_no = r.epoch_no + ORDER BY r.epoch_no + "#; + match sqlx::query_as::<_, (i64, i64, i64, i64, i64, Option)>(sql) + .bind(start) + .bind(end) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows + .into_iter() + .map(|(epoch_no, f_valid, f_invalid, r_valid, r_invalid, dparam)| RegisteredStat { + epoch_no, + federated_valid_count: f_valid, + federated_invalid_count: f_invalid, + registered_valid_count: r_valid, + registered_invalid_count: r_invalid, + dparam, + }) + .collect(), + Err(e) => { + warn!("registered_spo_series query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + /// Raw presence events for SPO identity per epoch across sources (history, committee, performance). + /// Frontend can reconstruct totals/new registrations from these events. 
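The doc comment below is literal: given `(epochNo, idKey)` pairs from `registeredPresence`, a consumer can rebuild the registered-totals series with a first-seen fold. An illustrative sketch (the quadratic scan is kept for clarity, not performance):

```rust
// Rebuild (total_registered, newly_registered) per epoch from presence
// events, matching registeredTotalsSeries: first appearance = registration.
use std::collections::{BTreeMap, HashMap};

fn totals(events: &[(i64, &str)], from: i64, to: i64) -> BTreeMap<i64, (i64, i64)> {
    // First-seen epoch per identity key.
    let mut first_seen: HashMap<&str, i64> = HashMap::new();
    for &(epoch, id) in events {
        first_seen
            .entry(id)
            .and_modify(|e| *e = (*e).min(epoch))
            .or_insert(epoch);
    }
    let mut out = BTreeMap::new();
    for epoch in from..=to {
        let total = first_seen.values().filter(|&&e| e <= epoch).count() as i64;
        let new = first_seen.values().filter(|&&e| e == epoch).count() as i64;
        out.insert(epoch, (total, new));
    }
    out
}

fn main() {
    let events = [(5, "a"), (5, "b"), (6, "a"), (7, "c")];
    let series = totals(&events, 5, 7);
    assert_eq!(series[&5], (2, 2)); // a and b first seen at 5
    assert_eq!(series[&6], (2, 0));
    assert_eq!(series[&7], (3, 1)); // c arrives at 7
}
```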
+ async fn registered_presence( + &self, + cx: &Context<'_>, + from_epoch: i64, + to_epoch: i64, + ) -> Vec { + let start = from_epoch.min(to_epoch); + let end = to_epoch.max(from_epoch); + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + WITH history AS ( + SELECT sh.epoch_no::BIGINT AS epoch_no, + COALESCE(si.pool_id, sh.spo_sk) AS id_key, + 'history'::TEXT AS source, + sh.status::TEXT AS status + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE sh.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + ), + committee AS ( + SELECT cm.epoch_no::BIGINT AS epoch_no, + COALESCE(si.pool_id, cm.sidechain_pubkey) AS id_key, + 'committee'::TEXT AS source, + NULL::TEXT AS status + FROM committee_membership cm + LEFT JOIN spo_identity si ON si.sidechain_pubkey = cm.sidechain_pubkey + WHERE cm.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + ), + performance AS ( + SELECT sep.epoch_no::BIGINT AS epoch_no, + COALESCE(si.pool_id, sep.spo_sk) AS id_key, + 'performance'::TEXT AS source, + NULL::TEXT AS status + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + WHERE sep.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + ) + SELECT epoch_no, id_key, source, status FROM history + UNION ALL + SELECT epoch_no, id_key, source, status FROM committee + UNION ALL + SELECT epoch_no, id_key, source, status FROM performance + ORDER BY epoch_no, source, id_key + "#; + match sqlx::query_as::<_, (i64, String, String, Option)>(sql) + .bind(start) + .bind(end) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows + .into_iter() + .map(|(epoch_no, id_key, source, status)| PresenceEvent { epoch_no, id_key, source, status }) + .collect(), + Err(e) => { + warn!("registered_presence query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } + + /// First valid epoch per identity (based on spo_history status VALID). Optional cutoff to bound the scan. 
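`registeredFirstValidEpochs` below leans on a single-query optional filter: binding an `Option<i64>` encodes SQL NULL, and `$1::BIGINT IS NULL OR ...` then disables the cutoff entirely. A distilled sketch of that pattern, assuming `sqlx` with the postgres feature and the `spo_history` shape implied by the queries above (with `spo_sk` stored as text, per the Notes section):

```rust
// One query serves both the "no cutoff" and "cutoff" cases.
async fn first_valid_epochs(
    pool: &sqlx::PgPool,
    upto_epoch: Option<i64>,
) -> Result<Vec<(String, i64)>, sqlx::Error> {
    sqlx::query_as::<_, (String, i64)>(
        "SELECT spo_sk, MIN(epoch_no)::BIGINT \
         FROM spo_history \
         WHERE status IN ('VALID', 'Valid') \
           AND ($1::BIGINT IS NULL OR epoch_no <= $1::BIGINT) \
         GROUP BY spo_sk",
    )
    .bind(upto_epoch) // None binds NULL, which disables the cutoff
    .fetch_all(pool)
    .await
}
```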
+ async fn registered_first_valid_epochs( + &self, + cx: &Context<'_>, + upto_epoch: Option, + ) -> Vec { + if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { + let sql = r#" + SELECT COALESCE(si.pool_id, sh.spo_sk) AS id_key, + MIN(sh.epoch_no)::BIGINT AS first_valid_epoch + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE sh.status IN ('VALID','Valid') + AND ($1::BIGINT IS NULL OR sh.epoch_no <= $1::BIGINT) + GROUP BY 1 + ORDER BY first_valid_epoch + "#; + match sqlx::query_as::<_, (String, i64)>(sql) + .bind(upto_epoch) + .fetch_all(&**pool) + .await + { + Ok(rows) => rows + .into_iter() + .map(|(id_key, first_valid_epoch)| FirstValidEpoch { id_key, first_valid_epoch }) + .collect(), + Err(e) => { + warn!("registered_first_valid_epochs query failed: {e}"); + vec![] + } + } + } else { + vec![] + } + } +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct ServiceInfo { + pub name: String, + pub version: String, + pub network: String, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct EpochInfo { + pub epoch_no: i64, + pub duration_seconds: i64, + pub elapsed_seconds: i64, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct EpochPerf { + pub epoch_no: i64, + pub spo_sk_hex: String, + pub produced: i64, + pub expected: i64, + pub identity_label: Option, + pub stake_snapshot: Option, + pub pool_id_hex: Option, + pub validator_class: Option, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct Spo { + pub pool_id_hex: String, + pub validator_class: String, + pub sidechain_pubkey_hex: String, + pub aura_pubkey_hex: Option, + pub name: Option, + pub ticker: Option, + pub homepage_url: Option, + pub logo_url: Option, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct SpoIdentity { + pub pool_id_hex: String, + pub mainchain_pubkey_hex: String, + pub sidechain_pubkey_hex: String, + pub aura_pubkey_hex: Option, + pub validator_class: String, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct PoolMetadata { + pub pool_id_hex: String, + pub hex_id: Option, + pub name: Option, + pub ticker: Option, + pub homepage_url: Option, + pub logo_url: Option, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct SpoComposite { + pub identity: Option, + pub metadata: Option, + pub performance: Vec, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct PresenceEvent { + pub epoch_no: i64, + pub id_key: String, + pub source: String, + pub status: Option, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct FirstValidEpoch { + pub id_key: String, + pub first_valid_epoch: i64, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct CommitteeMember { + pub epoch_no: i64, + pub position: i32, + pub sidechain_pubkey_hex: String, + pub expected_slots: i32, + pub aura_pubkey_hex: Option, + pub pool_id_hex: Option, + pub spo_sk_hex: Option, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct RegisteredStat { + pub epoch_no: i64, + pub federated_valid_count: i64, + pub federated_invalid_count: i64, + pub registered_valid_count: i64, + pub registered_invalid_count: i64, + pub dparam: 
Option, +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct RegisteredTotals { + pub epoch_no: i64, + pub total_registered: i64, + pub newly_registered: i64, +} + +impl EpochPerf { + fn from_tuple(row: EpochPerfRow) -> Self { + let ( + epoch_no, + spo_sk_hex, + produced_i32, + expected_i32, + identity_label, + stake_snapshot, + pool_id_hex, + validator_class, + ) = row; + Self { + epoch_no, + spo_sk_hex, + produced: produced_i32 as i64, + expected: expected_i32 as i64, + identity_label, + stake_snapshot, + pool_id_hex, + validator_class, + } + } +} + +#[derive(async_graphql::SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct StakeShare { + pub pool_id_hex: String, + pub name: Option, + pub ticker: Option, + pub homepage_url: Option, + pub logo_url: Option, + pub live_stake: Option, + pub active_stake: Option, + pub live_delegators: Option, + pub live_saturation: Option, + pub declared_pledge: Option, + pub live_pledge: Option, + pub stake_share: Option, +} + + +async fn graphiql() -> impl IntoResponse { + use async_graphql::http::GraphiQLSource; + use axum::response::Html; + info!("Serving GraphiQL at /graphql"); + // Because this router is nested under /api/v1, we must point the JS client to the fully-qualified path. + // Otherwise the generated GraphiQL page will attempt requests to /graphql (404) -> empty body -> JSON parse error. + Html( + GraphiQLSource::build() + .endpoint("/api/v1/graphql") + .subscription_endpoint("/api/v1/graphql/ws") + .finish(), + ) +} + +// ------------------------------------------------- +// Helpers +// ------------------------------------------------- +fn normalize_hex(input: &str) -> Option { + if input.is_empty() { + return None; + } + let s = input + .strip_prefix("0x") + .unwrap_or(input) + .strip_prefix("0X") + .unwrap_or(input); + // Accept only even-length hex (bytea) and reasonable size (<= 256 chars to avoid abuse) + if s.len() % 2 != 0 || s.len() > 256 { + return None; + } + // Cheap validation (compiled once at runtime). If regex creation fails, we fallback to returning original. + static HEX_RE: once_cell::sync::Lazy = + once_cell::sync::Lazy::new(|| Regex::new("^[0-9a-fA-F]+$").unwrap()); + if !HEX_RE.is_match(s) { + return None; + } + Some(s.to_ascii_lowercase()) +} diff --git a/spo-api/src/infra/repo.rs b/spo-api/src/infra/repo.rs new file mode 100644 index 000000000..bd3b797db --- /dev/null +++ b/spo-api/src/infra/repo.rs @@ -0,0 +1,29 @@ +// This file is part of midnight-indexer. +// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 + +use indexer_common::infra::pool::postgres::PostgresPool; +use anyhow::Context; + +#[derive(Debug, Clone)] +pub struct SpoRepository { + pool: PostgresPool, +} + +impl SpoRepository { + pub fn new(pool: PostgresPool) -> Self { Self { pool } } + + /// List stake pool operator identifiers (placeholder implementation). + pub async fn list_stake_pool_operator_ids(&self, limit: i64) -> anyhow::Result> { + // TODO: Replace with real schema/table once defined (e.g., spo_operators) + // For now we query a non-existent placeholder; when integrated this will be updated. + let rows = sqlx::query_scalar::<_, String>("SELECT id FROM spo_operators ORDER BY id LIMIT $1") + .bind(limit) + .fetch_all(&*self.pool) + .await + .with_context(|| "query stake pool operator ids")?; + Ok(rows) + } +} + +// Future: introduce a trait abstraction if multiple backends are needed. 
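One subtlety in `normalize_hex` at the end of v1/mod.rs above: the chain `strip_prefix("0x").unwrap_or(input).strip_prefix("0X").unwrap_or(input)` falls back to the original input whenever the second strip returns `None`, so a lowercase `0x` prefix is never actually removed and the value is then rejected by the hex regex (uppercase `0X` happens to work). The inline comment about falling back if regex creation fails is also stale, since the code unwraps. A minimal corrected sketch, illustrative only, chaining the prefix attempts with `or_else` and using `is_ascii_hexdigit` in place of the regex:

```rust
// Prefix-tolerant hex normalization: strip either 0x or 0X, then validate.
fn normalize_hex(input: &str) -> Option<String> {
    let s = input
        .strip_prefix("0x")
        .or_else(|| input.strip_prefix("0X"))
        .unwrap_or(input);
    // Even length (whole bytes) and a sane size bound, as in the original.
    if s.is_empty() || s.len() % 2 != 0 || s.len() > 256 {
        return None;
    }
    s.chars()
        .all(|c| c.is_ascii_hexdigit())
        .then(|| s.to_ascii_lowercase())
}

fn main() {
    assert_eq!(normalize_hex("0xAB12"), Some("ab12".to_string()));
    assert_eq!(normalize_hex("0XAB12"), Some("ab12".to_string()));
    assert_eq!(normalize_hex("xyz"), None); // not hex
    assert_eq!(normalize_hex("abc"), None); // odd length
}
```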
diff --git a/spo-api/src/lib.rs b/spo-api/src/lib.rs new file mode 100644 index 000000000..b69e98b53 --- /dev/null +++ b/spo-api/src/lib.rs @@ -0,0 +1,10 @@ +// This file is part of midnight-indexer. +// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod application; +#[cfg(feature = "cloud")] +pub mod config; +pub mod domain; +#[cfg(feature = "cloud")] +pub mod infra; diff --git a/spo-api/src/main.rs b/spo-api/src/main.rs new file mode 100644 index 000000000..362738b3a --- /dev/null +++ b/spo-api/src/main.rs @@ -0,0 +1,71 @@ +// This file is part of midnight-indexer. +// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 + +#[cfg(feature = "cloud")] +#[tokio::main] +async fn main() { + use log::error; + use indexer_common::telemetry; + use std::panic; + + telemetry::init_logging(); + panic::set_hook(Box::new(|panic| error!(panic:%; "process panicked"))); + + if let Err(error) = run().await { + let backtrace = error.backtrace(); + let error = format!("{error:#}"); + error!(error, backtrace:%; "process exited with ERROR") + } +} + +#[cfg(feature = "cloud")] +async fn run() -> anyhow::Result<()> { + use anyhow::Context; + use indexer_common::{config::ConfigExt, domain::NoopSubscriber, infra::pool, telemetry}; + use log::info; + use spo_api::{application, config::Config, infra, infra::api::{AxumApi, Db}}; + use tokio::signal::unix::{SignalKind, signal}; + + let sigterm = signal(SignalKind::terminate()).expect("SIGTERM handler can be registered"); + let config = Config::load().context("load configuration")?; + info!(config:?; "starting"); + let Config { + run_migrations: _, + application_config, + infra_config, + telemetry_config: + telemetry::Config { + tracing_config, + metrics_config, + }, + } = config; + + telemetry::init_tracing(tracing_config); + telemetry::init_metrics(metrics_config); + + let infra::Config { + api_config, + storage_config, + } = infra_config; + + // Create Postgres pool (for read-only access initially) and run migrations if/when added later. + let pool = pool::postgres::PostgresPool::new(storage_config) + .await + .context("create DB pool for Postgres")?; + + // Build API without NATS for now. + let api = AxumApi::new(api_config).with_db(Db(pool)); + + // Until we have a catch-up signal, application::run will just serve the API and listen for SIGTERM. + // Pass a no-op subscriber for now. 
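+    // Note: application::run sets caught_up to true immediately, so /ready
+    // effectively gates only on the optional DB ping until the NATS-based
+    // catch-up signal is wired in.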
+ let subscriber = NoopSubscriber::default(); + application::run(application_config, api, subscriber, sigterm) + .await + .context("run SPO API application") +} + +#[cfg(not(feature = "cloud"))] +fn main() { + unimplemented!() +} diff --git a/spo-indexer/Cargo.toml b/spo-indexer/Cargo.toml new file mode 100644 index 000000000..32d5cc8f5 --- /dev/null +++ b/spo-indexer/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "spo-indexer" +description = "SPO Indexer" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +readme = { workspace = true } +homepage = { workspace = true } +repository = { workspace = true } +documentation = { workspace = true } +publish = { workspace = true } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ "--cfg", "docsrs" ] + +[dependencies] +anyhow = { workspace = true } +async-stream = { workspace = true } +blake2 = { workspace = true } +blockfrost = { workspace = true } +byte-unit = { workspace = true } +derive_more = { workspace = true, features = [ "from" ] } +fastrace = { workspace = true, features = [ "enable" ] } +futures = { workspace = true } +hex = { workspace = true } +humantime-serde = { workspace = true } +indexer-common = { path = "../indexer-common" } +indoc = { workspace = true } +itertools = { workspace = true } +log = { workspace = true, features = [ "kv_std" ] } +metrics = { workspace = true } +parity-scale-codec = { workspace = true } +parking_lot = { workspace = true } +paste = { workspace = true } +serde = { workspace = true, features = [ "derive" ] } +serde_json = { workspace = true} +serde_with = { workspace = true } +secrecy = { workspace = true, features = [ "serde" ] } +sqlx = { workspace = true, features = [ "runtime-tokio", "time", "chrono", "postgres", "macros"] } +subxt = { workspace = true, features = [ "reconnecting-rpc-client" ] } +thiserror = { workspace = true } +tokio = { workspace = true, features = [ "macros", "rt-multi-thread", "time", "signal" ] } +trait-variant = { workspace = true } +reqwest = { workspace = true, features = ["json", "rustls-tls"] } + +[dev-dependencies] +clap = { workspace = true, features = [ "derive" ] } +fake = { workspace = true } + +[features] +cloud = [ "indexer-common/cloud" ] +standalone = [ "indexer-common/standalone" ] + diff --git a/spo-indexer/Dockerfile b/spo-indexer/Dockerfile new file mode 100644 index 000000000..2175f6671 --- /dev/null +++ b/spo-indexer/Dockerfile @@ -0,0 +1,35 @@ +ARG RUST_VERSION=1.89 +FROM rust:${RUST_VERSION}-bookworm AS chef +WORKDIR /build +RUN cargo install cargo-chef --version 0.1.72 + +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +SHELL ["/bin/bash", "-c"] +ARG PROFILE=release +RUN git config --global url."https://@github.com".insteadOf "ssh://git@github.com" +COPY --from=planner /build/recipe.json recipe.json +RUN --mount=type=secret,id=netrc,target=/root/.netrc \ + cargo chef cook --profile $PROFILE --recipe-path recipe.json +COPY . . 
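+# Note on the RUN below: `${PROFILE/dev/debug}` is a bash substitution (hence
+# the bash SHELL above), mapping cargo's `dev` profile to its actual output
+# directory `target/debug`; every other profile writes to `target/<profile>`.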
+RUN --mount=type=secret,id=netrc,target=/root/.netrc \
+    cargo build -p spo-indexer --locked --features cloud --profile $PROFILE && \
+    mkdir -p /runtime/usr/local/bin /runtime/opt/spo-indexer && \
+    mv "./target/${PROFILE/dev/debug}/spo-indexer" /runtime/usr/local/bin/ && \
+    install -Dm755 spo-indexer/bin/entrypoint.sh /runtime/usr/local/bin/entrypoint.sh && \
+    install -Dm644 spo-indexer/config.yaml /runtime/opt/spo-indexer/config.yaml
+
+FROM debian:bookworm-slim@sha256:b1a741487078b369e78119849663d7f1a5341ef2768798f7b7406c4240f86aef AS runtime
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends ca-certificates=20230311+deb12u1 && \
+    rm -rf /var/lib/apt/lists/*
+RUN adduser --disabled-password --gecos "" --home "/nonexistent" --shell "/sbin/nologin" --no-create-home --uid "10001" appuser && \
+    mkdir /var/run/spo-indexer && \
+    chown appuser:appuser /var/run/spo-indexer
+COPY --from=builder --chown=appuser:appuser /runtime /
+USER appuser
+WORKDIR /opt/spo-indexer
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
diff --git a/spo-indexer/README.md b/spo-indexer/README.md
new file mode 100644
index 000000000..03275d043
--- /dev/null
+++ b/spo-indexer/README.md
@@ -0,0 +1,3 @@
+# SPO Indexer
+
+The SPO Indexer connects to the Node, i.e. the Midnight blockchain, and to Blockfrost to fetch SPO-related data, e.g. committee membership, registrations, and per-epoch block production, and stores it in its database.
diff --git a/spo-indexer/bin/entrypoint.sh b/spo-indexer/bin/entrypoint.sh
new file mode 100755
index 000000000..a8cf1ad15
--- /dev/null
+++ b/spo-indexer/bin/entrypoint.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -euo pipefail
+
+RUN_FILE=/var/run/spo-indexer/running
+trap 'rm -f "$RUN_FILE"' EXIT
+trap 'kill -SIGINT $PID' INT
+trap 'kill -SIGTERM $PID' TERM
+
+touch "$RUN_FILE"
+spo-indexer &
+PID=$!
+wait $PID
diff --git a/spo-indexer/build.rs b/spo-indexer/build.rs
new file mode 100644
index 000000000..96101856a
--- /dev/null
+++ b/spo-indexer/build.rs
@@ -0,0 +1,123 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::{env, fs, path::Path};
+
+const NODE_VERSION_FILE: &str = "../NODE_VERSION";
+
+fn main() {
+    let node_version = read_node_version();
+
+    let metadata_path = Path::new("..")
+        .join(".node")
+        .join(&node_version)
+        .join("metadata.scale");
+    if !metadata_path.exists() {
+        panic!("metadata file not found at {}", metadata_path.display());
+    }
+
+    // Derive the module name suffix by joining the first two version
+    // components with an underscore. E.g. "0.16.0-da0b6c69" becomes "0_16".
+    let module_suffix = node_version
+        .split('.')
+        .take(2)
+        .collect::<Vec<_>>()
+        .join("_");
+
+    // Generate the code with the subxt macro call.
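+    // For example, with NODE_VERSION "0.16.0-da0b6c69" the emitted file is,
+    // roughly:
+    //
+    //   #[subxt::subxt(runtime_metadata_path = "../.node/0.16.0-da0b6c69/metadata.scale", ...)]
+    //   pub mod runtime_0_16 {}
+    //
+    // which downstream code presumably pulls in via
+    // include!(concat!(env!("OUT_DIR"), "/generated_runtime.rs")).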
+ let generated_code = format!( + r#" + #[subxt::subxt( + runtime_metadata_path = "{}", + derive_for_type( + path = "sp_consensus_slots::Slot", + derive = "parity_scale_codec::Encode, parity_scale_codec::Decode", + recursive + ) + )] + pub mod runtime_{module_suffix} {{}} + "#, + metadata_path.display() + ); + + // Write generated code to file in OUT_DIR. + let out_dir = env::var("OUT_DIR").expect("env var OUT_DIR is set"); + let runtime_file = Path::new(&out_dir).join("generated_runtime.rs"); + fs::write(&runtime_file, generated_code).expect("generated runtime file can be written"); + + // Tell cargo to rerun build script if: + // 1. The NODE_VERSION file changes. + println!("cargo:rerun-if-changed={}", NODE_VERSION_FILE); + // 2. The metadata file itself changes. + println!("cargo:rerun-if-changed={}", metadata_path.display()); + // 3. The .node directory structure changes. + println!("cargo:rerun-if-changed=../.node"); + + // Output information for debugging. + println!("cargo:rustc-env=USED_NODE_VERSION={}", node_version); +} + +fn read_node_version() -> String { + if !Path::new(NODE_VERSION_FILE).exists() { + panic!("{NODE_VERSION_FILE} file not found"); + } + + // Read and validate/sanitize the version string. + match fs::read_to_string(NODE_VERSION_FILE) { + Ok(version) => { + let version = version.trim().to_string(); + + if version.is_empty() { + panic!("{NODE_VERSION_FILE} file is empty"); + } + + validate_and_sanitize_version(&version) + } + + Err(error) => { + panic!("cannot read {NODE_VERSION_FILE} file: {error}"); + } + } +} + +fn validate_and_sanitize_version(version: &str) -> String { + const MAX_VERSION_LENGTH: usize = 64; + if version.len() > MAX_VERSION_LENGTH { + panic!( + "node version must have less than {MAX_VERSION_LENGTH} characters, but had {}", + version.len() + ); + } + + const PERMITTED_SPECIAL_CHARS: [char; 3] = ['.', '-', '_']; + let allowed_chars = + |c: char| -> bool { c.is_ascii_alphanumeric() || PERMITTED_SPECIAL_CHARS.contains(&c) }; + if !version.chars().all(allowed_chars) { + panic!( + "invalid characters in node version {}", + version + .chars() + .filter(|c| !allowed_chars(*c)) + .collect::() + ); + } + + if version.starts_with(PERMITTED_SPECIAL_CHARS) || version.ends_with(PERMITTED_SPECIAL_CHARS) { + panic!( + "node version must not start or end with {PERMITTED_SPECIAL_CHARS:?}, but got: '{}'", + version + ); + } + + version.to_string() +} diff --git a/spo-indexer/config.yaml b/spo-indexer/config.yaml new file mode 100644 index 000000000..d0c111a3b --- /dev/null +++ b/spo-indexer/config.yaml @@ -0,0 +1,49 @@ +run_migrations: true + +application: + network_id: "preview" + interval: 5000 + blocks_buffer: 10 + save_ledger_state_after: 1000 + caught_up_max_distance: 10 + caught_up_leeway: 5 + stake_refresh: + period_secs: 900 # every 15 minutes + page_size: 100 # scan this many pools per cycle + max_rps: 2 # throttle requests to avoid 402s + +infra: + storage: + host: "localhost" # use Docker service name inside Compose network + port: 5432 + dbname: "indexer" + user: "indexer" # matches postgres service (POSTGRES_USER) + sslmode: "prefer" # consistent with spo-api + max_connections: 10 + idle_timeout: "1m" + max_lifetime: "5m" + + pub_sub: + url: "nats:4222" + username: "indexer" + + ledger_state_storage: + url: "nats:4222" + username: "indexer" + + node: + url: "wss://rpc.preview.midnight.network" + genesis_protocol_version: 16000 + reconnect_max_delay: "10s" # 10ms, 100ms, 1s, 10s + reconnect_max_attempts: 30 # Roughly 5m + blockfrost_id: 
"previewukkFxumNW31cXmsBtKI1JTnbxvcVCbCj" + +telemetry: + tracing: + enabled: false + service_name: "chain-indexer" + otlp_exporter_endpoint: "http://localhost:4317" + metrics: + enabled: false + address: "0.0.0.0" + port: 9000 diff --git a/spo-indexer/src/application.rs b/spo-indexer/src/application.rs new file mode 100644 index 000000000..41d8545b7 --- /dev/null +++ b/spo-indexer/src/application.rs @@ -0,0 +1,386 @@ +// This file is part of midnight-indexer. +// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + domain::{ + CandidateRegistration, Epoch, PoolMetadata, SPO, SPOEpochPerformance, SPOHistory, + SPOStatus, Validator, ValidatorMembership, + storage::{SqlxTransaction, Storage}, + }, + infra::subxt_node::{SLOT_DURATION, SPOClient}, + utils::{hex_to_bytes, remove_hex_prefix}, +}; +use blake2::{ + Blake2bVar, + digest::{Update, VariableOutput}, +}; +use serde::Deserialize; +use std::{cmp, collections::HashMap, time::Duration}; +use subxt::utils::to_hex; +use tokio::time; + +#[derive(Debug, Clone, Deserialize)] +pub struct Config { + pub interval: u32, + /// Stake refresh config (mandatory) + pub stake_refresh: StakeRefreshConfig, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct StakeRefreshConfig { + /// How often to refresh stake data in seconds + pub period_secs: u64, + /// Number of pools to fetch per cycle + pub page_size: u32, + /// Max requests per second to Blockfrost (rudimentary rate limit) + pub max_rps: u32, +} + +pub async fn run(config: Config, client: SPOClient, storage: impl Storage) -> anyhow::Result<()> { + // Mandatory background task: refresh stake snapshots periodically using Blockfrost + let st_cfg = config.stake_refresh.clone(); + let storage_bg = storage.clone(); + let client_bg = client.clone(); + tokio::spawn(async move { + let mut ticker = time::interval(Duration::from_secs(st_cfg.period_secs.max(60))); + // initial delay to avoid hammering on startup + ticker.tick().await; + loop { + ticker.tick().await; + if let Err(e) = refresh_stake_snapshots(&client_bg, &storage_bg, &st_cfg).await { + eprintln!("stake refresh failed: {e:?}"); + } + } + }); + + loop { + let cur_epoch = get_epoch_to_process(&client, &storage).await?; + + if cur_epoch.is_none() { + println!("latest epoch reached"); + time::sleep(Duration::new(config.interval.into(), 0)).await; + continue; + } + + let epoch = cur_epoch.unwrap(); + println!("processing epoch {}", epoch.epoch_no); + + let mut tx = storage.create_tx().await?; + let committee = client.get_committee(epoch.epoch_no).await?; + let raw_spos = client.get_spo_registrations(epoch.epoch_no).await?; + let membership = committee_to_membership(&client, &committee); + + storage.save_epoch(&epoch, &mut tx).await?; + storage.save_membership(&membership, &mut tx).await?; + + let mut blocks_produced: HashMap = HashMap::new(); + let mut val_to_registration: HashMap = HashMap::new(); + + for (_, registrations) in raw_spos.candidate_registrations { + 
+            for raw_spo in &registrations {
+                let cardano_id = get_cardano_id(&raw_spo.mainchain_pub_key);
+                // Normalize all keys by stripping the optional 0x prefix for consistency with DB values.
+                let spo_sk = remove_hex_prefix(raw_spo.sidechain_pub_key.to_string());
+
+                val_to_registration.insert(spo_sk.clone(), raw_spo.clone());
+                save_pool_metadata(&client, &storage, &mut tx, cardano_id.clone()).await?;
+                save_spo_identity(&storage, raw_spo, cardano_id, &mut tx).await?;
+                save_spo_history(&storage, raw_spo, epoch.epoch_no.into(), &mut tx).await?;
+
+                let count_mk = blocks_produced.entry(spo_sk).or_insert(0);
+                *count_mk += 1;
+            }
+        }
+
+        println!("\tcommittee size: {}", committee.len());
+        if !committee.is_empty() {
+            let blocks_remainder = client.epoch_duration % committee.len() as u32;
+            let expected_blocks = get_expected_blocks(&client, &epoch, committee.len() as u32);
+
+            for (index, spo) in committee.iter().enumerate() {
+                let spo_sk = remove_hex_prefix(spo.sidechain_pubkey.to_string());
+                let produced = blocks_produced.get(&spo_sk);
+
+                // only count if the validator has produced a block
+                if produced.is_some() {
+                    let raw_spo = val_to_registration.get(&spo_sk).unwrap();
+                    let cardano_id = get_cardano_id(&raw_spo.mainchain_pub_key);
+
+                    let spo_performance = SPOEpochPerformance {
+                        spo_sk,
+                        epoch_no: epoch.epoch_no as u64,
+                        expected_blocks: expected_blocks
+                            + (if (index as u32) < blocks_remainder { 1 } else { 0 }),
+                        produced_blocks: *produced.unwrap() as u64,
+                        identity_label: cardano_id,
+                    };
+
+                    storage
+                        .save_spo_performance(&spo_performance, &mut tx)
+                        .await?;
+                }
+            }
+        }
+
+        tx.commit().await?;
+        println!("processed epoch {}", epoch.epoch_no);
+    }
+}
+
+async fn refresh_stake_snapshots(
+    client: &SPOClient,
+    storage: &impl Storage,
+    cfg: &StakeRefreshConfig,
+) -> anyhow::Result<()> {
+    let limit = cfg.page_size as i64;
+    let mut total_updated = 0u32;
+    let main_epoch = client
+        .get_sidechain_status()
+        .await
+        .ok()
+        .map(|s| s.mainchain.epoch as i64);
+
+    // Cursor-based paging: resume after last_pool_id, then wrap to start.
+    let after = storage.get_stake_refresh_cursor().await?;
+
+    // First page: after last_pool_id.
+    let mut pool_ids = if let Some(ref last) = after {
+        storage.list_pool_ids_after(last, limit).await?
+    } else {
+        storage.list_pool_ids(limit, 0).await?
+    };
+
+    // If empty, wrap around to the beginning.
+    if pool_ids.is_empty() {
+        pool_ids = storage.list_pool_ids(limit, 0).await?;
+    }
+
+    if pool_ids.is_empty() {
+        return Ok(());
+    }
+
+    // Rate limiting.
+    let sleep_per_req_ms = if cfg.max_rps == 0 {
+        0
+    } else {
+        (1000 / cfg.max_rps.max(1)) as u64
+    };
+
+    let mut tx = storage.create_tx().await?;
+    for pid in pool_ids.iter() {
+        match client.get_pool_data(pid).await {
+            Ok(pd) => {
+                storage
+                    .save_stake_snapshot(
+                        pid,
+                        pd.live_stake.as_deref(),
+                        pd.active_stake.as_deref(),
+                        pd.live_delegators,
+                        pd.live_saturation,
+                        pd.declared_pledge.as_deref(),
+                        pd.live_pledge.as_deref(),
+                        &mut tx,
+                    )
+                    .await?;
+                storage
+                    .insert_stake_history(
+                        pid,
+                        main_epoch,
+                        pd.live_stake.as_deref(),
+                        pd.active_stake.as_deref(),
+                        pd.live_delegators,
+                        pd.live_saturation,
+                        pd.declared_pledge.as_deref(),
+                        pd.live_pledge.as_deref(),
+                        &mut tx,
+                    )
+                    .await?;
+                total_updated += 1;
+            }
+            Err(err) => {
+                eprintln!("stake refresh for {pid} failed: {err:?}");
+            }
+        }
+        if sleep_per_req_ms > 0 {
+            time::sleep(Duration::from_millis(sleep_per_req_ms)).await;
+        }
+    }
+    tx.commit().await?;
+
+    // Persist cursor at the last processed id.
+    let last_id = pool_ids.last().map(|s| s.as_str());
+    storage.set_stake_refresh_cursor(last_id).await?;
+
+    if total_updated > 0 {
+        println!(
+            "stake refresh: updated {} pools (cursor at {:?})",
+            total_updated, last_id
+        );
+    }
+    Ok(())
+}
+
+async fn save_spo_history(
+    storage: &impl Storage,
+    raw_spo: &CandidateRegistration,
+    epoch: u64,
+    tx: &mut SqlxTransaction,
+) -> anyhow::Result<()> {
+    // Normalize to hex without 0x.
+    let spo_sk = remove_hex_prefix(raw_spo.sidechain_pub_key.to_string());
+
+    let spo = SPOHistory {
+        spo_sk,
+        epoch_no: epoch,
+        status: if raw_spo.is_valid {
+            SPOStatus::Valid
+        } else {
+            SPOStatus::Invalid
+        },
+    };
+
+    storage.save_spo_history(&spo, tx).await?;
+    Ok(())
+}
+
+async fn save_spo_identity(
+    storage: &impl Storage,
+    raw_spo: &CandidateRegistration,
+    cardano_id: String,
+    tx: &mut SqlxTransaction,
+) -> anyhow::Result<()> {
+    // Normalize all hex-like identifiers to avoid mixed representations.
+    let spo_sk = remove_hex_prefix(raw_spo.sidechain_pub_key.to_string());
+    let aura_pk = remove_hex_prefix(raw_spo.keys.aura.to_string());
+    let main_pk = remove_hex_prefix(raw_spo.mainchain_pub_key.to_string());
+
+    let spo = SPO {
+        spo_sk: spo_sk.clone(),
+        sidechain_pubkey: spo_sk,
+        pool_id: cardano_id,
+        aura_pubkey: aura_pk,
+        mainchain_pubkey: main_pk,
+    };
+
+    storage.save_spo(&spo, tx).await?;
+    Ok(())
+}
+
+async fn save_pool_metadata(
+    client: &SPOClient,
+    storage: &impl Storage,
+    tx: &mut SqlxTransaction,
+    cardano_id: String,
+) -> anyhow::Result<()> {
+    let meta = client.get_pool_metadata(cardano_id.clone()).await;
+
+    // Fall back to empty metadata if the Blockfrost lookup fails.
+    let saved_meta = meta.unwrap_or_else(|_| PoolMetadata {
+        pool_id: cardano_id.to_string(),
+        hex_id: cardano_id.to_string(),
+        name: "".to_string(),
+        ticker: "".to_string(),
+        homepage_url: "".to_string(),
+        url: "".to_string(),
+    });
+
+    storage.save_pool_meta(&saved_meta, tx).await?;
+    Ok(())
+}
+
+fn get_expected_blocks(client: &SPOClient, epoch: &Epoch, committee_size: u32) -> u32 {
+    let mx_slots = cmp::min(
+        client.slots_per_epoch,
+        (epoch.ends_at - epoch.starts_at) as u32 / SLOT_DURATION,
+    );
+    mx_slots / committee_size
+}
+
+fn committee_to_membership(
+    client: &SPOClient,
+    committee: &Vec<Validator>,
+) -> Vec<ValidatorMembership> {
+    if committee.is_empty() {
+        return vec![];
+    }
+
+    let slots_per_epoch = client.slots_per_epoch;
+    let num_validators = committee.len() as u32;
+    let leftover = slots_per_epoch % num_validators;
+
+    committee
+        .iter()
+        .enumerate()
+        .map(|(index, c)| ValidatorMembership {
+            epoch_no: c.epoch_no,
+            position: c.position,
+            // Normalize to hex without 0x for consistency with identity/performance.
+            spo_sk: remove_hex_prefix(c.sidechain_pubkey.clone()),
+            sidechain_pubkey: remove_hex_prefix(c.sidechain_pubkey.clone()),
+            expected_slots: slots_per_epoch / num_validators
+                + if leftover > index.try_into().unwrap() { 1 } else { 0 },
+        })
+        .collect()
+}
+
+/// If the option is None, we are already at the latest epoch.
+async fn get_epoch_to_process(
+    client: &SPOClient,
+    storage: &impl Storage,
+) -> anyhow::Result<Option<Epoch>> {
+    let latest_processed = storage.get_latest_epoch().await?;
+    let current_epoch = client.get_current_epoch().await?;
+    let latest_epoch_num = if latest_processed.is_some() {
+        latest_processed.unwrap().epoch_no
+    } else {
+        client.get_first_epoch_num().await?
+    };
+
+    let time_offset: i64 =
+        (current_epoch.epoch_no as i64 - latest_epoch_num as i64) * client.epoch_duration as i64;
+
+    if time_offset == 0 {
+        Ok(None)
+    } else {
+        Ok(Some(Epoch {
+            epoch_no: latest_epoch_num + 1,
+            starts_at: current_epoch.starts_at - time_offset,
+            ends_at: current_epoch.ends_at - time_offset,
+        }))
+    }
+}
+
+fn get_cardano_id(mainchain_pk: &String) -> String {
+    let mainchain_pk = hex_to_bytes(mainchain_pk);
+    let mut hasher = Blake2bVar::new(28).unwrap();
+    hasher.update(&mainchain_pk);
+
+    let mut buffer = [0u8; 28];
+    hasher.finalize_variable(&mut buffer).unwrap();
+
+    let hex_hash = to_hex(&buffer);
+
+    remove_hex_prefix(hex_hash)
+}
\ No newline at end of file
diff --git a/spo-indexer/src/config.rs b/spo-indexer/src/config.rs
new file mode 100644
index 000000000..8c6f9e139
--- /dev/null
+++ b/spo-indexer/src/config.rs
@@ -0,0 +1,28 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
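+
+//! Top-level configuration for the spo-indexer service: the `application`,
+//! `infra`, and `telemetry` sections of `config.yaml` are deserialized into the
+//! corresponding sub-configs below.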
+
+use crate::{application, infra};
+
+#[derive(Debug, Clone, serde::Deserialize)]
+pub struct Config {
+    pub run_migrations: bool,
+
+    #[serde(rename = "application")]
+    pub application_config: application::Config,
+
+    #[serde(rename = "infra")]
+    pub infra_config: infra::Config,
+
+    #[serde(rename = "telemetry")]
+    pub telemetry_config: indexer_common::telemetry::Config,
+}
diff --git a/spo-indexer/src/domain.rs b/spo-indexer/src/domain.rs
new file mode 100644
index 000000000..90afd70a8
--- /dev/null
+++ b/spo-indexer/src/domain.rs
@@ -0,0 +1,26 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod storage;
+
+mod committee;
+mod epoch;
+mod pool;
+mod rpc;
+mod spo;
+
+pub use committee::*;
+pub use epoch::*;
+pub use pool::*;
+pub use rpc::*;
+pub use spo::*;
diff --git a/spo-indexer/src/domain/committee.rs b/spo-indexer/src/domain/committee.rs
new file mode 100644
index 000000000..8d70deeb2
--- /dev/null
+++ b/spo-indexer/src/domain/committee.rs
@@ -0,0 +1,15 @@
+#[derive(Debug, Clone)]
+pub struct Validator {
+    pub epoch_no: u64,
+    pub position: u64,
+    pub sidechain_pubkey: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct ValidatorMembership {
+    pub spo_sk: String,
+    pub sidechain_pubkey: String,
+    pub epoch_no: u64,
+    pub position: u64,
+    pub expected_slots: u32,
+}
diff --git a/spo-indexer/src/domain/epoch.rs b/spo-indexer/src/domain/epoch.rs
new file mode 100644
index 000000000..491035440
--- /dev/null
+++ b/spo-indexer/src/domain/epoch.rs
@@ -0,0 +1,6 @@
+#[derive(Debug, Clone)]
+pub struct Epoch {
+    pub epoch_no: u32,
+    pub starts_at: i64,
+    pub ends_at: i64,
+}
diff --git a/spo-indexer/src/domain/pool.rs b/spo-indexer/src/domain/pool.rs
new file mode 100644
index 000000000..3623dec17
--- /dev/null
+++ b/spo-indexer/src/domain/pool.rs
@@ -0,0 +1,9 @@
+#[derive(Debug, Clone)]
+pub struct PoolMetadata {
+    pub pool_id: String,
+    pub hex_id: String,
+    pub name: String,
+    pub ticker: String,
+    pub homepage_url: String,
+    pub url: String,
+}
diff --git a/spo-indexer/src/domain/rpc.rs b/spo-indexer/src/domain/rpc.rs
new file mode 100644
index 000000000..1e99f9bca
--- /dev/null
+++ b/spo-indexer/src/domain/rpc.rs
@@ -0,0 +1,258 @@
+use core::fmt;
+use serde::{Deserialize, Serialize};
+use std::{
+    collections::HashMap,
+    fmt::{Display, Formatter},
+};
+
+#[derive(Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct SidechainStatusResponse {
+    pub mainchain: ChainInfo,
+    pub sidechain: ChainInfo,
+}
+
+impl Display for SidechainStatusResponse {
+    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
+        writeln!(f, "Sidechain Status:")?;
+        writeln!(f, "  Mainchain:")?;
+        writeln!(f, "{}", self.mainchain)?;
+        writeln!(f, "  Sidechain:")?;
+        writeln!(f, "{}", self.sidechain)
+    }
+}
+
+#[derive(Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct ChainInfo {
+    pub epoch: u32,
+    pub next_epoch_timestamp: i64,
+    pub slot: u32,
+}
+
+impl Display for ChainInfo {
+    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
+        writeln!(f, "    Epoch: {}", self.epoch)?;
+        writeln!(f, "    Next Epoch Timestamp: {}", self.next_epoch_timestamp)?;
+        writeln!(f, "    Slot: {}", self.slot)
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct SPORegistrationResponse {
+    pub d_parameter: DParameter,
+    pub permissioned_candidates: serde_json::Value,
+    pub candidate_registrations: HashMap<String, Vec<CandidateRegistration>>,
+}
+
+impl Display for SPORegistrationResponse {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        writeln!(f, "SPO Registration Response:")?;
+        writeln!(f, "  DParameter:")?;
+        writeln!(f, "{}", self.d_parameter)?;
+        writeln!(
+            f,
+            "  Permissioned Candidates: {:?}",
+            self.permissioned_candidates
+        )?;
+        writeln!(f, "  Candidate Registrations:")?;
+        for (key, registrations) in &self.candidate_registrations {
+            writeln!(f, "    {}:", key)?;
+            for reg in registrations {
+                writeln!(f, "{}", reg)?;
+            }
+        }
+        Ok(())
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct DParameter {
+    pub num_permissioned_candidates: u32,
+    pub num_registered_candidates: u32,
+}
+
+impl Display for DParameter {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        writeln!(
+            f,
+            "    Num Permissioned Candidates: {}",
+            self.num_permissioned_candidates
+        )?;
+        writeln!(
+            f,
+            "    Num Registered Candidates: {}",
+            self.num_registered_candidates
+        )
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct CandidateRegistration {
+    pub sidechain_pub_key: String,
+    pub sidechain_account_id: String,
+    pub mainchain_pub_key: String,
+    pub cross_chain_pub_key: String,
+    pub keys: CandidateKeys,
+    pub sidechain_signature: String,
+    pub mainchain_signature: String,
+    pub cross_chain_signature: String,
+    pub utxo: Utxo,
+    pub is_valid: bool,
+    pub invalid_reasons: Option<InvalidReasons>,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct CandidateKeys {
+    pub gran: String,
+    pub aura: String,
+}
+
+impl Display for CandidateRegistration {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        writeln!(f, "      Sidechain Pub Key: {}", self.sidechain_pub_key)?;
+        writeln!(
+            f,
+            "      Sidechain Account ID: {}",
+            self.sidechain_account_id
+        )?;
+        writeln!(f, "      Mainchain Pub Key: {}", self.mainchain_pub_key)?;
+        writeln!(f, "      Cross Chain Pub Key: {}", self.cross_chain_pub_key)?;
+        writeln!(f, "      Aura Pub Key: {}", self.keys.aura)?;
+        writeln!(f, "      Grandpa Pub Key: {}", self.keys.gran)?;
+        writeln!(f, "      Sidechain Signature: {}", self.sidechain_signature)?;
+        writeln!(f, "      Mainchain Signature: {}", self.mainchain_signature)?;
+        writeln!(
+            f,
+            "      Cross Chain Signature: {}",
+            self.cross_chain_signature
+        )?;
+        writeln!(f, "      UTXO:")?;
+        writeln!(f, "{}", self.utxo)?;
+        writeln!(f, "      Is Valid: {}", self.is_valid)?;
+
+        if let Some(reasons) = &self.invalid_reasons {
+            writeln!(f, "      Invalid Reasons: {}", reasons)?;
+        }
+
+        Ok(())
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct Utxo {
+    pub utxo_id: String,
+    pub epoch_number: u32,
+    pub block_number: u32,
+    pub slot_number: u64,
+    pub tx_index_within_block: u32,
+}
+
+impl Display for Utxo {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        writeln!(f, "        UTXO ID: {}", self.utxo_id)?;
+        writeln!(f, "        Epoch Number: {}", self.epoch_number)?;
+        writeln!(f, "        Block Number: {}", self.block_number)?;
writeln!(f, " Slot Number: {}", self.slot_number)?; + writeln!( + f, + " Tx Index Within Block: {}", + self.tx_index_within_block + ) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +#[serde(untagged)] +pub enum InvalidReasons { + StakeError { + #[serde(rename = "StakeError")] + stake_error: String, + }, +} + +impl Display for InvalidReasons { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + InvalidReasons::StakeError { stake_error } => { + write!(f, "Stake Error: {}", stake_error) + } + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EpochCommitteeResponse { + pub sidechain_epoch: u64, + pub committee: Vec, +} + +impl Display for EpochCommitteeResponse { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + writeln!(f, "Epoch: {}", self.sidechain_epoch)?; + writeln!(f, "Committee Members:")?; + for (i, member) in self.committee.iter().enumerate() { + writeln!(f, " {}: {}", i + 1, member.sidechain_pub_key)?; + } + Ok(()) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct CommitteeMember { + pub sidechain_pub_key: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockResponse { + pub block: Block, + + pub justifications: Vec>>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Block { + pub header: Header, + + pub extrinsics: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Header { + pub parent_hash: String, + pub number: String, + pub state_root: String, + pub extrinsics_root: String, + pub digest: Digest, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Digest { + pub logs: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum BodyItem { + Timestamp(TimestampExtrinsic), + + UnknownTransaction(String), + + Object(HashMap), +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TimestampExtrinsic { + #[serde(rename = "Timestamp")] + pub timestamp_ms: u64, +} \ No newline at end of file diff --git a/spo-indexer/src/domain/spo.rs b/spo-indexer/src/domain/spo.rs new file mode 100644 index 000000000..420398f77 --- /dev/null +++ b/spo-indexer/src/domain/spo.rs @@ -0,0 +1,43 @@ +use std::fmt; + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone)] +pub struct SPO { + pub spo_sk: String, + pub pool_id: String, + pub mainchain_pubkey: String, + pub sidechain_pubkey: String, + pub aura_pubkey: String, +} + +#[derive(Debug, Clone)] +pub struct SPOEpochPerformance { + pub spo_sk: String, + pub epoch_no: u64, + pub expected_blocks: u32, + pub produced_blocks: u64, + pub identity_label: String, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum SPOStatus { + Valid, + Invalid, +} + +#[derive(Debug, Clone)] +pub struct SPOHistory { + pub spo_sk: String, + pub epoch_no: u64, + pub status: SPOStatus, +} + +impl fmt::Display for SPOStatus { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + SPOStatus::Valid => write!(f, "VALID"), + SPOStatus::Invalid => write!(f, "INVALID"), + } + } +} diff --git a/spo-indexer/src/domain/storage.rs b/spo-indexer/src/domain/storage.rs new file mode 100644 index 000000000..514ac883e --- /dev/null +++ b/spo-indexer/src/domain/storage.rs @@ -0,0 +1,104 @@ +// This file is part of midnight-indexer. 
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::domain::{
+    Epoch, PoolMetadata, SPO, SPOEpochPerformance, SPOHistory, ValidatorMembership,
+};
+
+#[cfg(feature = "cloud")]
+/// Sqlx transaction for Postgres.
+pub type SqlxTransaction = sqlx::Transaction<'static, sqlx::Postgres>;
+
+#[cfg(feature = "standalone")]
+/// Sqlx transaction for Sqlite.
+pub type SqlxTransaction = sqlx::Transaction<'static, sqlx::Sqlite>;
+
+#[cfg(not(any(feature = "cloud", feature = "standalone")))]
+/// Default to Postgres when no feature is explicitly enabled (workspace builds).
+pub type SqlxTransaction = sqlx::Transaction<'static, sqlx::Postgres>;
+
+/// Storage abstraction.
+#[trait_variant::make(Send)]
+pub trait Storage
+where
+    Self: Clone + Send + Sync + 'static,
+{
+    async fn create_tx(&self) -> Result<SqlxTransaction, sqlx::Error>;
+
+    async fn get_latest_epoch(&self) -> Result<Option<Epoch>, sqlx::Error>;
+
+    async fn save_epoch(&self, epoch: &Epoch, tx: &mut SqlxTransaction) -> Result<(), sqlx::Error>;
+
+    async fn save_spo(&self, spo: &SPO, tx: &mut SqlxTransaction) -> Result<(), sqlx::Error>;
+
+    async fn save_membership(
+        &self,
+        memberships: &Vec<ValidatorMembership>,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error>;
+
+    async fn save_spo_performance(
+        &self,
+        metadata: &SPOEpochPerformance,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error>;
+
+    async fn save_pool_meta(
+        &self,
+        metadata: &PoolMetadata,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error>;
+
+    async fn save_spo_history(
+        &self,
+        history: &SPOHistory,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error>;
+
+    /// Return a page of pool_ids known to the system (for stake refreshers).
+    /// Implementations should order by most recently updated metadata first when possible.
+    async fn list_pool_ids(&self, limit: i64, offset: i64) -> Result<Vec<String>, sqlx::Error>;
+
+    /// Return pool_ids after a given id, lexicographically, for cursor-based rotation.
+    async fn list_pool_ids_after(&self, after: &str, limit: i64) -> Result<Vec<String>, sqlx::Error>;
+
+    /// Upsert latest stake snapshot for a pool.
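+    /// Stake amounts arrive as decimal strings (as returned by Blockfrost) and are
+    /// cast to NUMERIC inside the SQL implementations, so no integer conversion
+    /// happens on the Rust side.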
+    async fn save_stake_snapshot(
+        &self,
+        pool_id: &str,
+        live_stake: Option<&str>,
+        active_stake: Option<&str>,
+        live_delegators: Option<i64>,
+        live_saturation: Option<f64>,
+        declared_pledge: Option<&str>,
+        live_pledge: Option<&str>,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error>;
+
+    /// Append a history row for stake.
+    async fn insert_stake_history(
+        &self,
+        pool_id: &str,
+        mainchain_epoch: Option<i64>,
+        live_stake: Option<&str>,
+        active_stake: Option<&str>,
+        live_delegators: Option<i64>,
+        live_saturation: Option<f64>,
+        declared_pledge: Option<&str>,
+        live_pledge: Option<&str>,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error>;
+
+    /// Refresh cursor helpers.
+    async fn get_stake_refresh_cursor(&self) -> Result<Option<String>, sqlx::Error>;
+    async fn set_stake_refresh_cursor(&self, pool_id: Option<&str>) -> Result<(), sqlx::Error>;
+}
diff --git a/spo-indexer/src/infra.rs b/spo-indexer/src/infra.rs
new file mode 100644
index 000000000..cd01883d2
--- /dev/null
+++ b/spo-indexer/src/infra.rs
@@ -0,0 +1,28 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg_attr(docsrs, doc(cfg(any(feature = "cloud", feature = "standalone"))))]
+#[cfg(any(feature = "cloud", feature = "standalone"))]
+pub mod storage;
+pub mod subxt_node;
+
+#[cfg_attr(docsrs, doc(cfg(feature = "cloud")))]
+#[cfg(feature = "cloud")]
+#[derive(Debug, Clone, serde::Deserialize)]
+pub struct Config {
+    #[serde(rename = "storage")]
+    pub storage_config: indexer_common::infra::pool::postgres::Config,
+
+    #[serde(rename = "node")]
+    pub node_config: subxt_node::Config,
+}
diff --git a/spo-indexer/src/infra/config.rs b/spo-indexer/src/infra/config.rs
new file mode 100644
index 000000000..810aa64c3
--- /dev/null
+++ b/spo-indexer/src/infra/config.rs
@@ -0,0 +1,22 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
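+
+//! Infra-level configuration (cloud feature): Postgres storage plus the node
+//! connection settings.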
+
+#[cfg_attr(docsrs, doc(cfg(feature = "cloud")))]
+#[derive(Debug, Clone, serde::Deserialize)]
+pub struct Config {
+    #[serde(rename = "storage")]
+    pub storage_config: indexer_common::infra::pool::postgres::Config,
+
+    #[serde(rename = "node")]
+    pub node_config: crate::infra::subxt_node::Config,
+}
diff --git a/spo-indexer/src/infra/storage.rs b/spo-indexer/src/infra/storage.rs
new file mode 100644
index 000000000..bc92c053f
--- /dev/null
+++ b/spo-indexer/src/infra/storage.rs
@@ -0,0 +1,415 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::domain::{
+    self, Epoch, PoolMetadata, SPO, SPOEpochPerformance, SPOHistory, ValidatorMembership,
+};
+use fastrace::trace;
+use indoc::indoc;
+use sqlx::types::chrono::{DateTime, Utc};
+
+#[cfg(feature = "cloud")]
+/// Sqlx transaction for Postgres.
+type SqlxTransaction = sqlx::Transaction<'static, sqlx::Postgres>;
+
+#[cfg(feature = "standalone")]
+/// Sqlx transaction for Sqlite.
+type SqlxTransaction = sqlx::Transaction<'static, sqlx::Sqlite>;
+
+/// Unified storage implementation for PostgreSQL (cloud) and SQLite (standalone). Uses Cargo
+/// features to select the appropriate database backend at build time.
+#[derive(Debug, Clone)]
+pub struct Storage {
+    #[cfg(feature = "cloud")]
+    pool: indexer_common::infra::pool::postgres::PostgresPool,
+
+    #[cfg(feature = "standalone")]
+    pool: indexer_common::infra::pool::sqlite::SqlitePool,
+}
+
+impl Storage {
+    #[cfg(feature = "cloud")]
+    pub fn new(pool: indexer_common::infra::pool::postgres::PostgresPool) -> Self {
+        Self { pool }
+    }
+
+    #[cfg(feature = "standalone")]
+    pub fn new(pool: indexer_common::infra::pool::sqlite::SqlitePool) -> Self {
+        Self { pool }
+    }
+}
+
+impl domain::storage::Storage for Storage {
+    #[trace]
+    async fn create_tx(&self) -> Result<SqlxTransaction, sqlx::Error> {
+        Ok(self.pool.begin().await?)
+    }
+
+    async fn get_latest_epoch(&self) -> Result<Option<Epoch>, sqlx::Error> {
+        let query = indoc! {"
+            SELECT
+                epoch_no,
+                starts_at,
+                ends_at
+            FROM epochs
+            ORDER BY epoch_no
+            DESC LIMIT 1
+        "};
+
+        sqlx::query_as::<_, (i64, DateTime<Utc>, DateTime<Utc>)>(query)
+            .fetch_optional(&*self.pool)
+            .await?
+            .map(|(epoch_no, starts_at, ends_at)| {
+                Ok(Epoch {
+                    epoch_no: epoch_no as u32,
+                    // return millis to domain
+                    starts_at: starts_at.timestamp_millis(),
+                    ends_at: ends_at.timestamp_millis(),
+                })
+            })
+            .transpose()
+    }
+
+    #[trace]
+    async fn save_epoch(&self, epoch: &Epoch, tx: &mut SqlxTransaction) -> Result<(), sqlx::Error> {
+        sqlx::query(indoc! {
+            "INSERT INTO epochs (epoch_no, starts_at, ends_at)
+            VALUES ($1, $2, $3)"
+        })
+        .bind(epoch.epoch_no as i64)
+        // epoch.starts_at/ends_at are in millis; store as timestamptz
+        .bind(
+            DateTime::from_timestamp(
+                epoch.starts_at / 1000,
+                ((epoch.starts_at % 1000) * 1_000_000) as u32,
+            )
+            .unwrap_or(DateTime::<Utc>::default()),
+        )
+        .bind(
+            DateTime::from_timestamp(
+                epoch.ends_at / 1000,
+                ((epoch.ends_at % 1000) * 1_000_000) as u32,
+            )
+            .unwrap_or(DateTime::<Utc>::default()),
+        )
+        .execute(&mut **tx)
+        .await?;
+
+        Ok(())
+    }
+
+    #[trace]
+    async fn save_spo(&self, spo: &SPO, tx: &mut SqlxTransaction) -> Result<(), sqlx::Error> {
+        sqlx::query(indoc! {
+            "INSERT INTO spo_identity (
+                spo_sk,
+                sidechain_pubkey,
+                pool_id,
+                mainchain_pubkey,
+                aura_pubkey
+            )
+            SELECT $1, $2, $3, $4, $5
+            WHERE NOT EXISTS (
+                SELECT 1 FROM spo_identity si
+                WHERE si.spo_sk = $1
+                    OR (si.mainchain_pubkey IS NOT DISTINCT FROM $4)
+                    OR (si.aura_pubkey IS NOT DISTINCT FROM $5)
+                    OR (si.sidechain_pubkey IS NOT DISTINCT FROM $2)
+            )
+            ON CONFLICT DO NOTHING"
+        })
+        .bind(&spo.spo_sk)
+        .bind(&spo.sidechain_pubkey)
+        .bind(&spo.pool_id)
+        .bind(&spo.mainchain_pubkey)
+        .bind(&spo.aura_pubkey)
+        .execute(&mut **tx)
+        .await?;
+
+        Ok(())
+    }
+
+    #[trace]
+    async fn save_membership(
+        &self,
+        memberships: &Vec<ValidatorMembership>,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error> {
+        for member in memberships.iter() {
+            sqlx::query(indoc! {
+                "INSERT INTO committee_membership (
+                    spo_sk,
+                    sidechain_pubkey,
+                    epoch_no,
+                    position,
+                    expected_slots
+                )
+                VALUES ($1, $2, $3, $4, $5)
+                ON CONFLICT (epoch_no, position) DO NOTHING" // Prevents re-insertion errors
+            })
+            .bind(&member.spo_sk)
+            .bind(&member.sidechain_pubkey)
+            .bind(member.epoch_no as i64)
+            .bind(member.position as i32)
+            .bind(member.expected_slots as i32)
+            .execute(&mut **tx)
+            .await?;
+        }
+
+        Ok(())
+    }
+
+    async fn save_spo_performance(
+        &self,
+        metadata: &SPOEpochPerformance,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error> {
+        sqlx::query(indoc! {
+            "INSERT INTO spo_epoch_performance (
+                spo_sk,
+                identity_label,
+                epoch_no,
+                expected_blocks,
+                produced_blocks
+            )
+            SELECT $1, $2, $3, $4, $5
+            WHERE EXISTS (SELECT 1 FROM spo_identity si WHERE si.spo_sk = $1)
+            ON CONFLICT (epoch_no, spo_sk) DO NOTHING"
+        })
+        .bind(&metadata.spo_sk)
+        .bind(&metadata.identity_label)
+        .bind(metadata.epoch_no as i64)
+        .bind(metadata.expected_blocks as i32)
+        .bind(metadata.produced_blocks as i32)
+        .execute(&mut **tx)
+        .await?;
+
+        Ok(())
+    }
+
+    async fn save_pool_meta(
+        &self,
+        metadata: &PoolMetadata,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error> {
+        sqlx::query(indoc! {
+            "INSERT INTO pool_metadata_cache (
+                pool_id, hex_id, name, ticker, homepage_url, url
+            )
+            VALUES ($1, $2, $3, $4, $5, $6)
+            ON CONFLICT (pool_id) DO UPDATE SET
+                name = CASE WHEN EXCLUDED.name IS NOT NULL AND EXCLUDED.name <> '' THEN EXCLUDED.name ELSE pool_metadata_cache.name END,
+                ticker = CASE WHEN EXCLUDED.ticker IS NOT NULL AND EXCLUDED.ticker <> '' THEN EXCLUDED.ticker ELSE pool_metadata_cache.ticker END,
+                homepage_url = CASE WHEN EXCLUDED.homepage_url IS NOT NULL AND EXCLUDED.homepage_url <> '' THEN EXCLUDED.homepage_url ELSE pool_metadata_cache.homepage_url END,
+                url = CASE WHEN EXCLUDED.url IS NOT NULL AND EXCLUDED.url <> '' THEN EXCLUDED.url ELSE pool_metadata_cache.url END"
+        })
+        .bind(&metadata.pool_id)
+        .bind(&metadata.hex_id)
+        .bind(&metadata.name)
+        .bind(&metadata.ticker)
+        .bind(&metadata.homepage_url)
+        .bind(&metadata.url)
+        .execute(&mut **tx)
+        .await?;
+
+        Ok(())
+    }
+
+    async fn save_spo_history(
+        &self,
+        history: &SPOHistory,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error> {
+        let epoch = history.epoch_no as i64;
+
+        sqlx::query(indoc! {
+            "INSERT INTO spo_history (
+                spo_sk,
+                epoch_no,
+                status,
+                valid_from,
+                valid_to
+            )
+            SELECT $1, $2, $3, $4, $5
+            WHERE EXISTS (SELECT 1 FROM spo_identity si WHERE si.spo_sk = $1)
+            ON CONFLICT (spo_sk, epoch_no) DO UPDATE SET
+                valid_to = EXCLUDED.epoch_no,
+                status = EXCLUDED.status
+            "
+        })
+        .bind(history.spo_sk.clone())
+        .bind(epoch)
+        .bind(history.status.to_string())
+        .bind(epoch)
+        .bind(epoch)
+        .execute(&mut **tx)
+        .await?;
+
+        Ok(())
+    }
+
+    async fn list_pool_ids(&self, limit: i64, offset: i64) -> Result<Vec<String>, sqlx::Error> {
+        let query = indoc! {"
+            SELECT pool_id
+            FROM pool_metadata_cache
+            ORDER BY updated_at DESC, pool_id ASC
+            LIMIT $1 OFFSET $2
+        "};
+
+        let rows = sqlx::query_as::<_, (String,)>(query)
+            .bind(limit)
+            .bind(offset)
+            .fetch_all(&*self.pool)
+            .await?;
+
+        Ok(rows.into_iter().map(|(pid,)| pid).collect())
+    }
+
+    async fn list_pool_ids_after(&self, after: &str, limit: i64) -> Result<Vec<String>, sqlx::Error> {
+        let query = indoc! {"
+            SELECT pool_id
+            FROM pool_metadata_cache
+            WHERE pool_id > $1
+            ORDER BY pool_id ASC
+            LIMIT $2
+        "};
+
+        let rows = sqlx::query_as::<_, (String,)>(query)
+            .bind(after)
+            .bind(limit)
+            .fetch_all(&*self.pool)
+            .await?;
+        Ok(rows.into_iter().map(|(pid,)| pid).collect())
+    }
+
+    async fn save_stake_snapshot(
+        &self,
+        pool_id: &str,
+        live_stake: Option<&str>,
+        active_stake: Option<&str>,
+        live_delegators: Option<i64>,
+        live_saturation: Option<f64>,
+        declared_pledge: Option<&str>,
+        live_pledge: Option<&str>,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error> {
+        // Call the inherent implementation to avoid a recursive call to the trait method.
+        Storage::save_stake_snapshot(
+            self,
+            pool_id,
+            live_stake,
+            active_stake,
+            live_delegators,
+            live_saturation,
+            declared_pledge,
+            live_pledge,
+            tx,
+        )
+        .await
+    }
+
+    async fn insert_stake_history(
+        &self,
+        pool_id: &str,
+        mainchain_epoch: Option<i64>,
+        live_stake: Option<&str>,
+        active_stake: Option<&str>,
+        live_delegators: Option<i64>,
+        live_saturation: Option<f64>,
+        declared_pledge: Option<&str>,
+        live_pledge: Option<&str>,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error> {
+        sqlx::query(indoc! {
+            "INSERT INTO spo_stake_history (
+                pool_id, recorded_at, mainchain_epoch,
+                live_stake, active_stake, live_delegators, live_saturation, declared_pledge, live_pledge
+            ) VALUES ($1, NOW(), $2, CAST($3 AS NUMERIC), CAST($4 AS NUMERIC), $5, $6, CAST($7 AS NUMERIC), CAST($8 AS NUMERIC))"
+        })
+        .bind(pool_id)
+        .bind(mainchain_epoch)
+        .bind(live_stake)
+        .bind(active_stake)
+        .bind(live_delegators)
+        .bind(live_saturation)
+        .bind(declared_pledge)
+        .bind(live_pledge)
+        .execute(&mut **tx)
+        .await?;
+        Ok(())
+    }
+
+    async fn get_stake_refresh_cursor(&self) -> Result<Option<String>, sqlx::Error> {
+        let row = sqlx::query_as::<_, (Option<String>,)>(
+            "SELECT last_pool_id FROM spo_stake_refresh_state WHERE id = TRUE",
+        )
+        .fetch_optional(&*self.pool)
+        .await?;
+        Ok(row.and_then(|(p,)| p))
+    }
+
+    async fn set_stake_refresh_cursor(&self, pool_id: Option<&str>) -> Result<(), sqlx::Error> {
+        sqlx::query(indoc! {
+            "INSERT INTO spo_stake_refresh_state (id, last_pool_id, updated_at)
+            VALUES (TRUE, $1, NOW())
+            ON CONFLICT (id) DO UPDATE SET last_pool_id = EXCLUDED.last_pool_id, updated_at = NOW()"
+        })
+        .bind(pool_id)
+        .execute(&*self.pool)
+        .await?;
+        Ok(())
+    }
+}
+
+impl Storage {
+    /// Optional upsert for stake snapshot (DB-first; can be wired to external data later).
+    pub async fn save_stake_snapshot(
+        &self,
+        pool_id: &str,
+        live_stake: Option<&str>,
+        active_stake: Option<&str>,
+        live_delegators: Option<i64>,
+        live_saturation: Option<f64>,
+        declared_pledge: Option<&str>,
+        live_pledge: Option<&str>,
+        tx: &mut SqlxTransaction,
+    ) -> Result<(), sqlx::Error> {
+        sqlx::query(indoc! {
+            "INSERT INTO spo_stake_snapshot (
+                pool_id, live_stake, active_stake, live_delegators, live_saturation, declared_pledge, live_pledge
+            ) VALUES ($1, CAST($2 AS NUMERIC), CAST($3 AS NUMERIC), $4, $5, CAST($6 AS NUMERIC), CAST($7 AS NUMERIC))
+            ON CONFLICT (pool_id) DO UPDATE SET
+                live_stake = EXCLUDED.live_stake,
+                active_stake = EXCLUDED.active_stake,
+                live_delegators = EXCLUDED.live_delegators,
+                live_saturation = EXCLUDED.live_saturation,
+                declared_pledge = EXCLUDED.declared_pledge,
+                live_pledge = EXCLUDED.live_pledge,
+                updated_at = NOW()"
+        })
+        .bind(pool_id)
+        .bind(live_stake)
+        .bind(active_stake)
+        .bind(live_delegators)
+        .bind(live_saturation)
+        .bind(declared_pledge)
+        .bind(live_pledge)
+        .execute(&mut **tx)
+        .await?;
+
+        Ok(())
+    }
+}
diff --git a/spo-indexer/src/infra/subxt_node.rs b/spo-indexer/src/infra/subxt_node.rs
new file mode 100644
index 000000000..4cc858dda
--- /dev/null
+++ b/spo-indexer/src/infra/subxt_node.rs
@@ -0,0 +1,393 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
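+
+//! Node access layer: a subxt-based client for sidechain RPC calls and storage
+//! reads, plus Blockfrost lookups for Cardano pool metadata and stake data. A
+//! typical flow (sketch): `SPOClient::new(config)` connects and caches the epoch
+//! duration, after which `get_current_epoch`, `get_committee`, and
+//! `get_spo_registrations` drive the indexing loop in `application::run`.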
+
+use crate::{
+    domain::{
+        CandidateKeys, CandidateRegistration, Epoch, EpochCommitteeResponse, PoolMetadata,
+        SPORegistrationResponse, SidechainStatusResponse, Validator,
+    },
+    utils::remove_hex_prefix,
+};
+use blockfrost::{BlockfrostAPI, BlockfrostError};
+use indexer_common::error::BoxError;
+use reqwest::Client as HttpClient;
+use secrecy::{ExposeSecret, SecretString};
+use serde_json::value::RawValue;
+use std::collections::HashMap;
+use subxt::{
+    OnlineClient, PolkadotConfig,
+    backend::rpc::reconnecting_rpc_client::{ExponentialBackoff, RpcClient},
+    utils::H256,
+};
+use thiserror::Error;
+use tokio::time;
+
+const SLOT_PER_EPOCH_KEY: &str = "3eaeb1cee77dc09baac326e5a1d29726f38178a5f54bee65a8446a55b585f261";
+const MIN_COMMITTEE_SIZE: usize = 300;
+pub const SLOT_DURATION: u32 = 6000;
+
+#[subxt::subxt(runtime_metadata_path = "./src/meta/polkadot_metadata.scale")]
+pub mod polkadot {}
+
+/// Config for node connection.
+#[derive(Debug, Clone, serde::Deserialize)]
+pub struct Config {
+    pub url: String,
+
+    pub blockfrost_id: SecretString,
+
+    #[serde(with = "humantime_serde")]
+    pub reconnect_max_delay: std::time::Duration,
+
+    pub reconnect_max_attempts: usize,
+}
+
+/// A [Node] implementation based on subxt.
+#[derive(Clone)]
+pub struct SPOClient {
+    pub epoch_duration: u32,
+    pub slots_per_epoch: u32,
+
+    rpc_client: RpcClient,
+    blockfrost: BlockfrostAPI,
+    http: HttpClient,
+    config: Config,
+    api: OnlineClient<PolkadotConfig>,
+}
+
+// All hex strings leaving this client are normalized without the 0x prefix.
+impl SPOClient {
+    /// Create a new [SPOClient] with the given [Config].
+    pub async fn new(config: Config) -> Result<Self, SPOClientError> {
+        let retry_policy = ExponentialBackoff::from_millis(10)
+            .max_delay(config.reconnect_max_delay)
+            .take(config.reconnect_max_attempts);
+        let api = OnlineClient::<PolkadotConfig>::from_url(&config.url)
+            .await
+            .map_err(|error| SPOClientError::Subxt(error.into()))?;
+        let rpc_client = RpcClient::builder()
+            .retry_policy(retry_policy)
+            .build(&config.url)
+            .await
+            .map_err(|error| SPOClientError::Subxt(error.into()))?;
+        let blockfrost =
+            BlockfrostAPI::new(config.blockfrost_id.expose_secret(), Default::default());
+        let http = HttpClient::builder()
+            .user_agent("midnight-spo-indexer/1.0")
+            .build()
+            .map_err(|e| SPOClientError::UnexpectedResponse(e.to_string()))?;
+        let (epoch_duration, slots_per_epoch) = get_epoch_duration(&api).await?;
+
+        Ok(Self {
+            rpc_client,
+            blockfrost,
+            http,
+            epoch_duration,
+            slots_per_epoch,
+            config,
+            api,
+        })
+    }
+
+    pub async fn get_sidechain_status(&self) -> Result<SidechainStatusResponse, SPOClientError> {
+        let raw_response = self
+            .rpc_client
+            .request("sidechain_getStatus".to_string(), None)
+            .await
+            .map_err(|e| {
+                SPOClientError::RpcCall("sidechain_getStatus".to_string(), e.to_string())
+            })?;
+
+        let response: SidechainStatusResponse = serde_json::from_str(raw_response.get())
+            .map_err(|error| SPOClientError::UnexpectedResponse(error.to_string()))?;
+
+        Ok(response)
+    }
+
+    pub async fn get_block_timestamp(&self, block_number: u32) -> Result<u64, SPOClientError> {
+        let params_blockhash = RawValue::from_string(format!("[{}]", block_number)).unwrap();
+        let blockhash_res = self
+            .rpc_client
+            .request("chain_getBlockHash".to_string(), Some(params_blockhash))
+            .await
+            .map_err(|e| {
+                SPOClientError::RpcCall("chain_getBlockHash".to_string(), e.to_string())
+            })?;
+
+        let str_blockhash = remove_hex_prefix(blockhash_res.get().to_string().replace("\"", ""));
+        let raw_blockhash = H256::from_slice(hex::decode(str_blockhash).unwrap().as_slice());
+        let storage_query = polkadot::storage().timestamp().now();
+
+        let result = self
+            .api
+            .storage()
+            .at(raw_blockhash)
+            .fetch(&storage_query)
+            .await
+            .unwrap();
+        let timestamp = result.unwrap();
+
+        Ok(timestamp)
+    }
+
+    pub async fn get_first_epoch_num(&self) -> Result<u32, SPOClientError> {
+        let current_epoch = self.get_current_epoch().await?;
+        let block_timestamp = self.get_block_timestamp(1).await?;
+        let epoch_duration = self.epoch_duration;
+
+        let num_epochs: u64 =
+            (current_epoch.ends_at as u64 - block_timestamp as u64) / (epoch_duration as u64);
+
+        Ok(current_epoch.epoch_no - num_epochs as u32)
+    }
+
+    pub async fn get_current_epoch(&self) -> Result<Epoch, SPOClientError> {
+        let sidechain_status = self.get_sidechain_status().await?;
+        let epoch = Epoch {
+            epoch_no: sidechain_status.sidechain.epoch,
+            starts_at: sidechain_status.sidechain.next_epoch_timestamp - self.epoch_duration as i64,
+            ends_at: sidechain_status.sidechain.next_epoch_timestamp,
+        };
+
+        Ok(epoch)
+    }
+
+    pub async fn get_spo_registrations(
+        &self,
+        epoch_number: u32,
+    ) -> Result<SPORegistrationResponse, SPOClientError> {
+        let rpc_params = RawValue::from_string(format!("[{}]", epoch_number)).unwrap();
+
+        let raw_response = self
+            .rpc_client
+            .request(
+                "sidechain_getAriadneParameters".to_string(),
+                Some(rpc_params),
+            )
+            .await
+            .map_err(|e| {
+                SPOClientError::RpcCall("sidechain_getAriadneParameters".to_string(), e.to_string())
+            })?;
+
+        let mut reg_response: SPORegistrationResponse = serde_json::from_str(raw_response.get())
+            .map_err(|error| SPOClientError::UnexpectedResponse(error.to_string()))?;
+        let mut response: HashMap<String, Vec<CandidateRegistration>> = HashMap::new();
+
+        for (mut key, registrations) in reg_response.clone().candidate_registrations {
+            key = remove_hex_prefix(key);
+
+            let cleaned_registrations: Vec<CandidateRegistration> = registrations
+                .into_iter()
+                .map(|reg| CandidateRegistration {
+                    sidechain_pub_key: remove_hex_prefix(reg.sidechain_pub_key),
+                    sidechain_account_id: reg.sidechain_account_id,
+                    mainchain_pub_key: remove_hex_prefix(reg.mainchain_pub_key),
+                    cross_chain_pub_key: remove_hex_prefix(reg.cross_chain_pub_key),
+                    keys: CandidateKeys {
+                        aura: remove_hex_prefix(reg.keys.aura),
+                        gran: remove_hex_prefix(reg.keys.gran),
+                    },
+                    sidechain_signature: remove_hex_prefix(reg.sidechain_signature),
+                    mainchain_signature: remove_hex_prefix(reg.mainchain_signature),
+                    cross_chain_signature: remove_hex_prefix(reg.cross_chain_signature),
+                    utxo: reg.utxo,
+                    is_valid: reg.is_valid,
+                    invalid_reasons: reg.invalid_reasons,
+                })
+                .collect();
+
+            response.insert(key, cleaned_registrations);
+        }
+
+        reg_response.candidate_registrations = response;
+
+        Ok(reg_response)
+    }
+
+    pub async fn get_committee(&self, epoch_number: u32) -> Result<Vec<Validator>, SPOClientError> {
+        let rpc_params = RawValue::from_string(format!("[{}]", epoch_number)).map_err(|e| {
+            SPOClientError::UnexpectedResponse(format!("Failed to create RPC params: {}", e))
+        })?;
+
+        loop {
+            let raw_response = self
+                .rpc_client
+                .request(
+                    "sidechain_getEpochCommittee".to_string(),
+                    Some(rpc_params.clone()),
+                )
+                .await
+                .map_err(|e| {
+                    SPOClientError::RpcCall(
+                        "sidechain_getEpochCommittee".to_string(),
+                        e.to_string(),
+                    )
+                });
+
+            if raw_response.is_err() {
+                return Ok(vec![]);
+            }
+
+            let response: EpochCommitteeResponse = serde_json::from_str(raw_response?.get())
+                .map_err(|error| SPOClientError::UnexpectedResponse(error.to_string()))?;
+
+            let committee_size = response.committee.len();
+            if committee_size >= MIN_COMMITTEE_SIZE {
+                let mut committee = vec![];
+                for (index, pk) in response.committee.iter().enumerate() {
+                    committee.push(Validator {
+                        epoch_no: response.sidechain_epoch,
+                        position: index as u64,
+                        sidechain_pubkey: remove_hex_prefix(pk.sidechain_pub_key.clone()),
+                    });
+                }
+
+                return Ok(committee);
+            }
+
+            time::sleep(time::Duration::from_secs(
+                self.config.reconnect_max_delay.as_secs(),
+            ))
+            .await;
+        }
+    }
+
+    pub async fn get_pool_metadata(&self, pool_id: String) -> Result<PoolMetadata, SPOClientError> {
+        let raw_meta = self
+            .blockfrost
+            .pools_metadata(&pool_id)
+            .await
+            .map_err(SPOClientError::Blockfrost)?;
+        let meta = PoolMetadata {
+            pool_id,
+            hex_id: remove_hex_prefix(raw_meta.hex),
+            name: raw_meta.name.unwrap_or("".to_string()),
+            ticker: raw_meta.ticker.unwrap_or("".to_string()),
+            homepage_url: raw_meta.homepage.unwrap_or("".to_string()),
+            url: raw_meta.url.unwrap_or("".to_string()),
+        };
+
+        Ok(meta)
+    }
+
+    /// Minimal pool stake data from Blockfrost /pools/{pool_id}.
+    pub async fn get_pool_data(&self, pool_id: &str) -> Result<PoolStakeData, SPOClientError> {
+        let base = self.blockfrost_base_url();
+        let url = format!("{}/pools/{}", base, pool_id);
+        let resp = self
+            .http
+            .get(&url)
+            .header("project_id", self.config.blockfrost_id.expose_secret())
+            .send()
+            .await
+            .map_err(|e| SPOClientError::UnexpectedResponse(e.to_string()))?;
+        let status = resp.status();
+        if !status.is_success() {
+            let txt = resp.text().await.unwrap_or_default();
+            return Err(SPOClientError::UnexpectedResponse(format!(
+                "Blockfrost GET /pools failed: {} {}",
+                status, txt
+            )));
+        }
+        let v: serde_json::Value = resp
+            .json()
+            .await
+            .map_err(|e| SPOClientError::UnexpectedResponse(e.to_string()))?;
+        Ok(PoolStakeData::from_json(&v))
+    }
+
+    fn blockfrost_base_url(&self) -> &'static str {
+        let id = self.config.blockfrost_id.expose_secret();
+        if id.starts_with("mainnet") {
+            "https://cardano-mainnet.blockfrost.io/api/v0"
+        } else if id.starts_with("preprod") {
+            "https://cardano-preprod.blockfrost.io/api/v0"
+        } else if id.starts_with("preview") {
+            "https://cardano-preview.blockfrost.io/api/v0"
+        } else if id.starts_with("testnet") {
+            "https://cardano-testnet.blockfrost.io/api/v0"
+        } else {
+            // default to preview
+            "https://cardano-preview.blockfrost.io/api/v0"
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct PoolStakeData {
+    pub live_stake: Option<String>,
+    pub active_stake: Option<String>,
+    pub live_delegators: Option<i64>,
+    pub live_saturation: Option<f64>,
+    pub declared_pledge: Option<String>,
+    pub live_pledge: Option<String>,
+}
+
+impl PoolStakeData {
+    fn from_json(v: &serde_json::Value) -> Self {
+        Self {
+            live_stake: v
+                .get("live_stake")
+                .and_then(|x| x.as_str().map(|s| s.to_string())),
+            active_stake: v
+                .get("active_stake")
+                .and_then(|x| x.as_str().map(|s| s.to_string())),
+            live_delegators: v.get("live_delegators").and_then(|x| x.as_i64()),
+            live_saturation: v.get("live_saturation").and_then(|x| x.as_f64()),
+            declared_pledge: v
+                .get("declared_pledge")
+                .and_then(|x| x.as_str().map(|s| s.to_string())),
+            live_pledge: v
+                .get("live_pledge")
+                .and_then(|x| x.as_str().map(|s| s.to_string())),
+        }
+    }
+}
+
+async fn get_epoch_duration(
+    api: &OnlineClient<PolkadotConfig>,
+) -> Result<(u32, u32), SPOClientError> {
+    let slot: Vec<u8> = hex::decode(SLOT_PER_EPOCH_KEY).unwrap();
+    let storage_cli = api
+        .storage()
+        .at_latest()
+        .await
+        .map_err(|error| SPOClientError::Subxt(error.into()))?;
+
+    let res = storage_cli
+        .fetch_raw(slot)
+        .await
+        .map_err(|_| SPOClientError::UnexpectedResponse("".to_string()))?;
+    let raw_response: [u8; 4] = res.unwrap().try_into().unwrap();
+    let slots_per_epoch = u32::from_le_bytes(raw_response);
+
+    Ok((SLOT_DURATION * slots_per_epoch, slots_per_epoch))
+}
+
+#[derive(Debug, Error)]
+pub enum SPOClientError {
+    #[error("cannot create reconnecting subxt RPC client")]
+    Subxt(#[source] BoxError),
+
+    #[error("cannot make rpc call {0}. Error: {1}")]
+    RpcCall(String, String),
+
+    #[error("api call error")]
+    Blockfrost(#[from] BlockfrostError),
+
+    #[error("unexpected response: {0}")]
+    UnexpectedResponse(String),
+}
\ No newline at end of file
diff --git a/spo-indexer/src/lib.rs b/spo-indexer/src/lib.rs
new file mode 100644
index 000000000..8a85c0a49
--- /dev/null
+++ b/spo-indexer/src/lib.rs
@@ -0,0 +1,21 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+pub mod application;
+#[cfg(feature = "cloud")]
+pub mod config;
+pub mod domain;
+pub mod infra;
+pub mod utils;
diff --git a/spo-indexer/src/main.rs b/spo-indexer/src/main.rs
new file mode 100644
index 000000000..e558fca6b
--- /dev/null
+++ b/spo-indexer/src/main.rs
@@ -0,0 +1,88 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(feature = "cloud")]
+#[tokio::main]
+async fn main() {
+    use indexer_common::telemetry;
+    use log::error;
+    use std::panic;
+
+    // Initialize logging.
+    telemetry::init_logging();
+
+    // Replace the default panic hook with one that uses structured logging at ERROR level.
+    panic::set_hook(Box::new(|panic| error!(panic:%; "process panicked")));
+
+    // Run and log any error.
+    if let Err(error) = run().await {
+        let backtrace = error.backtrace();
+        let error = format!("{error:#}");
+        error!(error, backtrace:%; "process exited with ERROR")
+    }
+}
+
+#[cfg(feature = "cloud")]
+async fn run() -> anyhow::Result<()> {
+    use anyhow::Context;
+    use indexer_common::{
+        config::ConfigExt,
+        infra::{migrations, pool},
+        telemetry,
+    };
+    use log::info;
+    use spo_indexer::{
+        application,
+        config::Config,
+        infra::{self, subxt_node::SPOClient},
+    };
+
+    // Load configuration.
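+    // Startup sequence: load config, initialize tracing and metrics, connect to the
+    // node via subxt, create the Postgres pool (optionally running migrations), then
+    // hand off to the application loop.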
+    let config = Config::load().context("load configuration")?;
+    info!(config:?; "starting");
+    let Config {
+        run_migrations,
+        application_config,
+        infra_config,
+        telemetry_config:
+            telemetry::Config {
+                tracing_config,
+                metrics_config,
+            },
+    } = config.clone();
+
+    // Initialize tracing and metrics.
+    telemetry::init_tracing(tracing_config);
+    telemetry::init_metrics(metrics_config);
+
+    let node = SPOClient::new(infra_config.node_config)
+        .await
+        .context("create SPOClient")?;
+
+    let pool = pool::postgres::PostgresPool::new(infra_config.storage_config)
+        .await
+        .context("create DB pool for Postgres")?;
+    if run_migrations {
+        migrations::postgres::run(&pool)
+            .await
+            .context("run Postgres migrations")?;
+    }
+    let storage = infra::storage::Storage::new(pool);
+
+    application::run(application_config, node, storage).await
+}
+
+#[cfg(not(feature = "cloud"))]
+fn main() {
+    unimplemented!()
+}
diff --git a/spo-indexer/src/meta/polkadot_metadata.scale b/spo-indexer/src/meta/polkadot_metadata.scale
new file mode 100644
index 000000000..dc10f3e0c
Binary files /dev/null and b/spo-indexer/src/meta/polkadot_metadata.scale differ
diff --git a/spo-indexer/src/utils.rs b/spo-indexer/src/utils.rs
new file mode 100644
index 000000000..e6890be72
--- /dev/null
+++ b/spo-indexer/src/utils.rs
@@ -0,0 +1,12 @@
+pub fn remove_hex_prefix(s: String) -> String {
+    match s.strip_prefix("0x") {
+        Some(stripped) => stripped.to_string(),
+        None => s,
+    }
+}
+
+pub fn hex_to_bytes(s: &str) -> Vec<u8> {
+    let hex_str = remove_hex_prefix(s.to_string());
+    hex::decode(hex_str).unwrap()
+}