diff --git a/Cargo.lock b/Cargo.lock index c23352acee..b44388d652 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2548,6 +2548,8 @@ dependencies = [ "cow-contract-izeroex", "cow-contract-liquoricesettlement", "cow-contract-mockerc4626wrapper", + "cow-contract-mockuniswapv3factory", + "cow-contract-mockuniswapv3pool", "cow-contract-nonstandarderc20balances", "cow-contract-pancakerouter", "cow-contract-permit2", @@ -3307,6 +3309,28 @@ dependencies = [ "anyhow", ] +[[package]] +name = "cow-contract-mockuniswapv3factory" +version = "0.1.0" +dependencies = [ + "alloy-contract", + "alloy-primitives", + "alloy-provider", + "alloy-sol-types", + "anyhow", +] + +[[package]] +name = "cow-contract-mockuniswapv3pool" +version = "0.1.0" +dependencies = [ + "alloy-contract", + "alloy-primitives", + "alloy-provider", + "alloy-sol-types", + "anyhow", +] + [[package]] name = "cow-contract-nonstandarderc20balances" version = "0.1.0" @@ -4015,6 +4039,7 @@ dependencies = [ "number", "observe", "orderbook", + "pool-indexer", "price-estimation", "refunder", "reqwest 0.13.2", @@ -5397,6 +5422,7 @@ dependencies = [ "serde", "serde_json", "serde_with", + "shared", "strum", "testlib", "thiserror 1.0.69", @@ -6273,6 +6299,42 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "pool-indexer" +version = "0.1.0" +dependencies = [ + "alloy", + "alloy-primitives", + "anyhow", + "async-trait", + "axum 0.8.8", + "bigdecimal", + "clap", + "configs", + "contracts", + "ethrpc", + "futures", + "mimalloc", + "num", + "number", + "observe", + "prometheus", + "prometheus-metric-storage", + "reqwest 0.13.2", + "scopeguard", + "serde", + "serde_json", + "shared", + "sqlx", + "tikv-jemallocator", + "tokio", + "toml", + "tower 0.5.3", + "tower-http", + "tracing", + "url", +] + [[package]] name = "portable-atomic" version = "1.13.0" @@ -7682,7 +7744,6 @@ dependencies = [ 
"humantime", "indexmap 2.13.0", "itertools 0.14.0", - "liquidity-sources", "maplit", "mockall", "model", diff --git a/Cargo.toml b/Cargo.toml index 7ad6cdcfab..f8c1a8f357 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,6 +90,7 @@ order-validation = { path = "crates/order-validation" } orderbook = { path = "crates/orderbook" } paste = "1.0" pin-project-lite = "0.2.14" +pool-indexer = { path = "crates/pool-indexer" } prettyplease = "0.2.37" price-estimation = { path = "crates/price-estimation" } proc-macro2 = "1.0.103" diff --git a/Dockerfile b/Dockerfile index 0205d3eda8..b2104f3e31 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,13 +19,14 @@ RUN rustup install stable && rustup default stable COPY . . RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=/src/target \ CARGO_PROFILE_RELEASE_DEBUG=1 RUSTFLAGS="${RUSTFLAGS}" cargo build --release \ - -p autopilot -p driver -p orderbook -p refunder -p solvers \ + -p autopilot -p driver -p orderbook -p refunder -p solvers -p pool-indexer \ ${CARGO_BUILD_FEATURES} && \ cp target/release/autopilot / && \ cp target/release/driver / && \ cp target/release/orderbook / && \ cp target/release/refunder / && \ - cp target/release/solvers / + cp target/release/solvers / && \ + cp target/release/pool-indexer / # Create an intermediate image to extract the binaries FROM docker.io/debian:bookworm-slim AS intermediate @@ -53,6 +54,10 @@ FROM intermediate AS solvers COPY --from=cargo-build /solvers /usr/local/bin/solvers ENTRYPOINT [ "solvers" ] +FROM intermediate AS pool-indexer +COPY --from=cargo-build /pool-indexer /usr/local/bin/pool-indexer +ENTRYPOINT [ "pool-indexer" ] + # Extract Binary FROM intermediate @@ -62,5 +67,6 @@ COPY --from=cargo-build /driver /usr/local/bin/driver COPY --from=cargo-build /orderbook /usr/local/bin/orderbook COPY --from=cargo-build /refunder /usr/local/bin/refunder COPY --from=cargo-build /solvers /usr/local/bin/solvers +COPY --from=cargo-build /pool-indexer 
/usr/local/bin/pool-indexer ENTRYPOINT ["/usr/bin/tini", "-s", "--"] diff --git a/contracts/artifacts/MockUniswapV3Factory.json b/contracts/artifacts/MockUniswapV3Factory.json new file mode 100644 index 0000000000..806da46e6a --- /dev/null +++ b/contracts/artifacts/MockUniswapV3Factory.json @@ -0,0 +1,78 @@ +{ + "abi": [ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "token0", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "token1", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint24", + "name": "fee", + "type": "uint24" + }, + { + "indexed": false, + "internalType": "int24", + "name": "tickSpacing", + "type": "int24" + }, + { + "indexed": false, + "internalType": "address", + "name": "pool", + "type": "address" + } + ], + "name": "PoolCreated", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "tokenA", + "type": "address" + }, + { + "internalType": "address", + "name": "tokenB", + "type": "address" + }, + { + "internalType": "uint24", + "name": "_fee", + "type": "uint24" + } + ], + "name": "createPool", + "outputs": [ + { + "internalType": "address", + "name": "pool", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } + ], + "bytecode": 
"0x6080604052348015600e575f5ffd5b506106dd8061001c5f395ff3fe608060405234801561000f575f5ffd5b5060043610610029575f3560e01c8063a16712951461002d575b5f5ffd5b61004061003b3660046101ab565b610069565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b5f5f5f8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16106100a65784866100a9565b85855b915091505f8282866040516100bd90610176565b73ffffffffffffffffffffffffffffffffffffffff938416815292909116602083015262ffffff166040820152606001604051809103905ff080158015610106573d5f5f3e3d5ffd5b5060408051600a815273ffffffffffffffffffffffffffffffffffffffff808416602083015292965086935062ffffff88169280861692908716917f783cca1c0412dd0d695e784568c96da2e9c22ff989357a2e8b1d9b2b4e6b7118910160405180910390a45050509392505050565b6104da806101f783390190565b803573ffffffffffffffffffffffffffffffffffffffff811681146101a6575f5ffd5b919050565b5f5f5f606084860312156101bd575f5ffd5b6101c684610183565b92506101d460208501610183565b9150604084013562ffffff811681146101eb575f5ffd5b80915050925092509256fe60e060405234801561000f575f5ffd5b506040516104da3803806104da83398101604081905261002e91610069565b6001600160a01b03928316608052911660a05262ffffff1660c0526100b4565b80516001600160a01b0381168114610064575f5ffd5b919050565b5f5f5f6060848603121561007b575f5ffd5b6100848461004e565b92506100926020850161004e565b9150604084015162ffffff811681146100a9575f5ffd5b809150509250925092565b60805160a05160c0516103fd6100dd5f395f61012c01525f61010501525f607801526103fd5ff3fe608060405234801561000f575f5ffd5b506004361061006f575f3560e01c8063ddca3f431161004d578063ddca3f4314610127578063efe27fa314610162578063f637731d14610177575f5ffd5b80630dfe1681146100735780631a686502146100c4578063d21220a714610100575b5f5ffd5b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b5f546100df906fffffffffffffffffffffffffffffffff1681565b6040516fffffffffffffffffffffffffffffffff90911
681526020016100bb565b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b61014e7f000000000000000000000000000000000000000000000000000000000000000081565b60405162ffffff90911681526020016100bb565b610175610170366004610312565b61018a565b005b61017561018536600461037b565b610287565b5f805482919081906101af9084906fffffffffffffffffffffffffffffffff1661039d565b92506101000a8154816fffffffffffffffffffffffffffffffff02191690836fffffffffffffffffffffffffffffffff1602179055508160020b8360020b8573ffffffffffffffffffffffffffffffffffffffff167f7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde33855f5f604051610279949392919073ffffffffffffffffffffffffffffffffffffffff9490941684526fffffffffffffffffffffffffffffffff9290921660208401526040830152606082015260800190565b60405180910390a450505050565b6040805173ffffffffffffffffffffffffffffffffffffffff831681525f60208201527f98636036cb66a9c19a37435efc1e90142190214e8abeb821bdba3f2990dd4c95910160405180910390a150565b73ffffffffffffffffffffffffffffffffffffffff811681146102f9575f5ffd5b50565b8035600281900b811461030d575f5ffd5b919050565b5f5f5f5f60808587031215610325575f5ffd5b8435610330816102d8565b935061033e602086016102fc565b925061034c604086016102fc565b915060608501356fffffffffffffffffffffffffffffffff81168114610370575f5ffd5b939692955090935050565b5f6020828403121561038b575f5ffd5b8135610396816102d8565b9392505050565b6fffffffffffffffffffffffffffffffff81811683821601908111156103ea577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b9291505056fea164736f6c634300081e000aa164736f6c634300081e000a", + "deployedBytecode": 
"0x608060405234801561000f575f5ffd5b5060043610610029575f3560e01c8063a16712951461002d575b5f5ffd5b61004061003b3660046101ab565b610069565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b5f5f5f8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16106100a65784866100a9565b85855b915091505f8282866040516100bd90610176565b73ffffffffffffffffffffffffffffffffffffffff938416815292909116602083015262ffffff166040820152606001604051809103905ff080158015610106573d5f5f3e3d5ffd5b5060408051600a815273ffffffffffffffffffffffffffffffffffffffff808416602083015292965086935062ffffff88169280861692908716917f783cca1c0412dd0d695e784568c96da2e9c22ff989357a2e8b1d9b2b4e6b7118910160405180910390a45050509392505050565b6104da806101f783390190565b803573ffffffffffffffffffffffffffffffffffffffff811681146101a6575f5ffd5b919050565b5f5f5f606084860312156101bd575f5ffd5b6101c684610183565b92506101d460208501610183565b9150604084013562ffffff811681146101eb575f5ffd5b80915050925092509256fe60e060405234801561000f575f5ffd5b506040516104da3803806104da83398101604081905261002e91610069565b6001600160a01b03928316608052911660a05262ffffff1660c0526100b4565b80516001600160a01b0381168114610064575f5ffd5b919050565b5f5f5f6060848603121561007b575f5ffd5b6100848461004e565b92506100926020850161004e565b9150604084015162ffffff811681146100a9575f5ffd5b809150509250925092565b60805160a05160c0516103fd6100dd5f395f61012c01525f61010501525f607801526103fd5ff3fe608060405234801561000f575f5ffd5b506004361061006f575f3560e01c8063ddca3f431161004d578063ddca3f4314610127578063efe27fa314610162578063f637731d14610177575f5ffd5b80630dfe1681146100735780631a686502146100c4578063d21220a714610100575b5f5ffd5b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b5f546100df906fffffffffffffffffffffffffffffffff1681565b6040516fffffffffffffffffffffffffffffffff90911681526020016100bb565b61009a7f000000000000000000000000000
000000000000000000000000000000000000081565b61014e7f000000000000000000000000000000000000000000000000000000000000000081565b60405162ffffff90911681526020016100bb565b610175610170366004610312565b61018a565b005b61017561018536600461037b565b610287565b5f805482919081906101af9084906fffffffffffffffffffffffffffffffff1661039d565b92506101000a8154816fffffffffffffffffffffffffffffffff02191690836fffffffffffffffffffffffffffffffff1602179055508160020b8360020b8573ffffffffffffffffffffffffffffffffffffffff167f7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde33855f5f604051610279949392919073ffffffffffffffffffffffffffffffffffffffff9490941684526fffffffffffffffffffffffffffffffff9290921660208401526040830152606082015260800190565b60405180910390a450505050565b6040805173ffffffffffffffffffffffffffffffffffffffff831681525f60208201527f98636036cb66a9c19a37435efc1e90142190214e8abeb821bdba3f2990dd4c95910160405180910390a150565b73ffffffffffffffffffffffffffffffffffffffff811681146102f9575f5ffd5b50565b8035600281900b811461030d575f5ffd5b919050565b5f5f5f5f60808587031215610325575f5ffd5b8435610330816102d8565b935061033e602086016102fc565b925061034c604086016102fc565b915060608501356fffffffffffffffffffffffffffffffff81168114610370575f5ffd5b939692955090935050565b5f6020828403121561038b575f5ffd5b8135610396816102d8565b9392505050565b6fffffffffffffffffffffffffffffffff81811683821601908111156103ea577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b9291505056fea164736f6c634300081e000aa164736f6c634300081e000a", + "devdoc": { + "methods": {} + }, + "userdoc": { + "methods": {} + } +} diff --git a/contracts/artifacts/MockUniswapV3Pool.json b/contracts/artifacts/MockUniswapV3Pool.json new file mode 100644 index 0000000000..9061a1ca49 --- /dev/null +++ b/contracts/artifacts/MockUniswapV3Pool.json @@ -0,0 +1,194 @@ +{ + "abi": [ + { + "inputs": [ + { + "internalType": "address", + "name": "_token0", + "type": "address" + }, + { + "internalType": "address", + "name": "_token1", + "type": 
"address" + }, + { + "internalType": "uint24", + "name": "_fee", + "type": "uint24" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint160", + "name": "sqrtPriceX96", + "type": "uint160" + }, + { + "indexed": false, + "internalType": "int24", + "name": "tick", + "type": "int24" + } + ], + "name": "Initialize", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "indexed": true, + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "amount", + "type": "uint128" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "name": "Mint", + "type": "event" + }, + { + "inputs": [], + "name": "fee", + "outputs": [ + { + "internalType": "uint24", + "name": "", + "type": "uint24" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint160", + "name": "sqrtPriceX96", + "type": "uint160" + } + ], + "name": "initialize", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "liquidity", + "outputs": [ + { + "internalType": "uint128", + "name": "", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "internalType": "int24", + 
"name": "tickUpper", + "type": "int24" + }, + { + "internalType": "uint128", + "name": "amount", + "type": "uint128" + } + ], + "name": "mockMint", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "token0", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "token1", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + } + ], + "bytecode": "0x60e060405234801561000f575f5ffd5b506040516104da3803806104da83398101604081905261002e91610069565b6001600160a01b03928316608052911660a05262ffffff1660c0526100b4565b80516001600160a01b0381168114610064575f5ffd5b919050565b5f5f5f6060848603121561007b575f5ffd5b6100848461004e565b92506100926020850161004e565b9150604084015162ffffff811681146100a9575f5ffd5b809150509250925092565b60805160a05160c0516103fd6100dd5f395f61012c01525f61010501525f607801526103fd5ff3fe608060405234801561000f575f5ffd5b506004361061006f575f3560e01c8063ddca3f431161004d578063ddca3f4314610127578063efe27fa314610162578063f637731d14610177575f5ffd5b80630dfe1681146100735780631a686502146100c4578063d21220a714610100575b5f5ffd5b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b5f546100df906fffffffffffffffffffffffffffffffff1681565b6040516fffffffffffffffffffffffffffffffff90911681526020016100bb565b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b61014e7f000000000000000000000000000000000000000000000000000000000000000081565b60405162ffffff90911681526020016100bb565b610175610170366004610312565b61018a565b005b61017561018536600461037b565b610287565b5f805482919081906101af9084906fffffffffffffffffffffffffffffffff1661039d565b92506101000a8154816fffffffffffffffffffffffffffffffff02191690836ffffffffffff
fffffffffffffffffffff1602179055508160020b8360020b8573ffffffffffffffffffffffffffffffffffffffff167f7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde33855f5f604051610279949392919073ffffffffffffffffffffffffffffffffffffffff9490941684526fffffffffffffffffffffffffffffffff9290921660208401526040830152606082015260800190565b60405180910390a450505050565b6040805173ffffffffffffffffffffffffffffffffffffffff831681525f60208201527f98636036cb66a9c19a37435efc1e90142190214e8abeb821bdba3f2990dd4c95910160405180910390a150565b73ffffffffffffffffffffffffffffffffffffffff811681146102f9575f5ffd5b50565b8035600281900b811461030d575f5ffd5b919050565b5f5f5f5f60808587031215610325575f5ffd5b8435610330816102d8565b935061033e602086016102fc565b925061034c604086016102fc565b915060608501356fffffffffffffffffffffffffffffffff81168114610370575f5ffd5b939692955090935050565b5f6020828403121561038b575f5ffd5b8135610396816102d8565b9392505050565b6fffffffffffffffffffffffffffffffff81811683821601908111156103ea577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b9291505056fea164736f6c634300081e000a", + "deployedBytecode": 
"0x608060405234801561000f575f5ffd5b506004361061006f575f3560e01c8063ddca3f431161004d578063ddca3f4314610127578063efe27fa314610162578063f637731d14610177575f5ffd5b80630dfe1681146100735780631a686502146100c4578063d21220a714610100575b5f5ffd5b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b5f546100df906fffffffffffffffffffffffffffffffff1681565b6040516fffffffffffffffffffffffffffffffff90911681526020016100bb565b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b61014e7f000000000000000000000000000000000000000000000000000000000000000081565b60405162ffffff90911681526020016100bb565b610175610170366004610312565b61018a565b005b61017561018536600461037b565b610287565b5f805482919081906101af9084906fffffffffffffffffffffffffffffffff1661039d565b92506101000a8154816fffffffffffffffffffffffffffffffff02191690836fffffffffffffffffffffffffffffffff1602179055508160020b8360020b8573ffffffffffffffffffffffffffffffffffffffff167f7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde33855f5f604051610279949392919073ffffffffffffffffffffffffffffffffffffffff9490941684526fffffffffffffffffffffffffffffffff9290921660208401526040830152606082015260800190565b60405180910390a450505050565b6040805173ffffffffffffffffffffffffffffffffffffffff831681525f60208201527f98636036cb66a9c19a37435efc1e90142190214e8abeb821bdba3f2990dd4c95910160405180910390a150565b73ffffffffffffffffffffffffffffffffffffffff811681146102f9575f5ffd5b50565b8035600281900b811461030d575f5ffd5b919050565b5f5f5f5f60808587031215610325575f5ffd5b8435610330816102d8565b935061033e602086016102fc565b925061034c604086016102fc565b915060608501356fffffffffffffffffffffffffffffffff81168114610370575f5ffd5b939692955090935050565b5f6020828403121561038b575f5ffd5b8135610396816102d8565b9392505050565b6fffffffffffffffffffffffffffffffff81811683821601908111156103ea577f4e487b71000000000000000000000000000000000000000000000000000000005f5260116004526
0245ffd5b9291505056fea164736f6c634300081e000a", + "devdoc": { + "methods": {} + }, + "userdoc": { + "methods": {} + } +} diff --git a/contracts/artifacts/UniswapV3Pool.json b/contracts/artifacts/UniswapV3Pool.json index 8e04b0b6b5..1e55555ead 100644 --- a/contracts/artifacts/UniswapV3Pool.json +++ b/contracts/artifacts/UniswapV3Pool.json @@ -772,6 +772,49 @@ ], "stateMutability": "view", "type": "function" + }, + { + "inputs": [], + "name": "slot0", + "outputs": [ + { + "internalType": "uint160", + "name": "sqrtPriceX96", + "type": "uint160" + }, + { + "internalType": "int24", + "name": "tick", + "type": "int24" + }, + { + "internalType": "uint16", + "name": "observationIndex", + "type": "uint16" + }, + { + "internalType": "uint16", + "name": "observationCardinality", + "type": "uint16" + }, + { + "internalType": "uint16", + "name": "observationCardinalityNext", + "type": "uint16" + }, + { + "internalType": "uint8", + "name": "feeProtocol", + "type": "uint8" + }, + { + "internalType": "bool", + "name": "unlocked", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" } ], "_disabled": [ @@ -879,49 +922,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [], - "name": "slot0", - "outputs": [ - { - "internalType": "uint160", - "name": "sqrtPriceX96", - "type": "uint160" - }, - { - "internalType": "int24", - "name": "tick", - "type": "int24" - }, - { - "internalType": "uint16", - "name": "observationIndex", - "type": "uint16" - }, - { - "internalType": "uint16", - "name": "observationCardinality", - "type": "uint16" - }, - { - "internalType": "uint16", - "name": "observationCardinalityNext", - "type": "uint16" - }, - { - "internalType": "uint8", - "name": "feeProtocol", - "type": "uint8" - }, - { - "internalType": "bool", - "name": "unlocked", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [ { @@ -989,4 +989,4 @@ "type": "function" } ] -} +} \ No newline at end of file diff 
--git a/contracts/generated/contracts-facade/Cargo.toml b/contracts/generated/contracts-facade/Cargo.toml index 902099c993..9652fa9d20 100644 --- a/contracts/generated/contracts-facade/Cargo.toml +++ b/contracts/generated/contracts-facade/Cargo.toml @@ -67,6 +67,8 @@ cow-contract-iuniswapv3factory = { path = "../contracts-generated/iuniswapv3fact cow-contract-izeroex = { path = "../contracts-generated/izeroex" } cow-contract-liquoricesettlement = { path = "../contracts-generated/liquoricesettlement" } cow-contract-mockerc4626wrapper = { path = "../contracts-generated/mockerc4626wrapper" } +cow-contract-mockuniswapv3factory = { path = "../contracts-generated/mockuniswapv3factory" } +cow-contract-mockuniswapv3pool = { path = "../contracts-generated/mockuniswapv3pool" } cow-contract-nonstandarderc20balances = { path = "../contracts-generated/nonstandarderc20balances" } cow-contract-pancakerouter = { path = "../contracts-generated/pancakerouter" } cow-contract-permit2 = { path = "../contracts-generated/permit2" } diff --git a/contracts/generated/contracts-facade/src/lib.rs b/contracts/generated/contracts-facade/src/lib.rs index 331b6a21aa..586212c05e 100644 --- a/contracts/generated/contracts-facade/src/lib.rs +++ b/contracts/generated/contracts-facade/src/lib.rs @@ -84,6 +84,8 @@ pub mod test { cow_contract_cowprotocoltoken as CowProtocolToken, cow_contract_gashog as GasHog, cow_contract_mockerc4626wrapper as MockERC4626Wrapper, + cow_contract_mockuniswapv3factory as MockUniswapV3Factory, + cow_contract_mockuniswapv3pool as MockUniswapV3Pool, cow_contract_nonstandarderc20balances as NonStandardERC20Balances, cow_contract_remoteerc20balances as RemoteERC20Balances, }; diff --git a/contracts/generated/contracts-generated/mockuniswapv3factory/Cargo.toml b/contracts/generated/contracts-generated/mockuniswapv3factory/Cargo.toml new file mode 100644 index 0000000000..b592492933 --- /dev/null +++ b/contracts/generated/contracts-generated/mockuniswapv3factory/Cargo.toml @@ 
-0,0 +1,19 @@ +# Auto-generated by contracts-generate. Do not edit. +[package] +name = "cow-contract-mockuniswapv3factory" +version = "0.1.0" +edition = "2024" +publish = false + +[lib] +doctest = false + +[dependencies] +alloy-contract = { workspace = true } +alloy-primitives = { workspace = true } +alloy-provider = { workspace = true } +alloy-sol-types = { workspace = true } +anyhow = { workspace = true } + +[lints] +workspace = true diff --git a/contracts/generated/contracts-generated/mockuniswapv3factory/src/lib.rs b/contracts/generated/contracts-generated/mockuniswapv3factory/src/lib.rs new file mode 100644 index 0000000000..3c7fc262db --- /dev/null +++ b/contracts/generated/contracts-generated/mockuniswapv3factory/src/lib.rs @@ -0,0 +1,851 @@ +#![allow( + unused_imports, + unused_attributes, + clippy::all, + rustdoc::all, + non_snake_case +)] +//! Auto-generated contract bindings. Do not edit. +/** + +Generated by the following Solidity interface... +```solidity +interface MockUniswapV3Factory { + event PoolCreated(address indexed token0, address indexed token1, uint24 indexed fee, int24 tickSpacing, address pool); + + function createPool(address tokenA, address tokenB, uint24 _fee) external returns (address pool); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "createPool", + "inputs": [ + { + "name": "tokenA", + "type": "address", + "internalType": "address" + }, + { + "name": "tokenB", + "type": "address", + "internalType": "address" + }, + { + "name": "_fee", + "type": "uint24", + "internalType": "uint24" + } + ], + "outputs": [ + { + "name": "pool", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "PoolCreated", + "inputs": [ + { + "name": "token0", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "token1", + "type": "address", + "indexed": true, + "internalType": "address" + 
}, + { + "name": "fee", + "type": "uint24", + "indexed": true, + "internalType": "uint24" + }, + { + "name": "tickSpacing", + "type": "int24", + "indexed": false, + "internalType": "int24" + }, + { + "name": "pool", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod MockUniswapV3Factory { + use {super::*, alloy_sol_types}; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x6080604052348015600e575f5ffd5b506106dd8061001c5f395ff3fe608060405234801561000f575f5ffd5b5060043610610029575f3560e01c8063a16712951461002d575b5f5ffd5b61004061003b3660046101ab565b610069565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b5f5f5f8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16106100a65784866100a9565b85855b915091505f8282866040516100bd90610176565b73ffffffffffffffffffffffffffffffffffffffff938416815292909116602083015262ffffff166040820152606001604051809103905ff080158015610106573d5f5f3e3d5ffd5b5060408051600a815273ffffffffffffffffffffffffffffffffffffffff808416602083015292965086935062ffffff88169280861692908716917f783cca1c0412dd0d695e784568c96da2e9c22ff989357a2e8b1d9b2b4e6b7118910160405180910390a45050509392505050565b6104da806101f783390190565b803573ffffffffffffffffffffffffffffffffffffffff811681146101a6575f5ffd5b919050565b5f5f5f606084860312156101bd575f5ffd5b6101c684610183565b92506101d460208501610183565b9150604084013562ffffff811681146101eb575f5ffd5b80915050925092509256fe60e060405234801561000f575f5ffd5b506040516104da3803806104da83398101604081905261002e91610069565b6001600160a01b03928316608052911660a05262ffffff1660c0526100b4565b80516001600160a01b0381168114610064575f5ffd5b919050565b5f5f5f6060848603121561007b575f5ffd5b6100848461004e565b92506100926020850161004e565b9150604084015162ffffff811
681146100a9575f5ffd5b809150509250925092565b60805160a05160c0516103fd6100dd5f395f61012c01525f61010501525f607801526103fd5ff3fe608060405234801561000f575f5ffd5b506004361061006f575f3560e01c8063ddca3f431161004d578063ddca3f4314610127578063efe27fa314610162578063f637731d14610177575f5ffd5b80630dfe1681146100735780631a686502146100c4578063d21220a714610100575b5f5ffd5b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b5f546100df906fffffffffffffffffffffffffffffffff1681565b6040516fffffffffffffffffffffffffffffffff90911681526020016100bb565b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b61014e7f000000000000000000000000000000000000000000000000000000000000000081565b60405162ffffff90911681526020016100bb565b610175610170366004610312565b61018a565b005b61017561018536600461037b565b610287565b5f805482919081906101af9084906fffffffffffffffffffffffffffffffff1661039d565b92506101000a8154816fffffffffffffffffffffffffffffffff02191690836fffffffffffffffffffffffffffffffff1602179055508160020b8360020b8573ffffffffffffffffffffffffffffffffffffffff167f7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde33855f5f604051610279949392919073ffffffffffffffffffffffffffffffffffffffff9490941684526fffffffffffffffffffffffffffffffff9290921660208401526040830152606082015260800190565b60405180910390a450505050565b6040805173ffffffffffffffffffffffffffffffffffffffff831681525f60208201527f98636036cb66a9c19a37435efc1e90142190214e8abeb821bdba3f2990dd4c95910160405180910390a150565b73ffffffffffffffffffffffffffffffffffffffff811681146102f9575f5ffd5b50565b8035600281900b811461030d575f5ffd5b919050565b5f5f5f5f60808587031215610325575f5ffd5b8435610330816102d8565b935061033e602086016102fc565b925061034c604086016102fc565b915060608501356fffffffffffffffffffffffffffffffff81168114610370575f5ffd5b939692955090935050565b5f6020828403121561038b575f5ffd5b8135610396816102d8565b9392505050565b6ffffffffffffffffffffffff
fffffffff81811683821601908111156103ea577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b9291505056fea164736f6c634300081e000aa164736f6c634300081e000a + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"`\x80`@R4\x80\x15`\x0EW__\xFD[Pa\x06\xDD\x80a\0\x1C_9_\xF3\xFE`\x80`@R4\x80\x15a\0\x0FW__\xFD[P`\x046\x10a\0)W_5`\xE0\x1C\x80c\xA1g\x12\x95\x14a\0-W[__\xFD[a\0@a\0;6`\x04a\x01\xABV[a\0iV[`@Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` \x01`@Q\x80\x91\x03\x90\xF3[___\x84s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x86s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x10a\0\xA6W\x84\x86a\0\xA9V[\x85\x85[\x91P\x91P_\x82\x82\x86`@Qa\0\xBD\x90a\x01vV[s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x93\x84\x16\x81R\x92\x90\x91\x16` \x83\x01Rb\xFF\xFF\xFF\x16`@\x82\x01R``\x01`@Q\x80\x91\x03\x90_\xF0\x80\x15\x80\x15a\x01\x06W=__>=_\xFD[P`@\x80Q`\n\x81Rs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x80\x84\x16` \x83\x01R\x92\x96P\x86\x93Pb\xFF\xFF\xFF\x88\x16\x92\x80\x86\x16\x92\x90\x87\x16\x91\x7Fx<\xCA\x1C\x04\x12\xDD\ri^xEh\xC9m\xA2\xE9\xC2/\xF9\x895z.\x8B\x1D\x9B+Nkq\x18\x91\x01`@Q\x80\x91\x03\x90\xA4PPP\x93\x92PPPV[a\x04\xDA\x80a\x01\xF7\x839\x01\x90V[\x805s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x01\xA6W__\xFD[\x91\x90PV[___``\x84\x86\x03\x12\x15a\x01\xBDW__\xFD[a\x01\xC6\x84a\x01\x83V[\x92Pa\x01\xD4` 
\x85\x01a\x01\x83V[\x91P`@\x84\x015b\xFF\xFF\xFF\x81\x16\x81\x14a\x01\xEBW__\xFD[\x80\x91PP\x92P\x92P\x92V\xFE`\xE0`@R4\x80\x15a\0\x0FW__\xFD[P`@Qa\x04\xDA8\x03\x80a\x04\xDA\x839\x81\x01`@\x81\x90Ra\0.\x91a\0iV[`\x01`\x01`\xA0\x1B\x03\x92\x83\x16`\x80R\x91\x16`\xA0Rb\xFF\xFF\xFF\x16`\xC0Ra\0\xB4V[\x80Q`\x01`\x01`\xA0\x1B\x03\x81\x16\x81\x14a\0dW__\xFD[\x91\x90PV[___``\x84\x86\x03\x12\x15a\0{W__\xFD[a\0\x84\x84a\0NV[\x92Pa\0\x92` \x85\x01a\0NV[\x91P`@\x84\x01Qb\xFF\xFF\xFF\x81\x16\x81\x14a\0\xA9W__\xFD[\x80\x91PP\x92P\x92P\x92V[`\x80Q`\xA0Q`\xC0Qa\x03\xFDa\0\xDD_9_a\x01,\x01R_a\x01\x05\x01R_`x\x01Ra\x03\xFD_\xF3\xFE`\x80`@R4\x80\x15a\0\x0FW__\xFD[P`\x046\x10a\0oW_5`\xE0\x1C\x80c\xDD\xCA?C\x11a\0MW\x80c\xDD\xCA?C\x14a\x01'W\x80c\xEF\xE2\x7F\xA3\x14a\x01bW\x80c\xF67s\x1D\x14a\x01wW__\xFD[\x80c\r\xFE\x16\x81\x14a\0sW\x80c\x1Ahe\x02\x14a\0\xC4W\x80c\xD2\x12 \xA7\x14a\x01\0W[__\xFD[a\0\x9A\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[`@Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` \x01[`@Q\x80\x91\x03\x90\xF3[_Ta\0\xDF\x90o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x81V[`@Qo\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` \x01a\0\xBBV[a\0\x9A\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[a\x01N\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[`@Qb\xFF\xFF\xFF\x90\x91\x16\x81R` 
\x01a\0\xBBV[a\x01ua\x01p6`\x04a\x03\x12V[a\x01\x8AV[\0[a\x01ua\x01\x856`\x04a\x03{V[a\x02\x87V[_\x80T\x82\x91\x90\x81\x90a\x01\xAF\x90\x84\x90o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16a\x03\x9DV[\x92Pa\x01\0\n\x81T\x81o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x02\x19\x16\x90\x83o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x02\x17\x90UP\x81`\x02\x0B\x83`\x02\x0B\x85s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x7FzS\x08\x0B\xA4\x14\x15\x8B\xE7\xECi\xB9\x87\xB5\xFB}\x07\xDE\xE1\x01\xFE\x85H\x8F\x08S\xAE\x16#\x9D\x0B\xDE3\x85__`@Qa\x02y\x94\x93\x92\x91\x90s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x94\x90\x94\x16\x84Ro\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x92\x90\x92\x16` \x84\x01R`@\x83\x01R``\x82\x01R`\x80\x01\x90V[`@Q\x80\x91\x03\x90\xA4PPPPV[`@\x80Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x83\x16\x81R_` \x82\x01R\x7F\x98c`6\xCBf\xA9\xC1\x9A7C^\xFC\x1E\x90\x14!\x90!N\x8A\xBE\xB8!\xBD\xBA?)\x90\xDDL\x95\x91\x01`@Q\x80\x91\x03\x90\xA1PV[s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x02\xF9W__\xFD[PV[\x805`\x02\x81\x90\x0B\x81\x14a\x03\rW__\xFD[\x91\x90PV[____`\x80\x85\x87\x03\x12\x15a\x03%W__\xFD[\x845a\x030\x81a\x02\xD8V[\x93Pa\x03>` \x86\x01a\x02\xFCV[\x92Pa\x03L`@\x86\x01a\x02\xFCV[\x91P``\x85\x015o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x03pW__\xFD[\x93\x96\x92\x95P\x90\x93PPV[_` \x82\x84\x03\x12\x15a\x03\x8BW__\xFD[\x815a\x03\x96\x81a\x02\xD8V[\x93\x92PPPV[o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x81\x16\x83\x82\x16\x01\x90\x81\x11\x15a\x03\xEAW\x7FNH{q\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0_R`\x11`\x04R`$_\xFD[\x92\x91PPV\xFE\xA1dsolcC\0\x08\x1E\0\n\xA1dsolcC\0\x08\x1E\0\n", + ); + /// The runtime 
bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x608060405234801561000f575f5ffd5b5060043610610029575f3560e01c8063a16712951461002d575b5f5ffd5b61004061003b3660046101ab565b610069565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b5f5f5f8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16106100a65784866100a9565b85855b915091505f8282866040516100bd90610176565b73ffffffffffffffffffffffffffffffffffffffff938416815292909116602083015262ffffff166040820152606001604051809103905ff080158015610106573d5f5f3e3d5ffd5b5060408051600a815273ffffffffffffffffffffffffffffffffffffffff808416602083015292965086935062ffffff88169280861692908716917f783cca1c0412dd0d695e784568c96da2e9c22ff989357a2e8b1d9b2b4e6b7118910160405180910390a45050509392505050565b6104da806101f783390190565b803573ffffffffffffffffffffffffffffffffffffffff811681146101a6575f5ffd5b919050565b5f5f5f606084860312156101bd575f5ffd5b6101c684610183565b92506101d460208501610183565b9150604084013562ffffff811681146101eb575f5ffd5b80915050925092509256fe60e060405234801561000f575f5ffd5b506040516104da3803806104da83398101604081905261002e91610069565b6001600160a01b03928316608052911660a05262ffffff1660c0526100b4565b80516001600160a01b0381168114610064575f5ffd5b919050565b5f5f5f6060848603121561007b575f5ffd5b6100848461004e565b92506100926020850161004e565b9150604084015162ffffff811681146100a9575f5ffd5b809150509250925092565b60805160a05160c0516103fd6100dd5f395f61012c01525f61010501525f607801526103fd5ff3fe608060405234801561000f575f5ffd5b506004361061006f575f3560e01c8063ddca3f431161004d578063ddca3f4314610127578063efe27fa314610162578063f637731d14610177575f5ffd5b80630dfe1681146100735780631a686502146100c4578063d21220a714610100575b5f5ffd5b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b5f546100df906fffffffffffffffffffffffffffffffff1681565b6040516ffffffffffffffff
fffffffffffffffff90911681526020016100bb565b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b61014e7f000000000000000000000000000000000000000000000000000000000000000081565b60405162ffffff90911681526020016100bb565b610175610170366004610312565b61018a565b005b61017561018536600461037b565b610287565b5f805482919081906101af9084906fffffffffffffffffffffffffffffffff1661039d565b92506101000a8154816fffffffffffffffffffffffffffffffff02191690836fffffffffffffffffffffffffffffffff1602179055508160020b8360020b8573ffffffffffffffffffffffffffffffffffffffff167f7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde33855f5f604051610279949392919073ffffffffffffffffffffffffffffffffffffffff9490941684526fffffffffffffffffffffffffffffffff9290921660208401526040830152606082015260800190565b60405180910390a450505050565b6040805173ffffffffffffffffffffffffffffffffffffffff831681525f60208201527f98636036cb66a9c19a37435efc1e90142190214e8abeb821bdba3f2990dd4c95910160405180910390a150565b73ffffffffffffffffffffffffffffffffffffffff811681146102f9575f5ffd5b50565b8035600281900b811461030d575f5ffd5b919050565b5f5f5f5f60808587031215610325575f5ffd5b8435610330816102d8565b935061033e602086016102fc565b925061034c604086016102fc565b915060608501356fffffffffffffffffffffffffffffffff81168114610370575f5ffd5b939692955090935050565b5f6020828403121561038b575f5ffd5b8135610396816102d8565b9392505050565b6fffffffffffffffffffffffffffffffff81811683821601908111156103ea577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b9291505056fea164736f6c634300081e000aa164736f6c634300081e000a + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"`\x80`@R4\x80\x15a\0\x0FW__\xFD[P`\x046\x10a\0)W_5`\xE0\x1C\x80c\xA1g\x12\x95\x14a\0-W[__\xFD[a\0@a\0;6`\x04a\x01\xABV[a\0iV[`@Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` 
\x01`@Q\x80\x91\x03\x90\xF3[___\x84s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x86s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x10a\0\xA6W\x84\x86a\0\xA9V[\x85\x85[\x91P\x91P_\x82\x82\x86`@Qa\0\xBD\x90a\x01vV[s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x93\x84\x16\x81R\x92\x90\x91\x16` \x83\x01Rb\xFF\xFF\xFF\x16`@\x82\x01R``\x01`@Q\x80\x91\x03\x90_\xF0\x80\x15\x80\x15a\x01\x06W=__>=_\xFD[P`@\x80Q`\n\x81Rs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x80\x84\x16` \x83\x01R\x92\x96P\x86\x93Pb\xFF\xFF\xFF\x88\x16\x92\x80\x86\x16\x92\x90\x87\x16\x91\x7Fx<\xCA\x1C\x04\x12\xDD\ri^xEh\xC9m\xA2\xE9\xC2/\xF9\x895z.\x8B\x1D\x9B+Nkq\x18\x91\x01`@Q\x80\x91\x03\x90\xA4PPP\x93\x92PPPV[a\x04\xDA\x80a\x01\xF7\x839\x01\x90V[\x805s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x01\xA6W__\xFD[\x91\x90PV[___``\x84\x86\x03\x12\x15a\x01\xBDW__\xFD[a\x01\xC6\x84a\x01\x83V[\x92Pa\x01\xD4` \x85\x01a\x01\x83V[\x91P`@\x84\x015b\xFF\xFF\xFF\x81\x16\x81\x14a\x01\xEBW__\xFD[\x80\x91PP\x92P\x92P\x92V\xFE`\xE0`@R4\x80\x15a\0\x0FW__\xFD[P`@Qa\x04\xDA8\x03\x80a\x04\xDA\x839\x81\x01`@\x81\x90Ra\0.\x91a\0iV[`\x01`\x01`\xA0\x1B\x03\x92\x83\x16`\x80R\x91\x16`\xA0Rb\xFF\xFF\xFF\x16`\xC0Ra\0\xB4V[\x80Q`\x01`\x01`\xA0\x1B\x03\x81\x16\x81\x14a\0dW__\xFD[\x91\x90PV[___``\x84\x86\x03\x12\x15a\0{W__\xFD[a\0\x84\x84a\0NV[\x92Pa\0\x92` \x85\x01a\0NV[\x91P`@\x84\x01Qb\xFF\xFF\xFF\x81\x16\x81\x14a\0\xA9W__\xFD[\x80\x91PP\x92P\x92P\x92V[`\x80Q`\xA0Q`\xC0Qa\x03\xFDa\0\xDD_9_a\x01,\x01R_a\x01\x05\x01R_`x\x01Ra\x03\xFD_\xF3\xFE`\x80`@R4\x80\x15a\0\x0FW__\xFD[P`\x046\x10a\0oW_5`\xE0\x1C\x80c\xDD\xCA?C\x11a\0MW\x80c\xDD\xCA?C\x14a\x01'W\x80c\xEF\xE2\x7F\xA3\x14a\x01bW\x80c\xF67s\x1D\x14a\x01wW__\xFD[\x80c\r\xFE\x16\x81\x14a\0sW\x80c\x1Ahe\x02\x14a\0\xC4W\x80c\xD2\x12 
\xA7\x14a\x01\0W[__\xFD[a\0\x9A\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[`@Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` \x01[`@Q\x80\x91\x03\x90\xF3[_Ta\0\xDF\x90o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x81V[`@Qo\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` \x01a\0\xBBV[a\0\x9A\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[a\x01N\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[`@Qb\xFF\xFF\xFF\x90\x91\x16\x81R` \x01a\0\xBBV[a\x01ua\x01p6`\x04a\x03\x12V[a\x01\x8AV[\0[a\x01ua\x01\x856`\x04a\x03{V[a\x02\x87V[_\x80T\x82\x91\x90\x81\x90a\x01\xAF\x90\x84\x90o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16a\x03\x9DV[\x92Pa\x01\0\n\x81T\x81o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x02\x19\x16\x90\x83o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x02\x17\x90UP\x81`\x02\x0B\x83`\x02\x0B\x85s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x7FzS\x08\x0B\xA4\x14\x15\x8B\xE7\xECi\xB9\x87\xB5\xFB}\x07\xDE\xE1\x01\xFE\x85H\x8F\x08S\xAE\x16#\x9D\x0B\xDE3\x85__`@Qa\x02y\x94\x93\x92\x91\x90s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x94\x90\x94\x16\x84Ro\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x92\x90\x92\x16` \x84\x01R`@\x83\x01R``\x82\x01R`\x80\x01\x90V[`@Q\x80\x91\x03\x90\xA4PPPPV[`@\x80Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x83\x16\x81R_` 
\x82\x01R\x7F\x98c`6\xCBf\xA9\xC1\x9A7C^\xFC\x1E\x90\x14!\x90!N\x8A\xBE\xB8!\xBD\xBA?)\x90\xDDL\x95\x91\x01`@Q\x80\x91\x03\x90\xA1PV[s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x02\xF9W__\xFD[PV[\x805`\x02\x81\x90\x0B\x81\x14a\x03\rW__\xFD[\x91\x90PV[____`\x80\x85\x87\x03\x12\x15a\x03%W__\xFD[\x845a\x030\x81a\x02\xD8V[\x93Pa\x03>` \x86\x01a\x02\xFCV[\x92Pa\x03L`@\x86\x01a\x02\xFCV[\x91P``\x85\x015o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x03pW__\xFD[\x93\x96\x92\x95P\x90\x93PPV[_` \x82\x84\x03\x12\x15a\x03\x8BW__\xFD[\x815a\x03\x96\x81a\x02\xD8V[\x93\x92PPPV[o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x81\x16\x83\x82\x16\x01\x90\x81\x11\x15a\x03\xEAW\x7FNH{q\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0_R`\x11`\x04R`$_\xFD[\x92\x91PPV\xFE\xA1dsolcC\0\x08\x1E\0\n\xA1dsolcC\0\x08\x1E\0\n", + ); + #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Event with signature `PoolCreated(address,address,uint24,int24,address)` and selector `0x783cca1c0412dd0d695e784568c96da2e9c22ff989357a2e8b1d9b2b4e6b7118`. 
+ ```solidity + event PoolCreated(address indexed token0, address indexed token1, uint24 indexed fee, int24 tickSpacing, address pool); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct PoolCreated { + #[allow(missing_docs)] + pub token0: alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub token1: alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub fee: alloy_sol_types::private::primitives::aliases::U24, + #[allow(missing_docs)] + pub tickSpacing: alloy_sol_types::private::primitives::aliases::I24, + #[allow(missing_docs)] + pub pool: alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for PoolCreated { + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type DataTuple<'a> = ( + alloy_sol_types::sol_data::Int<24>, + alloy_sol_types::sol_data::Address, + ); + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Uint<24>, + ); + + const ANONYMOUS: bool = false; + const SIGNATURE: &'static str = "PoolCreated(address,address,uint24,int24,address)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 120u8, 60u8, 202u8, 28u8, 4u8, 18u8, 221u8, 13u8, 105u8, 94u8, 120u8, 69u8, + 104u8, 201u8, 109u8, 162u8, 233u8, 194u8, 47u8, 249u8, 137u8, 53u8, 122u8, + 46u8, 139u8, 29u8, 155u8, 43u8, 78u8, 107u8, 113u8, 24u8, + ]); + + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + token0: topics.1, + token1: topics.2, + fee: topics.3, + tickSpacing: data.0, + pool: data.1, + } + } + + #[inline] + fn 
check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + as alloy_sol_types::SolType>::tokenize( + &self.tickSpacing, + ), + ::tokenize( + &self.pool, + ), + ) + } + + #[inline] + fn topics(&self) -> ::RustType { + ( + Self::SIGNATURE_HASH.into(), + self.token0.clone(), + self.token1.clone(), + self.fee.clone(), + ) + } + + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = ::encode_topic( + &self.token0, + ); + out[2usize] = ::encode_topic( + &self.token1, + ); + out[3usize] = as alloy_sol_types::EventTopic>::encode_topic(&self.fee); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for PoolCreated { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&PoolCreated> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &PoolCreated) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Function with signature `createPool(address,address,uint24)` and selector `0xa1671295`. 
+ ```solidity + function createPool(address tokenA, address tokenB, uint24 _fee) external returns (address pool); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createPoolCall { + #[allow(missing_docs)] + pub tokenA: alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub tokenB: alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub _fee: alloy_sol_types::private::primitives::aliases::U24, + } + #[derive(Default, Debug, PartialEq, Eq, Hash)] + ///Container type for the return parameters of the + /// [`createPool(address,address,uint24)`](createPoolCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createPoolReturn { + #[allow(missing_docs)] + pub pool: alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = ( + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Uint<24>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + alloy_sol_types::private::Address, + alloy_sol_types::private::Address, + alloy_sol_types::private::primitives::aliases::U24, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createPoolCall) -> Self { + (value.tokenA, value.tokenB, value._fee) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createPoolCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + tokenA: tuple.0, + 
tokenB: tuple.1, + _fee: tuple.2, + } + } + } + } + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createPoolReturn) -> Self { + (value.pool,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createPoolReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { pool: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for createPoolCall { + type Parameters<'a> = ( + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Uint<24>, + ); + type Return = alloy_sol_types::private::Address; + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type ReturnTuple<'a> = (alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + + const SELECTOR: [u8; 4] = [161u8, 103u8, 18u8, 149u8]; + const SIGNATURE: &'static str = "createPool(address,address,uint24)"; + + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + ::tokenize( + &self.tokenA, + ), + ::tokenize( + &self.tokenB, + ), + as alloy_sol_types::SolType>::tokenize( + &self._fee, + ), + ) + } + + #[inline] + fn tokenize_returns(ret: &Self::Return) -> Self::ReturnToken<'_> { + (::tokenize(ret),) + } + + #[inline] + fn abi_decode_returns(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data).map( + |r| { + let 
r: createPoolReturn = r.into(); + r.pool + }, + ) + } + + #[inline] + fn abi_decode_returns_validate(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence_validate( + data, + ) + .map(|r| { + let r: createPoolReturn = r.into(); + r.pool + }) + } + } + }; + ///Container for all the [`MockUniswapV3Factory`](self) function calls. + #[derive(Clone)] + pub enum MockUniswapV3FactoryCalls { + #[allow(missing_docs)] + createPool(createPoolCall), + } + impl MockUniswapV3FactoryCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the + /// variants. No guarantees are made about the order of the + /// selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[[161u8, 103u8, 18u8, 149u8]]; + /// The signatures in the same order as `SELECTORS`. + pub const SIGNATURES: &'static [&'static str] = + &[::SIGNATURE]; + /// The names of the variants in the same order as `SELECTORS`. + pub const VARIANT_NAMES: &'static [&'static str] = &[::core::stringify!(createPool)]; + + /// Returns the signature for the given selector, if known. + #[inline] + pub fn signature_by_selector( + selector: [u8; 4usize], + ) -> ::core::option::Option<&'static str> { + match Self::SELECTORS.binary_search(&selector) { + ::core::result::Result::Ok(idx) => { + ::core::option::Option::Some(Self::SIGNATURES[idx]) + } + ::core::result::Result::Err(_) => ::core::option::Option::None, + } + } + + /// Returns the enum variant name for the given selector, if known. 
+ #[inline] + pub fn name_by_selector(selector: [u8; 4usize]) -> ::core::option::Option<&'static str> { + let sig = Self::signature_by_selector(selector)?; + sig.split_once('(').map(|(name, _)| name) + } + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for MockUniswapV3FactoryCalls { + const COUNT: usize = 1usize; + const MIN_DATA_LENGTH: usize = 96usize; + const NAME: &'static str = "MockUniswapV3FactoryCalls"; + + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::createPool(_) => ::SELECTOR, + } + } + + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw(selector: [u8; 4], data: &[u8]) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + ) + -> alloy_sol_types::Result] = &[{ + fn createPool(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw(data) + .map(MockUniswapV3FactoryCalls::createPool) + } + createPool + }]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data) + } + + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw_validate( + selector: [u8; 4], + data: &[u8], + ) -> alloy_sol_types::Result { + static DECODE_VALIDATE_SHIMS: &[fn( + &[u8], + ) -> alloy_sol_types::Result< + MockUniswapV3FactoryCalls, + >] = &[{ + fn createPool(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw_validate(data) + .map(MockUniswapV3FactoryCalls::createPool) + } + createPool + }]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_VALIDATE_SHIMS[idx](data) + } + + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + 
Self::createPool(inner) => { + ::abi_encoded_size(inner) + } + } + } + + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::createPool(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`MockUniswapV3Factory`](self) events. + #[derive(Clone, Debug, PartialEq, Eq, Hash)] + pub enum MockUniswapV3FactoryEvents { + #[allow(missing_docs)] + PoolCreated(PoolCreated), + } + impl MockUniswapV3FactoryEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the + /// variants. No guarantees are made about the order of the + /// selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[[ + 120u8, 60u8, 202u8, 28u8, 4u8, 18u8, 221u8, 13u8, 105u8, 94u8, 120u8, 69u8, 104u8, + 201u8, 109u8, 162u8, 233u8, 194u8, 47u8, 249u8, 137u8, 53u8, 122u8, 46u8, 139u8, 29u8, + 155u8, 43u8, 78u8, 107u8, 113u8, 24u8, + ]]; + /// The signatures in the same order as `SELECTORS`. + pub const SIGNATURES: &'static [&'static str] = + &[::SIGNATURE]; + /// The names of the variants in the same order as `SELECTORS`. + pub const VARIANT_NAMES: &'static [&'static str] = &[::core::stringify!(PoolCreated)]; + + /// Returns the signature for the given selector, if known. + #[inline] + pub fn signature_by_selector( + selector: [u8; 32usize], + ) -> ::core::option::Option<&'static str> { + match Self::SELECTORS.binary_search(&selector) { + ::core::result::Result::Ok(idx) => { + ::core::option::Option::Some(Self::SIGNATURES[idx]) + } + ::core::result::Result::Err(_) => ::core::option::Option::None, + } + } + + /// Returns the enum variant name for the given selector, if known. 
+ #[inline] + pub fn name_by_selector(selector: [u8; 32usize]) -> ::core::option::Option<&'static str> { + let sig = Self::signature_by_selector(selector)?; + sig.split_once('(').map(|(name, _)| name) + } + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for MockUniswapV3FactoryEvents { + const COUNT: usize = 1usize; + const NAME: &'static str = "MockUniswapV3FactoryEvents"; + + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log(topics, data) + .map(Self::PoolCreated) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for MockUniswapV3FactoryEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::PoolCreated(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::PoolCreated(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } + use alloy_contract; + /**Creates a new wrapper around an on-chain [`MockUniswapV3Factory`](self) contract instance. + + See the [wrapper's documentation](`MockUniswapV3FactoryInstance`) for more details.*/ + #[inline] + pub const fn new< + P: alloy_contract::private::Provider, + N: alloy_contract::private::Network, + >( + address: alloy_sol_types::private::Address, + __provider: P, + ) -> MockUniswapV3FactoryInstance { + MockUniswapV3FactoryInstance::::new(address, __provider) + } + /**Deploys this contract using the given `provider` and constructor arguments, if any. 
+ + Returns a new instance of the contract, if the deployment was successful. + + For more fine-grained control over the deployment process, use [`deploy_builder`] instead.*/ + #[inline] + pub fn deploy, N: alloy_contract::private::Network>( + __provider: P, + ) -> impl ::core::future::Future>> + { + MockUniswapV3FactoryInstance::::deploy(__provider) + } + /**Creates a `RawCallBuilder` for deploying this contract using the given `provider` + and constructor arguments, if any. + + This is a simple wrapper around creating a `RawCallBuilder` with the data set to + the bytecode concatenated with the constructor's ABI-encoded arguments.*/ + #[inline] + pub fn deploy_builder< + P: alloy_contract::private::Provider, + N: alloy_contract::private::Network, + >( + __provider: P, + ) -> alloy_contract::RawCallBuilder { + MockUniswapV3FactoryInstance::::deploy_builder(__provider) + } + /**A [`MockUniswapV3Factory`](self) instance. + + Contains type-safe methods for interacting with an on-chain instance of the + [`MockUniswapV3Factory`](self) contract located at a given `address`, using a given + provider `P`. + + If the contract bytecode is available (see the [`sol!`](alloy_sol_types::sol!) + documentation on how to provide it), the `deploy` and `deploy_builder` methods can + be used to deploy a new instance of the contract. + + See the [module-level documentation](self) for all the available methods.*/ + #[derive(Clone)] + pub struct MockUniswapV3FactoryInstance { + address: alloy_sol_types::private::Address, + provider: P, + _network: ::core::marker::PhantomData, + } + #[automatically_derived] + impl ::core::fmt::Debug for MockUniswapV3FactoryInstance { + #[inline] + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + f.debug_tuple("MockUniswapV3FactoryInstance") + .field(&self.address) + .finish() + } + } + /// Instantiation and getters/setters. 
+ impl, N: alloy_contract::private::Network> + MockUniswapV3FactoryInstance + { + /**Creates a new wrapper around an on-chain [`MockUniswapV3Factory`](self) contract instance. + + See the [wrapper's documentation](`MockUniswapV3FactoryInstance`) for more details.*/ + #[inline] + pub const fn new(address: alloy_sol_types::private::Address, __provider: P) -> Self { + Self { + address, + provider: __provider, + _network: ::core::marker::PhantomData, + } + } + + /**Deploys this contract using the given `provider` and constructor arguments, if any. + + Returns a new instance of the contract, if the deployment was successful. + + For more fine-grained control over the deployment process, use [`deploy_builder`] instead.*/ + #[inline] + pub async fn deploy( + __provider: P, + ) -> alloy_contract::Result> { + let call_builder = Self::deploy_builder(__provider); + let contract_address = call_builder.deploy().await?; + Ok(Self::new(contract_address, call_builder.provider)) + } + + /**Creates a `RawCallBuilder` for deploying this contract using the given `provider` + and constructor arguments, if any. + + This is a simple wrapper around creating a `RawCallBuilder` with the data set to + the bytecode concatenated with the constructor's ABI-encoded arguments.*/ + #[inline] + pub fn deploy_builder(__provider: P) -> alloy_contract::RawCallBuilder { + alloy_contract::RawCallBuilder::new_raw_deploy( + __provider, + ::core::clone::Clone::clone(&BYTECODE), + ) + } + + /// Returns a reference to the address. + #[inline] + pub const fn address(&self) -> &alloy_sol_types::private::Address { + &self.address + } + + /// Sets the address. + #[inline] + pub fn set_address(&mut self, address: alloy_sol_types::private::Address) { + self.address = address; + } + + /// Sets the address and returns `self`. + pub fn at(mut self, address: alloy_sol_types::private::Address) -> Self { + self.set_address(address); + self + } + + /// Returns a reference to the provider. 
+ #[inline] + pub const fn provider(&self) -> &P { + &self.provider + } + } + impl MockUniswapV3FactoryInstance<&P, N> { + /// Clones the provider and returns a new instance with the cloned + /// provider. + #[inline] + pub fn with_cloned_provider(self) -> MockUniswapV3FactoryInstance { + MockUniswapV3FactoryInstance { + address: self.address, + provider: ::core::clone::Clone::clone(&self.provider), + _network: ::core::marker::PhantomData, + } + } + } + /// Function calls. + impl, N: alloy_contract::private::Network> + MockUniswapV3FactoryInstance + { + /// Creates a new call builder using this contract instance's provider + /// and address. + /// + /// Note that the call can be any function call, not just those defined + /// in this contract. Prefer using the other methods for + /// building type-safe contract calls. + pub fn call_builder( + &self, + call: &C, + ) -> alloy_contract::SolCallBuilder<&P, C, N> { + alloy_contract::SolCallBuilder::new_sol(&self.provider, &self.address, call) + } + + ///Creates a new call builder for the [`createPool`] function. + pub fn createPool( + &self, + tokenA: alloy_sol_types::private::Address, + tokenB: alloy_sol_types::private::Address, + _fee: alloy_sol_types::private::primitives::aliases::U24, + ) -> alloy_contract::SolCallBuilder<&P, createPoolCall, N> { + self.call_builder(&createPoolCall { + tokenA, + tokenB, + _fee, + }) + } + } + /// Event filters. + impl, N: alloy_contract::private::Network> + MockUniswapV3FactoryInstance + { + /// Creates a new event filter using this contract instance's provider + /// and address. + /// + /// Note that the type can be any event, not just those defined in this + /// contract. Prefer using the other methods for building + /// type-safe event filters. + pub fn event_filter( + &self, + ) -> alloy_contract::Event<&P, E, N> { + alloy_contract::Event::new_sol(&self.provider, &self.address) + } + + ///Creates a new event filter for the [`PoolCreated`] event. 
+ pub fn PoolCreated_filter(&self) -> alloy_contract::Event<&P, PoolCreated, N> { + self.event_filter::() + } + } +} +pub type Instance = + MockUniswapV3Factory::MockUniswapV3FactoryInstance<::alloy_provider::DynProvider>; diff --git a/contracts/generated/contracts-generated/mockuniswapv3pool/Cargo.toml b/contracts/generated/contracts-generated/mockuniswapv3pool/Cargo.toml new file mode 100644 index 0000000000..4ffa2c1c90 --- /dev/null +++ b/contracts/generated/contracts-generated/mockuniswapv3pool/Cargo.toml @@ -0,0 +1,19 @@ +# Auto-generated by contracts-generate. Do not edit. +[package] +name = "cow-contract-mockuniswapv3pool" +version = "0.1.0" +edition = "2024" +publish = false + +[lib] +doctest = false + +[dependencies] +alloy-contract = { workspace = true } +alloy-primitives = { workspace = true } +alloy-provider = { workspace = true } +alloy-sol-types = { workspace = true } +anyhow = { workspace = true } + +[lints] +workspace = true diff --git a/contracts/generated/contracts-generated/mockuniswapv3pool/src/lib.rs b/contracts/generated/contracts-generated/mockuniswapv3pool/src/lib.rs new file mode 100644 index 0000000000..7e3c0e725b --- /dev/null +++ b/contracts/generated/contracts-generated/mockuniswapv3pool/src/lib.rs @@ -0,0 +1,2105 @@ +#![allow( + unused_imports, + unused_attributes, + clippy::all, + rustdoc::all, + non_snake_case +)] +//! Auto-generated contract bindings. Do not edit. +/** + +Generated by the following Solidity interface... 
+```solidity +interface MockUniswapV3Pool { + event Initialize(uint160 sqrtPriceX96, int24 tick); + event Mint(address sender, address indexed owner, int24 indexed tickLower, int24 indexed tickUpper, uint128 amount, uint256 amount0, uint256 amount1); + + constructor(address _token0, address _token1, uint24 _fee); + + function fee() external view returns (uint24); + function initialize(uint160 sqrtPriceX96) external; + function liquidity() external view returns (uint128); + function mockMint(address owner, int24 tickLower, int24 tickUpper, uint128 amount) external; + function token0() external view returns (address); + function token1() external view returns (address); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "constructor", + "inputs": [ + { + "name": "_token0", + "type": "address", + "internalType": "address" + }, + { + "name": "_token1", + "type": "address", + "internalType": "address" + }, + { + "name": "_fee", + "type": "uint24", + "internalType": "uint24" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "fee", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint24", + "internalType": "uint24" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "initialize", + "inputs": [ + { + "name": "sqrtPriceX96", + "type": "uint160", + "internalType": "uint160" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "liquidity", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint128", + "internalType": "uint128" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "mockMint", + "inputs": [ + { + "name": "owner", + "type": "address", + "internalType": "address" + }, + { + "name": "tickLower", + "type": "int24", + "internalType": "int24" + }, + { + "name": "tickUpper", + "type": "int24", + "internalType": "int24" + }, + { + "name": "amount", + "type": "uint128", + 
"internalType": "uint128" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "token0", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "token1", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + }, + { + "type": "event", + "name": "Initialize", + "inputs": [ + { + "name": "sqrtPriceX96", + "type": "uint160", + "indexed": false, + "internalType": "uint160" + }, + { + "name": "tick", + "type": "int24", + "indexed": false, + "internalType": "int24" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "Mint", + "inputs": [ + { + "name": "sender", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "owner", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "tickLower", + "type": "int24", + "indexed": true, + "internalType": "int24" + }, + { + "name": "tickUpper", + "type": "int24", + "indexed": true, + "internalType": "int24" + }, + { + "name": "amount", + "type": "uint128", + "indexed": false, + "internalType": "uint128" + }, + { + "name": "amount0", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "amount1", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod MockUniswapV3Pool { + use {super::*, alloy_sol_types}; + /// The creation / init bytecode of the contract. 
+ /// + /// ```text + ///0x60e060405234801561000f575f5ffd5b506040516104da3803806104da83398101604081905261002e91610069565b6001600160a01b03928316608052911660a05262ffffff1660c0526100b4565b80516001600160a01b0381168114610064575f5ffd5b919050565b5f5f5f6060848603121561007b575f5ffd5b6100848461004e565b92506100926020850161004e565b9150604084015162ffffff811681146100a9575f5ffd5b809150509250925092565b60805160a05160c0516103fd6100dd5f395f61012c01525f61010501525f607801526103fd5ff3fe608060405234801561000f575f5ffd5b506004361061006f575f3560e01c8063ddca3f431161004d578063ddca3f4314610127578063efe27fa314610162578063f637731d14610177575f5ffd5b80630dfe1681146100735780631a686502146100c4578063d21220a714610100575b5f5ffd5b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b5f546100df906fffffffffffffffffffffffffffffffff1681565b6040516fffffffffffffffffffffffffffffffff90911681526020016100bb565b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b61014e7f000000000000000000000000000000000000000000000000000000000000000081565b60405162ffffff90911681526020016100bb565b610175610170366004610312565b61018a565b005b61017561018536600461037b565b610287565b5f805482919081906101af9084906fffffffffffffffffffffffffffffffff1661039d565b92506101000a8154816fffffffffffffffffffffffffffffffff02191690836fffffffffffffffffffffffffffffffff1602179055508160020b8360020b8573ffffffffffffffffffffffffffffffffffffffff167f7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde33855f5f604051610279949392919073ffffffffffffffffffffffffffffffffffffffff9490941684526fffffffffffffffffffffffffffffffff9290921660208401526040830152606082015260800190565b60405180910390a450505050565b6040805173ffffffffffffffffffffffffffffffffffffffff831681525f60208201527f98636036cb66a9c19a37435efc1e90142190214e8abeb821bdba3f2990dd4c95910160405180910390a150565b73ffffffffffffffffffffffffffffffffffffffff811681146102f9575f5ffd5b50565b8
035600281900b811461030d575f5ffd5b919050565b5f5f5f5f60808587031215610325575f5ffd5b8435610330816102d8565b935061033e602086016102fc565b925061034c604086016102fc565b915060608501356fffffffffffffffffffffffffffffffff81168114610370575f5ffd5b939692955090935050565b5f6020828403121561038b575f5ffd5b8135610396816102d8565b9392505050565b6fffffffffffffffffffffffffffffffff81811683821601908111156103ea577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b9291505056fea164736f6c634300081e000a + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"`\xE0`@R4\x80\x15a\0\x0FW__\xFD[P`@Qa\x04\xDA8\x03\x80a\x04\xDA\x839\x81\x01`@\x81\x90Ra\0.\x91a\0iV[`\x01`\x01`\xA0\x1B\x03\x92\x83\x16`\x80R\x91\x16`\xA0Rb\xFF\xFF\xFF\x16`\xC0Ra\0\xB4V[\x80Q`\x01`\x01`\xA0\x1B\x03\x81\x16\x81\x14a\0dW__\xFD[\x91\x90PV[___``\x84\x86\x03\x12\x15a\0{W__\xFD[a\0\x84\x84a\0NV[\x92Pa\0\x92` \x85\x01a\0NV[\x91P`@\x84\x01Qb\xFF\xFF\xFF\x81\x16\x81\x14a\0\xA9W__\xFD[\x80\x91PP\x92P\x92P\x92V[`\x80Q`\xA0Q`\xC0Qa\x03\xFDa\0\xDD_9_a\x01,\x01R_a\x01\x05\x01R_`x\x01Ra\x03\xFD_\xF3\xFE`\x80`@R4\x80\x15a\0\x0FW__\xFD[P`\x046\x10a\0oW_5`\xE0\x1C\x80c\xDD\xCA?C\x11a\0MW\x80c\xDD\xCA?C\x14a\x01'W\x80c\xEF\xE2\x7F\xA3\x14a\x01bW\x80c\xF67s\x1D\x14a\x01wW__\xFD[\x80c\r\xFE\x16\x81\x14a\0sW\x80c\x1Ahe\x02\x14a\0\xC4W\x80c\xD2\x12 \xA7\x14a\x01\0W[__\xFD[a\0\x9A\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[`@Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` \x01[`@Q\x80\x91\x03\x90\xF3[_Ta\0\xDF\x90o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x81V[`@Qo\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` 
\x01a\0\xBBV[a\0\x9A\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[a\x01N\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[`@Qb\xFF\xFF\xFF\x90\x91\x16\x81R` \x01a\0\xBBV[a\x01ua\x01p6`\x04a\x03\x12V[a\x01\x8AV[\0[a\x01ua\x01\x856`\x04a\x03{V[a\x02\x87V[_\x80T\x82\x91\x90\x81\x90a\x01\xAF\x90\x84\x90o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16a\x03\x9DV[\x92Pa\x01\0\n\x81T\x81o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x02\x19\x16\x90\x83o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x02\x17\x90UP\x81`\x02\x0B\x83`\x02\x0B\x85s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x7FzS\x08\x0B\xA4\x14\x15\x8B\xE7\xECi\xB9\x87\xB5\xFB}\x07\xDE\xE1\x01\xFE\x85H\x8F\x08S\xAE\x16#\x9D\x0B\xDE3\x85__`@Qa\x02y\x94\x93\x92\x91\x90s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x94\x90\x94\x16\x84Ro\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x92\x90\x92\x16` \x84\x01R`@\x83\x01R``\x82\x01R`\x80\x01\x90V[`@Q\x80\x91\x03\x90\xA4PPPPV[`@\x80Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x83\x16\x81R_` \x82\x01R\x7F\x98c`6\xCBf\xA9\xC1\x9A7C^\xFC\x1E\x90\x14!\x90!N\x8A\xBE\xB8!\xBD\xBA?)\x90\xDDL\x95\x91\x01`@Q\x80\x91\x03\x90\xA1PV[s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x02\xF9W__\xFD[PV[\x805`\x02\x81\x90\x0B\x81\x14a\x03\rW__\xFD[\x91\x90PV[____`\x80\x85\x87\x03\x12\x15a\x03%W__\xFD[\x845a\x030\x81a\x02\xD8V[\x93Pa\x03>` \x86\x01a\x02\xFCV[\x92Pa\x03L`@\x86\x01a\x02\xFCV[\x91P``\x85\x015o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x03pW__\xFD[\x93\x96\x92\x95P\x90\x93PPV[_` 
\x82\x84\x03\x12\x15a\x03\x8BW__\xFD[\x815a\x03\x96\x81a\x02\xD8V[\x93\x92PPPV[o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x81\x16\x83\x82\x16\x01\x90\x81\x11\x15a\x03\xEAW\x7FNH{q\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0_R`\x11`\x04R`$_\xFD[\x92\x91PPV\xFE\xA1dsolcC\0\x08\x1E\0\n", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x608060405234801561000f575f5ffd5b506004361061006f575f3560e01c8063ddca3f431161004d578063ddca3f4314610127578063efe27fa314610162578063f637731d14610177575f5ffd5b80630dfe1681146100735780631a686502146100c4578063d21220a714610100575b5f5ffd5b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b5f546100df906fffffffffffffffffffffffffffffffff1681565b6040516fffffffffffffffffffffffffffffffff90911681526020016100bb565b61009a7f000000000000000000000000000000000000000000000000000000000000000081565b61014e7f000000000000000000000000000000000000000000000000000000000000000081565b60405162ffffff90911681526020016100bb565b610175610170366004610312565b61018a565b005b61017561018536600461037b565b610287565b5f805482919081906101af9084906fffffffffffffffffffffffffffffffff1661039d565b92506101000a8154816fffffffffffffffffffffffffffffffff02191690836fffffffffffffffffffffffffffffffff1602179055508160020b8360020b8573ffffffffffffffffffffffffffffffffffffffff167f7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde33855f5f604051610279949392919073ffffffffffffffffffffffffffffffffffffffff9490941684526fffffffffffffffffffffffffffffffff9290921660208401526040830152606082015260800190565b60405180910390a450505050565b6040805173ffffffffffffffffffffffffffffffffffffffff831681525f60208201527f98636036cb66a9c19a37435efc1e90142190214e8abeb821bdba3f2990dd4c95910160405180910390a150565b73ffffffffffffffffffffffffffffffffffffffff811681146102f9575f5ffd5b50565b8035600281900b811461030d575f5ffd5b9190505
65b5f5f5f5f60808587031215610325575f5ffd5b8435610330816102d8565b935061033e602086016102fc565b925061034c604086016102fc565b915060608501356fffffffffffffffffffffffffffffffff81168114610370575f5ffd5b939692955090935050565b5f6020828403121561038b575f5ffd5b8135610396816102d8565b9392505050565b6fffffffffffffffffffffffffffffffff81811683821601908111156103ea577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b9291505056fea164736f6c634300081e000a + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"`\x80`@R4\x80\x15a\0\x0FW__\xFD[P`\x046\x10a\0oW_5`\xE0\x1C\x80c\xDD\xCA?C\x11a\0MW\x80c\xDD\xCA?C\x14a\x01'W\x80c\xEF\xE2\x7F\xA3\x14a\x01bW\x80c\xF67s\x1D\x14a\x01wW__\xFD[\x80c\r\xFE\x16\x81\x14a\0sW\x80c\x1Ahe\x02\x14a\0\xC4W\x80c\xD2\x12 \xA7\x14a\x01\0W[__\xFD[a\0\x9A\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[`@Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` \x01[`@Q\x80\x91\x03\x90\xF3[_Ta\0\xDF\x90o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x81V[`@Qo\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x90\x91\x16\x81R` \x01a\0\xBBV[a\0\x9A\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[a\x01N\x7F\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81V[`@Qb\xFF\xFF\xFF\x90\x91\x16\x81R` 
\x01a\0\xBBV[a\x01ua\x01p6`\x04a\x03\x12V[a\x01\x8AV[\0[a\x01ua\x01\x856`\x04a\x03{V[a\x02\x87V[_\x80T\x82\x91\x90\x81\x90a\x01\xAF\x90\x84\x90o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16a\x03\x9DV[\x92Pa\x01\0\n\x81T\x81o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x02\x19\x16\x90\x83o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x02\x17\x90UP\x81`\x02\x0B\x83`\x02\x0B\x85s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x16\x7FzS\x08\x0B\xA4\x14\x15\x8B\xE7\xECi\xB9\x87\xB5\xFB}\x07\xDE\xE1\x01\xFE\x85H\x8F\x08S\xAE\x16#\x9D\x0B\xDE3\x85__`@Qa\x02y\x94\x93\x92\x91\x90s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x94\x90\x94\x16\x84Ro\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x92\x90\x92\x16` \x84\x01R`@\x83\x01R``\x82\x01R`\x80\x01\x90V[`@Q\x80\x91\x03\x90\xA4PPPPV[`@\x80Qs\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x83\x16\x81R_` \x82\x01R\x7F\x98c`6\xCBf\xA9\xC1\x9A7C^\xFC\x1E\x90\x14!\x90!N\x8A\xBE\xB8!\xBD\xBA?)\x90\xDDL\x95\x91\x01`@Q\x80\x91\x03\x90\xA1PV[s\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x02\xF9W__\xFD[PV[\x805`\x02\x81\x90\x0B\x81\x14a\x03\rW__\xFD[\x91\x90PV[____`\x80\x85\x87\x03\x12\x15a\x03%W__\xFD[\x845a\x030\x81a\x02\xD8V[\x93Pa\x03>` \x86\x01a\x02\xFCV[\x92Pa\x03L`@\x86\x01a\x02\xFCV[\x91P``\x85\x015o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x16\x81\x14a\x03pW__\xFD[\x93\x96\x92\x95P\x90\x93PPV[_` \x82\x84\x03\x12\x15a\x03\x8BW__\xFD[\x815a\x03\x96\x81a\x02\xD8V[\x93\x92PPPV[o\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x81\x16\x83\x82\x16\x01\x90\x81\x11\x15a\x03\xEAW\x7FNH{q\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0_R`\x11`\x04R`$_\xFD[\x92\x91PPV\xFE\xA1dsolcC\0\x08\x1E\0\n", + ); + #[derive(Default, Debug, PartialEq, Eq, 
Hash)] + /**Event with signature `Initialize(uint160,int24)` and selector `0x98636036cb66a9c19a37435efc1e90142190214e8abeb821bdba3f2990dd4c95`. + ```solidity + event Initialize(uint160 sqrtPriceX96, int24 tick); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct Initialize { + #[allow(missing_docs)] + pub sqrtPriceX96: alloy_sol_types::private::primitives::aliases::U160, + #[allow(missing_docs)] + pub tick: alloy_sol_types::private::primitives::aliases::I24, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for Initialize { + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type DataTuple<'a> = ( + alloy_sol_types::sol_data::Uint<160>, + alloy_sol_types::sol_data::Int<24>, + ); + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + + const ANONYMOUS: bool = false; + const SIGNATURE: &'static str = "Initialize(uint160,int24)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 152u8, 99u8, 96u8, 54u8, 203u8, 102u8, 169u8, 193u8, 154u8, 55u8, 67u8, 94u8, + 252u8, 30u8, 144u8, 20u8, 33u8, 144u8, 33u8, 78u8, 138u8, 190u8, 184u8, 33u8, + 189u8, 186u8, 63u8, 41u8, 144u8, 221u8, 76u8, 149u8, + ]); + + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + sqrtPriceX96: data.0, + tick: data.1, + } + } + + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + as 
alloy_sol_types::SolType>::tokenize( + &self.sqrtPriceX96, + ), + as alloy_sol_types::SolType>::tokenize( + &self.tick, + ), + ) + } + + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for Initialize { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&Initialize> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &Initialize) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Event with signature `Mint(address,address,int24,int24,uint128,uint256,uint256)` and selector `0x7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde`. 
+ ```solidity + event Mint(address sender, address indexed owner, int24 indexed tickLower, int24 indexed tickUpper, uint128 amount, uint256 amount0, uint256 amount1); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct Mint { + #[allow(missing_docs)] + pub sender: alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub owner: alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub tickLower: alloy_sol_types::private::primitives::aliases::I24, + #[allow(missing_docs)] + pub tickUpper: alloy_sol_types::private::primitives::aliases::I24, + #[allow(missing_docs)] + pub amount: u128, + #[allow(missing_docs)] + pub amount0: alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub amount1: alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for Mint { + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type DataTuple<'a> = ( + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Uint<128>, + alloy_sol_types::sol_data::Uint<256>, + alloy_sol_types::sol_data::Uint<256>, + ); + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Int<24>, + alloy_sol_types::sol_data::Int<24>, + ); + + const ANONYMOUS: bool = false; + const SIGNATURE: &'static str = + "Mint(address,address,int24,int24,uint128,uint256,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 122u8, 83u8, 8u8, 11u8, 164u8, 20u8, 21u8, 139u8, 231u8, 236u8, 105u8, 185u8, + 135u8, 181u8, 251u8, 125u8, 7u8, 222u8, 225u8, 1u8, 254u8, 133u8, 72u8, 143u8, + 8u8, 83u8, 174u8, 22u8, 35u8, 157u8, 11u8, 222u8, + ]); + + 
#[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + sender: data.0, + owner: topics.1, + tickLower: topics.2, + tickUpper: topics.3, + amount: data.1, + amount0: data.2, + amount1: data.3, + } + } + + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + ::tokenize( + &self.sender, + ), + as alloy_sol_types::SolType>::tokenize( + &self.amount, + ), + as alloy_sol_types::SolType>::tokenize( + &self.amount0, + ), + as alloy_sol_types::SolType>::tokenize( + &self.amount1, + ), + ) + } + + #[inline] + fn topics(&self) -> ::RustType { + ( + Self::SIGNATURE_HASH.into(), + self.owner.clone(), + self.tickLower.clone(), + self.tickUpper.clone(), + ) + } + + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = ::encode_topic( + &self.owner, + ); + out[2usize] = as alloy_sol_types::EventTopic>::encode_topic(&self.tickLower); + out[3usize] = as alloy_sol_types::EventTopic>::encode_topic(&self.tickUpper); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for Mint { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&Mint> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &Mint) -> alloy_sol_types::private::LogData { + 
alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Constructor`. + ```solidity + constructor(address _token0, address _token1, uint24 _fee); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct constructorCall { + #[allow(missing_docs)] + pub _token0: alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub _token1: alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub _fee: alloy_sol_types::private::primitives::aliases::U24, + } + const _: () = { + use alloy_sol_types; + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = ( + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Uint<24>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + alloy_sol_types::private::Address, + alloy_sol_types::private::Address, + alloy_sol_types::private::primitives::aliases::U24, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: constructorCall) -> Self { + (value._token0, value._token1, value._fee) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for constructorCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + _token0: tuple.0, + _token1: tuple.1, + _fee: tuple.2, + } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolConstructor for constructorCall { + type Parameters<'a> = ( + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Uint<24>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + 
tuple.into() + } + + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + ::tokenize( + &self._token0, + ), + ::tokenize( + &self._token1, + ), + as alloy_sol_types::SolType>::tokenize( + &self._fee, + ), + ) + } + } + }; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Function with signature `fee()` and selector `0xddca3f43`. + ```solidity + function fee() external view returns (uint24); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct feeCall; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + ///Container type for the return parameters of the [`fee()`](feeCall) + /// function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct feeReturn { + #[allow(missing_docs)] + pub _0: alloy_sol_types::private::primitives::aliases::U24, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: feeCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for feeCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self + } + } + } + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (alloy_sol_types::sol_data::Uint<24>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (alloy_sol_types::private::primitives::aliases::U24,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: 
alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: feeReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for feeReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for feeCall { + type Parameters<'a> = (); + type Return = alloy_sol_types::private::primitives::aliases::U24; + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type ReturnTuple<'a> = (alloy_sol_types::sol_data::Uint<24>,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + + const SELECTOR: [u8; 4] = [221u8, 202u8, 63u8, 67u8]; + const SIGNATURE: &'static str = "fee()"; + + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + + #[inline] + fn tokenize_returns(ret: &Self::Return) -> Self::ReturnToken<'_> { + ( + as alloy_sol_types::SolType>::tokenize( + ret, + ), + ) + } + + #[inline] + fn abi_decode_returns(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data).map( + |r| { + let r: feeReturn = r.into(); + r._0 + }, + ) + } + + #[inline] + fn abi_decode_returns_validate(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence_validate( + data, + ) + .map(|r| { + let r: feeReturn = r.into(); + r._0 + }) + } + } + }; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Function with signature `initialize(uint160)` and selector `0xf637731d`. 
+ ```solidity + function initialize(uint160 sqrtPriceX96) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct initializeCall { + #[allow(missing_docs)] + pub sqrtPriceX96: alloy_sol_types::private::primitives::aliases::U160, + } + ///Container type for the return parameters of the + /// [`initialize(uint160)`](initializeCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct initializeReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (alloy_sol_types::sol_data::Uint<160>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (alloy_sol_types::private::primitives::aliases::U160,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: initializeCall) -> Self { + (value.sqrtPriceX96,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for initializeCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + sqrtPriceX96: tuple.0, + } + } + } + } + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: initializeReturn) 
-> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for initializeReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + impl initializeReturn { + fn _tokenize(&self) -> ::ReturnToken<'_> { + () + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for initializeCall { + type Parameters<'a> = (alloy_sol_types::sol_data::Uint<160>,); + type Return = initializeReturn; + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type ReturnTuple<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + + const SELECTOR: [u8; 4] = [246u8, 55u8, 115u8, 29u8]; + const SIGNATURE: &'static str = "initialize(uint160)"; + + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + as alloy_sol_types::SolType>::tokenize( + &self.sqrtPriceX96, + ), + ) + } + + #[inline] + fn tokenize_returns(ret: &Self::Return) -> Self::ReturnToken<'_> { + initializeReturn::_tokenize(ret) + } + + #[inline] + fn abi_decode_returns(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data) + .map(Into::into) + } + + #[inline] + fn abi_decode_returns_validate(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence_validate( + data, + ) + .map(Into::into) + } + } + }; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Function with signature `liquidity()` and selector `0x1a686502`. + ```solidity + function liquidity() external view returns (uint128); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct liquidityCall; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + ///Container type for the return parameters of the + /// [`liquidity()`](liquidityCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct liquidityReturn { + #[allow(missing_docs)] + pub _0: u128, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: liquidityCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for liquidityCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self + } + } + } + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (alloy_sol_types::sol_data::Uint<128>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u128,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: liquidityReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for liquidityReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for liquidityCall { + type Parameters<'a> = (); + type Return = u128; + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type ReturnTuple<'a> = 
(alloy_sol_types::sol_data::Uint<128>,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + + const SELECTOR: [u8; 4] = [26u8, 104u8, 101u8, 2u8]; + const SIGNATURE: &'static str = "liquidity()"; + + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + + #[inline] + fn tokenize_returns(ret: &Self::Return) -> Self::ReturnToken<'_> { + ( + as alloy_sol_types::SolType>::tokenize( + ret, + ), + ) + } + + #[inline] + fn abi_decode_returns(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data).map( + |r| { + let r: liquidityReturn = r.into(); + r._0 + }, + ) + } + + #[inline] + fn abi_decode_returns_validate(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence_validate( + data, + ) + .map(|r| { + let r: liquidityReturn = r.into(); + r._0 + }) + } + } + }; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Function with signature `mockMint(address,int24,int24,uint128)` and selector `0xefe27fa3`. + ```solidity + function mockMint(address owner, int24 tickLower, int24 tickUpper, uint128 amount) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct mockMintCall { + #[allow(missing_docs)] + pub owner: alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub tickLower: alloy_sol_types::private::primitives::aliases::I24, + #[allow(missing_docs)] + pub tickUpper: alloy_sol_types::private::primitives::aliases::I24, + #[allow(missing_docs)] + pub amount: u128, + } + ///Container type for the return parameters of the + /// [`mockMint(address,int24,int24,uint128)`](mockMintCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct mockMintReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = ( + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Int<24>, + alloy_sol_types::sol_data::Int<24>, + alloy_sol_types::sol_data::Uint<128>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + alloy_sol_types::private::Address, + alloy_sol_types::private::primitives::aliases::I24, + alloy_sol_types::private::primitives::aliases::I24, + u128, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: mockMintCall) -> Self { + (value.owner, value.tickLower, value.tickUpper, value.amount) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for mockMintCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + owner: tuple.0, + tickLower: tuple.1, + tickUpper: tuple.2, + amount: tuple.3, + } + } + } + } + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: mockMintReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for 
mockMintReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + impl mockMintReturn { + fn _tokenize(&self) -> ::ReturnToken<'_> { + () + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for mockMintCall { + type Parameters<'a> = ( + alloy_sol_types::sol_data::Address, + alloy_sol_types::sol_data::Int<24>, + alloy_sol_types::sol_data::Int<24>, + alloy_sol_types::sol_data::Uint<128>, + ); + type Return = mockMintReturn; + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type ReturnTuple<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + + const SELECTOR: [u8; 4] = [239u8, 226u8, 127u8, 163u8]; + const SIGNATURE: &'static str = "mockMint(address,int24,int24,uint128)"; + + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + ::tokenize( + &self.owner, + ), + as alloy_sol_types::SolType>::tokenize( + &self.tickLower, + ), + as alloy_sol_types::SolType>::tokenize( + &self.tickUpper, + ), + as alloy_sol_types::SolType>::tokenize( + &self.amount, + ), + ) + } + + #[inline] + fn tokenize_returns(ret: &Self::Return) -> Self::ReturnToken<'_> { + mockMintReturn::_tokenize(ret) + } + + #[inline] + fn abi_decode_returns(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data) + .map(Into::into) + } + + #[inline] + fn abi_decode_returns_validate(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence_validate( + data, + ) + .map(Into::into) + } + } + }; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Function with signature `token0()` and selector `0x0dfe1681`. 
+ ```solidity + function token0() external view returns (address); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct token0Call; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + ///Container type for the return parameters of the [`token0()`](token0Call) + /// function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct token0Return { + #[allow(missing_docs)] + pub _0: alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: token0Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for token0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self + } + } + } + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: token0Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl 
::core::convert::From> for token0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for token0Call { + type Parameters<'a> = (); + type Return = alloy_sol_types::private::Address; + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type ReturnTuple<'a> = (alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + + const SELECTOR: [u8; 4] = [13u8, 254u8, 22u8, 129u8]; + const SIGNATURE: &'static str = "token0()"; + + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + + #[inline] + fn tokenize_returns(ret: &Self::Return) -> Self::ReturnToken<'_> { + (::tokenize(ret),) + } + + #[inline] + fn abi_decode_returns(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data).map( + |r| { + let r: token0Return = r.into(); + r._0 + }, + ) + } + + #[inline] + fn abi_decode_returns_validate(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence_validate( + data, + ) + .map(|r| { + let r: token0Return = r.into(); + r._0 + }) + } + } + }; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Function with signature `token1()` and selector `0xd21220a7`. + ```solidity + function token1() external view returns (address); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct token1Call; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + ///Container type for the return parameters of the [`token1()`](token1Call) + /// function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct token1Return { + #[allow(missing_docs)] + pub _0: alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: token1Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for token1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self + } + } + } + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: token1Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for token1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for token1Call { + type Parameters<'a> = (); + type Return = alloy_sol_types::private::Address; + type ReturnToken<'a> = as 
alloy_sol_types::SolType>::Token<'a>; + type ReturnTuple<'a> = (alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + + const SELECTOR: [u8; 4] = [210u8, 18u8, 32u8, 167u8]; + const SIGNATURE: &'static str = "token1()"; + + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + + #[inline] + fn tokenize_returns(ret: &Self::Return) -> Self::ReturnToken<'_> { + (::tokenize(ret),) + } + + #[inline] + fn abi_decode_returns(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data).map( + |r| { + let r: token1Return = r.into(); + r._0 + }, + ) + } + + #[inline] + fn abi_decode_returns_validate(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence_validate( + data, + ) + .map(|r| { + let r: token1Return = r.into(); + r._0 + }) + } + } + }; + ///Container for all the [`MockUniswapV3Pool`](self) function calls. + #[derive(Clone)] + pub enum MockUniswapV3PoolCalls { + #[allow(missing_docs)] + fee(feeCall), + #[allow(missing_docs)] + initialize(initializeCall), + #[allow(missing_docs)] + liquidity(liquidityCall), + #[allow(missing_docs)] + mockMint(mockMintCall), + #[allow(missing_docs)] + token0(token0Call), + #[allow(missing_docs)] + token1(token1Call), + } + impl MockUniswapV3PoolCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the + /// variants. No guarantees are made about the order of the + /// selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [13u8, 254u8, 22u8, 129u8], + [26u8, 104u8, 101u8, 2u8], + [210u8, 18u8, 32u8, 167u8], + [221u8, 202u8, 63u8, 67u8], + [239u8, 226u8, 127u8, 163u8], + [246u8, 55u8, 115u8, 29u8], + ]; + /// The signatures in the same order as `SELECTORS`. 
+ pub const SIGNATURES: &'static [&'static str] = &[ + ::SIGNATURE, + ::SIGNATURE, + ::SIGNATURE, + ::SIGNATURE, + ::SIGNATURE, + ::SIGNATURE, + ]; + /// The names of the variants in the same order as `SELECTORS`. + pub const VARIANT_NAMES: &'static [&'static str] = &[ + ::core::stringify!(token0), + ::core::stringify!(liquidity), + ::core::stringify!(token1), + ::core::stringify!(fee), + ::core::stringify!(mockMint), + ::core::stringify!(initialize), + ]; + + /// Returns the signature for the given selector, if known. + #[inline] + pub fn signature_by_selector( + selector: [u8; 4usize], + ) -> ::core::option::Option<&'static str> { + match Self::SELECTORS.binary_search(&selector) { + ::core::result::Result::Ok(idx) => { + ::core::option::Option::Some(Self::SIGNATURES[idx]) + } + ::core::result::Result::Err(_) => ::core::option::Option::None, + } + } + + /// Returns the enum variant name for the given selector, if known. + #[inline] + pub fn name_by_selector(selector: [u8; 4usize]) -> ::core::option::Option<&'static str> { + let sig = Self::signature_by_selector(selector)?; + sig.split_once('(').map(|(name, _)| name) + } + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for MockUniswapV3PoolCalls { + const COUNT: usize = 6usize; + const MIN_DATA_LENGTH: usize = 0usize; + const NAME: &'static str = "MockUniswapV3PoolCalls"; + + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::fee(_) => ::SELECTOR, + Self::initialize(_) => ::SELECTOR, + Self::liquidity(_) => ::SELECTOR, + Self::mockMint(_) => ::SELECTOR, + Self::token0(_) => ::SELECTOR, + Self::token1(_) => ::SELECTOR, + } + } + + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw(selector: [u8; 4], data: &[u8]) -> alloy_sol_types::Result { + 
static DECODE_SHIMS: &[fn(&[u8]) -> alloy_sol_types::Result] = + &[ + { + fn token0(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw(data) + .map(MockUniswapV3PoolCalls::token0) + } + token0 + }, + { + fn liquidity( + data: &[u8], + ) -> alloy_sol_types::Result + { + ::abi_decode_raw(data) + .map(MockUniswapV3PoolCalls::liquidity) + } + liquidity + }, + { + fn token1(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw(data) + .map(MockUniswapV3PoolCalls::token1) + } + token1 + }, + { + fn fee(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw(data) + .map(MockUniswapV3PoolCalls::fee) + } + fee + }, + { + fn mockMint( + data: &[u8], + ) -> alloy_sol_types::Result + { + ::abi_decode_raw(data) + .map(MockUniswapV3PoolCalls::mockMint) + } + mockMint + }, + { + fn initialize( + data: &[u8], + ) -> alloy_sol_types::Result + { + ::abi_decode_raw(data) + .map(MockUniswapV3PoolCalls::initialize) + } + initialize + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data) + } + + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw_validate( + selector: [u8; 4], + data: &[u8], + ) -> alloy_sol_types::Result { + static DECODE_VALIDATE_SHIMS: &[fn( + &[u8], + ) -> alloy_sol_types::Result< + MockUniswapV3PoolCalls, + >] = &[ + { + fn token0(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw_validate(data) + .map(MockUniswapV3PoolCalls::token0) + } + token0 + }, + { + fn liquidity(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw_validate(data) + .map(MockUniswapV3PoolCalls::liquidity) + } + liquidity + }, + { + fn token1(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw_validate(data) + .map(MockUniswapV3PoolCalls::token1) + } + token1 + }, + { + fn fee(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw_validate(data) + .map(MockUniswapV3PoolCalls::fee) + } + fee + }, + { + fn 
mockMint(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw_validate(data) + .map(MockUniswapV3PoolCalls::mockMint) + } + mockMint + }, + { + fn initialize(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw_validate(data) + .map(MockUniswapV3PoolCalls::initialize) + } + initialize + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_VALIDATE_SHIMS[idx](data) + } + + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::fee(inner) => ::abi_encoded_size(inner), + Self::initialize(inner) => { + ::abi_encoded_size(inner) + } + Self::liquidity(inner) => { + ::abi_encoded_size(inner) + } + Self::mockMint(inner) => { + ::abi_encoded_size(inner) + } + Self::token0(inner) => { + ::abi_encoded_size(inner) + } + Self::token1(inner) => { + ::abi_encoded_size(inner) + } + } + } + + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::fee(inner) => { + ::abi_encode_raw(inner, out) + } + Self::initialize(inner) => { + ::abi_encode_raw(inner, out) + } + Self::liquidity(inner) => { + ::abi_encode_raw(inner, out) + } + Self::mockMint(inner) => { + ::abi_encode_raw(inner, out) + } + Self::token0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::token1(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`MockUniswapV3Pool`](self) events. + #[derive(Clone, Debug, PartialEq, Eq, Hash)] + pub enum MockUniswapV3PoolEvents { + #[allow(missing_docs)] + Initialize(Initialize), + #[allow(missing_docs)] + Mint(Mint), + } + impl MockUniswapV3PoolEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the + /// variants. No guarantees are made about the order of the + /// selectors. + /// + /// Prefer using `SolInterface` methods instead. 
+ pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 122u8, 83u8, 8u8, 11u8, 164u8, 20u8, 21u8, 139u8, 231u8, 236u8, 105u8, 185u8, + 135u8, 181u8, 251u8, 125u8, 7u8, 222u8, 225u8, 1u8, 254u8, 133u8, 72u8, 143u8, 8u8, + 83u8, 174u8, 22u8, 35u8, 157u8, 11u8, 222u8, + ], + [ + 152u8, 99u8, 96u8, 54u8, 203u8, 102u8, 169u8, 193u8, 154u8, 55u8, 67u8, 94u8, + 252u8, 30u8, 144u8, 20u8, 33u8, 144u8, 33u8, 78u8, 138u8, 190u8, 184u8, 33u8, + 189u8, 186u8, 63u8, 41u8, 144u8, 221u8, 76u8, 149u8, + ], + ]; + /// The signatures in the same order as `SELECTORS`. + pub const SIGNATURES: &'static [&'static str] = &[ + ::SIGNATURE, + ::SIGNATURE, + ]; + /// The names of the variants in the same order as `SELECTORS`. + pub const VARIANT_NAMES: &'static [&'static str] = + &[::core::stringify!(Mint), ::core::stringify!(Initialize)]; + + /// Returns the signature for the given selector, if known. + #[inline] + pub fn signature_by_selector( + selector: [u8; 32usize], + ) -> ::core::option::Option<&'static str> { + match Self::SELECTORS.binary_search(&selector) { + ::core::result::Result::Ok(idx) => { + ::core::option::Option::Some(Self::SIGNATURES[idx]) + } + ::core::result::Result::Err(_) => ::core::option::Option::None, + } + } + + /// Returns the enum variant name for the given selector, if known. 
+ #[inline] + pub fn name_by_selector(selector: [u8; 32usize]) -> ::core::option::Option<&'static str> { + let sig = Self::signature_by_selector(selector)?; + sig.split_once('(').map(|(name, _)| name) + } + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for MockUniswapV3PoolEvents { + const COUNT: usize = 2usize; + const NAME: &'static str = "MockUniswapV3PoolEvents"; + + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log(topics, data) + .map(Self::Initialize) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log(topics, data) + .map(Self::Mint) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for MockUniswapV3PoolEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::Initialize(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::Mint(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), + } + } + + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::Initialize(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::Mint(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), + } + } + } + use alloy_contract; + /**Creates a new wrapper around an on-chain [`MockUniswapV3Pool`](self) contract instance. 
+ + See the [wrapper's documentation](`MockUniswapV3PoolInstance`) for more details.*/ + #[inline] + pub const fn new< + P: alloy_contract::private::Provider, + N: alloy_contract::private::Network, + >( + address: alloy_sol_types::private::Address, + __provider: P, + ) -> MockUniswapV3PoolInstance { + MockUniswapV3PoolInstance::::new(address, __provider) + } + /**Deploys this contract using the given `provider` and constructor arguments, if any. + + Returns a new instance of the contract, if the deployment was successful. + + For more fine-grained control over the deployment process, use [`deploy_builder`] instead.*/ + #[inline] + pub fn deploy, N: alloy_contract::private::Network>( + __provider: P, + _token0: alloy_sol_types::private::Address, + _token1: alloy_sol_types::private::Address, + _fee: alloy_sol_types::private::primitives::aliases::U24, + ) -> impl ::core::future::Future>> + { + MockUniswapV3PoolInstance::::deploy(__provider, _token0, _token1, _fee) + } + /**Creates a `RawCallBuilder` for deploying this contract using the given `provider` + and constructor arguments, if any. + + This is a simple wrapper around creating a `RawCallBuilder` with the data set to + the bytecode concatenated with the constructor's ABI-encoded arguments.*/ + #[inline] + pub fn deploy_builder< + P: alloy_contract::private::Provider, + N: alloy_contract::private::Network, + >( + __provider: P, + _token0: alloy_sol_types::private::Address, + _token1: alloy_sol_types::private::Address, + _fee: alloy_sol_types::private::primitives::aliases::U24, + ) -> alloy_contract::RawCallBuilder { + MockUniswapV3PoolInstance::::deploy_builder(__provider, _token0, _token1, _fee) + } + /**A [`MockUniswapV3Pool`](self) instance. + + Contains type-safe methods for interacting with an on-chain instance of the + [`MockUniswapV3Pool`](self) contract located at a given `address`, using a given + provider `P`. + + If the contract bytecode is available (see the [`sol!`](alloy_sol_types::sol!) 
+ documentation on how to provide it), the `deploy` and `deploy_builder` methods can + be used to deploy a new instance of the contract. + + See the [module-level documentation](self) for all the available methods.*/ + #[derive(Clone)] + pub struct MockUniswapV3PoolInstance { + address: alloy_sol_types::private::Address, + provider: P, + _network: ::core::marker::PhantomData, + } + #[automatically_derived] + impl ::core::fmt::Debug for MockUniswapV3PoolInstance { + #[inline] + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + f.debug_tuple("MockUniswapV3PoolInstance") + .field(&self.address) + .finish() + } + } + /// Instantiation and getters/setters. + impl, N: alloy_contract::private::Network> + MockUniswapV3PoolInstance + { + /**Creates a new wrapper around an on-chain [`MockUniswapV3Pool`](self) contract instance. + + See the [wrapper's documentation](`MockUniswapV3PoolInstance`) for more details.*/ + #[inline] + pub const fn new(address: alloy_sol_types::private::Address, __provider: P) -> Self { + Self { + address, + provider: __provider, + _network: ::core::marker::PhantomData, + } + } + + /**Deploys this contract using the given `provider` and constructor arguments, if any. + + Returns a new instance of the contract, if the deployment was successful. + + For more fine-grained control over the deployment process, use [`deploy_builder`] instead.*/ + #[inline] + pub async fn deploy( + __provider: P, + _token0: alloy_sol_types::private::Address, + _token1: alloy_sol_types::private::Address, + _fee: alloy_sol_types::private::primitives::aliases::U24, + ) -> alloy_contract::Result> { + let call_builder = Self::deploy_builder(__provider, _token0, _token1, _fee); + let contract_address = call_builder.deploy().await?; + Ok(Self::new(contract_address, call_builder.provider)) + } + + /**Creates a `RawCallBuilder` for deploying this contract using the given `provider` + and constructor arguments, if any. 
+ + This is a simple wrapper around creating a `RawCallBuilder` with the data set to + the bytecode concatenated with the constructor's ABI-encoded arguments.*/ + #[inline] + pub fn deploy_builder( + __provider: P, + _token0: alloy_sol_types::private::Address, + _token1: alloy_sol_types::private::Address, + _fee: alloy_sol_types::private::primitives::aliases::U24, + ) -> alloy_contract::RawCallBuilder { + alloy_contract::RawCallBuilder::new_raw_deploy( + __provider, + [ + &BYTECODE[..], + &alloy_sol_types::SolConstructor::abi_encode(&constructorCall { + _token0, + _token1, + _fee, + })[..], + ] + .concat() + .into(), + ) + } + + /// Returns a reference to the address. + #[inline] + pub const fn address(&self) -> &alloy_sol_types::private::Address { + &self.address + } + + /// Sets the address. + #[inline] + pub fn set_address(&mut self, address: alloy_sol_types::private::Address) { + self.address = address; + } + + /// Sets the address and returns `self`. + pub fn at(mut self, address: alloy_sol_types::private::Address) -> Self { + self.set_address(address); + self + } + + /// Returns a reference to the provider. + #[inline] + pub const fn provider(&self) -> &P { + &self.provider + } + } + impl MockUniswapV3PoolInstance<&P, N> { + /// Clones the provider and returns a new instance with the cloned + /// provider. + #[inline] + pub fn with_cloned_provider(self) -> MockUniswapV3PoolInstance { + MockUniswapV3PoolInstance { + address: self.address, + provider: ::core::clone::Clone::clone(&self.provider), + _network: ::core::marker::PhantomData, + } + } + } + /// Function calls. + impl, N: alloy_contract::private::Network> + MockUniswapV3PoolInstance + { + /// Creates a new call builder using this contract instance's provider + /// and address. + /// + /// Note that the call can be any function call, not just those defined + /// in this contract. Prefer using the other methods for + /// building type-safe contract calls. 
+ pub fn call_builder( + &self, + call: &C, + ) -> alloy_contract::SolCallBuilder<&P, C, N> { + alloy_contract::SolCallBuilder::new_sol(&self.provider, &self.address, call) + } + + ///Creates a new call builder for the [`fee`] function. + pub fn fee(&self) -> alloy_contract::SolCallBuilder<&P, feeCall, N> { + self.call_builder(&feeCall) + } + + ///Creates a new call builder for the [`initialize`] function. + pub fn initialize( + &self, + sqrtPriceX96: alloy_sol_types::private::primitives::aliases::U160, + ) -> alloy_contract::SolCallBuilder<&P, initializeCall, N> { + self.call_builder(&initializeCall { sqrtPriceX96 }) + } + + ///Creates a new call builder for the [`liquidity`] function. + pub fn liquidity(&self) -> alloy_contract::SolCallBuilder<&P, liquidityCall, N> { + self.call_builder(&liquidityCall) + } + + ///Creates a new call builder for the [`mockMint`] function. + pub fn mockMint( + &self, + owner: alloy_sol_types::private::Address, + tickLower: alloy_sol_types::private::primitives::aliases::I24, + tickUpper: alloy_sol_types::private::primitives::aliases::I24, + amount: u128, + ) -> alloy_contract::SolCallBuilder<&P, mockMintCall, N> { + self.call_builder(&mockMintCall { + owner, + tickLower, + tickUpper, + amount, + }) + } + + ///Creates a new call builder for the [`token0`] function. + pub fn token0(&self) -> alloy_contract::SolCallBuilder<&P, token0Call, N> { + self.call_builder(&token0Call) + } + + ///Creates a new call builder for the [`token1`] function. + pub fn token1(&self) -> alloy_contract::SolCallBuilder<&P, token1Call, N> { + self.call_builder(&token1Call) + } + } + /// Event filters. + impl, N: alloy_contract::private::Network> + MockUniswapV3PoolInstance + { + /// Creates a new event filter using this contract instance's provider + /// and address. + /// + /// Note that the type can be any event, not just those defined in this + /// contract. Prefer using the other methods for building + /// type-safe event filters. 
+ pub fn event_filter( + &self, + ) -> alloy_contract::Event<&P, E, N> { + alloy_contract::Event::new_sol(&self.provider, &self.address) + } + + ///Creates a new event filter for the [`Initialize`] event. + pub fn Initialize_filter(&self) -> alloy_contract::Event<&P, Initialize, N> { + self.event_filter::() + } + + ///Creates a new event filter for the [`Mint`] event. + pub fn Mint_filter(&self) -> alloy_contract::Event<&P, Mint, N> { + self.event_filter::() + } + } +} +pub type Instance = MockUniswapV3Pool::MockUniswapV3PoolInstance<::alloy_provider::DynProvider>; diff --git a/contracts/generated/contracts-generated/uniswapv3pool/src/lib.rs b/contracts/generated/contracts-generated/uniswapv3pool/src/lib.rs index bb14908ba7..9699241ec4 100644 --- a/contracts/generated/contracts-generated/uniswapv3pool/src/lib.rs +++ b/contracts/generated/contracts-generated/uniswapv3pool/src/lib.rs @@ -35,6 +35,7 @@ interface UniswapV3Pool { function observe(uint32[] memory secondsAgos) external view returns (int56[] memory tickCumulatives, uint160[] memory secondsPerLiquidityCumulativeX128s); function positions(bytes32) external view returns (uint128 liquidity, uint256 feeGrowthInside0LastX128, uint256 feeGrowthInside1LastX128, uint128 tokensOwed0, uint128 tokensOwed1); function protocolFees() external view returns (uint128 token0, uint128 token1); + function slot0() external view returns (uint160 sqrtPriceX96, int24 tick, uint16 observationIndex, uint16 observationCardinality, uint16 observationCardinalityNext, uint8 feeProtocol, bool unlocked); function swap(address recipient, bool zeroForOne, int256 amountSpecified, uint160 sqrtPriceLimitX96, bytes memory data) external returns (int256 amount0, int256 amount1); function ticks(int24) external view returns (uint128 liquidityGross, int128 liquidityNet, uint256 feeGrowthOutside0X128, uint256 feeGrowthOutside1X128, int56 tickCumulativeOutside, uint160 secondsPerLiquidityOutsideX128, uint32 secondsOutside, bool initialized); function 
token0() external view returns (address); @@ -367,6 +368,49 @@ interface UniswapV3Pool { ], "stateMutability": "view" }, + { + "type": "function", + "name": "slot0", + "inputs": [], + "outputs": [ + { + "name": "sqrtPriceX96", + "type": "uint160", + "internalType": "uint160" + }, + { + "name": "tick", + "type": "int24", + "internalType": "int24" + }, + { + "name": "observationIndex", + "type": "uint16", + "internalType": "uint16" + }, + { + "name": "observationCardinality", + "type": "uint16", + "internalType": "uint16" + }, + { + "name": "observationCardinalityNext", + "type": "uint16", + "internalType": "uint16" + }, + { + "name": "feeProtocol", + "type": "uint8", + "internalType": "uint8" + }, + { + "name": "unlocked", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, { "type": "function", "name": "swap", @@ -4189,6 +4233,213 @@ pub mod UniswapV3Pool { } }; #[derive(Default, Debug, PartialEq, Eq, Hash)] + /**Function with signature `slot0()` and selector `0x3850c7bd`. + ```solidity + function slot0() external view returns (uint160 sqrtPriceX96, int24 tick, uint16 observationIndex, uint16 observationCardinality, uint16 observationCardinalityNext, uint8 feeProtocol, bool unlocked); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct slot0Call; + #[derive(Default, Debug, PartialEq, Eq, Hash)] + ///Container type for the return parameters of the [`slot0()`](slot0Call) + /// function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct slot0Return { + #[allow(missing_docs)] + pub sqrtPriceX96: alloy_sol_types::private::primitives::aliases::U160, + #[allow(missing_docs)] + pub tick: alloy_sol_types::private::primitives::aliases::I24, + #[allow(missing_docs)] + pub observationIndex: u16, + #[allow(missing_docs)] + pub observationCardinality: u16, + #[allow(missing_docs)] + pub observationCardinalityNext: u16, + #[allow(missing_docs)] + pub feeProtocol: u8, + #[allow(missing_docs)] + pub unlocked: bool, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use alloy_sol_types; + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: slot0Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for slot0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self + } + } + } + { + #[doc(hidden)] + #[allow(dead_code)] + type UnderlyingSolTuple<'a> = ( + alloy_sol_types::sol_data::Uint<160>, + alloy_sol_types::sol_data::Int<24>, + alloy_sol_types::sol_data::Uint<16>, + alloy_sol_types::sol_data::Uint<16>, + alloy_sol_types::sol_data::Uint<16>, + alloy_sol_types::sol_data::Uint<8>, + alloy_sol_types::sol_data::Bool, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + alloy_sol_types::private::primitives::aliases::U160, + alloy_sol_types::private::primitives::aliases::I24, + u16, + u16, + u16, + u8, + bool, + ); + #[cfg(test)] + #[allow(dead_code, 
unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: slot0Return) -> Self { + ( + value.sqrtPriceX96, + value.tick, + value.observationIndex, + value.observationCardinality, + value.observationCardinalityNext, + value.feeProtocol, + value.unlocked, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for slot0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + sqrtPriceX96: tuple.0, + tick: tuple.1, + observationIndex: tuple.2, + observationCardinality: tuple.3, + observationCardinalityNext: tuple.4, + feeProtocol: tuple.5, + unlocked: tuple.6, + } + } + } + } + impl slot0Return { + fn _tokenize(&self) -> ::ReturnToken<'_> { + ( + as alloy_sol_types::SolType>::tokenize( + &self.sqrtPriceX96, + ), + as alloy_sol_types::SolType>::tokenize( + &self.tick, + ), + as alloy_sol_types::SolType>::tokenize( + &self.observationIndex, + ), + as alloy_sol_types::SolType>::tokenize( + &self.observationCardinality, + ), + as alloy_sol_types::SolType>::tokenize( + &self.observationCardinalityNext, + ), + as alloy_sol_types::SolType>::tokenize( + &self.feeProtocol, + ), + ::tokenize( + &self.unlocked, + ), + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for slot0Call { + type Parameters<'a> = (); + type Return = slot0Return; + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type ReturnTuple<'a> = ( + alloy_sol_types::sol_data::Uint<160>, + alloy_sol_types::sol_data::Int<24>, + alloy_sol_types::sol_data::Uint<16>, + alloy_sol_types::sol_data::Uint<16>, + alloy_sol_types::sol_data::Uint<16>, + alloy_sol_types::sol_data::Uint<8>, + alloy_sol_types::sol_data::Bool, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + + const SELECTOR: 
[u8; 4] = [56u8, 80u8, 199u8, 189u8]; + const SIGNATURE: &'static str = "slot0()"; + + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + + #[inline] + fn tokenize_returns(ret: &Self::Return) -> Self::ReturnToken<'_> { + slot0Return::_tokenize(ret) + } + + #[inline] + fn abi_decode_returns(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data) + .map(Into::into) + } + + #[inline] + fn abi_decode_returns_validate(data: &[u8]) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence_validate( + data, + ) + .map(Into::into) + } + } + }; + #[derive(Default, Debug, PartialEq, Eq, Hash)] /**Function with signature `swap(address,bool,int256,uint160,bytes)` and selector `0x128acb08`. ```solidity function swap(address recipient, bool zeroForOne, int256 amountSpecified, uint160 sqrtPriceLimitX96, bytes memory data) external returns (int256 amount0, int256 amount1); @@ -4918,6 +5169,8 @@ pub mod UniswapV3Pool { #[allow(missing_docs)] protocolFees(protocolFeesCall), #[allow(missing_docs)] + slot0(slot0Call), + #[allow(missing_docs)] swap(swapCall), #[allow(missing_docs)] ticks(ticksCall), @@ -4940,6 +5193,7 @@ pub mod UniswapV3Pool { [26u8, 104u8, 101u8, 2u8], [26u8, 216u8, 176u8, 59u8], [37u8, 44u8, 9u8, 215u8], + [56u8, 80u8, 199u8, 189u8], [60u8, 138u8, 125u8, 141u8], [73u8, 14u8, 108u8, 188u8], [79u8, 30u8, 179u8, 216u8], @@ -4959,6 +5213,7 @@ pub mod UniswapV3Pool { ::SIGNATURE, ::SIGNATURE, ::SIGNATURE, + ::SIGNATURE, ::SIGNATURE, ::SIGNATURE, ::SIGNATURE, @@ -4978,6 +5233,7 @@ pub mod UniswapV3Pool { ::core::stringify!(liquidity), ::core::stringify!(protocolFees), ::core::stringify!(observations), + ::core::stringify!(slot0), ::core::stringify!(mint), ::core::stringify!(flash), ::core::stringify!(collect), @@ -5013,7 +5269,7 @@ pub mod UniswapV3Pool { } #[automatically_derived] impl 
alloy_sol_types::SolInterface for UniswapV3PoolCalls { - const COUNT: usize = 16usize; + const COUNT: usize = 17usize; const MIN_DATA_LENGTH: usize = 0usize; const NAME: &'static str = "UniswapV3PoolCalls"; @@ -5032,6 +5288,7 @@ pub mod UniswapV3Pool { Self::observe(_) => ::SELECTOR, Self::positions(_) => ::SELECTOR, Self::protocolFees(_) => ::SELECTOR, + Self::slot0(_) => ::SELECTOR, Self::swap(_) => ::SELECTOR, Self::ticks(_) => ::SELECTOR, Self::token0(_) => ::SELECTOR, @@ -5088,6 +5345,13 @@ pub mod UniswapV3Pool { } observations }, + { + fn slot0(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw(data) + .map(UniswapV3PoolCalls::slot0) + } + slot0 + }, { fn mint(data: &[u8]) -> alloy_sol_types::Result { ::abi_decode_raw(data) @@ -5224,6 +5488,13 @@ pub mod UniswapV3Pool { } observations }, + { + fn slot0(data: &[u8]) -> alloy_sol_types::Result { + ::abi_decode_raw_validate(data) + .map(UniswapV3PoolCalls::slot0) + } + slot0 + }, { fn mint(data: &[u8]) -> alloy_sol_types::Result { ::abi_decode_raw_validate(data) @@ -5348,6 +5619,9 @@ pub mod UniswapV3Pool { Self::protocolFees(inner) => { ::abi_encoded_size(inner) } + Self::slot0(inner) => { + ::abi_encoded_size(inner) + } Self::swap(inner) => { ::abi_encoded_size(inner) } @@ -5402,6 +5676,9 @@ pub mod UniswapV3Pool { Self::protocolFees(inner) => { ::abi_encode_raw(inner, out) } + Self::slot0(inner) => { + ::abi_encode_raw(inner, out) + } Self::swap(inner) => { ::abi_encode_raw(inner, out) } @@ -5886,6 +6163,11 @@ pub mod UniswapV3Pool { self.call_builder(&protocolFeesCall) } + ///Creates a new call builder for the [`slot0`] function. + pub fn slot0(&self) -> alloy_contract::SolCallBuilder<&P, slot0Call, N> { + self.call_builder(&slot0Call) + } + ///Creates a new call builder for the [`swap`] function. 
pub fn swap( &self, diff --git a/contracts/solidity/Makefile b/contracts/solidity/Makefile index ef57ef6090..046b4fddc4 100644 --- a/contracts/solidity/Makefile +++ b/contracts/solidity/Makefile @@ -18,7 +18,7 @@ CONTRACTS := \ Trader.sol ARTIFACTS := $(patsubst %.sol,$(ARTIFACTDIR)/%.json,$(CONTRACTS)) -TEST_CONTRACTS := Counter.sol GasHog.sol NonStandardERC20Balances.sol RemoteERC20Balances.sol +TEST_CONTRACTS := Counter.sol GasHog.sol MockUniswapV3Factory.sol MockUniswapV3Pool.sol NonStandardERC20Balances.sol RemoteERC20Balances.sol TEST_ARTIFACTS := $(patsubst %.sol,$(ARTIFACTDIR)/%.json,$(TEST_CONTRACTS)) .PHONY: artifacts diff --git a/contracts/solidity/tests/MockUniswapV3Factory.sol b/contracts/solidity/tests/MockUniswapV3Factory.sol new file mode 100644 index 0000000000..7463f07468 --- /dev/null +++ b/contracts/solidity/tests/MockUniswapV3Factory.sol @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.17; + +import "./MockUniswapV3Pool.sol"; + +/// @title Minimal mock of a Uniswap V3 factory for indexer e2e tests. +/// @dev `createPool` deploys a `MockUniswapV3Pool` and emits the same +/// `PoolCreated` event the pool-indexer listens for. +contract MockUniswapV3Factory { + event PoolCreated( + address indexed token0, + address indexed token1, + uint24 indexed fee, + int24 tickSpacing, + address pool + ); + + function createPool( + address tokenA, + address tokenB, + uint24 _fee + ) external returns (address pool) { + (address t0, address t1) = tokenA < tokenB + ? 
(tokenA, tokenB) + : (tokenB, tokenA); + + MockUniswapV3Pool p = new MockUniswapV3Pool(t0, t1, _fee); + pool = address(p); + + emit PoolCreated(t0, t1, _fee, int24(10), pool); + } +} diff --git a/contracts/solidity/tests/MockUniswapV3Pool.sol b/contracts/solidity/tests/MockUniswapV3Pool.sol new file mode 100644 index 0000000000..f6fe07f059 --- /dev/null +++ b/contracts/solidity/tests/MockUniswapV3Pool.sol @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.17; + +/// @title Minimal mock of a Uniswap V3 pool for indexer e2e tests. +/// @dev Emits the same events the pool-indexer listens for. Only the +/// subset of state that the indexer actually reads is stored. +contract MockUniswapV3Pool { + address public immutable token0; + address public immutable token1; + uint24 public immutable fee; + + uint128 public liquidity; + + event Initialize(uint160 sqrtPriceX96, int24 tick); + + event Mint( + address sender, + address indexed owner, + int24 indexed tickLower, + int24 indexed tickUpper, + uint128 amount, + uint256 amount0, + uint256 amount1 + ); + + constructor(address _token0, address _token1, uint24 _fee) { + token0 = _token0; + token1 = _token1; + fee = _fee; + } + + function initialize(uint160 sqrtPriceX96) external { + emit Initialize(sqrtPriceX96, int24(0)); + } + + function mockMint( + address owner, + int24 tickLower, + int24 tickUpper, + uint128 amount + ) external { + liquidity += amount; + emit Mint(msg.sender, owner, tickLower, tickUpper, amount, 0, 0); + } +} diff --git a/contracts/src/codegen.rs b/contracts/src/codegen.rs index 84c0822c76..93bc710a86 100644 --- a/contracts/src/codegen.rs +++ b/contracts/src/codegen.rs @@ -38,10 +38,10 @@ publish = false doctest = false [dependencies] -alloy-primitives = { workspace = true } -alloy-sol-types = { workspace = true } alloy-contract = { workspace = true } +alloy-primitives = { workspace = true } alloy-provider = { workspace = true } +alloy-sol-types = { workspace = true } 
anyhow = { workspace = true } [lints] @@ -54,15 +54,15 @@ const WORKSPACE_CARGO_TOML: &str = "\ [workspace] resolver = \"3\" members = [ - \"contracts-facade\", - \"contracts-generated/*\", + \"contracts-facade\", + \"contracts-generated/*\", ] [workspace.dependencies] -alloy-primitives = { version = \"1.5.7\", default-features = false } -alloy-sol-types = { version = \"1.5.7\", default-features = false } -alloy-contract = { version = \"1.7.3\" } -alloy-provider = { version = \"1.7.3\", default-features = false } +alloy-contract = { version = \"1.7.3\" } +alloy-primitives = { version = \"1.5.7\", default-features = false } +alloy-provider = { version = \"1.7.3\", default-features = false } +alloy-sol-types = { version = \"1.5.7\", default-features = false } anyhow = \"1.0.100\" [workspace.lints.clippy] diff --git a/contracts/src/main.rs b/contracts/src/main.rs index b031174ca3..f7774fc876 100644 --- a/contracts/src/main.rs +++ b/contracts/src/main.rs @@ -510,6 +510,8 @@ fn build_module() -> Module { .add_contract(Contract::new("GasHog")) .add_contract(Contract::new("Counter")) .add_contract(Contract::new("MockERC4626Wrapper")) + .add_contract(Contract::new("MockUniswapV3Factory")) + .add_contract(Contract::new("MockUniswapV3Pool")) .add_contract(Contract::new("CowProtocolToken").with_networks(networks![ MAINNET => "0xDEf1CA1fb7FBcDC777520aa7f396b4E015F497aB", GNOSIS => "0x177127622c4A00F3d409B75571e12cB3c8973d3c", diff --git a/crates/balance-overrides/src/detector.rs b/crates/balance-overrides/src/detector.rs index 494b9d57be..35e9c59311 100644 --- a/crates/balance-overrides/src/detector.rs +++ b/crates/balance-overrides/src/detector.rs @@ -13,7 +13,7 @@ use { alloy_transport::{RpcError, TransportErrorKind}, contracts::ERC20, std::{ - collections::HashMap, + collections::{HashMap, HashSet}, fmt::{self, Debug, Formatter}, sync::Arc, time::Duration, @@ -288,7 +288,7 @@ impl Detector { ) -> Vec<(Address, B256)> { let mut storage_context = 
vec![initial_storage_context]; let mut slots = Vec::new(); - let mut seen = std::collections::HashSet::new(); + let mut seen = HashSet::new(); for log in &trace .try_into_default_frame() diff --git a/crates/chain/src/lib.rs b/crates/chain/src/lib.rs index 3f21cc0d5a..fb7b979c72 100644 --- a/crates/chain/src/lib.rs +++ b/crates/chain/src/lib.rs @@ -52,6 +52,27 @@ impl Chain { } } + /// Kebab-case slug used in URLs and per-network configs (pool-indexer API + /// routes, DB database names, etc). Stable — other services parse it. + pub fn slug(&self) -> &'static str { + match &self { + Self::Mainnet => "mainnet", + Self::Goerli => "goerli", + Self::Gnosis => "gnosis", + Self::Sepolia => "sepolia", + Self::ArbitrumOne => "arbitrum-one", + Self::Base => "base", + Self::Hardhat => "hardhat", + Self::Bnb => "bnb", + Self::Avalanche => "avalanche", + Self::Optimism => "optimism", + Self::Polygon => "polygon", + Self::Linea => "linea", + Self::Plasma => "plasma", + Self::Ink => "ink", + } + } + /// The default amount in native tokens atoms to use for price estimation pub fn default_amount_to_estimate_native_prices_with(&self) -> U256 { match &self { diff --git a/crates/configs/src/deserialize_env.rs b/crates/configs/src/deserialize_env.rs index 798c99e681..423b15e439 100644 --- a/crates/configs/src/deserialize_env.rs +++ b/crates/configs/src/deserialize_env.rs @@ -33,7 +33,7 @@ fn invalid_value_unable_to_parse_url(err: ParseError) -> E /// Deserializes an URL from *either* an environment variable — with the format /// `%` — or interpreting a String as a URL. -pub(crate) fn deserialize_url_from_env<'de, D>(deserializer: D) -> Result +pub fn deserialize_url_from_env<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { @@ -70,9 +70,7 @@ where /// Deserializes an optional URL from *either* an environment variable — with /// the format `%` — or interpreting a String as a URL. 
-pub(crate) fn deserialize_optional_url_from_env<'de, D>( - deserializer: D, -) -> Result, D::Error> +pub fn deserialize_optional_url_from_env<'de, D>(deserializer: D) -> Result, D::Error> where D: Deserializer<'de>, { diff --git a/crates/configs/src/lib.rs b/crates/configs/src/lib.rs index 3dd21d4cc0..7dd291317b 100644 --- a/crates/configs/src/lib.rs +++ b/crates/configs/src/lib.rs @@ -2,7 +2,7 @@ pub mod autopilot; pub mod balance_overrides; pub mod banned_users; pub mod database; -pub(crate) mod deserialize_env; +pub mod deserialize_env; pub mod fee_factor; pub mod gas_price_estimation; pub mod http_client; diff --git a/crates/database/src/lib.rs b/crates/database/src/lib.rs index 97dad43556..16257f6849 100644 --- a/crates/database/src/lib.rs +++ b/crates/database/src/lib.rs @@ -62,6 +62,7 @@ pub const TABLES: &[&str] = &[ "last_indexed_blocks", "onchain_order_invalidations", "onchain_placed_orders", + "pool_indexer_checkpoints", "presignature_events", "proposed_jit_orders", "quotes", @@ -71,6 +72,8 @@ pub const TABLES: &[&str] = &[ "solver_competitions", "surplus_capturing_jit_order_owners", "trades", + "uniswap_v3_pool_states", + "uniswap_v3_pools", ]; /// The names of potentially big volume tables we use in the db. @@ -85,6 +88,7 @@ pub const LARGE_TABLES: &[&str] = &[ "order_quotes", "proposed_solutions", "proposed_trade_executions", + "uniswap_v3_ticks", ]; pub fn all_tables() -> impl Iterator { @@ -92,11 +96,15 @@ pub fn all_tables() -> impl Iterator { } /// Delete all data in the database. Only used by tests. +/// +/// Truncates all tables in a single statement so Postgres accepts foreign-key +/// cycles between listed tables (e.g. `uniswap_v3_pool_states` → +/// `uniswap_v3_pools`). Individual per-table `TRUNCATE`s error out when any +/// other listed table references the one being truncated. 
#[expect(non_snake_case)] pub async fn clear_DANGER_(ex: &mut PgTransaction<'_>) -> sqlx::Result<()> { - for table in all_tables() { - ex.execute(format!("TRUNCATE {table};").as_str()).await?; - } + let tables = all_tables().collect::>().join(", "); + ex.execute(format!("TRUNCATE {tables};").as_str()).await?; Ok(()) } diff --git a/crates/driver/src/boundary/liquidity/uniswap/v3.rs b/crates/driver/src/boundary/liquidity/uniswap/v3.rs index 3dfab84338..a41d12bbb8 100644 --- a/crates/driver/src/boundary/liquidity/uniswap/v3.rs +++ b/crates/driver/src/boundary/liquidity/uniswap/v3.rs @@ -13,7 +13,12 @@ use { anyhow::Context, eth_domain_types as eth, event_indexing::{block_retriever::BlockRetrieving, maintenance::ServiceMaintenance}, - liquidity_sources::uniswap_v3::pool_fetching::UniswapV3PoolFetcher, + liquidity_sources::uniswap_v3::{ + V3PoolDataSource, + graph_api::UniV3SubgraphClient, + pool_fetching::UniswapV3PoolFetcher, + pool_indexer::PoolIndexerClient, + }, shared::{http_solver::model::TokenAmount, interaction::Interaction}, solver::{ liquidity::{ @@ -114,15 +119,14 @@ async fn init_liquidity( config: &infra::liquidity::config::UniswapV3, ) -> anyhow::Result> { let web3 = eth.web3().clone(); + let source = build_pool_data_source(eth, config).await?; let pool_fetcher = Arc::new( UniswapV3PoolFetcher::new( - &config.graph_url, + source, web3.clone(), - boundary::liquidity::http_client(), block_retriever, config.max_pools_to_initialize, - config.max_pools_per_tick_query, ) .await .context("failed to initialise UniswapV3 liquidity")?, @@ -138,3 +142,35 @@ async fn init_liquidity( pool_fetcher, )) } + +/// Picks the V3 pool data source based on config precedence. 
+async fn build_pool_data_source( + eth: &Ethereum, + config: &infra::liquidity::config::UniswapV3, +) -> anyhow::Result> { + let http = boundary::liquidity::http_client(); + + if let Some(url) = &config.pool_indexer_url { + tracing::info!(%url, "uniswap v3: using pool-indexer as data source"); + return Ok(Arc::new(PoolIndexerClient::new( + url.clone(), + eth.chain(), + http, + ))); + } + + let subgraph = config + .subgraph + .as_ref() + .context("uniswap v3: subgraph required when pool_indexer_url is unset")?; + tracing::info!(url = %subgraph.url, "uniswap v3: using subgraph as data source"); + Ok(Arc::new( + UniV3SubgraphClient::from_subgraph_url( + &subgraph.url, + http, + subgraph.max_pools_per_tick_query, + ) + .await + .context("failed to construct UniV3 subgraph client")?, + )) +} diff --git a/crates/driver/src/infra/config/file/load.rs b/crates/driver/src/infra/config/file/load.rs index 6149891e2c..d9d4750a60 100644 --- a/crates/driver/src/infra/config/file/load.rs +++ b/crates/driver/src/infra/config/file/load.rs @@ -225,35 +225,54 @@ pub async fn load(chain: Chain, path: &Path) -> infra::Config { preset, max_pools_to_initialize, graph_url, + pool_indexer_url, reinit_interval, max_pools_per_tick_query, - } => liquidity::config::UniswapV3 { - max_pools_to_initialize, - reinit_interval, - ..match preset { - file::UniswapV3Preset::UniswapV3 => { - liquidity::config::UniswapV3::uniswap_v3( - &graph_url, - chain, - max_pools_per_tick_query, - ) + } => { + assert!( + graph_url.is_some() || pool_indexer_url.is_some(), + "uniswap-v3: set at least one of graph-url or pool-indexer-url" + ); + let subgraph = graph_url.map(|url| liquidity::config::UniswapV3Subgraph { + url, + max_pools_per_tick_query, + }); + liquidity::config::UniswapV3 { + max_pools_to_initialize, + pool_indexer_url, + reinit_interval, + ..match preset { + file::UniswapV3Preset::UniswapV3 => { + liquidity::config::UniswapV3::uniswap_v3(subgraph, chain) + } } + .expect("no Uniswap V3 preset for 
current network") } - .expect("no Uniswap V3 preset for current network") - }, + } file::UniswapV3Config::Manual { router, max_pools_to_initialize, graph_url, + pool_indexer_url, reinit_interval, max_pools_per_tick_query, - } => liquidity::config::UniswapV3 { - router: router.into(), - max_pools_to_initialize, - graph_url, - reinit_interval, - max_pools_per_tick_query, - }, + } => { + assert!( + graph_url.is_some() || pool_indexer_url.is_some(), + "uniswap-v3: set at least one of graph-url or pool-indexer-url" + ); + let subgraph = graph_url.map(|url| liquidity::config::UniswapV3Subgraph { + url, + max_pools_per_tick_query, + }); + liquidity::config::UniswapV3 { + router: router.into(), + max_pools_to_initialize, + subgraph, + pool_indexer_url, + reinit_interval, + } + } }) .collect(), balancer_v2: config diff --git a/crates/driver/src/infra/config/file/mod.rs b/crates/driver/src/infra/config/file/mod.rs index 054028be10..6608a949c3 100644 --- a/crates/driver/src/infra/config/file/mod.rs +++ b/crates/driver/src/infra/config/file/mod.rs @@ -540,7 +540,19 @@ enum UniswapV3Config { #[serde(default = "uniswap_v3::default_max_pools_to_initialize")] max_pools_to_initialize: usize, - graph_url: Url, + // TODO: model these two URLs as an enum (Graph / PoolIndexer / Both) + // once serde supports `deny_unknown_fields` with internally-tagged or + // flattened enum variants — https://github.com/serde-rs/serde/issues/1547. + /// The URL used to connect to uniswap v3 subgraph client. At least one + /// of `graph_url` or `pool_indexer_url` must be set; `pool_indexer_url` + /// takes precedence when both are provided. + #[serde(default)] + graph_url: Option, + + /// Optional URL of a CoW pool-indexer service. When set, it replaces + /// the subgraph as the pool metadata source. + #[serde(default)] + pool_indexer_url: Option, /// How many pool IDs can be present in a where clause of a Tick query /// at once. 
Some subgraphs are overloaded and throw errors when @@ -569,8 +581,19 @@ enum UniswapV3Config { #[serde(default = "uniswap_v3::default_max_pools_per_tick_query")] max_pools_per_tick_query: usize, - /// The URL used to connect to uniswap v3 subgraph client. - graph_url: Url, + // TODO: model these two URLs as an enum (Graph / PoolIndexer / Both) + // once serde supports `deny_unknown_fields` with internally-tagged or + // flattened enum variants — https://github.com/serde-rs/serde/issues/1547. + /// The URL used to connect to uniswap v3 subgraph client. At least one + /// of `graph_url` or `pool_indexer_url` must be set; `pool_indexer_url` + /// takes precedence when both are provided. + #[serde(default)] + graph_url: Option, + + /// Optional URL of a CoW pool-indexer service. When set, it replaces + /// the subgraph as the pool metadata source. + #[serde(default)] + pool_indexer_url: Option, /// How often the liquidity source should be reinitialized to get /// access to new pools. diff --git a/crates/driver/src/infra/liquidity/config.rs b/crates/driver/src/infra/liquidity/config.rs index ef05baa2a1..87cb4ca225 100644 --- a/crates/driver/src/infra/liquidity/config.rs +++ b/crates/driver/src/infra/liquidity/config.rs @@ -158,6 +158,19 @@ impl Swapr { } } +/// Subgraph-specific Uniswap V3 config. Only consulted when no +/// `pool_indexer_url` is set on [`UniswapV3`]. +#[derive(Clone, Debug)] +pub struct UniswapV3Subgraph { + /// URL of the Uniswap V3 subgraph. + pub url: Url, + + /// How many pool IDs can be present in a where clause of a Tick query at + /// once. Some subgraphs are overloaded and throw errors when there are + /// too many. + pub max_pools_per_tick_query: usize, +} + /// Uniswap V3 liquidity fetching options. #[derive(Clone, Debug)] pub struct UniswapV3 { @@ -167,33 +180,31 @@ pub struct UniswapV3 { /// How many pools should be initialized during start up. pub max_pools_to_initialize: usize, - /// The URL used to connect to uniswap v3 subgraph client. 
- pub graph_url: Url, + /// Subgraph data source. One of `subgraph` or `pool_indexer_url` must be + /// set; when both are set the pool-indexer wins. Enforced by the config + /// loader. + pub subgraph: Option, + + /// Optional URL of a CoW pool-indexer service exposing the + /// `/api/v1/{network}/uniswap/v3/` endpoints. When set, it takes + /// precedence over `subgraph` for pool metadata and ticks. + pub pool_indexer_url: Option, /// How often the liquidity source should be reinitialized to /// become aware of new pools. pub reinit_interval: Option, - - /// How many pool IDs can be present in a where clause of a Tick query at - /// once. Some subgraphs are overloaded and throw errors when there are - /// too many. - pub max_pools_per_tick_query: usize, } impl UniswapV3 { /// Returns the liquidity configuration for Uniswap V3. #[expect(clippy::self_named_constructors)] - pub fn uniswap_v3( - graph_url: &Url, - chain: Chain, - max_pools_per_tick_query: usize, - ) -> Option { + pub fn uniswap_v3(subgraph: Option, chain: Chain) -> Option { Some(Self { router: contracts::UniswapV3SwapRouterV2::deployment_address(&chain.id())?.into(), max_pools_to_initialize: 100, - graph_url: graph_url.clone(), + subgraph, + pool_indexer_url: None, reinit_interval: None, - max_pools_per_tick_query, }) } } diff --git a/crates/e2e/Cargo.toml b/crates/e2e/Cargo.toml index 1f5dbaa391..50e664f665 100644 --- a/crates/e2e/Cargo.toml +++ b/crates/e2e/Cargo.toml @@ -10,6 +10,7 @@ license = "MIT OR Apache-2.0" [dependencies] alloy = { workspace = true, default-features = false, features = [ + "contract", "json-rpc", "provider-anvil-api", "provider-debug-api", @@ -49,6 +50,7 @@ model = { workspace = true, features = ["e2e"] } number = { workspace = true } observe = { workspace = true } orderbook = { workspace = true, features = ["e2e", "test-util"] } +pool-indexer = { workspace = true } price-estimation = { workspace = true } reqwest = { workspace = true, features = ["blocking", "query"] } 
serde = { workspace = true } diff --git a/crates/e2e/tests/e2e/main.rs b/crates/e2e/tests/e2e/main.rs index 9711aee7e6..630d76d77f 100644 --- a/crates/e2e/tests/e2e/main.rs +++ b/crates/e2e/tests/e2e/main.rs @@ -33,6 +33,7 @@ mod partial_fill; mod partially_fillable_balance; mod partially_fillable_pool; mod place_order_with_quote; +mod pool_indexer; mod protocol_fee; mod quote_verification; mod quoting; diff --git a/crates/e2e/tests/e2e/pool_indexer.rs b/crates/e2e/tests/e2e/pool_indexer.rs new file mode 100644 index 0000000000..331f268964 --- /dev/null +++ b/crates/e2e/tests/e2e/pool_indexer.rs @@ -0,0 +1,683 @@ +use { + alloy::{ + primitives::{Address, aliases::U160}, + providers::Provider, + sol_types::SolEvent, + }, + contracts::test::{MockUniswapV3Factory, MockUniswapV3Pool}, + e2e::setup::{OnchainComponents, TIMEOUT, colocation, run_test, wait_for_condition}, + ethrpc::Web3, + number::units::EthUnit, + pool_indexer::config::{ + ApiConfig, + Configuration, + DatabaseConfig, + FactoryConfig, + NetworkConfig, + NetworkName, + }, + sqlx::{PgPool, Row}, + std::{ + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + num::NonZeroU32, + sync::Mutex, + time::Duration, + }, +}; + +// Holds the JoinHandle of any currently-running pool-indexer so we can +// abort it even if a previous test panicked before calling handle.abort(). 
+static CURRENT_HANDLE: Mutex>> = Mutex::new(None); + +const POOL_INDEXER_PORT: u16 = 7778; +const POOL_INDEXER_HOST: &str = "http://127.0.0.1:7778"; +const POOL_INDEXER_METRICS_PORT: u16 = 7779; +const LOCAL_DB_URL: &str = "postgresql://"; + +// sqrt(1) * 2^96 — valid starting price +const INITIAL_SQRT_PRICE: u128 = 79_228_162_514_264_337_593_543_950_336; + +async fn clear_pool_indexer_tables(db: &PgPool) { + sqlx::query( + "TRUNCATE uniswap_v3_ticks, uniswap_v3_pool_states, uniswap_v3_pools, \ + pool_indexer_checkpoints", + ) + .execute(db) + .await + .unwrap(); +} + +async fn seed_checkpoint(db: &PgPool, factory: Address, block: u64) { + sqlx::query( + "INSERT INTO pool_indexer_checkpoints (chain_id, contract, block_number) + VALUES (1, $1, $2) + ON CONFLICT (chain_id, contract) DO UPDATE SET block_number = EXCLUDED.block_number", + ) + .bind(factory.as_slice()) + .bind(block.cast_signed()) + .execute(db) + .await + .unwrap(); +} + +/// Start the pool-indexer. Aborts any previously-running instance first +/// (handles leftover from a prior test that panicked before calling +/// `stop_pool_indexer`). `metrics_port = 0` asks the OS to pick a random +/// port; tests that need to scrape metrics should pass a fixed port. +async fn start_pool_indexer(factory: Address) { + start_pool_indexer_at(factory, 0).await; +} + +async fn start_pool_indexer_at(factory: Address, metrics_port: u16) { + // Abort any handle left over from a previous test that panicked. + if let Some(old) = CURRENT_HANDLE.lock().unwrap().take() { + old.abort(); + } + // Always wait a bit so the previous pool-indexer (if any) has time to + // release port 7778 before we try to bind it again. 
+ tokio::time::sleep(Duration::from_millis(300)).await; + + let config = Configuration { + database: DatabaseConfig { + url: LOCAL_DB_URL.parse().unwrap(), + max_connections: NonZeroU32::new(5).unwrap(), + }, + networks: vec![NetworkConfig { + name: NetworkName::new("mainnet"), + chain_id: 1, + rpc_url: "http://127.0.0.1:8545".parse().unwrap(), + factories: vec![FactoryConfig { + address: factory, + deployment_block: 0, + }], + chunk_size: 1000, + poll_interval_secs: 1, + use_latest: true, + subgraph_url: None, + seed_block: None, + fetch_concurrency: 8, + prefetch_concurrency: 50, + }], + api: ApiConfig { + bind_address: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, POOL_INDEXER_PORT)), + }, + metrics: pool_indexer::config::MetricsConfig { + bind_address: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, metrics_port)), + }, + }; + let handle = tokio::task::spawn(pool_indexer::run(config)); + wait_for_condition(TIMEOUT, || async { + reqwest::get(format!("{POOL_INDEXER_HOST}/health")) + .await + .is_ok_and(|r| r.status().is_success()) + }) + .await + .expect("pool-indexer API did not come up"); + *CURRENT_HANDLE.lock().unwrap() = Some(handle); +} + +fn stop_pool_indexer() { + if let Some(h) = CURRENT_HANDLE.lock().unwrap().take() { + h.abort(); + } +} + +/// Create and initialise a single pool inside an already-deployed factory. +/// `fee` must be unique within the factory for token0/token1 ([1u8;20], +/// [2u8;20]). 
+async fn create_pool( + factory: &MockUniswapV3Factory::Instance, + fee: u32, +) -> (Address, MockUniswapV3Pool::Instance) { + let provider = factory.provider(); + let token0 = Address::from([1u8; 20]); + let token1 = Address::from([2u8; 20]); + + factory + .createPool(token0, token1, alloy::primitives::aliases::U24::from(fee)) + .send() + .await + .unwrap() + .get_receipt() + .await + .unwrap(); + + let block = provider.get_block_number().await.unwrap(); + let logs = provider + .get_logs( + &alloy::rpc::types::Filter::new() + .from_block(block) + .to_block(block) + .event_signature( + MockUniswapV3Factory::MockUniswapV3Factory::PoolCreated::SIGNATURE_HASH, + ), + ) + .await + .unwrap(); + let pool_addr = + MockUniswapV3Factory::MockUniswapV3Factory::PoolCreated::decode_log(&logs[0].inner) + .unwrap() + .data + .pool; + + let pool = MockUniswapV3Pool::Instance::new(pool_addr, provider.clone()); + + pool.initialize(U160::from(INITIAL_SQRT_PRICE)) + .send() + .await + .unwrap() + .get_receipt() + .await + .unwrap(); + + pool.mockMint( + token0, + alloy::primitives::aliases::I24::try_from(-100i32).unwrap(), + alloy::primitives::aliases::I24::try_from(100i32).unwrap(), + 1_000_000u128, + ) + .send() + .await + .unwrap() + .get_receipt() + .await + .unwrap(); + + (pool_addr, pool) +} + +/// Deploy mock V3 contracts and set up a pool with liquidity. +/// Returns `(factory, pool_address)`. 
+async fn deploy_univ3(web3: &Web3) -> (MockUniswapV3Factory::Instance, Address) { + let provider = &web3.provider; + + let factory = MockUniswapV3Factory::Instance::deploy(provider.clone()) + .await + .unwrap(); + + let (pool_addr, _pool) = create_pool(&factory, 500).await; + + (factory, pool_addr) +} + +#[tokio::test] +#[ignore] +async fn local_node_pool_indexer_happy_path() { + run_test(happy_path).await; +} + +async fn happy_path(web3: Web3) { + let db = PgPool::connect(LOCAL_DB_URL).await.unwrap(); + clear_pool_indexer_tables(&db).await; + + let (factory, pool_addr) = deploy_univ3(&web3).await; + let factory_addr = *factory.address(); + let head = web3.provider.get_block_number().await.unwrap(); + + seed_checkpoint(&db, factory_addr, 0).await; + start_pool_indexer(factory_addr).await; + + wait_for_condition(TIMEOUT, || async { + let resp = reqwest::get(format!( + "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools" + )) + .await + .ok()?; + let body: serde_json::Value = resp.json().await.ok()?; + Some(body["block_number"].as_u64()? 
>= head) + }) + .await + .expect("indexer did not reach head block in time"); + + let resp: serde_json::Value = reqwest::get(format!( + "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools" + )) + .await + .unwrap() + .json() + .await + .unwrap(); + + let pools = resp["pools"].as_array().unwrap(); + assert!(!pools.is_empty()); + let our_pool = pools + .iter() + .find(|p| { + p["id"] + .as_str() + .unwrap() + .eq_ignore_ascii_case(&format!("{pool_addr:?}")) + }) + .expect("deployed pool not found in /pools response"); + assert_eq!(our_pool["fee_tier"].as_str().unwrap(), "500"); + assert_ne!(our_pool["sqrt_price"].as_str().unwrap(), "0"); + + let resp: serde_json::Value = reqwest::get(format!( + "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools/{pool_addr:?}/ticks" + )) + .await + .unwrap() + .json() + .await + .unwrap(); + assert!( + !resp["ticks"].as_array().unwrap().is_empty(), + "expected ticks from Mint event" + ); + + let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM uniswap_v3_pools WHERE chain_id = 1") + .fetch_one(&db) + .await + .unwrap(); + assert!(count > 0); + + stop_pool_indexer(); +} + +#[tokio::test] +#[ignore] +async fn local_node_pool_indexer_checkpoint_resume() { + run_test(checkpoint_resume).await; +} + +async fn checkpoint_resume(web3: Web3) { + let db = PgPool::connect(LOCAL_DB_URL).await.unwrap(); + clear_pool_indexer_tables(&db).await; + + let (factory, pool_addr) = deploy_univ3(&web3).await; + let factory_addr = *factory.address(); + let head = web3.provider.get_block_number().await.unwrap(); + seed_checkpoint(&db, factory_addr, 0).await; + + start_pool_indexer(factory_addr).await; + wait_for_condition(TIMEOUT, || async { + let resp = reqwest::get(format!( + "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools" + )) + .await + .ok()?; + let body: serde_json::Value = resp.json().await.ok()?; + Some(body["block_number"].as_u64()? 
>= head) + }) + .await + .expect("first run did not reach head"); + + let pool_count: i64 = + sqlx::query_scalar("SELECT COUNT(*) FROM uniswap_v3_pools WHERE chain_id = 1") + .fetch_one(&db) + .await + .unwrap(); + + // Capture pool state after first sync for comparison after restart. + let row = sqlx::query( + "SELECT sqrt_price_x96::TEXT AS price, tick, liquidity::TEXT AS liq + FROM uniswap_v3_pool_states + WHERE chain_id = 1 AND pool_address = $1", + ) + .bind(pool_addr.as_slice()) + .fetch_one(&db) + .await + .unwrap(); + let sqrt_price_before: String = row.get("price"); + let tick_before: i32 = row.get("tick"); + let liquidity_before: String = row.get("liq"); + + // stop_pool_indexer aborts and clears CURRENT_HANDLE; start_pool_indexer + // will see no old handle, so no extra sleep needed. + stop_pool_indexer(); + + start_pool_indexer(factory_addr).await; + wait_for_condition(TIMEOUT, || async { + let resp = reqwest::get(format!( + "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools" + )) + .await + .ok()?; + let body: serde_json::Value = resp.json().await.ok()?; + Some(body["block_number"].as_u64()? >= head) + }) + .await + .expect("second run did not reach head"); + + let pool_count_after: i64 = + sqlx::query_scalar("SELECT COUNT(*) FROM uniswap_v3_pools WHERE chain_id = 1") + .fetch_one(&db) + .await + .unwrap(); + + assert_eq!( + pool_count, pool_count_after, + "pool count changed after restart — idempotency violation" + ); + + // State must be identical after restart — re-indexing must not corrupt values. 
+    let row_after = sqlx::query(
+        "SELECT sqrt_price_x96::TEXT AS price, tick, liquidity::TEXT AS liq
+         FROM uniswap_v3_pool_states
+         WHERE chain_id = 1 AND pool_address = $1",
+    )
+    .bind(pool_addr.as_slice())
+    .fetch_one(&db)
+    .await
+    .unwrap();
+    assert_eq!(
+        sqrt_price_before,
+        row_after.get::<String>("price"),
+        "sqrt_price changed after restart"
+    );
+    assert_eq!(
+        tick_before,
+        row_after.get::<i32>("tick"),
+        "tick changed after restart"
+    );
+    assert_eq!(
+        liquidity_before,
+        row_after.get::<String>("liq"),
+        "liquidity changed after restart"
+    );
+
+    let checkpoint: i64 = sqlx::query_scalar(
+        "SELECT block_number FROM pool_indexer_checkpoints
+         WHERE chain_id = 1 AND contract = $1",
+    )
+    .bind(factory_addr.as_slice())
+    .fetch_one(&db)
+    .await
+    .unwrap();
+    assert!(checkpoint as u64 >= head);
+
+    stop_pool_indexer();
+}
+
+// ── Test 3: API error handling
+// ────────────────────────────────────────────────
+
+#[tokio::test]
+#[ignore]
+async fn local_node_pool_indexer_api_errors() {
+    run_test(api_errors).await;
+}
+
+async fn api_errors(web3: Web3) {
+    let db = PgPool::connect(LOCAL_DB_URL).await.unwrap();
+    clear_pool_indexer_tables(&db).await;
+
+    let (factory, _pool_addr) = deploy_univ3(&web3).await;
+    let factory_addr = *factory.address();
+    let head = web3.provider.get_block_number().await.unwrap();
+
+    seed_checkpoint(&db, factory_addr, 0).await;
+    start_pool_indexer(factory_addr).await;
+
+    wait_for_condition(TIMEOUT, || async {
+        let resp = reqwest::get(format!(
+            "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools"
+        ))
+        .await
+        .ok()?;
+        let body: serde_json::Value = resp.json().await.ok()?;
+        Some(body["block_number"].as_u64()? >= head)
+    })
+    .await
+    .expect("indexer did not reach head");
+
+    // Invalid address → 400.
+ let status = reqwest::get(format!( + "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools/not-an-address/ticks" + )) + .await + .unwrap() + .status(); + assert_eq!(u16::from(status), 400, "expected 400 for invalid address"); + + // Valid but unknown address → 200 with empty ticks array. + let unknown = Address::from([0xABu8; 20]); + let resp: serde_json::Value = reqwest::get(format!( + "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools/{unknown:?}/ticks" + )) + .await + .unwrap() + .json() + .await + .unwrap(); + assert_eq!( + resp["ticks"].as_array().unwrap().len(), + 0, + "expected empty ticks for unknown pool" + ); + + stop_pool_indexer(); +} + +// ── Test 4: pagination +// ──────────────────────────────────────────────────────── + +#[tokio::test] +#[ignore] +async fn local_node_pool_indexer_pagination() { + run_test(pagination).await; +} + +async fn pagination(web3: Web3) { + let db = PgPool::connect(LOCAL_DB_URL).await.unwrap(); + clear_pool_indexer_tables(&db).await; + + // Deploy factory + 3 pools (different fee tiers) so pagination has >1 page + // to traverse with limit=1. + let (factory, _pool1) = deploy_univ3(&web3).await; + let factory_addr = *factory.address(); + create_pool(&factory, 3000).await; + create_pool(&factory, 10_000).await; + let head = web3.provider.get_block_number().await.unwrap(); + seed_checkpoint(&db, factory_addr, 0).await; + + start_pool_indexer(factory_addr).await; + + wait_for_condition(TIMEOUT, || async { + let resp = reqwest::get(format!( + "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools" + )) + .await + .ok()?; + let body: serde_json::Value = resp.json().await.ok()?; + Some(body["block_number"].as_u64()? 
>= head)
+    })
+    .await
+    .expect("indexer did not reach head");
+
+    let mut all_ids: Vec<String> = Vec::new();
+    let mut cursor: Option<String> = None;
+
+    loop {
+        let url = match &cursor {
+            None => format!("{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools?limit=1"),
+            Some(c) => {
+                format!("{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools?limit=1&after={c}")
+            }
+        };
+        let resp: serde_json::Value = reqwest::get(&url).await.unwrap().json().await.unwrap();
+        let pools = resp["pools"].as_array().unwrap();
+        if pools.is_empty() {
+            break;
+        }
+        for p in pools {
+            all_ids.push(p["id"].as_str().unwrap().to_owned());
+        }
+        cursor = resp["next_cursor"].as_str().map(|s| s.to_owned());
+        if cursor.is_none() {
+            break;
+        }
+    }
+
+    let db_count: i64 =
+        sqlx::query_scalar("SELECT COUNT(*) FROM uniswap_v3_pools WHERE chain_id = 1")
+            .fetch_one(&db)
+            .await
+            .unwrap();
+    assert_eq!(
+        i64::try_from(all_ids.len()).unwrap(),
+        db_count,
+        "paginated count doesn't match DB"
+    );
+    assert!(
+        db_count >= 3,
+        "expected at least 3 pools for a meaningful pagination test"
+    );
+    let unique: std::collections::HashSet<_> = all_ids.iter().collect();
+    assert_eq!(
+        unique.len(),
+        all_ids.len(),
+        "pagination returned duplicates"
+    );
+
+    stop_pool_indexer();
+}
+
+/// Reads the prometheus `/metrics` endpoint and extracts the request count
+/// for `GET /api/v1/{network}/uniswap/v3/pools` with status 200. The metric
+/// family name is `pool_indexer_api_requests` (optionally prefixed by the
+/// process registry's namespace — e.g. `driver_pool_indexer_api_requests`
+/// when the driver was the first to call `setup_registry_reentrant`), so we
+/// substring-match on the route+status suffix rather than assume a prefix.
+/// Reads the prometheus counter `api_requests{route, status="200"}` for the
+/// given route template (e.g. `/api/v1/{network}/uniswap/v3/pools`). The
+/// metric family name is `pool_indexer_api_requests`, optionally prefixed by
+/// the process registry's namespace (e.g. 
`driver_pool_indexer_api_requests` +/// when the driver was the first to call `setup_registry_reentrant`), so we +/// substring-match on the family-name-plus-labels rather than assume a +/// prefix. +async fn api_requests_counter(metrics_port: u16, route: &str) -> u64 { + let text = reqwest::get(format!("http://127.0.0.1:{metrics_port}/metrics")) + .await + .unwrap() + .text() + .await + .unwrap(); + let needle = format!(r#"pool_indexer_api_requests{{route="{route}",status="200"}}"#); + for line in text.lines() { + if line.starts_with('#') { + continue; + } + if let Some(idx) = line.find(&needle) { + let after = line[idx + needle.len()..].trim(); + return after.parse().unwrap_or(0); + } + } + 0 +} + +#[tokio::test] +#[ignore] +async fn local_node_pool_indexer_driver_integration() { + run_test(driver_integration).await; +} + +/// End-to-end: pool-indexer indexes a mock V3 factory, driver starts with +/// `pool-indexer-url` pointing at the service, and we assert (via the +/// indexer's own request counters) that the driver actually fetched pools +/// AND their ticks. The ticks endpoint is the stronger signal — it only +/// fires after `UniswapV3PoolFetcher::new` has a non-empty registered-pool +/// set to pick a top-N from. A baseline solver is spun up only because the +/// driver's TOML config requires at least one `[[solver]]`. 
+async fn driver_integration(web3: Web3) { + const POOLS_ROUTE: &str = "/api/v1/{network}/uniswap/v3/pools"; + const POOLS_BY_IDS_ROUTE: &str = "/api/v1/{network}/uniswap/v3/pools/by-ids"; + const TICKS_ROUTE: &str = "/api/v1/{network}/uniswap/v3/pools/ticks"; + + let db = PgPool::connect(LOCAL_DB_URL).await.unwrap(); + clear_pool_indexer_tables(&db).await; + + let mut onchain = OnchainComponents::deploy(web3.clone()).await; + let [solver] = onchain.make_solvers(10u64.eth()).await; + + let (factory, pool_addr) = deploy_univ3(&web3).await; + let factory_addr = *factory.address(); + let head = web3.provider.get_block_number().await.unwrap(); + seed_checkpoint(&db, factory_addr, 0).await; + + start_pool_indexer_at(factory_addr, POOL_INDEXER_METRICS_PORT).await; + + // Wait until the indexer has both caught up to head AND surfaced the + // seeded pool. If we only check the block number the driver could race + // in and see an empty registered-pool set, which would never trigger a + // ticks fetch and silently degrade the test. + wait_for_condition(TIMEOUT, || async { + let resp = reqwest::get(format!( + "{POOL_INDEXER_HOST}/api/v1/mainnet/uniswap/v3/pools" + )) + .await + .ok()?; + let body: serde_json::Value = resp.json().await.ok()?; + let at_head = body["block_number"].as_u64()? >= head; + let has_pool = !body["pools"].as_array()?.is_empty(); + Some(at_head && has_pool) + }) + .await + .expect("indexer did not reach head with pool visible"); + + // The mock tokens (`[1u8;20]`, `[2u8;20]`) don't have a real `decimals()` + // selector, so the indexer's discovery-time eth_call returns `None` and + // the pool is stored with NULL decimals. The driver-side filter + // `pools_tokens_have_decimals` then drops the pool, leaving the + // top-N selection empty and skipping the bulk-by-ids/ticks fetch path + // this test wants to assert. Backfill plausible decimals so the driver + // doesn't drop it. 
+ sqlx::query( + "UPDATE uniswap_v3_pools SET token0_decimals = 18, token1_decimals = 6 WHERE chain_id = 1 \ + AND address = $1", + ) + .bind(pool_addr.as_slice()) + .execute(&db) + .await + .unwrap(); + + // Capture baselines after all test-side warm-up requests so the final + // assertions prove the bumps came from the driver, not from the polling + // above. + let baseline_pools = api_requests_counter(POOL_INDEXER_METRICS_PORT, POOLS_ROUTE).await; + let baseline_pools_by_ids = + api_requests_counter(POOL_INDEXER_METRICS_PORT, POOLS_BY_IDS_ROUTE).await; + let baseline_ticks = api_requests_counter(POOL_INDEXER_METRICS_PORT, TICKS_ROUTE).await; + + let baseline_solver = colocation::start_baseline_solver( + "test_solver".into(), + solver.clone(), + *onchain.contracts().weth.address(), + vec![], + 1, + true, + ) + .await; + + // The router address is required by the `manual` variant of the + // uniswap-v3 config but only used at settlement time — any 20-byte value + // is fine for a pool-fetch-only integration test. 
+ let config_override = format!( + r#" +[[liquidity.uniswap-v3]] +router = "0x000000000000000000000000000000000000dEaD" +pool-indexer-url = "{POOL_INDEXER_HOST}" +max-pools-to-initialize = 10 +"# + ); + let driver_handle = colocation::start_driver_with_config_override( + onchain.contracts(), + vec![baseline_solver], + colocation::LiquidityProvider::UniswapV2, + false, + Some(&config_override), + ); + + wait_for_condition(TIMEOUT, || async { + let pools = api_requests_counter(POOL_INDEXER_METRICS_PORT, POOLS_ROUTE).await; + let pools_by_ids = + api_requests_counter(POOL_INDEXER_METRICS_PORT, POOLS_BY_IDS_ROUTE).await; + let ticks = api_requests_counter(POOL_INDEXER_METRICS_PORT, TICKS_ROUTE).await; + pools > baseline_pools && pools_by_ids > baseline_pools_by_ids && ticks > baseline_ticks + }) + .await + .expect("driver did not complete pool + tick fetch from pool-indexer within timeout"); + + driver_handle.abort(); + stop_pool_indexer(); +} diff --git a/crates/liquidity-sources/Cargo.toml b/crates/liquidity-sources/Cargo.toml index f4b0b87547..3f886ac7e2 100644 --- a/crates/liquidity-sources/Cargo.toml +++ b/crates/liquidity-sources/Cargo.toml @@ -33,6 +33,7 @@ reqwest = { workspace = true, features = ["json"] } serde = { workspace = true } serde_json = { workspace = true } serde_with = { workspace = true } +shared = { workspace = true } strum = { workspace = true } thiserror = { workspace = true } token-info = { workspace = true } diff --git a/crates/liquidity-sources/src/uniswap_v3/graph_api.rs b/crates/liquidity-sources/src/uniswap_v3/graph_api.rs index 5eb93d38ea..d299e4bcb2 100644 --- a/crates/liquidity-sources/src/uniswap_v3/graph_api.rs +++ b/crates/liquidity-sources/src/uniswap_v3/graph_api.rs @@ -5,9 +5,11 @@ use { crate::{ json_map, subgraph::{ContainsId, SubgraphClient}, + uniswap_v3::V3PoolDataSource, }, alloy::primitives::{Address, U256}, anyhow::Result, + async_trait::async_trait, event_indexing::event_handler::MAX_REORG_BLOCK_COUNT, num::BigInt, 
number::serialization::HexOrDecimalU256,
@@ -278,6 +280,21 @@ impl UniV3SubgraphClient {
     }
 }
 
+#[async_trait]
+impl V3PoolDataSource for UniV3SubgraphClient {
+    async fn get_registered_pools(&self) -> Result<RegisteredPools> {
+        Self::get_registered_pools(self).await
+    }
+
+    async fn get_pools_with_ticks_by_ids(
+        &self,
+        ids: &[Address],
+        block_number: u64,
+    ) -> Result<Vec<PoolData>> {
+        Self::get_pools_with_ticks_by_ids(self, ids, block_number).await
+    }
+}
+
 /// Result of the registered stable pool query.
 #[derive(Debug, Default, PartialEq)]
 pub struct RegisteredPools {
diff --git a/crates/liquidity-sources/src/uniswap_v3/mod.rs b/crates/liquidity-sources/src/uniswap_v3/mod.rs
index 6e90557757..188aa735a5 100644
--- a/crates/liquidity-sources/src/uniswap_v3/mod.rs
+++ b/crates/liquidity-sources/src/uniswap_v3/mod.rs
@@ -2,3 +2,37 @@
 pub mod event_fetching;
 pub mod graph_api;
 pub mod pool_fetching;
+pub mod pool_indexer;
+
+use {
+    self::graph_api::{PoolData, RegisteredPools},
+    alloy::primitives::Address,
+    anyhow::Result,
+    async_trait::async_trait,
+};
+
+/// Abstracts over places we can pull Uniswap V3 pool state + ticks from.
+/// Currently there are two backends: the Uniswap V3 subgraph (historical,
+/// queryable by block) and our own pool-indexer service (at-head only).
+#[async_trait]
+pub trait V3PoolDataSource: Send + Sync + 'static {
+    /// Fetch the full set of pools the source knows about, tagged with the
+    /// block number the snapshot was taken at. `PoolData::ticks` is always
+    /// `None` here — callers needing ticks must use
+    /// [`Self::get_pools_with_ticks_by_ids`] separately. The split lets a
+    /// cheap "what pools exist?" lookup skip the expensive tick fetch.
+    async fn get_registered_pools(&self) -> Result<RegisteredPools>;
+
+    /// Fetch pools + their active ticks for the given pool addresses. 
+ /// + /// `block_number` is a best-effort hint: sources that support historical + /// queries (subgraph) honor it exactly; sources that only expose head + /// data (pool-indexer) ignore it and return at-head data instead. + /// Callers requiring strict at-block semantics must not use this trait + /// generically — they must pin to an impl that supports it. + async fn get_pools_with_ticks_by_ids( + &self, + ids: &[Address], + block_number: u64, + ) -> Result>; +} diff --git a/crates/liquidity-sources/src/uniswap_v3/pool_fetching.rs b/crates/liquidity-sources/src/uniswap_v3/pool_fetching.rs index faa3643715..85dec79f4b 100644 --- a/crates/liquidity-sources/src/uniswap_v3/pool_fetching.rs +++ b/crates/liquidity-sources/src/uniswap_v3/pool_fetching.rs @@ -1,7 +1,8 @@ use { super::{ + V3PoolDataSource, event_fetching::{RecentEventsCache, UniswapV3PoolEventFetcher}, - graph_api::{PoolData, Token, UniV3SubgraphClient}, + graph_api::{PoolData, Token}, }, crate::{recent_block_cache::Block, uniswap_v3::event_fetching::WithAddress}, alloy::{ @@ -22,7 +23,6 @@ use { model::TokenPair, num::{BigInt, Zero, rational::Ratio}, number::serialization::HexOrDecimalU256, - reqwest::{Client, Url}, serde::Serialize, serde_with::{DisplayFromStr, serde_as}, std::{ @@ -124,7 +124,7 @@ struct PoolsCheckpoint { } struct PoolsCheckpointHandler { - graph_api: UniV3SubgraphClient, + source: Arc, /// Address is pool id while TokenPair is a pair or tokens for each pool. pools_by_token_pair: HashMap>, /// Pools state on a specific block number in history considered reorg safe @@ -136,15 +136,10 @@ impl PoolsCheckpointHandler { /// state/ticks). 
Then fetches state/ticks for the most deepest pools /// (subset of all existing pools) pub async fn new( - subgraph_url: &Url, - client: Client, + source: Arc, max_pools_to_initialize_cache: usize, - max_pools_per_tick_query: usize, ) -> Result { - let graph_api = - UniV3SubgraphClient::from_subgraph_url(subgraph_url, client, max_pools_per_tick_query) - .await?; - let mut registered_pools = graph_api.get_registered_pools().await?; + let mut registered_pools = source.get_registered_pools().await?; tracing::debug!( block = %registered_pools.fetched_block_number, pools = %registered_pools.pools.len(), "initialized registered pools", @@ -171,7 +166,7 @@ impl PoolsCheckpointHandler { .rev() .take(max_pools_to_initialize_cache) .collect::>(); - let pools = graph_api + let pools = source .get_pools_with_ticks_by_ids(&pool_ids, registered_pools.fetched_block_number) .await? .into_iter() @@ -184,7 +179,7 @@ impl PoolsCheckpointHandler { }); Ok(Self { - graph_api, + source, pools_by_token_pair, pools_checkpoint, }) @@ -243,7 +238,7 @@ impl PoolsCheckpointHandler { let pool_ids = missing_pools.into_iter().collect::>(); let start = std::time::Instant::now(); let pools = self - .graph_api + .source .get_pools_with_ticks_by_ids(&pool_ids, block_number) .await; tracing::debug!( @@ -282,21 +277,13 @@ pub struct UniswapV3PoolFetcher { impl UniswapV3PoolFetcher { pub async fn new( - subgraph_url: &Url, + source: Arc, web3: Web3, - client: Client, block_retriever: Arc, max_pools_to_initialize: usize, - max_pools_per_tick_query: usize, ) -> Result { let web3 = web3.labeled("uniswapV3"); - let checkpoint = PoolsCheckpointHandler::new( - subgraph_url, - client, - max_pools_to_initialize, - max_pools_per_tick_query, - ) - .await?; + let checkpoint = PoolsCheckpointHandler::new(source, max_pools_to_initialize).await?; let init_block = checkpoint.pools_checkpoint.lock().unwrap().block_number; let init_block = block_retriever.block(init_block).await?; diff --git 
a/crates/liquidity-sources/src/uniswap_v3/pool_indexer.rs b/crates/liquidity-sources/src/uniswap_v3/pool_indexer.rs new file mode 100644 index 0000000000..b651618b34 --- /dev/null +++ b/crates/liquidity-sources/src/uniswap_v3/pool_indexer.rs @@ -0,0 +1,288 @@ +//! HTTP client for CoW Protocol's own pool-indexer service. Implements +//! [`V3PoolDataSource`] so the driver can swap this in place of the subgraph +//! client without touching anything else. +//! +//! The pool-indexer always returns at-head data — it doesn't support +//! historical queries. `block_number` arguments are ignored; the block +//! actually served is returned in the response envelope. For the driver's +//! current use this is fine (see design discussion around cold_seeder and +//! the baseline solver's eth_call delegation). + +use { + crate::uniswap_v3::{ + V3PoolDataSource, + graph_api::{PoolData, RegisteredPools, TickData, Token}, + }, + alloy::primitives::{Address, U256}, + anyhow::{Context, Result}, + async_trait::async_trait, + chain::Chain, + num::BigInt, + reqwest::{Client, Url}, + serde::Deserialize, + std::{collections::HashMap, str::FromStr}, +}; + +/// Pool-indexer's server-side cap on `pool_ids=` query param size; keep our +/// per-request chunk at or below this. +const POOL_IDS_PER_REQUEST: usize = 500; + +/// Pool-indexer's server-side cap on `limit=` for listing pools. +const LIST_PAGE_SIZE: u64 = 5000; + +pub struct PoolIndexerClient { + /// Service root (e.g. `http://pool-indexer/`). 
+ base_url: Url, + http: Client, +} + +impl PoolIndexerClient { + pub fn new(base_url: Url, chain: Chain, http: Client) -> Self { + let prefix = format!("api/v1/{}/uniswap/v3/", chain.slug()); + let base_url = shared::url::join(&base_url, &prefix); + Self { base_url, http } + } + + fn path(&self, suffix: &str) -> Url { + shared::url::join(&self.base_url, suffix) + } +} + +#[derive(Deserialize)] +struct PoolsResponse { + block_number: u64, + pools: Vec, + #[serde(default)] + next_cursor: Option, +} + +#[derive(Deserialize)] +struct IndexerPool { + id: Address, + token0: IndexerToken, + token1: IndexerToken, + fee_tier: String, + liquidity: String, + sqrt_price: String, + tick: i32, +} + +#[derive(Deserialize)] +struct IndexerToken { + id: Address, + #[serde(default)] + decimals: Option, +} + +#[derive(Deserialize)] +struct BulkTicksResponse { + pools: Vec, +} + +#[derive(Deserialize)] +struct IndexerPoolTicks { + pool: Address, + ticks: Vec, +} + +#[derive(Deserialize)] +struct IndexerTick { + tick_idx: i32, + liquidity_net: String, +} + +/// Filter predicate: drop pools where either token's `decimals` is missing. +/// `decimals = 0` reaching the solver would mis-scale prices by 10^18, so we +/// fail closed (drop + warn) until the indexer backfills the value. 
+fn pools_tokens_have_decimals(p: &IndexerPool) -> bool { + if p.token0.decimals.is_none() || p.token1.decimals.is_none() { + tracing::warn!( + pool = %format!("{:#x}", p.id), + token0 = %format!("{:#x}", p.token0.id), + token1 = %format!("{:#x}", p.token1.id), + token0_decimals_set = p.token0.decimals.is_some(), + token1_decimals_set = p.token1.decimals.is_some(), + "pool dropped from response: missing token decimals" + ); + return false; + } + true +} + +impl TryFrom for PoolData { + type Error = anyhow::Error; + + fn try_from(pool: IndexerPool) -> Result { + let token0_decimals = pool + .token0 + .decimals + .context("BUG: missing token0 decimals after pools_tokens_have_decimals filter")?; + let token1_decimals = pool + .token1 + .decimals + .context("BUG: missing token1 decimals after pools_tokens_have_decimals filter")?; + Ok(Self { + id: pool.id, + token0: Token { + id: pool.token0.id, + decimals: token0_decimals, + }, + token1: Token { + id: pool.token1.id, + decimals: token1_decimals, + }, + fee_tier: U256::from_str(&pool.fee_tier).context("parse fee_tier")?, + liquidity: U256::from_str(&pool.liquidity).context("parse liquidity")?, + sqrt_price: U256::from_str(&pool.sqrt_price).context("parse sqrt_price")?, + tick: BigInt::from(pool.tick), + ticks: None, + }) + } +} + +impl IndexerTick { + fn into_tick_data(self, pool_address: Address) -> Result { + Ok(TickData { + id: format!("{pool_address:#x}#{}", self.tick_idx), + tick_idx: BigInt::from(self.tick_idx), + liquidity_net: BigInt::from_str(&self.liquidity_net).context("parse liquidity_net")?, + pool_address, + }) + } +} + +#[async_trait] +impl V3PoolDataSource for PoolIndexerClient { + async fn get_registered_pools(&self) -> Result { + // Paginate through the full pool set. 
The block_number returned from + // the first page is what we pin the snapshot to — subsequent pages + // may report a higher block, which we tolerate as bounded drift: the + // driver's event replay picks up anything committed after this + // snapshot. + let mut cursor: Option = None; + let mut pools: Vec = Vec::new(); + let mut fetched_block_number: u64 = 0; + loop { + let mut url = self.path("pools"); + url.query_pairs_mut() + .append_pair("limit", &LIST_PAGE_SIZE.to_string()); + if let Some(c) = &cursor { + url.query_pairs_mut().append_pair("after", c); + } + let page: PoolsResponse = self + .http + .get(url) + .send() + .await + .context("GET /pools")? + .error_for_status() + .context("pools HTTP status")? + .json() + .await + .context("pools body")?; + + if fetched_block_number == 0 { + fetched_block_number = page.block_number; + } + // Skip zero-liquidity pools (fully-burned LP, never-minted, etc.) + let filtered = page + .pools + .into_iter() + .filter(|p| p.liquidity != "0") + .filter(pools_tokens_have_decimals) + .map(PoolData::try_from) + .collect::>>()?; + pools.extend(filtered); + match page.next_cursor { + Some(c) => cursor = Some(c), + None => break, + } + } + Ok(RegisteredPools { + fetched_block_number, + pools, + }) + } + + async fn get_pools_with_ticks_by_ids( + &self, + ids: &[Address], + // pool-indexer is at-head only — see trait doc on `V3PoolDataSource`. 
+ _block_number: u64, + ) -> Result> { + if ids.is_empty() { + return Ok(Vec::new()); + } + + let mut out: Vec = Vec::with_capacity(ids.len()); + for batch in ids.chunks(POOL_IDS_PER_REQUEST) { + let (pools, ticks_by_pool) = futures::try_join!( + fetch_pools_by_ids(self, batch), + fetch_ticks_by_pool_ids(self, batch), + )?; + + for mut pool in pools { + if let Some(ticks) = ticks_by_pool.get(&pool.id) { + pool.ticks = Some(ticks.clone()); + } + out.push(pool); + } + } + Ok(out) + } +} + +fn ids_param(ids: &[Address]) -> String { + ids.iter() + .map(|a| format!("{a:#x}")) + .collect::>() + .join(",") +} + +async fn fetch_pools_by_ids(client: &PoolIndexerClient, ids: &[Address]) -> Result> { + let mut url = client.path("pools/by-ids"); + url.query_pairs_mut() + .append_pair("pool_ids", &ids_param(ids)); + let resp: PoolsResponse = client + .http + .get(url) + .send() + .await + .context("GET /pools/by-ids?pool_ids=")? + .error_for_status() + .context("pools-by-ids HTTP status")? + .json() + .await + .context("pools-by-ids body")?; + resp.pools + .into_iter() + .filter(pools_tokens_have_decimals) + .map(PoolData::try_from) + .collect() +} + +async fn fetch_ticks_by_pool_ids( + client: &PoolIndexerClient, + ids: &[Address], +) -> Result>> { + let mut url = client.path("pools/ticks"); + url.query_pairs_mut() + .append_pair("pool_ids", &ids_param(ids)); + let resp: BulkTicksResponse = client + .http + .get(url) + .send() + .await + .context("GET /pools/ticks")? + .error_for_status() + .context("bulk-ticks HTTP status")? 
+ .json() + .await + .context("bulk-ticks body")?; + let mut out: HashMap> = HashMap::new(); + for IndexerPoolTicks { pool, ticks } in resp.pools { + let mapped: Result> = ticks.into_iter().map(|t| t.into_tick_data(pool)).collect(); + out.insert(pool, mapped?); + } + Ok(out) +} diff --git a/crates/number/src/conversions.rs b/crates/number/src/conversions.rs index efccde0a16..b63e87226e 100644 --- a/crates/number/src/conversions.rs +++ b/crates/number/src/conversions.rs @@ -1,5 +1,8 @@ use { - alloy_primitives::{U256, aliases::I512}, + alloy_primitives::{ + U256, + aliases::{I512, U160}, + }, anyhow::{Result, ensure}, bigdecimal::{BigDecimal, num_bigint::ToBigInt}, num::{BigInt, BigRational, BigUint, Zero, bigint::Sign, rational::Ratio}, @@ -77,6 +80,24 @@ pub fn u256_to_big_decimal(u256: &U256) -> BigDecimal { BigDecimal::from(BigInt::from(big_uint)) } +pub fn u160_to_big_decimal(u160: &U160) -> BigDecimal { + let big_uint = BigUint::from_bytes_be(&u160.to_be_bytes::<20>()); + BigDecimal::from(BigInt::from(big_uint)) +} + +pub fn big_decimal_to_u160(big_decimal: &BigDecimal) -> Option { + let big_uint = big_decimal_to_big_uint(big_decimal)?; + big_uint_to_u160(&big_uint).ok() +} + +pub fn big_uint_to_u160(input: &BigUint) -> Result { + let bytes = input.to_bytes_be(); + ensure!(bytes.len() <= 20, "too large for U160"); + let mut buf = [0u8; 20]; + buf[20 - bytes.len()..].copy_from_slice(&bytes); + Ok(U160::from_be_bytes(buf)) +} + pub fn i512_to_big_int(i512: &I512) -> BigInt { BigInt::from_bytes_be( match i512.sign() { diff --git a/crates/pool-indexer/Cargo.toml b/crates/pool-indexer/Cargo.toml new file mode 100644 index 0000000000..b54ae24efa --- /dev/null +++ b/crates/pool-indexer/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "pool-indexer" +version = "0.1.0" +authors = ["Cow Protocol Developers "] +edition = "2024" +license = "GPL-3.0-or-later" + +[lib] +doctest = false +name = "pool_indexer" +path = "src/lib.rs" + +[[bin]] +name = "pool-indexer" +path = 
"src/main.rs" + +[dependencies] +alloy = { workspace = true, features = ["contract", "providers", "rpc-types", "sol-types"] } +alloy-primitives = { workspace = true, features = ["serde", "std"] } +anyhow = { workspace = true } +async-trait = { workspace = true } +axum = { workspace = true } +bigdecimal = { workspace = true } +clap = { workspace = true } +configs = { workspace = true } +contracts = { workspace = true } +ethrpc = { workspace = true } +futures = { workspace = true } +mimalloc = { workspace = true, optional = true } +num = { workspace = true } +number = { workspace = true } +observe = { workspace = true } +prometheus = { workspace = true } +prometheus-metric-storage = { workspace = true } +reqwest = { workspace = true, features = ["json"] } +scopeguard = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +shared = { workspace = true } +sqlx = { workspace = true } +tikv-jemallocator = { workspace = true } +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "signal", "sync", "time"] } +toml = { workspace = true } +tower = { workspace = true } +tower-http = { workspace = true, features = ["trace"] } +tracing = { workspace = true } +url = { workspace = true } + +[features] +mimalloc-allocator = ["dep:mimalloc"] +tokio-console = ["observe/tokio-console"] + +[lints] +workspace = true diff --git a/crates/pool-indexer/run-local.sh b/crates/pool-indexer/run-local.sh new file mode 100755 index 0000000000..f0574cc3dc --- /dev/null +++ b/crates/pool-indexer/run-local.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Wipes the local test DB, re-applies migrations, and runs pool-indexer +# against config.local.toml (mainnet + Ink by default). +set -euo pipefail + +cd "$(dirname "$0")/../.." + +CONFIG="crates/pool-indexer/config.local.toml" + +if [[ ! 
-f "$CONFIG" ]]; then + echo "missing $CONFIG — copy/edit config.local.toml first" >&2 + exit 1 +fi + +echo "==> tearing down docker compose (with volumes)" +docker compose down --volumes + +echo "==> starting postgres" +docker compose up -d db + +echo "==> waiting for postgres to accept connections" +until docker compose exec -T db pg_isready -U "$USER" >/dev/null 2>&1; do + sleep 1 +done + +echo "==> running flyway migrations" +docker compose run --rm migrations + +echo "==> starting pool-indexer" +export RUST_LOG=info,pool_indexer=debug +exec cargo run --release -p pool-indexer -- --config "$CONFIG" diff --git a/crates/pool-indexer/src/api/mod.rs b/crates/pool-indexer/src/api/mod.rs new file mode 100644 index 0000000000..597589c65c --- /dev/null +++ b/crates/pool-indexer/src/api/mod.rs @@ -0,0 +1,92 @@ +pub mod routes; +pub mod uniswap_v3; + +pub use routes::router; +use { + crate::config::NetworkName, + axum::{ + Json, + http::StatusCode, + response::{IntoResponse, Response}, + }, + sqlx::PgPool, + std::collections::HashMap, +}; + +#[derive(Clone)] +pub struct AppState { + pub db: PgPool, + /// Maps network name → chain_id for all configured networks. + pub networks: HashMap, +} + +impl AppState { + pub fn resolve_network(&self, name: &str) -> Option { + self.networks.get(&NetworkName::new(name)).copied() + } +} + +/// Structured error type for API handlers. Each variant decides its own HTTP +/// status + body via the `IntoResponse` impl so formatting lives in one place +/// and helpers can `?`-propagate failures instead of handing around prebuilt +/// `Response` values. Input-shape errors (bad addresses, bad cursors, too +/// many ids) are handled earlier by the serde extractors and come back as +/// axum's default 400s — see [`crate::api::uniswap_v3::PoolIds`]. +#[derive(Debug)] +pub enum ApiError { + /// `{network}` path segment doesn't match any configured network. 
Says + /// nothing about whether the network exists in the world, only that + /// this indexer wasn't told about it. + NetworkNotFound, + /// The indexer has no checkpoint yet for this chain — it's still in + /// bootstrap. Returned as 503 so clients retry rather than treat it + /// as a permanent empty set. + NotReady, + /// The `after=` cursor didn't parse as a 20-byte hex address. Cursors + /// are opaque but not arbitrary — clients must pass back exactly what + /// the previous response returned. + InvalidCursor, + /// Unexpected failure inside the handler (usually DB). Body is generic + /// 500; the underlying error is logged server-side. + Internal(anyhow::Error), +} + +impl IntoResponse for ApiError { + fn into_response(self) -> Response { + match self { + Self::NetworkNotFound => StatusCode::NOT_FOUND.into_response(), + Self::NotReady => StatusCode::SERVICE_UNAVAILABLE.into_response(), + Self::InvalidCursor => bad_request("invalid cursor"), + Self::Internal(err) => { + tracing::error!(?err, "internal error"); + StatusCode::INTERNAL_SERVER_ERROR.into_response() + } + } + } +} + +impl From for ApiError { + fn from(err: anyhow::Error) -> Self { + Self::Internal(err) + } +} + +fn bad_request(message: impl Into) -> Response { + ( + StatusCode::BAD_REQUEST, + Json(serde_json::json!({ "error": message.into() })), + ) + .into_response() +} + +pub(super) fn resolve_chain_id(state: &AppState, network: &str) -> Result { + state + .resolve_network(network) + .ok_or(ApiError::NetworkNotFound) +} + +pub(super) async fn latest_indexed_block(state: &AppState, chain_id: u64) -> Result { + crate::db::uniswap_v3::get_latest_indexed_block(&state.db, chain_id) + .await? + .ok_or(ApiError::NotReady) +} diff --git a/crates/pool-indexer/src/api/routes.rs b/crates/pool-indexer/src/api/routes.rs new file mode 100644 index 0000000000..fbd490d586 --- /dev/null +++ b/crates/pool-indexer/src/api/routes.rs @@ -0,0 +1,78 @@ +//! HTTP routing for the pool-indexer API. 
Keeps the wiring — route table, +//! middleware, span extraction — separate from the type definitions in +//! `super` so either side can change without churn in the other. + +use { + super::{AppState, uniswap_v3}, + axum::{ + Router, + extract::{MatchedPath, Request}, + http::StatusCode, + middleware::{self, Next}, + response::{IntoResponse, Response}, + routing::get, + }, + observe::tracing::distributed::axum::{make_span, record_trace_id}, + std::sync::Arc, + tower::ServiceBuilder, + tower_http::trace::TraceLayer, +}; + +/// Builds the full axum `Router` for the pool-indexer API. Mounts handlers, +/// attaches the metrics middleware, and wires the distributed-tracing layer +/// so `traceparent` / B3 headers on incoming requests seed the current +/// span — letting logs correlate across services. +pub fn router(state: Arc) -> Router { + Router::new() + .route("/health", get(health)) + .route( + "/api/v1/{network}/uniswap/v3/pools", + get(uniswap_v3::get_pools), + ) + .route( + "/api/v1/{network}/uniswap/v3/pools/by-ids", + get(uniswap_v3::get_pools_by_ids), + ) + .route( + "/api/v1/{network}/uniswap/v3/pools/ticks", + get(uniswap_v3::get_ticks_bulk), + ) + .route( + "/api/v1/{network}/uniswap/v3/pools/{pool_address}/ticks", + get(uniswap_v3::get_ticks), + ) + .with_state(state) + .layer(middleware::from_fn(record_request_metrics)) + .layer( + ServiceBuilder::new() + .layer(TraceLayer::new_for_http().make_span_with(make_span)) + .map_request(record_trace_id), + ) +} + +async fn health() -> impl IntoResponse { + StatusCode::OK +} + +/// Emits per-request `api_requests` (count) and `api_request_seconds` +/// (latency) metrics labelled by the matched route template (e.g. +/// `/api/v1/{network}/uniswap/v3/pools`) rather than the concrete URL — so +/// the cardinality stays bounded no matter how many networks / addresses +/// flow through. 
+async fn record_request_metrics(req: Request, next: Next) -> Response { + let route = req + .extensions() + .get::() + .map(|p| p.as_str().to_owned()) + .unwrap_or_else(|| "unmatched".to_owned()); + let metrics = crate::metrics::Metrics::get(); + let labels = [route.as_str()]; + let _timer = crate::metrics::Metrics::timer(&metrics.api_request_seconds, &labels); + let response = next.run(req).await; + let status = response.status().as_u16().to_string(); + metrics + .api_requests + .with_label_values(&[route.as_str(), status.as_str()]) + .inc(); + response +} diff --git a/crates/pool-indexer/src/api/uniswap_v3/mod.rs b/crates/pool-indexer/src/api/uniswap_v3/mod.rs new file mode 100644 index 0000000000..1e529c59ef --- /dev/null +++ b/crates/pool-indexer/src/api/uniswap_v3/mod.rs @@ -0,0 +1,118 @@ +pub mod pools; +pub mod ticks; + +use { + alloy_primitives::Address, + bigdecimal::{BigDecimal, num_bigint::ToBigInt}, + serde::{Deserialize, Deserializer}, +}; +pub use { + pools::{get_pools, get_pools_by_ids}, + ticks::{get_ticks, get_ticks_bulk}, +}; + +/// Upper bound on pool addresses accepted in a single bulk lookup. Keeps the +/// URL under typical proxy limits and bounds DB query size. +pub(super) const MAX_POOL_IDS_PER_REQUEST: usize = 500; + +/// Newtype over `Vec
` that deserializes from a comma-separated list +/// of 20-byte hex addresses in URL query strings (`0x…,0x…`). Parsing + +/// capping happen at the extractor boundary so handlers work with typed +/// addresses instead of raw strings. +pub(crate) struct PoolIds(pub Vec
<Address>);

+impl<'de> Deserialize<'de> for PoolIds {
+    fn deserialize<D: Deserializer<'de>>(de: D) -> Result<Self, D::Error> {
+        let raw = <&str>::deserialize(de)?;
+        let mut out = Vec::new();
+        // Empty entries (e.g. trailing commas, `a,,b`) are tolerated but
+        // logged: they usually indicate a sloppy caller, not a hard error.
+        for entry in raw.split(',').map(str::trim).filter(|s| {
+            if s.is_empty() {
+                tracing::warn!("pool_ids query contained an empty entry");
+                false
+            } else {
+                true
+            }
+        }) {
+            out.push(
+                entry
+                    .parse::<Address>()
+                    .map_err(|_| serde::de::Error::custom("invalid pool id"))?,
+            );
+        }
+        if out.len() > MAX_POOL_IDS_PER_REQUEST {
+            return Err(serde::de::Error::custom(format!(
+                "too many pool ids; max {MAX_POOL_IDS_PER_REQUEST}"
+            )));
+        }
+        Ok(PoolIds(out))
+    }
+}
+
+/// Serializes any [`Display`](std::fmt::Display) value as a JSON string.
+pub(super) fn serialize_display<T: std::fmt::Display, S: serde::Serializer>(
+    value: &T,
+    serializer: S,
+) -> Result<S::Ok, S::Error> {
+    serializer.serialize_str(&value.to_string())
+}
+
+/// Serializes a [`BigDecimal`] holding an integer value as a plain decimal
+/// string — never scientific notation. `BigDecimal`'s own `Display` emits
+/// `"Ne±M"` for some magnitudes, which breaks downstream parsers expecting
+/// `uint` strings (alloy's `U256::from_str`). The stored columns
+/// (`sqrt_price_x96`, `liquidity`, `liquidity_net`) are always integers, so
+/// converting via `BigInt` is lossless.
+pub(super) fn serialize_integer<S: serde::Serializer>(
+    value: &BigDecimal,
+    serializer: S,
+) -> Result<S::Ok, S::Error> {
+    // `to_bigint` truncates fractional values (1.5 → 1), so also verify the
+    // round-trip matches — otherwise we'd silently drop precision.
+    match value.to_bigint() {
+        Some(bi) if BigDecimal::from(bi.clone()) == *value => {
+            serializer.serialize_str(&bi.to_string())
+        }
+        _ => Err(serde::ser::Error::custom(format!(
+            "expected integer, got {value}"
+        ))),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        bigdecimal::{BigDecimal, num_bigint::BigInt},
+        serde::Serialize,
+        std::str::FromStr,
+    };
+
+    /// Postgres' NUMERIC wire encoding compresses trailing zeros into a
+    /// negative `BigDecimal` scale (`mantissa × 10^|scale|`). The default
+    /// `Display` stringifies this as scientific notation (`1E30`), which
+    /// `alloy::U256::from_str` rejects — `serialize_integer` must emit
+    /// plain digits instead.
+    #[test]
+    fn serialize_integer_handles_negative_scale_bigdecimal() {
+        // negative-scale compression large enough to push `BigDecimal`'s `Display` into
+        // `Ne+M` notation.
+ let mantissa = BigInt::from_str("79228162514264337593543950336").unwrap(); + let v = BigDecimal::new(mantissa, -30); + + // Confirm the bug shape: default `Display` produces scientific + // notation that `U256::from_str` can't parse. + assert_eq!(v.to_string(), "79228162514264337593543950336e+30"); + + // Our serializer normalizes to pure digits that the driver parses. + #[derive(Serialize)] + struct Wrapper { + #[serde(serialize_with = "serialize_integer")] + v: BigDecimal, + } + let json = serde_json::to_string(&Wrapper { v }).unwrap(); + assert_eq!( + json, + "{\"v\":\"79228162514264337593543950336000000000000000000000000000000\"}" + ); + } +} diff --git a/crates/pool-indexer/src/api/uniswap_v3/pools.rs b/crates/pool-indexer/src/api/uniswap_v3/pools.rs new file mode 100644 index 0000000000..b48627fcc5 --- /dev/null +++ b/crates/pool-indexer/src/api/uniswap_v3/pools.rs @@ -0,0 +1,209 @@ +use { + super::{PoolIds, serialize_display, serialize_integer}, + crate::{ + api::{ApiError, AppState, latest_indexed_block, resolve_chain_id}, + db::uniswap_v3 as db, + }, + alloy_primitives::Address, + axum::{ + extract::{Path, Query, State}, + response::{IntoResponse, Json, Response}, + }, + bigdecimal::BigDecimal, + serde::{Deserialize, Serialize}, + std::sync::Arc, +}; + +/// Query parameters for the `GET /pools` endpoint — cursor-paginated full +/// listing. +#[derive(Deserialize)] +pub struct ListPoolsQuery { + /// Opaque cursor returned by the previous page; omit to start from the + /// beginning. + pub after: Option, + /// Maximum number of pools to return. Clamped to [1, 5000]; defaults to + /// 1000. + pub limit: Option, +} + +/// Query parameters for the `GET /pools/by-ids` endpoint — bulk lookup of +/// specific pool addresses, returns only the requested pools (no pagination). +/// Intended for clients that already know the pool addresses they care about, +/// e.g. resolving pools referenced by an auction. 
+#[derive(Deserialize)] +pub struct BulkLookupQuery { + /// Comma-separated list of pool addresses (`0x…,0x…`) parsed eagerly. + /// Capped at [`super::MAX_POOL_IDS_PER_REQUEST`] entries; callers with + /// more addresses should chunk their requests. + pub pool_ids: PoolIds, +} + +/// ERC-20 token metadata embedded in pool responses. +#[derive(Serialize)] +pub struct TokenInfo { + /// Checksummed contract address. + pub id: Address, + #[serde(skip_serializing_if = "Option::is_none")] + pub decimals: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub symbol: Option, +} + +/// A single Uniswap v3 pool. +#[derive(Serialize)] +pub struct PoolResponse { + /// Checksummed pool contract address. + pub id: Address, + pub token0: TokenInfo, + pub token1: TokenInfo, + /// Fee tier in hundredths of a basis point (e.g. 3000 = 0.3%). + #[serde(serialize_with = "serialize_display")] + pub fee_tier: u32, + #[serde(serialize_with = "serialize_integer")] + pub liquidity: BigDecimal, + #[serde(serialize_with = "serialize_integer")] + pub sqrt_price: BigDecimal, + pub tick: i32, + /// Populated only when tick data is explicitly requested. + pub ticks: Option>, +} + +/// Response envelope for pool listing and search endpoints. +#[derive(Serialize)] +pub struct PoolsResponse { + /// Latest block that has been fully indexed. + pub block_number: u64, + pub pools: Vec, + /// Cursor to pass as `after` to fetch the next page; `null` on the last + /// page. + pub next_cursor: Option, +} + +/// Default number of pools to return per page when the client doesn't +/// specify a `limit`. Sized so a full mainnet pool set can be drained in +/// a few pages. +const DEFAULT_PAGE_LIMIT: u64 = 1_000; + +/// Hard cap on `limit` to bound both query time and response size. Server +/// applies this even if the client asks for more. 
+const MAX_PAGE_LIMIT: u64 = 5_000; + +impl ListPoolsQuery { + /// Resolve the effective page size: the client-supplied `limit` clamped + /// to `[1, MAX_PAGE_LIMIT]`, defaulting to `DEFAULT_PAGE_LIMIT`. + fn page_limit(&self) -> u64 { + self.limit + .unwrap_or(DEFAULT_PAGE_LIMIT) + .clamp(1, MAX_PAGE_LIMIT) + } + + /// Parse the opaque `after` cursor back to the 20-byte address key used + /// by the DB's keyset pagination. Returns `InvalidCursor` on malformed + /// input so callers see a 400 rather than an empty page. + fn cursor(&self) -> Result>, ApiError> { + self.after + .as_deref() + .map(|raw| { + raw.parse::
<Address>()
+                    .map(|address| address.as_slice().to_vec())
+                    .map_err(|_| ApiError::InvalidCursor)
+            })
+            .transpose()
+    }
+}
+
+impl From<&db::PoolRow> for PoolResponse {
+    fn from(r: &db::PoolRow) -> Self {
+        Self {
+            id: r.address,
+            token0: TokenInfo {
+                id: r.token0,
+                decimals: r.token0_decimals,
+                symbol: non_empty(&r.token0_symbol),
+            },
+            token1: TokenInfo {
+                id: r.token1,
+                decimals: r.token1_decimals,
+                symbol: non_empty(&r.token1_symbol),
+            },
+            fee_tier: r.fee,
+            liquidity: r.liquidity.clone(),
+            sqrt_price: r.sqrt_price_x96.clone(),
+            tick: r.tick,
+            ticks: None,
+        }
+    }
+}
+
+/// Empty strings are a "tried-and-failed" sentinel written by the symbol
+/// backfill task; surface them as missing rather than as `""`.
+fn non_empty(s: &Option<String>) -> Option<String> {
+    s.as_ref().filter(|s| !s.is_empty()).cloned()
+}
+
+/// Converts a slice of DB rows into the on-the-wire [`PoolsResponse`]
+/// envelope, attaching the indexed-block tag and optional pagination
+/// cursor. Centralised here so every route emits the same JSON shape.
+fn pools_response(
+    block_number: u64,
+    rows: &[db::PoolRow],
+    next_cursor: Option<String>,
+) -> Response {
+    Json(PoolsResponse {
+        block_number,
+        pools: rows.iter().map(PoolResponse::from).collect(),
+        next_cursor,
+    })
+    .into_response()
+}
+
+/// `GET /api/v1/{network}/uniswap/v3/pools`
+///
+/// Returns a cursor-paginated list of all indexed pools, ordered by address.
+///
+/// Pagination is last-value-seen: the DB query returns `limit + 1` rows to
+/// detect whether a next page exists, the extra row is dropped, and the
+/// address of the last row in the returned page becomes the `next_cursor`.
+/// The next request passes that back as `after=…`, and the DB uses
+/// `WHERE address > $cursor` to pick up from the row immediately after it —
+/// so the cursor points at the *last row served*, not the next one to
+/// serve.
+pub async fn get_pools( + State(state): State>, + Path(network): Path, + Query(query): Query, +) -> Result { + let chain_id = resolve_chain_id(&state, &network)?; + let block_number = latest_indexed_block(&state, chain_id).await?; + let limit = query.page_limit(); + let cursor = query.cursor()?; + + let mut rows = db::get_pools(&state.db, chain_id, cursor, limit + 1).await?; + + let has_next = rows.len() > limit as usize; + rows.truncate(limit as usize); + let next_cursor = has_next + .then(|| rows.last().map(|row| format!("{:#x}", row.address))) + .flatten(); + + Ok(pools_response(block_number, &rows, next_cursor)) +} + +/// `GET /api/v1/{network}/uniswap/v3/pools/by-ids?pool_ids=0x…,0x…` +/// +/// Returns the pools with addresses in `pool_ids` (order not guaranteed to +/// match the request). Silently skips unknown addresses so callers can treat +/// a partial response as "these are the ones I have". Fetches the latest +/// indexed block in parallel with the pool lookup. +pub async fn get_pools_by_ids( + State(state): State>, + Path(network): Path, + Query(BulkLookupQuery { pool_ids }): Query, +) -> Result { + let chain_id = resolve_chain_id(&state, &network)?; + let (block, pools) = tokio::join!( + latest_indexed_block(&state, chain_id), + db::get_pools_by_ids(&state.db, chain_id, &pool_ids.0), + ); + Ok(pools_response(block?, &pools?, None)) +} diff --git a/crates/pool-indexer/src/api/uniswap_v3/ticks.rs b/crates/pool-indexer/src/api/uniswap_v3/ticks.rs new file mode 100644 index 0000000000..416fe2cbf6 --- /dev/null +++ b/crates/pool-indexer/src/api/uniswap_v3/ticks.rs @@ -0,0 +1,121 @@ +use { + super::{PoolIds, serialize_integer}, + crate::{ + api::{ApiError, AppState, latest_indexed_block, resolve_chain_id}, + db::uniswap_v3 as db, + }, + alloy_primitives::Address, + axum::{ + extract::{Path, Query, State}, + response::{IntoResponse, Json, Response}, + }, + bigdecimal::BigDecimal, + serde::{Deserialize, Serialize}, + std::{collections::HashMap, sync::Arc}, 
+}; + +/// A single tick entry with its net liquidity. +#[derive(Serialize)] +pub struct TickEntry { + pub tick_idx: i32, + #[serde(serialize_with = "serialize_integer")] + pub liquidity_net: BigDecimal, +} + +impl From for TickEntry { + fn from(tick: db::TickRow) -> Self { + Self { + tick_idx: tick.tick_idx, + liquidity_net: tick.liquidity_net, + } + } +} + +#[derive(Serialize)] +pub struct TicksResponse { + pub block_number: u64, + pub pool: Address, + pub ticks: Vec, +} + +/// Query parameters for the bulk ticks endpoint. +#[derive(Deserialize)] +pub struct BulkTicksQuery { + /// Comma-separated list of pool addresses (`0x…,0x…`) parsed eagerly. + /// Capped at [`super::MAX_POOL_IDS_PER_REQUEST`] entries. + pub pool_ids: PoolIds, +} + +/// One pool's worth of ticks in a bulk response. +#[derive(Serialize)] +pub struct PoolTicks { + pub pool: Address, + pub ticks: Vec, +} + +/// Envelope for `GET /pools/ticks`. Only pools with at least one non-zero +/// tick appear in `pools` — callers resolving many addresses at once should +/// treat a missing pool as "no active ticks" rather than "unknown pool". +#[derive(Serialize)] +pub struct BulkTicksResponse { + pub block_number: u64, + pub pools: Vec, +} + +pub async fn get_ticks( + State(state): State>, + Path((network, pool)): Path<(String, Address)>, +) -> Result { + let chain_id = resolve_chain_id(&state, &network)?; + + let (block, ticks) = tokio::join!( + latest_indexed_block(&state, chain_id), + db::get_ticks(&state.db, chain_id, &pool), + ); + + Ok(Json(TicksResponse { + block_number: block?, + pool, + ticks: ticks?.into_iter().map(TickEntry::from).collect(), + }) + .into_response()) +} + +/// `GET /api/v1/{network}/uniswap/v3/pools/ticks?pool_ids=0x…,0x…` +/// +/// Bulk tick fetch for many pools in one round trip. Replaces the subgraph's +/// `TICKS_BY_POOL_IDS_QUERY`. Ticks are grouped by pool and sorted by +/// `tick_idx` within each group. 
Per-pool tick count is bounded by the DB +/// helper (see [`db::MAX_TICKS_PER_POOL`]). +pub async fn get_ticks_bulk( + State(state): State>, + Path(network): Path, + Query(BulkTicksQuery { pool_ids }): Query, +) -> Result { + let chain_id = resolve_chain_id(&state, &network)?; + + let (block, ticks) = tokio::join!( + latest_indexed_block(&state, chain_id), + db::get_ticks_for_pools(&state.db, chain_id, &pool_ids.0), + ); + + Ok(Json(BulkTicksResponse { + block_number: block?, + pools: group_ticks_by_pool(ticks?), + }) + .into_response()) +} + +fn group_ticks_by_pool(rows: Vec) -> Vec { + let mut groups: HashMap> = HashMap::with_capacity(rows.len()); + for row in rows { + groups.entry(row.pool_address).or_default().push(TickEntry { + tick_idx: row.tick_idx, + liquidity_net: row.liquidity_net, + }); + } + groups + .into_iter() + .map(|(pool, ticks)| PoolTicks { pool, ticks }) + .collect() +} diff --git a/crates/pool-indexer/src/arguments.rs b/crates/pool-indexer/src/arguments.rs new file mode 100644 index 0000000000..afd005928a --- /dev/null +++ b/crates/pool-indexer/src/arguments.rs @@ -0,0 +1,7 @@ +use std::path::PathBuf; + +#[derive(clap::Parser)] +pub struct Arguments { + #[clap(long, env)] + pub config: PathBuf, +} diff --git a/crates/pool-indexer/src/cold_seeder.rs b/crates/pool-indexer/src/cold_seeder.rs new file mode 100644 index 0000000000..87407728a3 --- /dev/null +++ b/crates/pool-indexer/src/cold_seeder.rs @@ -0,0 +1,408 @@ +//! Bootstraps the pool-indexer from on-chain data alone. +//! +//! Used when a chain has no Uniswap V3 subgraph. Three phases: +//! +//! 1. **Pool discovery** — scan `PoolCreated` events on the factory from +//! genesis to `snapshot_block`. +//! 2. **State snapshot** — per-pool `slot0()` + `liquidity()` at +//! `snapshot_block`, fanned out across concurrent eth_calls. +//! 3. **Tick reconstruction** — for pools with non-zero liquidity, filter +//! `Mint`/`Burn` logs by pool address over the full history and accumulate +//! 
`liquidity_net` deltas per tick. +//! +//! The live indexer takes over from `snapshot_block + 1` via `catch_up`. + +use { + crate::{ + db::uniswap_v3 as db, + indexer::uniswap_v3::{NewPoolData, PoolStateData, TickDeltaData, bisecting_get_logs}, + metrics::Metrics, + }, + alloy::{primitives::Address, providers::Provider, rpc::types::Log, sol_types::SolEvent}, + anyhow::{Context, Result}, + contracts::{ + ERC20, + IUniswapV3Factory::IUniswapV3Factory::PoolCreated, + UniswapV3Pool::{ + self, + UniswapV3Pool::{Burn, Mint}, + }, + }, + ethrpc::AlloyProvider, + futures::{StreamExt, TryStreamExt}, + sqlx::PgPool, + std::collections::HashMap, + tracing::{info, instrument, warn}, +}; + +/// Initial block-range size for `PoolCreated` discovery. Bisected on +/// "range too large" errors, so picking too small only costs extra +/// round-trips, never fails. 10k is Alchemy's per-call cap on chains like +/// Ink; more permissive endpoints could run faster with a larger value. +const DISCOVERY_BLOCK_CHUNK: u64 = 10_000; + +/// Initial block-range size for Mint/Burn history scans in phase 3. +const HISTORY_BLOCK_CHUNK: u64 = 10_000; + +/// Number of pools per `eth_getLogs` address-filter list in phase 3. Must stay +/// under the RPC provider's filter-size limit. +const POOL_ADDRESS_BATCH: usize = 100; + +/// Concurrent view-call fan-out for the per-contract reads we issue during +/// seeding: ERC-20 `decimals()` in phase 1 and pool `slot0()` / `liquidity()` +/// in phase 2. +const POOL_VIEW_CALL_CONCURRENCY: usize = 50; + +/// Concurrency for concurrent `eth_getLogs` calls. 
+const LOG_FETCH_CONCURRENCY: usize = 8; + +pub async fn cold_seed( + db: &PgPool, + network: &str, + chain_id: u64, + provider: AlloyProvider, + factory: Address, + factory_deployment_block: u64, + snapshot_block: Option, +) -> Result { + let snapshot_block = match snapshot_block { + Some(b) => b, + None => provider + .get_block_number() + .await + .context("fetch current block")?, + }; + + info!( + chain_id, + factory_deployment_block, snapshot_block, "cold-seeding pool-indexer from chain" + ); + + let metrics = crate::metrics::Metrics::get(); + + let pools = { + let labels = [network, "discovery"]; + let _t = Metrics::timer(&metrics.cold_seed_phase_seconds, &labels); + discover_pools( + provider.clone(), + factory, + factory_deployment_block, + snapshot_block, + ) + .await? + }; + metrics + .cold_seed_pools_discovered + .with_label_values(&[network]) + .set(i64::try_from(pools.len()).unwrap_or(0)); + info!(chain_id, pools = pools.len(), "pools discovered"); + persist_pools(db, chain_id, &factory, &pools).await?; + + let states = { + let labels = [network, "state_snapshot"]; + let _t = Metrics::timer(&metrics.cold_seed_phase_seconds, &labels); + snapshot_pool_states(provider.clone(), &pools, snapshot_block).await? + }; + info!(chain_id, states = states.len(), "pool states snapshotted"); + persist_pool_states(db, chain_id, &factory, &states).await?; + + // Reconstruct ticks for every discovered pool, not just the ones with + // currently-active liquidity. A pool with `state.liquidity == 0` can + // still hold dormant out-of-range positions whose `liquidity_net` deltas + // matter once the price moves back into range — skipping them would + // leave the indexer mispricing those pools after the fact. + let pool_addresses: Vec
= pools.iter().map(|p| p.address).collect(); + info!( + chain_id, + pools = pool_addresses.len(), + "reconstructing ticks for all discovered pools" + ); + + { + let labels = [network, "tick_reconstruction"]; + let _t = Metrics::timer(&metrics.cold_seed_phase_seconds, &labels); + reconstruct_and_persist_ticks( + db, + chain_id, + &factory, + provider.clone(), + &pool_addresses, + factory_deployment_block, + snapshot_block, + ) + .await?; + } + + info!(chain_id, snapshot_block, "cold seeding complete"); + Ok(snapshot_block) +} + +#[instrument(skip(provider))] +async fn discover_pools( + provider: AlloyProvider, + factory: Address, + from_block: u64, + to_block: u64, +) -> Result> { + // Chunk the full block range, fetch in parallel, decode PoolCreated events. + let ranges: Vec<(u64, u64)> = (from_block..=to_block) + .step_by(DISCOVERY_BLOCK_CHUNK as usize) + .map(|start| (start, (start + DISCOVERY_BLOCK_CHUNK - 1).min(to_block))) + .collect(); + + let logs: Vec = futures::stream::iter(ranges) + .map(|(from, to)| { + let provider = provider.clone(); + async move { fetch_pool_created_logs(provider, factory, from, to).await } + }) + .buffered(LOG_FETCH_CONCURRENCY) + .try_concat() + .await?; + + let events: Vec<(Log, PoolCreated)> = logs + .into_iter() + .filter_map(|log| { + let decoded = PoolCreated::decode_log(&log.inner).ok()?; + Some((log, decoded.data)) + }) + .collect(); + + // Fetch ERC-20 decimals for every referenced token (dedup first). + let tokens: std::collections::HashSet
= events + .iter() + .flat_map(|(_, e)| [e.token0, e.token1]) + .collect(); + let decimals = fetch_decimals_concurrent(provider, tokens).await; + + Ok(events + .into_iter() + .map(|(log, e)| NewPoolData { + address: e.pool, + token0: e.token0, + token1: e.token1, + fee: e.fee.to::(), + token0_decimals: decimals.get(&e.token0).copied(), + token1_decimals: decimals.get(&e.token1).copied(), + token0_symbol: None, + token1_symbol: None, + created_block: log.block_number.unwrap_or(0), + }) + .collect()) +} + +async fn fetch_pool_created_logs( + provider: AlloyProvider, + factory: Address, + from: u64, + to: u64, +) -> Result> { + bisecting_get_logs( + &provider, + from, + to, + vec![factory], + vec![PoolCreated::SIGNATURE_HASH], + ) + .await +} + +async fn fetch_decimals_concurrent( + provider: AlloyProvider, + tokens: std::collections::HashSet
, +) -> HashMap { + futures::stream::iter(tokens) + .map(|token| { + let provider = provider.clone(); + async move { + let dec = ERC20::Instance::new(token, provider.clone()) + .decimals() + .call() + .await + .ok(); + (token, dec) + } + }) + .buffer_unordered(POOL_VIEW_CALL_CONCURRENCY) + .filter_map(|(token, opt)| async move { opt.map(|d| (token, d)) }) + .collect() + .await +} + +async fn persist_pools( + db: &PgPool, + chain_id: u64, + factory: &Address, + pools: &[NewPoolData], +) -> Result<()> { + let mut tx = db.begin().await.context("begin pools tx")?; + db::insert_pools(&mut tx, chain_id, factory, pools).await?; + tx.commit().await.context("commit pools tx")?; + Ok(()) +} + +#[instrument(skip(provider, pools))] +async fn snapshot_pool_states( + provider: AlloyProvider, + pools: &[NewPoolData], + at_block: u64, +) -> Result> { + let addresses: Vec
= pools.iter().map(|p| p.address).collect(); + let states: Vec = futures::stream::iter(addresses) + .map(|pool| { + let provider = provider.clone(); + async move { fetch_pool_state(provider, pool, at_block).await } + }) + .buffer_unordered(POOL_VIEW_CALL_CONCURRENCY) + .filter_map(|res| async move { res }) + .collect() + .await; + Ok(states) +} + +/// Fetch a pool's `slot0` + `liquidity` at `at_block`. Returns `None` if +/// either eth_call fails (RPC blip, contract not yet deployed at that block, +/// non-conforming pool); the failure is logged and the caller treats this +/// pool as missing-state for the snapshot. +async fn fetch_pool_state( + provider: AlloyProvider, + pool: Address, + at_block: u64, +) -> Option { + let instance = UniswapV3Pool::Instance::new(pool, provider.clone()); + let slot0_call = instance.slot0().block(at_block.into()); + let liquidity_call = instance.liquidity().block(at_block.into()); + let (slot0, liquidity) = tokio::join!(slot0_call.call(), liquidity_call.call()); + let slot0 = match slot0 { + Ok(s) => s, + Err(err) => { + warn!(%pool, ?err, "slot0 failed"); + return None; + } + }; + let liquidity = match liquidity { + Ok(l) => l, + Err(err) => { + warn!(%pool, ?err, "liquidity failed"); + return None; + } + }; + Some(PoolStateData { + pool_address: pool, + block_number: at_block, + sqrt_price_x96: slot0.sqrtPriceX96, + liquidity, + tick: slot0.tick.as_i32(), + }) +} + +async fn persist_pool_states( + db: &PgPool, + chain_id: u64, + factory: &Address, + states: &[PoolStateData], +) -> Result<()> { + let mut tx = db.begin().await.context("begin states tx")?; + db::upsert_pool_states(&mut tx, chain_id, factory, states).await?; + tx.commit().await.context("commit states tx")?; + Ok(()) +} + +/// Processes active pools one `POOL_ADDRESS_BATCH`-sized group at a time. 
+/// Each group's full history is fetched, deltas accumulated, and flushed to +/// the DB before moving on — bounds memory to roughly one batch's worth of +/// logs at any moment, and gives operators visible progress on long runs. +#[instrument(skip(db, provider, pool_addresses))] +async fn reconstruct_and_persist_ticks( + db: &PgPool, + chain_id: u64, + factory: &Address, + provider: AlloyProvider, + pool_addresses: &[Address], + from_block: u64, + to_block: u64, +) -> Result<()> { + let total = pool_addresses.len(); + let mut processed = 0usize; + let mut tick_rows = 0usize; + + for pool_batch in pool_addresses.chunks(POOL_ADDRESS_BATCH) { + let pool_batch = pool_batch.to_vec(); + let batch_size = pool_batch.len(); + + let block_ranges: Vec<(u64, u64)> = (from_block..=to_block) + .step_by(HISTORY_BLOCK_CHUNK as usize) + .map(|start| (start, (start + HISTORY_BLOCK_CHUNK - 1).min(to_block))) + .collect(); + + let logs: Vec = futures::stream::iter(block_ranges) + .map(|(from, to)| { + let provider = provider.clone(); + let pool_batch = pool_batch.clone(); + async move { fetch_mint_burn_logs(provider, pool_batch, from, to).await } + }) + .buffered(LOG_FETCH_CONCURRENCY) + .try_concat() + .await?; + + // Accumulate net liquidity per pool × tick boundary across the whole + // history range: `(pool_address, tick_idx) -> sum(liquidity_net)`. + // Mints add at tickLower / subtract at tickUpper; Burns reverse. 
+ let mut acc: HashMap<(Address, i32), i128> = HashMap::new(); + for log in logs { + let Some(t) = log.topic0() else { continue }; + let pool = log.address(); + if *t == Mint::SIGNATURE_HASH + && let Ok(decoded) = Mint::decode_log(&log.inner) + { + let e = &decoded.data; + let amount = e.amount.cast_signed(); + *acc.entry((pool, e.tickLower.as_i32())).or_default() += amount; + *acc.entry((pool, e.tickUpper.as_i32())).or_default() -= amount; + } else if *t == Burn::SIGNATURE_HASH + && let Ok(decoded) = Burn::decode_log(&log.inner) + { + let e = &decoded.data; + let amount = e.amount.cast_signed(); + *acc.entry((pool, e.tickLower.as_i32())).or_default() -= amount; + *acc.entry((pool, e.tickUpper.as_i32())).or_default() += amount; + } + } + + let deltas: Vec = acc + .into_iter() + .filter(|(_, d)| *d != 0) + .map(|((pool, tick), delta)| TickDeltaData { + pool_address: pool, + tick_idx: tick, + delta, + }) + .collect(); + + if !deltas.is_empty() { + db::batch_seed_ticks(db, chain_id, factory, &deltas).await?; + tick_rows += deltas.len(); + } + + processed += batch_size; + info!(processed, total, tick_rows, "tick reconstruction progress"); + } + Ok(()) +} + +async fn fetch_mint_burn_logs( + provider: AlloyProvider, + pool_batch: Vec
, + from: u64, + to: u64, +) -> Result> { + let pool_count = pool_batch.len(); + bisecting_get_logs( + &provider, + from, + to, + pool_batch, + vec![Mint::SIGNATURE_HASH, Burn::SIGNATURE_HASH], + ) + .await + .with_context(|| format!("mint_burn_logs({from}..={to}, pools={pool_count})")) +} diff --git a/crates/pool-indexer/src/config.rs b/crates/pool-indexer/src/config.rs new file mode 100644 index 0000000000..0b25223177 --- /dev/null +++ b/crates/pool-indexer/src/config.rs @@ -0,0 +1,256 @@ +use { + alloy_primitives::Address, + anyhow::{Context, Result}, + serde::Deserialize, + std::{ + collections::HashSet, + fmt, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + num::NonZeroU32, + path::Path, + time::Duration, + }, + url::Url, +}; + +fn default_max_connections() -> NonZeroU32 { + NonZeroU32::new(10).unwrap() +} + +fn default_chunk_size() -> u64 { + 500 +} + +fn default_poll_interval_secs() -> u64 { + 3 +} + +fn default_fetch_concurrency() -> usize { + 8 +} + +fn default_prefetch_concurrency() -> usize { + 50 +} + +fn default_bind_address() -> SocketAddr { + SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 7777)) +} + +fn default_metrics_address() -> SocketAddr { + SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::UNSPECIFIED, + observe::metrics::DEFAULT_METRICS_PORT, + )) +} + +/// Network identifier used in API routes (e.g. "mainnet", "arbitrum-one"). +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize)] +#[serde(transparent)] +pub struct NetworkName(String); + +impl NetworkName { + pub fn new(name: impl Into) -> Self { + Self(name.into()) + } + + pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl fmt::Display for NetworkName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&self.0) + } +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "kebab-case", deny_unknown_fields)] +pub struct DatabaseConfig { + /// Postgres connection URL. Accepts `%ENV_VAR` to pull from the + /// environment. 
+ #[serde(deserialize_with = "configs::deserialize_env::deserialize_url_from_env")] + pub url: Url, + #[serde(default = "default_max_connections")] + pub max_connections: NonZeroU32, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "kebab-case", deny_unknown_fields)] +pub struct NetworkConfig { + pub name: NetworkName, + pub chain_id: u64, + #[serde(deserialize_with = "configs::deserialize_env::deserialize_url_from_env")] + pub rpc_url: Url, + /// One or more Uniswap V3 factories to index. Each factory runs its own + /// seed + live-indexing loop; pools from all factories share the per-chain + /// namespace in the DB and API. + pub factories: Vec, + #[serde(default = "default_chunk_size")] + pub chunk_size: u64, + #[serde(default = "default_poll_interval_secs")] + pub poll_interval_secs: u64, + #[serde(default = "default_fetch_concurrency")] + pub fetch_concurrency: usize, + #[serde(default = "default_prefetch_concurrency")] + pub prefetch_concurrency: usize, + /// When `true`, use `latest` instead of `finalized` as the target block. + /// Useful for test environments where finality is not simulated (e.g. local + /// Anvil). + #[serde(skip)] + pub use_latest: bool, + /// Subgraph GraphQL endpoint for seeding initial state. If absent, the + /// indexer starts from genesis event indexing. + #[serde( + default, + deserialize_with = "configs::deserialize_env::deserialize_optional_url_from_env" + )] + pub subgraph_url: Option, + /// Block number to seed at. Defaults to the subgraph's current block when + /// `subgraph_url` is set. 
+ pub seed_block: Option, +} + +impl NetworkConfig { + pub fn poll_interval(&self) -> Duration { + Duration::from_secs(self.poll_interval_secs) + } + + pub fn indexer_config(&self, factory: Address) -> IndexerConfig { + IndexerConfig { + network: self.name.clone(), + chain_id: self.chain_id, + factory_address: factory, + chunk_size: self.chunk_size, + use_latest: self.use_latest, + fetch_concurrency: self.fetch_concurrency, + prefetch_concurrency: self.prefetch_concurrency, + } + } +} + +#[derive(Debug, Clone, Copy, Deserialize)] +#[serde(rename_all = "kebab-case", deny_unknown_fields)] +pub struct FactoryConfig { + pub address: Address, + /// Block the factory was deployed at. Cold-seed log discovery starts here + /// instead of block 0 — saves thousands of empty `eth_getLogs` requests on + /// chains where the factory was deployed long after genesis (e.g. + /// Arbitrum). Leave unset (0) on chains where the factory is near + /// genesis. + #[serde(default)] + pub deployment_block: u64, +} + +/// The subset of [`NetworkConfig`] that [`UniswapV3Indexer`] needs at runtime. 
+#[derive(Debug, Clone)] +pub struct IndexerConfig { + pub network: NetworkName, + pub chain_id: u64, + pub factory_address: Address, + pub chunk_size: u64, + pub use_latest: bool, + pub fetch_concurrency: usize, + pub prefetch_concurrency: usize, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "kebab-case", deny_unknown_fields)] +pub struct ApiConfig { + #[serde(default = "default_bind_address")] + pub bind_address: SocketAddr, +} + +impl Default for ApiConfig { + fn default() -> Self { + Self { + bind_address: default_bind_address(), + } + } +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "kebab-case", deny_unknown_fields)] +pub struct MetricsConfig { + #[serde(default = "default_metrics_address")] + pub bind_address: SocketAddr, +} + +impl Default for MetricsConfig { + fn default() -> Self { + Self { + bind_address: default_metrics_address(), + } + } +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "kebab-case", deny_unknown_fields)] +pub struct Configuration { + pub database: DatabaseConfig, + #[serde(rename = "network")] + pub networks: Vec, + #[serde(default)] + pub api: ApiConfig, + #[serde(default)] + pub metrics: MetricsConfig, +} + +impl Configuration { + pub fn from_path(path: &Path) -> Result { + let content = std::fs::read_to_string(path) + .with_context(|| format!("reading config file {}", path.display()))?; + let config: Self = toml::from_str(&content).context("parsing config file")?; + config.validate_networks(); + Ok(config) + } + + /// Cross-network sanity checks that don't fit serde's per-field + /// validation: uniqueness of names / chain IDs / factory addresses, the + /// subgraph-URL ↔ multi-factory mutual exclusion, and the at-least-one- + /// network requirement. 
+ fn validate_networks(&self) { + assert!( + !self.networks.is_empty(), + "at least one [[network]] must be configured" + ); + let mut names = HashSet::new(); + let mut chain_ids = HashSet::new(); + for n in &self.networks { + assert!( + names.insert(n.name.as_str()), + "duplicate network name: {}", + n.name, + ); + assert!( + chain_ids.insert(n.chain_id), + "duplicate chain_id: {}", + n.chain_id, + ); + assert!( + !n.factories.is_empty(), + "network {} must list at least one factory", + n.name, + ); + let mut seen = HashSet::new(); + for f in &n.factories { + assert!( + seen.insert(f.address), + "network {}: duplicate factory {}", + n.name, + f.address, + ); + } + // A subgraph indexes one specific factory — applying one URL to + // many factories would double-seed the wrong data. Multi-factory + // networks must cold-seed each factory. + assert!( + !(n.factories.len() > 1 && n.subgraph_url.is_some()), + "network {}: subgraph-url cannot be combined with multiple factories (omit \ + subgraph-url to cold-seed each factory)", + n.name, + ); + } + } +} diff --git a/crates/pool-indexer/src/db/mod.rs b/crates/pool-indexer/src/db/mod.rs new file mode 100644 index 0000000000..c24144d0e0 --- /dev/null +++ b/crates/pool-indexer/src/db/mod.rs @@ -0,0 +1 @@ +pub mod uniswap_v3; diff --git a/crates/pool-indexer/src/db/uniswap_v3.rs b/crates/pool-indexer/src/db/uniswap_v3.rs new file mode 100644 index 0000000000..8616f7cd20 --- /dev/null +++ b/crates/pool-indexer/src/db/uniswap_v3.rs @@ -0,0 +1,729 @@ +use { + crate::indexer::uniswap_v3::{LiquidityUpdateData, NewPoolData, PoolStateData, TickDeltaData}, + alloy_primitives::Address, + anyhow::{Context, Result}, + bigdecimal::BigDecimal, + num::BigInt, + number::conversions::u160_to_big_decimal, + sqlx::{PgPool, Postgres, Row, Transaction, postgres::PgRow}, +}; + +fn bytes_to_addr(b: Vec) -> Result
{ + Address::try_from(b.as_slice()).context("invalid address bytes") +} + +fn sql_u128(value: u128) -> BigDecimal { + BigDecimal::from(BigInt::from(value)) +} + +fn sql_i128(value: i128) -> BigDecimal { + BigDecimal::from(BigInt::from(value)) +} + +fn address_bytes_list(addresses: &[Address]) -> Vec<&[u8]> { + addresses.iter().map(|address| address.as_slice()).collect() +} + +fn decode_pool_rows(rows: Vec) -> Result> { + rows.into_iter().map(PoolRow::try_from).collect() +} + +pub async fn get_checkpoint( + pool: &PgPool, + chain_id: u64, + contract: &Address, +) -> Result> { + let row = sqlx::query( + "SELECT block_number FROM pool_indexer_checkpoints WHERE chain_id = $1 AND contract = $2", + ) + .bind(chain_id.cast_signed()) + .bind(contract.as_slice()) + .fetch_optional(pool) + .await + .context("get_checkpoint")?; + + Ok(row.map(|r| r.get::("block_number").cast_unsigned())) +} + +pub async fn set_checkpoint( + executor: impl sqlx::PgExecutor<'_>, + chain_id: u64, + contract: &Address, + block_number: u64, +) -> Result<()> { + sqlx::query( + "INSERT INTO pool_indexer_checkpoints (chain_id, contract, block_number) + VALUES ($1, $2, $3) + ON CONFLICT (chain_id, contract) DO UPDATE SET block_number = EXCLUDED.block_number", + ) + .bind(chain_id.cast_signed()) + .bind(contract.as_slice()) + .bind(block_number.cast_signed()) + .execute(executor) + .await + .context("set_checkpoint")?; + Ok(()) +} + +pub async fn insert_pools( + tx: &mut Transaction<'_, Postgres>, + chain_id: u64, + factory: &Address, + pools: &[NewPoolData], +) -> Result<()> { + if pools.is_empty() { + return Ok(()); + } + let addresses: Vec<&[u8]> = pools.iter().map(|pool| pool.address.as_slice()).collect(); + let token0s: Vec<&[u8]> = pools.iter().map(|pool| pool.token0.as_slice()).collect(); + let token1s: Vec<&[u8]> = pools.iter().map(|pool| pool.token1.as_slice()).collect(); + let fees: Vec = pools.iter().map(|pool| pool.fee.cast_signed()).collect(); + let t0_decimals: Vec> = pools + .iter() + 
.map(|pool| pool.token0_decimals.map(i16::from)) + .collect(); + let t1_decimals: Vec> = pools + .iter() + .map(|pool| pool.token1_decimals.map(i16::from)) + .collect(); + let t0_symbols: Vec> = pools.iter().map(|p| p.token0_symbol.clone()).collect(); + let t1_symbols: Vec> = pools.iter().map(|p| p.token1_symbol.clone()).collect(); + let created_blocks: Vec = pools + .iter() + .map(|pool| pool.created_block.cast_signed()) + .collect(); + + sqlx::query( + "INSERT INTO uniswap_v3_pools + (chain_id, address, factory, token0, token1, fee, token0_decimals, token1_decimals, + token0_symbol, token1_symbol, created_block) + SELECT $1, t.addr, $2, t.t0, t.t1, t.fee, t.t0d, t.t1d, t.t0s, t.t1s, t.cblk + FROM UNNEST($3::BYTEA[], $4::BYTEA[], $5::BYTEA[], $6::INT4[], $7::INT2[], $8::INT2[], + $9::TEXT[], $10::TEXT[], $11::INT8[]) + AS t(addr, t0, t1, fee, t0d, t1d, t0s, t1s, cblk) + ON CONFLICT (chain_id, address) DO NOTHING", + ) + .bind(chain_id.cast_signed()) + .bind(factory.as_slice()) + .bind(addresses) + .bind(token0s) + .bind(token1s) + .bind(fees) + .bind(t0_decimals) + .bind(t1_decimals) + .bind(t0_symbols) + .bind(t1_symbols) + .bind(created_blocks) + .execute(&mut **tx) + .await + .context("insert_pools")?; + Ok(()) +} + +pub async fn upsert_pool_states( + tx: &mut Transaction<'_, Postgres>, + chain_id: u64, + factory: &Address, + states: &[PoolStateData], +) -> Result<()> { + if states.is_empty() { + return Ok(()); + } + let addresses: Vec<&[u8]> = states + .iter() + .map(|state| state.pool_address.as_slice()) + .collect(); + let block_numbers: Vec = states + .iter() + .map(|state| state.block_number.cast_signed()) + .collect(); + let sqrt_prices: Vec = states + .iter() + .map(|state| u160_to_big_decimal(&state.sqrt_price_x96)) + .collect(); + let liquidities: Vec = states + .iter() + .map(|state| sql_u128(state.liquidity)) + .collect(); + let ticks: Vec = states.iter().map(|state| state.tick).collect(); + + sqlx::query( + "WITH latest AS ( + SELECT DISTINCT ON 
(addr) addr, blk, sqrt, liq, tick + FROM UNNEST($3::BYTEA[], $4::INT8[], $5::NUMERIC[], $6::NUMERIC[], $7::INT4[]) + AS t(addr, blk, sqrt, liq, tick) + ORDER BY addr, blk DESC + ) + INSERT INTO uniswap_v3_pool_states + (chain_id, pool_address, block_number, sqrt_price_x96, liquidity, tick) + SELECT $1, l.addr, l.blk, l.sqrt, l.liq, l.tick + FROM latest l + WHERE EXISTS ( + SELECT 1 FROM uniswap_v3_pools + WHERE chain_id = $1 AND address = l.addr AND factory = $2 + ) + ON CONFLICT (chain_id, pool_address) DO UPDATE + SET block_number = EXCLUDED.block_number, + sqrt_price_x96 = EXCLUDED.sqrt_price_x96, + liquidity = EXCLUDED.liquidity, + tick = EXCLUDED.tick", + ) + .bind(chain_id.cast_signed()) + .bind(factory.as_slice()) + .bind(addresses) + .bind(block_numbers) + .bind(sqrt_prices) + .bind(liquidities) + .bind(ticks) + .execute(&mut **tx) + .await + .context("upsert_pool_states")?; + Ok(()) +} + +pub async fn batch_update_pool_liquidity( + tx: &mut Transaction<'_, Postgres>, + chain_id: u64, + factory: &Address, + updates: &[LiquidityUpdateData], +) -> Result<()> { + if updates.is_empty() { + return Ok(()); + } + let addresses: Vec<&[u8]> = updates + .iter() + .map(|update| update.pool_address.as_slice()) + .collect(); + let liquidities: Vec = updates + .iter() + .map(|update| sql_u128(update.liquidity)) + .collect(); + let block_numbers: Vec = updates + .iter() + .map(|update| update.block_number.cast_signed()) + .collect(); + + sqlx::query( + "WITH latest AS ( + SELECT DISTINCT ON (addr) addr, liq, blk + FROM UNNEST($3::BYTEA[], $4::NUMERIC[], $5::INT8[]) AS t(addr, liq, blk) + ORDER BY addr, blk DESC + ) + UPDATE uniswap_v3_pool_states s + SET liquidity = l.liq, block_number = l.blk + FROM latest l + WHERE s.chain_id = $1 AND s.pool_address = l.addr + AND EXISTS ( + SELECT 1 FROM uniswap_v3_pools p + WHERE p.chain_id = $1 AND p.address = l.addr AND p.factory = $2 + )", + ) + .bind(chain_id.cast_signed()) + .bind(factory.as_slice()) + .bind(addresses) + 
.bind(liquidities) + .bind(block_numbers) + .execute(&mut **tx) + .await + .context("batch_update_pool_liquidity")?; + Ok(()) +} + +pub async fn batch_update_ticks( + tx: &mut Transaction<'_, Postgres>, + chain_id: u64, + factory: &Address, + deltas: &[TickDeltaData], +) -> Result<()> { + if deltas.is_empty() { + return Ok(()); + } + let addresses: Vec<&[u8]> = deltas + .iter() + .map(|delta| delta.pool_address.as_slice()) + .collect(); + let tick_idxs: Vec = deltas.iter().map(|delta| delta.tick_idx).collect(); + let delta_values: Vec = deltas.iter().map(|delta| sql_i128(delta.delta)).collect(); + + sqlx::query( + "WITH input AS ( + SELECT t.addr, t.tick_idx, SUM(t.delta) AS total_delta + FROM UNNEST($3::BYTEA[], $4::INT4[], $5::NUMERIC[]) AS t(addr, tick_idx, delta) + GROUP BY t.addr, t.tick_idx + ), + upserted AS ( + INSERT INTO uniswap_v3_ticks (chain_id, pool_address, tick_idx, liquidity_net) + SELECT $1, i.addr, i.tick_idx, i.total_delta + FROM input i + WHERE EXISTS ( + SELECT 1 FROM uniswap_v3_pools + WHERE chain_id = $1 AND address = i.addr AND factory = $2 + ) + ON CONFLICT (chain_id, pool_address, tick_idx) DO UPDATE + SET liquidity_net = uniswap_v3_ticks.liquidity_net + EXCLUDED.liquidity_net + RETURNING chain_id, pool_address, tick_idx, liquidity_net + ) + DELETE FROM uniswap_v3_ticks ticks + USING upserted + WHERE ticks.chain_id = upserted.chain_id + AND ticks.pool_address = upserted.pool_address + AND ticks.tick_idx = upserted.tick_idx + AND upserted.liquidity_net = 0", + ) + .bind(chain_id.cast_signed()) + .bind(factory.as_slice()) + .bind(addresses) + .bind(tick_idxs) + .bind(delta_values) + .execute(&mut **tx) + .await + .context("batch_update_ticks")?; + Ok(()) +} + +/// Insert/replace tick `liquidity_net` values directly (no delta accumulation). +/// Used by the subgraph seeder where the subgraph value IS the authoritative +/// net. 
+pub async fn batch_seed_ticks( + executor: impl sqlx::PgExecutor<'_>, + chain_id: u64, + factory: &Address, + ticks: &[TickDeltaData], +) -> Result<()> { + if ticks.is_empty() { + return Ok(()); + } + let addresses: Vec<&[u8]> = ticks + .iter() + .map(|tick| tick.pool_address.as_slice()) + .collect(); + let tick_idxs: Vec = ticks.iter().map(|tick| tick.tick_idx).collect(); + let values: Vec = ticks.iter().map(|tick| sql_i128(tick.delta)).collect(); + + sqlx::query( + "WITH input AS ( + SELECT t.addr, t.tick_idx, SUM(t.val) AS net + FROM UNNEST($3::BYTEA[], $4::INT4[], $5::NUMERIC[]) AS t(addr, tick_idx, val) + GROUP BY t.addr, t.tick_idx + ) + INSERT INTO uniswap_v3_ticks (chain_id, pool_address, tick_idx, liquidity_net) + SELECT $1, i.addr, i.tick_idx, i.net + FROM input i + WHERE EXISTS ( + SELECT 1 FROM uniswap_v3_pools + WHERE chain_id = $1 AND address = i.addr AND factory = $2 + ) + AND i.net <> 0 + ON CONFLICT (chain_id, pool_address, tick_idx) DO UPDATE + SET liquidity_net = EXCLUDED.liquidity_net", + ) + .bind(chain_id.cast_signed()) + .bind(factory.as_slice()) + .bind(addresses) + .bind(tick_idxs) + .bind(values) + .execute(executor) + .await + .context("batch_seed_ticks")?; + Ok(()) +} + +/// Deletes ticks for all pools owned by `factory` on `chain_id`. Used by the +/// subgraph seeder to clear stale state before reseeding. Scoped to this +/// factory so a reseed on one factory doesn't wipe another's ticks. +pub async fn delete_ticks_for_factory( + executor: impl sqlx::PgExecutor<'_>, + chain_id: u64, + factory: &Address, +) -> Result<()> { + sqlx::query( + "DELETE FROM uniswap_v3_ticks t + USING uniswap_v3_pools p + WHERE t.chain_id = $1 + AND p.chain_id = $1 + AND p.address = t.pool_address + AND p.factory = $2", + ) + .bind(chain_id.cast_signed()) + .bind(factory.as_slice()) + .execute(executor) + .await + .context("delete_ticks_for_factory")?; + Ok(()) +} + +/// A pool with its current on-chain state (price, liquidity, tick). 
+pub struct PoolRow { + pub address: Address, + pub token0: Address, + pub token1: Address, + pub fee: u32, + pub token0_decimals: Option, + pub token1_decimals: Option, + pub token0_symbol: Option, + pub token1_symbol: Option, + pub sqrt_price_x96: BigDecimal, + pub liquidity: BigDecimal, + pub tick: i32, +} + +impl TryFrom for PoolRow { + type Error = anyhow::Error; + + fn try_from(r: PgRow) -> Result { + Ok(Self { + address: bytes_to_addr(r.get("address"))?, + token0: bytes_to_addr(r.get("token0"))?, + token1: bytes_to_addr(r.get("token1"))?, + fee: r.get::("fee").cast_unsigned(), + // The DB stores `-1` as the "tried, failed" sentinel written by + // the decimals backfill task. Drop those back to `None` so callers + // see "missing" rather than a misleading `Some(0)`. + token0_decimals: r + .get::, _>("token0_decimals") + .and_then(|d| u8::try_from(d).ok()), + token1_decimals: r + .get::, _>("token1_decimals") + .and_then(|d| u8::try_from(d).ok()), + token0_symbol: r.get("token0_symbol"), + token1_symbol: r.get("token1_symbol"), + sqrt_price_x96: r.get("sqrt_price_x96"), + liquidity: r.get("liquidity"), + tick: r.get("tick"), + }) + } +} + +/// Fetches a page of pools ordered by address with their current state. Pass +/// `cursor = None` for the first page, or the previous page's last address for +/// keyset pagination. 
+pub async fn get_pools( + pool: &PgPool, + chain_id: u64, + cursor: Option>, + limit: u64, +) -> Result> { + let rows = sqlx::query( + "SELECT p.address, p.token0, p.token1, p.fee, + p.token0_decimals, p.token1_decimals, + p.token0_symbol, p.token1_symbol, + s.sqrt_price_x96, s.liquidity, s.tick + FROM uniswap_v3_pools p + JOIN uniswap_v3_pool_states s + ON s.chain_id = p.chain_id AND s.pool_address = p.address + WHERE p.chain_id = $1 + AND ($2::BYTEA IS NULL OR p.address > $2) + ORDER BY p.address + LIMIT $3", + ) + .bind(chain_id.cast_signed()) + .bind(cursor) + .bind(limit.cast_signed()) + .fetch_all(pool) + .await + .context("get_pools")?; + + decode_pool_rows(rows) +} + +pub struct TickRow { + pub tick_idx: i32, + pub liquidity_net: BigDecimal, +} + +/// Upper bound on ticks returned per pool query. Sized ~3× the largest known +/// mainnet pool: USDC/WETH 0.05% (0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640) +/// had 1533 active ticks on 2026-04-22. Callers that hit this limit get a +/// `warn_truncated` log; bump if that starts firing on real pools. +pub const MAX_TICKS_PER_POOL: u32 = 5_000; + +/// A tick tagged with its owning pool, used by bulk-tick queries that span +/// multiple pools. +pub struct PoolTickRow { + pub pool_address: Address, + pub tick_idx: i32, + pub liquidity_net: BigDecimal, +} + +/// Fetches pools matching any of `addresses` with their current state. Returns +/// fewer rows than requested when some addresses are unknown. Ordered by +/// address to give callers a stable iteration order. 
+pub async fn get_pools_by_ids( + pool: &PgPool, + chain_id: u64, + addresses: &[Address], +) -> Result> { + if addresses.is_empty() { + return Ok(Vec::new()); + } + let rows = sqlx::query( + "SELECT p.address, p.token0, p.token1, p.fee, + p.token0_decimals, p.token1_decimals, + p.token0_symbol, p.token1_symbol, + s.sqrt_price_x96, s.liquidity, s.tick + FROM uniswap_v3_pools p + JOIN uniswap_v3_pool_states s + ON s.chain_id = p.chain_id AND s.pool_address = p.address + WHERE p.chain_id = $1 + AND p.address = ANY($2) + ORDER BY p.address", + ) + .bind(chain_id.cast_signed()) + .bind(address_bytes_list(addresses)) + .fetch_all(pool) + .await + .context("get_pools_by_ids")?; + + decode_pool_rows(rows) +} + +/// Fetches ticks for multiple pools in one query, capped at +/// [`MAX_TICKS_PER_POOL`] per pool. Uses a `LATERAL` join so each pool's +/// limit is applied individually via the PK prefix index — a flat +/// `WHERE pool_address = ANY($2)` with a single outer `LIMIT` could starve +/// later pools when one has many ticks. Rows are ordered by +/// `(pool_address, tick_idx)` so callers can group in a single pass. 
+pub async fn get_ticks_for_pools( + pool: &PgPool, + chain_id: u64, + addresses: &[Address], +) -> Result> { + if addresses.is_empty() { + return Ok(Vec::new()); + } + let rows = sqlx::query( + "SELECT t.pool_address, t.tick_idx, t.liquidity_net + FROM UNNEST($2::BYTEA[]) AS p(addr) + JOIN LATERAL ( + SELECT pool_address, tick_idx, liquidity_net + FROM uniswap_v3_ticks + WHERE chain_id = $1 AND pool_address = p.addr + ORDER BY tick_idx + LIMIT $3 + ) t ON TRUE + ORDER BY t.pool_address, t.tick_idx", + ) + .bind(chain_id.cast_signed()) + .bind(address_bytes_list(addresses)) + .bind(i64::from(MAX_TICKS_PER_POOL)) + .fetch_all(pool) + .await + .context("get_ticks_for_pools")?; + + let out: Vec = rows + .into_iter() + .map(|r| { + Ok::<_, anyhow::Error>(PoolTickRow { + pool_address: bytes_to_addr(r.get("pool_address"))?, + tick_idx: r.get("tick_idx"), + liquidity_net: r.get("liquidity_net"), + }) + }) + .collect::>()?; + warn_on_truncated_pools(&out); + Ok(out) +} + +pub async fn get_ticks( + pool: &PgPool, + chain_id: u64, + pool_address: &Address, +) -> Result> { + let ticks: Vec = sqlx::query( + "SELECT tick_idx, liquidity_net + FROM uniswap_v3_ticks + WHERE chain_id = $1 + AND pool_address = $2 + ORDER BY tick_idx + LIMIT $3", + ) + .bind(chain_id.cast_signed()) + .bind(pool_address.as_slice()) + .bind(i64::from(MAX_TICKS_PER_POOL)) + .fetch_all(pool) + .await + .context("get_ticks")? 
+ .into_iter() + .map(|r| TickRow { + tick_idx: r.get("tick_idx"), + liquidity_net: r.get("liquidity_net"), + }) + .collect(); + + if ticks.len() >= MAX_TICKS_PER_POOL as usize { + warn_truncated(pool_address); + } + Ok(ticks) +} + +fn warn_on_truncated_pools(rows: &[PoolTickRow]) { + let mut tick_count: std::collections::HashMap<&Address, usize> = + std::collections::HashMap::new(); + for row in rows { + *tick_count.entry(&row.pool_address).or_default() += 1; + } + for (addr, count) in tick_count { + if count >= MAX_TICKS_PER_POOL as usize { + warn_truncated(addr); + } + } +} + +fn warn_truncated(pool: &Address) { + tracing::warn!( + %pool, + limit = MAX_TICKS_PER_POOL, + "tick query hit MAX_TICKS_PER_POOL limit; results may be truncated", + ); +} + +/// Returns all distinct token addresses that have no symbol recorded yet. +pub async fn get_tokens_missing_symbols(pool: &PgPool, chain_id: u64) -> Result> { + let rows = sqlx::query( + "SELECT DISTINCT token FROM ( + SELECT token0 AS token FROM uniswap_v3_pools + WHERE chain_id = $1 AND token0_symbol IS NULL + UNION + SELECT token1 AS token FROM uniswap_v3_pools + WHERE chain_id = $1 AND token1_symbol IS NULL + ) t", + ) + .bind(chain_id.cast_signed()) + .fetch_all(pool) + .await + .context("get_tokens_missing_symbols")?; + + rows.into_iter() + .map(|r| bytes_to_addr(r.get("token"))) + .collect() +} + +/// Returns all distinct token addresses that have no decimals recorded yet. 
+pub async fn get_tokens_missing_decimals(pool: &PgPool, chain_id: u64) -> Result> { + let rows = sqlx::query( + "SELECT DISTINCT token FROM ( + SELECT token0 AS token FROM uniswap_v3_pools + WHERE chain_id = $1 AND token0_decimals IS NULL + UNION + SELECT token1 AS token FROM uniswap_v3_pools + WHERE chain_id = $1 AND token1_decimals IS NULL + ) t", + ) + .bind(chain_id.cast_signed()) + .fetch_all(pool) + .await + .context("get_tokens_missing_decimals")?; + + rows.into_iter() + .map(|r| bytes_to_addr(r.get("token"))) + .collect() +} + +/// Batched update of `token0_decimals` / `token1_decimals` for every pool +/// containing one of the provided tokens. Pass `-1` for entries that were +/// "tried, failed" so the next backfill pass's `IS NULL` filter skips them. +/// +/// One round-trip via a writeable CTE: the side-by-side UPDATE ... FROM UNNEST +/// pattern would mis-handle pools where both `token0` and `token1` appear in +/// the batch (Postgres picks an arbitrary FROM row per target row, so only +/// one side would get set). Splitting into two separate UPDATEs keyed on each +/// side avoids that. 
+pub async fn batch_set_token_decimals( + pool: &PgPool, + chain_id: u64, + entries: &[(Address, i16)], +) -> Result<()> { + if entries.is_empty() { + return Ok(()); + } + let tokens: Vec<&[u8]> = entries.iter().map(|(t, _)| t.as_slice()).collect(); + let decimals: Vec = entries.iter().map(|(_, d)| *d).collect(); + + sqlx::query( + "WITH input AS ( + SELECT * FROM UNNEST($2::BYTEA[], $3::INT2[]) AS t(tok, dec) + ), + update_t0 AS ( + UPDATE uniswap_v3_pools p + SET token0_decimals = i.dec + FROM input i + WHERE p.chain_id = $1 + AND p.token0 = i.tok + AND p.token0_decimals IS NULL + RETURNING 1 + ) + UPDATE uniswap_v3_pools p + SET token1_decimals = i.dec + FROM input i + WHERE p.chain_id = $1 + AND p.token1 = i.tok + AND p.token1_decimals IS NULL", + ) + .bind(chain_id.cast_signed()) + .bind(tokens) + .bind(decimals) + .execute(pool) + .await + .context("batch_set_token_decimals")?; + + Ok(()) +} + +/// Batched update of `token0_symbol` / `token1_symbol` for every pool +/// containing one of the provided tokens. Pass `""` for entries that were +/// "tried, failed" so the next backfill pass's `IS NULL` filter skips them. +/// See [`batch_set_token_decimals`] for the writeable-CTE rationale. 
+pub async fn batch_set_token_symbols( + pool: &PgPool, + chain_id: u64, + entries: &[(Address, String)], +) -> Result<()> { + if entries.is_empty() { + return Ok(()); + } + let tokens: Vec<&[u8]> = entries.iter().map(|(t, _)| t.as_slice()).collect(); + let symbols: Vec<&str> = entries.iter().map(|(_, s)| s.as_str()).collect(); + + sqlx::query( + "WITH input AS ( + SELECT * FROM UNNEST($2::BYTEA[], $3::TEXT[]) AS t(tok, sym) + ), + update_t0 AS ( + UPDATE uniswap_v3_pools p + SET token0_symbol = i.sym + FROM input i + WHERE p.chain_id = $1 + AND p.token0 = i.tok + AND p.token0_symbol IS NULL + RETURNING 1 + ) + UPDATE uniswap_v3_pools p + SET token1_symbol = i.sym + FROM input i + WHERE p.chain_id = $1 + AND p.token1 = i.tok + AND p.token1_symbol IS NULL", + ) + .bind(chain_id.cast_signed()) + .bind(tokens) + .bind(symbols) + .execute(pool) + .await + .context("batch_set_token_symbols")?; + + Ok(()) +} + +pub async fn get_latest_indexed_block(pool: &PgPool, chain_id: u64) -> Result> { + let row = sqlx::query( + "SELECT MAX(block_number) AS max_block FROM pool_indexer_checkpoints WHERE chain_id = $1", + ) + .bind(chain_id.cast_signed()) + .fetch_one(pool) + .await + .context("get_latest_indexed_block")?; + + Ok(row + .get::, _>("max_block") + .map(|b| b.cast_unsigned())) +} diff --git a/crates/pool-indexer/src/indexer/mod.rs b/crates/pool-indexer/src/indexer/mod.rs new file mode 100644 index 0000000000..c24144d0e0 --- /dev/null +++ b/crates/pool-indexer/src/indexer/mod.rs @@ -0,0 +1 @@ +pub mod uniswap_v3; diff --git a/crates/pool-indexer/src/indexer/uniswap_v3.rs b/crates/pool-indexer/src/indexer/uniswap_v3.rs new file mode 100644 index 0000000000..db1270eed8 --- /dev/null +++ b/crates/pool-indexer/src/indexer/uniswap_v3.rs @@ -0,0 +1,1247 @@ +use { + crate::{ + config::{IndexerConfig, NetworkName}, + db::uniswap_v3 as db, + }, + alloy::{ + primitives::{Address, B256, aliases::U160}, + providers::Provider, + rpc::types::{BlockNumberOrTag, Filter, FilterSet, Log}, + 
sol_types::SolEvent, + transports::RpcError, + }, + anyhow::{Context, Result}, + contracts::{ + ERC20, + IUniswapV3Factory::IUniswapV3Factory::PoolCreated, + UniswapV3Pool::UniswapV3Pool::{Burn, Initialize, Mint, Swap}, + }, + ethrpc::{AlloyProvider, alloy::errors::ContractErrorExt}, + futures::{StreamExt, TryStreamExt}, + sqlx::PgPool, + std::collections::HashMap, + tracing::instrument, +}; + +type LiquidityCache = HashMap<(Address, u64), u128>; +type DecimalsCache = HashMap; + +const SYMBOL_BACKFILL_BATCH_SIZE: usize = 500; + +/// Data for a newly discovered pool, sourced from a `PoolCreated` factory +/// event. +pub struct NewPoolData { + pub address: Address, + pub token0: Address, + pub token1: Address, + /// Raw fee in hundredths of a basis point (e.g. 3000 = 0.3 %). + pub fee: u32, + pub token0_decimals: Option, + pub token1_decimals: Option, + pub token0_symbol: Option, + pub token1_symbol: Option, + pub created_block: u64, +} + +/// Full pool state as of a given block, sourced from an `Initialize` or `Swap` +/// event (both carry the current price, liquidity, and tick). +pub struct PoolStateData { + pub pool_address: Address, + pub block_number: u64, + pub sqrt_price_x96: U160, + pub liquidity: u128, + pub tick: i32, +} + +/// A liquidity-only pool update sourced from a `Mint` or `Burn` event when no +/// `Swap` or `Initialize` has been seen for the pool in the same chunk. +pub struct LiquidityUpdateData { + pub pool_address: Address, + pub block_number: u64, + pub liquidity: u128, +} + +/// A signed liquidity delta for a single tick boundary, accumulated from +/// `Mint` (+amount) and `Burn` (-amount) events. +pub struct TickDeltaData { + pub pool_address: Address, + pub tick_idx: i32, + /// Net signed change to `liquidity_net` at this tick. + pub delta: i128, +} + +/// All state changes extracted from a single block-range chunk of logs, +/// ready to be written to the database in one transaction. 
+struct ChunkChanges { + new_pools: Vec, + /// Full state updates (from `Initialize` / `Swap`). + pool_states: Vec, + /// Liquidity-only updates (from `Mint`/`Burn` with no `Swap` in this + /// chunk). + liquidity_updates: Vec, + /// Accumulated tick deltas. + tick_deltas: Vec, +} + +#[derive(Clone, Copy, Debug)] +struct ChunkRange { + start: u64, + end: u64, +} + +struct PrefetchedChunkData { + liquidities: LiquidityCache, + decimals: DecimalsCache, +} + +/// Indexes Uniswap V3 events for a single factory contract, persisting pool +/// state and tick liquidity to the database. +pub struct UniswapV3Indexer { + provider: AlloyProvider, + db: PgPool, + network: NetworkName, + chain_id: u64, + factory: Address, + chunk_size: u64, + finality_tag: BlockNumberOrTag, + fetch_concurrency: usize, + prefetch_concurrency: usize, +} + +impl UniswapV3Indexer { + pub fn new(provider: AlloyProvider, db: PgPool, config: &IndexerConfig) -> Self { + Self { + provider, + db, + network: config.network.clone(), + chain_id: config.chain_id, + factory: config.factory_address, + chunk_size: config.chunk_size, + finality_tag: if config.use_latest { + BlockNumberOrTag::Latest + } else { + BlockNumberOrTag::Finalized + }, + fetch_concurrency: config.fetch_concurrency, + prefetch_concurrency: config.prefetch_concurrency, + } + } + + pub async fn run(self, poll_interval: std::time::Duration) -> ! 
{ + tokio::spawn(backfill_symbols( + self.provider.clone(), + self.db.clone(), + self.network.clone(), + self.chain_id, + self.prefetch_concurrency, + poll_interval, + )); + tokio::spawn(backfill_decimals( + self.provider.clone(), + self.db.clone(), + self.network.clone(), + self.chain_id, + self.prefetch_concurrency, + poll_interval, + )); + loop { + if let Err(err) = self.run_once().await { + crate::metrics::Metrics::get() + .indexer_errors + .with_label_values(&[self.network.as_str()]) + .inc(); + tracing::error!(?err, "indexer error, retrying after poll interval"); + } + tokio::time::sleep(poll_interval).await; + } + } + + /// Bootstrap helper: brings a fresh (chain, factory) up to the current + /// finalized block in one shot, then returns. Loops until no further + /// blocks remain (handles new blocks finalizing during a long catch-up). + /// Intended to run exactly once, right after seeding completes. + /// + /// The checkpoint stores the *last indexed* block, so to make the next + /// indexer pass start at `from_block` we initialize the checkpoint to + /// `from_block - 1`. Errors if a checkpoint already exists — overwriting + /// would silently regress progress and re-index history; callers should + /// guard with `if checkpoint.is_none()` before invoking. + pub async fn catch_up(&self, from_block: u64) -> Result<()> { + if db::get_checkpoint(&self.db, self.chain_id, &self.factory) + .await? 
+ .is_some() + { + anyhow::bail!( + "catch_up called but checkpoint already exists for chain {} factory {}", + self.chain_id, + self.factory, + ); + } + db::set_checkpoint( + &self.db, + self.chain_id, + &self.factory, + from_block.saturating_sub(1), + ) + .await?; + + loop { + let finalized_block = self.finalized_block().await?; + let last_indexed_block = self.last_indexed_block().await?; + + if last_indexed_block >= finalized_block { + tracing::info!(block = finalized_block, "caught up to finalized block"); + return Ok(()); + } + + self.run_once().await?; + } + } + + async fn run_once(&self) -> Result<()> { + let finalized_block = self.finalized_block().await?; + let last_indexed_block = self.last_indexed_block().await?; + + let lag = finalized_block.saturating_sub(last_indexed_block); + crate::metrics::Metrics::get() + .indexer_lag_blocks + .with_label_values(&[self.network.as_str()]) + .set(i64::try_from(lag).unwrap_or(0)); + + if last_indexed_block >= finalized_block { + return Ok(()); + } + + // Fetch chunks' logs in parallel; commit in order. + futures::stream::iter(self.pending_chunks(last_indexed_block, finalized_block)) + .map(|chunk| async move { + let logs = self.fetch_logs_bisecting(chunk.start, chunk.end).await?; + Ok::<_, anyhow::Error>((chunk, logs)) + }) + .buffered(self.fetch_concurrency) + .try_for_each(|(chunk, logs)| self.commit_chunk(chunk, logs)) + .await?; + + tracing::info!( + block = finalized_block, + blocks_processed = lag, + "live indexer caught up to finalized block", + ); + Ok(()) + } + + async fn finalized_block(&self) -> Result { + Ok(self + .provider + .get_block_by_number(self.finality_tag) + .await + .context("get finalized block")? + .context("no finalized block")? + .header + .number) + } + + async fn last_indexed_block(&self) -> Result { + Ok(db::get_checkpoint(&self.db, self.chain_id, &self.factory) + .await? 
+ .unwrap_or(0)) + } + + fn pending_chunks(&self, last_indexed_block: u64, finalized_block: u64) -> Vec { + let mut chunks = Vec::new(); + let mut next_start = last_indexed_block + 1; + + while next_start <= finalized_block { + let next_end = (next_start + self.chunk_size - 1).min(finalized_block); + chunks.push(ChunkRange { + start: next_start, + end: next_end, + }); + next_start = next_end + 1; + } + + chunks + } + + async fn fetch_logs_bisecting(&self, from: u64, to: u64) -> Result> { + // No address filter: `PoolCreated` is emitted by the factory but the + // other four events are emitted by each pool contract, and that + // address list (tens of thousands on mainnet) would blow past most + // RPCs' filter-size caps. `eth_getLogs` applies the address filter + // across all events at once, so we can't scope each topic + // independently. Instead, we filter client-side: + // - PoolCreated is matched against `self.factory` in + // `LogAccumulator::handle_pool_created`. + // - Mint/Burn/Swap/Initialize from unknown pools are silently dropped by the + // SQL `WHERE EXISTS (... uniswap_v3_pools ...)` guards in the batch + // writers. + bisecting_get_logs( + &self.provider, + from, + to, + vec![], + vec![ + PoolCreated::SIGNATURE_HASH, + Initialize::SIGNATURE_HASH, + Mint::SIGNATURE_HASH, + Burn::SIGNATURE_HASH, + Swap::SIGNATURE_HASH, + ], + ) + .await + } + + #[instrument(skip(self, logs), fields(chunk_start = chunk.start, chunk_end = chunk.end))] + async fn commit_chunk(&self, chunk: ChunkRange, logs: Vec) -> Result<()> { + // Pre-fetch all I/O (liquidity + decimals eth_calls) in parallel before + // opening the DB transaction. Symbols are intentionally excluded — a + // hung `symbol()` call must never block pool inserts. They're populated + // later by the async backfill task. 
+ let metrics = crate::metrics::Metrics::get(); + let chunk_timer_labels = [self.network.as_str()]; + let _chunk_timer = + crate::metrics::Metrics::timer(&metrics.chunk_commit_seconds, &chunk_timer_labels); + let prefetched = self.prefetch_chunk_data(&logs).await; + let changes = collect_log_changes( + self.factory, + &logs, + &prefetched.liquidities, + &prefetched.decimals, + ); + + tracing::debug!( + chunk_start = chunk.start, + chunk_end = chunk.end, + log_count = logs.len(), + new_pools = changes.new_pools.len(), + pool_states = changes.pool_states.len(), + liq_updates = changes.liquidity_updates.len(), + tick_deltas = changes.tick_deltas.len(), + "processing chunk" + ); + + let network = self.network.as_str(); + for (kind, count) in [ + ("new_pool", changes.new_pools.len()), + ("pool_state", changes.pool_states.len()), + ("liq_update", changes.liquidity_updates.len()), + ("tick_delta", changes.tick_deltas.len()), + ] { + metrics + .events_applied + .with_label_values(&[network, kind]) + .inc_by(count as u64); + } + + self.persist_chunk(chunk, changes).await?; + + metrics.chunks_committed.with_label_values(&[network]).inc(); + metrics + .indexed_block + .with_label_values(&[network]) + .set(i64::try_from(chunk.end).unwrap_or(0)); + Ok(()) + } + + async fn persist_chunk(&self, chunk: ChunkRange, changes: ChunkChanges) -> Result<()> { + let mut tx = self.db.begin().await.context("begin transaction")?; + db::insert_pools(&mut tx, self.chain_id, &self.factory, &changes.new_pools).await?; + db::upsert_pool_states(&mut tx, self.chain_id, &self.factory, &changes.pool_states).await?; + db::batch_update_pool_liquidity( + &mut tx, + self.chain_id, + &self.factory, + &changes.liquidity_updates, + ) + .await?; + db::batch_update_ticks(&mut tx, self.chain_id, &self.factory, &changes.tick_deltas).await?; + db::set_checkpoint(&mut *tx, self.chain_id, &self.factory, chunk.end).await?; + tx.commit().await.context("commit transaction")?; + + Ok(()) + } + + async fn 
prefetch_chunk_data(&self, logs: &[Log]) -> PrefetchedChunkData { + let (liquidities, decimals) = tokio::join!( + self.prefetch_liquidities(logs), + self.prefetch_decimals(logs), + ); + + PrefetchedChunkData { + liquidities, + decimals, + } + } + + /// Parallel-fetch liquidity for every unique (pool, block) pair from + /// Mint/Burn events. + async fn prefetch_liquidities(&self, logs: &[Log]) -> LiquidityCache { + let pairs: std::collections::HashSet<_> = logs + .iter() + .filter_map(|log| { + let t = log.topic0()?; + if *t == Mint::SIGNATURE_HASH || *t == Burn::SIGNATURE_HASH { + Some((log.address(), log.block_number?)) + } else { + None + } + }) + .collect(); + + futures::stream::iter(pairs) + .map(|(addr, block)| async move { + let liq = fetch_pool_liquidity(&self.provider, addr, block).await; + ((addr, block), liq) + }) + .buffer_unordered(self.prefetch_concurrency) + .filter_map(|(key, opt)| async move { opt.map(|v| (key, v)) }) + .collect() + .await + } + + /// Parallel-fetch ERC-20 decimals for all tokens referenced in PoolCreated + /// events. + async fn prefetch_decimals(&self, logs: &[Log]) -> DecimalsCache { + futures::stream::iter(pool_created_token_addresses(self.factory, logs)) + .map(|token| async move { + let dec = fetch_decimals(&self.provider, token).await; + (token, dec) + }) + .buffer_unordered(self.prefetch_concurrency) + .filter_map(|(token, opt)| async move { opt.map(|d| (token, d)) }) + .collect() + .await + } +} + +/// Wraps an alloy contract call with the indexer's standard retry policy: +/// retry only on transient transport errors (`is_node_error`); contract +/// reverts and missing-selector failures bail out immediately. On giveup, +/// invokes `on_giveup` with the accumulated errors and returns `None`. 
+async fn retry_node_call( + f: impl Fn() -> Fut, + on_giveup: impl FnOnce(&[alloy::contract::Error]), +) -> Option +where + Fut: std::future::Future>, +{ + match shared::retry::retry_with_sleep_if(f, |err: &alloy::contract::Error| err.is_node_error()) + .await + { + Ok(v) => Some(v), + Err(errors) => { + on_giveup(&errors); + None + } + } +} + +async fn fetch_pool_liquidity(provider: &AlloyProvider, pool: Address, block: u64) -> Option { + retry_node_call( + || async move { + contracts::UniswapV3Pool::Instance::new(pool, provider.clone()) + .liquidity() + .block(block.into()) + .call() + .await + }, + |errors| tracing::warn!(%pool, block, ?errors, "fetch_pool_liquidity gave up"), + ) + .await +} + +async fn fetch_decimals(provider: &AlloyProvider, token: Address) -> Option { + retry_node_call( + || async move { + ERC20::Instance::new(token, provider.clone()) + .decimals() + .call() + .await + }, + |errors| tracing::warn!(%token, ?errors, "fetch_decimals gave up"), + ) + .await +} + +/// Periodically fills in missing `token{0,1}_symbol` values on +/// `uniswap_v3_pools`. Runs forever, sleeping `poll_interval` between passes so +/// newly-indexed pools get their symbols backfilled. +/// +/// Tokens whose `symbol()` call fails (revert, decode error, empty result) are +/// persisted as the empty string so subsequent passes skip them — otherwise we +/// would hammer known-broken tokens on every tick. A process restart re-probes +/// them once (cheap, and useful if the earlier failure was transient). +async fn backfill_symbols( + provider: AlloyProvider, + db: sqlx::PgPool, + network: NetworkName, + chain_id: u64, + prefetch_concurrency: usize, + poll_interval: std::time::Duration, +) -> ! 
{ + loop { + if let Err(err) = + run_symbol_backfill_pass(&provider, &db, &network, chain_id, prefetch_concurrency).await + { + tracing::error!(?err, "token symbol backfill pass failed"); + } + tokio::time::sleep(poll_interval).await; + } +} + +async fn run_symbol_backfill_pass( + provider: &AlloyProvider, + db: &sqlx::PgPool, + network: &NetworkName, + chain_id: u64, + prefetch_concurrency: usize, +) -> Result<()> { + let tokens = db::get_tokens_missing_symbols(db, chain_id) + .await + .context("get_tokens_missing_symbols")?; + let network = network.as_str(); + crate::metrics::Metrics::get() + .symbols_pending + .with_label_values(&[network]) + // -1 surfaces the impossible-but-defensive `usize → i64` overflow as + // a visible signal in metrics rather than masquerading as "no work + // pending". + .set(i64::try_from(tokens.len()).unwrap_or(-1)); + if tokens.is_empty() { + return Ok(()); + } + let total = tokens.len(); + tracing::info!(total, "backfilling token symbols"); + + let mut updated = 0usize; + let mut processed = 0usize; + + for token_batch in tokens.chunks(SYMBOL_BACKFILL_BATCH_SIZE) { + let symbols: Vec<(Address, String)> = futures::stream::iter(token_batch.iter().copied()) + .map(|token| async move { + // `None` → "" sentinel: marks the token as "tried and failed" so + // the next backfill pass's `IS NULL` filter skips it. 
+ let sym = fetch_symbol(provider, token).await.unwrap_or_default(); + (token, sym) + }) + .buffer_unordered(prefetch_concurrency) + .collect() + .await; + + let metrics = crate::metrics::Metrics::get(); + match db::batch_set_token_symbols(db, chain_id, &symbols).await { + Ok(()) => { + for (_, symbol) in &symbols { + updated += 1; + let result = if symbol.is_empty() { "empty" } else { "ok" }; + metrics + .symbols_backfilled + .with_label_values(&[network, result]) + .inc(); + } + } + Err(err) => tracing::warn!( + ?err, + batch_size = symbols.len(), + "failed to backfill symbols batch" + ), + } + + processed += token_batch.len(); + tracing::info!(processed, total, updated, "token symbol backfill progress"); + } + + tracing::info!(updated, total, "token symbol backfill pass complete"); + Ok(()) +} + +/// Periodically fills in missing `token{0,1}_decimals` values on +/// `uniswap_v3_pools`. Same shape as [`backfill_symbols`]: sleeps +/// `poll_interval` between passes, persists `-1` as the "tried and failed" +/// sentinel for tokens whose `decimals()` call fails so subsequent passes +/// skip them. A process restart re-probes them once. +async fn backfill_decimals( + provider: AlloyProvider, + db: sqlx::PgPool, + network: NetworkName, + chain_id: u64, + prefetch_concurrency: usize, + poll_interval: std::time::Duration, +) -> ! 
{ + loop { + if let Err(err) = + run_decimals_backfill_pass(&provider, &db, &network, chain_id, prefetch_concurrency) + .await + { + tracing::error!(?err, "token decimals backfill pass failed"); + } + tokio::time::sleep(poll_interval).await; + } +} + +async fn run_decimals_backfill_pass( + provider: &AlloyProvider, + db: &sqlx::PgPool, + network: &NetworkName, + chain_id: u64, + prefetch_concurrency: usize, +) -> Result<()> { + let tokens = db::get_tokens_missing_decimals(db, chain_id) + .await + .context("get_tokens_missing_decimals")?; + let network = network.as_str(); + crate::metrics::Metrics::get() + .decimals_pending + .with_label_values(&[network]) + // -1: see `symbols_pending` above for the rationale. + .set(i64::try_from(tokens.len()).unwrap_or(-1)); + if tokens.is_empty() { + return Ok(()); + } + let total = tokens.len(); + tracing::info!(total, "backfilling token decimals"); + + let mut updated = 0usize; + let mut processed = 0usize; + + for token_batch in tokens.chunks(SYMBOL_BACKFILL_BATCH_SIZE) { + let decimals: Vec<(Address, i16)> = futures::stream::iter(token_batch.iter().copied()) + .map(|token| async move { + // `None` → `-1` sentinel: marks the token as "tried and + // failed" so the next backfill pass's `IS NULL` filter skips + // it. 
+ let dec = fetch_decimals(provider, token) + .await + .map(i16::from) + .unwrap_or(-1); + (token, dec) + }) + .buffer_unordered(prefetch_concurrency) + .collect() + .await; + + let metrics = crate::metrics::Metrics::get(); + match db::batch_set_token_decimals(db, chain_id, &decimals).await { + Ok(()) => { + for (_, dec) in &decimals { + updated += 1; + let result = if *dec < 0 { "empty" } else { "ok" }; + metrics + .decimals_backfilled + .with_label_values(&[network, result]) + .inc(); + } + } + Err(err) => tracing::warn!( + ?err, + batch_size = decimals.len(), + "failed to backfill decimals batch" + ), + } + + processed += token_batch.len(); + tracing::info!( + processed, + total, + updated, + "token decimals backfill progress" + ); + } + + tracing::info!(updated, total, "token decimals backfill pass complete"); + Ok(()) +} + +async fn fetch_symbol(provider: &AlloyProvider, token: Address) -> Option { + let sym = ERC20::Instance::new(token, provider.clone()) + .symbol() + .call() + .await + .ok()?; + // Strip null bytes — some tokens embed \x00 in their symbol which Postgres + // rejects. + let cleaned = sym.replace('\x00', ""); + (!cleaned.is_empty()).then_some(cleaned) +} + +/// True if the server-side JSON-RPC payload rejected `eth_getLogs` for +/// being too wide / returning too many logs / exceeding a response-size +/// cap / hitting the server's query timeout. Substrings cover the +/// rejections empirically seen on OVH and Alchemy mainnet. Transport-level +/// errors (HTTP timeouts, DNS, connection resets) live in other `RpcError` +/// variants and short-circuit to false, so client-side noise can't trigger +/// pointless bisection. 
+pub(crate) fn is_range_too_large(err: &alloy::transports::TransportError) -> bool { + let RpcError::ErrorResp(payload) = err else { + return false; + }; + let msg = payload.message.to_lowercase(); + msg.contains("max block range") + || msg.contains("max results") + || msg.contains("log response size exceeded") + || msg.contains("query timeout exceeded") + || msg.contains("response is too big") +} + +/// Bisecting bound — substring matching on RPC error messages is necessarily +/// approximate, and a misclassified error would otherwise burn `log2(range)` +/// RPC calls before the recursion bottoms out at `to == from`. 8 halvings = +/// 256× resolution; for the indexer's ~1k-block chunks that means giving up +/// around ~4-block ranges, well past where range-size could plausibly still +/// be the cause. +const MAX_BISECTION_DEPTH: u32 = 8; + +/// Fetches logs for `[from, to]` filtered by the given contract addresses +/// and `topic0` event signatures, sequentially bisecting the block range on +/// "too large" rejections until each sub-range is tractable. An empty +/// `addresses` list means "any contract". Bisection depth is capped by +/// [`MAX_BISECTION_DEPTH`]. +pub(crate) fn bisecting_get_logs( + provider: &AlloyProvider, + from: u64, + to: u64, + addresses: Vec
, + topics: Vec, +) -> futures::future::BoxFuture<'_, Result>> { + bisecting_get_logs_with_depth(provider, from, to, addresses, topics, 0) +} + +fn bisecting_get_logs_with_depth( + provider: &AlloyProvider, + from: u64, + to: u64, + addresses: Vec
, + topics: Vec, + depth: u32, +) -> futures::future::BoxFuture<'_, Result>> { + Box::pin(async move { + let filter = Filter::new() + .address(addresses.clone()) + .event_signature(FilterSet::from_iter(topics.clone())) + .from_block(from) + .to_block(to); + + let err = match provider.get_logs(&filter).await { + Ok(logs) => return Ok(logs), + Err(err) => err, + }; + let too_large = is_range_too_large(&err); + if too_large && to > from && depth < MAX_BISECTION_DEPTH { + let mid = (from + to) / 2; + tracing::debug!(from, to, mid, depth, "range too large, bisecting"); + let mut left = bisecting_get_logs_with_depth( + provider, + from, + mid, + addresses.clone(), + topics.clone(), + depth + 1, + ) + .await?; + let right = + bisecting_get_logs_with_depth(provider, mid + 1, to, addresses, topics, depth + 1) + .await?; + left.extend(right); + Ok(left) + } else { + Err(anyhow::Error::new(err).context(format!("get_logs({from}..={to})"))) + } + }) +} + +/// Collects the unique set of token addresses from all `PoolCreated` events +/// emitted by `factory` in `logs`. +fn pool_created_token_addresses( + factory: Address, + logs: &[Log], +) -> std::collections::HashSet
{ + logs.iter() + .filter_map(|log| { + let t = log.topic0()?; + if *t != PoolCreated::SIGNATURE_HASH || log.address() != factory { + return None; + } + let decoded = PoolCreated::decode_log(&log.inner).ok()?; + Some([decoded.data.token0, decoded.data.token1]) + }) + .flatten() + .collect() +} + +/// Accumulates per-event-type state changes while iterating over a chunk's +/// logs. +#[derive(Default)] +struct LogAccumulator { + new_pools: HashMap, + /// Latest full state per pool, established by `Initialize` or `Swap`. + full_states: HashMap, + /// Liquidity-only update per pool, used when no full state exists yet in + /// the chunk (i.e. neither `Initialize` nor `Swap` has been seen). + liq_only: HashMap, + /// Accumulated signed tick-liquidity deltas, keyed by `(pool, tick_idx)`. + tick_deltas: HashMap<(Address, i32), i128>, +} + +impl LogAccumulator { + /// Records a newly discovered pool, filling decimals from the prefetch + /// cache. Symbols are left `None` here and populated later by the + /// background backfill task. + fn handle_pool_created(&mut self, log: &Log, dec_cache: &DecimalsCache) { + let decoded = match PoolCreated::decode_log(&log.inner) { + Ok(d) => d, + Err(err) => { + tracing::warn!(?err, pool = %log.address(), block = ?log.block_number, "failed to decode PoolCreated log"); + return; + } + }; + let e = &decoded.data; + let pool: Address = e.pool; + let token0: Address = e.token0; + let token1: Address = e.token1; + let created_block = log.block_number.unwrap_or_default(); + tracing::debug!(%pool, %token0, %token1, fee = e.fee.to::(), "discovered pool"); + self.new_pools.insert( + pool, + NewPoolData { + address: pool, + token0, + token1, + fee: e.fee.to::(), + token0_decimals: dec_cache.get(&token0).copied(), + token1_decimals: dec_cache.get(&token1).copied(), + token0_symbol: None, + token1_symbol: None, + created_block, + }, + ); + } + + /// Records the initial price and tick from an `Initialize` event. 
+ /// Preserves any liquidity already seen for this pool earlier in the chunk. + fn handle_initialize(&mut self, log: &Log) { + let decoded = match Initialize::decode_log(&log.inner) { + Ok(d) => d, + Err(err) => { + tracing::warn!(?err, pool = %log.address(), block = ?log.block_number, "failed to decode Initialize log"); + return; + } + }; + let e = &decoded.data; + let pool = log.address(); + let block = log.block_number.unwrap_or_default(); + let liquidity = self + .full_states + .get(&pool) + .map(|s| s.liquidity) + .unwrap_or(0); + self.full_states.insert( + pool, + PoolStateData { + pool_address: pool, + block_number: block, + sqrt_price_x96: e.sqrtPriceX96, + liquidity, + tick: e.tick.as_i32(), + }, + ); + self.liq_only.remove(&pool); + } + + /// Records a full pool-state update (price, liquidity, tick) from a `Swap`. + fn handle_swap(&mut self, log: &Log) { + let decoded = match Swap::decode_log(&log.inner) { + Ok(d) => d, + Err(err) => { + tracing::warn!(?err, pool = %log.address(), block = ?log.block_number, "failed to decode Swap log"); + return; + } + }; + let e = &decoded.data; + let pool = log.address(); + let block = log.block_number.unwrap_or_default(); + self.full_states.insert( + pool, + PoolStateData { + pool_address: pool, + block_number: block, + sqrt_price_x96: e.sqrtPriceX96, + liquidity: e.liquidity, + tick: e.tick.as_i32(), + }, + ); + self.liq_only.remove(&pool); + } + + /// Applies positive tick-liquidity deltas from a `Mint` and refreshes + /// pool liquidity from the prefetch cache. 
+ fn handle_mint(&mut self, log: &Log, liq_cache: &LiquidityCache) { + let decoded = match Mint::decode_log(&log.inner) { + Ok(d) => d, + Err(err) => { + tracing::warn!(?err, pool = %log.address(), block = ?log.block_number, "failed to decode Mint log"); + return; + } + }; + let e = &decoded.data; + let pool = log.address(); + let block = log.block_number.unwrap_or_default(); + let amount = e.amount.cast_signed(); + self.record_tick_range_delta(pool, e.tickLower.as_i32(), e.tickUpper.as_i32(), amount); + self.update_liquidity_from_cache(pool, block, liq_cache); + } + + /// Applies negative tick-liquidity deltas from a `Burn` and refreshes + /// pool liquidity from the prefetch cache. + fn handle_burn(&mut self, log: &Log, liq_cache: &LiquidityCache) { + let decoded = match Burn::decode_log(&log.inner) { + Ok(d) => d, + Err(err) => { + tracing::warn!(?err, pool = %log.address(), block = ?log.block_number, "failed to decode Burn log"); + return; + } + }; + let e = &decoded.data; + let pool = log.address(); + let block = log.block_number.unwrap_or_default(); + let amount = e.amount.cast_signed(); + self.record_tick_range_delta(pool, e.tickLower.as_i32(), e.tickUpper.as_i32(), -amount); + self.update_liquidity_from_cache(pool, block, liq_cache); + } + + fn record_tick_range_delta( + &mut self, + pool: Address, + lower_tick: i32, + upper_tick: i32, + liquidity_delta: i128, + ) { + *self.tick_deltas.entry((pool, lower_tick)).or_default() += liquidity_delta; + *self.tick_deltas.entry((pool, upper_tick)).or_default() -= liquidity_delta; + } + + /// Refreshes the stored liquidity for `pool` at `block` using the + /// prefetch cache. Updates the existing full state in-place if one exists, + /// otherwise stores a liquidity-only record. 
+ fn update_liquidity_from_cache( + &mut self, + pool: Address, + block: u64, + liq_cache: &LiquidityCache, + ) { + if let Some(&liq) = liq_cache.get(&(pool, block)) { + if let Some(state) = self.full_states.get_mut(&pool) { + state.liquidity = liq; + state.block_number = block; + } else { + self.liq_only.insert(pool, (block, liq)); + } + } + } + + fn into_chunk_changes(self) -> ChunkChanges { + ChunkChanges { + new_pools: self.new_pools.into_values().collect(), + pool_states: self.full_states.into_values().collect(), + liquidity_updates: self + .liq_only + .into_iter() + .map(|(pool, (block, liq))| LiquidityUpdateData { + pool_address: pool, + block_number: block, + liquidity: liq, + }) + .collect(), + tick_deltas: self + .tick_deltas + .into_iter() + .filter(|(_, d)| *d != 0) + .map(|((pool, tick), delta)| TickDeltaData { + pool_address: pool, + tick_idx: tick, + delta, + }) + .collect(), + } + } +} + +fn collect_log_changes( + factory: Address, + logs: &[Log], + liq_cache: &LiquidityCache, + dec_cache: &DecimalsCache, +) -> ChunkChanges { + let mut acc = LogAccumulator::default(); + for log in logs { + let Some(t) = log.topic0() else { continue }; + match *t { + t if t == PoolCreated::SIGNATURE_HASH && log.address() == factory => { + acc.handle_pool_created(log, dec_cache); + } + t if t == Initialize::SIGNATURE_HASH => acc.handle_initialize(log), + t if t == Swap::SIGNATURE_HASH => acc.handle_swap(log), + t if t == Mint::SIGNATURE_HASH => acc.handle_mint(log, liq_cache), + t if t == Burn::SIGNATURE_HASH => acc.handle_burn(log, liq_cache), + _ => {} + } + } + acc.into_chunk_changes() +} + +#[cfg(test)] +mod tests { + use { + super::*, + alloy::{ + primitives::{ + I256, + U256, + aliases::{I24, U24, U160}, + }, + sol_types::SolEvent, + }, + contracts::{ + IUniswapV3Factory::IUniswapV3Factory::PoolCreated, + UniswapV3Pool::UniswapV3Pool::{Burn, Initialize, Mint, Swap}, + }, + }; + + const FACTORY: Address = Address::repeat_byte(0xFA); + const POOL: Address = 
Address::repeat_byte(0x01); + const TOKEN0: Address = Address::repeat_byte(0x02); + const TOKEN1: Address = Address::repeat_byte(0x03); + // `sqrtPriceX96 = sqrt(price) * 2^96` is Uniswap V3's Q64.96 fixed-point + // price representation; for `price = 1` this is exactly `2^96`. + const SQRT_PRICE_1: u128 = 79_228_162_514_264_337_593_543_950_336; + + fn t(n: i32) -> I24 { + I24::try_from(n).unwrap() + } + + fn make_log(address: Address, block: u64, event: impl SolEvent) -> Log { + Log { + inner: alloy_primitives::Log { + address, + data: event.encode_log_data(), + }, + block_number: Some(block), + block_hash: None, + block_timestamp: None, + transaction_hash: None, + transaction_index: None, + log_index: None, + removed: false, + } + } + + #[test] + fn empty_logs_produce_empty_changes() { + let c = collect_log_changes(FACTORY, &[], &Default::default(), &Default::default()); + assert!(c.new_pools.is_empty()); + assert!(c.pool_states.is_empty()); + assert!(c.liquidity_updates.is_empty()); + assert!(c.tick_deltas.is_empty()); + } + + #[test] + fn pool_created_from_factory_inserted() { + let event = PoolCreated { + token0: TOKEN0, + token1: TOKEN1, + fee: U24::from(500u32), + tickSpacing: t(10), + pool: POOL, + }; + let log = make_log(FACTORY, 100, event); + let c = collect_log_changes(FACTORY, &[log], &Default::default(), &Default::default()); + assert_eq!(c.new_pools.len(), 1); + assert_eq!(c.new_pools[0].address, POOL); + assert_eq!(c.new_pools[0].fee, 500); + } + + #[test] + fn pool_created_wrong_factory_ignored() { + let event = PoolCreated { + token0: TOKEN0, + token1: TOKEN1, + fee: U24::from(500u32), + tickSpacing: t(10), + pool: POOL, + }; + let log = make_log(Address::repeat_byte(0xBB), 100, event); + let c = collect_log_changes(FACTORY, &[log], &Default::default(), &Default::default()); + assert!(c.new_pools.is_empty()); + } + + #[test] + fn initialize_creates_full_state_with_zero_liquidity() { + let event = Initialize { + sqrtPriceX96: 
U160::from(SQRT_PRICE_1), + tick: t(0), + }; + let log = make_log(POOL, 100, event); + let c = collect_log_changes(FACTORY, &[log], &Default::default(), &Default::default()); + assert_eq!(c.pool_states.len(), 1); + assert_eq!(c.pool_states[0].pool_address, POOL); + assert_eq!(c.pool_states[0].block_number, 100); + assert_eq!(c.pool_states[0].tick, 0); + assert_eq!(c.pool_states[0].liquidity, 0); + } + + #[test] + fn swap_creates_full_state() { + let event = Swap { + sender: Address::ZERO, + recipient: Address::ZERO, + amount0: I256::ZERO, + amount1: I256::ZERO, + sqrtPriceX96: U160::from(SQRT_PRICE_1), + liquidity: 500_000u128, + tick: t(42), + }; + let log = make_log(POOL, 200, event); + let c = collect_log_changes(FACTORY, &[log], &Default::default(), &Default::default()); + assert_eq!(c.pool_states.len(), 1); + assert_eq!(c.pool_states[0].tick, 42); + assert_eq!(c.pool_states[0].liquidity, 500_000); + assert_eq!(c.pool_states[0].block_number, 200); + } + + #[test] + fn mint_produces_correct_tick_deltas_and_liq_only() { + let amount = 1_000_000u128; + let event = Mint { + sender: Address::ZERO, + owner: Address::ZERO, + tickLower: t(-100), + tickUpper: t(100), + amount, + amount0: U256::ZERO, + amount1: U256::ZERO, + }; + let liq_cache: LiquidityCache = HashMap::from([((POOL, 100u64), amount)]); + let log = make_log(POOL, 100, event); + let c = collect_log_changes(FACTORY, &[log], &liq_cache, &Default::default()); + + assert_eq!(c.tick_deltas.len(), 2); + let lower = c.tick_deltas.iter().find(|d| d.tick_idx == -100).unwrap(); + let upper = c.tick_deltas.iter().find(|d| d.tick_idx == 100).unwrap(); + assert_eq!(lower.delta, amount.cast_signed()); + assert_eq!(upper.delta, -amount.cast_signed()); + + // No prior full state → goes into liq_only + assert_eq!(c.liquidity_updates.len(), 1); + assert_eq!(c.liquidity_updates[0].liquidity, amount); + assert!(c.pool_states.is_empty()); + } + + #[test] + fn mint_after_swap_updates_full_state_liquidity() { + let swap_liq = 
500_000u128; + let after_mint_liq = 600_000u128; + + let swap = Swap { + sender: Address::ZERO, + recipient: Address::ZERO, + amount0: I256::ZERO, + amount1: I256::ZERO, + sqrtPriceX96: U160::from(SQRT_PRICE_1), + liquidity: swap_liq, + tick: t(0), + }; + let mint = Mint { + sender: Address::ZERO, + owner: Address::ZERO, + tickLower: t(-100), + tickUpper: t(100), + amount: 100_000u128, + amount0: U256::ZERO, + amount1: U256::ZERO, + }; + let liq_cache: LiquidityCache = HashMap::from([((POOL, 201u64), after_mint_liq)]); + let logs = vec![make_log(POOL, 200, swap), make_log(POOL, 201, mint)]; + let c = collect_log_changes(FACTORY, &logs, &liq_cache, &Default::default()); + + assert_eq!(c.pool_states.len(), 1); + // Swap established full_state; Mint updated its liquidity from the cache. + assert_eq!(c.pool_states[0].liquidity, after_mint_liq); + assert_eq!(c.pool_states[0].block_number, 201); + assert!(c.liquidity_updates.is_empty()); + } + + #[test] + fn burn_zeroes_tick_filtered_out() { + let amount = 1_000_000u128; + let mint = Mint { + sender: Address::ZERO, + owner: Address::ZERO, + tickLower: t(-100), + tickUpper: t(100), + amount, + amount0: U256::ZERO, + amount1: U256::ZERO, + }; + let burn = Burn { + owner: Address::ZERO, + tickLower: t(-100), + tickUpper: t(100), + amount, + amount0: U256::ZERO, + amount1: U256::ZERO, + }; + let logs = vec![make_log(POOL, 100, mint), make_log(POOL, 101, burn)]; + let c = collect_log_changes(FACTORY, &logs, &Default::default(), &Default::default()); + assert!(c.tick_deltas.is_empty(), "zero-net ticks must be pruned"); + } + + #[test] + fn partial_burn_leaves_nonzero_delta() { + let mint_amount = 1_000_000u128; + let burn_amount = 400_000u128; + let mint = Mint { + sender: Address::ZERO, + owner: Address::ZERO, + tickLower: t(-100), + tickUpper: t(100), + amount: mint_amount, + amount0: U256::ZERO, + amount1: U256::ZERO, + }; + let burn = Burn { + owner: Address::ZERO, + tickLower: t(-100), + tickUpper: t(100), + amount: 
burn_amount, + amount0: U256::ZERO, + amount1: U256::ZERO, + }; + let logs = vec![make_log(POOL, 100, mint), make_log(POOL, 101, burn)]; + let c = collect_log_changes(FACTORY, &logs, &Default::default(), &Default::default()); + + let expected = (mint_amount - burn_amount).cast_signed(); + let lower = c.tick_deltas.iter().find(|d| d.tick_idx == -100).unwrap(); + let upper = c.tick_deltas.iter().find(|d| d.tick_idx == 100).unwrap(); + assert_eq!(lower.delta, expected); + assert_eq!(upper.delta, -expected); + } + + #[test] + fn pool_created_and_initialize_same_chunk() { + let created = PoolCreated { + token0: TOKEN0, + token1: TOKEN1, + fee: U24::from(3000u32), + tickSpacing: t(60), + pool: POOL, + }; + let init = Initialize { + sqrtPriceX96: U160::from(SQRT_PRICE_1), + tick: t(0), + }; + let logs = vec![make_log(FACTORY, 100, created), make_log(POOL, 100, init)]; + let c = collect_log_changes(FACTORY, &logs, &Default::default(), &Default::default()); + assert_eq!(c.new_pools.len(), 1); + assert_eq!(c.pool_states.len(), 1); + assert_eq!(c.pool_states[0].pool_address, POOL); + } +} diff --git a/crates/pool-indexer/src/lib.rs b/crates/pool-indexer/src/lib.rs new file mode 100644 index 0000000000..916da099fa --- /dev/null +++ b/crates/pool-indexer/src/lib.rs @@ -0,0 +1,11 @@ +pub mod config; +pub use run::{run, start}; + +mod api; +mod arguments; +mod cold_seeder; +mod db; +mod indexer; +mod metrics; +mod run; +mod subgraph_seeder; diff --git a/crates/pool-indexer/src/main.rs b/crates/pool-indexer/src/main.rs new file mode 100644 index 0000000000..646eb280fd --- /dev/null +++ b/crates/pool-indexer/src/main.rs @@ -0,0 +1,12 @@ +#[cfg(feature = "mimalloc-allocator")] +#[global_allocator] +static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; + +#[cfg(not(feature = "mimalloc-allocator"))] +#[global_allocator] +static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; + +#[tokio::main] +async fn main() { + pool_indexer::start(std::env::args()).await; +} 
diff --git a/crates/pool-indexer/src/metrics.rs b/crates/pool-indexer/src/metrics.rs new file mode 100644 index 0000000000..a2d599b8b1 --- /dev/null +++ b/crates/pool-indexer/src/metrics.rs @@ -0,0 +1,109 @@ +//! Prometheus metrics for pool-indexer. +//! +//! All metrics live under the `pool_indexer_` prefix (configured by +//! `observe::metrics::setup_registry`) and are labelled by `network` where +//! more than one network is active in the same process. Call `Metrics::get()` +//! to reach the shared registry-backed instance. + +use {prometheus::HistogramVec, prometheus_metric_storage::MetricStorage}; + +#[derive(MetricStorage)] +#[metric(subsystem = "pool_indexer")] +pub struct Metrics { + /// Chunks successfully committed to the DB. + #[metric(labels("network"))] + pub chunks_committed: prometheus::IntCounterVec, + + /// Per-chunk commit duration in seconds. + #[metric( + labels("network"), + buckets(0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0) + )] + pub chunk_commit_seconds: HistogramVec, + + /// Events applied to the DB, labelled by type. + #[metric(labels("network", "kind"))] + pub events_applied: prometheus::IntCounterVec, + + /// Highest block committed by the live indexer for this chain. + #[metric(labels("network"))] + pub indexed_block: prometheus::IntGaugeVec, + + /// Lag (in blocks) between the chain's finalized/latest tip and the + /// indexer's checkpoint. Sampled each polling tick. + #[metric(labels("network"))] + pub indexer_lag_blocks: prometheus::IntGaugeVec, + + /// Unrecoverable `run_once` errors that forced a retry. + #[metric(labels("network"))] + pub indexer_errors: prometheus::IntCounterVec, + + /// Duration of each phase of the cold-seed bootstrap. + #[metric( + labels("network", "phase"), + buckets( + 1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0, 1_200.0, 1_800.0, 3_600.0 + ) + )] + pub cold_seed_phase_seconds: HistogramVec, + + /// Pools discovered by the cold seeder (phase 1). 
+ #[metric(labels("network"))] + pub cold_seed_pools_discovered: prometheus::IntGaugeVec, + + /// Duration of the full subgraph seed (pool page fetch + tick fetch). + #[metric( + labels("network"), + buckets(1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0) + )] + pub subgraph_seed_seconds: HistogramVec, + + /// Symbols written to the DB (label: `result` = `ok` for a real symbol, + /// `empty` for the "tried and failed" sentinel). + #[metric(labels("network", "result"))] + pub symbols_backfilled: prometheus::IntCounterVec, + + /// Tokens still needing a symbol, sampled each backfill pass. + #[metric(labels("network"))] + pub symbols_pending: prometheus::IntGaugeVec, + + /// Decimals written to the DB (label: `result` = `ok` for a real value, + /// `empty` for the "tried and failed" `-1` sentinel). + #[metric(labels("network", "result"))] + pub decimals_backfilled: prometheus::IntCounterVec, + + /// Tokens still needing a decimals value, sampled each backfill pass. + #[metric(labels("network"))] + pub decimals_pending: prometheus::IntGaugeVec, + + /// API request count by route + HTTP status. + #[metric(labels("route", "status"))] + pub api_requests: prometheus::IntCounterVec, + + /// API request duration. + #[metric( + labels("route"), + buckets(0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5) + )] + pub api_request_seconds: HistogramVec, +} + +impl Metrics { + pub fn get() -> &'static Self { + Self::instance(observe::metrics::get_storage_registry()) + .expect("unexpected pool_indexer metrics duplicate registration") + } + + /// Returns a guard that records the elapsed time on a histogram when it's + /// dropped. Use with `let _timer = Metrics::timer(&hist, &[..]);` at the + /// top of a function / block. Cleaner than manual `Instant::now()` + + /// observe pairs, and records even on early return. 
+ #[must_use] + pub fn timer<'a>(hist: &'a HistogramVec, labels: &'a [&'a str]) -> impl Drop + use<'a> { + let start = std::time::Instant::now(); + scopeguard::guard(start, move |start| { + hist.with_label_values(labels) + .observe(start.elapsed().as_secs_f64()); + }) + } +} diff --git a/crates/pool-indexer/src/run.rs b/crates/pool-indexer/src/run.rs new file mode 100644 index 0000000000..7716a6644e --- /dev/null +++ b/crates/pool-indexer/src/run.rs @@ -0,0 +1,219 @@ +use { + crate::{ + api::AppState, + arguments::Arguments, + config::{Configuration, NetworkConfig}, + indexer::uniswap_v3::UniswapV3Indexer, + }, + alloy::providers::Provider, + clap::Parser, + ethrpc::{AlloyProvider, Config as EthRpcConfig, web3}, + sqlx::{PgPool, postgres::PgPoolOptions}, + std::sync::Arc, + tokio::task::JoinSet, +}; + +pub async fn start(args: impl Iterator) { + let args = Arguments::parse_from(args); + initialize_observability(); + observe::metrics::setup_registry(Some("pool_indexer".into()), None); + let config = Configuration::from_path(&args.config).expect("failed to load configuration"); + tracing::info!("pool-indexer starting"); + run(config).await; +} + +pub async fn run(config: Configuration) { + let db = connect_db(&config).await; + let api_state = build_api_state(&db, &config.networks); + + observe::metrics::serve_metrics( + Arc::new(AlwaysAlive), + config.metrics.bind_address, + Default::default(), + Default::default(), + ); + + let mut set = JoinSet::new(); + let api_router = crate::api::router(api_state); + let api_addr = config.api.bind_address; + set.spawn(async move { serve(api_router, api_addr).await }); + + for network in config.networks { + let db = db.clone(); + set.spawn(async move { run_network_indexer(db, network).await }); + } + + if let Some(result) = set.join_next().await { + panic!("pool-indexer task exited: {result:?}"); + } +} + +/// Minimal liveness that always reports alive. 
The indexer panics on +/// unrecoverable faults, so if the process is up it's alive. +struct AlwaysAlive; + +#[async_trait::async_trait] +impl observe::metrics::LivenessChecking for AlwaysAlive { + async fn is_alive(&self) -> bool { + true + } +} + +fn initialize_observability() { + let log_filter = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".into()); + observe::tracing::init::initialize(&observe::Config::new(&log_filter, None, false, None)); + observe::panic_hook::install(); +} + +fn build_api_state(db: &PgPool, networks: &[NetworkConfig]) -> Arc { + let networks = networks + .iter() + .map(|network| (network.name.clone(), network.chain_id)) + .collect(); + + Arc::new(AppState { + db: db.clone(), + networks, + }) +} + +async fn run_network_indexer(db: PgPool, network: NetworkConfig) { + tracing::info!( + network = %network.name, + chain_id = network.chain_id, + factories = network.factories.len(), + "starting network indexer", + ); + + let provider = build_provider(&network); + + // Verify the configured chain_id matches the RPC. A misconfigured + // deployment (e.g. chain_id = 1 pointed at an Arbitrum RPC) would + // otherwise silently index Arbitrum events into the mainnet partition + // of the shared DB. + let actual_chain_id = provider + .get_chain_id() + .await + .expect("failed to fetch chain_id from RPC"); + assert_eq!( + actual_chain_id, network.chain_id, + "chain_id mismatch for network {}: config says {}, RPC reports {}", + network.name, network.chain_id, actual_chain_id, + ); + + let network = Arc::new(network); + + // One indexer task per factory, sharing the same provider and DB pool. + // Seeder + catch-up are per-factory because their checkpoints are keyed + // by `(chain_id, contract)`. 
+ let mut factory_set = JoinSet::new(); + for factory in network.factories.iter().copied() { + let indexer = UniswapV3Indexer::new( + provider.clone(), + db.clone(), + &network.indexer_config(factory.address), + ); + factory_set.spawn(run_factory_indexer( + db.clone(), + provider.clone(), + indexer, + network.clone(), + factory, + )); + } + + if let Some(result) = factory_set.join_next().await { + panic!("pool-indexer factory task exited: {result:?}"); + } +} + +async fn run_factory_indexer( + db: PgPool, + provider: AlloyProvider, + indexer: UniswapV3Indexer, + network: Arc, + factory: crate::config::FactoryConfig, +) { + tracing::info!( + network = %network.name, + chain_id = network.chain_id, + factory = %factory.address, + "starting factory indexer", + ); + + bootstrap_factory(&db, &provider, &indexer, &network, &factory).await; + indexer.run(network.poll_interval()).await; +} + +/// Seed + catch-up for a fresh `(chain, factory)`. A pre-existing checkpoint +/// means this pair has already been bootstrapped (e.g. a prior run seeded +/// it), in which case we skip straight to live indexing. 
+async fn bootstrap_factory( + db: &PgPool, + provider: &AlloyProvider, + indexer: &UniswapV3Indexer, + network: &NetworkConfig, + factory: &crate::config::FactoryConfig, +) { + let checkpoint = crate::db::uniswap_v3::get_checkpoint(db, network.chain_id, &factory.address) + .await + .expect("failed to read checkpoint"); + if checkpoint.is_some() { + return; + } + + let seeded_block = if let Some(subgraph_url) = network.subgraph_url.as_ref() { + crate::subgraph_seeder::seed( + db, + network.name.as_str(), + network.chain_id, + factory.address, + subgraph_url, + network.seed_block, + ) + .await + .expect("subgraph seeding failed") + } else { + crate::cold_seeder::cold_seed( + db, + network.name.as_str(), + network.chain_id, + provider.clone(), + factory.address, + factory.deployment_block, + network.seed_block, + ) + .await + .expect("cold seeding failed") + }; + indexer + .catch_up(seeded_block) + .await + .expect("catch-up indexing failed"); +} + +fn build_provider(network: &NetworkConfig) -> AlloyProvider { + web3( + EthRpcConfig::default(), + &network.rpc_url, + Some(&format!("pool-indexer-{}", network.name)), + ) + .provider + .clone() +} + +async fn connect_db(config: &Configuration) -> sqlx::PgPool { + PgPoolOptions::new() + .max_connections(config.database.max_connections.get()) + .connect(config.database.url.as_str()) + .await + .expect("failed to connect to database") +} + +async fn serve(router: axum::Router, addr: std::net::SocketAddr) { + let listener = tokio::net::TcpListener::bind(addr) + .await + .expect("failed to bind TCP listener"); + tracing::info!(%addr, "serving pool-indexer API"); + axum::serve(listener, router).await.expect("server error"); +} diff --git a/crates/pool-indexer/src/subgraph_seeder.rs b/crates/pool-indexer/src/subgraph_seeder.rs new file mode 100644 index 0000000000..db96d46ab7 --- /dev/null +++ b/crates/pool-indexer/src/subgraph_seeder.rs @@ -0,0 +1,443 @@ +//! Bootstraps the pool-indexer database from a Uniswap V3 subgraph. 
+//! +//! Seeding happens in two phases: +//! +//! 1. **Pools** — all pools and their current state are fetched with keyset +//! pagination and written to the DB in page-sized transactions. +//! 2. **Ticks** — each pool's ticks are fetched concurrently (up to +//! [`TICK_CONCURRENCY`] at a time) and buffered; the existing tick rows are +//! then deleted and the buffered set inserted in a single transaction so the +//! API never observes an empty tick set mid-reseed. +//! +//! Both phases query the subgraph at the same fixed block number so the +//! snapshot is consistent. After seeding, the caller should invoke +//! `UniswapV3Indexer::catch_up` to replay any blocks the subgraph has already +//! processed but that aren't yet in the DB. + +use { + crate::{ + db::uniswap_v3 as db, + indexer::uniswap_v3::{NewPoolData, PoolStateData, TickDeltaData}, + }, + alloy_primitives::{Address, aliases::U160}, + anyhow::{Context, Result, bail}, + futures::{StreamExt, TryStreamExt}, + reqwest::Client, + serde::Deserialize, + serde_json::{Value, json}, + sqlx::PgPool, + std::time::Duration, + tracing::{info, instrument}, + url::Url, +}; + +/// Number of pools (or ticks) returned per GraphQL page. +const PAGE_SIZE: usize = 1000; + +/// Maximum number of pools whose ticks are fetched concurrently. +const TICK_CONCURRENCY: usize = 50; + +/// Timeout for individual subgraph HTTP requests. +const SUBGRAPH_REQUEST_TIMEOUT: Duration = Duration::from_secs(30); + +/// Cursor value below the minimum Uniswap V3 tick index (-887272), ensuring the +/// first GraphQL page includes the lowest possible tick. 
+const TICK_IDX_CURSOR_START: i64 = -887_273; + +#[derive(Deserialize)] +struct GqlResponse { + data: Option, + errors: Option, +} + +#[derive(Deserialize)] +struct PoolsPage { + pools: Vec, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct SubgraphPool { + id: String, + token0: SubgraphToken, + token1: SubgraphToken, + fee_tier: String, + created_at_block_number: String, + sqrt_price: String, + liquidity: String, + tick: Option, +} + +#[derive(Deserialize)] +struct SubgraphToken { + id: String, + decimals: String, + symbol: Option, +} + +#[derive(Deserialize)] +struct TicksPage { + ticks: Vec, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct SubgraphTick { + tick_idx: String, + liquidity_net: String, +} + +/// Wrapper around the `{ _meta { block { number } } }` GraphQL response. +#[derive(Deserialize)] +struct MetaPage { + #[serde(rename = "_meta")] + meta: MetaInfo, +} + +#[derive(Deserialize)] +struct MetaInfo { + block: MetaBlock, +} + +#[derive(Deserialize)] +struct MetaBlock { + number: u64, +} + +#[derive(Clone)] +struct SubgraphClient { + http: Client, + url: Url, +} + +impl SubgraphClient { + fn new(url: &Url) -> Result { + let http = Client::builder() + .timeout(SUBGRAPH_REQUEST_TIMEOUT) + .build() + .context("build HTTP client")?; + + Ok(Self { + http, + url: url.clone(), + }) + } + + /// Executes a GraphQL query and deserialises the `data` field. + /// Returns an error if the response contains a top-level `errors` array. 
+ async fn query Deserialize<'de>>(&self, query: &str, vars: Value) -> Result { + let response = self + .http + .post(self.url.as_str()) + .json(&json!({ "query": query, "variables": vars })) + .send() + .await + .context("subgraph HTTP request")?; + + let gql_response: GqlResponse = + response.json().await.context("decode subgraph response")?; + if let Some(errors) = gql_response.errors { + bail!("subgraph errors: {errors}"); + } + + let data = gql_response.data.context("missing data field")?; + serde_json::from_value(data).context("decode subgraph data") + } + + async fn current_block(&self) -> Result { + let page: MetaPage = self + .query("{ _meta { block { number } } }", json!({})) + .await?; + Ok(page.meta.block.number) + } + + /// Fetches one page of pools at `block`, ordered by id and starting after + /// `cursor` (empty string to start from the beginning). + async fn fetch_pools_page(&self, block: u64, cursor: &str) -> Result> { + let query = "query($block: Int!, $cursor: String!) { + pools(first: 1000, orderBy: id, where: {id_gt: $cursor}, block: {number: $block}) { + id + token0 { id decimals symbol } + token1 { id decimals symbol } + feeTier + createdAtBlockNumber + sqrtPrice + liquidity + tick + } + }"; + + let page: PoolsPage = self + .query(query, json!({ "block": block, "cursor": cursor })) + .await?; + Ok(page.pools) + } + + /// Fetches all ticks for `pool_id` at `block` using keyset pagination. + /// Returns each tick as a [`TickDeltaData`] where `delta` is the subgraph's + /// `liquidityNet` (treated as an absolute value, not a running delta). + async fn fetch_ticks_for_pool( + &self, + pool_id: String, + block: u64, + ) -> Result> { + let query = "query($pool: String!, $cursor: Int!, $block: Int!) 
{ + ticks( + first: 1000, + orderBy: tickIdx, + where: { pool: $pool, tickIdx_gt: $cursor }, + block: { number: $block } + ) { + tickIdx + liquidityNet + } + }"; + + let pool_address: Address = pool_id.parse().context("parse pool address")?; + let mut ticks = Vec::new(); + let mut cursor = TICK_IDX_CURSOR_START; + + loop { + let page: TicksPage = self + .query( + query, + json!({ "pool": pool_id, "cursor": cursor, "block": block }), + ) + .await?; + + for tick in &page.ticks { + ticks.push(TickDeltaData { + pool_address, + tick_idx: tick.tick_idx.parse().context("parse tickIdx")?, + delta: tick.liquidity_net.parse().context("parse liquidityNet")?, + }); + } + + if page.ticks.len() < PAGE_SIZE { + break; + } + + cursor = ticks.last().expect("tick page is non-empty").tick_idx as i64; + } + + Ok(ticks) + } +} + +struct SubgraphSeeder<'a> { + db: &'a PgPool, + chain_id: u64, + factory: Address, + subgraph: SubgraphClient, + snapshot_block: u64, +} + +impl<'a> SubgraphSeeder<'a> { + async fn new( + db: &'a PgPool, + chain_id: u64, + factory: Address, + subgraph_url: &Url, + block: Option, + ) -> Result { + let subgraph = SubgraphClient::new(subgraph_url)?; + let snapshot_block = match block { + Some(block) => block, + None => subgraph + .current_block() + .await + .context("fetch current subgraph block")?, + }; + + Ok(Self { + db, + chain_id, + factory, + subgraph, + snapshot_block, + }) + } + + #[instrument(skip_all, fields(chain_id = self.chain_id))] + async fn seed(self) -> Result { + info!( + block = self.snapshot_block, + "seeding pool-indexer from subgraph" + ); + + let pool_ids = self.seed_pools().await?; + let total_ticks = self.seed_ticks(&pool_ids).await?; + + info!( + block = self.snapshot_block, + pools = pool_ids.len(), + ticks = total_ticks, + "seeding complete" + ); + Ok(self.snapshot_block) + } + + async fn seed_pools(&self) -> Result> { + let mut all_pool_ids = Vec::new(); + let mut cursor = String::new(); + + loop { + let page = self + .subgraph + 
.fetch_pools_page(self.snapshot_block, &cursor) + .await?; + + all_pool_ids.extend(self.persist_pool_page(&page).await?); + info!(total = all_pool_ids.len(), "pools seeded"); + + if page.len() < PAGE_SIZE { + break; + } + + cursor = page.last().expect("full pages are non-empty").id.clone(); + } + + info!( + total = all_pool_ids.len(), + "all pools seeded — starting tick seeding" + ); + Ok(all_pool_ids) + } + + async fn persist_pool_page(&self, page: &[SubgraphPool]) -> Result> { + let mut pool_ids = Vec::with_capacity(page.len()); + let mut new_pools = Vec::with_capacity(page.len()); + let mut pool_states = Vec::with_capacity(page.len()); + + for pool in page { + let (pool_id, new_pool, pool_state) = parse_seeded_pool(pool, self.snapshot_block)?; + pool_ids.push(pool_id); + new_pools.push(new_pool); + + if let Some(pool_state) = pool_state { + pool_states.push(pool_state); + } + } + + let mut tx = self.db.begin().await.context("begin pool tx")?; + db::insert_pools(&mut tx, self.chain_id, &self.factory, &new_pools).await?; + db::upsert_pool_states(&mut tx, self.chain_id, &self.factory, &pool_states).await?; + tx.commit().await.context("commit pool tx")?; + + Ok(pool_ids) + } + + async fn seed_ticks(&self, pool_ids: &[String]) -> Result { + // All delete + insert work happens inside one transaction so the + // API never observes an empty tick set mid-reseed. Scoped to + // `self.factory` so a reseed doesn't wipe another factory's ticks + // on the same chain. + // + // Subgraph fetches run outside the transaction — the result is + // buffered and only the final DB writes are transactional, which + // keeps the tx short and avoids holding a DB connection during + // slow HTTP I/O. 
+ let mut all_ticks: Vec = Vec::new(); + for pool_batch in pool_ids.chunks(TICK_CONCURRENCY) { + let ticks = self.fetch_tick_batch(pool_batch).await?; + all_ticks.extend(ticks); + info!(total = all_ticks.len(), "ticks fetched"); + } + + let total_ticks = all_ticks.len(); + let mut tx = self.db.begin().await.context("begin tick reseed tx")?; + db::delete_ticks_for_factory(&mut *tx, self.chain_id, &self.factory).await?; + if !all_ticks.is_empty() { + db::batch_seed_ticks(&mut *tx, self.chain_id, &self.factory, &all_ticks).await?; + } + tx.commit().await.context("commit tick reseed tx")?; + + info!(total = total_ticks, "ticks seeded"); + Ok(total_ticks) + } + + async fn fetch_tick_batch(&self, pool_batch: &[String]) -> Result> { + let subgraph = self.subgraph.clone(); + let snapshot_block = self.snapshot_block; + + let tick_batches: Vec> = + futures::stream::iter(pool_batch.iter().cloned()) + .map(move |pool_id| { + let subgraph = subgraph.clone(); + async move { subgraph.fetch_ticks_for_pool(pool_id, snapshot_block).await } + }) + .buffer_unordered(TICK_CONCURRENCY) + .try_collect() + .await?; + + Ok(tick_batches.into_iter().flatten().collect()) + } +} + +fn parse_seeded_pool( + pool: &SubgraphPool, + snapshot_block: u64, +) -> Result<(String, NewPoolData, Option)> { + let address: Address = pool.id.parse().context("parse pool id")?; + let new_pool = NewPoolData { + address, + token0: pool.token0.id.parse().context("parse token0")?, + token1: pool.token1.id.parse().context("parse token1")?, + fee: pool.fee_tier.parse().context("parse feeTier")?, + token0_decimals: pool.token0.decimals.parse::().ok(), + token1_decimals: pool.token1.decimals.parse::().ok(), + token0_symbol: pool.token0.symbol.clone(), + token1_symbol: pool.token1.symbol.clone(), + created_block: pool + .created_at_block_number + .parse() + .context("parse createdAtBlockNumber")?, + }; + + Ok(( + pool.id.clone(), + new_pool, + parse_seeded_pool_state(pool, address, snapshot_block)?, + )) +} + +fn 
parse_seeded_pool_state( + pool: &SubgraphPool, + address: Address, + snapshot_block: u64, +) -> Result> { + let Some(tick) = pool.tick.as_deref() else { + return Ok(None); + }; + if pool.sqrt_price == "0" { + return Ok(None); + } + + Ok(Some(PoolStateData { + pool_address: address, + block_number: snapshot_block, + sqrt_price_x96: pool.sqrt_price.parse::().context("parse sqrtPrice")?, + liquidity: pool.liquidity.parse().context("parse liquidity")?, + tick: tick.parse().context("parse tick")?, + })) +} + +/// Seeds pools and ticks from the subgraph and returns the block number that +/// was seeded. The caller is responsible for catching up to the current +/// finalized block via `catch_up`. +pub async fn seed( + db: &PgPool, + network: &str, + chain_id: u64, + factory: Address, + subgraph_url: &Url, + block: Option, +) -> Result { + let labels = [network]; + let m = crate::metrics::Metrics::get(); + let _timer = crate::metrics::Metrics::timer(&m.subgraph_seed_seconds, &labels); + SubgraphSeeder::new(db, chain_id, factory, subgraph_url, block) + .await? 
+ .seed() + .await +} diff --git a/crates/shared/Cargo.toml b/crates/shared/Cargo.toml index ab8b09f759..1fbf5824e6 100644 --- a/crates/shared/Cargo.toml +++ b/crates/shared/Cargo.toml @@ -36,7 +36,6 @@ hex-literal = { workspace = true } humantime = { workspace = true } indexmap = { workspace = true } itertools = { workspace = true } -liquidity-sources = { workspace = true } mockall = { workspace = true, optional = true } model = { workspace = true } moka = { workspace = true, features = ["sync"] } diff --git a/crates/shared/src/lib.rs b/crates/shared/src/lib.rs index aebbb50b83..86e60a410b 100644 --- a/crates/shared/src/lib.rs +++ b/crates/shared/src/lib.rs @@ -12,6 +12,7 @@ pub mod interaction; pub mod order_quoting; pub mod order_validation; pub mod remaining_amounts; +pub mod retry; pub mod token_list; pub mod url; pub mod web3; diff --git a/crates/shared/src/retry.rs b/crates/shared/src/retry.rs new file mode 100644 index 0000000000..7fbe22c679 --- /dev/null +++ b/crates/shared/src/retry.rs @@ -0,0 +1,52 @@ +//! Bounded retry-with-sleep for transient failures. +//! +//! Calls the supplied future-producing closure up to a fixed number of times, +//! sleeping with small jitter between attempts so concurrent retriers don't +//! all wake up at once. Returns `Ok` on the first success, otherwise an +//! `Err(Vec)` containing every observed error in order — useful for +//! diagnosing whether a permanent failure was masked as a flake. +use { + rand::Rng, + std::{future::Future, time::Duration}, +}; + +const MAX_RETRIES: usize = 5; + +/// Retry on every error. +pub async fn retry_with_sleep(future: impl Fn() -> F) -> Result> +where + F: Future>, + E: std::fmt::Debug, +{ + retry_with_sleep_if(future, |_| true).await +} + +/// Retry only when `should_retry(&err)` returns true. Permanent errors +/// (e.g. contract reverts, bad input) bail out immediately so callers don't +/// waste sleep budget on something that won't get better. 
+pub async fn retry_with_sleep_if( + future: impl Fn() -> F, + should_retry: P, +) -> Result> +where + F: Future>, + E: std::fmt::Debug, + P: Fn(&E) -> bool, +{ + let mut errors = Vec::new(); + for attempt in 0..MAX_RETRIES { + match future().await { + Ok(value) => return Ok(value), + Err(err) => { + let retryable = should_retry(&err); + errors.push(err); + if !retryable || attempt + 1 == MAX_RETRIES { + return Err(errors); + } + let timeout_with_jitter = 50u64 + rand::rng().random_range(0..=50); + tokio::time::sleep(Duration::from_millis(timeout_with_jitter)).await; + } + } + } + Err(errors) +} diff --git a/database/sql/V110__pool_indexer_uniswap_v3.sql b/database/sql/V110__pool_indexer_uniswap_v3.sql new file mode 100644 index 0000000000..108b3befe4 --- /dev/null +++ b/database/sql/V110__pool_indexer_uniswap_v3.sql @@ -0,0 +1,64 @@ +-- Tracks the highest finalized block fully processed per chain+contract +CREATE TABLE pool_indexer_checkpoints ( + chain_id BIGINT NOT NULL, + contract BYTEA NOT NULL, -- factory or pool address + block_number BIGINT NOT NULL, + PRIMARY KEY (chain_id, contract) +); + +-- One row per discovered pool (from PoolCreated events on the factory). +-- `factory` is the emitting factory's address; it partitions the table so each +-- indexer writes only to its own rows on chains where multiple V3-compatible +-- factories are configured (same chain's logs are fetched chain-wide). 
+CREATE TABLE uniswap_v3_pools ( + chain_id BIGINT NOT NULL, + address BYTEA NOT NULL, -- pool address + factory BYTEA NOT NULL, + token0 BYTEA NOT NULL, + token1 BYTEA NOT NULL, + fee INT NOT NULL CHECK (fee > 0), -- hundredths of a basis point (500 = 0.05%, 3000 = 0.3%, 10000 = 1%) + token0_decimals SMALLINT, + token1_decimals SMALLINT, + token0_symbol TEXT, + token1_symbol TEXT, + created_block BIGINT NOT NULL, + PRIMARY KEY (chain_id, address) +); + +-- Current state of each pool (updated on every Swap or Initialize) +CREATE TABLE uniswap_v3_pool_states ( + chain_id BIGINT NOT NULL, + pool_address BYTEA NOT NULL, + block_number BIGINT NOT NULL, + sqrt_price_x96 NUMERIC NOT NULL, -- uint160 + liquidity NUMERIC NOT NULL, -- uint128 + -- `tick` is the Uniswap V3 *price tick index* (signed int24), not a + -- database index. It's the discrete log-price coordinate from the pool's + -- last Swap/Initialize event. See also `uniswap_v3_ticks.tick_idx` below. + tick INT NOT NULL, + PRIMARY KEY (chain_id, pool_address), + FOREIGN KEY (chain_id, pool_address) REFERENCES uniswap_v3_pools(chain_id, address) +); + +-- Active ticks per pool (rows with liquidity_net = 0 are pruned). +-- `tick_idx` is the Uniswap V3 price tick coordinate (signed int24) — the +-- same domain as `uniswap_v3_pool_states.tick`, just one row per active +-- tick boundary instead of the pool's current tick. +CREATE TABLE uniswap_v3_ticks ( + chain_id BIGINT NOT NULL, + pool_address BYTEA NOT NULL, + tick_idx INT NOT NULL, + liquidity_net NUMERIC NOT NULL, -- int128 (can be negative) + PRIMARY KEY (chain_id, pool_address, tick_idx), + FOREIGN KEY (chain_id, pool_address) REFERENCES uniswap_v3_pools(chain_id, address) +); + +-- Symbol- and decimals-backfill hot paths: both `get_tokens_missing_*` +-- (scan for NULL columns) and the batched `batch_set_token_*` updates hit +-- these. 
Partial on the IS NULL predicate so each index shrinks to near-empty +-- once most rows are populated (real value or `""` / `-1` "tried, failed" +-- sentinel). +CREATE INDEX ON uniswap_v3_pools (chain_id, token0) WHERE token0_symbol IS NULL; +CREATE INDEX ON uniswap_v3_pools (chain_id, token1) WHERE token1_symbol IS NULL; +CREATE INDEX ON uniswap_v3_pools (chain_id, token0) WHERE token0_decimals IS NULL; +CREATE INDEX ON uniswap_v3_pools (chain_id, token1) WHERE token1_decimals IS NULL;