diff --git a/Cargo.lock b/Cargo.lock index 304d49d..d0f59cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -399,6 +399,48 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-config" +version = "1.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a8fc176d53d6fe85017f230405e3255cedb4a02221cb55ed6d76dccbbb099b2" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sdk-sso", + "aws-sdk-ssooidc", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "hex", + "http 1.4.0", + "ring", + "time", + "tokio", + "tracing", + "url", + "zeroize", +] + +[[package]] +name = "aws-credential-types" +version = "1.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d203b0bf2626dcba8665f5cd0871d7c2c0930223d6b6be9097592fea21242d0" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] + [[package]] name = "aws-lc-rs" version = "1.16.2" @@ -421,6 +463,384 @@ dependencies = [ "fs_extra", ] +[[package]] +name = "aws-runtime" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede2ddc593e6c8acc6ce3358c28d6677a6dc49b65ba4b37a2befe14a11297e75" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "bytes-utils", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "http-body 1.0.1", + "percent-encoding", + "pin-project-lite", + "tracing", + "uuid", +] + +[[package]] +name = "aws-sdk-s3" +version = "1.124.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "744c09d75dfec039a05cf8e117c995ded3b0baffa6eb83f3ed7075a01d8d8947" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-checksums", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "hex", + "hmac", + "http 0.2.12", + "http 1.4.0", + "http-body 1.0.1", + "lru", + "percent-encoding", + "regex-lite", + "sha2 0.10.9", + "tracing", + "url", +] + +[[package]] +name = "aws-sdk-sso" +version = "1.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00c5ff27c6ba2cbd95e6e26e2e736676fdf6bcf96495b187733f521cfe4ce448" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ssooidc" +version = "1.97.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d186f1e5a3694a188e5a0640b3115ccc6e084d104e16fd6ba968dca072ffef8" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sts" +version = "1.99.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9acba7c62f3d4e2408fa998a3a8caacd8b9a5b5549cf36e2372fbdae329d5449" +dependencies = [ + "aws-credential-types", + "aws-runtime", + 
"aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-observability", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37411f8e0f4bea0c3ca0958ce7f18f6439db24d555dbd809787262cd00926aa9" +dependencies = [ + "aws-credential-types", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "crypto-bigint 0.5.5", + "form_urlencoded", + "hex", + "hmac", + "http 0.2.12", + "http 1.4.0", + "p256 0.11.1", + "percent-encoding", + "ring", + "sha2 0.10.9", + "subtle", + "time", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc50d0f63e714784b84223abd7abbc8577de8c35d699e0edd19f0a88a08ae13" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "aws-smithy-checksums" +version = "0.64.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180dddf5ef0f52a2f99e2fada10e16ea610e507ef6148a42bdc4d5867596aa00" +dependencies = [ + "aws-smithy-http", + "aws-smithy-types", + "bytes", + "crc-fast", + "hex", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "md-5", + "pin-project-lite", + "sha1", + "sha2 0.10.9", + "tracing", +] + +[[package]] +name = "aws-smithy-eventstream" +version = "0.60.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c0b3e587fbaa5d7f7e870544508af8ce82ea47cd30376e69e1e37c4ac746f79" +dependencies = [ + "aws-smithy-types", + "bytes", + "crc32fast", +] + +[[package]] +name = "aws-smithy-http" +version = "0.63.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d619373d490ad70966994801bc126846afaa0d1ee920697a031f0cf63f2568e7" +dependencies = [ + "aws-smithy-eventstream", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-http-client" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00ccbb08c10f6bcf912f398188e42ee2eab5f1767ce215a02a73bc5df1bbdd95" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "h2 0.3.27", + "h2 0.4.13", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper 1.8.1", + "hyper-rustls 0.24.2", + "hyper-rustls 0.27.7", + "hyper-util", + "pin-project-lite", + "rustls 0.21.12", + "rustls 0.23.37", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.4", + "tower 0.5.3", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.62.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27b3a779093e18cad88bbae08dc4261e1d95018c4c5b9356a52bcae7c0b6e9bb" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-observability" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3f39d5bb871aaf461d59144557f16d5927a5248a983a40654d9cf3b9ba183b" +dependencies = [ + "aws-smithy-runtime-api", +] + +[[package]] +name = "aws-smithy-query" +version = "0.60.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f76a580e3d8f8961e5d48763214025a2af65c2fa4cd1fb7f270a0e107a71b0" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"22ccf7f6eba8b2dcf8ce9b74806c6c185659c311665c4bf8d6e71ebd454db6bf" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-http-client", + "aws-smithy-observability", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "pin-utils", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4af6e5def28be846479bbeac55aa4603d6f7986fc5da4601ba324dd5d377516" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.12", + "http 1.4.0", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca2734c16913a45343b37313605d84e7d8b34a4611598ce1d25b35860a2bed3" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.4.0", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "itoa", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b53543b4b86ed43f051644f704a98c7291b3618b67adf057ee77a366fa52fcaa" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0470cc047657c6e286346bdf10a8719d26efd6a91626992e0e64481e44323e96" +dependencies = [ + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version", + "tracing", +] + [[package]] name = "axum" version = "0.7.9" @@ -432,7 +852,7 @@ dependencies = [ "bytes", 
"futures-util", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "itoa", "matchit 0.7.3", @@ -460,9 +880,9 @@ dependencies = [ "form_urlencoded", "futures-util", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-util", "itoa", "matchit 0.8.4", @@ -492,7 +912,7 @@ dependencies = [ "bytes", "futures-util", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -511,7 +931,7 @@ dependencies = [ "bytes", "futures-core", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -531,8 +951,9 @@ dependencies = [ "axum-core 0.5.6", "bytes", "futures-util", + "headers", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -563,8 +984,8 @@ dependencies = [ "bytes", "fs-err", "http 1.4.0", - "http-body", - "hyper", + "http-body 1.0.1", + "hyper 1.8.1", "hyper-util", "tokio", "tower-service", @@ -581,6 +1002,12 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base16ct" version = "0.2.0" @@ -605,6 +1032,16 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64-simd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +dependencies = [ + "outref", + "vsimd", +] + [[package]] name = "base64ct" version = "1.8.3" @@ -797,6 +1234,16 @@ version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" +[[package]] +name = "bytes-utils" 
+version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] + [[package]] name = "bytesize" version = "1.3.3" @@ -1256,6 +1703,33 @@ dependencies = [ "libc", ] +[[package]] +name = "crc" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "crc-fast" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd92aca2c6001b1bf5ba0ff84ee74ec8501b52bbef0cac80bf25a6c1d87a83d" +dependencies = [ + "crc", + "digest 0.10.7", + "rustversion", + "spin", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -1349,6 +1823,18 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -1394,7 +1880,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "group", + "group 0.13.0", "rand_core 0.6.4", "rustc_version", "subtle", @@ -1549,6 +2035,16 @@ version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" +[[package]] +name = "der" +version = "0.6.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "der" version = "0.7.10" @@ -1692,18 +2188,30 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", +] + [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der", + "der 0.7.10", "digest 0.10.7", - "elliptic-curve", - "rfc6979", - "signature", - "spki", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", + "signature 2.2.0", + "spki 0.7.3", ] [[package]] @@ -1712,8 +2220,8 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8", - "signature", + "pkcs8 0.10.2", + "signature 2.2.0", ] [[package]] @@ -1743,7 +2251,7 @@ dependencies = [ "rand_core 0.6.4", "serde", "sha2 0.10.9", - "signature", + "signature 2.2.0", "subtle", "zeroize", ] @@ -1754,22 +2262,42 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct 0.1.1", + "crypto-bigint 0.4.9", 
+ "der 0.6.1", + "digest 0.10.7", + "ff 0.12.1", + "generic-array", + "group 0.12.1", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "sec1 0.3.0", + "subtle", + "zeroize", +] + [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct", - "crypto-bigint", + "base16ct 0.2.0", + "crypto-bigint 0.5.5", "digest 0.10.7", - "ff", + "ff 0.13.1", "generic-array", - "group", + "group 0.13.0", "pem-rfc7468", - "pkcs8", + "pkcs8 0.10.2", "rand_core 0.6.4", - "sec1", + "sec1 0.7.3", "subtle", "zeroize", ] @@ -1882,6 +2410,16 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff" version = "0.13.1" @@ -2115,7 +2653,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls", + "rustls 0.23.37", "rustls-pki-types", ] @@ -2229,17 +2767,47 @@ dependencies = [ "web-time", ] +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", + "ff 0.13.1", "rand_core 0.6.4", "subtle", ] +[[package]] +name = "h2" +version 
= "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap 2.13.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "h2" version = "0.4.13" @@ -2327,6 +2895,30 @@ dependencies = [ "num-traits", ] +[[package]] +name = "headers" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3314d5adb5d94bcdf56771f2e50dbbc80bb4bdf88967526706205ac9eff24eb" +dependencies = [ + "base64 0.22.1", + "bytes", + "headers-core", + "http 1.4.0", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http 1.4.0", +] + [[package]] name = "heck" version = "0.5.0" @@ -2369,20 +2961,20 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "h2", + "h2 0.4.13", "http 1.4.0", "idna", "ipnet", "once_cell", "rand 0.9.2", "ring", - "rustls", + "rustls 0.23.37", "rustls-pki-types", "thiserror 2.0.18", "time", "tinyvec", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tracing", "url", "webpki-roots 0.26.11", @@ -2403,11 +2995,11 @@ dependencies = [ "parking_lot", "rand 0.9.2", "resolv-conf", - "rustls", + "rustls 0.23.37", "smallvec", "thiserror 2.0.18", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tracing", "webpki-roots 0.26.11", ] @@ -2462,6 +3054,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + [[package]] name = "http-body" version 
= "1.0.1" @@ -2481,7 +3084,7 @@ dependencies = [ "bytes", "futures-core", "http 1.4.0", - "http-body", + "http-body 1.0.1", "pin-project-lite", ] @@ -2510,7 +3113,7 @@ dependencies = [ "futures", "http 1.4.0", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-util", "log", "once_cell", @@ -2527,6 +3130,30 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.27", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "hyper" version = "1.8.1" @@ -2537,9 +3164,9 @@ dependencies = [ "bytes", "futures-channel", "futures-core", - "h2", + "h2 0.4.13", "http 1.4.0", - "http-body", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -2550,6 +3177,21 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.32", + "log", + "rustls 0.21.12", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.7" @@ -2557,15 +3199,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http 1.4.0", - "hyper", + "hyper 1.8.1", "hyper-util", "log", - "rustls", + "rustls 0.23.37", "rustls-native-certs", "rustls-pki-types", "rustls-platform-verifier", "tokio", - 
"tokio-rustls", + "tokio-rustls 0.26.4", "tower-service", "webpki-roots 1.0.6", ] @@ -2576,7 +3218,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper", + "hyper 1.8.1", "hyper-util", "pin-project-lite", "tokio", @@ -2594,8 +3236,8 @@ dependencies = [ "futures-channel", "futures-util", "http 1.4.0", - "http-body", - "hyper", + "http-body 1.0.1", + "hyper 1.8.1", "ipnet", "libc", "percent-encoding", @@ -2648,13 +3290,13 @@ dependencies = [ "bytes", "cached 0.56.0", "candid", - "ecdsa", + "ecdsa 0.16.9", "ed25519-consensus", - "elliptic-curve", + "elliptic-curve 0.13.8", "futures-util", "hex", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "ic-certification 3.1.0", "ic-ed25519", @@ -2663,14 +3305,14 @@ dependencies = [ "ic_principal", "k256", "leb128", - "p256", + "p256 0.13.2", "pem", - "pkcs8", + "pkcs8 0.10.2", "rand 0.10.0", "rangemap", "reqwest 0.13.2", "ring", - "sec1", + "sec1 0.7.3", "serde", "serde_bytes", "serde_cbor", @@ -2713,11 +3355,11 @@ dependencies = [ "hickory-proto", "hickory-resolver", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "humantime", - "hyper", - "hyper-rustls", + "hyper 1.8.1", + "hyper-rustls 0.27.7", "hyper-util", "ic-agent", "ic-bn-lib-common", @@ -2734,7 +3376,7 @@ dependencies = [ "rcgen 0.14.7", "regex", "reqwest 0.13.2", - "rustls", + "rustls 0.23.37", "rustls-acme", "rustls-pemfile", "rustls-platform-verifier", @@ -2755,7 +3397,7 @@ dependencies = [ "thiserror 2.0.18", "tokio", "tokio-io-timeout", - "tokio-rustls", + "tokio-rustls 0.26.4", "tokio-util", "tower 0.5.3", "tower-service", @@ -2788,7 +3430,7 @@ dependencies = [ "hickory-resolver", "http 1.4.0", "humantime", - "hyper", + "hyper 1.8.1", "hyper-util", "ic-agent", "instant-acme", @@ -2796,7 +3438,7 @@ dependencies = [ "prometheus", "rcgen 0.14.7", "reqwest 0.13.2", - "rustls", + "rustls 
0.23.37", "serde", "socket2 0.6.3", "strum 0.27.2", @@ -3072,7 +3714,12 @@ dependencies = [ "ahash", "anyhow", "arc-swap", + "async-stream", "async-trait", + "aws-config", + "aws-sdk-s3", + "aws-smithy-http-client", + "aws-smithy-runtime-api", "axum 0.8.8", "axum-extra", "bytes", @@ -3089,12 +3736,14 @@ dependencies = [ "hickory-resolver", "hostname", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "httptest", "humantime", "ic-bn-lib", "ic-bn-lib-common", + "ic-certificate-verification 3.1.0", + "ic-certification 3.1.0", "ic-certified-assets", "ic-custom-domains-backend", "ic-custom-domains-base", @@ -3113,7 +3762,7 @@ dependencies = [ "rand_regex", "regex", "reqwest 0.13.2", - "rustls", + "rustls 0.23.37", "serde", "serde_cbor", "serde_json", @@ -3178,7 +3827,7 @@ dependencies = [ "candid", "futures", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "ic-agent", "ic-http-certification 3.1.0", @@ -3362,8 +4011,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1e828f9e804ccefe4b9b15b2195f474c60fd4f95ccd14fcb554eb6d7dfafde3" dependencies = [ "digest 0.10.7", - "ff", - "group", + "ff 0.13.1", + "group 0.13.0", "pairing", "rand_core 0.6.4", "subtle", @@ -3557,15 +4206,15 @@ dependencies = [ "base64 0.22.1", "bytes", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "httpdate", - "hyper", - "hyper-rustls", + "hyper 1.8.1", + "hyper-rustls 0.27.7", "hyper-util", "rcgen 0.14.7", "ring", - "rustls", + "rustls 0.23.37", "rustls-pki-types", "serde", "serde_json", @@ -3751,11 +4400,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "once_cell", "sha2 0.10.9", - "signature", + "signature 2.2.0", ] [[package]] @@ -3860,6 +4509,15 @@ version = "0.4.29" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +[[package]] +name = "lru" +version = "0.16.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f66e8d5d03f609abc3a39e6f08e4164ebf1447a732906d39eb9b99b7919ef39" +dependencies = [ + "hashbrown 0.16.1", +] + [[package]] name = "lru-slab" version = "0.1.2" @@ -3909,6 +4567,16 @@ dependencies = [ "thiserror 2.0.18", ] +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest 0.10.7", +] + [[package]] name = "memchr" version = "2.8.0" @@ -4332,14 +5000,25 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", + "sha2 0.10.9", +] + [[package]] name = "p256" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", "sha2 0.10.9", ] @@ -4350,7 +5029,7 @@ version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group", + "group 0.13.0", ] [[package]] @@ -4481,14 +5160,24 @@ dependencies = [ "futures-io", ] +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", +] + [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der", - "spki", + "der 0.7.10", + "spki 0.7.3", ] [[package]] @@ -4687,7 +5376,7 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ - "elliptic-curve", + "elliptic-curve 0.13.8", ] [[package]] @@ -4841,7 +5530,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls", + "rustls 0.23.37", "socket2 0.6.3", "thiserror 2.0.18", "tokio", @@ -4862,7 +5551,7 @@ dependencies = [ "rand 0.9.2", "ring", "rustc-hash", - "rustls", + "rustls 0.23.37", "rustls-pki-types", "slab", "thiserror 2.0.18", @@ -5152,6 +5841,12 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-lite" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973" + [[package]] name = "regex-syntax" version = "0.8.10" @@ -5169,12 +5864,12 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.4.13", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", - "hyper", - "hyper-rustls", + "hyper 1.8.1", + "hyper-rustls 0.27.7", "hyper-util", "js-sys", "log", @@ -5182,7 +5877,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls", + "rustls 0.23.37", "rustls-native-certs", "rustls-pki-types", "serde", @@ -5190,7 +5885,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tokio-util", "tower 0.5.3", "tower-http", @@ -5215,13 +5910,13 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - 
"h2", + "h2 0.4.13", "hickory-resolver", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", - "hyper", - "hyper-rustls", + "hyper 1.8.1", + "hyper-rustls 0.27.7", "hyper-util", "js-sys", "log", @@ -5230,14 +5925,14 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls", + "rustls 0.23.37", "rustls-pki-types", "rustls-platform-verifier", "serde", "serde_json", "sync_wrapper", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.4", "tokio-util", "tower 0.5.3", "tower-http", @@ -5255,6 +5950,17 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint 0.4.9", + "hmac", + "zeroize", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -5316,6 +6022,18 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.23.37" @@ -5329,7 +6047,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.103.10", "subtle", "zeroize", ] @@ -5402,10 +6120,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls", + "rustls 0.23.37", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki", + "rustls-webpki 0.103.10", "security-framework", "security-framework-sys", "webpki-root-certs", @@ -5418,6 +6136,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.103.10" @@ -5514,16 +6242,40 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct 0.1.1", + "der 0.6.1", + "generic-array", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct", - "der", + "base16ct 0.2.0", + "der 0.7.10", "generic-array", - "pkcs8", + "pkcs8 0.10.2", "subtle", "zeroize", ] @@ -5841,6 +6593,16 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + [[package]] name = "signature" version = "2.2.0" @@ -5934,6 +6696,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" + [[package]] name = "spinning_top" version = "0.3.0" @@ -5943,6 +6711,16 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der 0.6.1", +] + [[package]] name = "spki" version = "0.7.3" @@ -5950,7 +6728,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der", + "der 0.7.10", ] [[package]] @@ -6394,13 +7172,23 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls", + "rustls 0.23.37", "tokio", ] @@ -6443,11 +7231,11 @@ dependencies = [ "axum 0.7.9", "base64 0.22.1", "bytes", - "h2", + "h2 0.4.13", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-timeout", "hyper-util", "percent-encoding", @@ -6472,11 +7260,11 @@ dependencies = [ "axum 0.8.8", "base64 0.22.1", "bytes", - "h2", + "h2 0.4.13", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.8.1", "hyper-timeout", "hyper-util", "percent-encoding", @@ -6542,7 +7330,7 @@ dependencies = [ "futures-core", "futures-util", "http 1.4.0", - "http-body", + "http-body 1.0.1", "http-body-util", "iri-string", "pin-project-lite", @@ -7643,6 +8431,12 @@ dependencies = [ "rustix", ] 
+[[package]] +name = "xmlparser" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + [[package]] name = "yasna" version = "0.5.2" diff --git a/Cargo.toml b/Cargo.toml index aad93c0..d50bb4d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,8 +19,12 @@ ahash = "0.8.11" anyhow = "1.0.93" arc-swap = "1.7.1" async-trait = "0.1.83" +aws-config = "1.8" +aws-sdk-s3 = "1.120" +aws-smithy-http-client = { version = "1.1", features = ["rustls-ring"] } +aws-smithy-runtime-api = { version = "1.10", features = ["client"] } axum = { version = "0.8.1", features = ["macros"] } -axum-extra = "0.10.0" +axum-extra = { version = "0.10.0", features = ["typed-header"] } bytes = "1.10.0" candid = "0.10.10" clap = { version = "4.5.20", features = ["derive", "string", "env"] } @@ -50,6 +54,7 @@ ic-bn-lib = { version = "0.1.18", features = [ "clients-hyper", ] } ic-bn-lib-common = "0.1" +ic-certificate-verification = "3.1" ic-custom-domains-backend = "0.1" ic-custom-domains-base = "0.1" ic-http-certification = { version = "3.1.0", optional = true } @@ -100,6 +105,8 @@ url = "2.5.3" # Read https://github.com/uuid-rs/uuid/releases/tag/1.13.0 uuid = { version = "=1.12.1", features = ["v7"] } woothee = "0.13.0" +async-stream = "0.3.6" +ic-certification = "3" [dev-dependencies] criterion = { version = "0.5", features = ["html_reports", "async_tokio"] } diff --git a/src/api.rs b/src/api.rs index f08aae5..0bb3924 100644 --- a/src/api.rs +++ b/src/api.rs @@ -66,11 +66,7 @@ pub async fn log_handler( .into_response(); }; // Maintain hickory_proto::dnssec=error filter when changing log level - let env_filter = EnvFilter::new(format!( - "{},{}", - log_level, - crate::log::LOG_LEVEL_OVERRIDES - )); + let env_filter = EnvFilter::new(format!("{},{}", log_level, crate::log::LOG_LEVEL_OVERRIDES)); let _ = state.log_handle.modify(|f| *f = env_filter); "Ok\n".into_response() diff --git a/src/cli.rs 
b/src/cli.rs index 6027fd5..8e98f8f 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -17,9 +17,11 @@ use ic_bn_lib_common::{ }; use reqwest::Url; +use candid::Principal; + use crate::{ core::{AUTHOR_NAME, SERVICE_NAME}, - routing::{RequestType, domain::CanisterAlias}, + routing::{RequestType, domain::CanisterAlias, storage::S3Flavor}, }; /// Clap does not support prefixes due to macro limitations. @@ -91,6 +93,9 @@ pub struct Cli { #[command(flatten, next_help_heading = "Cache")] pub cache: CacheConfig, + #[command(flatten)] + pub blob_storage: BlobStorage, + #[command(flatten, next_help_heading = "Shedding System")] pub shed_system: ShedSystemCli, @@ -531,6 +536,69 @@ pub struct CacheConfig { pub cache_xfetch_beta: f64, } +/// Blob-storage feature config: cashier billing + S3 backend. +/// Both groups below are flattened at the top level; `cli.blob_storage.cashier.*` +/// and `cli.blob_storage.s3.*` on the Rust side, flag names unchanged. +#[derive(Args)] +pub struct BlobStorage { + #[command(flatten, next_help_heading = "Blob Storage — Cashier")] + pub cashier: CashierConfig, + + #[command(flatten, next_help_heading = "Blob Storage — S3")] + pub s3: S3Storage, +} + +#[derive(Args)] +pub struct CashierConfig { + /// Canister ID of the cashier backend. + /// When set, the gateway will connect to this canister for billing and budget checks. + #[clap(env, long)] + pub cashier_canister_id: Option, + + /// How frequently to report accumulated usage counters to the cashier canister + #[clap(env, long, default_value = "10s", value_parser = parse_duration)] + pub cashier_usage_report_interval: Duration, + + /// Comma-separated list of hosts allowed to call DELETE /owner. + /// If unset, owner deletion is unrestricted. + #[clap(env = "ALLOW_DELETE_OWNER_FROM_HOST", long)] + pub allow_delete_owner_from_host: Option, +} + +#[derive(Args)] +pub struct S3Storage { + /// S3-compatible endpoint URL (e.g. http://localhost:9000 for MinIO). 
+ /// When set together with other S3_* options, enables S3 storage backend. + #[clap(env, long)] + pub s3_endpoint: Option, + + /// S3 access key + #[clap(env, long, default_value = "root-user")] + pub s3_access_key: String, + + /// S3 secret key + #[clap(env, long, default_value = "password")] + pub s3_secret_key: String, + + /// S3 bucket name (used as the default / fallback bucket for health checks) + #[clap(env, long, default_value = "ic-gateway-storage")] + pub s3_bucket: String, + + /// S3 region + #[clap(env, long, default_value = "us-east-1")] + pub s3_region: String, + + /// S3 session token (for temporary credentials, e.g. Okta-based AWS access) + #[clap(env, long)] + pub s3_session_token: Option, + + /// Which S3 flavor we're talking to. Drives feature selection + /// (e.g. whether to request `INTELLIGENT_TIERING` on uploads) without + /// probing the backend at startup. Defaults to `minio` for local dev. + #[clap(env, long, value_enum, default_value_t = S3Flavor::Minio)] + pub s3_flavor: S3Flavor, +} + #[derive(Args)] pub struct Cors { /// Default value for Access-Control-Allow-Origin header diff --git a/src/core.rs b/src/core.rs index 3627e03..954ad29 100644 --- a/src/core.rs +++ b/src/core.rs @@ -31,6 +31,10 @@ use crate::{ cli::Cli, metrics, routing::ic::subnets_info::SubnetsInfoFetcher, + routing::storage::{ + AWSBucket, BucketLike, CashierClient, CashierConnector, IngressAuth, IngressAuthImpl, + S3Config, StorageState, + }, routing::{ self, ic::{ @@ -254,7 +258,9 @@ pub async fn main( let root_subnet_id = principal!(MAINNET_ROOT_SUBNET_ID); - let fetcher = Arc::new(SubnetsInfoFetcher::new(Arc::new(agent), root_subnet_id)); + let agent = Arc::new(agent); + + let fetcher = Arc::new(SubnetsInfoFetcher::new(agent.clone(), root_subnet_id)); let subnets_info = fetcher.info.clone(); health_manager.add(fetcher.clone()); @@ -264,6 +270,89 @@ pub async fn main( cli.domain.subnets_info_poll_interval, ); + // Setup Cashier client + billing connector + let 
cashier_connector = if let Some(canister_id) = cli.blob_storage.cashier.cashier_canister_id + { + let cashier_client = Arc::new(CashierClient::new(agent.clone(), canister_id)); + warn!("Cashier client configured for canister {canister_id}"); + + match CashierConnector::new(cashier_client, Some(HOSTNAME.get().unwrap().clone())).await { + Ok(connector) => { + let connector = Arc::new(connector); + warn!("CashierConnector initialized (billing enabled)"); + + health_manager.add(connector.clone()); + tasks.add_interval( + "cashier_usage_reporter", + connector.clone(), + cli.blob_storage.cashier.cashier_usage_report_interval, + ); + + Some(connector) + } + Err(e) => { + warn!("CashierConnector init failed (non-fatal, billing disabled): {e:#}"); + None + } + } + } else { + None + }; + + // Setup S3 storage backend (single bucket) + let s3_bucket = if let Some(ref endpoint) = cli.blob_storage.s3.s3_endpoint { + let flavor = cli.blob_storage.s3.s3_flavor; + let s3_config = S3Config { + endpoint: endpoint.clone(), + access_key: cli.blob_storage.s3.s3_access_key.clone(), + secret_key: cli.blob_storage.s3.s3_secret_key.clone(), + bucket_name: cli.blob_storage.s3.s3_bucket.clone(), + region: cli.blob_storage.s3.s3_region.clone(), + session_token: cli.blob_storage.s3.s3_session_token.clone(), + flavor, + }; + warn!( + endpoint = %endpoint, + bucket = %s3_config.bucket_name, + region = %s3_config.region, + flavor = ?flavor, + "Initializing S3 storage backend" + ); + + match AWSBucket::new(s3_config, Arc::new(dns_resolver.clone())).await { + Ok(bucket) => { + warn!( + bucket = %cli.blob_storage.s3.s3_bucket, + "S3 bucket ready" + ); + Some(Arc::new(bucket) as Arc) + } + Err(e) => { + warn!("S3 bucket initialization failed (non-fatal): {e}"); + None + } + } + } else { + None + }; + + // Assemble storage state (enabled iff both S3 bucket and cashier connector + // are configured). Ingress auth is only constructed when storage is active. 
+ let storage_state = s3_bucket.zip(cashier_connector).map(|(bucket, connector)| { + let ingress_auth: Arc = + Arc::new(IngressAuthImpl::new(agent.read_root_key())); + StorageState { + connector, + bucket, + ingress_auth, + allowed_delete_owner_hosts: cli + .blob_storage + .cashier + .allow_delete_owner_from_host + .clone(), + } + }); + // Setup WAF let waf_layer = if cli.waf.waf_enable { let v = WafLayer::new_from_cli(&cli.waf, Some(http_client.clone())) @@ -292,6 +381,7 @@ pub async fn main( waf_layer, custom_domains_router, subnets_info, + storage_state, ) .await .context("unable to setup Axum router")?; diff --git a/src/routing/error_cause.rs b/src/routing/error_cause.rs index 611d61f..c9a78f0 100644 --- a/src/routing/error_cause.rs +++ b/src/routing/error_cause.rs @@ -108,6 +108,9 @@ pub enum BackendError { HttpGateway(String), Other(String), ResponseVerification(String), + // Storage backends + Cashier(String), + S3(String), } impl BackendError { @@ -120,6 +123,8 @@ impl BackendError { Self::BoundaryNode(x) => Some(x.clone()), Self::HttpGateway(x) => Some(x.clone()), Self::Other(x) => Some(x.clone()), + Self::Cashier(x) => Some(x.clone()), + Self::S3(x) => Some(x.clone()), _ => None, } } @@ -137,6 +142,10 @@ pub enum ClientError { UnknownDomain(FQDN), DomainCanisterMismatch(Principal), SubnetNotFound, + // Storage-side client errors + #[strum(to_string = "not_found_{0}")] + NotFound(&'static str), + RangeNotSatisfiable(u64), } impl ClientError { @@ -148,6 +157,8 @@ impl ClientError { Self::DomainCanisterMismatch(x) => { Some(format!("The canister {x} is not served by this domain")) } + Self::NotFound(x) => Some((*x).into()), + Self::RangeNotSatisfiable(total) => Some(format!("available length: {total}")), _ => None, } } @@ -703,6 +714,102 @@ impl ErrorData { } } +// --------------------------------------------------------------------------- +// Storage API error type (for /v1/* endpoints) +// 
--------------------------------------------------------------------------- +// +// Structurally similar to `ErrorCause` but with two differences: +// * reuses `ClientError` and `BackendError` above (instead of defining parallel +// enums); the existing `MalformedRequest`/`BodyTooLarge` variants fit the +// storage `bad_request`/`payload_too_large` semantics, and storage-specific +// concepts (`NotFound`, `RangeNotSatisfiable`, `Cashier`, `S3`) are folded +// into the same enums as new variants. +// * renders responses as plain text (not HTML pages): `/v1/*` is an +// SDK-facing API, not a browser destination. + +/// Error type for storage API endpoints (`/v1/*`). +#[derive(Debug, Clone, Display, IntoStaticStr, Eq, PartialEq)] +#[strum(serialize_all = "snake_case")] +pub enum StorageError { + #[strum(to_string = "client_{0}")] + Client(ClientError), + #[strum(to_string = "backend_{0}")] + Backend(BackendError), + /// 401 with `WWW-Authenticate` (missing ingress signature). + Unauthorized(String), + /// 403 with an explanatory message (the enum-level `ErrorCause::Forbidden` + /// carries no message, hence a storage-specific variant). + Forbidden(String), + /// 403 — owner principal unknown to the cashier. + OwnerNotFound, + /// 403 — owner out of credit. 
+ InsufficientBalance, + #[strum(serialize = "internal_server_error")] + Internal(String), +} + +impl From for StorageError { + fn from(e: ClientError) -> Self { + Self::Client(e) + } +} + +impl From for StorageError { + fn from(e: BackendError) -> Self { + Self::Backend(e) + } +} + +impl IntoResponse for StorageError { + fn into_response(self) -> Response { + use http::header; + let mut headers = http::HeaderMap::new(); + let (status, body) = match &self { + Self::Client(ClientError::MalformedRequest(m)) => (StatusCode::BAD_REQUEST, m.clone()), + Self::Client(ClientError::BodyTooLarge) => { + (StatusCode::PAYLOAD_TOO_LARGE, "payload too large".into()) + } + Self::Client(ClientError::NotFound(what)) => { + (StatusCode::NOT_FOUND, format!("{what} not found")) + } + Self::Client(ClientError::RangeNotSatisfiable(total)) => ( + StatusCode::RANGE_NOT_SATISFIABLE, + format!("range not satisfiable; available length: {total}"), + ), + Self::Client(e) => ( + StatusCode::BAD_REQUEST, + e.details().unwrap_or_else(|| e.to_string()), + ), + Self::Backend(BackendError::Cashier(m)) => { + (StatusCode::BAD_GATEWAY, format!("cashier unavailable: {m}")) + } + Self::Backend(BackendError::S3(m)) => ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("storage backend error: {m}"), + ), + Self::Backend(e) => ( + StatusCode::BAD_GATEWAY, + e.details().unwrap_or_else(|| e.to_string()), + ), + Self::Unauthorized(m) => { + headers.insert( + header::WWW_AUTHENTICATE, + ic_bn_lib::hval!("X-ICP-Canister-Signature"), + ); + (StatusCode::UNAUTHORIZED, m.clone()) + } + Self::Forbidden(m) => (StatusCode::FORBIDDEN, m.clone()), + Self::OwnerNotFound => (StatusCode::FORBIDDEN, "owner not found".into()), + Self::InsufficientBalance => (StatusCode::FORBIDDEN, "insufficient balance".into()), + Self::Internal(m) => (StatusCode::INTERNAL_SERVER_ERROR, m.clone()), + }; + let mut resp = (status, headers, body).into_response(); + // Same pattern as ErrorCause: make the typed error visible to middleware. 
+ resp.extensions_mut().insert(self); + resp + } +} + #[cfg(test)] mod test { use super::*; diff --git a/src/routing/ic/subnets_info.rs b/src/routing/ic/subnets_info.rs index 55af5c8..695e2fd 100644 --- a/src/routing/ic/subnets_info.rs +++ b/src/routing/ic/subnets_info.rs @@ -6,9 +6,7 @@ use arc_swap::ArcSwapOption; use async_trait::async_trait; use candid::Principal; use ic_bn_lib::ic_agent::{ - Agent, - agent::SubnetType as AgentSubnetType, - hash_tree::SubtreeLookupResult, + Agent, agent::SubnetType as AgentSubnetType, hash_tree::SubtreeLookupResult, }; use ic_bn_lib_common::traits::{Healthy, Run}; use tokio_util::sync::CancellationToken; @@ -197,7 +195,9 @@ impl SubnetsInfoFetcher { let subnet_type = SubnetType::from(subnet.subnet_type()); if subnet_type == SubnetType::Unknown { - return Err(anyhow::anyhow!("invalid subnet type for subnet {subnet_id}")); + return Err(anyhow::anyhow!( + "invalid subnet type for subnet {subnet_id}" + )); } Ok::<_, Error>((subnet_id, ranges, subnet_type)) @@ -276,10 +276,7 @@ mod tests { let lo_app = Principal::from_slice(&[0x20]); let hi_app = Principal::from_slice(&[0x2F]); - let ranges = vec![ - (lo_sys, hi_sys, subnet_sys), - (lo_app, hi_app, subnet_app), - ]; + let ranges = vec![(lo_sys, hi_sys, subnet_sys), (lo_app, hi_app, subnet_app)]; let mut types = AHashMap::new(); types.insert(subnet_sys, SubnetType::System); types.insert(subnet_app, SubnetType::Application); @@ -290,8 +287,14 @@ mod tests { let info = info.as_ref().unwrap(); // mid-range hits - assert_eq!(info.subnet_type(Principal::from_slice(&[0x13])), Some(SubnetType::System)); - assert_eq!(info.subnet_type(Principal::from_slice(&[0x25])), Some(SubnetType::Application)); + assert_eq!( + info.subnet_type(Principal::from_slice(&[0x13])), + Some(SubnetType::System) + ); + assert_eq!( + info.subnet_type(Principal::from_slice(&[0x25])), + Some(SubnetType::Application) + ); // exact boundary hits assert_eq!(info.subnet_type(lo_sys), Some(SubnetType::System)); 
assert_eq!(info.subnet_type(hi_sys), Some(SubnetType::System)); diff --git a/src/routing/middleware/cors.rs b/src/routing/middleware/cors.rs index 1745bd1..55d8150 100644 --- a/src/routing/middleware/cors.rs +++ b/src/routing/middleware/cors.rs @@ -51,6 +51,10 @@ pub const ALLOW_METHODS_HTTP: [Method; 6] = [ Method::PATCH, ]; +// Methods allowed for storage API calls (`/v1/*`) +pub const ALLOW_METHODS_STORAGE: [Method; 4] = + [Method::HEAD, Method::GET, Method::PUT, Method::DELETE]; + // Base headers const EXPOSE_HEADERS: [HeaderName; 5] = [ ACCEPT_RANGES, diff --git a/src/routing/middleware/geoip.rs b/src/routing/middleware/geoip.rs index 06bbde5..da877cd 100644 --- a/src/routing/middleware/geoip.rs +++ b/src/routing/middleware/geoip.rs @@ -33,14 +33,10 @@ impl GeoIp { } pub fn lookup(&self, ip: IpAddr) -> Option { - let country: Option = self.db.lookup(ip) - .and_then(|r| r.decode()) - .ok() - .flatten(); + let country: Option = + self.db.lookup(ip).and_then(|r| r.decode()).ok().flatten(); - country.and_then(|x| { - x.country.iso_code.map(|code| CountryCode(code.into())) - }) + country.and_then(|x| x.country.iso_code.map(|code| CountryCode(code.into()))) } } diff --git a/src/routing/mod.rs b/src/routing/mod.rs index 306b862..5f0da1b 100644 --- a/src/routing/mod.rs +++ b/src/routing/mod.rs @@ -3,6 +3,7 @@ pub mod error_cause; pub mod ic; pub mod middleware; pub mod proxy; +pub mod storage; use std::{net::IpAddr, ops::Deref, str::FromStr, sync::Arc, time::Duration}; @@ -79,6 +80,8 @@ use { }; pub const CONTENT_TYPE_JSON: HeaderValue = hval!("application/json"); +pub const CONTENT_TYPE_OCTET: HeaderValue = hval!("application/octet-stream"); +pub const ACCEPT_RANGES_BYTES: HeaderValue = hval!("bytes"); #[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)] pub struct CanisterId(pub Principal); @@ -214,6 +217,7 @@ pub async fn setup_router( waf_layer: Option, custom_domains_router: Option, subnets_info: Arc>, + storage_state: Option, ) -> Result { // 
Setup API router let router_api = setup_api_router( @@ -524,7 +528,7 @@ pub async fn setup_router( let custom_domains_router = custom_domains_router.map(|x| { Router::new() .nest("/custom-domains", x) - .layer(cors_base.allow_methods([ + .layer(cors_base.clone().allow_methods([ Method::HEAD, Method::GET, Method::POST, @@ -533,6 +537,15 @@ pub async fn setup_router( ])) }); + // Build optional storage router (blob/chunk CRUD under /v1/) + let storage_router = storage_state.map(|state| { + storage::storage_router( + state, + cli.cors.cors_max_age, + cli.cors.cors_allow_origin.clone(), + ) + }); + // Top-level router #[allow(unused_mut)] let mut router = Router::new() @@ -578,6 +591,10 @@ pub async fn setup_router( ) .layer(common_layers); + if let Some(sr) = storage_router { + router = router.nest("/storage/v1", sr); + } + #[cfg(all(target_os = "linux", feature = "sev-snp"))] if cli.sev_snp.sev_snp_enable { let router_sev_snp = Router::new().route( diff --git a/src/routing/storage/auth.rs b/src/routing/storage/auth.rs new file mode 100644 index 0000000..e1f55dd --- /dev/null +++ b/src/routing/storage/auth.rs @@ -0,0 +1,146 @@ +use std::time::{SystemTime, UNIX_EPOCH}; + +use candid::Principal; +use ic_bn_lib::ic_agent::{Certificate, hash_tree::LookupResult}; +use ic_certificate_verification::VerifyCertificate; + +use super::wire::{OwnerEgressSignature, PutBlobTreeRequest, StorageGatewayAuthorization}; +use crate::routing::error_cause::StorageError; + +/// Maximum age of an upload certificate's IC `time` value relative to the +/// gateway's wall clock. After this, the certificate is rejected even if its +/// signature is otherwise valid, narrowing the replay window for leaked or +/// captured certificates. +const CERT_MAX_AGE_NS: u128 = 30 * 60 * 1_000_000_000; + +/// Authorization gate for ingress (client-facing) write operations on the +/// storage API. 
+/// +/// Implementations decide whether a given request is allowed to mutate state +/// on behalf of the owner principal it names. The trait is kept narrow — it +/// only covers endpoints where the gateway needs to prove the caller acts on +/// the owner's behalf (currently `PUT /blob_tree`). Read endpoints and chunk +/// uploads are gated by other means (public reads; chunk uploads are bound +/// to an already-authenticated blob tree). +/// +/// Must be `Send + Sync` so it can live behind an `Arc` in +/// the shared `StorageState` used by the async Axum handlers. +pub trait IngressAuth: Send + Sync { + /// Check that `request` is authorized to upload its blob tree. + /// + /// Returns `Ok(())` if the request carries a valid authorization binding + /// the caller to the blob tree's root hash. Returns a `StorageError` + /// (`Unauthorized` / `Forbidden`) otherwise; handlers translate that into + /// the appropriate HTTP status. + fn check_put_blob(&self, request: &PutBlobTreeRequest) -> Result<(), StorageError>; +} + +/// Production implementation of [`IngressAuth`] that verifies IC egress +/// certificates produced by the owner canister. +/// +/// Authorization is delegated to the IC consensus layer: the owner canister +/// signs an `OwnerEgressSignature` (containing `method = "upload"` and the +/// expected `blob_hash`) into its certified data tree, and the client attaches +/// the resulting certificate to the upload request. We verify the certificate +/// against the IC root key, then match the embedded payload against the +/// request's blob root hash. +pub struct IngressAuthImpl { + root_key: Vec, +} + +impl IngressAuthImpl { + /// Build a new verifier bound to `root_key`. + /// + /// `root_key` is the IC NNS root public key used to validate certificate + /// signatures and delegation chains. For mainnet this is the well-known + /// hardcoded key; for local/dev replicas it is fetched at startup via + /// `Agent::fetch_root_key`. 
The key is snapshotted at construction time — + /// runtime rotation is not supported. + pub fn new(root_key: Vec) -> Self { + Self { root_key } + } + + fn parse_certificate(bytes: &[u8]) -> Result { + serde_cbor::from_slice::(bytes) + .map_err(|e| StorageError::Forbidden(format!("failed to parse certificate: {e}"))) + } + + /// Verify the BLS signature, delegation, canister-range membership, and + /// freshness of a certificate. Certificates whose IC `time` is older than + /// [`CERT_MAX_AGE_NS`] (relative to the gateway's wall clock) are rejected; + /// this caps the replay window for a leaked certificate even though + /// `blob_hash`-binding already makes replays idempotent on the same + /// content. + fn verify_certificate( + &self, + cert: &Certificate, + canister: Principal, + ) -> Result<(), StorageError> { + let now_ns = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| StorageError::Internal(format!("system clock before UNIX epoch: {e}")))? + .as_nanos(); + + cert.verify(canister.as_slice(), &self.root_key, &now_ns, &CERT_MAX_AGE_NS) + .map_err(|e| StorageError::Forbidden(format!("certificate verification failed: {e}"))) + } + + fn extract_payload(cert: &Certificate) -> Result { + cert.tree + .list_paths() + .iter() + .find_map(|path| { + if let LookupResult::Found(value) = cert.tree.lookup_path(path) { + candid::decode_one::(value).ok() + } else { + None + } + }) + .ok_or_else(|| { + StorageError::Forbidden( + "no valid OwnerEgressSignature in certificate tree".into(), + ) + }) + } + + fn check_payload( + payload: &OwnerEgressSignature, + root_hash: &str, + ) -> Result<(), StorageError> { + if payload.method != "upload" { + Err(StorageError::Forbidden(format!( + "invalid method: {}", + payload.method + ))) + } else if payload.blob_hash != root_hash { + Err(StorageError::Forbidden(format!( + "blob hash mismatch: expected {root_hash}, got {}", + payload.blob_hash + ))) + } else { + Ok(()) + } + } +} + +impl IngressAuth for IngressAuthImpl { + 
fn check_put_blob(&self, request: &PutBlobTreeRequest) -> Result<(), StorageError> { + let root_hash = request + .blob_tree + .root_hash() + .ok_or_else(|| StorageError::Forbidden("blob tree has no root hash".into()))? + .to_string(); + + match &request.auth { + StorageGatewayAuthorization::None => Err(StorageError::Unauthorized( + "no authorization provided".into(), + )), + StorageGatewayAuthorization::OwnerEgressSignature(cert_bytes) => { + let cert = Self::parse_certificate(cert_bytes)?; + self.verify_certificate(&cert, request.owner)?; + let payload = Self::extract_payload(&cert)?; + Self::check_payload(&payload, &root_hash) + } + } + } +} diff --git a/src/routing/storage/bucket.rs b/src/routing/storage/bucket.rs new file mode 100644 index 0000000..d0d707c --- /dev/null +++ b/src/routing/storage/bucket.rs @@ -0,0 +1,340 @@ +use std::fmt::{Display, Formatter}; +use std::net::IpAddr; +use std::sync::Arc; +use std::time::Duration; + +use async_trait::async_trait; +use aws_config::retry::RetryConfig; +use aws_config::{BehaviorVersion, Region}; +use aws_sdk_s3::Client; +use aws_sdk_s3::error::{DisplayErrorContext, SdkError}; +use aws_sdk_s3::operation::get_object::GetObjectError; +use aws_sdk_s3::operation::head_bucket::HeadBucketError; +use aws_sdk_s3::operation::head_object::HeadObjectError; +use aws_sdk_s3::primitives::ByteStream; +use bytes::Bytes; +use clap::ValueEnum; +use aws_smithy_http_client::{Builder as HttpClientBuilder, tls}; +use aws_smithy_runtime_api::client::dns::{DnsFuture, ResolveDns, ResolveDnsError}; +use hickory_resolver::proto::rr::{RData, RecordType}; +use ic_bn_lib::http::dns::Resolver as DnsResolver; +use ic_bn_lib::ic_bn_lib_common::traits::dns::Resolves; + +/// Which S3-compatible backend the gateway is talking to. +/// +/// The flavor encodes out-of-band knowledge about which features the backend +/// supports, so we don't have to discover capabilities at runtime. Add new +/// variants when you onboard a new backend. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] +#[value(rename_all = "kebab-case")] +pub enum S3Flavor { + /// Amazon S3 (production). Supports `INTELLIGENT_TIERING`. + Aws, + /// Pakistan deployment. Does not use `INTELLIGENT_TIERING`. + Pakistan, + /// MinIO (local/dev). Does not support `INTELLIGENT_TIERING`. + Minio, +} + +/// Configuration for the S3 backend. +#[derive(Debug, Clone)] +pub struct S3Config { + pub endpoint: String, + pub access_key: String, + pub secret_key: String, + pub bucket_name: String, + pub region: String, + pub session_token: Option, + /// Which S3 flavor we're talking to — drives feature selection + /// (e.g. intelligent tiering) without a runtime capability probe. + pub flavor: S3Flavor, +} + +/// Errors from S3 storage operations. +#[derive(Debug)] +pub enum StorageError { + AwsS3(String), +} + +impl Display for StorageError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::AwsS3(inner) => write!(f, "AWS S3 error: {inner}"), + } + } +} + +impl std::error::Error for StorageError {} + +/// Abstraction over S3 buckets to enable dependency injection in tests. +#[async_trait] +pub trait BucketLike: Send + Sync { + /// Upload `content` under `path`. Takes ownership of `content` as + /// `bytes::Bytes` so callers with either `Vec` or `Bytes` can hand off + /// without a copy (`Vec -> Bytes` and `Bytes -> ByteStream` are both + /// zero-copy). + async fn put_object(&self, path: String, content: Bytes) -> Result<(), StorageError>; + + /// Returns `Ok(Some(data))` if the object exists, `Ok(None)` if not, + /// and `Err` only for communication errors. 
+ async fn get_object(&self, path: String) -> Result>, StorageError>; + + async fn object_exists(&self, path: String) -> Result; + + async fn delete_object(&self, path: String) -> Result<(), StorageError>; + + async fn list_page( + &self, + prefix: String, + continuation_token: Option, + max_keys: Option, + ) -> Result; +} + +/// A single page of object keys from a paginated list operation. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ListPage { + pub keys: Vec, + pub next_continuation_token: Option, +} + +/// Adapter that exposes an `ic_bn_lib_common` [`Resolves`] implementation as +/// an AWS SDK [`ResolveDns`]. +/// +/// The AWS SDK only needs `hostname -> Vec` resolution; we perform +/// parallel A and AAAA lookups and return the union. +#[derive(Clone, Debug)] +struct AwsDnsAdapter(Arc); + +impl ResolveDns for AwsDnsAdapter { + fn resolve_dns<'a>(&'a self, name: &'a str) -> DnsFuture<'a> { + let resolver = self.0.clone(); + let name = name.to_string(); + DnsFuture::new(async move { + let (v4, v6) = tokio::join!( + resolver.resolve(RecordType::A, &name), + resolver.resolve(RecordType::AAAA, &name), + ); + + let mut ips: Vec = Vec::new(); + if let Ok(records) = &v4 { + for r in records { + if let RData::A(a) = r.data() { + ips.push(IpAddr::V4(a.0)); + } + } + } + if let Ok(records) = &v6 { + for r in records { + if let RData::AAAA(aaaa) = r.data() { + ips.push(IpAddr::V6(aaaa.0)); + } + } + } + + if ips.is_empty() { + // Surface the underlying error when we have one; otherwise + // report an empty result explicitly. + let err = v4.err().or(v6.err()).map_or_else( + || { + std::io::Error::new( + std::io::ErrorKind::NotFound, + format!("no addresses found for hostname: {name}"), + ) + }, + |e| std::io::Error::other(format!("DNS resolution failed for {name}: {e}")), + ); + return Err(ResolveDnsError::new(err)); + } + Ok(ips) + }) + } +} + +/// `BucketLike` implementation backed by the AWS SDK S3 client. 
+pub struct AWSBucket { + client: Client, + config: S3Config, +} + +impl AWSBucket { + fn normalize_endpoint(endpoint: &str) -> String { + if endpoint.starts_with("http://") || endpoint.starts_with("https://") { + endpoint.to_string() + } else { + format!("https://{endpoint}") + } + } + + /// Build a new `aws_sdk_s3::Client` from the given config. + async fn build_client(config: &S3Config, dns_resolver: Arc) -> Client { + let credentials = aws_sdk_s3::config::Credentials::new( + &config.access_key, + &config.secret_key, + config.session_token.clone(), + None, + "ic-gateway-s3", + ); + let normalized_endpoint = Self::normalize_endpoint(&config.endpoint); + + let http_client = HttpClientBuilder::new() + .tls_provider(tls::Provider::Rustls( + tls::rustls_provider::CryptoMode::Ring, + )) + .build_with_resolver(AwsDnsAdapter(dns_resolver)); + + let lib_config = aws_config::defaults(BehaviorVersion::latest()) + .region(Region::new(config.region.clone())) + .endpoint_url(&normalized_endpoint) + .credentials_provider(credentials) + .http_client(http_client) + .retry_config(RetryConfig::standard().with_initial_backoff(Duration::from_millis(100))) + .load() + .await; + + let s3_config = aws_sdk_s3::config::Builder::from(&lib_config) + .force_path_style(true) + .build(); + Client::from_conf(s3_config) + } + + /// Ensure the bucket exists, creating it if necessary. (Intelligent tiering is not probed here; it is selected statically from `S3Flavor` at upload time.)
+ async fn init_bucket(client: &Client, config: &S3Config) -> Result { + let exists = match client + .head_bucket() + .bucket(&config.bucket_name) + .send() + .await + { + Ok(_) => true, + Err(SdkError::ServiceError(inner)) => match inner.into_err() { + HeadBucketError::NotFound(_) => false, + other => return Err(StorageError::AwsS3(other.to_string())), + }, + Err(e) => return Err(StorageError::AwsS3(format!("{}", DisplayErrorContext(e)))), + }; + + if !exists { + client + .create_bucket() + .bucket(&config.bucket_name) + .send() + .await + .map_err(|e| StorageError::AwsS3(format!("{}", DisplayErrorContext(e))))?; + } + + Ok(true) + } + + pub async fn new( + config: S3Config, + dns_resolver: Arc, + ) -> Result { + let client = Self::build_client(&config, dns_resolver).await; + Self::init_bucket(&client, &config).await?; + Ok(Self { client, config }) + } +} + +#[async_trait] +impl BucketLike for AWSBucket { + async fn put_object(&self, path: String, content: Bytes) -> Result<(), StorageError> { + let mut req = self + .client + .put_object() + .bucket(&self.config.bucket_name) + .key(&path) + .body(ByteStream::from(content)); + + if let S3Flavor::Aws = self.config.flavor { + req = req.storage_class(aws_sdk_s3::types::StorageClass::IntelligentTiering); + } + + req.send() + .await + .map(|_| ()) + .map_err(|e| StorageError::AwsS3(format!("{}", DisplayErrorContext(e)))) + } + + async fn get_object(&self, path: String) -> Result>, StorageError> { + match self + .client + .get_object() + .bucket(&self.config.bucket_name) + .key(path) + .send() + .await + { + Ok(output) => output + .body + .collect() + .await + .map(|b| Some(b.to_vec())) + .map_err(|e| StorageError::AwsS3(e.to_string())), + Err(SdkError::ServiceError(inner)) => match inner.into_err() { + GetObjectError::NoSuchKey(_) => Ok(None), + other => Err(StorageError::AwsS3(other.to_string())), + }, + Err(e) => Err(StorageError::AwsS3(format!("{}", DisplayErrorContext(e)))), + } + } + + async fn object_exists(&self, 
path: String) -> Result { + match self + .client + .head_object() + .bucket(&self.config.bucket_name) + .key(&path) + .send() + .await + { + Ok(_) => Ok(true), + Err(SdkError::ServiceError(inner)) => match inner.into_err() { + HeadObjectError::NotFound(_) => Ok(false), + other => Err(StorageError::AwsS3(other.to_string())), + }, + Err(e) => Err(StorageError::AwsS3(format!("{}", DisplayErrorContext(e)))), + } + } + + async fn delete_object(&self, path: String) -> Result<(), StorageError> { + self.client + .delete_object() + .bucket(&self.config.bucket_name) + .key(&path) + .send() + .await + .map(|_| ()) + .map_err(|e| StorageError::AwsS3(format!("{}", DisplayErrorContext(e)))) + } + + async fn list_page( + &self, + prefix: String, + continuation_token: Option, + max_keys: Option, + ) -> Result { + let output = self + .client + .list_objects_v2() + .bucket(&self.config.bucket_name) + .prefix(prefix) + .set_continuation_token(continuation_token) + .set_max_keys(max_keys.map(|v| v as i32)) + .send() + .await + .map_err(|e| StorageError::AwsS3(format!("{}", DisplayErrorContext(e))))?; + + let keys = output + .contents + .unwrap_or_default() + .into_iter() + .map(|o| o.key.unwrap_or_default()) + .collect(); + + Ok(ListPage { + keys, + next_continuation_token: output.next_continuation_token, + }) + } +} diff --git a/src/routing/storage/cashier_client.rs b/src/routing/storage/cashier_client.rs new file mode 100644 index 0000000..5c36d0c --- /dev/null +++ b/src/routing/storage/cashier_client.rs @@ -0,0 +1,94 @@ +use std::sync::Arc; + +use anyhow::{Context, Error}; +use candid::{Decode, Principal}; +use ic_bn_lib::ic_agent::Agent; + +use super::cashier_types::*; + +/// Client for calling the cashier canister using the gateway's existing Agent. 
+/// +/// Provides 3 methods: +/// - `pricelist_v1` (query) — load pricing for cost calculation +/// - `budget_get_v1` (query) — check per-owner credit/budget +/// - `storage_usage_set_batch_v1` (update) — report usage counters +pub struct CashierClient { + agent: Arc, + canister_id: Principal, +} + +impl CashierClient { + pub fn new(agent: Arc, canister_id: Principal) -> Self { + Self { agent, canister_id } + } + + pub fn canister_id(&self) -> &Principal { + &self.canister_id + } + + /// Returns the principal of the agent's identity. + pub fn principal(&self) -> Result { + self.agent + .get_principal() + .map_err(|e| anyhow::anyhow!("failed to get agent principal: {e}")) + } + + /// Query: returns the pricing for storage operations. + pub async fn pricelist_v1(&self) -> Result { + let encoded_args = candid::encode_args(()).context("failed to encode pricelist args")?; + + let response_bytes = self + .agent + .query(&self.canister_id, "pricelist_v1") + .with_arg(encoded_args) + .call() + .await + .context("pricelist_v1 query failed")?; + + let response = + Decode!(&response_bytes, Pricelist).context("failed to decode pricelist response")?; + Ok(response) + } + + /// Query: returns the budget for a given owner on this gateway. + pub async fn budget_get_v1( + &self, + request: &GetBudgetRequestV1, + ) -> Result { + let encoded_args = + candid::encode_args((request,)).context("failed to encode budget_get_v1 args")?; + + let response_bytes = self + .agent + .query(&self.canister_id, "budget_get_v1") + .with_arg(encoded_args) + .call() + .await + .context("budget_get_v1 query failed")?; + + let response = Decode!(&response_bytes, GetBudgetResult) + .context("failed to decode budget_get_v1 response")?; + Ok(response) + } + + /// Update: reports usage counters for one or more owners, returns updated budgets. 
+ pub async fn storage_usage_set_batch_v1( + &self, + request: &StorageSetUsageBatchRequest, + ) -> Result { + let encoded_args = candid::encode_args((request,)) + .context("failed to encode storage_usage_set_batch_v1 args")?; + + let response_bytes = self + .agent + .update(&self.canister_id, "storage_usage_set_batch_v1") + .with_arg(encoded_args) + .call_and_wait() + .await + .context("storage_usage_set_batch_v1 update failed")?; + + let response = Decode!(&response_bytes, StorageSetUsageBatchResult) + .context("failed to decode storage_usage_set_batch_v1 response")?; + Ok(response) + } +} diff --git a/src/routing/storage/cashier_connector.rs b/src/routing/storage/cashier_connector.rs new file mode 100644 index 0000000..774e29f --- /dev/null +++ b/src/routing/storage/cashier_connector.rs @@ -0,0 +1,351 @@ +use std::collections::HashMap; +use std::fmt; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::time::{Duration, Instant}; + +use anyhow::Error; +use async_trait::async_trait; +use candid::{Int, Nat, Principal}; +use ic_bn_lib_common::traits::{Healthy, Run}; +use tokio::sync::RwLock; +use tokio_util::sync::CancellationToken; +use tracing::warn; + +use super::cashier_client::CashierClient; +use super::cashier_types::*; +use super::wire::ONE_MIB; + +const BUDGET_TTL: Duration = Duration::from_secs(30); + +#[derive(Debug, Clone)] +pub enum BillingError { + OwnerNotFound, + InsufficientBalance, + CashierUnavailable(String), +} + +impl std::fmt::Display for BillingError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::OwnerNotFound => write!(f, "owner not found"), + Self::InsufficientBalance => write!(f, "insufficient balance"), + Self::CashierUnavailable(e) => write!(f, "cashier unavailable: {e}"), + } + } +} + +struct CachedBudget { + budget: GatewayBudget, + fetched_at: Instant, +} + +struct OwnerUsage { + counters: UsageCounters, + dirty: bool, +} + +impl OwnerUsage { + fn new() -> Self 
{ + Self { + counters: UsageCounters { + bytes_downloaded: Nat::from(0u64), + bytes_uploaded: Nat::from(0u64), + write_requests: 0, + read_requests: 0, + }, + dirty: false, + } + } +} + +/// Billing wrapper around [`CashierClient`]. +/// +/// Caches per-owner budgets, charges operations locally, and periodically +/// flushes accumulated usage counters to the cashier canister. +pub struct CashierConnector { + client: Arc, + gateway_id: GatewayId, + pricelist: Pricelist, + budgets: RwLock>, + usage: RwLock>, + healthy: AtomicBool, +} + +impl fmt::Debug for CashierConnector { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("CashierConnector") + .field("gateway_id", &self.gateway_id.principal) + .field("healthy", &self.healthy.load(Ordering::Relaxed)) + .finish() + } +} + +impl CashierConnector { + pub async fn new( + client: Arc, + gateway_name: Option, + ) -> Result { + let principal = client.principal()?; + let pricelist = client.pricelist_v1().await?; + let gateway_id = GatewayId { + principal, + name: gateway_name, + }; + + warn!( + gateway = %gateway_id.principal, + cashier = %client.canister_id(), + "CashierConnector initialized" + ); + + Ok(Self { + client, + gateway_id, + pricelist, + budgets: RwLock::new(HashMap::new()), + usage: RwLock::new(HashMap::new()), + healthy: AtomicBool::new(true), + }) + } + + // ----------------------------------------------------------------------- + // Charge methods + // ----------------------------------------------------------------------- + + pub async fn charge_blob_tree_upload(&self, owner: &Principal) -> Result<(), BillingError> { + let cost = self.compute_cost(ONE_MIB as u64, 0, 0, 1); + self.consume_budget(owner, cost).await?; + self.record_usage(owner, ONE_MIB as u64, 0, 0, 1).await; + Ok(()) + } + + pub async fn charge_chunk_upload(&self, owner: &Principal) -> Result<(), BillingError> { + let cost = self.compute_cost(ONE_MIB as u64, 0, 0, 1); + self.consume_budget(owner, cost).await?; + 
self.record_usage(owner, ONE_MIB as u64, 0, 0, 1).await; + Ok(()) + } + + pub async fn charge_blob_tree_download(&self, owner: &Principal) -> Result<(), BillingError> { + let cost = self.compute_cost(0, ONE_MIB as u64, 1, 0); + self.consume_budget(owner, cost).await?; + self.record_usage(owner, 0, ONE_MIB as u64, 1, 0).await; + Ok(()) + } + + pub async fn charge_chunk_download(&self, owner: &Principal) -> Result<(), BillingError> { + let cost = self.compute_cost(0, ONE_MIB as u64, 1, 0); + self.consume_budget(owner, cost).await?; + self.record_usage(owner, 0, ONE_MIB as u64, 1, 0).await; + Ok(()) + } + + // ----------------------------------------------------------------------- + // Usage reporting + // ----------------------------------------------------------------------- + + /// Flush accumulated usage counters to the cashier canister. + async fn report_usage(&self) { + if let Err(e) = self.flush_usage().await { + self.healthy.store(false, Ordering::Relaxed); + warn!("Failed to report usage to cashier: {e}"); + } else { + self.healthy.store(true, Ordering::Relaxed); + } + } + + async fn flush_usage(&self) -> Result<(), Error> { + let batch = { + let mut usage = self.usage.write().await; + let mut batch = Vec::new(); + for (owner, ou) in usage.iter_mut() { + if ou.dirty { + batch.push(StorageSetUsageRequest { + owner: *owner, + usage: ou.counters.clone(), + }); + ou.dirty = false; + } + } + batch + }; + + if batch.is_empty() { + return Ok(()); + } + + let request = StorageSetUsageBatchRequest { + gateway_id: self.gateway_id.clone(), + counters: batch, + }; + + match self.client.storage_usage_set_batch_v1(&request).await? 
{ + StorageSetUsageBatchResult::Ok(resp) => { + let mut budgets = self.budgets.write().await; + for (principal, budget) in resp.budgets { + budgets.insert( + principal, + CachedBudget { + budget, + fetched_at: Instant::now(), + }, + ); + } + } + StorageSetUsageBatchResult::Err(e) => { + warn!("Cashier rejected usage report: {e:?}"); + } + } + + Ok(()) + } + + // ----------------------------------------------------------------------- + // Internals + // ----------------------------------------------------------------------- + + fn compute_cost( + &self, + bytes_uploaded: u64, + bytes_downloaded: u64, + read_requests: u64, + write_requests: u64, + ) -> i64 { + let p = &self.pricelist.counters; + price_component(&p.bytes_uploaded_price, bytes_uploaded) + + price_component(&p.bytes_downloaded_price, bytes_downloaded) + + price_component(&p.read_request_price, read_requests) + + price_component(&p.write_request_price, write_requests) + } + + async fn consume_budget(&self, owner: &Principal, cost: i64) -> Result<(), BillingError> { + // Refresh the cache if the entry is missing or stale. We deliberately + // release every lock before awaiting on the canister — holding a + // `RwLock` across an await would serialize all charges and risks + // contention if `fetch_budget` is slow. 
+ let needs_refresh = { + let budgets = self.budgets.read().await; + budgets + .get(owner) + .is_none_or(|c| c.fetched_at.elapsed() >= BUDGET_TTL) + }; + + if needs_refresh { + let fresh = self.fetch_budget(owner).await?; + self.budgets.write().await.insert( + *owner, + CachedBudget { + budget: fresh, + fetched_at: Instant::now(), + }, + ); + } + + let mut budgets = self.budgets.write().await; + let cached = budgets + .get_mut(owner) + .expect("cache entry exists after refresh"); + Self::try_debit(cached, cost) + } + + fn try_debit(cached: &mut CachedBudget, cost: i64) -> Result<(), BillingError> { + let credit = int_to_i64(&cached.budget.available_credit); + if credit >= cost { + cached.budget.available_credit = Int::from(credit - cost); + Ok(()) + } else { + Err(BillingError::InsufficientBalance) + } + } + + async fn fetch_budget(&self, owner: &Principal) -> Result { + let request = GetBudgetRequestV1 { + gateway_id: Some(self.gateway_id.clone()), + owner_id: *owner, + }; + + let result = self + .client + .budget_get_v1(&request) + .await + .map_err(|e| BillingError::CashierUnavailable(e.to_string()))?; + + match result { + GetBudgetResult::Ok(resp) => Ok(resp.budget), + GetBudgetResult::Err(GetBudgetError::OwnerNotFound) => Err(BillingError::OwnerNotFound), + GetBudgetResult::Err(GetBudgetError::GatewayNotFound(_)) => Err( + BillingError::CashierUnavailable("gateway not found".to_string()), + ), + } + } + + async fn record_usage( + &self, + owner: &Principal, + bytes_up: u64, + bytes_down: u64, + reads: u64, + writes: u64, + ) { + let mut usage = self.usage.write().await; + let entry = usage.entry(*owner).or_insert_with(OwnerUsage::new); + entry.counters.bytes_uploaded += Nat::from(bytes_up); + entry.counters.bytes_downloaded += Nat::from(bytes_down); + entry.counters.read_requests += reads; + entry.counters.write_requests += writes; + entry.dirty = true; + } +} + +#[async_trait] +impl Run for CashierConnector { + async fn run(&self, _token: CancellationToken) 
-> Result<(), Error> { + self.report_usage().await; + Ok(()) + } +} + +impl Healthy for CashierConnector { + fn healthy(&self) -> bool { + self.healthy.load(Ordering::Relaxed) + } +} + +fn int_to_i64(v: &Int) -> i64 { + // Int is arbitrary precision; clamp to i64 range for local budget math. + v.0.to_string().parse::().unwrap_or(i64::MAX) +} + +fn factor_divisor(f: &Factor) -> u64 { + match f { + Factor::U => 1, + Factor::K => 1_000, + Factor::M => 1_000_000, + Factor::G => 1_000_000_000, + Factor::T => 1_000_000_000_000, + Factor::Ki => 1_024, + Factor::Mi => 1_048_576, + Factor::Gi => 1_073_741_824, + Factor::Ti => 1_099_511_627_776, + } +} + +fn price_component(price: &PricePerBillingUnit, quantity: u64) -> i64 { + if quantity == 0 { + return 0; + } + let divisor = factor_divisor(&price.per); + if divisor == 0 { + return 0; + } + // Promote to `i128` so `quantity (u64) * cost (i64)` cannot overflow + // (max magnitude ~2^127, needed ~2^127). On absurd inputs we saturate + // *high* so an oversized price locks the user out rather than wrapping + // negative — a negative cost would let `try_debit` accept anything and + // even mint credit (`credit - negative_cost`). 
+ let cost = i128::from(int_to_i64(&price.cost)); + let product = i128::from(quantity).saturating_mul(cost); + let result = product / i128::from(divisor); + i64::try_from(result).unwrap_or(i64::MAX) +} diff --git a/src/routing/storage/cashier_types.rs b/src/routing/storage/cashier_types.rs new file mode 100644 index 0000000..6322e0e --- /dev/null +++ b/src/routing/storage/cashier_types.rs @@ -0,0 +1,111 @@ +use candid::{CandidType, Deserialize, Int, Nat, Principal}; + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub enum Factor { + G, + K, + M, + T, + U, + Gi, + Ki, + Mi, + Ti, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct PricePerBillingUnit { + pub per: Factor, + pub cost: Int, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct LevelPrices { + pub bytes_stored: PricePerBillingUnit, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct UsagePrices { + pub read_request_price: PricePerBillingUnit, + pub bytes_downloaded_price: PricePerBillingUnit, + pub bytes_uploaded_price: PricePerBillingUnit, + pub write_request_price: PricePerBillingUnit, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct Pricelist { + pub gauges: LevelPrices, + pub counters: UsagePrices, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct GatewayId { + pub principal: Principal, + pub name: Option, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct UsageCounters { + pub bytes_downloaded: Nat, + pub bytes_uploaded: Nat, + pub write_requests: u64, + pub read_requests: u64, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct GatewayBudget { + pub available_credit: Int, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct GetBudgetRequestV1 { + pub gateway_id: Option, + pub owner_id: Principal, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct GetBudgetResponseV1 { + pub usage: UsageCounters, + pub budget: GatewayBudget, +} + +#[derive(CandidType, 
Deserialize, Clone, Debug)] +pub enum GetBudgetError { + GatewayNotFound(GatewayId), + OwnerNotFound, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub enum GetBudgetResult { + Ok(GetBudgetResponseV1), + Err(GetBudgetError), +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct StorageSetUsageRequest { + pub owner: Principal, + pub usage: UsageCounters, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct StorageSetUsageBatchRequest { + pub gateway_id: GatewayId, + pub counters: Vec, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub struct StorageSetUsageBatchResponse { + pub budgets: Vec<(Principal, GatewayBudget)>, +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub enum StorageSetUsageError { + NotAuthorized(Principal), + InternalError(String), +} + +#[derive(CandidType, Deserialize, Clone, Debug)] +pub enum StorageSetUsageBatchResult { + Ok(StorageSetUsageBatchResponse), + Err(StorageSetUsageError), +} diff --git a/src/routing/storage/handler.rs b/src/routing/storage/handler.rs new file mode 100644 index 0000000..340beb8 --- /dev/null +++ b/src/routing/storage/handler.rs @@ -0,0 +1,600 @@ +use std::{ + cmp::min, + ops::{Bound, Range}, + sync::Arc, + time::Duration, +}; + +use axum::{ + Json, + body::Body, + extract::{Path, State}, + http::{HeaderMap, StatusCode, header}, + response::{IntoResponse, Response}, +}; +use axum_extra::{TypedHeader, extract::Host, headers::Range as HttpRange}; +use candid::Principal; +use futures::stream::{self, Stream, StreamExt}; +use ic_bn_lib::http::body::buffer_body; +use sha2::{Digest, Sha256}; + +use crate::routing::{ + ACCEPT_RANGES_BYTES, CONTENT_TYPE_JSON, CONTENT_TYPE_OCTET, + error_cause::{BackendError, ClientError, StorageError}, +}; + +use super::{ + StorageState, + bucket::BucketLike, + wire::{ + BlobMetadata, MAX_REQUEST_BODY_SIZE, ONE_MIB, PutBlobTreeRequest, PutBlobTreeResponse, + PutChunkResponse, + }, +}; + +type S = Arc; + +const BODY_READ_TIMEOUT: Duration = 
Duration::from_secs(60); +const BLOB_METADATA_PATH: &str = "blob-metadata"; +const CHUNK_PATH: &str = "chunks"; + +/// Maximum number of chunk fetches kept in flight when streaming a blob +/// download. `buffered(N)` preserves source order, so the response body stays +/// strictly sequential while we prefetch ahead. Bounds peak per-download +/// memory at `CHUNK_DOWNLOAD_PARALLELISM * 1 MiB` (~8 MiB). +const CHUNK_DOWNLOAD_PARALLELISM: usize = 8; + +fn blob_path_owner_prefix(owner: &Principal) -> String { + format!("{BLOB_METADATA_PATH}/{owner}/") +} + +/// S3 key for blob metadata: `blob-metadata/{owner}/{root_hash}`. +fn blob_path(owner: &Principal, root_hash: &str) -> String { + format!("{}{root_hash}", blob_path_owner_prefix(owner)) +} + +fn chunk_path_owner_prefix(owner: &Principal) -> String { + format!("{CHUNK_PATH}/{owner}/") +} + +/// S3 key for a chunk: `chunks/{owner}/{chunk_hash}`. +fn chunk_path(owner: &Principal, chunk_hash: &str) -> String { + format!("{}{chunk_hash}", chunk_path_owner_prefix(owner)) +} + +/// One slice of a blob to download. +/// +/// `start`/`end_cap` are byte offsets *within the chunk*. `end_cap` may exceed +/// the actual chunk length (e.g. `usize::MAX` to mean "to the end of the +/// chunk"); it is clamped against `data.len()` after the chunk is fetched. +struct ChunkPart { + hash: String, + start: usize, + end_cap: usize, +} + +impl ChunkPart { + /// Returns the entire chunk verbatim. + fn full(hash: String) -> Self { + Self { + hash, + start: 0, + end_cap: usize::MAX, + } + } +} + +/// Build an ordered streaming download of `parts`, fetching up to +/// [`CHUNK_DOWNLOAD_PARALLELISM`] chunks in parallel. Each chunk is +/// pre-charged via `charge_chunk_download` before its S3 GET; on success +/// the requested slice is yielded as `Bytes` (O(1) — shares the underlying +/// allocation, no memcpy). +/// +/// `buffered` preserves source order, so the response body is strictly +/// sequential even though fetches overlap. 
+/// +/// We take an owned `Arc` rather than borrowing because the +/// returned stream has `'static` lifetime — axum keeps polling it after this +/// frame is gone. Each concurrent task `Arc::clone`s the state once +/// (a single atomic increment); that is the minimum cost of independently +/// owning shared state across N parallel futures. +fn chunk_download_stream( + state: Arc, + owner: Principal, + parts: Vec, +) -> impl Stream> + Send + 'static { + stream::iter(parts) + .map(move |part| fetch_chunk(state.clone(), owner, part)) + .buffered(CHUNK_DOWNLOAD_PARALLELISM) +} + +/// Charge for and fetch a single chunk slice from S3. +async fn fetch_chunk( + state: Arc, + owner: Principal, + part: ChunkPart, +) -> Result { + state + .connector + .charge_chunk_download(&owner) + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::PermissionDenied, e.to_string()))?; + + let path = chunk_path(&owner, &part.hash); + let data = state + .bucket + .get_object(path) + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))? + .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::NotFound, "chunk not found"))?; + + let end = min(part.end_cap, data.len()); + Ok(bytes::Bytes::from(data).slice(part.start..end)) +} + +// Helpers + +/// Build response headers for a blob/chunk download: Content-Length, Accept-Ranges, Content-Type, +/// plus any custom headers stored in the blob metadata. 
+fn blob_download_headers(content_length: u64, stored_headers: &[String]) -> HeaderMap { + let mut headers = HeaderMap::new(); + headers.insert(header::CONTENT_LENGTH, content_length.into()); + headers.insert(header::ACCEPT_RANGES, ACCEPT_RANGES_BYTES); + headers.insert(header::CONTENT_TYPE, CONTENT_TYPE_OCTET); + apply_stored_headers(&mut headers, stored_headers); + headers +} + +fn apply_stored_headers(headers: &mut HeaderMap, stored: &[String]) { + for pair in stored.chunks(2) { + if let [name, value] = pair { + if let (Ok(hn), Ok(hv)) = ( + name.parse::(), + value.parse::(), + ) { + headers.insert(hn, hv); + } + } + } +} + +fn parse_principal(s: &str) -> Result { + Principal::from_text(s) + .map_err(|e| ClientError::MalformedRequest(format!("invalid owner_id: {e}")).into()) +} + +async fn load_blob_metadata( + bucket: &dyn BucketLike, + owner: &Principal, + blob_hash: &str, +) -> Result { + let path = blob_path(owner, blob_hash); + let data = bucket + .get_object(path) + .await + .map_err(|e| StorageError::from(BackendError::S3(e.to_string())))? + .ok_or_else(|| StorageError::from(ClientError::NotFound("blob")))?; + + serde_json::from_slice::(&data) + .map_err(|e| StorageError::Internal(format!("corrupt blob metadata: {e}"))) +} + +/// Resolve a parsed `Range` header to an inclusive-start/exclusive-end byte +/// range. We only support single-range requests (RFC 9110 §14.2). Multi-range +/// responses would require `multipart/byteranges` which we don't emit. 
+fn resolve_range(range: &HttpRange, total: u64) -> Result, StorageError> { + let unsatisfiable = || StorageError::from(ClientError::RangeNotSatisfiable(total)); + + let mut iter = range.satisfiable_ranges(total); + let first = iter.next().ok_or_else(unsatisfiable)?; + if iter.next().is_some() { + return Err(StorageError::from(ClientError::MalformedRequest( + "only single-range requests are supported".into(), + ))); + } + + let (start_bound, end_bound) = first; + let start = match start_bound { + Bound::Included(n) => n, + Bound::Excluded(n) => n.saturating_add(1), + Bound::Unbounded => 0, + }; + let end = match end_bound { + Bound::Included(n) => n.saturating_add(1), + Bound::Excluded(n) => n, + Bound::Unbounded => total, + }; + + if start >= end || end > total { + return Err(unsatisfiable()); + } + Ok(start..end) +} + +/// Map a byte range to (chunk_index_range, start_offset_in_first_chunk, end_offset_in_last_chunk). +fn range_to_chunk_ranges(range: &Range) -> (Range, usize, usize) { + let cs = ONE_MIB as u64; + let start_chunk = (range.start / cs) as usize; + let end_chunk = range.end.div_ceil(cs) as usize; + let start_offset = (range.start % cs) as usize; + let end_offset = if range.end == 0 { + 0 + } else { + ((range.end.saturating_sub(1) % cs) + 1) as usize + }; + (start_chunk..end_chunk, start_offset, end_offset) +} + +fn check_delete_owner_host( + host: Option<&str>, + allowed_env: Option<&str>, +) -> Result<(), StorageError> { + let Some(allowed_list) = allowed_env else { + return Ok(()); + }; + + let host = host + .ok_or_else(|| StorageError::Forbidden("missing Host header for owner deletion".into()))?; + + let normalize = |h: &str| { + h.split_once(':') + .map(|(h, _)| h.to_lowercase()) + .unwrap_or_else(|| h.to_lowercase()) + }; + + let normalized = normalize(host); + let allowed: Vec<&str> = allowed_list + .split(',') + .map(|s| s.trim()) + .filter(|s| !s.is_empty()) + .collect(); + + if allowed.iter().any(|a| *a == host || *a == normalized) { + Ok(()) 
+ } else { + Err(StorageError::Forbidden(format!( + "host '{host}' is not allowed for owner deletion" + ))) + } +} + +// HEAD /storage/v1/owner/{owner_id}/blob/{blob_hash} +pub async fn head_blob( + State(state): State, + Path((owner_id, blob_hash)): Path<(String, String)>, +) -> Result { + let owner = parse_principal(&owner_id)?; + + state + .connector + .charge_blob_tree_download(&owner) + .await + .map_err(|e| StorageError::from(&e))?; + + let meta = load_blob_metadata(&*state.bucket, &owner, &blob_hash).await?; + + // Per RFC 9110 §9.3.2, HEAD must return the same headers as GET would. + let headers = blob_download_headers(meta.num_blob_bytes, &meta.headers); + Ok((StatusCode::OK, headers).into_response()) +} + +// GET /storage/v1/owner/{owner_id}/blob/{blob_hash} (with Range support) +pub async fn get_blob( + State(state): State, + range_header: Option>, + Path((owner_id, blob_hash)): Path<(String, String)>, +) -> Result { + let owner = parse_principal(&owner_id)?; + + state + .connector + .charge_blob_tree_download(&owner) + .await + .map_err(|e| StorageError::from(&e))?; + + let meta = load_blob_metadata(&*state.bucket, &owner, &blob_hash).await?; + let chunk_hashes: Vec = meta.hash_tree.chunk_hashes().to_vec(); + let total_bytes = meta.num_blob_bytes; + + if let Some(TypedHeader(http_range)) = range_header { + let range = resolve_range(&http_range, total_bytes)?; + let content_length = range.end - range.start; + let (chunk_range, start_offset, end_offset) = range_to_chunk_ranges(&range); + + // Snapshot just the hashes we'll touch; cheap (small set of ~70-char strings). 
+ let needed: Vec = chunk_hashes[chunk_range.clone()].to_vec(); + let last_idx = needed.len().saturating_sub(1); + + let parts: Vec = needed + .into_iter() + .enumerate() + .map(|(i, hash)| ChunkPart { + hash, + start: if i == 0 { start_offset } else { 0 }, + end_cap: if i == last_idx { end_offset } else { usize::MAX }, + }) + .collect(); + + let stream = chunk_download_stream(state.clone(), owner, parts); + + let mut headers = blob_download_headers(content_length, &meta.headers); + let cr = format!("bytes {}-{}/{total_bytes}", range.start, range.end - 1); + headers.insert( + header::CONTENT_RANGE, + cr.parse() + .expect("Content-Range value is always valid ASCII"), + ); + + Ok(( + StatusCode::PARTIAL_CONTENT, + headers, + Body::from_stream(stream), + ) + .into_response()) + } else { + let parts: Vec = chunk_hashes.into_iter().map(ChunkPart::full).collect(); + + let stream = chunk_download_stream(state.clone(), owner, parts); + + let headers = blob_download_headers(total_bytes, &meta.headers); + + Ok((StatusCode::OK, headers, Body::from_stream(stream)).into_response()) + } +} + +// GET /storage/v1/owner/{owner_id}/blob_tree/{blob_hash} +pub async fn get_blob_tree( + State(state): State, + Path((owner_id, blob_hash)): Path<(String, String)>, +) -> Result { + let owner = parse_principal(&owner_id)?; + + state + .connector + .charge_blob_tree_download(&owner) + .await + .map_err(|e| StorageError::from(&e))?; + + let path = blob_path(&owner, &blob_hash); + let data = state + .bucket + .get_object(path) + .await + .map_err(|e| StorageError::from(BackendError::S3(e.to_string())))? 
+ .ok_or_else(|| StorageError::from(ClientError::NotFound("blob")))?; + + Ok(([(header::CONTENT_TYPE, CONTENT_TYPE_JSON)], data).into_response()) +} + +// GET /storage/v1/owner/{owner_id}/chunk/{chunk_hash} +pub async fn get_chunk( + State(state): State, + Path((owner_id, chunk_hash)): Path<(String, String)>, +) -> Result { + let owner = parse_principal(&owner_id)?; + + state + .connector + .charge_chunk_download(&owner) + .await + .map_err(|e| StorageError::from(&e))?; + + let path = chunk_path(&owner, &chunk_hash); + let data = state + .bucket + .get_object(path) + .await + .map_err(|e| StorageError::from(BackendError::S3(e.to_string())))? + .ok_or_else(|| StorageError::from(ClientError::NotFound("chunk")))?; + + Ok(([(header::CONTENT_TYPE, CONTENT_TYPE_OCTET)], data).into_response()) +} + +// PUT /storage/v1/owner/{owner_id}/blob_tree/{blob_hash} (JSON body, with auth) +pub async fn put_blob_tree( + State(state): State, + Path((owner_id, blob_hash)): Path<(String, String)>, + body: Body, +) -> Result { + let owner = parse_principal(&owner_id)?; + + let body_bytes = buffer_body(body, MAX_REQUEST_BODY_SIZE, BODY_READ_TIMEOUT) + .await + .map_err(|e| ClientError::MalformedRequest(e.to_string()))?; + + let request: PutBlobTreeRequest = serde_json::from_slice(&body_bytes) + .map_err(|e| ClientError::MalformedRequest(format!("invalid JSON: {e}")))?; + + if request.owner != owner { + return Err(ClientError::MalformedRequest( + "URL owner_id does not match request body owner".into(), + ) + .into()); + } + + let root_hash = request + .blob_tree + .root_hash() + .ok_or_else(|| ClientError::MalformedRequest("blob tree has no root hash".into()))? 
+ .to_string(); + + if root_hash != blob_hash { + return Err(ClientError::MalformedRequest(format!( + "URL blob_hash {blob_hash} does not match body root_hash {root_hash}" + )) + .into()); + } + + state.ingress_auth.check_put_blob(&request)?; + + state + .connector + .charge_blob_tree_upload(&owner) + .await + .map_err(|e| StorageError::from(&e))?; + + let metadata = BlobMetadata { + hash_tree: request.blob_tree, + num_blob_bytes: request.num_blob_bytes, + headers: request.headers, + }; + + let data = serde_json::to_vec(&metadata).map_err(|e| StorageError::Internal(e.to_string()))?; + + let path = blob_path(&owner, &root_hash); + state + .bucket + .put_object(path, data.into()) + .await + .map_err(|e| BackendError::S3(e.to_string()))?; + + let chunk_hashes = metadata.hash_tree.chunk_hashes(); + let mut existing_chunks = Vec::new(); + let mut chunk_check_errors: usize = 0; + + for hash in chunk_hashes { + let path = chunk_path(&owner, hash); + match state.bucket.object_exists(path).await { + Ok(true) => existing_chunks.push(hash.clone()), + Ok(false) => {} + Err(_) => chunk_check_errors += 1, + } + } + + let response = PutBlobTreeResponse { + status: "blob_tree_accepted".to_string(), + existing_chunks, + chunk_check_errors, + }; + + Ok((StatusCode::OK, Json(response)).into_response()) +} + +// PUT /storage/v1/owner/{owner_id}/blob/{blob_hash}/chunk/{chunk_index} +pub async fn put_chunk( + State(state): State, + Path((owner_id, blob_hash, chunk_index)): Path<(String, String, usize)>, + body: Body, +) -> Result { + let owner = parse_principal(&owner_id)?; + + let body = buffer_body(body, ONE_MIB, BODY_READ_TIMEOUT) + .await + .map_err(|_| ClientError::BodyTooLarge)?; + + state + .connector + .charge_blob_tree_download(&owner) + .await + .map_err(|e| StorageError::from(&e))?; + + let meta = load_blob_metadata(&*state.bucket, &owner, &blob_hash).await?; + let chunk_hashes = meta.hash_tree.chunk_hashes(); + + if chunk_index >= chunk_hashes.len() { + return 
Err(ClientError::MalformedRequest("chunk_index out of range".into()).into()); + } + + let expected_hash = &chunk_hashes[chunk_index]; + + let actual_hash = format!("sha256:{:x}", Sha256::digest(&body)); + if actual_hash != *expected_hash { + return Err(ClientError::MalformedRequest(format!( + "chunk hash mismatch: expected {expected_hash}, got {actual_hash}" + )) + .into()); + } + + if chunk_index + 1 < chunk_hashes.len() && body.len() != ONE_MIB { + return Err( + ClientError::MalformedRequest("non-last chunk must be exactly 1 MiB".into()).into(), + ); + } + + state + .connector + .charge_chunk_upload(&owner) + .await + .map_err(|e| StorageError::from(&e))?; + + let path = chunk_path(&owner, expected_hash); + state + .bucket + .put_object(path, body) + .await + .map_err(|e| BackendError::S3(e.to_string()))?; + + Ok(Json(PutChunkResponse { + status: "chunk_accepted".to_string(), + }) + .into_response()) +} + +// DELETE /storage/v1/owner/{owner_id} +pub async fn delete_owner( + State(state): State, + Host(host): Host, + Path(owner_id): Path, +) -> Result { + let owner = parse_principal(&owner_id)?; + + check_delete_owner_host(Some(&host), state.allowed_delete_owner_hosts.as_deref())?; + + let blob_prefix = blob_path_owner_prefix(&owner); + let chunk_prefix = chunk_path_owner_prefix(&owner); + + let blobs_deleted = delete_all_with_prefix(&*state.bucket, blob_prefix).await?; + let chunks_deleted = delete_all_with_prefix(&*state.bucket, chunk_prefix).await?; + + if blobs_deleted || chunks_deleted { + Ok(( + StatusCode::OK, + format!("deleted all data for owner: {owner}"), + ) + .into_response()) + } else { + Ok(StatusCode::NO_CONTENT.into_response()) + } +} + +async fn delete_all_with_prefix( + bucket: &dyn BucketLike, + prefix: String, +) -> Result { + let mut deleted_any = false; + let mut continuation_token = None; + + loop { + let page = bucket + .list_page(prefix.clone(), continuation_token, Some(1000)) + .await + .map_err(|e| BackendError::S3(e.to_string()))?; + + 
if page.keys.is_empty() { + break; + } + + deleted_any = true; + for key in &page.keys { + bucket + .delete_object(key.clone()) + .await + .map_err(|e| BackendError::S3(e.to_string()))?; + } + + match page.next_continuation_token { + Some(token) => continuation_token = Some(token), + None => break, + } + } + + Ok(deleted_any) +} + +// DELETE /storage/v1/owner/{owner_id}/blob_tree/{blob_hash} — intentionally disabled (405) +pub async fn delete_blob_tree_disabled() -> impl IntoResponse { + ( + StatusCode::METHOD_NOT_ALLOWED, + "blob_tree deletion is disabled", + ) +} diff --git a/src/routing/storage/mod.rs b/src/routing/storage/mod.rs new file mode 100644 index 0000000..e358b43 --- /dev/null +++ b/src/routing/storage/mod.rs @@ -0,0 +1,83 @@ +pub mod auth; +pub mod bucket; +pub mod cashier_client; +pub mod cashier_connector; +pub mod cashier_types; +pub mod handler; +pub mod wire; + +use std::{sync::Arc, time::Duration}; + +use axum::{ + Router, + routing::{delete, get, put}, +}; +use http::HeaderValue; + +use crate::routing::{ + error_cause::{BackendError, StorageError}, + middleware::cors, +}; + +use self::cashier_connector::BillingError; + +pub use self::{ + auth::{IngressAuth, IngressAuthImpl}, + bucket::{AWSBucket, BucketLike, S3Config, S3Flavor}, + cashier_client::CashierClient, + cashier_connector::CashierConnector, +}; + +// Conversion from storage-local `BillingError` into the shared `StorageError`. +// Kept here (rather than in `error_cause.rs`) so `error_cause` stays +// independent of storage-module internals. `IngressAuth` already returns +// `StorageError` directly, so it needs no conversion. 
+ +impl From<&BillingError> for StorageError { + fn from(e: &BillingError) -> Self { + match e { + BillingError::OwnerNotFound => Self::OwnerNotFound, + BillingError::InsufficientBalance => Self::InsufficientBalance, + BillingError::CashierUnavailable(m) => Self::Backend(BackendError::Cashier(m.clone())), + } + } +} + +pub struct StorageState { + pub connector: Arc, + pub bucket: Arc, + pub ingress_auth: Arc, + pub allowed_delete_owner_hosts: Option, +} + +pub fn storage_router( + state: StorageState, + cors_max_age: Duration, + cors_allow_origin: Vec, +) -> Router { + let cors = + cors::layer(cors_max_age, cors_allow_origin).allow_methods(cors::ALLOW_METHODS_STORAGE); + + Router::new() + .route( + "/owner/{owner_id}/blob/{blob_hash}", + get(handler::get_blob).head(handler::head_blob), + ) + .route( + "/owner/{owner_id}/blob_tree/{blob_hash}", + get(handler::get_blob_tree) + .put(handler::put_blob_tree) + .delete(handler::delete_blob_tree_disabled), + ) + .route( + "/owner/{owner_id}/chunk/{chunk_hash}", + get(handler::get_chunk), + ) + .route( + "/owner/{owner_id}/blob/{blob_hash}/chunk/{chunk_index}", + put(handler::put_chunk), + ) + .route("/owner/{owner_id}", delete(handler::delete_owner)) + .layer(cors) + .with_state(Arc::new(state)) +} diff --git a/src/routing/storage/wire.rs b/src/routing/storage/wire.rs new file mode 100644 index 0000000..b4fa2ae --- /dev/null +++ b/src/routing/storage/wire.rs @@ -0,0 +1,117 @@ +//! Wire-format types for the storage API. +//! +//! These are the serialized representations exchanged with external parties: +//! +//! * HTTP request/response bodies (JSON) for the `/storage/v1/` endpoints, +//! consumed by Caffeine frontends and the `icfs` CLI. +//! * [`BlobMetadata`] — JSON blob stored in S3 at +//! `blob-metadata/{owner}/{root_hash}` and read back by `get_blob`. +//! * [`OwnerEgressSignature`] — Candid payload embedded in an IC egress +//! certificate, verified by [`crate::routing::storage::auth`]. +//! +//! 
These types must stay serde/Candid-compatible with `object-storage`'s +//! `icfs-common` crate (`BlobHashNHeaders`, `BlobHashTree`, etc.). Any +//! schema change here is a breaking API change — coordinate with clients +//! before touching them. + +use candid::CandidType; +use serde::{Deserialize, Serialize}; + +pub const ONE_MIB: usize = 1024 * 1024; +pub const MAX_REQUEST_BODY_SIZE: usize = 10 * ONE_MIB; + +/// Blob metadata stored in S3 at `blob-metadata/{owner}/{root_hash}`. +/// JSON-compatible with object-storage's `BlobHashNHeaders`. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlobMetadata { + pub hash_tree: BlobHashTree, + pub num_blob_bytes: u64, + #[serde(default)] + pub headers: Vec, +} + +/// Merkle tree over chunk hashes, tagged by tree type. +/// Compatible with object-storage's `BlobHashTree` enum. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "tree_type")] +pub enum BlobHashTree { + BMT { + tree: Option, + chunk_hashes: Vec, + #[serde(default)] + headers: Vec, + }, + DSBMTWH { + tree: MerkleTreeNode, + chunk_hashes: Vec, + #[serde(default)] + headers: Vec, + }, +} + +impl BlobHashTree { + pub fn chunk_hashes(&self) -> &[String] { + match self { + Self::BMT { chunk_hashes, .. } | Self::DSBMTWH { chunk_hashes, .. } => chunk_hashes, + } + } + + pub fn root_hash(&self) -> Option<&str> { + match self { + Self::BMT { tree, .. } => tree.as_ref().map(|t| t.hash.as_str()), + Self::DSBMTWH { tree, .. } => Some(tree.hash.as_str()), + } + } +} + +/// A node in the Merkle tree. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MerkleTreeNode { + pub hash: String, + pub left: Option>, + pub right: Option>, +} + +/// Authorization for storage gateway operations. +#[derive(Serialize, Deserialize, Debug, Default)] +pub enum StorageGatewayAuthorization { + #[default] + None, + OwnerEgressSignature(Vec), +} + +/// Parsed payload of an OwnerEgressSignature embedded in a certificate tree. 
+#[derive(CandidType, Deserialize, Debug, Eq, PartialEq)] +pub struct OwnerEgressSignature { + pub method: String, + pub blob_hash: String, +} + +/// PUT blob_tree request body. +#[derive(Debug, Serialize, Deserialize)] +pub struct PutBlobTreeRequest { + pub blob_tree: BlobHashTree, + pub owner: candid::Principal, + pub num_blob_bytes: u64, + #[serde(default)] + pub headers: Vec, + #[serde(default)] + pub auth: StorageGatewayAuthorization, +} + +/// PUT blob_tree response. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PutBlobTreeResponse { + #[serde(default)] + pub status: String, + #[serde(default)] + pub existing_chunks: Vec, + #[serde(default)] + pub chunk_check_errors: usize, +} + +/// PUT chunk response. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PutChunkResponse { + pub status: String, +} diff --git a/src/test.rs b/src/test.rs index 98c1965..b1a1a53 100644 --- a/src/test.rs +++ b/src/test.rs @@ -160,6 +160,7 @@ pub async fn setup_test_router(tasks: &mut TaskManager) -> (Router, Vec) None, None, Arc::new(arc_swap::ArcSwapOption::::empty()), + None, ) .await .unwrap();