diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 6f3cebb5..178bb647 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -106,3 +106,48 @@ jobs: env: BASE_URL: http://localhost:8787 BOOTSTRAP_MTC_LOG_NAME: dev2 + + integration-ietf-mtc: + name: IETF MTC Worker Integration Tests + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install Node.js + uses: actions/setup-node@v4 + with: + node-version: "22" + + - name: Install wasm-pack + run: cargo install wasm-pack@0.13.1 --locked + + - name: Install worker-build + run: cargo install worker-build@0.7.5 --locked + + - name: Build ietf_mtc_worker (dev environment) + working-directory: crates/ietf_mtc_worker + run: DEPLOY_ENV=dev worker-build --release + + - name: Start wrangler dev + working-directory: crates/ietf_mtc_worker + run: npx wrangler@4.80.0 -e=dev dev --port 8787 --persist-to .wrangler/state & + + - name: Wait for wrangler dev to be ready + run: | + for i in $(seq 1 30); do + if curl -sf http://localhost:8787/logs/dev2/metadata > /dev/null 2>&1; then + echo "wrangler dev is ready" + exit 0 + fi + echo "Waiting for wrangler dev... attempt $i/30" + sleep 2 + done + echo "wrangler dev failed to start in time" + exit 1 + + - name: Run IETF MTC integration tests + run: cargo test -p integration_tests --test ietf_mtc_api --verbose + env: + BASE_URL: http://localhost:8787 + IETF_MTC_LOG_NAME: dev2 diff --git a/AGENTS.md b/AGENTS.md index 1cc2211a..f2193c7a 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -47,6 +47,13 @@ cargo test -p integration_tests --test bootstrap_mtc_api # Override defaults: BASE_URL=http://localhost:8787 BOOTSTRAP_MTC_LOG_NAME=dev2 cargo test -p integration_tests --test bootstrap_mtc_api +# IETF MTC worker tests — from crates/ietf_mtc_worker/: +npx wrangler -e=dev dev & +# From workspace root: +cargo test -p integration_tests --test ietf_mtc_api +# Override defaults: +BASE_URL=http://localhost:8787 IETF_MTC_LOG_NAME=dev2 cargo test -p integration_tests --test ietf_mtc_api + # Worker deploy npx wrangler -e=${ENV} deploy npx wrangler -e=${ENV} tail diff --git a/Cargo.lock b/Cargo.lock index 81c7530a..b29e6df0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,9 +95,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "base16ct" -version = "0.2.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +checksum = "fd307490d624467aa6f74b0eabb77633d1f758a7b25f12bceb0b22e08d9726f6" [[package]] name = "base64" @@ -158,11 +158,11 @@ checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "block-buffer" -version = "0.10.4" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +checksum = "cdd35008169921d80bc60d3d0ab416eecb028c4cd653352907921d95084790be" dependencies = [ - "generic-array", + "hybrid-array", ] [[package]] @@ -198,7 +198,7 @@ dependencies = [ "ed25519-dalek", "futures-executor", "generic_log_worker", - "getrandom 0.2.16", + "getrandom 0.4.2", "itertools 0.14.0", "jsonschema", "log", @@ -285,6 +285,17 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "chacha20" +version = "0.10.0" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601" +dependencies = [ + "cfg-if", + "cpufeatures 0.3.0", + "rand_core", +] + [[package]] name = "chrono" version = "0.4.42" @@ -351,6 +362,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +[[package]] +name = "cmov" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f88a43d011fc4a6876cb7344703e297c71dda42494fee094d5f7c76bf13f746" + [[package]] name = "console_error_panic_hook" version = "0.1.7" @@ -373,9 +390,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.6" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +checksum = "a6ef517f0926dd24a1582492c791b6a4818a4d94e789a334894aa15b0d12f55c" [[package]] name = "core-foundation" @@ -393,6 +410,12 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "cpubits" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ef0c543070d296ea414df2dd7625d1b24866ce206709d8a4a424f28377f5861" + [[package]] name = "cpufeatures" version = "0.2.17" @@ -402,6 +425,15 @@ dependencies = [ "libc", ] +[[package]] +name = "cpufeatures" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201" +dependencies = [ + "libc", +] + [[package]] name = "criterion" version = "0.5.1" @@ -471,24 +503,41 @@ checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-bigint" -version = "0.5.5" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +checksum = "42a0d26b245348befa0c121944541476763dcc46ede886c88f9d12e1697d27c3" dependencies = [ - "generic-array", + "cpubits", + "ctutils", + "getrandom 0.4.2", + "hybrid-array", + "num-traits", "rand_core", + "serdect", "subtle", "zeroize", ] [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "77727bb15fa921304124b128af125e7e3b968275d1b108b379190264f4423710" dependencies = [ - "generic-array", - "typenum", + "getrandom 0.4.2", + "hybrid-array", + "rand_core", +] + +[[package]] +name = "crypto-primes" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21f41f23de7d24cdbda7f0c4d9c0351f99a4ceb258ef30e5c1927af8987ffe5a" +dependencies = [ + "crypto-bigint", + "libm", + "rand_core", ] [[package]] @@ -524,7 +573,7 @@ dependencies = [ "ed25519-dalek", "futures-executor", "generic_log_worker", - "getrandom 0.2.16", + "getrandom 0.4.2", "itertools 0.14.0", "jsonschema", "log", @@ -552,14 +601,24 @@ dependencies = [ "serde", ] +[[package]] +name = "ctutils" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5515a3834141de9eafb9717ad39eea8247b5674e6066c404e8c4b365d2a29e" +dependencies = [ + "cmov", + "subtle", +] + [[package]] name 
= "curve25519-dalek" -version = "4.1.3" +version = "5.0.0-pre.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +checksum = "335f1947f241137a14106b6f5acc5918a5ede29c9d71d3f2cb1678d5075d9fc3" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "curve25519-dalek-derive", "digest", "fiat-crypto", @@ -616,8 +675,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.10" -source = "git+https://github.com/lukevalenta/formats?branch=relative-oid-tag-v0.7.10#895775339c03f7dc42529c131d578751406870a4" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71fd89660b2dc699704064e59e9dba0147b903e85319429e131620d022be411b" dependencies = [ "const-oid", "der_derive", @@ -628,9 +688,9 @@ dependencies = [ [[package]] name = "der_derive" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" +checksum = "59600e2c2d636fde9b65e99cc6445ac770c63d3628195ff39932b8d6d7409903" dependencies = [ "proc-macro2", "quote", @@ -649,14 +709,14 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.7" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +checksum = "4850db49bf08e663084f7fb5c87d202ef91a3907271aff24a94eb97ff039153c" dependencies = [ "block-buffer", "const-oid", "crypto-common", - "subtle", + "ctutils", ] [[package]] @@ -670,22 +730,6 @@ dependencies = [ "syn", ] -[[package]] -name = "dsa" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48bc224a9084ad760195584ce5abb3c2c34a225fa312a128ad245a6b412b7689" -dependencies = [ - "digest", - "num-bigint-dig", - "num-traits", - "pkcs8", - "rfc6979", - "sha2", - "signature", - "zeroize", -] - [[package]] name = "dyn-clone" version = "1.0.20" @@ -694,9 +738,9 @@ checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "ecdsa" -version = "0.16.9" +version = "0.17.0-rc.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +checksum = "91bbdd377139884fafcad8dc43a760a3e1e681aa26db910257fa6535b70e1829" dependencies = [ "der", "digest", @@ -704,13 +748,14 @@ dependencies = [ "rfc6979", "signature", "spki", + "zeroize", ] [[package]] name = "ed25519" -version = "2.2.3" +version = "3.0.0-rc.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +checksum = "c6e914c7c52decb085cea910552e24c63ac019e3ab8bf001ff736da9a9d9d890" dependencies = [ "pkcs8", "signature", @@ -718,15 +763,16 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.2.0" +version = "3.0.0-pre.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +checksum = "053618a4c3d3bc24f188aa660ae75a46eeab74ef07fb415c61431e5e7cd4749b" dependencies = [ "curve25519-dalek", "ed25519", "rand_core", "serde", "sha2", + "signature", "subtle", "zeroize", ] @@ -739,19 +785,21 @@ checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "elliptic-curve" -version = "0.13.8" +version = "0.14.0-rc.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +checksum = "7d7a0bfd012613a7bcfe02cbfccf2b846e9ef9e1bccb641c48d461253cfb034d" dependencies = [ "base16ct", "crypto-bigint", + "crypto-common", "digest", - "ff", - "generic-array", - "group", + "hybrid-array", + "once_cell", "pem-rfc7468", "pkcs8", "rand_core", + "rustcrypto-ff", + "rustcrypto-group", "sec1", "subtle", "zeroize", @@ -788,7 +836,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -808,21 +856,11 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" -[[package]] -name = "ff" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" -dependencies = [ - "rand_core", - "subtle", -] - [[package]] name = "fiat-crypto" -version = "0.2.9" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +checksum = "64cd1e32ddd350061ae6edb1b082d7c54915b5c672c389143b9a63403a109f24" [[package]] name = "find-msvc-tools" @@ -966,17 +1004,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.14.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" -dependencies = [ - "typenum", - "version_check", - "zeroize", -] - [[package]] name = "generic_log_worker" version = "0.2.0" @@ -1017,10 +1044,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi", - "wasm-bindgen", ] [[package]] @@ -1031,27 +1056,32 @@ checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "libc", - "r-efi", + "r-efi 5.3.0", "wasip2", ] [[package]] -name = "glam" -version = "0.30.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd47b05dddf0005d850e5644cae7f2b14ac3df487979dbfff3b56f20b1a6ae46" - -[[package]] -name = "group" -version = "0.13.0" +name = "getrandom" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" dependencies = [ - "ff", + "cfg-if", + "js-sys", + "libc", + "r-efi 6.0.0", "rand_core", - "subtle", + "wasip2", + "wasip3", + "wasm-bindgen", ] +[[package]] +name = "glam" +version = "0.30.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd47b05dddf0005d850e5644cae7f2b14ac3df487979dbfff3b56f20b1a6ae46" + [[package]] name = "h2" version = "0.4.13" @@ -1105,6 +1135,12 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + 
[[package]] name = "hermit-abi" version = "0.5.2" @@ -1119,9 +1155,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hmac" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +checksum = "6303bc9732ae41b04cb554b844a762b4115a61bfaa81e3e83050991eeb56863f" dependencies = [ "digest", ] @@ -1166,6 +1202,17 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" +[[package]] +name = "hybrid-array" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3944cf8cf766b40e2a1a333ee5e9b563f854d5fa49d6a8ca2764e97c6eddb214" +dependencies = [ + "subtle", + "typenum", + "zeroize", +] + [[package]] name = "hyper" version = "1.7.0" @@ -1351,6 +1398,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -1378,6 +1431,70 @@ dependencies = [ "icu_properties", ] +[[package]] +name = "ietf_mtc_api" +version = "0.2.0" +dependencies = [ + "base64", + "byteorder", + "der", + "ed25519-dalek", + "generic_log_worker", + "length_prefixed", + "ml-dsa", + "pkcs8", + "rand", + "serde", + "serde_json", + "serde_with", + "sha2", + "signature", + "signed_note", + "thiserror 2.0.17", + "tlog_tiles", + "x509-cert", +] + +[[package]] +name = "ietf_mtc_worker" +version = "0.2.0" +dependencies = [ + "base64ct", + "chrono", + "der", + "ed25519-dalek", + "futures-executor", + "generic_log_worker", + "getrandom 0.4.2", + "ietf_mtc_api", + "ietf_mtc_worker_config", + "itertools 0.14.0", + "jsonschema", + "log", + "ml-dsa", + "p256", + "parking_lot", + "pkcs8", + "rand", + "serde", + "serde_json", + "serde_with", + "signed_note", + "tlog_tiles", + "tokio", + "url", + "worker", + "x509-cert", +] + +[[package]] +name = "ietf_mtc_worker_config" +version = "0.2.0" +dependencies = [ + "ietf_mtc_api", + "serde", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -1411,12 +1528,16 @@ dependencies = [ "byteorder", "chrono", "const-oid", + "crypto-common", "der", "ed25519-dalek", "hex", + "ietf_mtc_api", "length_prefixed", "log", + "ml-dsa", "p256", + "pkcs8", "rand", "reqwest", "sct_validator", @@ -1424,12 +1545,13 @@ dependencies = [ "serde_json", "serde_with", "sha2", + "signature", "signed_note", + "spki", "static_ct_api", "tlog_tiles", "tokio", "x509-cert", - "x509-verify", "x509_util", ] @@ -1532,15 +1654,13 @@ dependencies = [ ] [[package]] -name = "k256" -version = "0.13.4" +name = "keccak" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +checksum = "9e24a010dd405bd7ed803e5253182815b41bf2e6a80cc3bfc066658e03a198aa" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", - "sha2", + "cpufeatures 0.3.0", ] [[package]] @@ -1548,9 +1668,12 @@ name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" -dependencies = [ - "spin", -] + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "length_prefixed" @@ -1577,9 +1700,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "linux-raw-sys" @@ -1614,25 +1737,6 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" -[[package]] -name = "md-5" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" -dependencies = [ - "cfg-if", - "digest", -] - -[[package]] -name = "md2" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4f0f3ed25ff4f8d8d102288d92f900efc202661c884cf67dfe4f0d07c43d1f" -dependencies = [ - "digest", -] - [[package]] name = "memchr" version = "2.7.6" @@ -1656,6 +1760,32 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "ml-dsa" +version = "0.1.0-rc.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5b2bb0ad6fa2b40396775bd56f51345171490fef993f46f91a876ecdbdaea55" +dependencies = [ + "const-oid", + "ctutils", + "hybrid-array", + "module-lattice", + "pkcs8", + "rand_core", + "sha3", + "signature", +] + +[[package]] +name = "module-lattice" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "164eb3faeaecbd14b0b2a917c1b4d0c035097a9c559b0bed85c2cdd032bc8faa" +dependencies = [ + "hybrid-array", + "num-traits", +] + [[package]] name = "native-tls" version = "0.2.15" @@ -1697,23 +1827,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-bigint-dig" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" -dependencies = [ - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand", - "smallvec", - "zeroize", -] - [[package]] name = "num-cmp" version = "0.1.0" @@ -1773,7 +1886,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", - "libm", ] [[package]] @@ -1838,63 +1950,43 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" -[[package]] -name = "p192" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b0533bc6c238f2669aab8db75ae52879dc74e88d6bd3685bd4022a00fa85cd2" -dependencies = [ - "ecdsa", - "elliptic-curve", - "primeorder", - "sec1", -] - -[[package]] -name = "p224" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30c06436d66652bc2f01ade021592c80a2aad401570a18aa18b82e440d2b9aa1" -dependencies = [ - "ecdsa", - "elliptic-curve", - "primeorder", - "sha2", -] - [[package]] name = "p256" -version = "0.13.2" +version = "0.14.0-rc.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +checksum = "44f0a10fe314869359cb2901342b045f4e5a962ef9febc006f03d2a8c848fe4c" dependencies = [ "ecdsa", "elliptic-curve", + "primefield", "primeorder", "sha2", ] [[package]] name = "p384" -version = "0.13.1" +version = "0.14.0-rc.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" +checksum = "b079e66810c55ab3d6ba424e056dc4aefcdb8046c8c3f3816142edbdd7af7721" dependencies = [ "ecdsa", "elliptic-curve", + "fiat-crypto", + "primefield", "primeorder", "sha2", ] [[package]] name = "p521" -version = "0.13.3" +version = "0.14.0-rc.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc9e2161f1f215afdfce23677034ae137bbd45016a880c2eb3ba8eb95f085b2" +checksum = "9eecc34c4c6e6596d5271fecf90ac4f16593fa198e77282214d0c22736aa9266" dependencies = [ "base16ct", "ecdsa", "elliptic-curve", + "primefield", "primeorder", "sha2", ] @@ -1924,9 +2016,9 @@ dependencies = [ [[package]] name = "pem-rfc7468" -version = "0.7.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +checksum = "a6305423e0e7738146434843d1694d621cce767262b2a86910beab705e4493d9" dependencies = [ "base64ct", ] @@ -1971,20 +2063,19 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs1" -version = "0.7.5" +version = "0.8.0-rc.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +checksum = "986d2e952779af96ea048f160fd9194e1751b4faea78bcf3ceb456efe008088e" dependencies = [ "der", - "pkcs8", "spki", ] [[package]] name = "pkcs8" -version = "0.10.2" +version = "0.11.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +checksum = "12922b6296c06eb741b02d7b5161e3aaa22864af38dfa025a1a3ba3f68c84577" dependencies = [ "der", "spki", @@ -2040,19 +2131,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] -name = "ppv-lite86" -version = "0.2.21" +name = "prettyplease" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ - "zerocopy", + "proc-macro2", + "syn", +] + +[[package]] +name = "primefield" +version = "0.14.0-rc.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6543f5eec854fbf74ba5ef651fbdc9408919b47c3e1526623687135c16d12e9" +dependencies = [ + "crypto-bigint", + "crypto-common", + "rand_core", + "rustcrypto-ff", + "subtle", + "zeroize", ] [[package]] name = "primeorder" -version = "0.13.6" +version = "0.14.0-rc.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +checksum = "569d9ad6ef822bb0322c7e7d84e5e286244050bd5246cac4c013535ae91c2c90" dependencies = [ "elliptic-curve", ] @@ -2117,34 +2223,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] -name = "rand" 
-version = "0.8.5" +name = "r-efi" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" [[package]] -name = "rand_chacha" -version = "0.3.1" +name = "rand" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" dependencies = [ - "ppv-lite86", + "chacha20", + "getrandom 0.4.2", "rand_core", ] [[package]] name = "rand_core" -version = "0.6.4" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.16", -] +checksum = "0c8d0fd677905edcbeedbf2edb6494d676f0e98d54d5cf9bda0b061cb8fb8aba" [[package]] name = "rayon" @@ -2282,9 +2381,9 @@ dependencies = [ [[package]] name = "rfc6979" -version = "0.4.0" +version = "0.5.0-rc.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +checksum = "23a3127ee32baec36af75b4107082d9bd823501ec14a4e016be4b6b37faa74ae" dependencies = [ "hmac", "subtle", @@ -2306,21 +2405,20 @@ dependencies = [ [[package]] name = "rsa" -version = "0.9.8" +version = "0.10.0-rc.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" +checksum = "87ed3e93fc7e473e464b9726f4759659e72bc8665e4b8ea227547024f416d905" dependencies = [ "const-oid", + "crypto-bigint", + "crypto-primes", "digest", - "num-bigint-dig", - "num-integer", - "num-traits", "pkcs1", "pkcs8", "rand_core", + "sha2", "signature", "spki", - "subtle", "zeroize", ] @@ -2333,6 +2431,27 @@ dependencies = [ "semver", ] +[[package]] +name = "rustcrypto-ff" +version = "0.14.0-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd2a8adb347447693cd2ba0d218c4b66c62da9b0a5672b17b981e4291ec65ff6" +dependencies = [ + "rand_core", + "subtle", +] + +[[package]] +name = "rustcrypto-group" +version = "0.14.0-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "369f9b61aa45933c062c9f6b5c3c50ab710687eca83dd3802653b140b43f85ed" +dependencies = [ + "rand_core", + "rustcrypto-ff", + "subtle", +] + [[package]] name = "rustix" version = "1.1.3" @@ -2343,7 +2462,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -2462,14 +2581,14 @@ dependencies = [ [[package]] name = "sec1" -version = "0.7.3" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +checksum = "d56d437c2f19203ce5f7122e507831de96f3d2d4d3be5af44a0b0a09d8a80e4d" dependencies = [ "base16ct", + "ctutils", "der", - "generic-array", - "pkcs8", + "hybrid-array", "subtle", "zeroize", ] @@ -2610,26 +2729,46 @@ dependencies = [ "syn", ] +[[package]] +name = "serdect" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9af4a3e75ebd5599b30d4de5768e00b5095d518a79fefc3ecbaf77e665d1ec06" +dependencies = [ + "base16ct", + "serde", +] + [[package]] name = "sha1" 
-version = "0.10.6" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +checksum = "aacc4cc499359472b4abe1bf11d0b12e688af9a805fa5e3016f9a386dc2d0214" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.3.0", "digest", ] [[package]] name = "sha2" -version = "0.10.9" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +checksum = "446ba717509524cb3f22f17ecc096f10f4822d76ab5c0b9822c5f9c284e825f4" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.3.0", + "digest", +] + +[[package]] +name = "sha3" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be176f1a57ce4e3d31c1a166222d9768de5954f811601fb7ca06fc8203905ce1" +dependencies = [ "digest", + "keccak", ] [[package]] @@ -2650,9 +2789,9 @@ dependencies = [ [[package]] name = "signature" -version = "2.2.0" +version = "3.0.0-rc.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +checksum = "7f1880df446116126965eeec169136b2e0251dba37c6223bcc819569550edea3" dependencies = [ "digest", "rand_core", @@ -2704,17 +2843,11 @@ dependencies = [ "windows-sys 0.60.2", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - [[package]] name = "spki" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +checksum = "1d9efca8738c78ee9484207732f728b1ef517bbb1833d6fc0879ca898a522f6f" dependencies = [ "base64ct", "der", @@ -2742,10 +2875,10 @@ dependencies = [ "sha2", "signature", "signed_note", + "spki", "thiserror 2.0.17", "tlog_tiles", "x509-cert", - "x509-verify", "x509_util", ] @@ -2823,7 +2956,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -2947,7 +3080,7 @@ name = "tlog_tiles_wasm" version = "0.2.0" dependencies = [ "console_error_panic_hook", - "getrandom 0.2.16", + "getrandom 0.4.2", "tlog_tiles", "wasm-bindgen", ] @@ -3116,6 +3249,12 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + [[package]] name = "untrusted" version = "0.9.0" @@ -3210,7 +3349,16 @@ version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen", + "wit-bindgen 0.46.0", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen 0.51.0", ] [[package]] @@ -3272,6 +3420,28 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap 2.12.0", + "wasm-encoder", + "wasmparser", +] + [[package]] name = "wasm-streams" version = "0.4.2" @@ -3285,6 +3455,18 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap 2.12.0", + "semver", +] + [[package]] name = "web-sys" version = "0.3.85" @@ -3560,6 +3742,94 @@ version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.12.0", + "prettyplease", + "syn", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap 2.12.0", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 2.12.0", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + [[package]] name = "worker" version = "0.7.4" @@ -3626,9 +3896,9 @@ checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "x509-cert" -version = "0.2.5" +version = "0.3.0-rc.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" +checksum = "1e21aad3a769f25f3d2d0cbf30ea8b50a1d602354bd6ab687fad112821608ba6" dependencies = 
[ "const-oid", "der", @@ -3639,55 +3909,22 @@ dependencies = [ ] [[package]] -name = "x509-ocsp" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e54e695a31f0fecb826cf59ae2093c941d7ef932a1f8508185dd23b29ce2e2e" -dependencies = [ - "const-oid", - "der", - "spki", - "x509-cert", -] - -[[package]] -name = "x509-verify" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43a49bf845cd2f3aff9603a4276409dbf2b8fa4454d3e9501bf5b0028342964" +name = "x509_util" +version = "0.2.0" dependencies = [ + "chrono", "const-oid", "der", - "dsa", - "ecdsa", - "ed25519-dalek", - "k256", - "md-5", - "md2", - "p192", - "p224", "p256", "p384", "p521", + "pkcs8", "rsa", - "sha1", "sha2", "signature", "spki", - "x509-cert", - "x509-ocsp", -] - -[[package]] -name = "x509_util" -version = "0.2.0" -dependencies = [ - "chrono", - "der", - "sha2", "thiserror 2.0.17", "x509-cert", - "x509-verify", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 652e8a92..6e016cee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,8 @@ members = [ "crates/bootstrap_mtc_worker", "crates/ct_worker", "crates/generic_log_worker", + "crates/ietf_mtc_api", + "crates/ietf_mtc_worker", "crates/integration_tests", "crates/length_prefixed", "crates/sct_validator", @@ -25,11 +27,13 @@ members = [ # wrangler dev instance and will hang if run without one. # Run explicitly with: cargo test -p integration_tests --test static_ct_api default-members = [ + "crates/bootstrap_mtc_api", + "crates/bootstrap_mtc_worker", "crates/ct_worker", "crates/generic_log_worker", + "crates/ietf_mtc_api", + "crates/ietf_mtc_worker", "crates/length_prefixed", - "crates/bootstrap_mtc_api", - "crates/bootstrap_mtc_worker", "crates/sct_validator", "crates/signed_note", "crates/signed_note_wasm", @@ -74,11 +78,12 @@ console_log = { version = "1.0" } criterion = { version = "0.5", features = ["html_reports"] } csv = "1.3.1" generic_log_worker = { path = "crates/generic_log_worker", version = "0.2.0" } -der = "0.7.10" -ed25519-dalek = { version = "2.1.1", features = ["pem"] } +der = { version = "0.8", features = ["oid"] } +ed25519-dalek = { version = "3.0.0-pre.6", features = ["pem"] } +ml-dsa = { version = "0.1.0-rc.8", features = ["pkcs8"] } futures-executor = "0.3.31" futures-util = "0.3.31" -getrandom = { version = "0.2", features = ["js"] } +getrandom = { version = "0.4", features = ["wasm_js"] } hex = "0.4" itertools = "0.14.0" jsonschema = "0.30" @@ -86,18 +91,21 @@ length_prefixed = { path = "crates/length_prefixed" } libfuzzer-sys = "0.4" log = { version = "0.4" } bootstrap_mtc_api = { version = "0.2.0", path = "crates/bootstrap_mtc_api" } -p256 = { version = "0.13", features = ["ecdsa"] } +ietf_mtc_api = { version = "0.2.0", path = "crates/ietf_mtc_api" } +p256 = { version = "0.14.0-rc.8", features = ["ecdsa"] } +p384 = { version = "0.14.0-rc.8", features = ["ecdsa"] } +p521 = { version = "0.14.0-rc.8", features = ["ecdsa"] } parking_lot = "0.12" prometheus = "0.14" -rand = "0.8.5" -rand_core = "0.6.4" +rand = "0.10" +rand_core = "0.10" serde = { version = "1.0", features = ["derive"] } serde-wasm-bindgen = "0.6.5" serde_bytes = "0.11" serde_json = "1.0" serde_with = { version = "3.9.0", features = ["base64"] } -sha2 = "0.10" -signature = "2.2.0" +sha2 = "0.11" +signature = "3.0.0-rc.10" signed_note = { path = "crates/signed_note", version = "0.2.0" } static_ct_api = { path = "crates/static_ct_api", version = "0.2.0" } thiserror = "2.0" @@ -107,31 +115,15 @@ 
 reqwest = { version = "0.12", features = ["json"] }
 url = "2.2"
 wasm-bindgen = "0.2"
 worker = "0.7.4"
-x509-cert = "0.2.5"
-x509-verify = { version = "0.4.4", features = [
-    "md2",
-    "md5",
-    "sha1",
-    "dsa",
-    "rsa",
-    "k256",
-    "p192",
-    "p224",
-    "p256",
-    "p384",
-    "ecdsa",
-    "ed25519",
-    "x509",
-    "pem",
-] }
+x509-cert = "0.3.0-rc.4"
+
 x509_util = { path = "crates/x509_util" }
 sct_validator = { path = "crates/sct_validator" }

 # sct_validator dependencies
-rsa = { version = "0.9", default-features = false, features = ["sha2"] }
+rsa = { version = "0.10.0-rc.17", default-features = false, features = ["sha2", "encoding"] }
 hashbrown = "0.15"
-spki = "0.7"
-const-oid = "0.9.6"
+spki = "0.8"
+const-oid = "0.10"
+pkcs8 = { version = "0.11.0-rc.11", features = ["pem"] }
-
-[patch.crates-io]
-der = { git = "https://github.com/lukevalenta/formats", branch = "relative-oid-tag-v0.7.10" }
diff --git a/crates/bootstrap_mtc_api/README.md b/crates/bootstrap_mtc_api/README.md
index 00ff79ce..d0688f19 100644
--- a/crates/bootstrap_mtc_api/README.md
+++ b/crates/bootstrap_mtc_api/README.md
@@ -19,7 +19,8 @@ This crate implements the bootstrap-specific protocol layer on top of the
 shared format.

 This crate is intentionally frozen at the bootstrap protocol version and will not
-be updated to track the IETF draft.
+be updated to track the IETF draft. For the current IETF draft implementation,
+see [`ietf_mtc_api`](../ietf_mtc_api/).

 ## License

diff --git a/crates/bootstrap_mtc_api/src/cosigner.rs b/crates/bootstrap_mtc_api/src/cosigner.rs
index 994a1e54..7f4410de 100644
--- a/crates/bootstrap_mtc_api/src/cosigner.rs
+++ b/crates/bootstrap_mtc_api/src/cosigner.rs
@@ -242,14 +242,11 @@ mod tests {
     use tlog_tiles::{open_checkpoint, record_hash, TreeWithTimestamp};

     use super::*;
-    use rand::rngs::OsRng;
     use signed_note::VerifierList;
     use std::str::FromStr;

     #[test]
     fn test_cosignature_v1_sign_verify() {
-        let mut rng = OsRng;
-
         let origin = "example.com/origin";
         let timestamp = 100;
         let tree_size = 4;
@@ -257,14 +254,16 @@ mod tests {
         // Make a tree head and sign it
         let tree = TreeWithTimestamp::new(tree_size, record_hash(b"hello world"), timestamp);
         let signer = {
-            let sk = Ed25519SigningKey::generate(&mut rng);
+            let sk = Ed25519SigningKey::generate(&mut rand::rng());
             MtcCosigner::new_checkpoint(
                 TrustAnchorID::from_str("1.2.3").unwrap(),
                 TrustAnchorID::from_str("4.5.6").unwrap(),
                 sk,
             )
         };
-        let checkpoint = tree.sign(origin, &[], &[&signer], &mut rng).unwrap();
+        let checkpoint = tree
+            .sign(origin, &[], &[&signer], &mut rand::rng())
+            .unwrap();

         // Now verify the signed checkpoint
         let verifier = signer.verifier();
diff --git a/crates/bootstrap_mtc_api/src/lib.rs b/crates/bootstrap_mtc_api/src/lib.rs
index d8afc43a..f4d2538d 100644
--- a/crates/bootstrap_mtc_api/src/lib.rs
+++ b/crates/bootstrap_mtc_api/src/lib.rs
@@ -174,6 +174,16 @@ impl LogEntry for BootstrapMtcLogEntry {
     const REQUIRE_CHECKPOINT_TIMESTAMP: bool = false;
     type Pending = BootstrapMtcPendingLogEntry;
     type ParseError = MtcError;
+    type Metadata = SequenceMetadata;
+
+    fn make_metadata(
+        leaf_index: LeafIndex,
+        timestamp: UnixTimestamp,
+        _old_tree_size: u64,
+        _new_tree_size: u64,
+    ) -> Self::Metadata {
+        (leaf_index, timestamp)
+    }

     fn initial_entry() -> Option<Self::Pending> {
         Some(Self::Pending {
@@ -184,7 +194,7 @@ impl LogEntry for BootstrapMtcLogEntry {
         })
     }

-    fn new(pending: Self::Pending, metadata: SequenceMetadata) -> Self {
+    fn new(pending: Self::Pending, metadata: Self::Metadata) -> Self {
         Self(TlogTilesLogEntry::new(pending.entry, metadata))
     }
 }
@@ -214,6 +224,11 @@ pub fn serialize_signatureless_cert(
     subtree: &Subtree,
     inclusion_proof: Proof,
 ) -> Result<Vec<u8>, MtcError> {
+    use der::{
+        asn1::{ContextSpecific, ContextSpecificRef},
+        TagMode, TagNumber,
+    };
+
     let entry = match MerkleTreeCertEntry::decode(&log_entry.0.inner.data)? {
         MerkleTreeCertEntry::TbsCertEntry(entry) => entry,
         MerkleTreeCertEntry::NullEntry => {
@@ -225,37 +240,95 @@ pub fn serialize_signatureless_cert(
     if spki_hash != entry.subject_public_key_info_hash {
         return Err(MtcError::Dynamic("spki hash mismatch".to_string()));
     }
-    let signature_algorithm = AlgorithmIdentifier {
+    let signature_algorithm: AlgorithmIdentifier = AlgorithmIdentifier {
         oid: ID_ALG_MTCPROOF,
         parameters: None,
     };
-    let tbs_certificate = TbsCertificate {
-        version: entry.version,
-        serial_number: SerialNumber::new(&leaf_index.to_be_bytes())?,
-        signature: signature_algorithm.clone(),
-        issuer: entry.issuer,
-        validity: entry.validity,
-        subject: entry.subject,
-        subject_public_key_info: spki,
-        issuer_unique_id: entry.issuer_unique_id,
-        subject_unique_id: entry.subject_unique_id,
-        extensions: entry.extensions,
-    };
-    let certificate = Certificate {
-        tbs_certificate,
-        signature_algorithm,
-        signature: BitString::from_bytes(
-            &MtcProof {
-                start: subtree.lo(),
-                end: subtree.hi(),
-                inclusion_proof,
-                signatures: Vec::new(),
-            }
-            .to_bytes(),
-        )?,
-    };
-    Ok(certificate.to_der()?)
+    // Build TBSCertificate DER field-by-field (x509-cert 0.3 fields are private).
+    let mut tbs_content = Vec::new();
+    if entry.version != x509_cert::certificate::Version::V1 {
+        let tagged = ContextSpecific {
+            tag_number: TagNumber(0),
+            tag_mode: TagMode::Explicit,
+            value: entry.version,
+        };
+        tagged.encode_to_vec(&mut tbs_content)?;
+    }
+    SerialNumber::<Rfc5280>::new(&leaf_index.to_be_bytes())?
+        .encode_to_vec(&mut tbs_content)?;
+    signature_algorithm.encode_to_vec(&mut tbs_content)?;
+    entry.issuer.encode_to_vec(&mut tbs_content)?;
+    entry.validity.encode_to_vec(&mut tbs_content)?;
+    entry.subject.encode_to_vec(&mut tbs_content)?;
+    spki.encode_to_vec(&mut tbs_content)?;
+    if let Some(uid) = &entry.issuer_unique_id {
+        // issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL
+        ContextSpecificRef {
+            tag_number: TagNumber(1),
+            tag_mode: TagMode::Implicit,
+            value: uid,
+        }
+        .encode_to_vec(&mut tbs_content)?;
+    }
+    if let Some(uid) = &entry.subject_unique_id {
+        // subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL
+        ContextSpecificRef {
+            tag_number: TagNumber(2),
+            tag_mode: TagMode::Implicit,
+            value: uid,
+        }
+        .encode_to_vec(&mut tbs_content)?;
+    }
+    if let Some(exts) = &entry.extensions {
+        let mut exts_items = Vec::new();
+        for ext in exts {
+            exts_items.extend(ext.to_der()?);
+        }
+        let mut exts_seq = Vec::new();
+        der::Header::new(der::Tag::Sequence, der::Length::try_from(exts_items.len())?)
+            .encode_to_vec(&mut exts_seq)?;
+        exts_seq.extend(exts_items);
+        let exts_any = der::asn1::Any::from_der(&exts_seq)?;
+        let tagged = ContextSpecific {
+            tag_number: TagNumber(3),
+            tag_mode: TagMode::Explicit,
+            value: exts_any,
+        };
+        tagged.encode_to_vec(&mut tbs_content)?;
+    }
+    let mut tbs_der = Vec::new();
+    der::Header::new(
+        der::Tag::Sequence,
+        der::Length::try_from(tbs_content.len())?,
+    )
+    .encode_to_vec(&mut tbs_der)?;
+    tbs_der.extend(&tbs_content);
+
+    // Build Certificate DER: SEQUENCE { tbs_der, signature_algorithm, signature }.
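+    // The resulting "signature" BIT STRING is not a conventional signature: it carries
+    // a serialized MtcProof (the subtree bounds and Merkle inclusion proof, with an
+    // empty signature list), matching the id-alg-mtcproof algorithm identifier above.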
+    let sig_bytes = MtcProof {
+        start: subtree.lo(),
+        end: subtree.hi(),
+        inclusion_proof,
+        signatures: Vec::new(),
+    }
+    .to_bytes();
+    let sig_bitstring = BitString::from_bytes(&sig_bytes)?;
+    let mut cert_content = Vec::new();
+    cert_content.extend(&tbs_der);
+    signature_algorithm.encode_to_vec(&mut cert_content)?;
+    sig_bitstring.encode_to_vec(&mut cert_content)?;
+    let mut cert_der = Vec::new();
+    der::Header::new(
+        der::Tag::Sequence,
+        der::Length::try_from(cert_content.len())?,
+    )
+    .encode_to_vec(&mut cert_der)?;
+    cert_der.extend(cert_content);
+    Ok(cert_der)
 }

 #[derive(Debug, Error)]
@@ -471,17 +541,17 @@ fn filter_extensions(extensions: &mut Vec<Extension>) -> Result<(), MtcError> {
 /// Errors if the bootstrap certificate contains unsupported fields or
 /// extensions.
 pub fn tbs_cert_to_log_entry(
-    bootstrap: TbsCertificate,
-    issuer: RdnSequence,
+    bootstrap: &TbsCertificate,
+    issuer: &RdnSequence,
     validity: Validity,
 ) -> Result<TbsCertificateLogEntry, MtcError> {
-    if bootstrap.version != Version::V3 {
+    if bootstrap.version() != Version::V3 {
         return Err(MtcError::Dynamic("bootstrap version must be v3".into()));
     }
     if validity
         .not_before
         .to_unix_duration()
-        .lt(&bootstrap.validity.not_before.to_unix_duration())
+        .lt(&bootstrap.validity().not_before.to_unix_duration())
     {
         return Err(MtcError::Dynamic(
             "entry not_before must not be less than bootstrap not_before".into(),
@@ -490,30 +560,34 @@ pub fn tbs_cert_to_log_entry(
     if validity
         .not_after
         .to_unix_duration()
-        .gt(&bootstrap.validity.not_after.to_unix_duration())
+        .gt(&bootstrap.validity().not_after.to_unix_duration())
     {
         return Err(MtcError::Dynamic(
             "entry not_after must not be greater than bootstrap not_after".into(),
         ));
     }
-    let extensions = if let Some(mut bootstrap_extensions) = bootstrap.extensions {
-        filter_extensions(&mut bootstrap_extensions)?;
-        Some(bootstrap_extensions)
+    let extensions = if let Some(bootstrap_extensions) = bootstrap.extensions() {
+        let mut exts = bootstrap_extensions.clone();
+        filter_extensions(&mut exts)?;
+        Some(exts)
     } else {
        None
    };

+    // Convert RdnSequence → Name via DER round-trip (Name is a newtype over RdnSequence).
+    let issuer = Name::from_der(&issuer.to_der()?)?;
+
     Ok(TbsCertificateLogEntry {
-        version: bootstrap.version,
+        version: bootstrap.version(),
         issuer,
         validity,
-        subject: bootstrap.subject,
+        subject: bootstrap.subject().clone(),
         subject_public_key_info_hash: OctetString::new(
-            &Sha256::digest(bootstrap.subject_public_key_info.to_der()?)[..],
+            &Sha256::digest(bootstrap.subject_public_key_info().to_der()?)[..],
         )?,
-        issuer_unique_id: bootstrap.issuer_unique_id,
-        subject_unique_id: bootstrap.subject_unique_id,
+        issuer_unique_id: bootstrap.issuer_unique_id().clone(),
+        subject_unique_id: bootstrap.subject_unique_id().clone(),
         extensions,
     })
 }
@@ -532,24 +606,24 @@ pub fn validate_correspondence(
 ) -> Result<(), MtcError> {
     // We will run ordinary chain validation on the given chain. After, we will do additional
     // validation, expressed in the below closure.
-    let validator_hook = |leaf: Certificate,
+    let validator_hook = |bootstrap: Certificate,
                           chain_certs: Vec<&Certificate>,
                           _chain_fingerprints: Vec<[u8; 32]>,
                           _found_root_idx: Option<usize>|
     -> Result<(), MtcError> {
-        let bootstrap = leaf.tbs_certificate.clone();
-
-        if !(log_entry.version == bootstrap.version && log_entry.version == Version::V3) {
+        if !(log_entry.version == bootstrap.tbs_certificate().version()
+            && log_entry.version == Version::V3)
+        {
             return Err(MtcError::Dynamic(
                 "entry and bootstrap versions must be v3".into(),
             ));
         }
         // Make sure the validity is contained within the validity of every cert in
         // the chain.
-        for cert in core::iter::once(&leaf).chain(chain_certs) {
+        for cert in core::iter::once(&bootstrap).chain(chain_certs) {
             if log_entry.validity.not_before.to_unix_duration().lt(&cert
-                .tbs_certificate
-                .validity
+                .tbs_certificate()
+                .validity()
                 .not_before
                 .to_unix_duration())
             {
@@ -558,8 +632,8 @@ pub fn validate_correspondence(
                 ));
             }
             if log_entry.validity.not_after.to_unix_duration().gt(&cert
-                .tbs_certificate
-                .validity
+                .tbs_certificate()
+                .validity()
                 .not_after
                 .to_unix_duration())
             {
@@ -569,41 +643,50 @@ pub fn validate_correspondence(
                 ));
             }
         }
-        if log_entry.subject != bootstrap.subject {
+        if log_entry.subject != *bootstrap.tbs_certificate().subject() {
             return Err(MtcError::Dynamic(
                 "entry subject must match bootstrap subject".into(),
             ));
         }
         if log_entry.subject_public_key_info_hash
-            != OctetString::new(&Sha256::digest(bootstrap.subject_public_key_info.to_der()?)[..])?
+            != OctetString::new(
+                &Sha256::digest(
+                    bootstrap
+                        .tbs_certificate()
+                        .subject_public_key_info()
+                        .to_der()?,
+                )[..],
+            )?
         {
             return Err(MtcError::Dynamic(
                 "entry spki hash must match hash of bootstrap spki".into(),
             ));
         }
-        if log_entry.issuer_unique_id != bootstrap.issuer_unique_id {
+        if log_entry.issuer_unique_id != *bootstrap.tbs_certificate().issuer_unique_id() {
             return Err(MtcError::Dynamic(
                 "entry issuer unique ID must match bootstrap issuer unique ID".into(),
             ));
         }
-        if log_entry.subject_unique_id != bootstrap.subject_unique_id {
+        if log_entry.subject_unique_id != *bootstrap.tbs_certificate().subject_unique_id() {
             return Err(MtcError::Dynamic(
                 "entry subject unique ID must match bootstrap subject unique ID".into(),
             ));
         }
-        let (log_entry_extensions, mut bootstrap_extensions) =
-            match (&log_entry.extensions, bootstrap.extensions) {
-                // If no extensions in either entry or bootstrap, we're done.
-                (None, None) => return Ok(()),
-                // If mismatched, that's an error.
-                (Some(_), None) | (None, Some(_)) => {
-                    return Err(MtcError::Dynamic("mismatched extensions".into()))
-                }
-                // Otherwise both the log entry and bootstrap cert have
-                // extensions. Check them below.
-                (Some(log_ext), Some(boot_ext)) => (log_ext, boot_ext),
-            };
+        let (log_entry_extensions, mut bootstrap_extensions) = match (
+            &log_entry.extensions,
+            bootstrap.tbs_certificate().extensions().cloned(),
+        ) {
+            // If no extensions in either entry or bootstrap, we're done.
+            (None, None) => return Ok(()),
+            // If mismatched, that's an error.
+            (Some(_), None) | (None, Some(_)) => {
+                return Err(MtcError::Dynamic("mismatched extensions".into()))
+            }
+            // Otherwise both the log entry and bootstrap cert have
+            // extensions. Check them below.
+            (Some(log_ext), Some(boot_ext)) => (log_ext, boot_ext),
+        };

         // Check and filter bootstrap extensions.
         filter_extensions(&mut bootstrap_extensions)?;
@@ -698,7 +781,7 @@ pub fn validate_correspondence(
 pub fn validate_chain(
     raw_chain: &[Vec<u8>],
     roots: &CertPool,
-    issuer: RdnSequence,
+    issuer: &RdnSequence,
     validity: &mut Validity,
 ) -> Result<(BootstrapMtcPendingLogEntry, Option<usize>), MtcError> {
     // We will run the ordinary chain validation on our input, but we have some post-processing we
@@ -712,20 +795,20 @@ pub fn validate_chain(
         // all certificates in the chain.
         for cert in std::iter::once(&leaf).chain(chain_certs) {
             if validity.not_before.to_unix_duration().lt(&cert
-                .tbs_certificate
-                .validity
+                .tbs_certificate()
+                .validity()
                 .not_before
                 .to_unix_duration())
             {
-                validity.not_before = cert.tbs_certificate.validity.not_before;
+                validity.not_before = cert.tbs_certificate().validity().not_before;
             }
             if validity.not_after.to_unix_duration().gt(&cert
-                .tbs_certificate
-                .validity
+                .tbs_certificate()
+                .validity()
                 .not_after
                 .to_unix_duration())
             {
-                validity.not_after = cert.tbs_certificate.validity.not_after;
+                validity.not_after = cert.tbs_certificate().validity().not_after;
             }
             // Check that we still have a non-empty validity period.
             if validity
@@ -754,7 +837,7 @@ pub fn validate_chain(
         bootstrap: bootstrap_tile_entry,
         entry: TlogTilesPendingLogEntry {
             data: MerkleTreeCertEntry::TbsCertEntry(tbs_cert_to_log_entry(
-                leaf.tbs_certificate,
+                leaf.tbs_certificate(),
                 issuer,
                 *validity,
             )?)
@@ -808,19 +891,14 @@ mod tests {
         ))
         .unwrap();

-        let validity = Validity {
-            not_before: Time::UtcTime(
-                UtcTime::from_unix_duration(Duration::from_secs(1_518_521_919)).unwrap(),
-            ),
-            not_after: Time::UtcTime(
-                UtcTime::from_unix_duration(Duration::from_secs(1_743_161_919)).unwrap(),
-            ),
-        };
+        let validity = Validity::new(
+            Time::UtcTime(UtcTime::from_unix_duration(Duration::from_secs(1_518_521_919)).unwrap()),
+            Time::UtcTime(UtcTime::from_unix_duration(Duration::from_secs(1_743_161_919)).unwrap()),
+        );

         let mut log_entry = {
-            let bootstrap = &bootstrap_chain[0].tbs_certificate;
             let issuer = RdnSequence::default();
-            tbs_cert_to_log_entry(bootstrap.clone(), issuer, validity).unwrap()
+            tbs_cert_to_log_entry(bootstrap_chain[0].tbs_certificate(), &issuer, validity).unwrap()
         };

         // Valid.
@@ -865,24 +943,122 @@ mod tests {
         validate_correspondence(&log_entry, &raw_chain, &roots).unwrap_err();
     }

+    #[test]
+    fn test_serialize_signatureless_cert() {
+        use der::Decode as _;
+        use sha2::Digest as _;
+        use tlog_tiles::{Proof, Subtree, TlogTilesLogEntry, TlogTilesPendingLogEntry};
+
+        let certs =
+            Certificate::load_pem_chain(include_bytes!("../../static_ct_api/tests/leaf-cert.pem"))
+                .unwrap();
+        let cert = &certs[0];
+        let tbs = cert.tbs_certificate();
+        let issuer = RdnSequence::default();
+        let validity = Validity::new(
+            Time::UtcTime(UtcTime::from_unix_duration(Duration::from_secs(1_518_521_919)).unwrap()),
+            Time::UtcTime(UtcTime::from_unix_duration(Duration::from_secs(1_743_161_919)).unwrap()),
+        );
+        let log_entry = tbs_cert_to_log_entry(tbs, &issuer, validity).unwrap();
+        let spki_der = tbs.subject_public_key_info().to_der().unwrap();
+
+        // Construct a BootstrapMtcLogEntry wrapping the log entry.
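+        // `encode()` produces the serialized MerkleTreeCertEntry bytes that the log
+        // stores as the pending entry's `data` field.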
+        let mtc_entry = MerkleTreeCertEntry::TbsCertEntry(log_entry);
+        let bootstrap_log_entry = BootstrapMtcLogEntry(TlogTilesLogEntry {
+            inner: TlogTilesPendingLogEntry {
+                data: mtc_entry.encode().unwrap(),
+            },
+        });
+
+        let leaf_index: LeafIndex = 42;
+        let subtree = Subtree::new(0, 64).unwrap();
+        let proof: Proof = vec![];
+
+        let cert_der = serialize_signatureless_cert(
+            &bootstrap_log_entry,
+            leaf_index,
+            &spki_der,
+            &subtree,
+            proof,
+        )
+        .unwrap();
+
+        // Parse the output as a Certificate and verify key fields.
+        let out = Certificate::from_der(&cert_der).unwrap();
+        let out_tbs = out.tbs_certificate();
+
+        // Serial number must equal leaf_index encoded as big-endian bytes.
+        // Leading zeros may differ due to DER integer sign-bit encoding.
+        let trim_leading_zeros =
+            |b: &[u8]| -> Vec<u8> { b.iter().copied().skip_while(|&x| x == 0).collect() };
+        assert_eq!(
+            trim_leading_zeros(out_tbs.serial_number().as_bytes()),
+            trim_leading_zeros(&leaf_index.to_be_bytes()),
+            "serial_number must encode leaf_index"
+        );
+
+        // Signature algorithm must be id-alg-mtcproof.
+        assert_eq!(
+            out_tbs.signature().oid,
+            ID_ALG_MTCPROOF,
+            "signature algorithm must be id-alg-mtcproof"
+        );
+
+        // Subject must match the original cert's subject.
+        assert_eq!(out_tbs.subject(), tbs.subject(), "subject must round-trip");
+
+        // SPKI in the output must match the input spki_der.
+        assert_eq!(
+            out_tbs.subject_public_key_info().to_der().unwrap(),
+            spki_der,
+            "subject_public_key_info must round-trip"
+        );
+
+        // Verify the issuer is encoded (empty RdnSequence → Name with no RDNs).
+        // This exercises the Name→issuer round-trip via DER.
+        let issuer_der = out_tbs.issuer().to_der().unwrap();
+        let expected_issuer_der = Name::from_der(&RdnSequence::default().to_der().unwrap())
+            .unwrap()
+            .to_der()
+            .unwrap();
+        assert_eq!(
+            issuer_der, expected_issuer_der,
+            "issuer must encode correctly"
+        );
+
+        // Validity must match what we passed in.
+        assert_eq!(
+            out_tbs.validity().not_before.to_unix_duration().as_secs(),
+            1_518_521_919,
+            "not_before must round-trip"
+        );
+        assert_eq!(
+            out_tbs.validity().not_after.to_unix_duration().as_secs(),
+            1_743_161_919,
+            "not_after must round-trip"
+        );
+
+        // Unique IDs: the source cert has none, so the output should have none.
+ assert!(out_tbs.issuer_unique_id().is_none()); + assert!(out_tbs.subject_unique_id().is_none()); + } + #[test] fn test_encode() { let certs = Certificate::load_pem_chain(include_bytes!("../../static_ct_api/tests/leaf-cert.pem")) .unwrap(); - let bootstrap = &certs[0].tbs_certificate; let issuer = RdnSequence::default(); - let validity = Validity { - not_before: Time::UtcTime( - UtcTime::from_unix_duration(Duration::from_secs(1_518_521_919)).unwrap(), - ), - not_after: Time::UtcTime( - UtcTime::from_unix_duration(Duration::from_secs(1_743_161_919)).unwrap(), - ), - }; + let validity = Validity::new( + Time::UtcTime(UtcTime::from_unix_duration(Duration::from_secs(1_518_521_919)).unwrap()), + Time::UtcTime(UtcTime::from_unix_duration(Duration::from_secs(1_743_161_919)).unwrap()), + ); - let log_entry = tbs_cert_to_log_entry(bootstrap.clone(), issuer, validity).unwrap(); + let log_entry = + tbs_cert_to_log_entry(certs[0].tbs_certificate(), &issuer, validity).unwrap(); let decoded = TbsCertificateLogEntry::from_der(&log_entry.to_der().unwrap()).unwrap(); assert_eq!(log_entry, decoded); diff --git a/crates/bootstrap_mtc_api/src/relative_oid.rs b/crates/bootstrap_mtc_api/src/relative_oid.rs index 8a876d00..b3b6689f 100644 --- a/crates/bootstrap_mtc_api/src/relative_oid.rs +++ b/crates/bootstrap_mtc_api/src/relative_oid.rs @@ -4,7 +4,7 @@ use std::str::FromStr; /// ASN.1 `RELATIVE OID`. /// /// TODO upstream this to the `der` crate. -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct RelativeOid { ber: Vec, arcs: Vec, @@ -42,6 +42,37 @@ impl RelativeOid { pub fn as_bytes(&self) -> &[u8] { &self.ber } + + /// Decode a `RelativeOid` from its BER-encoded bytes. + /// + /// # Errors + /// + /// Returns an error if the bytes are not valid BER for a relative OID. + pub fn from_ber_bytes(ber: &[u8]) -> Result { + let mut arcs = Vec::new(); + let mut i = 0; + while i < ber.len() { + let mut arc: u32 = 0; + loop { + let b = *ber + .get(i) + .ok_or_else(|| MtcError::Dynamic("truncated OID arc".into()))?; + i += 1; + arc = arc + .checked_shl(7) + .ok_or_else(|| MtcError::Dynamic("OID arc overflow".into()))? + | u32::from(b & 0x7f); + if b & 0x80 == 0 { + break; + } + } + arcs.push(arc); + } + Ok(Self { + ber: ber.to_vec(), + arcs, + }) + } } impl std::fmt::Display for RelativeOid { diff --git a/crates/bootstrap_mtc_worker/Cargo.toml b/crates/bootstrap_mtc_worker/Cargo.toml index f25c57e7..def777bc 100644 --- a/crates/bootstrap_mtc_worker/Cargo.toml +++ b/crates/bootstrap_mtc_worker/Cargo.toml @@ -37,7 +37,7 @@ x509-cert.workspace = true der.workspace = true [dev-dependencies] -rand = { workspace = true, features = ["small_rng"] } +rand.workspace = true itertools.workspace = true parking_lot.workspace = true futures-executor.workspace = true diff --git a/crates/bootstrap_mtc_worker/README.md b/crates/bootstrap_mtc_worker/README.md index b702b1e7..2a17350a 100644 --- a/crates/bootstrap_mtc_worker/README.md +++ b/crates/bootstrap_mtc_worker/README.md @@ -2,7 +2,7 @@ A Rust implementation of a [Bootstrap MTC CA](https://blog.cloudflare.com/bootstrap-mtc) for deployment on [Cloudflare Workers](https://workers.cloudflare.com/). -This worker implements an experimental Merkle Tree Certificates CA based on the bootstrap experiment described in the Cloudflare blog post above. It implements an older version of the MTC specification (approximately draft-davidben-tls-merkle-tree-certs-09). 
+This worker implements an experimental Merkle Tree Certificates CA based on the bootstrap experiment described in the Cloudflare blog post above. It implements an older version of the MTC specification (approximately draft-davidben-tls-merkle-tree-certs-09). For the current IETF draft implementation, see [`ietf_mtc_api`](../ietf_mtc_api/). The internal log architecture (Sequencer, Batcher, Cleaner Durable Objects, tiled R2 storage) is shared with the [Static CT Log](../ct_worker/README.md). diff --git a/crates/bootstrap_mtc_worker/build.rs b/crates/bootstrap_mtc_worker/build.rs index 18c2580f..c78145f4 100644 --- a/crates/bootstrap_mtc_worker/build.rs +++ b/crates/bootstrap_mtc_worker/build.rs @@ -3,10 +3,10 @@ // Build script to include per-environment configuration and trusted roots. +use bootstrap_mtc_api::ID_RDNA_TRUSTANCHOR_ID; use config::AppConfig; use der::asn1::Utf8StringRef; -use der::{asn1::SetOfVec, Any, Tag}; -use bootstrap_mtc_api::ID_RDNA_TRUSTANCHOR_ID; +use der::{Any, Tag}; use std::env; use std::fs; use url::Url; @@ -39,17 +39,17 @@ fn main() { }); for (name, params) in conf.logs { // Make sure we can create the RDN sequence for the issuer log ID. - let _ = RdnSequence::from(vec![RelativeDistinguishedName( - SetOfVec::from_iter([AttributeTypeAndValue { + let _ = RdnSequence::from(vec![RelativeDistinguishedName::try_from(vec![ + AttributeTypeAndValue { oid: ID_RDNA_TRUSTANCHOR_ID, value: Any::new( Tag::Utf8String, Utf8StringRef::new(¶ms.log_id).unwrap().as_bytes(), ) .unwrap(), - }]) - .unwrap(), - )]); + }, + ]) + .unwrap()]); // Valid location hints: https://developers.cloudflare.com/durable-objects/reference/data-location/#supported-locations-1 if let Some(location) = ¶ms.location_hint { diff --git a/crates/bootstrap_mtc_worker/src/batcher_do.rs b/crates/bootstrap_mtc_worker/src/batcher_do.rs index 48389f72..10a1f745 100644 --- a/crates/bootstrap_mtc_worker/src/batcher_do.rs +++ b/crates/bootstrap_mtc_worker/src/batcher_do.rs @@ -4,7 +4,7 @@ use generic_log_worker::{get_durable_object_name, BatcherConfig, GenericBatcher, use worker::*; #[durable_object(fetch)] -struct Batcher(GenericBatcher); +struct Batcher(GenericBatcher); impl DurableObject for Batcher { fn new(state: State, env: Env) -> Self { @@ -25,7 +25,7 @@ impl DurableObject for Batcher { enable_dedup: false, // deduplication is not currently supported location_hint: params.location_hint.clone(), }; - Batcher(GenericBatcher::new(state, env, config)) + Batcher(GenericBatcher::::new(state, env, config)) } async fn fetch(&self, req: Request) -> Result { diff --git a/crates/bootstrap_mtc_worker/src/ccadb_roots_cron.rs b/crates/bootstrap_mtc_worker/src/ccadb_roots_cron.rs index d2d92eef..62afea40 100644 --- a/crates/bootstrap_mtc_worker/src/ccadb_roots_cron.rs +++ b/crates/bootstrap_mtc_worker/src/ccadb_roots_cron.rs @@ -78,7 +78,7 @@ pub(crate) async fn update_ccadb_roots(kv: &KvStore) -> Result<()> { write!( &mut buf, "\n# {}\n# added on {} from CCADB\n{}\n", - cert.tbs_certificate.subject, + cert.tbs_certificate().subject(), DateTime::from_timestamp_millis( now_millis() .try_into() diff --git a/crates/bootstrap_mtc_worker/src/frontend_worker.rs b/crates/bootstrap_mtc_worker/src/frontend_worker.rs index 2ba39505..9e710e2c 100644 --- a/crates/bootstrap_mtc_worker/src/frontend_worker.rs +++ b/crates/bootstrap_mtc_worker/src/frontend_worker.rs @@ -5,7 +5,7 @@ use crate::{load_checkpoint_cosigner, load_origin, load_roots, SequenceMetadata, CONFIG}; use der::{ - asn1::{SetOfVec, UtcTime, Utf8StringRef}, + 
asn1::{UtcTime, Utf8StringRef}, Any, Encode, Tag, }; use generic_log_worker::{ @@ -287,9 +287,8 @@ fn build_issuer_rdn(log_id: &str) -> std::result::Result { value: any_value, }; - let rdn = RelativeDistinguishedName( - SetOfVec::from_iter([attr]).expect("single attribute should always succeed"), - ); + let rdn = RelativeDistinguishedName::try_from(vec![attr]) + .expect("single attribute should always succeed"); Ok(RdnSequence::from(vec![rdn])) } @@ -303,10 +302,7 @@ fn build_validity( let not_after = UtcTime::from_unix_duration(now + Duration::from_secs(max_lifetime_secs)) .map_err(|e| e.to_string())?; - Ok(Validity { - not_before: Time::UtcTime(not_before), - not_after: Time::UtcTime(not_after), - }) + Ok(Validity::new(Time::UtcTime(not_before), Time::UtcTime(not_after))) } /// Returns the issuer cert for SCT validation. For multi-cert chains, that's @@ -329,7 +325,7 @@ fn resolve_issuer_for_sct( // Single-cert chain: look up issuer from roots pool let leaf = Certificate::from_der(&chain[0]).map_err(|e| format!("failed to parse leaf: {e}"))?; - let issuer_dn = &leaf.tbs_certificate.issuer; + let issuer_dn = leaf.tbs_certificate().issuer(); roots .find_by_subject(issuer_dn) @@ -348,7 +344,7 @@ async fn add_entry(mut req: Request, env: &Env, name: &str) -> Result let roots = load_roots(env, name).await?; let (pending_entry, found_root_idx) = - match bootstrap_mtc_api::validate_chain(&req.chain, roots, issuer, &mut validity) { + match bootstrap_mtc_api::validate_chain(&req.chain, roots, &issuer, &mut validity) { Ok(v) => v, Err(e) => { log::warn!("{name}: Bad request: {e}"); @@ -530,9 +526,9 @@ mod tests { #[test] fn test_build_issuer_rdn() { let rdn = build_issuer_rdn("test-log-id").unwrap(); - assert_eq!(rdn.0.len(), 1); + assert_eq!(rdn.as_ref().len(), 1); - let attr = rdn.0[0].0.iter().next().unwrap(); + let attr = rdn.as_ref()[0].as_ref().iter().next().unwrap(); assert_eq!(attr.oid, ID_RDNA_TRUSTANCHOR_ID); let encoded = attr.value.to_der().unwrap(); diff --git a/crates/bootstrap_mtc_worker/src/lib.rs b/crates/bootstrap_mtc_worker/src/lib.rs index 0fcc4045..90b2abcd 100644 --- a/crates/bootstrap_mtc_worker/src/lib.rs +++ b/crates/bootstrap_mtc_worker/src/lib.rs @@ -85,58 +85,66 @@ pub(crate) fn load_origin(name: &str) -> KeyName { } async fn load_roots(env: &Env, name: &str) -> Result<&'static CertPool> { - // Load embedded roots. - ROOTS - .get_or_try_init(|| async { - let mut pool = CertPool::default(); - // Load additional roots from the CCADB roots file in Workers KV. - let kv = env.kv(CCADB_ROOTS_NAMESPACE)?; - let pem = if let Some(pem) = kv.get(CCADB_ROOTS_FILENAME).text().await? { - pem - } else { - // The roots file might not exist if the CCADB roots cron job hasn't - // run yet. Try to create it once before failing. - update_ccadb_roots(&kv).await?; - kv.get(CCADB_ROOTS_FILENAME) - .text() - .await? - .ok_or(format!("{name}: '{CCADB_ROOTS_FILENAME}' not found in KV"))? - }; + // Fast path: already initialized. + if let Some(pool) = ROOTS.get() { + return Ok(pool); + } + + // Build the pool for this request. If another request concurrently built + // and stored one first, we discard ours and return the stored value. + // This avoids awaiting a OnceCell initialized by another request context, + // which the Workers runtime would cancel as a cross-request deadlock. 
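+    // (Worst case, several in-flight requests each build a pool; only the
+    // first `ROOTS.set` below succeeds, and the others read the winner back.)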
+ let mut pool = CertPool::default(); - pool.append_certs_from_pem(pem.as_bytes()) - .map_err(|e| format!("failed to add CCADB certs to pool: {e}"))?; + // Load additional roots from the CCADB roots file in Workers KV. + let kv = env.kv(CCADB_ROOTS_NAMESPACE)?; + let pem = if let Some(pem) = kv.get(CCADB_ROOTS_FILENAME).text().await? { + pem + } else { + // The roots file might not exist if the CCADB roots cron job hasn't + // run yet. Try to create it once before failing. + update_ccadb_roots(&kv).await?; + kv.get(CCADB_ROOTS_FILENAME) + .text() + .await? + .ok_or(format!("{name}: '{CCADB_ROOTS_FILENAME}' not found in KV"))? + }; - // Add additional roots when the 'dev-bootstrap-roots' feature is - // enabled. - // - // A note on the differences between how roots are handled for the - // MTC vs CT applications: - // - // The purpose of CT is to observe certificates but not police them. - // As long as it's not a spam vector, we're generally willing to - // accept any root certificates that have been trusted by at least - // one major root program during the log shard's lifetime. Roots - // aren't removed from the list once they're added in order to keep - // a better record. We have the ability to add in custom roots from - // a per-environment roots file too, in order to support test CAs. - // - // For bootstrap MTC, the roots are meant to ensure that the log - // only accepts bootstrap MTC chains that will be trusted by Chrome, - // since Chrome might reject an entire batch of MTCs if there's a - // single untrusted entry. Thus, we want to keep the trusted roots - // as a subset of Chrome's trust store. We're using Mozilla's CRLite - // filters to check for revocation, so we need to be a subset of - // Mozilla's trust store too. When either root program stops - // trusting a root, we also need to remove it from our trust store. - // Given that, we gate the ability to add in custom roots behind the - // 'dev-bootstrap-roots' feature flag. - #[cfg(feature = "dev-bootstrap-roots")] - { - pool.append_certs_from_pem(include_bytes!("../dev-bootstrap-roots.pem")) - .map_err(|e| format!("failed to add dev certs to pool: {e}"))?; - } + pool.append_certs_from_pem(pem.as_bytes()) + .map_err(|e| format!("failed to add CCADB certs to pool: {e}"))?; + + // Add additional roots when the 'dev-bootstrap-roots' feature is + // enabled. + // + // A note on the differences between how roots are handled for the + // MTC vs CT applications: + // + // The purpose of CT is to observe certificates but not police them. + // As long as it's not a spam vector, we're generally willing to + // accept any root certificates that have been trusted by at least + // one major root program during the log shard's lifetime. Roots + // aren't removed from the list once they're added in order to keep + // a better record. We have the ability to add in custom roots from + // a per-environment roots file too, in order to support test CAs. + // + // For bootstrap MTC, the roots are meant to ensure that the log + // only accepts bootstrap MTC chains that will be trusted by Chrome, + // since Chrome might reject an entire batch of MTCs if there's a + // single untrusted entry. Thus, we want to keep the trusted roots + // as a subset of Chrome's trust store. We're using Mozilla's CRLite + // filters to check for revocation, so we need to be a subset of + // Mozilla's trust store too. When either root program stops + // trusting a root, we also need to remove it from our trust store. 
+ // Given that, we gate the ability to add in custom roots behind the + // 'dev-bootstrap-roots' feature flag. + #[cfg(feature = "dev-bootstrap-roots")] + { + pool.append_certs_from_pem(include_bytes!("../dev-bootstrap-roots.pem")) + .map_err(|e| format!("failed to add dev certs to pool: {e}"))?; + } - Ok(pool) - }) - .await + // Store the pool if no other request got there first; either way return + // the value now in the cell. + let _ = ROOTS.set(pool); + Ok(ROOTS.get().expect("just set")) } diff --git a/crates/bootstrap_mtc_worker/src/sequencer_do.rs b/crates/bootstrap_mtc_worker/src/sequencer_do.rs index 7eed9045..18437747 100644 --- a/crates/bootstrap_mtc_worker/src/sequencer_do.rs +++ b/crates/bootstrap_mtc_worker/src/sequencer_do.rs @@ -86,7 +86,7 @@ fn checkpoint_callback(env: &Env, name: &str) -> CheckpointCallbacker { let params = &CONFIG.logs[name]; let bucket = load_public_bucket(env, name).unwrap(); Box::new( - move |old_time: UnixTimestamp, new_time: UnixTimestamp, new_checkpoint_bytes: &[u8]| { + move |old_time: UnixTimestamp, new_time: UnixTimestamp, _old_tree_size: u64, _new_tree_size: u64, new_checkpoint_bytes: &[u8]| { let new_checkpoint = { // TODO: Make more efficient. There are two unnecessary allocations here. diff --git a/crates/ct_worker/Cargo.toml b/crates/ct_worker/Cargo.toml index b2df51bf..7666dde1 100644 --- a/crates/ct_worker/Cargo.toml +++ b/crates/ct_worker/Cargo.toml @@ -32,7 +32,7 @@ url.workspace = true x509-cert.workspace = true [dev-dependencies] -rand = { workspace = true, features = ["small_rng"] } +rand.workspace = true itertools.workspace = true parking_lot.workspace = true futures-executor.workspace = true diff --git a/crates/ct_worker/src/batcher_do.rs b/crates/ct_worker/src/batcher_do.rs index e832edc1..a5f58441 100644 --- a/crates/ct_worker/src/batcher_do.rs +++ b/crates/ct_worker/src/batcher_do.rs @@ -4,7 +4,7 @@ use generic_log_worker::{get_durable_object_name, BatcherConfig, GenericBatcher, use worker::*; #[durable_object(fetch)] -struct Batcher(GenericBatcher); +struct Batcher(GenericBatcher); impl DurableObject for Batcher { fn new(state: State, env: Env) -> Self { @@ -25,7 +25,7 @@ impl DurableObject for Batcher { enable_dedup: params.enable_dedup, location_hint: params.location_hint.clone(), }; - Batcher(GenericBatcher::new(state, env, config)) + Batcher(GenericBatcher::::new(state, env, config)) } async fn fetch(&self, req: Request) -> Result { diff --git a/crates/ct_worker/src/ccadb_roots_cron.rs b/crates/ct_worker/src/ccadb_roots_cron.rs index 17c7a682..b469062b 100644 --- a/crates/ct_worker/src/ccadb_roots_cron.rs +++ b/crates/ct_worker/src/ccadb_roots_cron.rs @@ -87,7 +87,7 @@ pub(crate) async fn update_ccadb_roots>(keys: &[T], kv: &KvStore) write!( &mut buf, "\n# {}\n# added on {} from CCADB\n{}\n", - cert.tbs_certificate.subject, + cert.tbs_certificate().subject(), DateTime::from_timestamp_millis( now_millis() .try_into() diff --git a/crates/ct_worker/src/lib.rs b/crates/ct_worker/src/lib.rs index 57755a6d..e273c1b8 100644 --- a/crates/ct_worker/src/lib.rs +++ b/crates/ct_worker/src/lib.rs @@ -101,36 +101,44 @@ pub(crate) fn load_origin(name: &str) -> KeyName { } async fn load_roots(env: &Env, name: &str) -> Result<&'static CertPool> { - // Load embedded roots. 
- ROOTS - .get_or_try_init(|| async { - let pem = include_bytes!(concat!(env!("OUT_DIR"), "/roots.pem")); - let mut pool = CertPool::default(); - // load_pem_chain fails on empty input: https://github.com/RustCrypto/formats/pull/1965 - if !pem.is_empty() { - pool.append_certs_from_pem(pem) - .map_err(|e| format!("failed to load PEM chain: {e}"))?; - } + // Fast path: already initialized. + if let Some(pool) = ROOTS.get() { + return Ok(pool); + } + + // Build the pool for this request. If another request concurrently built + // and stored one first, we discard ours and return the stored value. + // This avoids awaiting a OnceCell initialized by another request context, + // which the Workers runtime would cancel as a cross-request deadlock. + let pem = include_bytes!(concat!(env!("OUT_DIR"), "/roots.pem")); + let mut pool = CertPool::default(); + // load_pem_chain fails on empty input: https://github.com/RustCrypto/formats/pull/1965 + if !pem.is_empty() { + pool.append_certs_from_pem(pem) + .map_err(|e| format!("failed to load PEM chain: {e}"))?; + } + + // Load additional roots from the CCADB roots file in Workers KV. + if CONFIG.logs[name].enable_ccadb_roots { + let key = ccadb_roots_filename(name); + let kv = env.kv(CCADB_ROOTS_NAMESPACE)?; + let pem = if let Some(pem) = kv.get(&key).text().await? { + pem + } else { + // The roots file might not exist if the CCADB roots cron job hasn't + // run yet. Try to create it once before failing. + update_ccadb_roots(&[&key], &kv).await?; + kv.get(&key) + .text() + .await? + .ok_or(format!("{name}: '{key}' not found in KV"))? + }; + pool.append_certs_from_pem(pem.as_bytes()) + .map_err(|e| format!("failed to add CCADB certs to pool: {e}"))?; + } - // Load additional roots from the CCADB roots file in Workers KV. - if CONFIG.logs[name].enable_ccadb_roots { - let key = ccadb_roots_filename(name); - let kv = env.kv(CCADB_ROOTS_NAMESPACE)?; - let pem = if let Some(pem) = kv.get(&key).text().await? { - pem - } else { - // The roots file might not exist if the CCADB roots cron job hasn't - // run yet. Try to create it once before failing. - update_ccadb_roots(&[&key], &kv).await?; - kv.get(&key) - .text() - .await? - .ok_or(format!("{name}: '{key}' not found in KV"))? - }; - pool.append_certs_from_pem(pem.as_bytes()) - .map_err(|e| format!("failed to add CCADB certs to pool: {e}"))?; - } - Ok(pool) - }) - .await + // Store the pool if no other request got there first; either way return + // the value now in the cell. + let _ = ROOTS.set(pool); + Ok(ROOTS.get().expect("just set")) } diff --git a/crates/generic_log_worker/Cargo.toml b/crates/generic_log_worker/Cargo.toml index 58f07f38..8db18469 100644 --- a/crates/generic_log_worker/Cargo.toml +++ b/crates/generic_log_worker/Cargo.toml @@ -13,7 +13,7 @@ categories = ["cryptography"] keywords = ["transparency", "crypto"] [dev-dependencies] -rand = { workspace = true, features = ["small_rng"] } +rand.workspace = true itertools.workspace = true parking_lot.workspace = true futures-executor.workspace = true diff --git a/crates/generic_log_worker/src/batcher_do.rs b/crates/generic_log_worker/src/batcher_do.rs index 4fcd24d9..b9c778ca 100644 --- a/crates/generic_log_worker/src/batcher_do.rs +++ b/crates/generic_log_worker/src/batcher_do.rs @@ -7,9 +7,10 @@ //! Entries are assigned to Batcher shards with consistent hashing on the cache key. 
use crate::{ - deserialize, get_durable_object_stub, load_cache_kv, obs, serialize, LookupKey, - SequenceMetadata, BATCH_ENDPOINT, ENTRY_ENDPOINT, SEQUENCER_BINDING, + deserialize, get_durable_object_stub, load_cache_kv, obs, serialize, CacheSerialize, + LookupKey, BATCH_ENDPOINT, ENTRY_ENDPOINT, SEQUENCER_BINDING, }; +use serde::{de::DeserializeOwned, Serialize}; use base64::prelude::*; use futures_util::future::{join_all, select, Either}; use std::{ @@ -23,12 +24,12 @@ use worker::kv::KvStore; #[allow(clippy::wildcard_imports)] use worker::*; -pub struct GenericBatcher { +pub struct GenericBatcher { env: Env, config: BatcherConfig, state: State, kv: Option, - batch: RefCell, + batch: RefCell>, in_flight: RefCell, processed: RefCell, wshim: Option, @@ -43,13 +44,13 @@ pub struct BatcherConfig { } // A batch of entries to be submitted to the Sequencer together. -struct Batch { +struct Batch { entries: Vec, by_hash: HashSet, - done: Sender>, + done: Sender>, } -impl Default for Batch { +impl Default for Batch { /// Returns a batch initialized with a watch channel. fn default() -> Self { Self { @@ -60,7 +61,7 @@ impl Default for Batch { } } -impl GenericBatcher { +impl GenericBatcher { /// Returns a new batcher with the given config. /// /// # Panics @@ -189,7 +190,7 @@ impl GenericBatcher { } } -impl GenericBatcher { +impl GenericBatcher { /// Submit the current pending batch to be sequenced. /// /// # Errors @@ -217,8 +218,8 @@ impl GenericBatcher { ..Default::default() }, )?; - let sequenced_entries: HashMap = - deserialize::>( + let sequenced_entries: HashMap = + deserialize::>( &get_durable_object_stub( &self.env, &self.config.name, @@ -244,7 +245,7 @@ impl GenericBatcher { .map(|(k, v)| { Ok(kv .put(&BASE64_STANDARD.encode(k), "")? - .metadata::(v)? + .metadata::(v)? .execute()) }) .collect::>>()?; diff --git a/crates/generic_log_worker/src/lib.rs b/crates/generic_log_worker/src/lib.rs index 4d005c1d..39ce5970 100644 --- a/crates/generic_log_worker/src/lib.rs +++ b/crates/generic_log_worker/src/lib.rs @@ -16,7 +16,7 @@ pub use cleaner_do::*; pub use log_ops::upload_issuers; pub use sequencer_do::*; -use byteorder::{BigEndian, WriteBytesExt}; + use log::{error, info}; use log_ops::UploadOptions; use obs::metrics::{millis_diff_as_secs, AsF64, ObjectMetrics}; @@ -26,8 +26,12 @@ use sha2::{Digest, Sha256}; use std::cell::RefCell; use std::collections::btree_map::Entry; use std::collections::{BTreeMap, HashMap, VecDeque}; + +use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; +use serde::de::DeserializeOwned; use std::io::Write; -use tlog_tiles::{LookupKey, PendingLogEntry, SequenceMetadata}; +pub use tlog_tiles::LookupKey; +use tlog_tiles::{PendingLogEntry, SequenceMetadata}; use tokio::sync::Mutex; use util::now_millis; use worker::{ @@ -166,14 +170,17 @@ pub fn load_cache_kv(env: &Env, name: &str) -> Result { /// # Errors /// /// Returns an error if there are issues retrieving the metadata. -pub async fn get_cached_metadata( +pub async fn get_cached_metadata( kv: &KvStore, lookup_key: &LookupKey, -) -> Result> { +) -> Result> +where + M: Serialize + DeserializeOwned, +{ // Query the cache and return the entry metadata if it exists let metadata_opt = kv .get(&BASE64_STANDARD.encode(lookup_key)) - .bytes_with_metadata::() + .bytes_with_metadata::() .await? .1; Ok(metadata_opt) @@ -186,40 +193,44 @@ pub async fn get_cached_metadata( /// /// Returns an error if either the KV namespace doesn't exist, or if there is an /// exception when writing the value. 
-pub async fn put_cache_entry_metadata( +pub async fn put_cache_entry_metadata( kv: &KvStore, pending: &L, - metadata: SequenceMetadata, -) -> Result<()> { + metadata: M, +) -> Result<()> +where + L: PendingLogEntry, + M: Serialize, +{ // Get the lookup key. let lookup_key = pending.lookup_key(); // Store key => "", with metadata kv.put(&BASE64_STANDARD.encode(lookup_key), "")? - .metadata::(metadata)? + .metadata::(metadata)? .execute() .await .map_err(Error::from) } -trait CacheWrite { +trait CacheWrite { /// Put the provided sequenced entries into the cache. This does NOT overwrite existing entries. - async fn put_entries(&self, entries: &[(LookupKey, SequenceMetadata)]) -> Result<()>; + async fn put_entries(&self, entries: &[(LookupKey, M)]) -> Result<()>; } -trait CacheRead { +trait CacheRead { /// Read an entry from the deduplication cache. - fn get_entry(&self, key: &LookupKey) -> Option; + fn get_entry(&self, key: &LookupKey) -> Option; } -struct DedupCache { - memory: MemoryCache, +struct DedupCache { + memory: MemoryCache, storage: Storage, } -impl CacheWrite for DedupCache { +impl CacheWrite for DedupCache { /// Write entries to both the short-term deduplication cache and its backup in DO Storage. - async fn put_entries(&self, entries: &[(LookupKey, SequenceMetadata)]) -> Result<()> { + async fn put_entries(&self, entries: &[(LookupKey, M)]) -> Result<()> { if entries.is_empty() { return Ok(()); } @@ -228,10 +239,10 @@ impl CacheWrite for DedupCache { } } -impl CacheRead for DedupCache { +impl CacheRead for DedupCache { /// Check the short-term deduplication cache only. The long-term deduplication /// cache gets checked by the Worker frontend when handling add-chain requests. - fn get_entry(&self, key: &LookupKey) -> Option { + fn get_entry(&self, key: &LookupKey) -> Option { self.memory.get_entry(key) } } @@ -291,11 +302,15 @@ fn compute_cache_keys_to_load(head: u32, tail: u32, max_batches: u32) -> Vec String { + format!("fifo:{idx}") +} + +impl DedupCache { // Batches are written at most once per second, and we only need them to // deduplicate entries long enough for KV's eventual consistency guarantees // (~60s). Cap at 128 so we can use a single get_multiple call to get all @@ -306,7 +321,7 @@ impl DedupCache { const FIFO_TAIL_KEY: &str = "fifo:tail"; fn fifo_key(idx: u32) -> String { - format!("fifo:{idx}") + dedup_cache_fifo_key(idx) } // Load batches of cache entries from DO storage into the in-memory cache. log_name is the name @@ -360,7 +375,7 @@ impl DedupCache { } // Store a batch of cache entries in DO storage. - async fn store(&self, entries: &[(LookupKey, SequenceMetadata)]) -> Result<()> { + async fn store(&self, entries: &[(LookupKey, M)]) -> Result<()> { // Get the head and tail of the dedup cache, picking 0 if uninitialized let head = self .storage @@ -401,40 +416,93 @@ impl DedupCache { } } -fn serialize_entries(entries: &[(LookupKey, SequenceMetadata)]) -> Vec { - let mut buf = Vec::with_capacity(32 * entries.len()); - for (k, (idx, ts)) in entries { - buf.write_all(k).unwrap(); - buf.write_u64::(*idx).unwrap(); - buf.write_u64::(*ts).unwrap(); - } - buf +/// Serialization format for the `DedupCache` DO storage ring buffer. +/// +/// Each metadata type defines its own binary format so that the format can be +/// kept stable across upgrades. `SequenceMetadata` uses the original +/// 32-bytes-per-entry binary format (16-byte key, 8-byte `leaf_index`, +/// 8-byte `timestamp`, all big-endian); other types use `serde_json`. 
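+///
+/// Illustrative layout of one `SequenceMetadata` entry (32 bytes):
+///
+/// ```text
+/// [ 16-byte LookupKey | 8-byte leaf_index (BE) | 8-byte timestamp (BE) ]
+/// ```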
+pub trait CacheSerialize: Sized { + /// Serialize a batch of `(LookupKey, Self)` pairs to bytes. + fn serialize_entries(entries: &[(LookupKey, Self)]) -> Vec; + /// Deserialize a batch of `(LookupKey, Self)` pairs from bytes. + /// + /// # Errors + /// + /// Returns a `String` describing the error if the bytes are malformed. + /// The `String` can be converted to a `worker::Error` via `From`. + fn deserialize_entries(buf: &[u8]) -> std::result::Result, String>; } -fn deserialize_entries(buf: &[u8]) -> Result> { - if !buf.len().is_multiple_of(32) { - return Err("invalid buffer length".into()); +/// `SequenceMetadata` uses the original 32-byte binary format for backward +/// compatibility with existing Durable Object storage. +impl CacheSerialize for SequenceMetadata { + fn serialize_entries(entries: &[(LookupKey, Self)]) -> Vec { + let mut buf = Vec::with_capacity(32 * entries.len()); + for (k, (idx, ts)) in entries { + buf.write_all(k).unwrap(); + buf.write_u64::(*idx).unwrap(); + buf.write_u64::(*ts).unwrap(); + } + buf } - let mut entries = Vec::with_capacity(buf.len() / 32); - for i in 0..buf.len() / 32 { - let key: [u8; 16] = buf[i * 32..i * 32 + 16].try_into().unwrap(); - let value = ( - u64::from_be_bytes(buf[i * 32 + 16..i * 32 + 24].try_into().unwrap()), - u64::from_be_bytes(buf[i * 32 + 24..i * 32 + 32].try_into().unwrap()), - ); - entries.push((key, value)); + + fn deserialize_entries(buf: &[u8]) -> std::result::Result, String> { + if !buf.len().is_multiple_of(32) { + return Err("invalid buffer length".into()); + } + let mut entries = Vec::with_capacity(buf.len() / 32); + let mut cursor = std::io::Cursor::new(buf); + while usize::try_from(cursor.position()).unwrap_or(usize::MAX) < buf.len() { + let mut key = LookupKey::default(); + std::io::Read::read_exact(&mut cursor, &mut key) + .map_err(|e| format!("reading key: {e}"))?; + let idx = cursor + .read_u64::() + .map_err(|e| format!("reading `leaf_index`: {e}"))?; + let ts = cursor + .read_u64::() + .map_err(|e| format!("reading `timestamp`: {e}"))?; + entries.push((key, (idx, ts))); + } + Ok(entries) } - Ok(entries) +} + +/// Implement `CacheSerialize` via `serde_json` for a given type. +#[macro_export] +macro_rules! impl_json_cache_serialize { + ($t:ty) => { + impl $crate::CacheSerialize for $t { + fn serialize_entries(entries: &[($crate::LookupKey, Self)]) -> Vec { + serde_json::to_vec(entries).expect("serializing cache entries to JSON") + } + fn deserialize_entries( + buf: &[u8], + ) -> ::std::result::Result, String> { + serde_json::from_slice(buf) + .map_err(|e| format!("deserializing cache entries: {e}")) + } + } + }; +} + +fn serialize_entries(entries: &[(LookupKey, M)]) -> Vec { + M::serialize_entries(entries) +} + +fn deserialize_entries(buf: &[u8]) -> Result> { + M::deserialize_entries(buf).map_err(Into::into) } // A fixed-size in-memory FIFO cache. -struct MemoryCache { +struct MemoryCache { max_size: usize, - map: RefCell>, + map: RefCell>, fifo: RefCell>, } -impl MemoryCache { +impl MemoryCache { fn new(max_size: usize) -> Self { assert_ne!(max_size, 0); Self { @@ -445,13 +513,13 @@ impl MemoryCache { } // Get an entry from the in-memory cache. - fn get_entry(&self, key: &LookupKey) -> Option { + fn get_entry(&self, key: &LookupKey) -> Option { self.map.borrow().get(key).copied() } // Put a batch of entries into the in-memory cache, // evicting old entries to make room if necessary. 
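+    // Eviction is FIFO: the oldest inserted keys are dropped from the map
+    // first, tracked by the `fifo` ring buffer.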
- fn put_entries(&self, entries: &[(LookupKey, SequenceMetadata)]) { + fn put_entries(&self, entries: &[(LookupKey, M)]) { let mut map = self.map.borrow_mut(); let mut fifo = self.fifo.borrow_mut(); for (key, value) in entries { @@ -736,6 +804,7 @@ impl ObjectBackend for CachedRoObjectBucket { #[cfg(test)] mod tests { use super::*; + use tlog_tiles::SequenceMetadata; // ==================== HeadTailValidation Tests ==================== @@ -883,21 +952,14 @@ mod tests { fn test_serialize_deserialize_empty() { let entries: Vec<(LookupKey, SequenceMetadata)> = vec![]; let serialized = serialize_entries(&entries); - assert!(serialized.is_empty()); - let deserialized = deserialize_entries(&serialized).unwrap(); + let deserialized = deserialize_entries::(&serialized).unwrap(); assert!(deserialized.is_empty()); } #[test] - fn test_deserialize_invalid_length() { - let buf = vec![0u8; 31]; // Not a multiple of 32 - assert!(deserialize_entries(&buf).is_err()); - } - - #[test] - fn test_deserialize_invalid_length_one_extra() { - let buf = vec![0u8; 33]; // 32 + 1 - assert!(deserialize_entries(&buf).is_err()); + fn test_deserialize_invalid_json() { + let buf = b"not valid json"; + assert!(deserialize_entries::(buf).is_err()); } // ==================== MemoryCache Tests ==================== @@ -974,9 +1036,41 @@ mod tests { #[test] fn test_fifo_key_generation() { - assert_eq!(DedupCache::fifo_key(0), "fifo:0"); - assert_eq!(DedupCache::fifo_key(127), "fifo:127"); - assert_eq!(DedupCache::fifo_key(128), "fifo:128"); - assert_eq!(DedupCache::fifo_key(u32::MAX), format!("fifo:{}", u32::MAX)); + assert_eq!(DedupCache::::fifo_key(0), "fifo:0"); + assert_eq!(DedupCache::::fifo_key(127), "fifo:127"); + assert_eq!(DedupCache::::fifo_key(128), "fifo:128"); + assert_eq!(DedupCache::::fifo_key(u32::MAX), format!("fifo:{}", u32::MAX)); + } + + /// Regression test: confirm the `SequenceMetadata` binary wire format has + /// not changed. Any change here would corrupt the dedup ring buffer in + /// Durable Object storage for deployed CT and bootstrap MTC workers. + /// + /// Format: `[16-byte key | 8-byte leaf_index BE | 8-byte timestamp BE]` + #[test] + fn test_sequence_metadata_cache_format_unchanged() { + let key: LookupKey = [ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + ]; + let leaf_index: u64 = 0x0102030405060708; + let timestamp: u64 = 0x0a0b0c0d0e0f1011; + + let entries: Vec<(LookupKey, SequenceMetadata)> = vec![(key, (leaf_index, timestamp))]; + let serialized = serialize_entries(&entries); + + // Manually construct the expected 32-byte buffer. + let mut expected = Vec::with_capacity(32); + expected.extend_from_slice(&key); + expected.extend_from_slice(&leaf_index.to_be_bytes()); + expected.extend_from_slice(×tamp.to_be_bytes()); + + assert_eq!(serialized, expected, + "SequenceMetadata binary format has changed — this will corrupt \ + existing Durable Object dedup cache storage"); + + // Round-trip. 
+ let deserialized = deserialize_entries::(&serialized).unwrap(); + assert_eq!(deserialized, entries); } } diff --git a/crates/generic_log_worker/src/log_ops.rs b/crates/generic_log_worker/src/log_ops.rs index 7cf1354c..1aae9aad 100644 --- a/crates/generic_log_worker/src/log_ops.rs +++ b/crates/generic_log_worker/src/log_ops.rs @@ -21,9 +21,9 @@ use crate::{ obs::metrics::{millis_diff_as_secs, AsF64, SequencerMetrics}, util::now_millis, - CacheRead, CacheWrite, LockBackend, LookupKey, ObjectBackend, SequenceMetadata, - SequencerConfig, + CacheRead, CacheWrite, LockBackend, LookupKey, ObjectBackend, SequencerConfig, }; +use serde::de::DeserializeOwned; use anyhow::{anyhow, bail}; use futures_util::future::try_join_all; use log::{debug, error, info, trace, warn}; @@ -31,6 +31,7 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use signed_note::VerifierList; use std::collections::HashMap; +use std::fmt::Debug; use std::{ cell::RefCell, cmp::{Ord, Ordering}, @@ -72,19 +73,19 @@ const MAX_POOL_SIZE: usize = 4000; /// they are rotated out of `in_sequencing`. /// #[derive(Debug)] -pub(crate) struct PoolState { +pub(crate) struct PoolState { // How many times sequencing has been skipped for any entries in the pool. sequence_skips: usize, // Entries that are ready to be sequenced, along with the Sender used to // send metadata to receivers once the corresponding entry is sequenced. - pending_entries: Vec<(P, Sender)>, + pending_entries: Vec<(P, Sender)>, // Deduplication cache for entries currently pending sequencing. - pending_dedup: HashMap>, + pending_dedup: HashMap>, // Deduplication cache for entries currently being sequenced. - in_sequencing_dedup: HashMap>, + in_sequencing_dedup: HashMap>, // Ring buffer tracking insertion timestamps for the most recent entries // that are potentially skippable. @@ -95,7 +96,7 @@ pub(crate) struct PoolState { leftover_timestamps_next_slot: usize, } -impl Default for PoolState
<P> {
+impl<P: PendingLogEntry, M> Default for PoolState<P, M> {
     fn default() -> Self {
         PoolState {
             sequence_skips: 0,
@@ -108,10 +109,10 @@ impl Default for PoolState
{ } } -impl PoolState { +impl PoolState { // Check if the key is already in the pool. If so, return a Receiver from // which to read the entry metadata when it is sequenced. - fn check(&self, key: &LookupKey) -> Option { + fn check(&self, key: &LookupKey) -> Option> { if let Some(rx) = self.in_sequencing_dedup.get(key) { // Entry is being sequenced. Some(AddLeafResult::Pending { @@ -128,11 +129,11 @@ impl PoolState { } } // Add a new entry to the pool. - fn add(&mut self, key: LookupKey, entry: E) -> AddLeafResult { + fn add(&mut self, key: LookupKey, entry: E) -> AddLeafResult { if self.pending_entries.len() >= MAX_POOL_SIZE { return AddLeafResult::RateLimited; } - let (tx, rx) = channel((0, 0)); + let (tx, rx) = channel(M::default()); self.pending_entries.push((entry, tx)); self.pending_dedup.insert(key, rx.clone()); self.leftover_timestamps_millis @@ -158,7 +159,7 @@ impl PoolState { old_size: u64, max_sequence_skips: usize, sequence_skip_threshold_millis: Option, - ) -> Option)>> { + ) -> Option)>> { let new_size = old_size + self.pending_entries.len() as u64; let publishing_full_tile = new_size / u64::from(TlogTile::FULL_WIDTH) > old_size / u64::from(TlogTile::FULL_WIDTH); @@ -291,7 +292,7 @@ pub(crate) async fn create_log( config.origin.as_str(), &extensions.iter().map(String::as_str).collect::>(), &dyn_signers, - &mut rand::thread_rng(), + &mut rand::rng(), ) .map_err(|e| anyhow!("failed to sign checkpoint: {e}"))?; lock.put(CHECKPOINT_KEY, &sth) @@ -657,19 +658,19 @@ async fn tile_reader_for_indexes( /// Result of an [`add_leaf_to_pool`] request containing either a cached log /// entry or a pending entry that must be resolved. -pub(crate) enum AddLeafResult { - Cached(SequenceMetadata), +pub(crate) enum AddLeafResult { + Cached(M), Pending { - rx: Receiver, + rx: Receiver, source: PendingSource, }, RateLimited, } -impl AddLeafResult { +impl AddLeafResult { /// Resolve an `AddLeafResult` to a leaf entry, or None if the /// entry was not sequenced. - pub(crate) async fn resolve(self) -> Option { + pub(crate) async fn resolve(self) -> Option { match self { AddLeafResult::Cached(entry) => Some(entry), AddLeafResult::Pending { mut rx, source: _ } => { @@ -687,9 +688,9 @@ impl AddLeafResult { pub(crate) fn source(&self) -> &'static str { match self { - AddLeafResult::Cached(_) => "cache", - AddLeafResult::RateLimited => "ratelimit", - AddLeafResult::Pending { rx: _, source } => match source { + Self::Cached(_) => "cache", + Self::RateLimited => "ratelimit", + Self::Pending { rx: _, source } => match source { PendingSource::InSequencing => "sequencing", PendingSource::Pool => "pool", PendingSource::Sequencer => "sequencer", @@ -709,12 +710,16 @@ pub(crate) enum PendingSource { /// with a [`AddLeafResult::Cached`]. If the pool is full, return /// [`AddLeafResult::RateLimited`]. Otherwise, return a [`AddLeafResult::Pending`] which /// can be resolved once the entry has been sequenced. -pub(crate) fn add_leaf_to_pool( - state: &RefCell>, - cache: &impl CacheRead, +pub(crate) fn add_leaf_to_pool( + state: &RefCell>, + cache: &impl CacheRead, config: &SequencerConfig, entry: E, -) -> AddLeafResult { +) -> AddLeafResult +where + E: PendingLogEntry, + M: Serialize + DeserializeOwned + Copy + Debug + Default + 'static, +{ let hash = entry.lookup_key(); let mut state = state.borrow_mut(); @@ -740,12 +745,12 @@ pub(crate) fn add_leaf_to_pool( /// Will return an error if sequencing fails with an error that requires the /// sequencer to be re-initialized to get into a good state. 
pub(crate) async fn sequence( - pool_state: &RefCell>, + pool_state: &RefCell>, sequence_state: &RefCell, config: &SequencerConfig, object: &impl ObjectBackend, lock: &impl LockBackend, - cache: &impl CacheWrite, + cache: &impl CacheWrite, metrics: &SequencerMetrics, ) -> Result<(), anyhow::Error> { let Some(entries) = pool_state.borrow_mut().take( @@ -816,8 +821,8 @@ async fn sequence_entries( config: &SequencerConfig, object: &impl ObjectBackend, lock: &impl LockBackend, - cache: &impl CacheWrite, - entries: Vec<(L::Pending, Sender)>, + cache: &impl CacheWrite, + entries: Vec<(L::Pending, Sender)>, metrics: &SequencerMetrics, ) -> Result<(), SequenceError> { let name = &config.name; @@ -853,6 +858,7 @@ async fn sequence_entries( let mut overlay = HashMap::new(); let mut n = old_size; + let new_size = old_size + entries.len() as u64; let mut sequenced_metadata = Vec::with_capacity(entries.len()); let mut cache_metadata = Vec::with_capacity(entries.len()); @@ -863,8 +869,10 @@ async fn sequence_entries( } } - // Add the entry and metadata to our lists of things sequenced - let metadata = (n, timestamp); + // Add the entry and metadata to our lists of things sequenced. + // L::make_metadata provides the application-specific metadata type, + // which may include additional fields beyond (leaf_index, timestamp). + let metadata = L::make_metadata(n, timestamp, old_size, new_size); cache_metadata.push((entry.lookup_key(), metadata)); sequenced_metadata.push((sender, metadata)); @@ -918,14 +926,16 @@ async fn sequence_entries( } } + assert_eq!(n, new_size, "loop must have processed exactly entries.len() entries"); + // Stage leftover partial data tile, if any. - if n != old_size && n % u64::from(TlogTile::FULL_WIDTH) != 0 { + if new_size != old_size && !new_size.is_multiple_of(u64::from(TlogTile::FULL_WIDTH)) { metrics .seq_data_tile_size .with_label_values(&["partial"]) .observe(data_tile.len().as_f64()); stage_data_tile::( - n, + new_size, &mut edge_tiles, &mut tile_uploads, std::mem::take(&mut data_tile), @@ -934,7 +944,7 @@ async fn sequence_entries( } // Produce and stage new tree tiles. - let tiles = TlogTile::new_tiles(old_size, n); + let tiles = TlogTile::new_tiles(old_size, new_size); for tile in tiles { let data = tile .read_data(&HashReaderWithOverlay { @@ -951,7 +961,7 @@ async fn sequence_entries( || (t.tile.level_index() == tile.level_index() && t.tile.width() < tile.width()) }) { debug!( - "{name}: staging tree tile: old_tree_size={old_size}, tree_size={n}, tile={tile:?}, size={}", + "{name}: staging tree tile: old_tree_size={old_size}, tree_size={new_size}, tile={tile:?}, size={}", data.len() ); edge_tiles.insert( @@ -973,7 +983,7 @@ async fn sequence_entries( // Construct the new sequence state. let new = { let tree = TreeWithTimestamp::from_hash_reader( - n, + new_size, &HashReaderWithOverlay { edge_tiles: &edge_tiles, overlay: &overlay, @@ -992,7 +1002,7 @@ async fn sequence_entries( config.origin.as_str(), &extensions.iter().map(String::as_str).collect::>(), &dyn_signers, - &mut rand::thread_rng(), + &mut rand::rng(), ) .map_err(|e| SequenceError::NonFatal(format!("couldn't sign checkpoint: {e}")))?; SequenceState { @@ -1061,7 +1071,7 @@ async fn sequence_entries( // Call the checkpoint callback. This is a no-op for CT, but is used to // update landmark checkpoints for MTC. 
- if let Err(e) = (config.checkpoint_callback)(old_time, timestamp, new.checkpoint()).await { + if let Err(e) = (config.checkpoint_callback)(old_time, timestamp, old_size, n, new.checkpoint()).await { warn!("{name}: Checkpoint callback failed: {e}"); } @@ -1367,6 +1377,7 @@ pub async fn upload_issuers( #[cfg(test)] mod tests { use super::*; + use tlog_tiles::SequenceMetadata; use crate::{empty_checkpoint_callback, util}; use anyhow::ensure; @@ -1376,8 +1387,8 @@ mod tests { use p256::ecdsa::SigningKey as EcdsaSigningKey; use prometheus::Registry; use rand::{ - rngs::{OsRng, SmallRng}, - thread_rng, Rng, RngCore, SeedableRng, + rngs::SmallRng, + Rng, RngExt, SeedableRng, }; use signed_note::{KeyName, Note}; use static_ct_api::{ @@ -1494,10 +1505,10 @@ mod tests { } // Check that we can make a consistency proof for random spans in the tree - let mut rng = thread_rng(); + let mut rng = rand::rng(); for _ in 0..100 { - let prev_tree_size = rng.gen_range(1..n); - let new_tree_size = rng.gen_range(prev_tree_size + 1..=n); + let prev_tree_size = rng.random_range(1..n); + let new_tree_size = rng.random_range(prev_tree_size + 1..=n); let consistency_proof = block_on(prove_consistency( tree_hashes[usize::try_from(new_tree_size).unwrap()], new_tree_size, @@ -1685,11 +1696,11 @@ mod tests { fn test_duplicates(is_precert: bool) { let mut log = TestLog::new(); - log.add_with_seed(is_precert, rand::thread_rng().next_u64()); // 1 - log.add_with_seed(is_precert, rand::thread_rng().next_u64()); // 2 + log.add_with_seed(is_precert, rand::rng().next_u64()); // 1 + log.add_with_seed(is_precert, rand::rng().next_u64()); // 2 log.sequence().unwrap(); - log.add_with_seed(is_precert, rand::thread_rng().next_u64()); // 3 - log.add_with_seed(is_precert, rand::thread_rng().next_u64()); // 4 + log.add_with_seed(is_precert, rand::rng().next_u64()); // 3 + log.add_with_seed(is_precert, rand::rng().next_u64()); // 4 // Two pairs of duplicates from the pending pool. let res01 = log.add_with_seed(is_precert, 0); // 5 @@ -1801,7 +1812,7 @@ mod tests { // Try to load the checkpoint with two randomly generated checkpoint signers. 
These should fail let checkpoint_signer = StaticCTCheckpointSigner::new( log.config.origin.clone(), - EcdsaSigningKey::random(&mut OsRng), + EcdsaSigningKey::random(&mut rand::rng()), ) .unwrap(); log.config.checkpoint_signers = vec![Box::new(checkpoint_signer)]; @@ -1814,7 +1825,7 @@ mod tests { let checkpoint_signer = Ed25519CheckpointSigner::new( log.config.origin.clone(), - Ed25519SigningKey::generate(&mut OsRng), + Ed25519SigningKey::generate(&mut rand::rng()), ) .unwrap(); log.config.checkpoint_signers = vec![Box::new(checkpoint_signer)]; @@ -2311,13 +2322,13 @@ mod tests { struct TestCacheBackend(RefCell>); - impl CacheRead for TestCacheBackend { + impl CacheRead for TestCacheBackend { fn get_entry(&self, key: &LookupKey) -> Option { self.0.borrow().get(key).copied() } } - impl CacheWrite for TestCacheBackend { + impl CacheWrite for TestCacheBackend { async fn put_entries( &self, entries: &[(LookupKey, SequenceMetadata)], @@ -2390,7 +2401,7 @@ mod tests { struct TestLog { config: SequencerConfig, - pool_state: RefCell>, + pool_state: RefCell>, sequence_state: RefCell, lock: TestLockBackend, object: TestObjectBackend, @@ -2400,7 +2411,7 @@ mod tests { impl TestLog { fn new() -> Self { - let mut rng = OsRng; + let mut rng = rand::rng(); let cache = TestCacheBackend(RefCell::new(HashMap::new())); let object = TestObjectBackend::new(); @@ -2493,23 +2504,23 @@ mod tests { .unwrap(); self.pool_state.borrow_mut().reset_in_sequencing_dedup(); } - fn add_certificate(&mut self) -> AddLeafResult { - self.add_certificate_with_seed(rand::thread_rng().next_u64()) + fn add_certificate(&mut self) -> AddLeafResult { + self.add_certificate_with_seed(rand::rng().next_u64()) } - fn add_certificate_with_seed(&mut self, seed: u64) -> AddLeafResult { + fn add_certificate_with_seed(&mut self, seed: u64) -> AddLeafResult { self.add_with_seed(false, seed) } - fn add(&mut self, is_precert: bool) -> AddLeafResult { - self.add_with_seed(is_precert, rand::thread_rng().next_u64()) + fn add(&mut self, is_precert: bool) -> AddLeafResult { + self.add_with_seed(is_precert, rand::rng().next_u64()) } - fn add_with_seed(&mut self, is_precert: bool, seed: u64) -> AddLeafResult { + fn add_with_seed(&mut self, is_precert: bool, seed: u64) -> AddLeafResult { let mut rng = SmallRng::seed_from_u64(seed); - let mut certificate = vec![0; rng.gen_range(8..12)]; + let mut certificate = vec![0; rng.random_range(8..12)]; rng.fill(&mut certificate[..]); let precert_opt: Option = if is_precert { let mut issuer_key_hash = [0; 32]; rng.fill(&mut issuer_key_hash); - let mut pre_certificate = vec![0; rng.gen_range(1..5)]; + let mut pre_certificate = vec![0; rng.random_range(1..5)]; rng.fill(&mut pre_certificate[..]); Some(PrecertData { issuer_key_hash, @@ -2518,7 +2529,7 @@ mod tests { } else { None }; - let issuers = CHAINS[rng.gen_range(0..CHAINS.len())]; + let issuers = CHAINS[rng.random_range(0..CHAINS.len())]; let leaf = StaticCTPendingLogEntry { certificate, precert_opt, diff --git a/crates/generic_log_worker/src/sequencer_do.rs b/crates/generic_log_worker/src/sequencer_do.rs index 58607d85..3b69eddb 100644 --- a/crates/generic_log_worker/src/sequencer_do.rs +++ b/crates/generic_log_worker/src/sequencer_do.rs @@ -15,7 +15,7 @@ use crate::{ }, serialize, util::now_millis, - DedupCache, LookupKey, MemoryCache, ObjectBucket, SequenceMetadata, BATCH_ENDPOINT, + CacheSerialize, DedupCache, LookupKey, MemoryCache, ObjectBucket, BATCH_ENDPOINT, CLEANER_BINDING, ENTRY_ENDPOINT, }; use futures_util::future::join_all; @@ -33,12 +33,12 @@ 
const MEMORY_CACHE_SIZE: usize = 300_000; pub struct GenericSequencer { env: Env, - do_state: State, // implements LockBackend - public_bucket: RwLock, // implements ObjectBackend - cache: DedupCache, // implements CacheRead, CacheWrite + do_state: State, // implements LockBackend + public_bucket: RwLock, // implements ObjectBackend + cache: DedupCache, // implements CacheRead, CacheWrite config: SequencerConfig, sequence_state: RefCell, - pool_state: RefCell>, + pool_state: RefCell>, initialized: RefCell, init_mux: Mutex<()>, wshim: Option, @@ -62,7 +62,7 @@ pub struct SequencerConfig { pub env_label: String, } -impl GenericSequencer { +impl> GenericSequencer { /// Return a new sequencer with the given config. /// /// # Panics @@ -219,7 +219,7 @@ impl GenericSequencer { } } -impl GenericSequencer { +impl> GenericSequencer { // Initialize the durable object when it is started on a new machine (e.g., after eviction or a deployment). async fn initialize(&self, metrics: &SequencerMetrics) -> Result<(), WorkerError> { // This can be triggered by the alarm() or fetch() handlers, so lock state to avoid a race condition. @@ -313,7 +313,7 @@ impl GenericSequencer { &self, pending_entries: Vec, metrics: &SequencerMetrics, - ) -> Result, WorkerError> { + ) -> Result, WorkerError> { // Safe to unwrap config here as the log must be initialized. let mut futures = Vec::with_capacity(pending_entries.len()); let mut lookup_keys = Vec::with_capacity(pending_entries.len()); @@ -359,11 +359,15 @@ impl GenericSequencer { /// The parameters are as follows: /// - `old_time: UnixTimestamp`: The timestamp of the previous checkpoint. /// - `new_time: UnixTimestamp`: The timestamp of the latest checkpoint. +/// - `old_tree_size: u64`: The tree size of the previous checkpoint. +/// - `new_tree_size: u64`: The tree size of the latest checkpoint. /// - `new_checkpoint: &[u8]`: The latest checkpoint bytes. This is a signed note. 
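+///
+/// A sketch of a conforming callback (illustrative only; real callbacks are
+/// built in the workers, e.g. to update MTC landmark checkpoints):
+///
+/// ```ignore
+/// let cb: CheckpointCallbacker = Box::new(
+///     |_old_time, _new_time, old_tree_size, new_tree_size, _checkpoint: &[u8]| {
+///         Box::pin(async move {
+///             log::info!("tree grew from {old_tree_size} to {new_tree_size}");
+///             Ok(())
+///         })
+///     },
+/// );
+/// ```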
pub type CheckpointCallbacker = Box< dyn Fn( UnixTimestamp, UnixTimestamp, + u64, + u64, &[u8], ) -> Pin> + 'static>> + 'static, @@ -374,8 +378,10 @@ pub type CheckpointCallbacker = Box< #[must_use] pub fn empty_checkpoint_callback() -> CheckpointCallbacker { Box::new( - move |_old_time: UnixTimestamp, _new_time: UnixTimestamp, _new_checkpoint: &[u8]| { - Box::pin(async move { Ok(()) }) - }, + move |_old_time: UnixTimestamp, + _new_time: UnixTimestamp, + _old_tree_size: u64, + _new_tree_size: u64, + _new_checkpoint: &[u8]| { Box::pin(async move { Ok(()) }) }, ) } diff --git a/crates/ietf_mtc_api/Cargo.toml b/crates/ietf_mtc_api/Cargo.toml new file mode 100644 index 00000000..b99b8711 --- /dev/null +++ b/crates/ietf_mtc_api/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "ietf_mtc_api" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true +readme.workspace = true +homepage.workspace = true +repository.workspace = true +description.workspace = true + +[dependencies] +base64.workspace = true +generic_log_worker.workspace = true +serde_json.workspace = true +byteorder.workspace = true +rand.workspace = true +der.workspace = true +ed25519-dalek.workspace = true +signature.workspace = true +ml-dsa.workspace = true +pkcs8.workspace = true +length_prefixed.workspace = true +serde.workspace = true +serde_with.workspace = true +sha2.workspace = true +signed_note.workspace = true +thiserror.workspace = true +tlog_tiles.workspace = true +x509-cert.workspace = true + +[dev-dependencies] diff --git a/crates/ietf_mtc_api/README.md b/crates/ietf_mtc_api/README.md new file mode 100644 index 00000000..42fed556 --- /dev/null +++ b/crates/ietf_mtc_api/README.md @@ -0,0 +1,33 @@ +# ietf_mtc_api + +Core types and logic for the [IETF Merkle Tree CA Worker](../ietf_mtc_worker/README.md). + +This crate implements the IETF draft protocol layer on top of the shared +[`tlog_tiles`](../tlog_tiles/) infrastructure, targeting +[draft-ietf-plants-merkle-tree-certs-02](https://datatracker.ietf.org/doc/draft-ietf-plants-merkle-tree-certs/). + +Key components: + +- **`AddEntryRequest`** — PKCS#10 CSR submission request (base64url-encoded DER, + matching the ACME `finalize` format per RFC 8555 §7.4). +- **`build_pending_entry`** — parses a CSR, extracts subject, SPKI algorithm, + SPKI hash, and SANs, and constructs an `IetfMtcPendingLogEntry`. +- **`TbsCertificateLogEntry`** — the plants-02 wire format: fields encoded as raw + concatenated DER (no outer SEQUENCE wrapper), including the new + `subjectPublicKeyInfoAlgorithm` field. +- **`MerkleTreeCertEntry`** — entry type enum (`NullEntry` / `TbsCertEntry`) with + encode/decode. +- **`serialize_landmark_relative_cert`** — constructs the landmark-relative MTC + certificate from a sequenced log entry, an inclusion proof, and the subscriber's + SPKI. +- **Landmark sequence** — tracks the active landmark subtrees and their Merkle + roots. +- **Cosigner** — Ed25519-based subtree cosigning over the `mtc-subtree/v1` note + format. + +For the older bootstrap experiment (draft-davidben-tls-merkle-tree-certs-09), +see [`bootstrap_mtc_api`](../bootstrap_mtc_api/). + +## License + +The project is licensed under the [BSD-3-Clause License](./LICENSE). diff --git a/crates/ietf_mtc_api/src/cosigner.rs b/crates/ietf_mtc_api/src/cosigner.rs new file mode 100644 index 00000000..4af3b5b0 --- /dev/null +++ b/crates/ietf_mtc_api/src/cosigner.rs @@ -0,0 +1,514 @@ +// Copyright (c) 2025 Cloudflare, Inc. 
+// Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause + +use byteorder::{BigEndian, WriteBytesExt}; +use ed25519_dalek::Signer as Ed25519Signer; +use length_prefixed::WriteLengthPrefixedBytesExt; +use ml_dsa::{signature::Verifier as MlDsaVerifier, MlDsa44}; + +use signature::Error as SignatureError; +use signed_note::{compute_key_id, KeyName, NoteError, NoteSignature, NoteVerifier}; +use std::collections::HashMap; +use tlog_tiles::{CheckpointSigner, CheckpointText, Hash, LeafIndex, UnixTimestamp}; + +use crate::{RelativeOid, ID_RDNA_TRUSTANCHOR_ID}; + +pub type TrustAnchorID = RelativeOid; + +// --------------------------------------------------------------------------- +// Multi-algorithm key types +// --------------------------------------------------------------------------- + +/// A signing key for MTC subtree cosignatures. +#[derive(Clone)] +#[allow(clippy::large_enum_variant)] +pub enum MtcSigningKey { + Ed25519(ed25519_dalek::SigningKey), + MlDsa44(ml_dsa::ExpandedSigningKey), +} + +/// A verifying key for MTC subtree cosignatures. +#[derive(Clone)] +#[allow(clippy::large_enum_variant)] +pub enum MtcVerifyingKey { + Ed25519(ed25519_dalek::VerifyingKey), + MlDsa44(ml_dsa::VerifyingKey), +} + +impl MtcSigningKey { + /// Sign `msg`, returning the signature bytes. + /// + /// # Errors + /// + /// Returns an error if signing fails. Not possible with current variants, + /// but future algorithms (e.g. randomized schemes requiring entropy) may + /// be fallible. + pub fn try_sign(&self, msg: &[u8]) -> Result, SignatureError> { + Ok(match self { + Self::Ed25519(sk) => sk.sign(msg).to_bytes().to_vec(), + Self::MlDsa44(sk) => sk.sign(msg).encode().as_slice().to_vec(), + }) + } +} + +impl MtcVerifyingKey { + /// Returns the raw public key bytes (without algorithm prefix or DER wrapping). + fn to_raw_bytes(&self) -> Vec { + match self { + Self::Ed25519(vk) => vk.to_bytes().to_vec(), + Self::MlDsa44(vk) => vk.encode().as_slice().to_vec(), + } + } + + /// The signature type identifier bytes for this algorithm, as used in the + /// c2sp.org/signed-note key ID computation: + /// + /// `key ID = SHA-256(key name || 0x0A || signature_type_bytes || public key)[:4]` + /// + /// Ed25519 uses the allocated single byte `0x01`. ML-DSA variants use `0xff` + /// (unassigned per c2sp.org/signed-note §Signature types) followed by the + /// algorithm OID in dotted-decimal ASCII, as RECOMMENDED by the spec for types + /// without an assigned identifier byte. 
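+    ///
+    /// Concretely (mirroring the match below): Ed25519 → `[0x01]`;
+    /// ML-DSA-44 → `0xff` followed by the ASCII bytes of
+    /// `2.16.840.1.101.3.4.3.17`.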
+ // TODO(C2SP/C2SP#237): Once https://github.com/C2SP/C2SP/pull/237 merges, update + // the ML-DSA-44 cosignature to the finalised format: + // - algorithm byte: 0x06 (replacing 0xff + dotted-decimal OID) + // - signed message label: "subtree/v1\n\0" (replacing "mtc-subtree/v1\n\0") + // - add 8-byte POSIX-seconds timestamp prefix to signature bytes + // - cosigner_name / log_origin OID encoding: "oid/" + DER content bytes + // (replacing BER-encoded relative OID bytes) + // - extract_timestamp_millis: return Some(timestamp_secs * 1000) + // - CheckpointSigner::sign: use the provided timestamp (currently ignored) + fn signature_type_bytes(&self) -> &'static [u8] { + match self { + Self::Ed25519(_) => &[0x01], + Self::MlDsa44(_) => b"\xff2.16.840.1.101.3.4.3.17", + } + } + + fn verify(&self, msg: &[u8], sig_bytes: &[u8]) -> bool { + match self { + Self::Ed25519(vk) => { + let Ok(sig_arr) = sig_bytes.try_into() else { + return false; + }; + let sig = ed25519_dalek::Signature::from_bytes(sig_arr); + ed25519_dalek::Verifier::verify(vk, msg, &sig).is_ok() + } + Self::MlDsa44(vk) => verify_ml_dsa(vk, msg, sig_bytes), + } + } + + /// # Panics + /// + /// Panics if PKCS#8 encoding fails, which should never happen for a valid key. + #[must_use] + pub fn to_public_key_der(&self) -> Vec { + use pkcs8::EncodePublicKey; + match self { + Self::Ed25519(vk) => vk + .to_public_key_der() + .expect("Ed25519 SPKI encoding failed") + .to_vec(), + Self::MlDsa44(vk) => vk + .to_public_key_der() + .expect("ML-DSA-44 SPKI encoding failed") + .to_vec(), + } + } +} + +/// Generic ML-DSA signature verification helper. +fn verify_ml_dsa
+where
+    P: ml_dsa::MlDsaParams,
+{
+    ml_dsa::EncodedSignature::<P>::try_from(sig_bytes)
+        .ok()
+        .and_then(|enc| ml_dsa::Signature::<P>::decode(&enc))
+        .is_some_and(|sig| MlDsaVerifier::verify(vk, msg, &sig).is_ok())
+}
+
+// ---------------------------------------------------------------------------
+// MtcCosigner
+// ---------------------------------------------------------------------------
+
+pub struct MtcCosigner {
+    v: MtcNoteVerifier,
+    k: MtcSigningKey,
+}
+
+impl MtcCosigner {
+    /// Return a checkpoint cosigner from an `MtcSigningKey` and `MtcVerifyingKey`.
+    #[must_use]
+    pub fn new_checkpoint(
+        cosigner_id: TrustAnchorID,
+        log_id: TrustAnchorID,
+        sk: MtcSigningKey,
+        vk: MtcVerifyingKey,
+    ) -> Self {
+        let sig_type = vk.signature_type_bytes();
+        Self {
+            v: MtcNoteVerifier::new_checkpoint(cosigner_id, log_id, vk, sig_type),
+            k: sk,
+        }
+    }
+
+    /// Compute a subtree cosignature as defined in
+    /// <https://www.ietf.org/archive/id/draft-ietf-plants-merkle-tree-certs-02.html>.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if signing fails. Future algorithm variants may be
+    /// fallible; use this method when the error can be propagated.
+    pub fn sign_subtree(
+        &self,
+        start: LeafIndex,
+        end: LeafIndex,
+        root_hash: &Hash,
+    ) -> Result<Vec<u8>, SignatureError> {
+        let serialized = serialize_mtc_subtree_signature_input(
+            &self.v.cosigner_id,
+            &self.v.log_id,
+            start,
+            end,
+            root_hash,
+        );
+        self.k.try_sign(&serialized)
+    }
+
+    /// Return the log ID.
+    #[must_use]
+    pub fn log_id(&self) -> &TrustAnchorID {
+        &self.v.log_id
+    }
+
+    /// Return the cosigner ID.
+    #[must_use]
+    pub fn cosigner_id(&self) -> &TrustAnchorID {
+        &self.v.cosigner_id
+    }
+
+    /// Return the DER-encoded `SubjectPublicKeyInfo` of the verifying key.
+    #[must_use]
+    pub fn verifying_key(&self) -> Vec<u8> {
+        self.v.verifying_key.to_public_key_der()
+    }
+}
+
+/// Support signing tlog-checkpoint with the subtree cosigner.
+impl CheckpointSigner for MtcCosigner {
+    fn name(&self) -> &KeyName {
+        self.v.name()
+    }
+
+    fn key_id(&self) -> u32 {
+        self.v.key_id()
+    }
+
+    fn sign(
+        &self,
+        _timestamp_unix_millis: UnixTimestamp,
+        checkpoint: &tlog_tiles::CheckpointText,
+    ) -> Result<NoteSignature, SignatureError> {
+        let sig = self.sign_subtree(0, checkpoint.size(), checkpoint.hash())?;
+        Ok(NoteSignature::new(self.name().clone(), self.key_id(), sig))
+    }
+
+    fn verifier(&self) -> Box<dyn NoteVerifier> {
+        Box::new(self.v.clone())
+    }
+}
+
+// ---------------------------------------------------------------------------
+// MtcNoteVerifier
+// ---------------------------------------------------------------------------
+
+/// Verifier for MTC subtree cosignatures.
+#[derive(Clone)]
+pub struct MtcNoteVerifier {
+    cosigner_id: TrustAnchorID,
+    log_id: TrustAnchorID,
+    name: KeyName,
+    id: u32,
+    verifying_key: MtcVerifyingKey,
+}
+
+impl MtcNoteVerifier {
+    /// Return a checkpoint verifier.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the trust anchor ID cannot be parsed as a valid key name
+    /// according to <https://c2sp.org/signed-note>.
+    #[must_use]
+    pub fn new_checkpoint(
+        cosigner_id: TrustAnchorID,
+        log_id: TrustAnchorID,
+        verifying_key: MtcVerifyingKey,
+        signature_type_bytes: &[u8],
+    ) -> Self {
+        let name = KeyName::new(format!("oid/{ID_RDNA_TRUSTANCHOR_ID}.{log_id}")).unwrap();
+
+        let id = {
+            // Key ID = SHA-256(name || 0x0A || signature_type_bytes || raw_pubkey_bytes)[:4]
+            // per https://c2sp.org/signed-note (compute_key_id convention).
+            let pubkey_bytes = verifying_key.to_raw_bytes();
+            compute_key_id(&name, &[signature_type_bytes, &pubkey_bytes].concat())
+        };
+
+        Self {
+            cosigner_id,
+            log_id,
+            name,
+            id,
+            verifying_key,
+        }
+    }
+}
+
+impl NoteVerifier for MtcNoteVerifier {
+    fn name(&self) -> &KeyName {
+        &self.name
+    }
+
+    fn key_id(&self) -> u32 {
+        self.id
+    }
+
+    fn verify(&self, msg: &[u8], sig_bytes: &[u8]) -> bool {
+        let Ok(checkpoint) = CheckpointText::from_bytes(msg) else {
+            return false;
+        };
+
+        let message = serialize_mtc_subtree_signature_input(
+            &self.cosigner_id,
+            &self.log_id,
+            0,
+            checkpoint.size(),
+            checkpoint.hash(),
+        );
+
+        self.verifying_key.verify(&message, sig_bytes)
+    }
+
+    fn extract_timestamp_millis(&self, _sig: &[u8]) -> Result<Option<UnixTimestamp>, NoteError> {
+        Ok(None)
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Proof parsing and verification
+// ---------------------------------------------------------------------------
+
+/// A decoded `MTCProof` extracted from a certificate's `signatureValue`.
+///
+/// See draft-ietf-plants-merkle-tree-certs §6.1.
+#[derive(Debug)]
+pub struct ParsedMtcProof {
+    /// Start of the covering subtree interval (inclusive).
+    pub start: u64,
+    /// End of the covering subtree interval (exclusive).
+    pub end: u64,
+    /// Merkle inclusion proof hashes.
+    pub inclusion_proof: Vec<Hash>,
+    /// Cosignatures keyed by `cosigner_id`.
+    pub signatures: HashMap<TrustAnchorID, Vec<u8>>,
+}
+
+impl ParsedMtcProof {
+    /// Parse an `MTCProof` from the raw `signatureValue` bytes of an MTC certificate.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if the bytes are malformed.
+    ///
+    /// # Panics
+    ///
+    /// Panics if a 32-byte hash slice cannot be converted to a fixed-size array,
+    /// which cannot happen since `chunks_exact(32)` guarantees the length.
+    pub fn from_bytes(mut bytes: &[u8]) -> Result<Self, crate::MtcError> {
+        use byteorder::ReadBytesExt;
+
+        let start = bytes.read_u64::<BigEndian>()?;
+        let end = bytes.read_u64::<BigEndian>()?;
+
+        // inclusion_proof: uint16-prefixed list of 32-byte hashes
+        let proof_len = bytes.read_u16::<BigEndian>()? as usize;
+        if bytes.len() < proof_len {
+            return Err(crate::MtcError::Dynamic("truncated inclusion proof".into()));
+        }
+        let (proof_bytes, rest) = bytes.split_at(proof_len);
+        bytes = rest;
+        let inclusion_proof = proof_bytes
+            .chunks_exact(32)
+            .map(|c| Hash(c.try_into().unwrap()))
+            .collect();
+
+        // signatures: uint16-prefixed list of MtcSignature
+        let sigs_len = bytes.read_u16::<BigEndian>()? as usize;
+        if bytes.len() < sigs_len {
+            return Err(crate::MtcError::Dynamic("truncated signatures".into()));
+        }
+        let mut sig_bytes = &bytes[..sigs_len];
+        let mut signatures = HashMap::new();
+        while !sig_bytes.is_empty() {
+            let id_len = sig_bytes.read_u8()? as usize;
+            if sig_bytes.len() < id_len {
+                return Err(crate::MtcError::Dynamic("truncated cosigner_id".into()));
+            }
+            let id_raw = &sig_bytes[..id_len];
+            sig_bytes = &sig_bytes[id_len..];
+            let cosigner_id = TrustAnchorID::from_ber_bytes(id_raw)
+                .map_err(|e| crate::MtcError::Dynamic(format!("invalid cosigner_id: {e}")))?;
+
+            let signature_len = sig_bytes.read_u16::<BigEndian>()? as usize;
+            if sig_bytes.len() < signature_len {
+                return Err(crate::MtcError::Dynamic("truncated signature".into()));
+            }
+            let sig = sig_bytes[..signature_len].to_vec();
+            sig_bytes = &sig_bytes[signature_len..];
+            signatures.insert(cosigner_id, sig);
+        }
+
+        Ok(Self {
+            start,
+            end,
+            inclusion_proof,
+            signatures,
+        })
+    }
+
+    /// Verify that one of the proof's cosignatures is valid for the given
+    /// subtree hash, cosigner verifying key, cosigner ID, and log ID.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if no matching cosignature is found or verification fails.
+    pub fn verify_cosignature(
+        &self,
+        subtree_hash: &Hash,
+        verifying_key: &MtcVerifyingKey,
+        cosigner_id: &TrustAnchorID,
+        log_id: &TrustAnchorID,
+    ) -> Result<(), crate::MtcError> {
+        let sig_bytes = self.signatures.get(cosigner_id).ok_or_else(|| {
+            crate::MtcError::Dynamic(format!("no signature found for cosigner_id {cosigner_id}"))
+        })?;
+        let msg = serialize_mtc_subtree_signature_input(
+            cosigner_id,
+            log_id,
+            self.start,
+            self.end,
+            subtree_hash,
+        );
+        if verifying_key.verify(&msg, sig_bytes) {
+            Ok(())
+        } else {
+            Err(crate::MtcError::Dynamic(
+                "cosignature verification failed".into(),
+            ))
+        }
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Serialization
+// ---------------------------------------------------------------------------
+
+/// Serializes the passed-in parameters into the correct format for signing
+/// according to <https://www.ietf.org/archive/id/draft-ietf-plants-merkle-tree-certs-02.html>.
+/// ```text
+///
+/// opaque HashValue[HASH_SIZE];
+///
+/// /* From Section 4.1 of draft-ietf-tls-trust-anchor-ids */
+/// opaque TrustAnchorID<1..2^8-1>;
+///
+/// struct {
+///     TrustAnchorID log_id;
+///     uint64 start;
+///     uint64 end;
+///     HashValue hash;
+/// } MTCSubtree;
+///
+/// struct {
+///     uint8 label[16] = "mtc-subtree/v1\n\0";
+///     TrustAnchorID cosigner_id;
+///     MTCSubtree subtree;
+/// } MTCSubtreeSignatureInput;
+/// ```
+///
+/// # Panics
+///
+/// Panics if writing to an internal buffer fails, which should never happen.
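+// Illustrative layout (derived from the function below): with single-arc IDs
+// cosigner_id = 42 and log_id = 43, the signing input is the 16-byte label
+// "mtc-subtree/v1\n\0", then 0x01 0x2a and 0x01 0x2b (one-byte length-prefixed
+// IDs), start and end as big-endian u64, and the 32-byte root hash.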
+fn serialize_mtc_subtree_signature_input(
+    cosigner_id: &TrustAnchorID,
+    log_id: &TrustAnchorID,
+    start: LeafIndex,
+    end: LeafIndex,
+    root_hash: &Hash,
+) -> Vec<u8> {
+    let mut buffer: Vec<u8> = b"mtc-subtree/v1\n\x00".to_vec();
+    buffer
+        .write_length_prefixed(cosigner_id.as_bytes(), 1)
+        .unwrap();
+    buffer.write_length_prefixed(log_id.as_bytes(), 1).unwrap();
+    buffer.write_u64::<BigEndian>(start).unwrap();
+    buffer.write_u64::<BigEndian>(end).unwrap();
+    buffer.extend(root_hash.0);
+    buffer
+}
+
+// ---------------------------------------------------------------------------
+// Tests
+// ---------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use tlog_tiles::{open_checkpoint, record_hash, TreeWithTimestamp};
+
+    use super::*;
+    use ml_dsa::{signature::Keypair as _, KeyGen, MlDsa44};
+    use signed_note::VerifierList;
+    use std::str::FromStr;
+
+    fn run_sign_verify_test(signer: MtcCosigner) {
+        let origin = "example.com/origin";
+        let timestamp = 100;
+        let tree = TreeWithTimestamp::new(4, record_hash(b"hello world"), timestamp);
+        let checkpoint = tree
+            .sign(origin, &[], &[&signer], &mut rand::rng())
+            .unwrap();
+        let verifier = signer.verifier();
+        open_checkpoint(
+            origin,
+            &VerifierList::new(vec![verifier]),
+            timestamp,
+            &checkpoint,
+        )
+        .unwrap();
+    }
+
+    #[test]
+    fn test_cosignature_ed25519() {
+        let sk = ed25519_dalek::SigningKey::generate(&mut rand::rng());
+        let vk = sk.verifying_key();
+        run_sign_verify_test(MtcCosigner::new_checkpoint(
+            TrustAnchorID::from_str("1.2.3").unwrap(),
+            TrustAnchorID::from_str("4.5.6").unwrap(),
+            MtcSigningKey::Ed25519(sk),
+            MtcVerifyingKey::Ed25519(vk),
+        ));
+    }
+
+    #[test]
+    fn test_cosignature_ml_dsa_44() {
+        let kp = MlDsa44::key_gen(&mut rand::rng());
+        run_sign_verify_test(MtcCosigner::new_checkpoint(
+            TrustAnchorID::from_str("1.2.3").unwrap(),
+            TrustAnchorID::from_str("4.5.6").unwrap(),
+            MtcSigningKey::MlDsa44(kp.signing_key().clone()),
+            MtcVerifyingKey::MlDsa44(kp.verifying_key().clone()),
+        ));
+    }
+}
diff --git a/crates/ietf_mtc_api/src/landmark.rs b/crates/ietf_mtc_api/src/landmark.rs
new file mode 100644
index 00000000..e79bcdb7
--- /dev/null
+++ b/crates/ietf_mtc_api/src/landmark.rs
@@ -0,0 +1,585 @@
+//! Landmark sequence management for Merkle Tree Certificates.
+//!
+//! This module implements the landmark sequence as specified in
+//! [draft-ietf-plants-merkle-tree-certs-02, Section 6.3.1](https://www.ietf.org/archive/id/draft-ietf-plants-merkle-tree-certs-02.html#section-6.3.1).
+//!
+//! # Key Concepts
+//!
+//! - **Landmarks**: Agreed-upon tree sizes used to optimize certificate construction
+//! - **Active Landmarks**: The most recent `max_active_landmarks` landmarks
+//! - **Landmark Subtrees**: Subtrees covering the interval between consecutive landmarks
+//!
+//! # Important: Landmark Storage Invariant
+//!
+//! The `landmarks` deque stores `num_active_landmarks + 1` tree sizes, which equals
+//! `max_active_landmarks + 1` at steady state. This is **correct by design** per the spec.
+//!
+//! ## Why One Extra Landmark?
+//!
+//! Landmark subtrees are defined by intervals `[prev_tree_size, tree_size)` between
+//! consecutive landmarks. To compute subtrees for ALL active landmarks, we need the
+//! tree size of the landmark immediately before the oldest active landmark (which is
+//! expired but still needed for computation).
+//!
+//! ## Example
+//!
+//! With `max_active_landmarks = 169`:
+//! - File contains `num_active_landmarks = 169` (at most)
+//! - File stores `169 + 1 = 170` tree sizes
+//! - Deque contains 170 landmarks
+//! - The 169 most recent are "active" (contain unexpired certs)
+//! - The oldest (expired) landmark is kept to compute subtrees
+//!
+//! This is validated by: `num_active_landmarks <= max_active_landmarks` (not `<`).

+use crate::MtcError;
+use std::{collections::VecDeque, fmt::Write};
+use tlog_tiles::Subtree;
+
+/// A sequence of landmarks used for constructing landmark certificates.
+///
+/// Landmarks are numbered consecutively from zero and define subtrees that
+/// relying parties can use to optimize certificate validation.
+///
+/// # Invariants
+///
+/// - `landmarks.len() <= max_active_landmarks + 1` (one extra for subtree computation)
+/// - Tree sizes are strictly monotonically increasing
+/// - At steady state: `landmarks.len() == max_active_landmarks + 1`
+#[derive(Debug, PartialEq, Clone)]
+pub struct LandmarkSequence {
+    /// Maximum number of active landmarks (those containing unexpired certificates).
+    /// The deque may contain `max_active_landmarks + 1` total landmarks.
+    pub max_active_landmarks: usize,
+    /// The ID of the most recently added landmark.
+    pub last_landmark: usize,
+    /// Tree sizes for the landmarks, from oldest to newest.
+    /// Contains up to `max_active_landmarks + 1` entries at steady state.
+    pub landmarks: VecDeque<u64>,
+}
+
+/// The location in object storage for the landmark sequence.
+pub const LANDMARK_KEY: &str = "landmark";
+
+/// The location in object storage for the landmark checkpoint.
+pub const LANDMARK_CHECKPOINT_KEY: &str = "landmark-checkpoint";
+
+/// The location in object storage for the landmark bundle. Its serialized form is JSON.
+pub const LANDMARK_BUNDLE_KEY: &str = "landmark-bundle";
+
+impl LandmarkSequence {
+    /// Create a new landmark sequence with the given `max_active_landmarks` and an
+    /// initial landmark with id 0 and tree size 0.
+    #[must_use]
+    pub fn create(max_active_landmarks: usize) -> Self {
+        Self {
+            max_active_landmarks,
+            last_landmark: 0,
+            landmarks: VecDeque::from(vec![0]),
+        }
+    }
+
+    /// Get the first index that is covered by the landmark sequence.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the landmark sequence is empty, which should never happen.
+    #[must_use]
+    pub fn first_index(&self) -> u64 {
+        *self.landmarks.front().expect("landmark sequence is empty")
+    }
+
+    /// Add a new landmark with the given tree size, removing the oldest landmark
+    /// if necessary to maintain the invariant that `landmarks.len() <= max_active_landmarks + 1`.
+    ///
+    /// Returns `true` if a new landmark was added, or `false` if the tree size
+    /// matches the most recent landmark (no change).
+    ///
+    /// # Important Note
+    ///
+    /// The check `if self.landmarks.len() > self.max_active_landmarks` happens **before**
+    /// the push. This is intentional and correct per the spec! It allows the deque
+    /// to reach `max_active_landmarks + 1` elements, which is needed to compute subtrees
+    /// for all active landmarks.
+    ///
+    /// At steady state:
+    /// - Before push: `len = max_active_landmarks + 1`
+    /// - Check: `(max_active_landmarks + 1) > max_active_landmarks`? → `true` → drain 1
+    /// - After drain: `len = max_active_landmarks`
+    /// - After push: `len = max_active_landmarks + 1` ✓
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if the tree size is not strictly greater than the last
+    /// landmark tree size (monotonicity violation).
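+    // Illustrative trace (derived from the logic below): starting from
+    // `create(2)` and adding tree sizes 10, 20, 30 leaves
+    // `landmarks = [10, 20, 30]` (max + 1 entries) and `last_landmark = 3`;
+    // the initial size-0 landmark has been drained.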
+    pub fn add(&mut self, tree_size: u64) -> Result<bool, MtcError> {
+        if let Some(last) = self.landmarks.back() {
+            if tree_size == *last {
+                // The last landmark is unchanged.
+                return Ok(false);
+            }
+            if tree_size < *last {
+                return Err(MtcError::Dynamic(
+                    "landmark sequence must be strictly increasing".into(),
+                ));
+            }
+        }
+        // CRITICAL: Check happens BEFORE push to allow deque to reach max_active_landmarks + 1 elements.
+        // This is correct per spec - we need the extra (oldest) landmark to compute subtrees.
+        // See module-level documentation for detailed explanation.
+        if self.landmarks.len() > self.max_active_landmarks {
+            self.landmarks
+                .drain(..self.landmarks.len() - self.max_active_landmarks);
+        }
+        self.landmarks.push_back(tree_size);
+        self.last_landmark += 1;
+        Ok(true)
+    }
+
+    /// Return the landmark ID and subtree covering `leaf_index`, or `None` if
+    /// the `leaf_index` is not covered by a landmark range.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if landmarks are not sorted or are not unique.
+    #[must_use]
+    pub fn subtree_for_index(&self, leaf_index: u64) -> Option<(usize, Subtree)> {
+        // Find the index of the first landmark greater than the leaf index.
+        let hi_index = self
+            .landmarks
+            .partition_point(|&landmark| landmark <= leaf_index);
+
+        // Get the lower index, if it exists.
+        let lo_index = hi_index.checked_sub(1)?;
+
+        // Return the ID of the higher landmark.
+        let landmark_id = hi_index + (self.last_landmark + 1 - self.landmarks.len());
+
+        // Get lo and hi landmarks, if they exist.
+        let &lo = self.landmarks.get(lo_index)?;
+        let &hi = self.landmarks.get(hi_index)?;
+
+        // Find which landmark subtree within `[lo, hi)` contains the leaf.
+        let (left, right) = Subtree::split_interval(lo, hi).unwrap();
+        if left.contains(leaf_index) {
+            Some((landmark_id, left))
+        } else {
+            right.map(|tree| (landmark_id, tree))
+        }
+    }
+
+    /// Serialize the landmark sequence to the wire format.
+    ///
+    /// The format is defined in
+    /// [draft-ietf-plants-merkle-tree-certs-02, Section 6.3.1](https://www.ietf.org/archive/id/draft-ietf-plants-merkle-tree-certs-02.html#section-6.3.1):
+    ///
+    /// ```text
+    /// <last_landmark> <num_active_landmarks>
+    /// <tree_size>   // Most recent (last_landmark)
+    /// <tree_size>
+    /// ...
+    /// <tree_size>   // Oldest
+    /// ```
+    ///
+    /// # Important
+    ///
+    /// - `num_active_landmarks = landmarks.len() - 1`
+    /// - File contains `num_active_landmarks + 1` tree sizes
+    /// - With `max_active_landmarks = 169`, file can have `num_active_landmarks = 169`,
+    ///   which means 170 total tree sizes
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if writing to the buffer fails.
+    pub fn to_bytes(&self) -> Result<Vec<u8>, MtcError> {
+        let mut buffer = format!("{} {}\n", self.last_landmark, self.landmarks.len() - 1);
+        for landmark in self.landmarks.iter().rev() {
+            writeln!(buffer, "{landmark}")?;
+        }
+        Ok(buffer.into_bytes())
+    }
+
+    /// Deserialize a landmark sequence from the wire format.
+    ///
+    /// Validates that:
+    /// - `num_active_landmarks <= max_active_landmarks` (allows equality!)
+    /// - `num_active_landmarks <= last_landmark`
+    /// - Tree sizes are strictly monotonically decreasing in the file
+    ///
+    /// # Important: Validation Behavior
+    ///
+    /// The validation uses `num_active_landmarks <= max_active_landmarks`, not `<`.
+    /// This means with `max_active_landmarks = 169`, a file with `num_active_landmarks = 169`
+    /// is **valid** and will create a deque with 170 landmarks. This is correct
+    /// per the spec!
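+    ///
+    /// For example (illustrative), with `max_active_landmarks >= 2`, a file
+    /// reading `3 2`, `30`, `20`, `10` parses to `last_landmark = 3` and
+    /// `landmarks = [10, 20, 30]` (oldest first).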
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if:
+    /// - The file is malformed or too large (`> 10_000` bytes)
+    /// - Validation constraints are violated
+    /// - Tree sizes are not strictly monotonically decreasing
+    pub fn from_bytes(data: &[u8], max_active_landmarks: usize) -> Result<Self, MtcError> {
+        // Note: `lines()` will return the same thing whether or not there's a
+        // newline after the last line, and whether or not there are carriage
+        // returns preceding each newline.
+
+        // Set some upper limit on what we're willing to process.
+        if data.len() > 10_000 {
+            return Err(MtcError::Dynamic("too much data".into()));
+        }
+        let mut iter = std::str::from_utf8(data)?.lines();
+        let first = iter
+            .next()
+            .ok_or(MtcError::Dynamic("missing first line".into()))?
+            .split_once(' ')
+            .ok_or(MtcError::Dynamic("malformed first line".into()))?;
+        let last_landmark = first.0.parse::<usize>()?;
+        let num_active_landmarks = first.1.parse::<usize>()?;
+
+        // Note: Uses > not >= to allow num_active_landmarks == max_active_landmarks (correct per spec).
+        // This means a file with max_active_landmarks=169 can have num_active_landmarks=169,
+        // and will contain 170 tree sizes (169 active + 1 expired for subtree computation).
+        if num_active_landmarks > max_active_landmarks {
+            return Err(MtcError::Dynamic(
+                "num_active_landmarks must not be greater than max_active_landmarks".into(),
+            ));
+        }
+        if num_active_landmarks > last_landmark {
+            return Err(MtcError::Dynamic(
+                "num_active_landmarks must not be greater than last_landmark".into(),
+            ));
+        }
+
+        let mut landmarks = VecDeque::with_capacity(num_active_landmarks + 1);
+        for i in 0..=num_active_landmarks {
+            let landmark = iter
+                .next()
+                .ok_or(MtcError::Dynamic("malformed landmark line".into()))?
+                .parse::<u64>()?;
+            if i > 0 && landmark >= landmarks[0] {
+                return Err(MtcError::Dynamic(
+                    "landmarks must be in decreasing order".into(),
+                ));
+            }
+            landmarks.push_front(landmark);
+        }
+        if iter.next().is_some() {
+            return Err(MtcError::Dynamic(
+                "trailing data in landmark sequence".into(),
+            ));
+        }
+        Ok(Self {
+            max_active_landmarks,
+            last_landmark,
+            landmarks,
+        })
+    }
+
+    /// Iterate over the sequence of subtrees determined by the landmark sequence.
+    #[must_use]
+    pub fn subtrees(&self) -> LandmarkSubtreesIterator<'_> {
+        LandmarkSubtreesIterator {
+            index: 1,
+            landmarks: &self.landmarks,
+            next_subtree: None,
+        }
+    }
+}
+
+/// An iterator over the subtrees determined by the landmark sequence.
+pub struct LandmarkSubtreesIterator<'a> {
+    index: usize,
+    landmarks: &'a VecDeque<u64>,
+    next_subtree: Option<Subtree>,
+}
+
+impl Iterator for LandmarkSubtreesIterator<'_> {
+    type Item = Subtree;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.landmarks.len() < 2 {
+            return None;
+        }
+
+        // `take()` leaves `None` behind, so no explicit reset is needed.
+        if let Some(subtree) = self.next_subtree.take() {
+            return Some(subtree);
+        }
+
+        if self.index == self.landmarks.len() {
+            return None;
+        }
+
+        let subtree;
+        (subtree, self.next_subtree) =
+            Subtree::split_interval(self.landmarks[self.index - 1], self.landmarks[self.index])
+                .unwrap();
+
+        self.index += 1;
+        Some(subtree)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_subtree_for_index() {
+        let mut seq = LandmarkSequence::create(10);
+        assert_eq!(seq.first_index(), 0);
+        // Only have a single landmark so no subtrees yet.
+        assert!(seq.subtree_for_index(0).is_none());
+        // Check landmark sequence at partial capacity.
+        for i in 1..=5 {
+            seq.add(i * 10).unwrap();
+        }
+        assert_eq!(seq.first_index(), 0);
+        // At first landmark.
+        assert_eq!(
+            seq.subtree_for_index(0),
+            Some((1, Subtree::new(0, 8).unwrap()))
+        );
+        // Past last landmark.
+        assert!(seq.subtree_for_index(50).is_none());
+        // Valid landmark, left subtree aligned with lower landmark tree size.
+        assert_eq!(
+            seq.subtree_for_index(31),
+            Some((4, Subtree::new(30, 32).unwrap()))
+        );
+        // Valid landmark, left subtree extending beyond lower landmark tree
+        // size.
+        assert_eq!(
+            seq.subtree_for_index(12),
+            Some((2, Subtree::new(8, 16).unwrap()))
+        );
+        // Valid landmark, right subtree.
+        assert_eq!(
+            seq.subtree_for_index(33),
+            Some((4, Subtree::new(32, 40).unwrap()))
+        );
+
+        // New tree size matching the last landmark tree size is ignored.
+        let old_seq = seq.clone();
+        seq.add(50).unwrap();
+        assert_eq!(seq, old_seq);
+        // Error if we try to add a smaller tree size.
+        assert!(seq.add(49).is_err());
+
+        // Put landmark sequence at full capacity.
+        for i in 6..=20 {
+            seq.add(i * 10).unwrap();
+        }
+        assert_eq!(seq.first_index(), 100);
+        // Before first landmark.
+        assert!(seq.subtree_for_index(99).is_none());
+        // Just within first landmark.
+        assert_eq!(
+            seq.subtree_for_index(100),
+            Some((11, Subtree::new(100, 104).unwrap()))
+        );
+        // At last landmark.
+        assert_eq!(
+            seq.subtree_for_index(199),
+            Some((20, Subtree::new(192, 200).unwrap()))
+        );
+        // Past last landmark.
+        assert!(seq.subtree_for_index(200).is_none());
+    }
+
+    #[test]
+    fn test_subtrees() {
+        let mut seq = LandmarkSequence::create(10);
+        assert!(seq.subtrees().next().is_none());
+
+        for i in 1..=5 {
+            seq.add(i * 10).unwrap();
+        }
+        let got = seq.subtrees().collect::<Vec<_>>();
+        let want = vec![
+            Subtree::new(0, 8).unwrap(),
+            Subtree::new(8, 10).unwrap(),
+            Subtree::new(8, 16).unwrap(),
+            Subtree::new(16, 20).unwrap(),
+            Subtree::new(20, 24).unwrap(),
+            Subtree::new(24, 30).unwrap(),
+            Subtree::new(30, 32).unwrap(),
+            Subtree::new(32, 40).unwrap(),
+            Subtree::new(40, 48).unwrap(),
+            Subtree::new(48, 50).unwrap(),
+        ];
+        assert_eq!(got, want);
+    }
+
+    #[test]
+    fn test_max_active_landmarks_plus_one_is_correct() {
+        // This test documents and validates the CORRECT behavior per the spec:
+        // The deque should contain max_active_landmarks + 1 entries at steady state.
+        //
+        // From draft-ietf-plants-merkle-tree-certs-02, Section 6.3.1:
+        // - "The most recent max_active_landmarks landmarks are said to be active"
+        // - File format stores "num_active_landmarks + 1 lines" of tree sizes
+        // - Validation: "num_active_landmarks <= max_active_landmarks"
+        //
+        // This means with max_active_landmarks=169, the file can have num_active=169,
+        // which results in 170 total tree sizes (169 + 1).
+ + let max_active_landmarks = 10; + let mut seq = LandmarkSequence::create(max_active_landmarks); + + // Fill to steady state + for i in 1..=20 { + seq.add(i * 10).unwrap(); + } + + // At steady state, we should have max_active_landmarks + 1 entries + assert_eq!( + seq.landmarks.len(), + max_active_landmarks + 1, + "Deque should contain max_active_landmarks + 1 = {} landmarks at steady state", + max_active_landmarks + 1 + ); + + // The serialized file should have num_active = max_active_landmarks + let bytes = seq.to_bytes().unwrap(); + let content = String::from_utf8(bytes).unwrap(); + let first_line = content.lines().next().unwrap(); + let parts: Vec<&str> = first_line.split_whitespace().collect(); + let num_active: usize = parts[1].parse().unwrap(); + + assert_eq!( + num_active, max_active_landmarks, + "Serialized file should have num_active_landmarks = {}", + max_active_landmarks + ); + + // File should contain num_active + 1 lines of tree sizes + let tree_size_lines: Vec<_> = content.lines().skip(1).collect(); + assert_eq!( + tree_size_lines.len(), + num_active + 1, + "File should contain {} tree size lines", + num_active + 1 + ); + } + + #[test] + fn test_production_config_values() { + // Validate the production configuration produces correct values. + // Production: 7 days (604800 secs), 1 hour intervals (3600 secs) + + let max_cert_lifetime_secs: usize = 604_800; // 7 days + let landmark_interval_secs: usize = 3_600; // 1 hour + + let max_active_landmarks = max_cert_lifetime_secs.div_ceil(landmark_interval_secs) + 1; + + assert_eq!( + max_active_landmarks, 169, + "Production max_active_landmarks should be 169" + ); + + let mut seq = LandmarkSequence::create(max_active_landmarks); + + // Simulate 200 hours of operation + for hour in 1..=200 { + seq.add(hour).unwrap(); + } + + // At steady state (after 169 additions), should have 170 landmarks + assert_eq!( + seq.landmarks.len(), + 170, + "Production should maintain 170 landmarks (169 active + 1 expired)" + ); + + // Validate serialization + let bytes = seq.to_bytes().unwrap(); + let content = String::from_utf8(bytes.clone()).unwrap(); + let first_line = content.lines().next().unwrap(); + let parts: Vec<&str> = first_line.split_whitespace().collect(); + let num_active: usize = parts[1].parse().unwrap(); + + assert_eq!( + num_active, 169, + "Production file should have num_active_landmarks = 169" + ); + + // Validate deserialization accepts this + let loaded = LandmarkSequence::from_bytes(&bytes, max_active_landmarks) + .expect("Should successfully load file with num_active=169"); + + assert_eq!(loaded.landmarks.len(), 170); + } + + #[test] + fn test_subtrees_require_extra_landmark() { + // This test demonstrates WHY we need the extra (expired) landmark: + // to compute subtrees for the oldest active landmark. + + let max_active_landmarks = 5; + let mut seq = LandmarkSequence::create(max_active_landmarks); + + // Add landmarks up to capacity + for i in 1..=10 { + seq.add(i * 10).unwrap(); + } + + // At steady state: 6 landmarks total (5 active + 1 expired) + assert_eq!(seq.landmarks.len(), 6); + + // The landmarks are: [50, 60, 70, 80, 90, 100] + // - Oldest (expired): 50 + // - Active: 60, 70, 80, 90, 100 + + // To compute subtrees for landmark 60 (oldest active), we need: + // - The interval [50, 60) -- requires knowing landmark 50's tree size! 
+        // - Without landmark 50, we couldn't compute these subtrees
+
+        let subtrees: Vec<_> = seq.subtrees().collect();
+
+        // Verify we got subtrees for all active landmarks
+        // With 5 active landmarks, we should get 10 subtrees
+        assert_eq!(
+            subtrees.len(),
+            10,
+            "Should be able to compute subtrees with the extra landmark, got {} subtrees",
+            subtrees.len()
+        );
+
+        // The oldest landmark (50) is needed to compute the first subtrees
+        // starting from the interval [50, 60)
+        assert_eq!(seq.first_index(), 50, "Oldest landmark should be 50");
+    }
+
+    #[test]
+    fn test_validation_allows_max_active_landmarks() {
+        // Verify that from_bytes accepts num_active_landmarks == max_active_landmarks
+        // This is correct per spec: "num_active_landmarks <= max_active_landmarks"
+
+        let max_active_landmarks = 169;
+
+        // Create a sequence with max_active_landmarks + 1 entries
+        let seq = LandmarkSequence {
+            max_active_landmarks,
+            last_landmark: 200,
+            landmarks: (32..=201).collect(), // 170 landmarks
+        };
+
+        assert_eq!(seq.landmarks.len(), 170);
+
+        // Serialize
+        let bytes = seq.to_bytes().unwrap();
+        let content = String::from_utf8(bytes.clone()).unwrap();
+        let first_line = content.lines().next().unwrap();
+        let parts: Vec<&str> = first_line.split_whitespace().collect();
+        let num_active: usize = parts[1].parse().unwrap();
+
+        // num_active should be 169
+        assert_eq!(num_active, 169);
+
+        // from_bytes should accept this (169 <= 169 is true)
+        let loaded = LandmarkSequence::from_bytes(&bytes, max_active_landmarks)
+            .expect("Should accept file with num_active == max_active_landmarks");
+
+        assert_eq!(loaded.landmarks.len(), 170);
+    }
+}
diff --git a/crates/ietf_mtc_api/src/lib.rs b/crates/ietf_mtc_api/src/lib.rs
new file mode 100644
index 00000000..1528af24
--- /dev/null
+++ b/crates/ietf_mtc_api/src/lib.rs
@@ -0,0 +1,895 @@
+// Copyright (c) 2025 Cloudflare, Inc.
+// Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause
+
+mod cosigner;
+mod landmark;
+mod relative_oid;
+pub use cosigner::*;
+pub use landmark::*;
+pub use relative_oid::*;
+
+use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
+use der::{
+    asn1::{BitString, OctetString},
+    oid::{db::rfc5280::ID_CE_SUBJECT_ALT_NAME, ObjectIdentifier},
+    Any, Decode, Encode, Reader,
+};
+use length_prefixed::WriteLengthPrefixedBytesExt;
+use serde::{Deserialize, Serialize};
+use serde_with::{
+    base64::{Base64, UrlSafe},
+    formats::Unpadded,
+    serde_as,
+};
+use sha2::{Digest, Sha256};
+use std::{io::Read, num::ParseIntError};
+use thiserror::Error;
+use tlog_tiles::{
+    Hash, LeafIndex, LogEntry, PathElem, PendingLogEntry, Proof, Subtree, TlogError,
+    TlogTilesLogEntry, TlogTilesPendingLogEntry, UnixTimestamp,
+};
+use x509_cert::{
+    certificate::Version,
+    ext::{Extension, Extensions},
+    name::{Name, RdnSequence},
+    request::CertReq,
+    serial_number::SerialNumber,
+    spki::{AlgorithmIdentifier, AlgorithmIdentifierOwned, SubjectPublicKeyInfo},
+    time::Validity,
+};
+
+/// OID for Trust Anchor IDs, as specified in draft-ietf-plants-merkle-tree-certs-02.
+///
+/// The experimental value `1.3.6.1.4.1.44363.47.1` (Cloudflare's private OID arc)
+/// is used until the IANA assignment from the draft is finalized.
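+///
+/// Cosigner key names append the log's relative OID to this arc: for example,
+/// `log_id = 4.5.6` yields the signed-note key name
+/// `oid/1.3.6.1.4.1.44363.47.1.4.5.6` (see `MtcNoteVerifier::new_checkpoint`).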
+pub const ID_RDNA_TRUSTANCHOR_ID: ObjectIdentifier =
+    ObjectIdentifier::new_unwrap("1.3.6.1.4.1.44363.47.1");
+
+/// OID for the MTC proof algorithm, used in the `signature_algorithm` field of
+/// landmark certificates, as specified in draft-ietf-plants-merkle-tree-certs-02.
+///
+/// The experimental value `1.3.6.1.4.1.44363.47.0` is used until the IANA
+/// assignment from the draft is finalized.
+pub const ID_ALG_MTCPROOF: ObjectIdentifier =
+    ObjectIdentifier::new_unwrap("1.3.6.1.4.1.44363.47.0");
+
+/// The draft version of the IETF MTC spec that this crate implements.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Default)]
+#[serde(rename_all = "snake_case")]
+pub enum DraftVersion {
+    #[default]
+    Draft02,
+}
+
+/// Sequence metadata for IETF MTC log entries.
+///
+/// Extends the standard `(LeafIndex, UnixTimestamp)` with the previous and new
+/// tree sizes, allowing the frontend to compute the exact subtree signature key
+/// for a newly sequenced entry via `Subtree::split_interval(old, new)` without
+/// enumerating candidates.
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)]
+pub struct IetfSequenceMetadata {
+    /// The index of the sequenced entry in the Merkle tree.
+    pub leaf_index: LeafIndex,
+    /// The sequencing timestamp in milliseconds since Unix epoch.
+    pub timestamp: UnixTimestamp,
+    /// The tree size immediately before this batch was sequenced.
+    pub old_tree_size: u64,
+    /// The tree size immediately after this batch was sequenced.
+    pub new_tree_size: u64,
+}
+
+// IetfMtcWorker is a new deployment with no existing DO storage, so serde_json
+// serialization for the dedup cache ring buffer is fine.
+generic_log_worker::impl_json_cache_serialize!(IetfSequenceMetadata);
+
+// MTCSignature from draft-ietf-plants-merkle-tree-certs §6.1.
+struct MtcSignature {
+    cosigner_id: TrustAnchorID,
+    signature: Vec<u8>,
+}
+
+impl MtcSignature {
+    fn to_bytes(&self) -> Vec<u8> {
+        let mut buffer = Vec::new();
+        buffer
+            .write_length_prefixed(self.cosigner_id.as_bytes(), 1)
+            .unwrap();
+        buffer.write_length_prefixed(&self.signature, 2).unwrap();
+        buffer
+    }
+}
+
+// MTCProof from draft-ietf-plants-merkle-tree-certs §6.1.
+struct MtcProof {
+    start: u64,
+    end: u64,
+    inclusion_proof: Proof,
+    signatures: Vec<MtcSignature>,
+}
+
+impl MtcProof {
+    fn to_bytes(&self) -> Vec<u8> {
+        let mut buffer = Vec::new();
+        buffer.write_u64::<BigEndian>(self.start).unwrap();
+        buffer.write_u64::<BigEndian>(self.end).unwrap();
+        buffer
+            .write_length_prefixed(
+                &self
+                    .inclusion_proof
+                    .iter()
+                    .flat_map(|h| h.0.to_vec())
+                    .collect::<Vec<u8>>(),
+                2,
+            )
+            .unwrap();
+        buffer
+            .write_length_prefixed(
+                &self
+                    .signatures
+                    .iter()
+                    .flat_map(MtcSignature::to_bytes)
+                    .collect::<Vec<u8>>(),
+                2,
+            )
+            .unwrap();
+        buffer
+    }
+}
+
+/// Add-entry request for the IETF MTC submission API.
+///
+/// The payload is a PKCS#10 Certificate Signing Request (CSR) in DER format,
+/// base64url-encoded (no padding), matching the ACME `finalize` endpoint
+/// format (RFC 8555 §7.4). The server extracts the subject, SPKI, and SANs
+/// from the CSR; the CSR signature is not verified (authentication is handled
+/// at the transport layer).
+///
+/// The validity window is set server-side: `[now, now + max_certificate_lifetime_secs]`.
+/// ACME order `notBefore`/`notAfter` fields are not currently supported.
+#[serde_as]
+#[derive(Deserialize, Debug)]
+pub struct AddEntryRequest {
+    /// Base64url-encoded (no padding) DER-encoded PKCS#10 CSR.
+    #[serde_as(as = "Base64<UrlSafe, Unpadded>")]
+    pub csr: Vec<u8>,
+}
+
+/// Add-entry response.
+///
+/// The DER-encoded standalone MTC certificate (§6.2) encodes all relevant
+/// fields: the entry index is the certificate serial number, validity is in
+/// the `TBSCertificate`, and the inclusion proof and cosignature are in the
+/// `signatureValue`.
+#[serde_as]
+#[derive(Serialize)]
+pub struct AddEntryResponse {
+    /// DER-encoded standalone MTC certificate (§6.2), base64-encoded.
+    #[serde_as(as = "Base64")]
+    pub certificate: Vec<u8>,
+}
+
+/// A pending IETF MTC log entry. Unlike the bootstrap variant, there is no
+/// auxiliary tile — the entry is purely the `MerkleTreeCertEntry` data.
+#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
+pub struct IetfMtcPendingLogEntry {
+    /// An encoded `MerkleTreeCertEntry` wrapped in a generic `TlogTilesPendingLogEntry`.
+    pub entry: TlogTilesPendingLogEntry,
+}
+
+impl PendingLogEntry for IetfMtcPendingLogEntry {
+    /// Uses the standard tlog-tiles data tile path.
+    const DATA_TILE_PATH: PathElem = TlogTilesPendingLogEntry::DATA_TILE_PATH;
+
+    /// No auxiliary tile.
+    const AUX_TILE_PATH: Option<PathElem> = None;
+
+    /// Unused in ietf-mtc-api.
+    fn aux_entry(&self) -> &[u8] {
+        unimplemented!()
+    }
+
+    fn lookup_key(&self) -> tlog_tiles::LookupKey {
+        self.entry.lookup_key()
+    }
+}
+
+/// A sequenced IETF MTC log entry.
+#[derive(Debug, Clone, PartialEq)]
+pub struct IetfMtcLogEntry(TlogTilesLogEntry);
+
+impl LogEntry for IetfMtcLogEntry {
+    const REQUIRE_CHECKPOINT_TIMESTAMP: bool = false;
+    type Pending = IetfMtcPendingLogEntry;
+    type ParseError = MtcError;
+    type Metadata = IetfSequenceMetadata;
+
+    fn make_metadata(
+        leaf_index: LeafIndex,
+        timestamp: UnixTimestamp,
+        old_tree_size: u64,
+        new_tree_size: u64,
+    ) -> Self::Metadata {
+        IetfSequenceMetadata {
+            leaf_index,
+            timestamp,
+            old_tree_size,
+            new_tree_size,
+        }
+    }
+
+    fn initial_entry() -> Option<Self::Pending> {
+        Some(Self::Pending {
+            entry: TlogTilesPendingLogEntry {
+                data: MerkleTreeCertEntry::NullEntry.encode().unwrap(),
+            },
+        })
+    }
+
+    fn new(pending: Self::Pending, metadata: Self::Metadata) -> Self {
+        // Convert IetfSequenceMetadata to SequenceMetadata for TlogTilesLogEntry
+        // (which only uses leaf_index and timestamp internally).
+        Self(TlogTilesLogEntry::new(
+            pending.entry,
+            (metadata.leaf_index, metadata.timestamp),
+        ))
+    }
+
+    fn merkle_tree_leaf(&self) -> Hash {
+        self.0.merkle_tree_leaf()
+    }
+
+    fn to_data_tile_entry(&self) -> Vec<u8> {
+        self.0.to_data_tile_entry()
+    }
+
+    fn parse_from_tile_entry<R: Read>(input: &mut R) -> Result<Self, Self::ParseError> {
+        Ok(Self(TlogTilesLogEntry::parse_from_tile_entry(input)?))
+    }
+}
+
+/// Construct an `IetfMtcPendingLogEntry` from an `AddEntryRequest`.
+///
+/// Parses the DER-encoded PKCS#10 CSR in `req.csr`, extracting the subject,
+/// `SubjectPublicKeyInfo`, and any `subjectAltName` extension request
+/// attribute. The CSR signature is not verified.
+///
+/// # Errors
+///
+/// Returns an error if the CSR cannot be parsed, contains unsupported fields,
+/// or the resulting entry cannot be encoded.
+pub fn build_pending_entry(
+    req: &AddEntryRequest,
+    issuer: &RdnSequence,
+    validity: Validity,
+) -> Result<IetfMtcPendingLogEntry, MtcError> {
+    let csr =
+        CertReq::from_der(&req.csr).map_err(|e| MtcError::Dynamic(format!("invalid CSR: {e}")))?;
+
+    let subject = csr.info.subject;
+    let spki_der = csr.info.public_key.to_der()?;
+    let spki_hash = OctetString::new(&Sha256::digest(&spki_der)[..])?;
+
+    // Extract the AlgorithmIdentifier from the SPKI (new field in plants-02).
+    let spki_algorithm = csr.info.public_key.algorithm;
+
+    // Extract SubjectAltName from the CSR's extensionRequest attribute (RFC 2985 §5.4.2).
+    let extensions = extract_san_from_csr(&csr.info.attributes)?;
+
+    // Convert RdnSequence → Name via DER round-trip (Name is a newtype over RdnSequence
+    // in x509-cert 0.3; its inner field is pub(crate) so direct construction isn't possible).
+    let issuer = Name::from_der(&issuer.to_der()?)?;
+
+    let log_entry = TbsCertificateLogEntry {
+        version: Version::V3,
+        issuer,
+        validity,
+        subject,
+        subject_public_key_info_algorithm: spki_algorithm,
+        subject_public_key_info_hash: spki_hash,
+        issuer_unique_id: None,
+        subject_unique_id: None,
+        extensions,
+    };
+
+    Ok(IetfMtcPendingLogEntry {
+        entry: TlogTilesPendingLogEntry {
+            data: MerkleTreeCertEntry::TbsCertEntry(log_entry).encode()?,
+        },
+    })
+}
+
+/// Extract a `SubjectAltName` extension from a CSR's `extensionRequest` attribute.
+///
+/// Returns `None` if no `extensionRequest` attribute is present or if it
+/// contains no `subjectAltName` extension. Returns an error if the attribute
+/// is malformed.
+fn extract_san_from_csr(
+    attributes: &x509_cert::attr::Attributes,
+) -> Result<Option<Extensions>, MtcError> {
+    // OID for the PKCS#9 extensionRequest attribute (RFC 2985 §5.4.2 / RFC 5912).
+    const ID_EXTENSION_REQ: der::asn1::ObjectIdentifier =
+        der::asn1::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.14");
+
+    for attr in attributes.iter() {
+        if attr.oid != ID_EXTENSION_REQ {
+            continue;
+        }
+        // The extensionRequest attribute value is a SET containing a single
+        // SEQUENCE OF Extension (i.e. the Extensions type).
+        for val in attr.values.iter() {
+            let exts = Extensions::from_der(&val.to_der()?)?;
+            let san_exts: Vec<Extension> = exts
+                .into_iter()
+                .filter(|e| e.extn_id == ID_CE_SUBJECT_ALT_NAME)
+                .collect();
+            if !san_exts.is_empty() {
+                return Ok(Some(Extensions::from(san_exts)));
+            }
+        }
+    }
+    Ok(None)
+}
+
+/// R2 key prefix for cached subtree signatures.
+///
+/// Each key has the form `{SUBTREE_SIG_KEY_PREFIX}/{lo}-{hi}` where `lo` and
+/// `hi` are the zero-padded decimal endpoints of the signed subtree interval.
+/// Zero-padding ensures lexicographic ordering matches numeric ordering.
+pub const SUBTREE_SIG_KEY_PREFIX: &str = "subtree-sig";
+
+/// Format a subtree signature R2 key for the interval `[lo, hi)`.
+#[must_use]
+pub fn subtree_sig_key(lo: u64, hi: u64) -> String {
+    format!("{SUBTREE_SIG_KEY_PREFIX}/{lo:020}-{hi:020}")
+}
+
+/// A subtree cosignature cached in R2 by the sequencer.
+#[serde_as]
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct SignedSubtree {
+    /// The start (inclusive) of the subtree interval.
+    pub lo: u64,
+    /// The end (exclusive) of the subtree interval.
+    pub hi: u64,
+    /// SHA-256 Merkle hash of the subtree root.
+    #[serde_as(as = "Base64")]
+    pub hash: [u8; 32],
+    /// SHA-256 hash of the full checkpoint tree at the time of signing.
+    /// Required to fetch the correct hash tiles when computing inclusion proofs.
+    #[serde_as(as = "Base64")]
+    pub checkpoint_hash: [u8; 32],
+    /// Tree size of the full checkpoint at the time of signing.
+    pub checkpoint_size: u64,
+    /// Raw cosignature bytes from `MtcCosigner::sign_subtree`.
+    #[serde_as(as = "Base64")]
+    pub signature: Vec<u8>,
+    /// `TrustAnchorID` of the cosigner that produced `signature`.
+    pub cosigner_id: String,
+}
+
+impl SignedSubtree {
+    /// Return the subtree interval as a `Subtree`.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if `(lo, hi)` is not a valid subtree interval.
+    pub fn as_subtree(&self) -> Result<Subtree, MtcError> {
+        Subtree::new(self.lo, self.hi).map_err(|e| MtcError::Dynamic(e.to_string()))
+    }
+
+    /// Return true if `leaf_index` falls within `[lo, hi)`.
+    #[must_use]
+    pub fn contains(&self, leaf_index: LeafIndex) -> bool {
+        self.lo <= leaf_index && leaf_index < self.hi
+    }
+}
+
+/// Build `TBSCertificate` DER field-by-field.
+///
+/// `x509-cert` 0.3 makes all `TbsCertificateInner` fields private, so we cannot
+/// use struct-literal construction from external crates.
+fn encode_tbs_certificate_der(
+    entry: &TbsCertificateLogEntry,
+    leaf_index: LeafIndex,
+    signature_algorithm: &AlgorithmIdentifier<Any>,
+    spki: &SubjectPublicKeyInfo<Any, BitString>,
+) -> Result<Vec<u8>, MtcError> {
+    use der::{
+        asn1::{ContextSpecific, ContextSpecificRef},
+        Encode, TagMode, TagNumber,
+    };
+
+    let mut tbs_content = Vec::new();
+    if entry.version != x509_cert::certificate::Version::V1 {
+        ContextSpecific {
+            tag_number: TagNumber(0),
+            tag_mode: TagMode::Explicit,
+            value: entry.version,
+        }
+        .encode_to_vec(&mut tbs_content)?;
+    }
+    SerialNumber::<x509_cert::certificate::Rfc5280>::new(&leaf_index.to_be_bytes())?
+        .encode_to_vec(&mut tbs_content)?;
+    signature_algorithm.encode_to_vec(&mut tbs_content)?;
+    entry.issuer.encode_to_vec(&mut tbs_content)?;
+    entry.validity.encode_to_vec(&mut tbs_content)?;
+    entry.subject.encode_to_vec(&mut tbs_content)?;
+    spki.encode_to_vec(&mut tbs_content)?;
+    if let Some(uid) = &entry.issuer_unique_id {
+        ContextSpecificRef {
+            tag_number: TagNumber(1),
+            tag_mode: TagMode::Implicit,
+            value: uid,
+        }
+        .encode_to_vec(&mut tbs_content)?;
+    }
+    if let Some(uid) = &entry.subject_unique_id {
+        ContextSpecificRef {
+            tag_number: TagNumber(2),
+            tag_mode: TagMode::Implicit,
+            value: uid,
+        }
+        .encode_to_vec(&mut tbs_content)?;
+    }
+    if let Some(exts) = &entry.extensions {
+        let mut exts_items = Vec::new();
+        for ext in exts {
+            exts_items.extend(ext.to_der()?);
+        }
+        let mut exts_seq = Vec::new();
+        der::Header::new(der::Tag::Sequence, der::Length::try_from(exts_items.len())?)
+            .encode_to_vec(&mut exts_seq)?;
+        exts_seq.extend(exts_items);
+        let exts_any = der::asn1::Any::from_der(&exts_seq)?;
+        ContextSpecific {
+            tag_number: TagNumber(3),
+            tag_mode: TagMode::Explicit,
+            value: exts_any,
+        }
+        .encode_to_vec(&mut tbs_content)?;
+    }
+    let mut tbs_der = Vec::new();
+    der::Header::new(
+        der::Tag::Sequence,
+        der::Length::try_from(tbs_content.len())?,
+    )
+    .encode_to_vec(&mut tbs_der)?;
+    tbs_der.extend(tbs_content);
+    Ok(tbs_der)
+}
+
+/// Serialize a DER-encoded MTC certificate (draft-ietf-plants-merkle-tree-certs §6.1).
+///
+/// Pass an empty `cosignatures` slice for a landmark-relative certificate (§6.3)
+/// or a non-empty slice for a standalone certificate (§6.2).
+///
+/// # Errors
+///
+/// Returns an error if the SPKI hash does not match the entry, or if there
+/// are any serialization errors.
+pub fn serialize_mtc_cert(
+    log_entry: &IetfMtcLogEntry,
+    leaf_index: LeafIndex,
+    spki_der: &[u8],
+    subtree: &Subtree,
+    inclusion_proof: Proof,
+    cosignatures: &[(TrustAnchorID, Vec<u8>)],
+) -> Result<Vec<u8>, MtcError> {
+    let entry = match MerkleTreeCertEntry::decode(&log_entry.0.inner.data)? {
+        MerkleTreeCertEntry::TbsCertEntry(entry) => entry,
+        MerkleTreeCertEntry::NullEntry => {
+            return Err(MtcError::Dynamic("no tbs cert entry for null entry".into()))
+        }
+    };
+    let spki: SubjectPublicKeyInfo<Any, BitString> = SubjectPublicKeyInfo::from_der(spki_der)?;
+    let spki_hash = OctetString::new(&Sha256::digest(spki_der)[..])?;
+    if spki_hash != entry.subject_public_key_info_hash {
+        return Err(MtcError::Dynamic("spki hash mismatch".to_string()));
+    }
+
+    let signature_algorithm: AlgorithmIdentifier<Any> = AlgorithmIdentifier {
+        oid: ID_ALG_MTCPROOF,
+        parameters: None,
+    };
+    let tbs_der = encode_tbs_certificate_der(&entry, leaf_index, &signature_algorithm, &spki)?;
+
+    // Build Certificate DER: SEQUENCE { tbs_der, signature_algorithm, signature }.
+    let signatures = cosignatures
+        .iter()
+        .map(|(cosigner_id, sig)| MtcSignature {
+            cosigner_id: cosigner_id.clone(),
+            signature: sig.clone(),
+        })
+        .collect();
+    let sig_bytes = MtcProof {
+        start: subtree.lo(),
+        end: subtree.hi(),
+        inclusion_proof,
+        signatures,
+    }
+    .to_bytes();
+    let sig_bitstring = BitString::from_bytes(&sig_bytes)?;
+    let mut cert_content = Vec::new();
+    cert_content.extend(&tbs_der);
+    signature_algorithm.encode_to_vec(&mut cert_content)?;
+    sig_bitstring.encode_to_vec(&mut cert_content)?;
+    let mut cert_der = Vec::new();
+    der::Header::new(
+        der::Tag::Sequence,
+        der::Length::try_from(cert_content.len())?,
+    )
+    .encode_to_vec(&mut cert_der)?;
+    cert_der.extend(cert_content);
+    Ok(cert_der)
+}
+
+#[derive(Debug, Error)]
+pub enum MtcError {
+    #[error(transparent)]
+    Tlog(#[from] TlogError),
+    #[error(transparent)]
+    Der(#[from] der::Error),
+    #[error(transparent)]
+    IO(#[from] std::io::Error),
+    #[error(transparent)]
+    Fmt(#[from] std::fmt::Error),
+    #[error(transparent)]
+    Utf8(#[from] std::str::Utf8Error),
+    #[error(transparent)]
+    ParseInt(#[from] ParseIntError),
+    #[error("mtc: {0}")]
+    Dynamic(String),
+}
+
+#[repr(u16)]
+pub enum MerkleTreeCertEntryType {
+    NullEntry = 0x0000,
+    TbsCertEntry = 0x0001,
+}
+
+impl TryFrom<u16> for MerkleTreeCertEntryType {
+    type Error = MtcError;
+
+    fn try_from(value: u16) -> Result<Self, Self::Error> {
+        match value {
+            0x0000 => Ok(MerkleTreeCertEntryType::NullEntry),
+            0x0001 => Ok(MerkleTreeCertEntryType::TbsCertEntry),
+            _ => Err(MtcError::Dynamic("unknown entry type".into())),
+        }
+    }
+}
+
+/// A `MerkleTreeCertEntry` as defined in draft-ietf-plants-merkle-tree-certs §5.3.
+///
+/// The `NullEntry` type is used as the first element in the tree so that the
+/// serial number for each subsequent `TbsCertEntry` corresponds to its index.
+#[allow(clippy::large_enum_variant)]
+#[derive(PartialEq, Debug)]
+pub enum MerkleTreeCertEntry {
+    NullEntry,
+    TbsCertEntry(TbsCertificateLogEntry),
+}
+
+impl MerkleTreeCertEntry {
+    /// Encode entry to bytes.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if there are issues encoding the entry.
+    pub fn encode(&self) -> Result<Vec<u8>, MtcError> {
+        match &self {
+            Self::NullEntry => Ok((MerkleTreeCertEntryType::NullEntry as u16)
+                .to_be_bytes()
+                .to_vec()),
+            Self::TbsCertEntry(tbs_cert_entry) => {
+                // plants-02: fields are written directly after the u16 type tag,
+                // without an outer ASN.1 SEQUENCE wrapper (dropped in davidben-10).
+                let mut out = (MerkleTreeCertEntryType::TbsCertEntry as u16)
+                    .to_be_bytes()
+                    .to_vec();
+                out.extend(tbs_cert_entry.encode_fields()?);
+                Ok(out)
+            }
+        }
+    }
+
+    /// Decode entry from bytes.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if the entry cannot be decoded.
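+    // For example, a `NullEntry` is exactly the two bytes [0x00, 0x00]: the
+    // big-endian type tag with an empty body.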
+    pub fn decode(mut data: &[u8]) -> Result<Self, MtcError> {
+        match MerkleTreeCertEntryType::try_from(data.read_u16::<BigEndian>()?)? {
+            MerkleTreeCertEntryType::NullEntry => {
+                if data.is_empty() {
+                    Ok(Self::NullEntry)
+                } else {
+                    Err(MtcError::Dynamic(
+                        "data for null entry must be empty".into(),
+                    ))
+                }
+            }
+            MerkleTreeCertEntryType::TbsCertEntry => {
+                // plants-02: the remaining bytes are raw field DER (no SEQUENCE wrapper).
+                let tbs_cert_entry = TbsCertificateLogEntry::decode_fields(data)?;
+                Ok(Self::TbsCertEntry(tbs_cert_entry))
+            }
+        }
+    }
+}
+
+/// A `TBSCertificateLogEntry` as defined in draft-ietf-plants-merkle-tree-certs §5.3
+/// (plants-02).
+///
+/// Differs from a standard `TBSCertificate` in that `subject_public_key_info`
+/// is replaced by two separate fields:
+/// - `subject_public_key_info_algorithm`: the `AlgorithmIdentifier` from the SPKI
+///   (new in plants-02; not present in davidben-09)
+/// - `subject_public_key_info_hash`: SHA-256 of the full DER-encoded SPKI
+///
+/// Unlike in davidben-09, the entry is **not** wrapped in an ASN.1 SEQUENCE —
+/// the fields are encoded as raw concatenated DER values (the SEQUENCE wrapper
+/// was dropped in davidben-10). For this reason we implement `Encode`/`Decode`
+/// manually rather than using `#[derive(Sequence)]`.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct TbsCertificateLogEntry {
+    pub version: Version,
+    pub issuer: Name,
+    pub validity: Validity,
+    pub subject: Name,
+    /// The `AlgorithmIdentifier` from the submitted SPKI (new in plants-02).
+    pub subject_public_key_info_algorithm: AlgorithmIdentifierOwned,
+    /// SHA-256 of the full DER-encoded `SubjectPublicKeyInfo`.
+    pub subject_public_key_info_hash: OctetString,
+    pub issuer_unique_id: Option<BitString>,
+    pub subject_unique_id: Option<BitString>,
+    pub extensions: Option<Extensions>,
+}
+
+impl TbsCertificateLogEntry {
+    /// Encode all fields as raw concatenated DER (no outer SEQUENCE wrapper).
+    ///
+    /// # Errors
+    ///
+    /// Returns a `der::Error` if any field cannot be encoded.
+    pub fn encode_fields(&self) -> der::Result<Vec<u8>> {
+        // Manually encode each field using der::Encode and collect into a Vec.
+        // This is equivalent to the content bytes of a SEQUENCE, but without
+        // the SEQUENCE tag and length prefix.
+        let mut buf = Vec::new();
+
+        // version [0] EXPLICIT INTEGER DEFAULT 0 — omit if V1 (default)
+        if self.version != Version::V1 {
+            use der::asn1::ContextSpecific;
+            use der::TagMode;
+            let tagged = ContextSpecific::<Version> {
+                tag_number: der::TagNumber(0),
+                tag_mode: TagMode::Explicit,
+                value: self.version,
+            };
+            tagged.encode_to_vec(&mut buf)?;
+        }
+
+        self.issuer.encode_to_vec(&mut buf)?;
+        self.validity.encode_to_vec(&mut buf)?;
+        self.subject.encode_to_vec(&mut buf)?;
+        self.subject_public_key_info_algorithm
+            .encode_to_vec(&mut buf)?;
+        self.subject_public_key_info_hash.encode_to_vec(&mut buf)?;
+
+        // issuerUniqueID [1] IMPLICIT BIT STRING OPTIONAL
+        if let Some(ref v) = self.issuer_unique_id {
+            use der::asn1::ContextSpecificRef;
+            use der::TagMode;
+            ContextSpecificRef::<BitString> {
+                tag_number: der::TagNumber(1),
+                tag_mode: TagMode::Implicit,
+                value: v,
+            }
+            .encode_to_vec(&mut buf)?;
+        }
+
+        // subjectUniqueID [2] IMPLICIT BIT STRING OPTIONAL
+        if let Some(ref v) = self.subject_unique_id {
+            use der::asn1::ContextSpecificRef;
+            use der::TagMode;
+            ContextSpecificRef::<BitString> {
+                tag_number: der::TagNumber(2),
+                tag_mode: TagMode::Implicit,
+                value: v,
+            }
+            .encode_to_vec(&mut buf)?;
+        }
+
+        // extensions [3] EXPLICIT Extensions OPTIONAL
+        if let Some(ref exts) = self.extensions {
+            use der::asn1::ContextSpecific;
+            use der::TagMode;
+            let tagged = ContextSpecific::<Extensions> {
+                tag_number: der::TagNumber(3),
+                tag_mode: TagMode::Explicit,
+                value: exts.clone(),
+            };
+            tagged.encode_to_vec(&mut buf)?;
+        }
+
+        Ok(buf)
+    }
+
+    /// Decode all fields from raw concatenated DER (no outer SEQUENCE wrapper).
+    ///
+    /// # Errors
+    ///
+    /// Returns a `MtcError` if the data is malformed.
+    pub fn decode_fields(data: &[u8]) -> Result<Self, MtcError> {
+        use der::{asn1::ContextSpecific, SliceReader, TagNumber};
+
+        let mut reader = SliceReader::new(data)?;
+
+        // version [0] EXPLICIT INTEGER DEFAULT V1
+        let version = if der::Tag::peek(&reader).ok().is_some_and(|t| {
+            t == der::Tag::ContextSpecific {
+                constructed: true,
+                number: TagNumber(0),
+            }
+        }) {
+            let cs = ContextSpecific::<Version>::decode(&mut reader)?;
+            cs.value
+        } else {
+            Version::V1
+        };
+
+        let issuer = Name::decode(&mut reader)?;
+        let validity = Validity::decode(&mut reader)?;
+        let subject = Name::decode(&mut reader)?;
+        let subject_public_key_info_algorithm = AlgorithmIdentifierOwned::decode(&mut reader)?;
+        let subject_public_key_info_hash = OctetString::decode(&mut reader)?;
+
+        // issuerUniqueID [1] IMPLICIT BIT STRING OPTIONAL
+        let issuer_unique_id =
+            ContextSpecific::<BitString>::decode_implicit(&mut reader, TagNumber(1))?
+                .map(|cs| cs.value);
+
+        // subjectUniqueID [2] IMPLICIT BIT STRING OPTIONAL
+        let subject_unique_id =
+            ContextSpecific::<BitString>::decode_implicit(&mut reader, TagNumber(2))?
+                .map(|cs| cs.value);
+
+        // extensions [3] EXPLICIT Extensions OPTIONAL
+        let extensions = if der::Tag::peek(&reader).ok().is_some_and(|t| {
+            t == der::Tag::ContextSpecific {
+                constructed: true,
+                number: TagNumber(3),
+            }
+        }) {
+            let cs = ContextSpecific::<Extensions>::decode(&mut reader)?;
+            Some(cs.value)
+        } else {
+            None
+        };
+
+        reader.finish().map_err(MtcError::from)?;
+        Ok(Self {
+            version,
+            issuer,
+            validity,
+            subject,
+            subject_public_key_info_algorithm,
+            subject_public_key_info_hash,
+            issuer_unique_id,
+            subject_unique_id,
+            extensions,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use base64::prelude::*;
+    use der::asn1::UtcTime;
+    use std::time::Duration;
+    use x509_cert::{
+        ext::pkix::{name::GeneralName, SubjectAltName},
+        time::Time,
+    };
+
+    fn dummy_validity() -> Validity {
+        Validity::new(
+            Time::UtcTime(UtcTime::from_unix_duration(Duration::from_secs(1_700_000_000)).unwrap()),
+            Time::UtcTime(UtcTime::from_unix_duration(Duration::from_secs(1_700_086_400)).unwrap()),
+        )
+    }
+
+    // Pre-built P-256 DER CSRs (base64url, no padding).
+    // Subject: CN=test.example.com,O=Test,C=US
+    // Generated with: openssl req -new -key <key> -subj "..." [-addext "subjectAltName=..."]
+    const CSR_NO_SAN_B64URL: &str =
+        "MIHyMIGZAgEAMDcxGTAXBgNVBAMMEHRlc3QuZXhhbXBsZS5jb20xDTALBgNVBAoM\
+         BFRlc3QxCzAJBgNVBAYTAlVTMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEs6dP\
+         v4lKY7RVXxTGVLkj8lK3H1bgpSrAYXXg-b-aYb_KFMcYrbcW8ytv0hFnDXWVUTgo\
+         Dyp4pbkBhgXieKD0MKAAMAoGCCqGSM49BAMCA0gAMEUCIQD9BWGDjR6Ul8jYQuyC\
+         1Xw1Ydt0Z9TbFsDsS9d8NiHgigIgXDq9F4hRBvdwYvnRxP7jqW6ae_bamy1BOdzn\
+         15F90uE";
+    const CSR_WITH_SANS_B64URL: &str =
+        "MIIBLDCB0wIBADA3MRkwFwYDVQQDDBB0ZXN0LmV4YW1wbGUuY29tMQ0wCwYDVQQK\
+         DARUZXN0MQswCQYDVQQGEwJVUzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABLOn\
+         T7-JSmO0VV8UxlS5I_JStx9W4KUqwGF14Pm_mmG_yhTHGK23FvMrb9IRZw11lVE4\
+         KA8qeKW5AYYF4nig9DCgOjA4BgkqhkiG9w0BCQ4xKzApMCcGA1UdEQQgMB6CC2V4\
+         YW1wbGUuY29tgg93d3cuZXhhbXBsZS5jb20wCgYIKoZIzj0EAwIDSAAwRQIgNoiV\
+         IX6MeFGZgPSHjy0SY40txuhSOrGkat6KteN5v1oCIQCwKyv4B7cTXcCnligVQ-IY\
+         6nyTYJJ0sDmRpgD03Ejqhg";
+
+    fn decode_csr(b64url: &str) -> Vec<u8> {
+        BASE64_URL_SAFE_NO_PAD
+            .decode(b64url.replace(['\n', ' '], "").as_bytes())
+            .unwrap()
+    }
+
+    #[test]
+    fn test_encode_null_entry() {
+        let null_entry = MerkleTreeCertEntry::NullEntry;
+        assert_eq!(
+            null_entry,
+            MerkleTreeCertEntry::decode(&null_entry.encode().unwrap()).unwrap()
+        );
+    }
+
+    #[test]
+    fn test_build_pending_entry_with_sans() {
+        let req = AddEntryRequest {
+            csr: decode_csr(CSR_WITH_SANS_B64URL),
+        };
+
+        let entry = build_pending_entry(&req, &RdnSequence::default(), dummy_validity()).unwrap();
+        let decoded = MerkleTreeCertEntry::decode(&entry.entry.data).unwrap();
+
+        let MerkleTreeCertEntry::TbsCertEntry(tbs) = decoded else {
+            panic!("expected TbsCertEntry");
+        };
+
+        let exts = tbs.extensions.unwrap();
+        assert_eq!(exts.len(), 1);
+        assert_eq!(exts[0].extn_id, ID_CE_SUBJECT_ALT_NAME);
+
+        let san = SubjectAltName::from_der(exts[0].extn_value.as_bytes()).unwrap();
+        assert_eq!(san.0.len(), 2);
+        assert!(matches!(&san.0[0], GeneralName::DnsName(n) if n.as_str() == "example.com"));
+        assert!(matches!(&san.0[1], GeneralName::DnsName(n) if n.as_str() == "www.example.com"));
+    }
+
+    #[test]
+    fn test_build_pending_entry_no_sans() {
+        let req = AddEntryRequest {
+            csr: decode_csr(CSR_NO_SAN_B64URL),
+        };
+
+        let entry = build_pending_entry(&req, &RdnSequence::default(), dummy_validity()).unwrap();
+        let decoded = MerkleTreeCertEntry::decode(&entry.entry.data).unwrap();
+        let MerkleTreeCertEntry::TbsCertEntry(tbs) = decoded else {
+        let MerkleTreeCertEntry::TbsCertEntry(tbs) = decoded else {
+            panic!("expected TbsCertEntry");
+        };
+        assert!(tbs.extensions.is_none());
+    }
+
+    #[test]
+    fn test_build_pending_entry_invalid_csr() {
+        let req = AddEntryRequest {
+            csr: b"not a valid csr".to_vec(),
+        };
+        assert!(build_pending_entry(&req, &RdnSequence::default(), dummy_validity()).is_err());
+    }
+
+    #[test]
+    fn test_add_entry_request_serde() {
+        let csr_bytes = decode_csr(CSR_NO_SAN_B64URL);
+        let b64url = BASE64_URL_SAFE_NO_PAD.encode(&csr_bytes);
+
+        // Without optional fields.
+        let json = format!(r#"{{"csr": "{b64url}"}}"#);
+        let req: AddEntryRequest = serde_json::from_str(&json).unwrap();
+        assert_eq!(req.csr, csr_bytes);
+    }
+}
diff --git a/crates/ietf_mtc_api/src/relative_oid.rs b/crates/ietf_mtc_api/src/relative_oid.rs
new file mode 100644
index 00000000..b3b6689f
--- /dev/null
+++ b/crates/ietf_mtc_api/src/relative_oid.rs
@@ -0,0 +1,149 @@
+use crate::MtcError;
+use std::str::FromStr;
+
+/// ASN.1 `RELATIVE OID`.
+///
+/// TODO upstream this to the `der` crate.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct RelativeOid {
+    ber: Vec<u8>,
+    arcs: Vec<u32>,
+}
+
+impl RelativeOid {
+    fn from_arcs(arcs: &[u32]) -> Result<Self, MtcError> {
+        let mut ber = Vec::new();
+        for arc in arcs {
+            // Emit base-128 groups, most significant first, skipping leading
+            // zero groups but keeping zero groups that follow a nonzero one.
+            let mut seen_nonzero = false;
+            for j in (0..=4).rev() {
+                #[allow(clippy::cast_possible_truncation)]
+                let cur = ((arc >> (j * 7)) & 0x7f) as u8; // lower 7 bits
+
+                if cur != 0 || seen_nonzero || j == 0 {
+                    seen_nonzero = true;
+                    let mut to_write = cur;
+
+                    if j != 0 {
+                        to_write |= 0x80; // continuation bit
+                    }
+                    ber.push(to_write);
+                }
+            }
+        }
+        if ber.len() > 255 {
+            return Err(MtcError::Dynamic("invalid relative OID".into()));
+        }
+        Ok(Self {
+            ber,
+            arcs: arcs.to_vec(),
+        })
+    }
+
+    /// Returns the DER-encoded content bytes.
+    #[must_use]
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.ber
+    }
+
+    /// Decode a `RelativeOid` from its BER-encoded bytes.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if the bytes are not valid BER for a relative OID.
+    pub fn from_ber_bytes(ber: &[u8]) -> Result<Self, MtcError> {
+        let mut arcs = Vec::new();
+        let mut i = 0;
+        while i < ber.len() {
+            let mut arc: u32 = 0;
+            loop {
+                let b = *ber
+                    .get(i)
+                    .ok_or_else(|| MtcError::Dynamic("truncated OID arc".into()))?;
+                i += 1;
+                // checked_mul detects lost high bits (checked_shl would not,
+                // since it only fails when the shift amount exceeds the width).
+                arc = arc
+                    .checked_mul(128)
+                    .ok_or_else(|| MtcError::Dynamic("OID arc overflow".into()))?
+                    | u32::from(b & 0x7f);
+                if b & 0x80 == 0 {
+                    break;
+                }
+            }
+            arcs.push(arc);
+        }
+        Ok(Self {
+            ber: ber.to_vec(),
+            arcs,
+        })
+    }
+}
+
+impl std::fmt::Display for RelativeOid {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mut arcs = self.arcs.iter();
+        if let Some(first) = arcs.next() {
+            write!(f, "{first}")?;
+        }
+        for arc in arcs {
+            write!(f, ".{arc}")?;
+        }
+        Ok(())
+    }
+}
+
+impl FromStr for RelativeOid {
+    type Err = MtcError;
+    /// Parse the [`RelativeOid`] from a decimal-dotted string.
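+    ///
+    /// For example, `"13335.2"` parses to the arcs `[13335, 2]`, whose
+    /// base-128 content bytes are `[0xE8, 0x17, 0x02]` (see the tests below).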
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let parts = s.split('.');
+        let mut arcs = Vec::new();
+        for part in parts {
+            let i = part.parse::<u32>()?;
+            arcs.push(i);
+        }
+        Self::from_arcs(&arcs)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use der::{Any, Encode, Tag};
+
+    use super::*;
+
+    #[test]
+    fn encode_tagged() {
+        let relative_oid = RelativeOid::from_str("13335.2").unwrap();
+        let any = Any::new(Tag::RelativeOid, relative_oid.as_bytes()).unwrap();
+        assert_eq!(any.to_der().unwrap(), b"\x0d\x03\xe8\x17\x02");
+    }
+
+    #[test]
+    fn encode_string() {
+        let relative_oid = RelativeOid::from_str("13335.2").unwrap();
+        assert_eq!(relative_oid.to_string(), "13335.2");
+    }
+
+    #[test]
+    fn decode_string_encode_bytes() {
+        struct TestCase {
+            s: &'static str,
+            b: &'static [u8],
+        }
+        for TestCase { s, b } in [
+            TestCase {
+                s: "237",
+                b: &[129, 109],
+            },
+            TestCase {
+                s: "1.2.3.4",
+                b: &[1, 2, 3, 4],
+            },
+            TestCase {
+                s: "13335.2",
+                b: &[232, 23, 2],
+            },
+            TestCase {
+                s: "44363.48.10",
+                b: &[130, 218, 75, 48, 10],
+            },
+        ] {
+            let relative_oid = RelativeOid::from_str(s).unwrap();
+            assert_eq!(relative_oid.as_bytes(), b);
+        }
+    }
+}
diff --git a/crates/ietf_mtc_worker/.cargo/config.toml b/crates/ietf_mtc_worker/.cargo/config.toml
new file mode 100644
index 00000000..1d512562
--- /dev/null
+++ b/crates/ietf_mtc_worker/.cargo/config.toml
@@ -0,0 +1,6 @@
+[target.wasm32-unknown-unknown]
+# ML-DSA key expansion (KeyGen::from_seed) requires more stack than the default 1MB.
+# 4MB is sufficient and has no impact on bundle size or production deployment limits
+# (the stack is carved from WASM linear memory at runtime, not stored in the binary).
+# See: https://github.com/RustCrypto/signatures/issues/1024
+rustflags = ["-C", "link-args=-z stack-size=4194304"]
diff --git a/crates/ietf_mtc_worker/.dev.vars b/crates/ietf_mtc_worker/.dev.vars
new file mode 100644
index 00000000..da398dea
--- /dev/null
+++ b/crates/ietf_mtc_worker/.dev.vars
@@ -0,0 +1,6 @@
+# NIST ACVP ML-DSA-44 keyGen test vectors (FIPS 204)
+# Source: https://github.com/usnistgov/ACVP-Server/blob/master/gen-val/json-files/ML-DSA-keyGen-FIPS204/internalProjection.json
+# tcId=1, seed=D71361C000F9A7BC99DFB425BCB6BB27C32C36AB444FF3708B2D93B4E66D5B5B
+SIGNING_KEY_dev1="-----BEGIN PRIVATE KEY-----\nMDQCAQAwCwYJYIZIAWUDBAMRBCKAINcTYcAA+ae8md+0Jby2uyfDLDarRE/zcIst\nk7TmbVtb\n-----END PRIVATE KEY-----\n"
+# tcId=2, seed=AB611F971C44D1B755D289E0FCFEE70F0EB5D9FDFB1BC31CA894A75794235AF8
+SIGNING_KEY_dev2="-----BEGIN PRIVATE KEY-----\nMDQCAQAwCwYJYIZIAWUDBAMRBCKAIKthH5ccRNG3VdKJ4Pz+5w8Otdn9+xvDHKiU\np1eUI1r4\n-----END PRIVATE KEY-----\n"
diff --git a/crates/ietf_mtc_worker/.gitignore b/crates/ietf_mtc_worker/.gitignore
new file mode 100644
index 00000000..2b3d58cf
--- /dev/null
+++ b/crates/ietf_mtc_worker/.gitignore
@@ -0,0 +1,3 @@
+package-lock.json
+package.json
+dist/
diff --git a/crates/ietf_mtc_worker/Cargo.toml b/crates/ietf_mtc_worker/Cargo.toml
new file mode 100644
index 00000000..bf4987f0
--- /dev/null
+++ b/crates/ietf_mtc_worker/Cargo.toml
@@ -0,0 +1,70 @@
+[package]
+name = "ietf_mtc_worker"
+publish = false
+version.workspace = true
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = "An implementation of IETF Merkle Tree Certificates on Cloudflare Workers"
+categories = ["cryptography"]
+keywords = ["certificate", "transparency", "crypto", "pki"]
+
+[package.metadata.release]
+release = false
+
+# https://github.com/rustwasm/wasm-pack/issues/1351
+[package.metadata.wasm-pack.profile.dev.wasm-bindgen]
+dwarf-debug-info = true
+
+[lib]
+crate-type = ["cdylib"]
+
+[build-dependencies]
+chrono.workspace = true
+config = { path = "./config", package = "ietf_mtc_worker_config" }
+generic_log_worker.workspace = true
+jsonschema.workspace = true
+ietf_mtc_api.workspace = true
+serde_json.workspace = true
+serde.workspace = true
+url.workspace = true
+x509-cert.workspace = true
+der.workspace = true
+
+[dev-dependencies]
+rand.workspace = true
+itertools.workspace = true
+parking_lot.workspace = true
+futures-executor.workspace = true
+
+[dependencies]
+base64ct.workspace = true
+chrono.workspace = true
+config = { path = "./config", package = "ietf_mtc_worker_config" }
+der.workspace = true
+generic_log_worker.workspace = true
+ed25519-dalek.workspace = true
+pkcs8.workspace = true
+getrandom.workspace = true
+log.workspace = true
+p256.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+serde_with.workspace = true
+signed_note.workspace = true
+tlog_tiles.workspace = true
+tokio.workspace = true
+worker.workspace = true
+x509-cert.workspace = true
+ietf_mtc_api.workspace = true
+ml-dsa.workspace = true
+
+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = [
+    'cfg(wasm_bindgen_unstable_test_coverage)',
+] }
+
+[package.metadata.cargo-machete]
+ignored = ["getrandom"]
diff --git a/crates/ietf_mtc_worker/LICENSE b/crates/ietf_mtc_worker/LICENSE
new file mode 100644
index 00000000..df1cff30
--- /dev/null
+++ b/crates/ietf_mtc_worker/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2025 Cloudflare, Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+- Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above
+  copyright notice, this list of conditions and the following disclaimer
+  in the documentation and/or other materials provided with the
+  distribution.
+- Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/crates/ietf_mtc_worker/README.md b/crates/ietf_mtc_worker/README.md
new file mode 100644
index 00000000..dc06a59b
--- /dev/null
+++ b/crates/ietf_mtc_worker/README.md
@@ -0,0 +1,52 @@
+# IETF Merkle Tree CA Worker
+
+A Rust implementation of an [IETF Merkle Tree Certificate CA](https://github.com/ietf-plants-wg/merkle-tree-certs/) for deployment on [Cloudflare Workers](https://workers.cloudflare.com/).
+
+This worker implements [draft-ietf-plants-merkle-tree-certs-02](https://datatracker.ietf.org/doc/draft-ietf-plants-merkle-tree-certs/). For the older bootstrap experiment, see [`bootstrap_mtc_worker`](../bootstrap_mtc_worker/README.md).
+
+The internal log architecture (Sequencer, Batcher, Cleaner Durable Objects, tiled R2 storage) is shared with the [Static CT Log](../ct_worker/README.md).
+
+## How it works
+
+Subscribers submit a PKCS#10 CSR (base64url-encoded, no padding) to the `add-entry` endpoint, matching the ACME `finalize` format (RFC 8555 §7.4). The CA extracts the subject, SPKI, and SANs from the CSR and logs them as a `TBSCertificateLogEntry`. The validity window is set server-side to `[now, now + max_certificate_lifetime_secs]`.
+
+Once a landmark interval elapses, the sequencer produces a landmark subtree and the CA can issue **landmark-relative MTC certificates** — DER-encoded X.509 structures whose `signatureValue` encodes a Merkle inclusion proof into the landmark subtree rather than a traditional signature.
+
+## Known limitations
+
+- Standalone certificates (with cosignatures in the `signatures` field) are not yet implemented.
+- ML-DSA signing is not yet implemented.
+- The subtree signing oracle (for external cosigners) is not yet implemented.
+- ACME order `notBefore`/`notAfter` fields are not currently supported.
+
+## Development
+
+Requires `node` and `npm`.
+
+```bash
+# Run locally
+npx wrangler -e=dev dev
+
+# Reset local state between runs
+./reset-dev.sh
+```
+
+### Integration tests
+
+```bash
+BASE_URL=http://localhost:8787 IETF_MTC_LOG_NAME=dev2 cargo test -p integration_tests --test ietf_mtc_api
+```
+
+## Deployment
+
+See the [`ct_worker` documentation](../ct_worker/README.md#deployment-to-a-custom-domain) for deployment to a custom domain.
+
+The production environment is `prod` (maps to `config.prod.json`):
+
+```bash
+npx wrangler -e=prod deploy
+```
+
+## License
+
+The project is licensed under the [BSD-3-Clause License](./LICENSE).
diff --git a/crates/ietf_mtc_worker/build.rs b/crates/ietf_mtc_worker/build.rs
new file mode 100644
index 00000000..a0b86208
--- /dev/null
+++ b/crates/ietf_mtc_worker/build.rs
@@ -0,0 +1,92 @@
+// Copyright (c) 2025 Cloudflare, Inc.
+// Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause
+
+// Build script to include per-environment configuration and trusted roots.
+
+use config::AppConfig;
+use der::asn1::Utf8StringRef;
+use der::{Any, Tag};
+use ietf_mtc_api::ID_RDNA_TRUSTANCHOR_ID;
+use std::env;
+use std::fs;
+use url::Url;
+use x509_cert::{
+    attr::AttributeTypeAndValue,
+    name::{RdnSequence, RelativeDistinguishedName},
+};
+
+fn main() {
+    let env = env::var("DEPLOY_ENV").unwrap_or_else(|_| "dev".to_string());
+    let config_file = &format!("config.{env}.json");
+    let config_contents = &fs::read_to_string(config_file).unwrap_or_else(|e| {
+        panic!("failed to read config file '{config_file}': {e}");
+    });
+
+    // Validate the config json against the schema.
+    let json = serde_json::from_str(config_contents).unwrap_or_else(|e| {
+        panic!("failed to deserialize JSON config '{config_file}': {e}");
+    });
+    let schema = serde_json::from_str(include_str!("config.schema.json")).unwrap_or_else(|e| {
+        panic!("failed to deserialize JSON schema 'config.schema.json': {e}");
+    });
+    jsonschema::validate(&schema, &json).unwrap_or_else(|e| {
+        panic!("config '{config_file}' does not match schema 'config.schema.json': {e}");
+    });
+
+    // Validate the config parameters.
+    let conf = serde_json::from_str::<AppConfig>(config_contents).unwrap_or_else(|e| {
+        panic!("failed to deserialize JSON config '{config_file}': {e}");
+    });
+    for (name, params) in conf.logs {
+        // Make sure we can create the RDN sequence for the issuer log ID.
+        let _ = RdnSequence::from(vec![RelativeDistinguishedName::try_from(vec![
+            AttributeTypeAndValue {
+                oid: ID_RDNA_TRUSTANCHOR_ID,
+                value: Any::new(
+                    Tag::Utf8String,
+                    Utf8StringRef::new(&params.log_id).unwrap().as_bytes(),
+                )
+                .unwrap(),
+            },
+        ])
+        .unwrap()]);
+
+        // Valid location hints: https://developers.cloudflare.com/durable-objects/reference/data-location/#supported-locations-1
+        if let Some(location) = &params.location_hint {
+            assert!(
+                ["wnam", "enam", "sam", "weur", "eeur", "apac", "oc", "afr", "me",]
+                    .contains(&location.as_str()),
+                "{name} invalid location hint: {location}"
+            );
+        }
+
+        check_url(&params.submission_url);
+        if !params.monitoring_url.is_empty() {
+            check_url(&params.monitoring_url);
+        }
+    }
+
+    // Copy to OUT_DIR.
+    let out_dir = env::var("OUT_DIR").unwrap();
+    fs::copy(config_file, format!("{out_dir}/config.json")).expect("failed to copy config file");
+
+    // Make DEPLOY_ENV available at compile time via env!()
+    println!("cargo::rustc-env=DEPLOY_ENV={env}");
+
+    println!("cargo::rerun-if-env-changed=DEPLOY_ENV");
+    println!("cargo::rerun-if-changed=config.schema.json");
+    println!("cargo::rerun-if-changed={config_file}");
+}
+
+// Validate the URL prefix according to https://datatracker.ietf.org/doc/html/rfc6962#section-4.
+// "The prefix can include a path as well as a server name and a port."
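+// For example, "https://mtc.example.com/logs/dev1/" passes, while a URL with a
+// query string, fragment, or userinfo component fails the origin + path
+// reconstruction check below.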
+fn check_url(s: &str) {
+    let u = Url::parse(s).unwrap();
+    assert!(["http", "https"].contains(&u.scheme()), "invalid scheme");
+    assert!(u.domain().is_some(), "invalid domain");
+    assert_eq!(
+        u.as_str(),
+        &format!("{}{}", u.origin().ascii_serialization(), u.path()),
+        "invalid URL components"
+    );
+}
diff --git a/crates/ietf_mtc_worker/config.dev.json b/crates/ietf_mtc_worker/config.dev.json
new file mode 100644
index 00000000..356c3ecf
--- /dev/null
+++ b/crates/ietf_mtc_worker/config.dev.json
@@ -0,0 +1,21 @@
+{
+  "logging_level": "info",
+  "logs": {
+    "dev1": {
+      "description": "MTCA Dev1",
+      "log_id": "44363.48.1",
+      "cosigner_id": "44363.48.2",
+      "submission_url": "http://localhost:8787/logs/dev1/",
+      "location_hint": "enam"
+    },
+    "dev2": {
+      "description": "MTCA Dev2",
+      "log_id": "44363.48.3",
+      "cosigner_id": "44363.48.4",
+      "submission_url": "http://localhost:8787/logs/dev2/",
+      "location_hint": "enam",
+      "max_certificate_lifetime_secs": 100,
+      "landmark_interval_secs": 10
+    }
+  }
+}
diff --git a/crates/ietf_mtc_worker/config.schema.json b/crates/ietf_mtc_worker/config.schema.json
new file mode 100644
index 00000000..18fe5734
--- /dev/null
+++ b/crates/ietf_mtc_worker/config.schema.json
@@ -0,0 +1,121 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "type": "object",
+  "additionalProperties": false,
+  "properties": {
+    "logging_level": {
+      "type": "string",
+      "enum": [
+        "debug",
+        "info",
+        "warn",
+        "error"
+      ],
+      "description": "Log verbosity."
+    },
+    "logs": {
+      "type": "object",
+      "description": "Dictionary mapping MTC log shard names to configurations.",
+      "additionalProperties": false,
+      "patternProperties": {
+        "^[a-zA-Z0-9_]+$": {
+          "type": "object",
+          "additionalProperties": false,
+          "properties": {
+            "description": {
+              "type": "string",
+              "description": "Description of the log."
+            },
+            "log_id": {
+              "type": "string",
+              "description": "The log ID (a trust anchor ID) in dotted decimal notation (e.g., 32473.1)."
+            },
+            "cosigner_id": {
+              "type": "string",
+              "description": "The cosigner's ID (a trust anchor ID) in dotted decimal notation (e.g., 32473.1)."
+            },
+            "max_certificate_lifetime_secs": {
+              "type": "integer",
+              "default": 604800,
+              "description": "The maximum lifetime for issued certificates. The actual lifetime could be less, for example, to fit within the bootstrap certificate's validity window."
+            },
+            "landmark_interval_secs": {
+              "type": "integer",
+              "minimum": 1,
+              "default": 3600,
+              "description": "The time between publishing landmarks. This is used to calculate `max_active_landmarks` as `ceil(max_certificate_lifetime_secs / landmark_interval_secs) + 1`."
+            },
+            "submission_url": {
+              "type": "string",
+              "description": "URL for log submissions."
+            },
+            "monitoring_url": {
+              "type": "string",
+              "default": "",
+              "description": "URL for log monitoring. If unspecified, use the submission URL and the Worker will proxy requests to the R2 bucket."
+            },
+            "location_hint": {
+              "type": "string",
+              "description": "Provide a hint to place the log in a specific geographic location. See https://developers.cloudflare.com/durable-objects/reference/data-location/ for supported locations. If unspecified, the Durable Object will be created in proximity to the first request."
+            },
+            "sequence_interval_millis": {
+              "type": "integer",
+              "minimum": 100,
+              "default": 1000,
+              "description": "The duration in between sequencing operations, in milliseconds."
+            },
+            "max_sequence_skips": {
+              "type": "integer",
+              "minimum": 0,
+              "default": 0,
+              "description": "The maximum number of times sequencing can be skipped to avoid creating partial tiles. If non-zero, pending entries may be delayed by either a multiple of the sequence interval or sequence_skip_threshold_millis if set."
+            },
+            "sequence_skip_threshold_millis": {
+              "type": "integer",
+              "minimum": 0,
+              "description": "If provided, entries will only be skipped by sequencing (when max_sequence_skips is non-zero) if they have been in the pool for less than this timeout."
+            },
+            "num_batchers": {
+              "type": "integer",
+              "minimum": 0,
+              "default": 8,
+              "maximum": 255,
+              "description": "The number of batchers to use to proxy requests to the sequencer. If zero, requests from the frontend worker go directly to the sequencer."
+            },
+            "batch_timeout_millis": {
+              "type": "integer",
+              "minimum": 100,
+              "default": 100,
+              "description": "The maximum duration to wait before submitting a batch to the sequencer, in milliseconds."
+            },
+            "max_batch_entries": {
+              "type": "integer",
+              "minimum": 1,
+              "default": 256,
+              "description": "The maximum number of entries per batch."
+            },
+            "clean_interval_secs": {
+              "type": "integer",
+              "minimum": 1,
+              "default": 60,
+              "description": "How long to wait in between runs of the partial tile cleaner."
+            },
+            "version": {
+              "type": "string",
+              "enum": ["draft02"],
+              "default": "draft02",
+              "description": "The version of draft-ietf-plants-merkle-tree-certs that this log implements."
+            }
+          },
+          "required": [
+            "log_id",
+            "submission_url"
+          ]
+        }
+      }
+    }
+  },
+  "required": [
+    "logs"
+  ]
+}
\ No newline at end of file
diff --git a/crates/ietf_mtc_worker/config/Cargo.toml b/crates/ietf_mtc_worker/config/Cargo.toml
new file mode 100644
index 00000000..8ab7358e
--- /dev/null
+++ b/crates/ietf_mtc_worker/config/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "ietf_mtc_worker_config"
+publish = false
+version.workspace = true
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+readme.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = "Configuration for ietf_mtc_worker"
+
+[dependencies]
+serde.workspace = true
+ietf_mtc_api.workspace = true
\ No newline at end of file
diff --git a/crates/ietf_mtc_worker/config/src/lib.rs b/crates/ietf_mtc_worker/config/src/lib.rs
new file mode 100644
index 00000000..4b298d70
--- /dev/null
+++ b/crates/ietf_mtc_worker/config/src/lib.rs
@@ -0,0 +1,83 @@
+// Copyright (c) 2025 Cloudflare, Inc.
+// Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause
+
+// MTC log configuration, in a separate crate to allow build.rs to use it.
+use serde::Deserialize;
+use std::collections::HashMap;
+
+#[derive(Deserialize, Debug)]
+pub struct AppConfig {
+    pub logging_level: Option<String>,
+    pub logs: HashMap<String, LogParams>,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct LogParams {
+    pub description: Option<String>,
+    pub log_id: String,
+    pub cosigner_id: String,
+    #[serde(default = "default_usize::<604_800>")]
+    pub max_certificate_lifetime_secs: usize,
+    #[serde(default = "default_usize::<3600>")]
+    pub landmark_interval_secs: usize,
+    #[serde(default)]
+    pub monitoring_url: String,
+    pub submission_url: String,
+    pub location_hint: Option<String>,
+    #[serde(default = "default_u64::<1000>")]
+    pub sequence_interval_millis: u64,
+    #[serde(default = "default_usize::<0>")]
+    pub max_sequence_skips: usize,
+    pub sequence_skip_threshold_millis: Option<u64>,
+    #[serde(default = "default_u8::<8>")]
+    pub num_batchers: u8,
+    #[serde(default = "default_u64::<1000>")]
+    pub batch_timeout_millis: u64,
+    #[serde(default = "default_usize::<100>")]
+    pub max_batch_entries: usize,
+    #[serde(default = "default_u64::<60>")]
+    pub clean_interval_secs: u64,
+    /// The version of draft-ietf-plants-merkle-tree-certs that this log implements.
+    #[serde(default)]
+    pub version: ietf_mtc_api::DraftVersion,
+}
+
+impl LogParams {
+    /// Return the maximum number of active landmarks (those covering unexpired
+    /// certificates).
+    ///
+    /// # Formula: `ceil(lifetime / interval) + 1`
+    ///
+    /// The `+ 1` accounts for landmarks not allocated at the exact start of
+    /// their time interval, which can push certificate expiry one interval
+    /// further than `ceil(lifetime / interval)` alone would bound.
+    ///
+    /// # Example
+    ///
+    /// With 7-day (168 hour) certificate lifetime and 1-hour landmark interval:
+    /// - Formula: `ceil(168 / 1) + 1 = 168 + 1 = 169`
+    /// - This means up to 169 active landmarks
+    ///
+    /// # Storage Note
+    ///
+    /// The actual landmark deque stores `max_active_landmarks + 1` entries (170
+    /// in the example above). The extra (expired) landmark is needed to compute
+    /// subtrees for all active landmarks. See `LandmarkSequence` documentation
+    /// for details.
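+    ///
+    /// Likewise, with the `dev2` entry in `config.dev.json` (100s certificate
+    /// lifetime, 10s landmark interval), this yields `ceil(100 / 10) + 1 = 11`
+    /// active landmarks, with a stored deque of 12.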
+    #[must_use]
+    pub fn max_active_landmarks(&self) -> usize {
+        self.max_certificate_lifetime_secs
+            .div_ceil(self.landmark_interval_secs)
+            + 1
+    }
+}
+
+fn default_u8<const V: u8>() -> u8 {
+    V
+}
+fn default_u64<const V: u64>() -> u64 {
+    V
+}
+fn default_usize<const V: usize>() -> usize {
+    V
+}
diff --git a/crates/ietf_mtc_worker/reset-dev.sh b/crates/ietf_mtc_worker/reset-dev.sh
new file mode 100755
index 00000000..52bac394
--- /dev/null
+++ b/crates/ietf_mtc_worker/reset-dev.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+rm -rf .wrangler/state
\ No newline at end of file
diff --git a/crates/ietf_mtc_worker/src/batcher_do.rs b/crates/ietf_mtc_worker/src/batcher_do.rs
new file mode 100644
index 00000000..e329a69b
--- /dev/null
+++ b/crates/ietf_mtc_worker/src/batcher_do.rs
@@ -0,0 +1,34 @@
+use crate::CONFIG;
+use generic_log_worker::{get_durable_object_name, BatcherConfig, GenericBatcher, BATCHER_BINDING};
+use ietf_mtc_api::IetfMtcPendingLogEntry;
+#[allow(clippy::wildcard_imports)]
+use worker::*;
+
+#[durable_object(fetch)]
+struct Batcher(GenericBatcher<IetfMtcPendingLogEntry>);
+
+impl DurableObject for Batcher {
+    fn new(state: State, env: Env) -> Self {
+        let name = get_durable_object_name(
+            &env,
+            &state,
+            BATCHER_BINDING,
+            &mut CONFIG
+                .logs
+                .iter()
+                .map(|(name, params)| (name.as_str(), params.num_batchers)),
+        );
+        let params = &CONFIG.logs[name];
+        let config = BatcherConfig {
+            name: name.to_string(),
+            max_batch_entries: params.max_batch_entries,
+            batch_timeout_millis: params.batch_timeout_millis,
+            enable_dedup: false, // deduplication is not currently supported
+            location_hint: params.location_hint.clone(),
+        };
+        Batcher(GenericBatcher::<IetfMtcPendingLogEntry>::new(state, env, config))
+    }
+
+    async fn fetch(&self, req: Request) -> Result<Response> {
+        self.0.fetch(req).await
+    }
+}
diff --git a/crates/ietf_mtc_worker/src/cleaner_do.rs b/crates/ietf_mtc_worker/src/cleaner_do.rs
new file mode 100644
index 00000000..5a5920fa
--- /dev/null
+++ b/crates/ietf_mtc_worker/src/cleaner_do.rs
@@ -0,0 +1,122 @@
+use std::time::Duration;
+
+use crate::{load_checkpoint_cosigner, load_origin, CONFIG};
+use generic_log_worker::{
+    get_durable_object_name, load_public_bucket, CleanerConfig, GenericCleaner, ObjectBackend,
+    ObjectBucket, CLEANER_BINDING,
+};
+use ietf_mtc_api::{IetfMtcPendingLogEntry, LandmarkSequence, LANDMARK_KEY, SUBTREE_SIG_KEY_PREFIX};
+use signed_note::VerifierList;
+use tlog_tiles::{CheckpointSigner, PendingLogEntry};
+#[allow(clippy::wildcard_imports)]
+use worker::*;
+
+#[durable_object(alarm)]
+struct Cleaner(GenericCleaner, Env, String);
+
+impl DurableObject for Cleaner {
+    fn new(state: State, env: Env) -> Self {
+        let name = get_durable_object_name(
+            &env,
+            &state,
+            CLEANER_BINDING,
+            &mut CONFIG.logs.keys().map(|name| (name.as_str(), 0)),
+        );
+        let params = &CONFIG.logs[name];
+
+        let config = CleanerConfig {
+            name: name.to_string(),
+            origin: load_origin(name),
+            data_path: IetfMtcPendingLogEntry::DATA_TILE_PATH,
+            aux_path: IetfMtcPendingLogEntry::AUX_TILE_PATH,
+            verifiers: VerifierList::new(vec![load_checkpoint_cosigner(&env, name).verifier()]),
+            clean_interval: Duration::from_secs(params.clean_interval_secs),
+        };
+
+        let name = name.to_string();
+        Cleaner(GenericCleaner::new(state, &env, config), env, name)
+    }
+
+    async fn fetch(&self, req: Request) -> Result<Response> {
+        self.0.fetch(req).await
+    }
+
+    async fn alarm(&self) -> Result<Response> {
+        // Run the generic log cleaner first.
+        let response = self.0.alarm().await?;
+
+        // Then clean up expired subtree signatures.
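+        // Best-effort: a failure here is logged but does not fail the alarm,
+        // so tile cleanup is never blocked by signature cleanup.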
+        if let Err(e) = self.clean_subtree_sigs().await {
+            log::warn!("{}: subtree sig cleanup failed: {e}", self.2);
+        }
+
+        Ok(response)
+    }
+}
+
+impl Cleaner {
+    /// Delete subtree signature entries whose covered interval ends at or
+    /// before the oldest landmark in the sequence.
+    ///
+    /// Any entry with `hi <= oldest_landmark` is guaranteed to be covered by
+    /// an expired landmark and will never be needed for a new certificate.
+    async fn clean_subtree_sigs(&self) -> Result<()> {
+        let env = &self.1;
+        let name = &self.2;
+        let params = &CONFIG.logs[name.as_str()];
+        let object_bucket = ObjectBucket::new(load_public_bucket(env, name)?);
+        let raw_bucket = load_public_bucket(env, name)?;
+
+        // Load the landmark sequence to determine the oldest active landmark.
+        let Some(seq_bytes) = object_bucket.fetch(LANDMARK_KEY).await? else {
+            return Ok(()); // no landmarks yet, nothing to clean
+        };
+        let seq = LandmarkSequence::from_bytes(&seq_bytes, params.max_active_landmarks())
+            .map_err(|e| e.to_string())?;
+        let Some(&oldest_landmark) = seq.landmarks.front() else {
+            return Ok(());
+        };
+
+        // List all subtree signature keys and delete those with hi <= oldest_landmark.
+        let mut cursor = None;
+        loop {
+            let mut list_req = raw_bucket.list().prefix(SUBTREE_SIG_KEY_PREFIX);
+            if let Some(ref c) = cursor {
+                list_req = list_req.cursor(c);
+            }
+            let listed = list_req.execute().await?;
+
+            let to_delete: Vec<String> = listed
+                .objects()
+                .into_iter()
+                .filter_map(|obj| {
+                    let key = obj.key();
+                    parse_subtree_sig_hi(&key)
+                        .filter(|&hi| hi <= oldest_landmark)
+                        .map(|_| key)
+                })
+                .collect();
+
+            if !to_delete.is_empty() {
+                log::info!("{name}: deleting {} expired subtree sigs", to_delete.len());
+                raw_bucket.delete_multiple(to_delete).await?;
+            }
+
+            if listed.truncated() {
+                cursor = listed.cursor();
+            } else {
+                break;
+            }
+        }
+
+        Ok(())
+    }
+}
+
+/// Parse the `hi` endpoint from a subtree signature R2 key.
+/// Key format: `{prefix}/{lo:020}-{hi:020}`
+fn parse_subtree_sig_hi(key: &str) -> Option<u64> {
+    let suffix = key.strip_prefix(SUBTREE_SIG_KEY_PREFIX)?.strip_prefix('/')?;
+    let hi_str = suffix.split('-').nth(1)?;
+    hi_str.parse().ok()
+}
diff --git a/crates/ietf_mtc_worker/src/frontend_worker.rs b/crates/ietf_mtc_worker/src/frontend_worker.rs
new file mode 100644
index 00000000..6b51d550
--- /dev/null
+++ b/crates/ietf_mtc_worker/src/frontend_worker.rs
@@ -0,0 +1,546 @@
+// Copyright (c) 2025 Cloudflare, Inc.
+// Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause
+
+//! Frontend worker for the IETF MTC submission API.
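+//!
+//! Routes, all nested under `/logs/:log/`: `add-entry` (POST a base64url CSR,
+//! returns a standalone certificate), `get-certificate` (POST a leaf index and
+//! SPKI, returns a landmark-relative certificate), `get-landmark-bundle`,
+//! `metadata`, `sequencer_id`, and a catch-all read proxy to the R2 bucket
+//! when no separate monitoring URL is configured.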
+
+use crate::{load_checkpoint_cosigner, load_origin, CONFIG};
+use der::{
+    asn1::{UtcTime, Utf8StringRef},
+    Any, Decode, Tag,
+};
+use generic_log_worker::{
+    batcher_id_from_lookup_key, deserialize, get_durable_object_stub, init_logging,
+    load_public_bucket,
+    log_ops::{prove_subtree_inclusion, read_leaf, ProofError, CHECKPOINT_KEY},
+    obs::Wshim,
+    serialize,
+    util::now_millis,
+    ObjectBackend, ObjectBucket, ENTRY_ENDPOINT,
+};
+use ietf_mtc_api::{
+    build_pending_entry, serialize_mtc_cert, AddEntryRequest, AddEntryResponse, IetfMtcLogEntry,
+    IetfSequenceMetadata, LandmarkSequence, SignedSubtree, TrustAnchorID,
+    ID_RDNA_TRUSTANCHOR_ID, LANDMARK_BUNDLE_KEY, LANDMARK_KEY,
+};
+use serde::{Deserialize, Serialize};
+use serde_with::{base64::Base64, serde_as};
+use signed_note::VerifierList;
+use std::str::FromStr;
+use std::time::Duration;
+use tlog_tiles::{
+    open_checkpoint, CheckpointSigner, CheckpointText, LeafIndex, PendingLogEntry,
+    PendingLogEntryBlob,
+};
+#[allow(clippy::wildcard_imports)]
+use worker::*;
+use x509_cert::{
+    attr::AttributeTypeAndValue,
+    name::{RdnSequence, RelativeDistinguishedName},
+    time::{Time, Validity},
+};
+
+const UNKNOWN_LOG_MSG: &str = "unknown log";
+
+#[serde_as]
+#[derive(Serialize)]
+struct MetadataResponse<'a> {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    description: &'a Option<String>,
+    log_id: String,
+    cosigner_id: String,
+    /// DER-encoded `SubjectPublicKeyInfo` of the cosigner's verifying key,
+    /// base64-encoded. Includes the algorithm identifier so clients can
+    /// determine the signing algorithm without out-of-band information.
+    #[serde_as(as = "Base64")]
+    cosigner_public_key: Vec<u8>,
+    submission_url: &'a str,
+    monitoring_url: &'a str,
+}
+
+/// POST body for the `/get-certificate` endpoint.
+#[serde_as]
+#[derive(Serialize, Deserialize)]
+pub struct GetCertificateRequest {
+    pub leaf_index: LeafIndex,
+
+    #[serde_as(as = "Base64")]
+    pub spki_der: Vec<u8>,
+}
+
+/// Response body for the `/get-certificate` endpoint.
+#[serde_as]
+#[derive(Serialize, Deserialize)]
+pub struct GetCertificateResponse {
+    #[serde_as(as = "Base64")]
+    pub data: Vec<u8>,
+    pub landmark_id: usize,
+}
+
+/// Start is the first code run when the Wasm module is loaded.
+#[event(start)]
+fn start() {
+    init_logging(CONFIG.logging_level.as_deref());
+}
+
+/// Worker entrypoint.
+///
+/// # Errors
+///
+/// Returns an error if any unhandled internal errors occur while processing the request.
+///
+/// # Panics
+///
+/// Panics if there are issues parsing route parameters, which should never happen.
+#[event(fetch, respond_with_errors)]
+async fn main(req: Request, env: Env, ctx: Context) -> Result<Response> {
+    let wshim = Wshim::from_env(&env);
+    let response = Router::new()
+        .or_else_any_method_async("/logs/:log/*route", |req, ctx| async move {
+            let name = if let Some(name) = ctx.param("log") {
+                if CONFIG.logs.contains_key(name) {
+                    &name.clone()
+                } else {
+                    return Err(UNKNOWN_LOG_MSG.into());
+                }
+            } else {
+                return Err("missing 'log' route param".into());
+            };
+
+            Router::with_data(name)
+                .post_async("/logs/:log/add-entry", |req, ctx| async move {
+                    add_entry(req, &ctx.env, ctx.data).await
+                })
+                .post_async("/logs/:log/get-certificate", |mut req, ctx| async move {
+                    let name = ctx.data;
+                    let params = &CONFIG.logs[name];
+                    let Ok(GetCertificateRequest {
+                        leaf_index,
+                        spki_der,
+                    }) = req.json().await
+                    else {
+                        return Response::error("Unexpected input", 400);
+                    };
+                    let object_backend = ObjectBucket::new(load_public_bucket(&ctx.env, name)?);
+                    let (checkpoint, _checkpoint_bytes) =
+                        get_current_checkpoint(&ctx.env, name, &object_backend).await?;
+                    if leaf_index >= checkpoint.size() {
+                        return Response::error("Leaf index is not in log", 422);
+                    }
+
+                    let seq = get_landmark_sequence(name, &object_backend).await?;
+                    if leaf_index < seq.first_index() {
+                        return Response::error("Leaf index is before first active landmark", 422);
+                    }
+                    let Some((landmark_id, landmark_subtree)) = seq.subtree_for_index(leaf_index)
+                    else {
+                        let headers = Headers::new();
+                        let i = params.landmark_interval_secs as u64;
+                        headers
+                            .set("Retry-After", &format!("{}", i - (now_millis() / 1000) % i))?;
+                        return Response::error("Leaf index will be covered by next landmark", 503)
+                            .map(|r| r.with_headers(headers));
+                    };
+
+                    let log_entry = read_leaf::<IetfMtcLogEntry>(
+                        &object_backend,
+                        leaf_index,
+                        checkpoint.size(),
+                        checkpoint.hash(),
+                    )
+                    .await
+                    .map_err(|e| e.to_string())?;
+
+                    let proof = match prove_subtree_inclusion(
+                        checkpoint.size(),
+                        *checkpoint.hash(),
+                        landmark_subtree.lo(),
+                        landmark_subtree.hi(),
+                        leaf_index,
+                        &object_backend,
+                    )
+                    .await
+                    {
+                        Ok(p) => p,
+                        Err(ProofError::Tlog(s)) => return Response::error(s.to_string(), 422),
+                        Err(ProofError::Other(e)) => return Err(e.to_string().into()),
+                    };
+
+                    let data = match serialize_mtc_cert(
+                        &log_entry,
+                        leaf_index,
+                        &spki_der,
+                        &landmark_subtree,
+                        proof,
+                        &[], // landmark-relative: no cosignatures
+                    ) {
+                        Ok(data) => data,
+                        Err(e) => {
+                            return Response::error(
+                                format!("Failed to serialize landmark-relative cert: {e}"),
+                                422,
+                            )
+                        }
+                    };
+
+                    Response::from_json(&GetCertificateResponse { data, landmark_id })
+                })
+                .get_async("/logs/:log/get-landmark-bundle", |_req, ctx| async move {
+                    get_landmark_bundle(&ctx.env, ctx.data).await
+                })
+                .get("/logs/:log/metadata", |_req, ctx| {
+                    let name = ctx.data;
+                    let params = &CONFIG.logs[name];
+                    let cosigner = load_checkpoint_cosigner(&ctx.env, name);
+                    Response::from_json(&MetadataResponse {
+                        description: &params.description,
+                        log_id: cosigner.log_id().to_string(),
+                        cosigner_id: cosigner.cosigner_id().to_string(),
+                        cosigner_public_key: cosigner.verifying_key(),
+                        submission_url: &params.submission_url,
+                        monitoring_url: if params.monitoring_url.is_empty() {
+                            &params.submission_url
+                        } else {
+                            &params.monitoring_url
+                        },
+                    })
+                })
+                .get("/logs/:log/sequencer_id", |_req, ctx| {
+                    let name = ctx.data;
+                    let namespace = ctx.env.durable_object("SEQUENCER")?;
+                    let object_id = namespace.id_from_name(name)?;
+                    Response::ok(object_id.to_string())
+                })
+                .get_async("/logs/:log/*key", |_req, ctx| async move {
+                    let name = ctx.data;
+                    let key = ctx.param("key").unwrap();
+                    if CONFIG.logs[name].monitoring_url.is_empty() {
+                        let bucket = load_public_bucket(&ctx.env, name)?;
+                        if let Some(obj) = bucket.get(key).execute().await? {
+                            Response::from_body(
+                                obj.body()
+                                    .ok_or("R2 object missing body")?
+                                    .response_body()?,
+                            )
+                            .map(|r| {
+                                r.with_headers(headers_from_http_metadata(obj.http_metadata()))
+                            })
+                        } else {
+                            Response::error("Not found", 404)
+                        }
+                    } else {
+                        Response::error(
+                            format!(
+                                "Use {} for monitoring API",
+                                CONFIG.logs[name].monitoring_url
+                            ),
+                            404,
+                        )
+                    }
+                })
+                .run(req, ctx.env)
+                .await
+        })
+        .run(req, env)
+        .await
+        .or_else(|e| match e {
+            Error::RustError(ref msg) if msg == UNKNOWN_LOG_MSG => {
+                Response::error("Unknown log", 400)
+            }
+            _ => {
+                log::warn!("Internal error: {e}");
+                Response::error("Internal error", 500)
+            }
+        });
+    if let Ok(wshim) = wshim {
+        ctx.wait_until(async move { wshim.flush(&generic_log_worker::obs::logs::LOGGER).await });
+    }
+    response
+}
+
+/// Builds the issuer RDN containing the log's Trust Anchor ID.
+fn build_issuer_rdn(log_id: &str) -> std::result::Result<RdnSequence, String> {
+    let utf8_value = Utf8StringRef::new(log_id).map_err(|e| e.to_string())?;
+    let any_value = Any::new(Tag::Utf8String, utf8_value.as_bytes()).map_err(|e| e.to_string())?;
+    let attr = AttributeTypeAndValue {
+        oid: ID_RDNA_TRUSTANCHOR_ID,
+        value: any_value,
+    };
+    let rdn = RelativeDistinguishedName::try_from(vec![attr])
+        .expect("single attribute should always succeed");
+    Ok(RdnSequence::from(vec![rdn]))
+}
+
+/// Compute the validity window for a new entry.
+///
+/// `not_before` is the current time; `not_after` is `not_before +
+/// max_lifetime_secs`.
+///
+/// ACME order `notBefore`/`notAfter` fields are not currently supported;
+/// the validity window is always determined by the server's policy.
+fn build_validity(
+    now_millis: u64,
+    max_lifetime_secs: u64,
+) -> std::result::Result<Validity, String> {
+    let not_before = Duration::from_millis(now_millis);
+    let not_after = not_before + Duration::from_secs(max_lifetime_secs);
+
+    Ok(Validity::new(
+        Time::UtcTime(UtcTime::from_unix_duration(not_before).map_err(|e| e.to_string())?),
+        Time::UtcTime(UtcTime::from_unix_duration(not_after).map_err(|e| e.to_string())?),
+    ))
+}
+
+async fn add_entry(mut req: Request, env: &Env, name: &str) -> Result<Response> {
+    let params = &CONFIG.logs[name];
+    let req: AddEntryRequest = match req.json().await {
+        Ok(r) => r,
+        Err(e) => {
+            log::warn!("{name}: Bad request: {e}");
+            return Response::error("Bad request", 400);
+        }
+    };
+
+    let issuer = build_issuer_rdn(&params.log_id)?;
+    let validity = match build_validity(
+        now_millis(),
+        params.max_certificate_lifetime_secs as u64,
+    ) {
+        Ok(v) => v,
+        Err(e) => {
+            log::warn!("{name}: Bad request: {e}");
+            return Response::error("Bad request", 400);
+        }
+    };
+
+    let pending_entry = match build_pending_entry(&req, &issuer, validity) {
+        Ok(e) => e,
+        Err(e) => {
+            log::warn!("{name}: Bad request: {e}");
+            return Response::error("Bad request", 400);
+        }
+    };
+
+    let lookup_key = pending_entry.lookup_key();
+
+    let stub = {
+        let shard_id = batcher_id_from_lookup_key(&lookup_key, params.num_batchers);
+        get_durable_object_stub(
+            env,
+            name,
+            shard_id,
+            if shard_id.is_some() {
+                "BATCHER"
+            } else {
+                "SEQUENCER"
+            },
+            params.location_hint.as_deref(),
+        )?
+    };
+    let serialized = serialize(&PendingLogEntryBlob {
+        lookup_key,
+        data: serialize(&pending_entry)?,
+    })?;
+    let mut response = stub
+        .fetch_with_request(Request::new_with_init(
+            &format!("http://fake_url.com{ENTRY_ENDPOINT}"),
+            &RequestInit {
+                method: Method::Post,
+                body: Some(serialized.into()),
+                ..Default::default()
+            },
+        )?)
+        .await?;
+    if response.status_code() != 200 {
+        return Ok(response);
+    }
+    let metadata = deserialize::<IetfSequenceMetadata>(&response.bytes().await?)?;
+    let leaf_index = metadata.leaf_index;
+
+    // Build the standalone certificate from the subtree signature cached by
+    // the sequencer. The checkpoint_callback runs before the sequencer
+    // returns, so the signature should always be present at this point.
+    let Some(certificate) = build_standalone_cert(
+        env,
+        name,
+        leaf_index,
+        metadata.old_tree_size,
+        metadata.new_tree_size,
+        &req,
+    )
+    .await
+    else {
+        log::warn!("{name}: subtree sig not found for leaf {leaf_index} after sequencing");
+        return Response::error("Service unavailable: subtree signature not yet available", 503);
+    };
+
+    Response::from_json(&AddEntryResponse { certificate })
+}
+
+/// Maximum number of retries waiting for a subtree signature to appear in R2.
+const MAX_SIG_RETRIES: u32 = 6;
+/// Delay between retries in milliseconds.
+const SIG_RETRY_DELAY_MS: u64 = 250;
+
+/// Try to build a standalone certificate for `leaf_index` using a cached
+/// subtree signature. Returns `None` if no matching signature is available
+/// yet (the client should retry); logs warnings on unexpected errors.
+async fn build_standalone_cert(
+    env: &Env,
+    name: &str,
+    leaf_index: LeafIndex,
+    old_tree_size: u64,
+    new_tree_size: u64,
+    req: &AddEntryRequest,
+) -> Option<Vec<u8>> {
+    use ietf_mtc_api::subtree_sig_key;
+
+    let object_bucket = ObjectBucket::new(load_public_bucket(env, name).ok()?);
+
+    // Compute the exact subtree containing leaf_index using the batch tree
+    // size range from IetfSequenceMetadata.
+    let (left, right) = tlog_tiles::Subtree::split_interval(old_tree_size, new_tree_size).ok()?;
+    let subtree = [Some(left), right]
+        .into_iter()
+        .flatten()
+        .find(|s| s.lo() <= leaf_index && leaf_index < s.hi())?;
+    let key = subtree_sig_key(subtree.lo(), subtree.hi());
+
+    let mut signed: Option<SignedSubtree> = None;
+    for _ in 0..MAX_SIG_RETRIES {
+        let Some(raw) = object_bucket.fetch(&key).await.ok().flatten() else {
+            worker::Delay::from(std::time::Duration::from_millis(SIG_RETRY_DELAY_MS)).await;
+            continue;
+        };
+        if let Ok(s) = serde_json::from_slice::<SignedSubtree>(&raw) {
+            signed = Some(s);
+            break;
+        }
+        worker::Delay::from(std::time::Duration::from_millis(SIG_RETRY_DELAY_MS)).await;
+    }
+    let signed = signed?;
+
+    let subtree = signed.as_subtree().ok()?;
+    let cosigner_id = TrustAnchorID::from_str(&signed.cosigner_id).ok()?;
+    let checkpoint_hash = tlog_tiles::Hash(signed.checkpoint_hash);
+
+    // Parse the CSR to extract the SPKI.
+    let csr = x509_cert::request::CertReq::from_der(&req.csr).ok()?;
+    let spki_der = der::Encode::to_der(&csr.info.public_key).ok()?;
+
+    // Read the sequenced log entry from the data tile.
+    let log_entry = generic_log_worker::log_ops::read_leaf::<IetfMtcLogEntry>(
+        &object_bucket,
+        leaf_index,
+        signed.checkpoint_size,
+        &checkpoint_hash,
+    )
+    .await
+    .ok()?;
+
+    // Compute an inclusion proof of leaf_index into the subtree.
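+    // The proof is computed against the same subtree bounds that were signed,
+    // so serialize_mtc_cert can pair it with the cached cosignature below.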
+    let proof = match generic_log_worker::log_ops::prove_subtree_inclusion(
+        signed.checkpoint_size,
+        checkpoint_hash,
+        subtree.lo(),
+        subtree.hi(),
+        leaf_index,
+        &object_bucket,
+    )
+    .await
+    {
+        Ok(p) => p,
+        Err(e) => {
+            log::warn!("{name}: subtree inclusion proof failed for leaf {leaf_index}: {e:?}");
+            return None;
+        }
+    };
+
+    serialize_mtc_cert(
+        &log_entry,
+        leaf_index,
+        &spki_der,
+        &subtree,
+        proof,
+        &[(cosigner_id, signed.signature)],
+    )
+    .ok()
+}
+
+async fn get_landmark_bundle(env: &Env, name: &str) -> Result<Response> {
+    let object_backend = ObjectBucket::new(load_public_bucket(env, name)?);
+    let Some(landmark_bundle_bytes) = object_backend.fetch(LANDMARK_BUNDLE_KEY).await? else {
+        return Err("failed to get landmark bundle".into());
+    };
+    Ok(ResponseBuilder::new()
+        .with_header("content-type", "application/json")?
+        .body(ResponseBody::Body(landmark_bundle_bytes)))
+}
+
+async fn get_current_checkpoint(
+    env: &Env,
+    name: &str,
+    object_backend: &ObjectBucket,
+) -> Result<(CheckpointText, Vec<u8>)> {
+    let checkpoint_bytes = object_backend
+        .fetch(CHECKPOINT_KEY)
+        .await?
+        .ok_or("no checkpoint in object storage".to_string())?;
+    let origin = &load_origin(name);
+    let verifiers = &VerifierList::new(vec![load_checkpoint_cosigner(env, name).verifier()]);
+    let (checkpoint, _timestamp) =
+        open_checkpoint(origin.as_str(), verifiers, now_millis(), &checkpoint_bytes)
+            .map_err(|e| e.to_string())?;
+    Ok((checkpoint, checkpoint_bytes))
+}
+
+async fn get_landmark_sequence(
+    name: &str,
+    object_backend: &ObjectBucket,
+) -> Result<LandmarkSequence> {
+    let params = &CONFIG.logs[name];
+    let Some(landmark_sequence_bytes) = object_backend.fetch(LANDMARK_KEY).await? else {
+        return Err("failed to get landmark sequence".into());
+    };
+    let landmark_sequence =
+        LandmarkSequence::from_bytes(&landmark_sequence_bytes, params.max_active_landmarks())
+            .map_err(|e| e.to_string())?;
+    Ok(landmark_sequence)
+}
+
+fn headers_from_http_metadata(meta: HttpMetadata) -> Headers {
+    let h = Headers::new();
+    if let Some(hdr) = meta.cache_control {
+        h.append("Cache-Control", &hdr).unwrap();
+    }
+    if let Some(hdr) = meta.content_encoding {
+        h.append("Content-Encoding", &hdr).unwrap();
+    }
+    if let Some(hdr) = meta.content_type {
+        h.append("Content-Type", &hdr).unwrap();
+    }
+    h
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use der::Encode;
+
+    #[test]
+    fn test_build_issuer_rdn() {
+        let rdn = build_issuer_rdn("test-log-id").unwrap();
+        assert_eq!(rdn.as_ref().len(), 1);
+        let attr = rdn.as_ref()[0].as_ref().iter().next().unwrap();
+        assert_eq!(attr.oid, ID_RDNA_TRUSTANCHOR_ID);
+        let encoded = attr.value.to_der().unwrap();
+        assert_eq!(encoded[0], 0x0C); // UTF8String tag
+    }
+
+    #[test]
+    fn test_build_validity() {
+        let now_ms = 1_700_000_000_000_u64; // milliseconds
+        let max_lifetime = 86400_u64; // 1 day
+
+        let validity = build_validity(now_ms, max_lifetime).unwrap();
+        assert_eq!(
+            validity.not_before.to_unix_duration().as_secs(),
+            now_ms / 1000
+        );
+        assert_eq!(
+            validity.not_after.to_unix_duration().as_secs(),
+            now_ms / 1000 + max_lifetime
+        );
+    }
+}
diff --git a/crates/ietf_mtc_worker/src/lib.rs b/crates/ietf_mtc_worker/src/lib.rs
new file mode 100644
index 00000000..78a4bd96
--- /dev/null
+++ b/crates/ietf_mtc_worker/src/lib.rs
@@ -0,0 +1,109 @@
+// Copyright (c) 2025 Cloudflare, Inc.
+// Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause
+
+#![doc = include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/README.md"))]
+
+use config::AppConfig;
+use ietf_mtc_api::{MtcCosigner, MtcSigningKey, MtcVerifyingKey, TrustAnchorID};
+use ml_dsa::{signature::Keypair as _, MlDsa44, SigningKey as MlDsaSigningKey};
+use pkcs8::DecodePrivateKey;
+use signed_note::KeyName;
+use std::collections::HashMap;
+use std::str::FromStr;
+use std::sync::{LazyLock, OnceLock};
+#[allow(clippy::wildcard_imports)]
+use worker::*;
+
+mod batcher_do;
+mod cleaner_do;
+mod frontend_worker;
+mod sequencer_do;
+
+// Algorithm OID constants.
+const OID_ED25519: der::asn1::ObjectIdentifier =
+    der::asn1::ObjectIdentifier::new_unwrap("1.3.101.112");
+const OID_ML_DSA_44: der::asn1::ObjectIdentifier =
+    der::asn1::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.3.17");
+
+// Application configuration.
+static CONFIG: LazyLock<AppConfig> = LazyLock::new(|| {
+    serde_json::from_str::<AppConfig>(include_str!(concat!(env!("OUT_DIR"), "/config.json")))
+        .expect("Failed to parse config")
+});
+
+type CachedKeys = (MtcSigningKey, MtcVerifyingKey);
+static KEY_MAP: OnceLock<HashMap<String, OnceLock<CachedKeys>>> = OnceLock::new();
+
+/// Return the key pair for the given log, using a per-log cache.
+///
+/// Uses `OnceLock::get()` to check the cache without blocking. If the cache
+/// is not yet populated (either empty or being initialized by another request),
+/// the key pair is parsed directly from the secret without waiting. This
+/// avoids the cross-request `OnceLock::get_or_init` deadlock that the Workers
+/// runtime detects when two requests concurrently initialize the same cell.
+pub(crate) fn load_key_pair(env: &Env, name: &str) -> Result<CachedKeys> {
+    let once = &KEY_MAP.get_or_init(|| {
+        CONFIG
+            .logs
+            .keys()
+            .map(|n| (n.clone(), OnceLock::new()))
+            .collect()
+    })[name];
+
+    // Fast path: already cached.
+    if let Some(keys) = once.get() {
+        return Ok(keys.clone());
+    }
+
+    // Slow path: parse from secret. We do not call get_or_init here because
+    // that would block if another request is currently initializing the cell,
+    // which the Workers runtime detects and cancels as a cross-request deadlock.
+    // Instead, parse directly and attempt to store the result; if another
+    // request beat us to it, use its cached value.
+    let pem = env.secret(&format!("SIGNING_KEY_{name}"))?.to_string();
+    let keys = parse_key_pair(&pem).map_err(worker::Error::from)?;
+    Ok(once.get_or_init(|| keys).clone())
+}
+
+/// Parse a PKCS#8 PEM key, dispatching to the correct algorithm based on the
+/// `AlgorithmIdentifier` OID embedded in the `PrivateKeyInfo`.
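+///
+/// For example, the dev keys in `.dev.vars` are ML-DSA-44 seeds
+/// (OID 2.16.840.1.101.3.4.3.17) and dispatch to the `OID_ML_DSA_44` arm,
+/// while an Ed25519 PKCS#8 key (OID 1.3.101.112) takes the first arm.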
+fn parse_key_pair(pem: &str) -> std::result::Result<(MtcSigningKey, MtcVerifyingKey), String> {
+    let (_label, doc) = pkcs8::SecretDocument::from_pem(pem).map_err(|e| e.to_string())?;
+    let pki = pkcs8::PrivateKeyInfoRef::try_from(doc.as_bytes()).map_err(|e| e.to_string())?;
+
+    match pki.algorithm.oid {
+        OID_ED25519 => {
+            let sk = ed25519_dalek::SigningKey::from_pkcs8_pem(pem)
+                .map_err(|e| e.to_string())?;
+            let vk = sk.verifying_key();
+            Ok((MtcSigningKey::Ed25519(sk), MtcVerifyingKey::Ed25519(vk)))
+        }
+        OID_ML_DSA_44 => {
+            let kp = MlDsaSigningKey::<MlDsa44>::from_pkcs8_pem(pem).map_err(|e| e.to_string())?;
+            Ok((
+                MtcSigningKey::MlDsa44(kp.signing_key().clone()),
+                MtcVerifyingKey::MlDsa44(kp.verifying_key().clone()),
+            ))
+        }
+        oid => Err(format!("unsupported signing algorithm OID: {oid}")),
+    }
+}
+
+pub(crate) fn load_checkpoint_cosigner(env: &Env, name: &str) -> MtcCosigner {
+    let log_id = TrustAnchorID::from_str(&CONFIG.logs[name].log_id).unwrap();
+    let cosigner_id = TrustAnchorID::from_str(&CONFIG.logs[name].cosigner_id).unwrap();
+    let (sk, vk) = load_key_pair(env, name).unwrap();
+    MtcCosigner::new_checkpoint(cosigner_id, log_id, sk, vk)
+}
+
+pub(crate) fn load_origin(name: &str) -> KeyName {
+    KeyName::new(
+        CONFIG.logs[name]
+            .submission_url
+            .trim_start_matches("http://")
+            .trim_start_matches("https://")
+            .trim_end_matches('/')
+            .to_string(),
+    )
+    .expect("invalid origin name")
+}
diff --git a/crates/ietf_mtc_worker/src/sequencer_do.rs b/crates/ietf_mtc_worker/src/sequencer_do.rs
new file mode 100644
index 00000000..2bc20490
--- /dev/null
+++ b/crates/ietf_mtc_worker/src/sequencer_do.rs
@@ -0,0 +1,379 @@
+// Copyright (c) 2025 Cloudflare, Inc.
+// Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause
+
+//! Sequencer is the 'brain' of the log, responsible for sequencing entries and maintaining log state.
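+//!
+//! For MTC, a checkpoint callback extends the generic sequencer: after each
+//! sequencing round it signs the subtree(s) covering the new batch, and once
+//! per landmark interval it publishes an updated landmark sequence, landmark
+//! checkpoint, and landmark bundle to the public bucket.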
+
+use std::{collections::VecDeque, time::Duration};
+
+use crate::{load_checkpoint_cosigner, load_origin, CONFIG};
+use generic_log_worker::{
+    get_durable_object_name, load_public_bucket,
+    log_ops::{prove_subtree_consistency, ProofError},
+    CachedRoObjectBucket, CheckpointCallbacker, GenericSequencer, ObjectBucket, SequencerConfig,
+    SEQUENCER_BINDING,
+};
+use ietf_mtc_api::{
+    subtree_sig_key, IetfMtcLogEntry, LandmarkSequence, SignedSubtree, TrustAnchorID,
+    LANDMARK_BUNDLE_KEY, LANDMARK_CHECKPOINT_KEY, LANDMARK_KEY,
+};
+use serde::{Deserialize, Serialize};
+use serde_with::{base64::Base64, serde_as};
+use signed_note::Note;
+use std::str::FromStr;
+use tlog_tiles::{CheckpointText, Hash, Subtree, UnixTimestamp};
+#[allow(clippy::wildcard_imports)]
+use worker::*;
+
+#[durable_object(alarm)]
+struct Sequencer(GenericSequencer<IetfMtcLogEntry>);
+
+impl DurableObject for Sequencer {
+    fn new(state: State, env: Env) -> Self {
+        let name = get_durable_object_name(
+            &env,
+            &state,
+            SEQUENCER_BINDING,
+            &mut CONFIG.logs.keys().map(|name| (name.as_str(), 0)),
+        );
+        let params = &CONFIG.logs[name];
+
+        let config = SequencerConfig {
+            name: name.to_string(),
+            origin: load_origin(name),
+            checkpoint_signers: vec![Box::new(load_checkpoint_cosigner(&env, name))],
+            checkpoint_extension: Box::new(|_| vec![]), // no checkpoint extension for MTC
+            sequence_interval: Duration::from_millis(params.sequence_interval_millis),
+            max_sequence_skips: params.max_sequence_skips,
+            enable_dedup: false, // deduplication is not currently supported
+            sequence_skip_threshold_millis: params.sequence_skip_threshold_millis,
+            location_hint: params.location_hint.clone(),
+            checkpoint_callback: checkpoint_callback(&env, name),
+            env_label: env!("DEPLOY_ENV").to_string(),
+        };
+
+        Sequencer(GenericSequencer::new(state, env, config))
+    }
+
+    async fn fetch(&self, req: Request) -> Result<Response> {
+        self.0.fetch(req).await
+    }
+
+    async fn alarm(&self) -> Result<Response> {
+        self.0.alarm().await
+    }
+}
+
+#[serde_as]
+#[derive(Serialize, Deserialize)]
+pub struct SubtreeWithConsistencyProof {
+    #[serde_as(as = "Base64")]
+    pub hash: [u8; 32],
+    #[serde_as(as = "Vec<Base64>")]
+    pub consistency_proof: Vec<[u8; 32]>,
+}
+
+/// GET response structure for the `/get-landmark-bundle` endpoint.
+#[derive(Serialize, Deserialize)]
+pub struct LandmarkBundle {
+    pub checkpoint: String,
+    pub subtrees: Vec<SubtreeWithConsistencyProof>,
+    pub landmarks: VecDeque<u64>,
+}
+
+/// Return a callback function that gets passed into the generic sequencer and
+/// called each time a new checkpoint is created. For MTC, this is used to
+/// periodically update the landmark checkpoint sequence.
+fn checkpoint_callback(env: &Env, name: &str) -> CheckpointCallbacker {
+    let params = &CONFIG.logs[name];
+    let bucket = load_public_bucket(env, name).unwrap();
+    // Capture the signing key parts so the cosigner can be reconstructed
+    // on each callback invocation (MtcCosigner is not Clone).
+    let (sk, vk) = crate::load_key_pair(env, name).unwrap();
+    let log_id = TrustAnchorID::from_str(&CONFIG.logs[name].log_id).unwrap();
+    let cosigner_id_str = CONFIG.logs[name].cosigner_id.clone();
+    Box::new(
+        move |old_time: UnixTimestamp,
+              new_time: UnixTimestamp,
+              old_tree_size: u64,
+              new_tree_size: u64,
+              new_checkpoint_bytes: &[u8]| {
+            let new_checkpoint = {
+                // TODO: Make more efficient. There are two unnecessary allocations here.
+
+                // We can unwrap because the checkpoint provided is the checkpoint that the
+                // sequencer just created, so it must be well formed.
+                let note = Note::from_bytes(new_checkpoint_bytes)
+                    .expect("freshly created checkpoint is not a note");
+                CheckpointText::from_bytes(note.text())
+                    .expect("freshly created checkpoint is not a checkpoint")
+            };
+            let tree_size = new_checkpoint.size();
+            let root_hash = *new_checkpoint.hash();
+            // We can unwrap here for the same reason as above
+            let new_checkpoint_str = String::from_utf8(new_checkpoint_bytes.to_vec())
+                .expect("freshly created checkpoint is not UTF-8");
+
+            Box::pin({
+                // We have to clone each time since the bucket gets moved into
+                // the async function.
+                let bucket_clone = bucket.clone();
+                let sk_clone = sk.clone();
+                let vk_clone = vk.clone();
+                let log_id_clone = log_id.clone();
+                let cosigner_id_clone = TrustAnchorID::from_str(&cosigner_id_str).unwrap();
+                async move {
+                    if old_time > new_time {
+                        return Err("condition not met: `old_time <= new_time`".into());
+                    }
+
+                    // Sign and cache the subtree(s) covering entries added in
+                    // this batch (§4.5). This enables the add-entry endpoint to
+                    // return a standalone certificate immediately after sequencing.
+                    Box::pin(sign_and_cache_batch_subtrees(
+                        old_tree_size,
+                        new_tree_size,
+                        tree_size,
+                        root_hash,
+                        &cosigner_id_clone,
+                        &log_id_clone,
+                        &sk_clone,
+                        &vk_clone,
+                        &bucket_clone,
+                    ))
+                    .await?;
+
+                    // Check if we crossed a landmark epoch between the old and
+                    // new checkpoints. (Ideally `old_time` would be the time
+                    // that the last landmark was added, but we don't have that
+                    // handy, so we use the previous checkpoint time instead.)
+                    if new_time / (1000 * params.landmark_interval_secs as u64)
+                        == old_time / (1000 * params.landmark_interval_secs as u64)
+                    {
+                        // Not yet time to add a new landmark.
+                        return Ok(());
+                    }
+
+                    // Time to add a new landmark.
+                    let max_active_landmarks = params.max_active_landmarks();
+
+                    // TODO: the put operations below should all be done as part of the same
+                    // transaction. Otherwise an error that occurs after this point might put us in
+                    // a state where the objects are not in sync with one another, e.g., the
+                    // landmark bundle might not match the published landmark checkpoint. We need
+                    // an all-or-nothing multi-put operation. Tracking issue here:
+                    // https://github.com/cloudflare/workers-rs/issues/876
+
+                    // Load current landmark sequence.
+                    let mut seq =
+                        if let Some(obj) = bucket_clone.get(LANDMARK_KEY).execute().await? {
+                            let bytes = obj.body().ok_or("missing object body")?.bytes().await?;
+                            LandmarkSequence::from_bytes(&bytes, max_active_landmarks)
+                                .map_err(|e| e.to_string())?
+                        } else {
+                            LandmarkSequence::create(max_active_landmarks)
+                        };
+                    // Add the new landmark.
+                    if seq.add(tree_size).map_err(|e| e.to_string())? {
+                        // The landmark sequence was updated. Publish the result.
+                        bucket_clone
+                            .put(LANDMARK_KEY, seq.to_bytes().map_err(|e| e.to_string())?)
+                            .execute()
+                            .await?;
+                    }
+
+                    // Update the landmark checkpoint.
+                    bucket_clone
+                        .put(LANDMARK_CHECKPOINT_KEY, new_checkpoint_str.clone())
+                        .execute()
+                        .await?;
+
+                    // Compute the landmark bundle and save it.
+                    let landmark_subtrees =
+                        get_landmark_subtrees(&seq, root_hash, tree_size, bucket_clone.clone())
+                            .await?;
+
+                    // Sign and cache each active landmark subtree so that
+                    // build_standalone_cert can serve certificates for entries
+                    // from older batches, not just the current one.
+ Box::pin(sign_and_cache_landmark_subtrees(
+ &seq,
+ root_hash,
+ tree_size,
+ &cosigner_id_clone,
+ &log_id_clone,
+ &sk_clone,
+ &vk_clone,
+ &landmark_subtrees,
+ &bucket_clone,
+ ))
+ .await?;
+
+ let bundle = LandmarkBundle {
+ checkpoint: new_checkpoint_str,
+ subtrees: landmark_subtrees,
+ landmarks: seq.landmarks,
+ };
+ bucket_clone
+ // Can unwrap here because we use the derived Serialize impl for LandmarkBundle
+ .put(LANDMARK_BUNDLE_KEY, serde_json::to_vec(&bundle).unwrap())
+ .execute()
+ .await?;
+
+ Ok(())
+ }
+ })
+ },
+ )
+}
+
+// Computes the sequence of landmark subtrees and, for each subtree, a proof of consistency with the
+// checkpoint. Each landmark-relative MTC includes an inclusion proof in one of these subtrees.
+async fn get_landmark_subtrees(
+ landmark_sequence: &LandmarkSequence,
+ checkpoint_hash: Hash,
+ checkpoint_size: u64,
+ bucket: Bucket,
+) -> Result<Vec<SubtreeWithConsistencyProof>> {
+ let cached_object_backend = CachedRoObjectBucket::new(ObjectBucket::new(bucket));
+ let mut subtrees = Vec::new();
+ for landmark_subtree in landmark_sequence.subtrees() {
+ let (consistency_proof, landmark_subtree_hash) = match prove_subtree_consistency(
+ checkpoint_hash,
+ checkpoint_size,
+ landmark_subtree.lo(),
+ landmark_subtree.hi(),
+ &cached_object_backend,
+ )
+ .await
+ {
+ Ok(p) => p,
+ Err(ProofError::Tlog(s)) => return Err(s.to_string().into()),
+ Err(ProofError::Other(e)) => return Err(e.to_string().into()),
+ };
+
+ subtrees.push(SubtreeWithConsistencyProof {
+ hash: landmark_subtree_hash.0,
+ consistency_proof: consistency_proof.iter().map(|h| h.0).collect(),
+ });
+ }
+
+ Ok(subtrees)
+}
+
+/// Sign the subtree(s) covering `[old_tree_size, new_tree_size)` and store
+/// each signature in R2. Called from the checkpoint callback.
+///
+/// The subtree root hash is computed from the checkpoint tiles via
+/// `prove_subtree_consistency` so that the signature covers the actual
+/// subtree head, not the full checkpoint hash.
+#[allow(clippy::too_many_arguments)]
+async fn sign_and_cache_batch_subtrees(
+ old_tree_size: u64,
+ new_tree_size: u64,
+ checkpoint_size: u64,
+ checkpoint_hash: Hash,
+ cosigner_id: &TrustAnchorID,
+ log_id: &TrustAnchorID,
+ sk: &ietf_mtc_api::MtcSigningKey,
+ vk: &ietf_mtc_api::MtcVerifyingKey,
+ bucket: &Bucket,
+) -> Result<()> {
+ if old_tree_size >= new_tree_size {
+ return Ok(());
+ }
+ let cosigner = ietf_mtc_api::MtcCosigner::new_checkpoint(
+ cosigner_id.clone(),
+ log_id.clone(),
+ sk.clone(),
+ vk.clone(),
+ );
+ let object_bucket = CachedRoObjectBucket::new(ObjectBucket::new(bucket.clone()));
+ let (left, right) =
+ Subtree::split_interval(old_tree_size, new_tree_size).map_err(|e| e.to_string())?;
+ for subtree in [Some(left), right].into_iter().flatten() {
+ // Compute the actual subtree root hash from the checkpoint tiles.
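+ // (prove_subtree_consistency returns (consistency_proof, subtree_root_hash);
+ // only the root hash is needed here, so the proof is discarded.)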
+ let (_, subtree_hash) = match prove_subtree_consistency( + checkpoint_hash, + checkpoint_size, + subtree.lo(), + subtree.hi(), + &object_bucket, + ) + .await + { + Ok(p) => p, + Err(ProofError::Tlog(s)) => return Err(s.to_string().into()), + Err(ProofError::Other(e)) => return Err(e.to_string().into()), + }; + let sig = cosigner + .sign_subtree(subtree.lo(), subtree.hi(), &subtree_hash) + .map_err(|e| e.to_string())?; + let signed = SignedSubtree { + lo: subtree.lo(), + hi: subtree.hi(), + hash: subtree_hash.0, + checkpoint_hash: checkpoint_hash.0, + checkpoint_size, + signature: sig, + cosigner_id: cosigner_id.to_string(), + }; + bucket + .put( + subtree_sig_key(subtree.lo(), subtree.hi()), + serde_json::to_vec(&signed).map_err(|e| e.to_string())?, + ) + .execute() + .await?; + } + Ok(()) +} + +/// Sign and cache each active landmark subtree so that `build_standalone_cert` +/// can serve certificates for entries from prior batches. +/// +/// Like batch subtrees, landmark subtrees are signed with their own Merkle +/// root hash (obtained from `get_landmark_subtrees` via `prove_subtree_consistency`) +/// rather than the full checkpoint hash. +#[allow(clippy::too_many_arguments)] +async fn sign_and_cache_landmark_subtrees( + seq: &LandmarkSequence, + checkpoint_hash: Hash, + checkpoint_size: u64, + cosigner_id: &TrustAnchorID, + log_id: &TrustAnchorID, + sk: &ietf_mtc_api::MtcSigningKey, + vk: &ietf_mtc_api::MtcVerifyingKey, + landmark_subtrees: &[SubtreeWithConsistencyProof], + bucket: &Bucket, +) -> Result<()> { + let cosigner = ietf_mtc_api::MtcCosigner::new_checkpoint( + cosigner_id.clone(), + log_id.clone(), + sk.clone(), + vk.clone(), + ); + for (subtree, proof) in seq.subtrees().zip(landmark_subtrees.iter()) { + let subtree_hash = Hash(proof.hash); + let sig = cosigner + .sign_subtree(subtree.lo(), subtree.hi(), &subtree_hash) + .map_err(|e| e.to_string())?; + let signed = SignedSubtree { + lo: subtree.lo(), + hi: subtree.hi(), + hash: proof.hash, + checkpoint_hash: checkpoint_hash.0, + checkpoint_size, + signature: sig, + cosigner_id: cosigner_id.to_string(), + }; + bucket + .put( + subtree_sig_key(subtree.lo(), subtree.hi()), + serde_json::to_vec(&signed).map_err(|e| e.to_string())?, + ) + .execute() + .await?; + } + Ok(()) +} diff --git a/crates/ietf_mtc_worker/test-dev.sh b/crates/ietf_mtc_worker/test-dev.sh new file mode 100755 index 00000000..b328f06d --- /dev/null +++ b/crates/ietf_mtc_worker/test-dev.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +set -e + +bootstrap_cert_hostname="cloudflareresearch.com" +landmark_interval_secs=`jq '.logs.dev2.landmark_interval_secs' config.dev.json` +submission_url=`jq -r '.logs.dev2.submission_url' config.dev.json` + +# Get a bootstrap certificate chain. +bootstrap_cert_chain=`mktemp` +echo | openssl s_client \ + -connect ${bootstrap_cert_hostname}:443 \ + -servername ${bootstrap_cert_hostname} \ + -showcerts 2>/dev/null |\ + sed -n '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/p' \ + > ${bootstrap_cert_chain} + +spki_der=`openssl x509 -in ${bootstrap_cert_chain} -pubkey -noout |\ + openssl pkey -pubin -inform pem -outform der | base64` + +add_entry_req=`cat ${bootstrap_cert_chain} |\ + while (set -o pipefail; + openssl x509 -outform DER 2>/dev/null |\ + base64); do :; done |\ + sed '/^$/d' | sed 's/.*/"&"/' | jq -sc '{"chain":.}'` + +# Add entry for the bootstrap certificate. 
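+# (add_entry_req built above is JSON of the form {"chain":["<base64 DER cert>", ...]},
+# leaf certificate first, as produced by `openssl s_client -showcerts`.)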
+add_entry_resp=`curl -f --no-progress-meter -X POST \ + -H "Content-Type: application/json" \ + -d ${add_entry_req} \ + "${submission_url}add-entry"` + +leaf_index=`echo ${add_entry_resp} | jq '.leaf_index'` +echo "Leaf index: ${leaf_index}" + +# Wait for the next landmark to be minted. +echo "Waiting ${landmark_interval_secs}s for the next landmark" +sleep ${landmark_interval_secs} + +get_cert_req="{\"leaf_index\":${leaf_index},\"spki_der\":\"${spki_der}\"}" + +# Fetch the completed MTC. +get_cert_resp=`curl -f --no-progress-meter -X POST \ + -H "Content-Type: application/json" \ + -d ${get_cert_req} \ + "${submission_url}get-certificate"` + +landmark_id=`echo ${get_cert_resp} | jq '.landmark_id'` +echo "Landmark id: ${landmark_id}" + +echo ${get_cert_resp} | jq -r '.data' | base64 -d |\ + openssl x509 -inform DER -outform PEM diff --git a/crates/ietf_mtc_worker/wrangler.jsonc b/crates/ietf_mtc_worker/wrangler.jsonc new file mode 100644 index 00000000..0490472c --- /dev/null +++ b/crates/ietf_mtc_worker/wrangler.jsonc @@ -0,0 +1,107 @@ +{ + "name": "ietf-mtc", + "main": "build/worker/shim.mjs", + "compatibility_date": "2025-09-25", + "workers_dev": false, + "build": { + "command": "echo 'Default environment not configured. Please specify an environment with the \"-e\" flag.' && exit 1" + }, + "env": { + "dev": { + "build": { + // Change '--release' to '--dev' to compile with debug symbols. + // DEPLOY_ENV is used in build.rs to select the per-environment config. + "command": "cargo install -q worker-build@0.7.5 && DEPLOY_ENV=dev worker-build --release" + }, + "workers_dev": true, + "r2_buckets": [ + { + "bucket_name": "ietf-mtc-public-dev1", + "binding": "public_dev1" + }, + { + "bucket_name": "ietf-mtc-public-dev2", + "binding": "public_dev2" + } + ], + "durable_objects": { + "bindings": [ + { + "name": "SEQUENCER", + "class_name": "Sequencer" + }, + { + "name": "BATCHER", + "class_name": "Batcher" + }, + { + "name": "CLEANER", + "class_name": "Cleaner" + } + ] + }, + "migrations": [ + { + "tag": "v1", + "new_sqlite_classes": [ + "Sequencer", + "Batcher" + ] + }, + { + "tag": "v2", + "new_sqlite_classes": [ + "Cleaner" + ] + } + ] + }, + "draft02": { + "build": { + "command": "cargo install -q worker-build@0.7.5 && DEPLOY_ENV=prod worker-build --release" + }, + "route": { + "pattern": "draft02.cloudflareresearch.com", + "custom_domain": true + }, + "r2_buckets": [ + { + "bucket_name": "draft02-shard1", + "binding": "public_draft02_shard1" + } + ], + "durable_objects": { + "bindings": [ + { + "name": "BATCHER", + "class_name": "Batcher" + }, + { + "name": "CLEANER", + "class_name": "Cleaner" + }, + { + "name": "SEQUENCER", + "class_name": "Sequencer" + } + ] + }, + "observability": { + "logs": { + "enabled": true, + "invocation_logs": true + } + }, + "migrations": [ + { + "tag": "v1", + "new_sqlite_classes": [ + "Batcher", + "Cleaner", + "Sequencer" + ] + } + ] + } + } +} diff --git a/crates/integration_tests/Cargo.toml b/crates/integration_tests/Cargo.toml index 1dc38db1..9f9575ef 100644 --- a/crates/integration_tests/Cargo.toml +++ b/crates/integration_tests/Cargo.toml @@ -19,13 +19,19 @@ name = "integration_tests" [dependencies] # Crypto / protocol types reused from the workspace base64.workspace = true +crypto-common = "0.2" p256.workspace = true ed25519-dalek.workspace = true +ml-dsa.workspace = true +pkcs8.workspace = true +spki.workspace = true sha2.workspace = true signed_note.workspace = true static_ct_api.workspace = true bootstrap_mtc_api.workspace = true 
+ietf_mtc_api.workspace = true
 tlog_tiles.workspace = true
+signature.workspace = true
 sct_validator.workspace = true
 serde.workspace = true
 serde_json.workspace = true
@@ -45,4 +51,3 @@ const-oid.workspace = true
 reqwest = { workspace = true }
 tokio = { version = "1", features = ["full"] }
 x509_util.workspace = true
-x509-verify.workspace = true
diff --git a/crates/integration_tests/src/assertions.rs b/crates/integration_tests/src/assertions.rs
index 46842947..a1e58c34 100644
--- a/crates/integration_tests/src/assertions.rs
+++ b/crates/integration_tests/src/assertions.rs
@@ -101,8 +101,8 @@ pub fn assert_sct_signature(
 // Extract the issuer `SubjectPublicKeyInfo` DER.
 let issuer_cert = Certificate::from_der(issuer_der).context("decoding issuer certificate")?;
 let issuer_spki_der = issuer_cert
- .tbs_certificate
- .subject_public_key_info
+ .tbs_certificate()
+ .subject_public_key_info()
 .to_der()
 .context("encoding issuer SPKI")?;
@@ -149,9 +149,8 @@ pub fn assert_sct_signature(
 let ct_poison_oid = der::asn1::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11129.2.4.3");
 let leaf_cert = Certificate::from_der(leaf_der).context("parsing leaf cert")?;
 let is_precert = leaf_cert
- .tbs_certificate
- .extensions
- .as_ref()
+ .tbs_certificate()
+ .extensions()
 .is_some_and(|exts| exts.iter().any(|e| e.extn_id == ct_poison_oid));
 
 // For precerts, the worker signs over the TBS with the CT poison extension
@@ -160,7 +159,7 @@ pub fn assert_sct_signature(
 let effective_cert_der: &[u8];
 let effective_issuer_spki: &[u8];
 if is_precert {
- cert_to_sign = static_ct_api::build_precert_tbs(&leaf_cert.tbs_certificate)
+ cert_to_sign = static_ct_api::build_precert_tbs(leaf_cert.tbs_certificate())
 .context("building precert TBS for signature verification")?;
 effective_cert_der = &cert_to_sign;
 effective_issuer_spki = &issuer_spki_der;
diff --git a/crates/integration_tests/src/client.rs b/crates/integration_tests/src/client.rs
index 7b5214a7..dcbac698 100644
--- a/crates/integration_tests/src/client.rs
+++ b/crates/integration_tests/src/client.rs
@@ -210,7 +210,7 @@ impl CtClient {
 .with_context(|| format!("reading body for {path}"))
 }
 
- /// `GET /logs/:log/{path}` — returns the HTTP status code (does not fail on 4xx/5xx).
+ /// `GET /logs/:log/{path}` — returns the HTTP status code without failing on 4xx/5xx.
 pub async fn get_status(&self, path: &str) -> Result<u16> {
 let resp = self
 .client
@@ -222,6 +222,6 @@
 }
 }
 
 // ===========================================================================
-// MTC client
+// Bootstrap MTC client
 // ===========================================================================
@@ -407,6 +407,190 @@ impl BootstrapMtcClient {
 }
 }
 
+// ===========================================================================
+// IETF MTC client
+// ===========================================================================
+
+/// Log shard name to use for IETF MTC tests. Defaults to `dev2` (fast landmark interval).
+#[must_use]
+pub fn ietf_mtc_log_name() -> String {
+ std::env::var("IETF_MTC_LOG_NAME").unwrap_or_else(|_| "dev2".to_string())
+}
+
+/// Add-entry response from the IETF MTC worker.
+#[serde_as]
+#[derive(Deserialize, Debug, Clone)]
+pub struct IetfMtcAddEntryResponse {
+ /// DER-encoded standalone MTC certificate, base64-encoded.
+ #[serde_as(as = "Base64")]
+ pub certificate: Vec<u8>,
+}
+
+/// Get-certificate response from the IETF MTC worker.
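+///
+/// `data` is the DER-encoded landmark-relative MTC certificate, and
+/// `landmark_id` identifies the landmark its inclusion proof is relative to.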
+#[serde_as]
+#[derive(Deserialize, Debug)]
+pub struct IetfMtcGetCertificateResponse {
+ #[serde_as(as = "Base64")]
+ pub data: Vec<u8>,
+ pub landmark_id: usize,
+}
+
+/// HTTP client bound to a particular IETF MTC log shard.
+pub struct IetfMtcClient {
+ client: reqwest::Client,
+ pub log: String,
+}
+
+impl IetfMtcClient {
+ /// Creates a new client targeting the given IETF MTC log shard.
+ #[must_use]
+ pub fn new(log: impl Into<String>) -> Self {
+ Self {
+ client: reqwest::Client::new(),
+ log: log.into(),
+ }
+ }
+
+ /// Creates a client for the default IETF MTC log (from `IETF_MTC_LOG_NAME` env / `dev2`).
+ #[must_use]
+ pub fn default_log() -> Self {
+ Self::new(ietf_mtc_log_name())
+ }
+
+ fn url(&self, path: &str) -> String {
+ format!("{}/{}", log_url(&self.log), path)
+ }
+
+ /// `GET /logs/:log/metadata`
+ pub async fn get_metadata(&self) -> Result<IetfMtcMetadata> {
+ let resp = self
+ .client
+ .get(self.url("metadata"))
+ .send()
+ .await
+ .context("GET metadata")?;
+ let status = resp.status();
+ if !status.is_success() {
+ bail!("GET metadata returned {status}");
+ }
+ resp.json().await.context("parsing metadata response")
+ }
+
+ /// `POST /logs/:log/add-entry` — sends a base64url-encoded DER CSR.
+ pub async fn add_entry(
+ &self,
+ csr_der: Vec<u8>,
+ ) -> Result<(u16, Option<IetfMtcAddEntryResponse>)> {
+ use base64::prelude::*;
+ #[derive(serde::Serialize)]
+ struct Req {
+ csr: String,
+ }
+ let resp = self
+ .client
+ .post(self.url("add-entry"))
+ .json(&Req {
+ csr: BASE64_URL_SAFE_NO_PAD.encode(&csr_der),
+ })
+ .send()
+ .await
+ .context("POST add-entry")?;
+ let status = resp.status().as_u16();
+ if status == 200 {
+ let body: IetfMtcAddEntryResponse = resp
+ .json()
+ .await
+ .context("parsing add-entry response")?;
+ Ok((status, Some(body)))
+ } else {
+ Ok((status, None))
+ }
+ }
+
+ /// `POST /logs/:log/get-certificate`
+ pub async fn get_certificate(
+ &self,
+ leaf_index: u64,
+ spki_der: Vec<u8>,
+ ) -> Result<(u16, Option<IetfMtcGetCertificateResponse>)> {
+ #[serde_as]
+ #[derive(serde::Serialize)]
+ struct Req {
+ leaf_index: u64,
+ #[serde_as(as = "Base64")]
+ spki_der: Vec<u8>,
+ }
+ let resp = self
+ .client
+ .post(self.url("get-certificate"))
+ .json(&Req { leaf_index, spki_der })
+ .send()
+ .await
+ .context("POST get-certificate")?;
+ let status = resp.status().as_u16();
+ if status == 200 {
+ let body: IetfMtcGetCertificateResponse = resp
+ .json()
+ .await
+ .context("parsing get-certificate response")?;
+ Ok((status, Some(body)))
+ } else {
+ Ok((status, None))
+ }
+ }
+
+ /// `GET /logs/:log/checkpoint` — raw bytes.
+ pub async fn get_checkpoint(&self) -> Result<Vec<u8>> {
+ self.get_raw("checkpoint").await
+ }
+
+ /// `GET /logs/:log/{path}` — raw bytes.
+ pub async fn get_raw(&self, path: &str) -> Result<Vec<u8>> {
+ let resp = self
+ .client
+ .get(self.url(path))
+ .send()
+ .await
+ .with_context(|| format!("GET {path}"))?;
+ let status = resp.status();
+ if !status.is_success() {
+ bail!("GET {path} returned {status}");
+ }
+ resp.bytes()
+ .await
+ .map(|b| b.to_vec())
+ .with_context(|| format!("reading body for {path}"))
+ }
+
+ /// `GET /logs/:log/{path}` — returns the HTTP status code without failing on 4xx/5xx.
+ pub async fn get_status(&self, path: &str) -> Result<u16> {
+ let resp = self
+ .client
+ .get(self.url(path))
+ .send()
+ .await
+ .with_context(|| format!("GET {path} (status probe)"))?;
+ Ok(resp.status().as_u16())
+ }
+
+ /// Fetch a `SignedSubtree` from R2 for the subtree covering `[lo, hi)`.
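+ ///
+ /// Returns `Ok(None)` if the object is missing; any fetch error is
+ /// treated as absence.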
+ pub async fn get_signed_subtree(
+ &self,
+ lo: u64,
+ hi: u64,
+ ) -> Result<Option<ietf_mtc_api::SignedSubtree>> {
+ let key = ietf_mtc_api::subtree_sig_key(lo, hi);
+ match self.get_raw(&key).await {
+ Ok(bytes) => {
+ let s: ietf_mtc_api::SignedSubtree =
+ serde_json::from_slice(&bytes).context("parsing SignedSubtree")?;
+ Ok(Some(s))
+ }
+ Err(_) => Ok(None),
+ }
+ }
+}
+
 // ---------------------------------------------------------------------------
 // Helpers
 // ---------------------------------------------------------------------------
diff --git a/crates/integration_tests/src/fixtures.rs b/crates/integration_tests/src/fixtures.rs
index b28a35f2..7aa2f560 100644
--- a/crates/integration_tests/src/fixtures.rs
+++ b/crates/integration_tests/src/fixtures.rs
@@ -30,23 +30,24 @@ use std::str::FromStr;
 
 use anyhow::{Context, Result};
 use const_oid::AssociatedOid;
+use crypto_common::Generate;
 use der::{
 asn1::{Ia5String, Null},
 Decode, Encode, Length, Writer,
 };
 use p256::{ecdsa::SigningKey, pkcs8::DecodePrivateKey};
-use rand::rngs::OsRng;
 use serde::Deserialize;
 use x509_cert::{
- builder::{Builder, CertificateBuilder, Profile},
- certificate::Certificate,
+ builder::profile::BuilderProfile,
+ builder::{Builder, CertificateBuilder},
+ certificate::{Certificate, TbsCertificate},
 ext::{
 pkix::{name::GeneralName, ExtendedKeyUsage, SubjectAltName},
- AsExtension, Extension,
+ Criticality, Extension,
 },
 name::Name,
 serial_number::SerialNumber,
- spki::SubjectPublicKeyInfoOwned,
+ spki::{SubjectPublicKeyInfoOwned, SubjectPublicKeyInfoRef},
 time::Validity,
 };
@@ -67,8 +68,8 @@ impl AssociatedOid for CtPoisonExtension {
 const OID: der::asn1::ObjectIdentifier = CT_PRECERT_POISON_OID;
 }
 
-// `AsExtension` requires `AssociatedOid + der::Encode`.
-// We implement `Encode` directly: the extension value is ASN.1 NULL.
+// x509-cert 0.3: ToExtension is implemented for `&T` when T: Criticality + AssociatedOid + Encode.
+// We implement Encode (value = ASN.1 NULL) and Criticality (always critical).
 impl Encode for CtPoisonExtension {
 fn encoded_len(&self) -> der::Result<Length> {
 Null.encoded_len()
@@ -78,8 +79,8 @@
 }
 }
 
-impl AsExtension for CtPoisonExtension {
- fn critical(&self, _subject: &x509_cert::name::RdnSequence, _extensions: &[Extension]) -> bool {
+impl Criticality for CtPoisonExtension {
+ fn criticality(&self, _subject: &Name, _extensions: &[Extension]) -> bool {
 true
 }
 }
@@ -255,6 +256,64 @@ pub fn make_bootstrap_mtc_chain(log_name: &str) -> Result {
 })
 }
 
+// ---------------------------------------------------------------------------
+// IETF MTC fixtures
+// ---------------------------------------------------------------------------
+
+// NOTE: compile-time dependency on ietf_mtc_worker/config.dev.json. If that file is
+// moved or renamed, the error will surface here rather than in ietf_mtc_worker.
+const IETF_MTC_DEV_CONFIG_JSON: &str = include_str!("../../ietf_mtc_worker/config.dev.json");
+
+/// A PKCS#10 CSR generated for a specific IETF MTC log shard.
+pub struct IetfMtcCsr {
+ /// DER-encoded PKCS#10 CSR.
+ pub csr_der: Vec<u8>,
+ /// DER-encoded `SubjectPublicKeyInfo` of the key in the CSR, for use with
+ /// `POST /get-certificate`.
+ pub spki_der: Vec<u8>,
+}
+
+/// Generate a PKCS#10 CSR suitable for `POST /logs/:log/add-entry` on the
+/// IETF MTC worker.
+///
+/// The CSR key is a fresh P-256 key pair. The subject is a simple test DN.
+/// A `subjectAltName` extension with one DNS name is included.
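+///
+/// The log name is only used to validate against the bundled dev config;
+/// the CSR itself does not depend on the chosen shard.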
+pub fn make_ietf_mtc_csr(log_name: &str) -> Result<IetfMtcCsr> {
+ use x509_cert::builder::{Builder, RequestBuilder};
+
+ // Validate the log name exists in the dev config (catches misconfiguration early).
+ let config: MtcDevConfig = serde_json::from_str(IETF_MTC_DEV_CONFIG_JSON)
+ .context("parsing ietf_mtc_worker config.dev.json")?;
+ config.logs.get(log_name).with_context(|| {
+ format!("log '{log_name}' not found in ietf_mtc_worker config.dev.json")
+ })?;
+
+ let leaf_key = SigningKey::generate_from_rng(&mut rand::rng());
+ let leaf_spki = SubjectPublicKeyInfoOwned::from_key(leaf_key.verifying_key())
+ .context("encoding leaf SPKI")?;
+ let spki_der = leaf_spki.to_der().context("encoding leaf SPKI to DER")?;
+
+ let subject = Name::from_str("CN=integration-test.example.com,O=Test,C=US")
+ .context("building subject name")?;
+
+ let mut builder = RequestBuilder::new(subject).context("creating RequestBuilder")?;
+
+ // Add a subjectAltName extension.
+ let san = x509_cert::ext::pkix::SubjectAltName(vec![
+ x509_cert::ext::pkix::name::GeneralName::DnsName(
+ der::asn1::Ia5String::new("integration-test.example.com").context("building SAN")?,
+ ),
+ ]);
+ builder.add_extension(&san).context("adding SAN")?;
+
+ let csr = builder
+ .build::<_, p256::ecdsa::DerSignature>(&leaf_key)
+ .context("building CSR")?;
+ let csr_der = csr.to_der().context("encoding CSR to DER")?;
+
+ Ok(IetfMtcCsr { csr_der, spki_der })
+}
+
 // ---------------------------------------------------------------------------
 // Certificate construction
 // ---------------------------------------------------------------------------
@@ -271,6 +330,29 @@ fn build_cert(
+/// Minimal leaf certificate profile for integration tests.
+struct LeafProfile {
+ issuer: Name,
+ subject: Name,
+}
+
+impl BuilderProfile for LeafProfile {
+ fn get_issuer(&self, _subject: &Name) -> Name {
+ self.issuer.clone()
+ }
+ fn get_subject(&self) -> Name {
+ self.subject.clone()
+ }
+ fn build_extensions(
+ &self,
+ _spk: SubjectPublicKeyInfoRef<'_>,
+ _issuer_spk: SubjectPublicKeyInfoRef<'_>,
+ _tbs: &TbsCertificate,
+ ) -> Result<Vec<Extension>, x509_cert::builder::Error> {
+ Ok(vec![])
+ }
+}
+
 /// Like `build_cert`, but also returns the DER-encoded `SubjectPublicKeyInfo` of
 /// the leaf certificate. Used by MTC tests that need to call `get-certificate`.
 fn build_cert_with_spki(
 ca_key: &SigningKey,
 not_before: chrono::DateTime<Utc>,
@@ -281,40 +363,30 @@
 let serial = SerialNumber::from(rand::random::<u64>());
 
- let validity = Validity {
- not_before: Time::GeneralTime(der::asn1::GeneralizedTime::from_date_time(to_der_datetime(
+ let validity = Validity::new(
+ Time::GeneralTime(der::asn1::GeneralizedTime::from_date_time(to_der_datetime(
 not_before,
 )?)),
- not_after: Time::GeneralTime(der::asn1::GeneralizedTime::from_date_time(to_der_datetime(
+ Time::GeneralTime(der::asn1::GeneralizedTime::from_date_time(to_der_datetime(
 not_after,
 )?)),
- };
+ );
 
 let subject = Name::from_str("CN=integration-test.example.com,O=Test,C=US")
 .context("building subject name")?;
 
 // Generate a fresh key for this leaf.
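+ // (rand 0.9 / crypto-common 0.2: `SigningKey::random(&mut OsRng)` is
+ // replaced by the `Generate` trait with `rand::rng()`.)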
- let leaf_key = SigningKey::random(&mut OsRng); - let leaf_spki = SubjectPublicKeyInfoOwned::from_key(*leaf_key.verifying_key()) + let leaf_key = SigningKey::generate_from_rng(&mut rand::rng()); + let leaf_spki = SubjectPublicKeyInfoOwned::from_key(leaf_key.verifying_key()) .context("encoding leaf SPKI")?; let leaf_spki_der = leaf_spki.to_der().context("encoding leaf SPKI to DER")?; - // leaf_spki was moved into CertificateBuilder::new; we keep the DER copy above. let ca_cert = Certificate::from_der(&ca_cert_der_bytes()).context("parsing CA cert")?; + let issuer = ca_cert.tbs_certificate().subject().clone(); - let mut builder = CertificateBuilder::new( - Profile::Leaf { - issuer: ca_cert.tbs_certificate.subject.clone(), - enable_key_agreement: false, - enable_key_encipherment: false, - }, - serial, - validity, - subject, - leaf_spki, - ca_key, - ) - .context("creating CertificateBuilder")?; + let profile = LeafProfile { issuer, subject }; + let mut builder = CertificateBuilder::new(profile, serial, validity, leaf_spki) + .context("creating CertificateBuilder")?; let san = SubjectAltName(vec![GeneralName::DnsName( Ia5String::new("integration-test.example.com").context("building SAN")?, @@ -334,7 +406,7 @@ fn build_cert_with_spki( } let cert_der = builder - .build_with_rng::(&mut OsRng) + .build_with_rng::<_, p256::ecdsa::DerSignature, _>(ca_key, &mut rand::rng()) .context("signing certificate")? .to_der() .context("encoding certificate to DER")?; diff --git a/crates/integration_tests/src/lib.rs b/crates/integration_tests/src/lib.rs index 09082708..b56f18c9 100644 --- a/crates/integration_tests/src/lib.rs +++ b/crates/integration_tests/src/lib.rs @@ -7,7 +7,8 @@ //! Set `BASE_URL` to point at the server; defaults to `http://localhost:8787`. //! //! CT tests: set `LOG_NAME` to choose the log shard (default: `dev2026h1a`). -//! MTC tests: set `BOOTSTRAP_MTC_LOG_NAME` to choose the log shard (default: `dev2`). +//! Bootstrap MTC tests: set `BOOTSTRAP_MTC_LOG_NAME` to choose the log shard (default: `dev2`). +//! IETF MTC tests: set `IETF_MTC_LOG_NAME` to choose the log shard (default: `dev2`). pub mod assertions; pub mod client; diff --git a/crates/integration_tests/tests/bootstrap_mtc_api.rs b/crates/integration_tests/tests/bootstrap_mtc_api.rs index d491496f..1da90ee7 100644 --- a/crates/integration_tests/tests/bootstrap_mtc_api.rs +++ b/crates/integration_tests/tests/bootstrap_mtc_api.rs @@ -113,7 +113,6 @@ async fn ensure_initialized() { /// DER-encoded X.509 certificates. #[tokio::test] async fn get_roots_returns_valid_certs() { - ensure_initialized().await; let client = BootstrapMtcClient::default_log(); let roots = client.get_roots().await.expect("get-roots failed"); diff --git a/crates/integration_tests/tests/ietf_mtc_api.rs b/crates/integration_tests/tests/ietf_mtc_api.rs new file mode 100644 index 00000000..9e2f774b --- /dev/null +++ b/crates/integration_tests/tests/ietf_mtc_api.rs @@ -0,0 +1,556 @@ +// Copyright (c) 2025 Cloudflare, Inc. +// Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause + +//! Integration tests for the IETF MTC API (`ietf_mtc_worker`). +//! +//! These tests require a running `wrangler dev` instance. +//! Set `BASE_URL` to point at the server; defaults to `http://localhost:8787`. +//! Set `IETF_MTC_LOG_NAME` to choose which log shard; defaults to `dev2`. +//! +//! `dev2` is preferred because its `landmark_interval_secs: 10` makes the +//! 
landmark-dependent `get_certificate` test feasible without a long wait.
+//!
+//! # Running
+//!
+//! ```text
+//! # From crates/ietf_mtc_worker/:
+//! npx wrangler -e=dev dev &
+//!
+//! # From workspace root:
+//! cargo test -p integration_tests --test ietf_mtc_api
+//! ```
+
+use std::time::Duration;
+
+use ietf_mtc_api::{MtcVerifyingKey, ParsedMtcProof, TrustAnchorID};
+use integration_tests::{
+ client::{IetfMtcClient, ietf_mtc_log_name},
+ fixtures::make_ietf_mtc_csr,
+};
+use tlog_tiles::{evaluate_subtree_inclusion_proof, record_hash, Hash, Subtree};
+use tokio::sync::OnceCell;
+use x509_cert::{der::Decode, Certificate};
+
+/// OID for the MTC proof algorithm (id-alg-mtcproof).
+const ID_ALG_MTCPROOF: der::asn1::ObjectIdentifier =
+ der::asn1::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.44363.47.0");
+
+/// Extract the `MTCProof` bytes from a certificate's `signatureValue`.
+///
+/// The `signatureValue` is a DER `BIT STRING`. `as_bytes()` returns the
+/// content octets (it yields `Some` only when there are zero unused bits),
+/// which are exactly the `MTCProof` bytes.
+fn extract_mtc_proof_bytes(cert: &Certificate) -> Vec<u8> {
+ cert.signature()
+ .as_bytes()
+ .expect("signatureValue BIT STRING must have 0 unused bits")
+ .to_vec()
+}
+
+/// Compute `entry_hash` from a certificate following draft-ietf-plants-merkle-tree-certs §7.2
+/// steps 4-5.
+///
+/// Steps 4a-4c reconstruct the `TBSCertificateLogEntry` from the certificate:
+/// - Copy most TBS fields verbatim (4a)
+/// - Set `subjectPublicKeyAlgorithm` from the SPKI `algorithm` field (4b)
+/// - Set `subjectPublicKeyInfoHash` to HASH(DER(subjectPublicKeyInfo)) (4c)
+///
+/// Step 5: construct a `MerkleTreeCertEntry` of type `tbs_cert_entry` and compute
+/// `entry_hash = MTH({entry}) = HASH(0x00 || entry)`, i.e., `record_hash(entry_bytes)`.
+///
+/// Also asserts that the certificate's serial number encodes `leaf_index` (§7.2 step 3).
+fn compute_entry_hash(cert: &Certificate, leaf_index: u64) -> Hash {
+ use der::Encode;
+ use ietf_mtc_api::{MerkleTreeCertEntry, TbsCertificateLogEntry};
+ use sha2::Digest;
+
+ // §7.2 step 3: serial number encodes `index`.
+ let tbs = cert.tbs_certificate();
+ let serial_bytes = tbs.serial_number().as_bytes();
+ let mut padded = [0u8; 8];
+ let len = serial_bytes.len().min(8);
+ padded[8 - len..].copy_from_slice(&serial_bytes[serial_bytes.len() - len..]);
+ assert_eq!(
+ u64::from_be_bytes(padded),
+ leaf_index,
+ "serial_number must encode leaf_index"
+ );
+
+ // §7.2 steps 4a-4c: reconstruct TBSCertificateLogEntry.
+ let spki_der = tbs.subject_public_key_info().to_der().expect("encoding SPKI");
+ let spki_hash =
+ der::asn1::OctetString::new(&sha2::Sha256::digest(&spki_der)[..]).expect("OctetString");
+ let log_entry = TbsCertificateLogEntry {
+ version: tbs.version(),
+ issuer: tbs.issuer().clone(),
+ validity: *tbs.validity(),
+ subject: tbs.subject().clone(),
+ // §7.2 step 4b
+ subject_public_key_info_algorithm: tbs.subject_public_key_info().algorithm.clone(),
+ // §7.2 step 4c
+ subject_public_key_info_hash: spki_hash,
+ issuer_unique_id: tbs.issuer_unique_id().clone(),
+ subject_unique_id: tbs.subject_unique_id().clone(),
+ extensions: tbs.extensions().cloned(),
+ };
+
+ // §7.2 step 5: entry_hash = MTH({entry}) = record_hash(entry_bytes).
+ let entry_bytes = MerkleTreeCertEntry::TbsCertEntry(log_entry)
+ .encode()
+ .expect("encoding MerkleTreeCertEntry");
+ record_hash(&entry_bytes)
+}
+
+/// Assert that `cert_der` is a valid MTC certificate:
+/// - Parses as a valid X.509 DER certificate
+/// - Uses `id-alg-mtcproof` as the signature algorithm
+/// - Has a non-empty `signatureValue` (the encoded `MTCProof`)
+/// - Has a non-empty subject
+fn assert_valid_mtc_cert(cert_der: &[u8], context: &str) -> Certificate {
+ let cert = Certificate::from_der(cert_der)
+ .unwrap_or_else(|e| panic!("{context}: not a valid DER certificate: {e}"));
+
+ assert_eq!(
+ cert.signature_algorithm().oid,
+ ID_ALG_MTCPROOF,
+ "{context}: expected id-alg-mtcproof signature algorithm, got {}",
+ cert.signature_algorithm().oid
+ );
+ assert_eq!(
+ cert.tbs_certificate().signature().oid,
+ ID_ALG_MTCPROOF,
+ "{context}: TBSCertificate.signature algorithm mismatch"
+ );
+ assert!(
+ !cert.signature().as_bytes().unwrap_or(&[]).is_empty(),
+ "{context}: signatureValue (MTCProof) must be non-empty"
+ );
+ assert!(
+ !cert.tbs_certificate().subject().as_ref().is_empty(),
+ "{context}: subject must be non-empty"
+ );
+
+ cert
+}
+
+// ---------------------------------------------------------------------------
+// Initialization guard
+// ---------------------------------------------------------------------------
+
+/// Ensures the IETF MTC worker is fully live and has sequenced at least one
+/// entry before any test that depends on sequencer state runs.
+///
+/// Unlike the bootstrap MTC worker, there is no CCADB roots `OnceCell` to
+/// worry about, so we go straight to `add-entry` as the readiness probe.
+static INITIALIZED: OnceCell<()> = OnceCell::const_new();
+
+async fn ensure_initialized() {
+ INITIALIZED
+ .get_or_init(|| async {
+ const MAX_ATTEMPTS: u32 = 30;
+ const RETRY_DELAY: Duration = Duration::from_secs(1);
+
+ let log_name = ietf_mtc_log_name();
+ let client = IetfMtcClient::new(&log_name);
+ let csr = make_ietf_mtc_csr(&log_name).expect("make_ietf_mtc_csr for warmup");
+
+ for attempt in 0..MAX_ATTEMPTS {
+ match client.add_entry(csr.csr_der.clone()).await {
+ Ok((200, _)) => return,
+ Ok((status, _)) => {
+ eprintln!(
+ "ensure_initialized: add-entry returned {status} \
+ (attempt {}/{MAX_ATTEMPTS}), retrying…",
+ attempt + 1
+ );
+ }
+ Err(e) => {
+ eprintln!(
+ "ensure_initialized: add-entry error: {e} \
+ (attempt {}/{MAX_ATTEMPTS}), retrying…",
+ attempt + 1
+ );
+ }
+ }
+ tokio::time::sleep(RETRY_DELAY).await;
+ }
+
+ panic!("ietf_mtc_worker failed to initialize after {MAX_ATTEMPTS}s");
+ })
+ .await;
+}
+
+/// Fetch metadata and build an `MtcVerifyingKey` for the log's cosigner.
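+///
+/// The key type (Ed25519 or ML-DSA-44) is chosen by the SPKI algorithm OID in
+/// the metadata; the cosigner and log `TrustAnchorID`s are parsed alongside it.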
+async fn fetch_verifying_key(client: &IetfMtcClient) -> (MtcVerifyingKey, TrustAnchorID, TrustAnchorID) {
+ use const_oid::db::{fips204::ID_ML_DSA_44, rfc8410::ID_ED_25519};
+ use spki::SubjectPublicKeyInfoRef;
+ use std::str::FromStr;
+
+ let meta = client.get_metadata().await.expect("metadata");
+ // Determine the algorithm from the SPKI algorithm identifier OID.
+ let spki = SubjectPublicKeyInfoRef::try_from(meta.cosigner_public_key.as_ref())
+ .expect("cosigner_public_key must be a valid SPKI");
+ let vk = match spki.algorithm.oid {
+ ID_ED_25519 => {
+ use pkcs8::DecodePublicKey;
+ let ed_vk = ed25519_dalek::VerifyingKey::from_public_key_der(&meta.cosigner_public_key)
+ .expect("cosigner_public_key must be a valid Ed25519 SPKI");
+ MtcVerifyingKey::Ed25519(ed_vk)
+ }
+ ID_ML_DSA_44 => {
+ let ml_dsa_vk = ml_dsa::VerifyingKey::<ml_dsa::MlDsa44>::try_from(spki)
+ .expect("cosigner_public_key must be a valid ML-DSA-44 SPKI");
+ MtcVerifyingKey::MlDsa44(ml_dsa_vk)
+ }
+ oid => panic!("unsupported cosigner algorithm OID: {oid}"),
+ };
+ let cosigner_id = TrustAnchorID::from_str(&meta.cosigner_id).expect("cosigner_id");
+ let log_id = TrustAnchorID::from_str(&meta.log_id).expect("log_id");
+ (vk, cosigner_id, log_id)
+}
+
+/// Verify a standalone MTC certificate following draft-ietf-plants-merkle-tree-certs §7.2.
+///
+/// Steps performed:
+/// 1. Check the `id-alg-mtcproof` algorithm (already done by `assert_valid_mtc_cert`).
+/// 2. Decode `signatureValue` as an `MTCProof`.
+/// 3. Check `index` is not revoked (not implemented — no revocation list in test).
+/// 4-5. Reconstruct `TBSCertificateLogEntry` and compute `entry_hash`.
+/// 6. Evaluate the inclusion proof to get `expected_subtree_hash` (§4.3.2).
+/// 7. No trusted subtree predistributed in test — proceed to step 8.
+/// 8. Verify cosignatures satisfy relying party requirements (≥1 valid cosignature).
+async fn verify_standalone_cert(
+ _client: &IetfMtcClient,
+ cert: &Certificate,
+ leaf_index: u64,
+ vk: &MtcVerifyingKey,
+ cosigner_id: &TrustAnchorID,
+ log_id: &TrustAnchorID,
+) {
+ // §7.2 step 2: decode signatureValue as MTCProof.
+ let proof_bytes = extract_mtc_proof_bytes(cert);
+ let proof = ParsedMtcProof::from_bytes(&proof_bytes)
+ .expect("MTCProof must parse from signatureValue");
+
+ // §7.2 step 8: standalone certs must carry cosignatures.
+ assert!(
+ !proof.signatures.is_empty(),
+ "standalone cert must have at least one cosignature (§7.2 step 8)"
+ );
+
+ let subtree = Subtree::new(proof.start, proof.end)
+ .expect("MTCProof subtree interval must be valid");
+ assert!(
+ subtree.lo() <= leaf_index && leaf_index < subtree.hi(),
+ "leaf_index {leaf_index} must be within subtree [{}, {})",
+ subtree.lo(),
+ subtree.hi()
+ );
+
+ // §7.2 steps 4-5: compute entry_hash from the certificate.
+ let entry_hash = compute_entry_hash(cert, leaf_index);
+
+ // §7.2 step 6: evaluate the inclusion proof to get expected_subtree_hash (§4.3.2).
+ let expected_subtree_hash = evaluate_subtree_inclusion_proof(
+ &proof.inclusion_proof,
+ &subtree,
+ leaf_index,
+ entry_hash,
+ )
+ .expect("inclusion proof evaluation must succeed");
+
+ // §7.2 step 8: verify cosignatures against expected_subtree_hash.
+ proof
+ .verify_cosignature(&expected_subtree_hash, vk, cosigner_id, log_id)
+ .expect("at least one cosignature must be valid");
+}
+
+/// Verify a landmark-relative MTC certificate following draft-ietf-plants-merkle-tree-certs §7.2.
+///
+/// Landmark-relative certs have no inline cosignatures (§6.3).
In a real relying +/// party, the subtree hash would be predistributed (§7.4). In the test, we fetch +/// the `SignedSubtree` from R2 as a stand-in for predistributed trusted subtree info, +/// and also verify the CA's cosignature over that subtree hash. +/// +/// Steps performed: +/// 1. Check `id-alg-mtcProof` (already done). +/// 2. Decode `signatureValue` as `MTCProof`. +/// 4-5. Reconstruct `TBSCertificateLogEntry` and compute `entry_hash`. +/// 6. Evaluate the inclusion proof to get `expected_subtree_hash` (§4.3.2). +/// 7. Compare `expected_subtree_hash` against the trusted subtree hash (from R2). +async fn verify_landmark_relative_cert( + client: &IetfMtcClient, + cert: &Certificate, + leaf_index: u64, + vk: &MtcVerifyingKey, + cosigner_id: &TrustAnchorID, + log_id: &TrustAnchorID, +) { + // §7.2 step 2: decode signatureValue as MTCProof. + let proof_bytes = extract_mtc_proof_bytes(cert); + let proof = ParsedMtcProof::from_bytes(&proof_bytes) + .expect("MTCProof must parse from signatureValue"); + + // §6.3 / §7.2 step 7: landmark-relative certs carry no inline cosignatures. + assert!( + proof.signatures.is_empty(), + "landmark-relative cert must have no inline cosignatures (§6.3)" + ); + + let subtree = Subtree::new(proof.start, proof.end) + .expect("MTCProof subtree interval must be valid"); + + // §7.2 steps 4-5: compute entry_hash. + let entry_hash = compute_entry_hash(cert, leaf_index); + + // §7.2 step 6: evaluate the inclusion proof (§4.3.2). + let expected_subtree_hash = evaluate_subtree_inclusion_proof( + &proof.inclusion_proof, + &subtree, + leaf_index, + entry_hash, + ) + .expect("inclusion proof evaluation must succeed"); + + // §7.2 step 7: compare against the trusted subtree hash. + // In production, this hash is predistributed. In the test, we fetch it + // from R2 and also verify the CA's cosignature over it. + let signed: ietf_mtc_api::SignedSubtree = client + .get_signed_subtree(proof.start, proof.end) + .await + .expect("get_signed_subtree request") + .unwrap_or_else(|| { + panic!("SignedSubtree not found for [{}, {})", proof.start, proof.end) + }); + let trusted_subtree_hash = Hash(signed.hash); + assert_eq!( + expected_subtree_hash, trusted_subtree_hash, + "evaluated subtree hash must match the trusted (predistributed) subtree hash" + ); + + // Additionally verify the CA's cosignature over the trusted hash, + // confirming the predistributed value is authentic. + use std::str::FromStr; + let signed_cosigner_id = + TrustAnchorID::from_str(&signed.cosigner_id).expect("valid cosigner_id in SignedSubtree"); + let r2_proof = ParsedMtcProof { + start: signed.lo, + end: signed.hi, + inclusion_proof: vec![], + signatures: std::collections::HashMap::from([(signed_cosigner_id, signed.signature.clone())]), + }; + r2_proof + .verify_cosignature(&trusted_subtree_hash, vk, cosigner_id, log_id) + .expect("CA cosignature over trusted subtree hash must be valid"); +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +/// `GET /logs/:log/metadata` returns 200 with all required fields. 
+#[tokio::test] +async fn metadata_returns_valid_fields() { + let client = IetfMtcClient::default_log(); + let meta = client.get_metadata().await.expect("metadata failed"); + + assert!(!meta.log_id.is_empty(), "log_id must be non-empty"); + assert!( + meta.log_id.contains('.'), + "log_id must be a dotted-decimal OID, got: {}", + meta.log_id + ); + assert!(!meta.cosigner_id.is_empty(), "cosigner_id must be non-empty"); + // cosigner_public_key is a DER-encoded SubjectPublicKeyInfo. The algorithm + // identifier is included so clients can determine the signing algorithm. + assert!( + !meta.cosigner_public_key.is_empty(), + "cosigner_public_key must be non-empty" + ); + assert!(!meta.submission_url.is_empty(), "submission_url must be set"); +} + +/// Requesting an unknown log name returns 400. +#[tokio::test] +async fn unknown_log_returns_400() { + let client = IetfMtcClient::new("this-log-does-not-exist"); + let status = client + .get_status("metadata") + .await + .expect("GET request"); + assert_eq!(status, 400, "expected 400 for unknown log"); +} + +/// `POST /logs/:log/add-entry` with a valid CSR returns 200 with a +/// structurally valid standalone MTC certificate. +#[tokio::test] +async fn add_entry_returns_valid_response() { + ensure_initialized().await; + let client = IetfMtcClient::default_log(); + let csr = make_ietf_mtc_csr(&client.log).expect("generating CSR"); + + let (status, resp) = client + .add_entry(csr.csr_der) + .await + .expect("add-entry request"); + assert_eq!(status, 200, "expected 200 from add-entry"); + let resp = resp.unwrap(); + + // The response is a DER-encoded standalone MTC certificate. + let cert = assert_valid_mtc_cert(&resp.certificate, "add-entry standalone cert"); + let serial_bytes = cert.tbs_certificate().serial_number().as_bytes(); + let mut padded = [0u8; 8]; + let len = serial_bytes.len().min(8); + padded[8 - len..].copy_from_slice(&serial_bytes[serial_bytes.len() - len..]); + let leaf_index = u64::from_be_bytes(padded); + + // Full signature and inclusion proof verification. + let (vk, cosigner_id, log_id) = fetch_verifying_key(&client).await; + verify_standalone_cert(&client, &cert, leaf_index, &vk, &cosigner_id, &log_id).await; +} + +/// `POST` with garbage bytes (not a valid CSR) returns 400. +#[tokio::test] +async fn add_entry_with_invalid_csr_returns_400() { + ensure_initialized().await; + let client = IetfMtcClient::default_log(); + let (status, _) = client + .add_entry(b"this is not a valid CSR".to_vec()) + .await + .expect("add-entry request"); + assert_eq!(status, 400, "expected 400 for invalid CSR"); +} + +/// After `add-entry`, the certificate's serial number (= leaf_index) is covered +/// by the checkpoint. +#[tokio::test] +async fn add_entry_appears_in_checkpoint() { + ensure_initialized().await; + let client = IetfMtcClient::default_log(); + let csr = make_ietf_mtc_csr(&client.log).expect("generating CSR"); + + let (status, resp) = client + .add_entry(csr.csr_der) + .await + .expect("add-entry request"); + assert_eq!(status, 200, "expected 200 from add-entry"); + let resp = resp.unwrap(); + + // The leaf_index is encoded as the certificate's serial number. 
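+ // (Serials shorter than 8 bytes are left-padded with zeros before the
+ // big-endian decode.)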
+ let cert = assert_valid_mtc_cert(&resp.certificate, "add-entry standalone cert"); + let serial_bytes = cert.tbs_certificate().serial_number().as_bytes(); + let mut padded = [0u8; 8]; + let len = serial_bytes.len().min(8); + padded[8 - len..].copy_from_slice(&serial_bytes[serial_bytes.len() - len..]); + let leaf_index = u64::from_be_bytes(padded); + let min_size = leaf_index + 1; + + const MAX_RETRIES: u32 = 12; + const RETRY_DELAY_MS: u64 = 500; + let mut last_size = 0u64; + + for attempt in 0..MAX_RETRIES { + let checkpoint_bytes = client + .get_checkpoint() + .await + .expect("fetching checkpoint"); + let text = String::from_utf8_lossy(&checkpoint_bytes); + if let Some(size_str) = text.lines().nth(1) { + if let Ok(size) = size_str.trim().parse::() { + last_size = size; + if size >= min_size { + return; + } + } + } + if attempt + 1 < MAX_RETRIES { + tokio::time::sleep(tokio::time::Duration::from_millis(RETRY_DELAY_MS)).await; + } + } + + panic!( + "checkpoint size {last_size} never reached {min_size} after {MAX_RETRIES} retries" + ); +} + +/// After `add-entry`, `get-certificate` returns a parseable landmark-relative DER +/// certificate once a landmark has been produced. +/// +/// This test uses `dev2` (10s landmark interval) and retries for up to 30s. +/// It is skipped if `IETF_MTC_LOG_NAME` is set to a log with a longer interval. +#[tokio::test] +async fn get_certificate_returns_valid_cert() { + ensure_initialized().await; + let log_name = ietf_mtc_log_name(); + if log_name != "dev2" { + eprintln!("Skipping get_certificate test: IETF_MTC_LOG_NAME={log_name} (not dev2)"); + return; + } + + let client = IetfMtcClient::new(&log_name); + let csr = make_ietf_mtc_csr(&log_name).expect("generating CSR"); + let spki_der = csr.spki_der.clone(); + + let (status, resp) = client + .add_entry(csr.csr_der) + .await + .expect("add-entry request"); + assert_eq!(status, 200, "expected 200 from add-entry"); + let resp = resp.unwrap(); + + let cert = assert_valid_mtc_cert(&resp.certificate, "add-entry standalone cert"); + let serial_bytes = cert.tbs_certificate().serial_number().as_bytes(); + let mut padded = [0u8; 8]; + let len = serial_bytes.len().min(8); + padded[8 - len..].copy_from_slice(&serial_bytes[serial_bytes.len() - len..]); + let leaf_index = u64::from_be_bytes(padded); + + const MAX_RETRIES: u32 = 30; + const RETRY_DELAY_MS: u64 = 1_000; + let mut last_status = 0u16; + + for attempt in 0..MAX_RETRIES { + let (s, cert_resp) = client + .get_certificate(leaf_index, spki_der.clone()) + .await + .expect("get-certificate request"); + last_status = s; + if s == 200 { + let cert_resp = cert_resp.unwrap(); + + let lm_cert = assert_valid_mtc_cert( + &cert_resp.data, + "get-certificate landmark-relative cert", + ); + + assert!( + cert_resp.landmark_id > 0, + "landmark_id must be positive (index 0 is the initial null entry)" + ); + + // Full signature and inclusion proof verification for landmark-relative cert. 
+ let (vk, cosigner_id, log_id) = fetch_verifying_key(&client).await;
+ verify_landmark_relative_cert(
+ &client, &lm_cert, leaf_index, &vk, &cosigner_id, &log_id,
+ )
+ .await;
+
+ return;
+ }
+ assert_eq!(s, 503, "expected 200 or 503, got {s}");
+
+ if attempt + 1 < MAX_RETRIES {
+ tokio::time::sleep(tokio::time::Duration::from_millis(RETRY_DELAY_MS)).await;
+ }
+ }
+
+ panic!(
+ "get-certificate never returned 200 after {MAX_RETRIES} retries (last status: {last_status})"
+ );
+}
diff --git a/crates/integration_tests/tests/static_ct_api.rs b/crates/integration_tests/tests/static_ct_api.rs
index d34316b0..8fe05c64 100644
--- a/crates/integration_tests/tests/static_ct_api.rs
+++ b/crates/integration_tests/tests/static_ct_api.rs
@@ -52,11 +52,10 @@ fn now_millis() -> u64 {
 // ---------------------------------------------------------------------------
 
-/// Shared once-per-binary initialization: ensures the worker is fully live and
-/// has sequenced at least one entry before any test that depends on sequencer
-/// state runs.
+/// Ensures the worker is fully live and has sequenced at least one entry
+/// before any test that depends on sequencer state runs.
 ///
-/// `ct_worker` initializes its root pool, Durable Objects, and sequencer lazily
-/// on the first request. Tests that run before initialization completes see
+/// `ct_worker` initializes its Durable Objects and sequencer lazily on the
+/// first request. Tests that run before initialization completes may see
 /// 503 (sequencer busy) or missing checkpoints. Calling `ensure_initialized`
 /// at the start of any such test avoids these races without requiring a
 /// specific test ordering.
@@ -75,25 +75,14 @@
 let client = CtClient::default_log();
 let chains = make_chains(&client.log).expect("make_chains for warmup");
 
- // Wait until get-roots succeeds before attempting add-chain.
- // get-roots triggers the CCADB fetch that populates the ROOTS
- // OnceCell. If add-chain races with that fetch in-flight from
- // another request, the Workers runtime cancels it with a 500
- // (cross-request promise resolution is not permitted). Waiting
- // here ensures ROOTS is fully populated before add-chain is called.
- let mut roots_ready = false;
- for _ in 0..MAX_ATTEMPTS {
- match client.get_roots().await {
- Ok(_) => { roots_ready = true; break; }
+ // Fetch log metadata (needed for checkpoint verification).
+ // Retry until the frontend is reachable.
+ let meta = loop {
+ match client.get_log_v3_json().await {
+ Ok(m) => break m,
 Err(_) => tokio::time::sleep(RETRY_DELAY).await,
 }
- }
- if !roots_ready {
- panic!("ct_worker get-roots never succeeded after {MAX_ATTEMPTS}s");
- }
-
- // Fetch log metadata (needed for checkpoint verification).
- let meta = client.get_log_v3_json().await.expect("log.v3.json in warmup");
+ };
 
 for attempt in 0..MAX_ATTEMPTS {
 // Submit a chain to trigger full initialization (root pool load,
@@ -148,10 +137,6 @@ async fn ensure_initialized() {
 /// valid DER-encoded X.509 certificates.
 #[tokio::test]
 async fn get_roots_returns_valid_certs() {
- // get-roots triggers the CCADB fetch that populates the ROOTS OnceCell.
- // ensure_initialized uses get-roots as its readiness probe, so whichever
- // test runs first will serialize the fetch before add-chain is attempted.
- ensure_initialized().await; let client = CtClient::default_log(); let roots = client.get_roots().await.expect("get-roots failed"); diff --git a/crates/sct_validator/Cargo.toml b/crates/sct_validator/Cargo.toml index e67c551f..2482a118 100644 --- a/crates/sct_validator/Cargo.toml +++ b/crates/sct_validator/Cargo.toml @@ -12,7 +12,7 @@ description = "WASM-compatible SCT (Signed Certificate Timestamp) validation for [dependencies] base64.workspace = true chrono.workspace = true -const-oid = "0.9.6" +const-oid.workspace = true der.workspace = true hashbrown = "0.15" log.workspace = true @@ -22,9 +22,9 @@ serde.workspace = true serde_json.workspace = true sha2.workspace = true signature.workspace = true -spki = "0.7" +spki.workspace = true thiserror.workspace = true -x509-cert = { version = "0.2.5", features = ["sct"] } +x509-cert = { workspace = true, features = ["sct"] } [dev-dependencies] -x509-cert = { version = "0.2.5", features = ["pem"] } +x509-cert = { workspace = true, features = ["pem"] } diff --git a/crates/sct_validator/src/lib.rs b/crates/sct_validator/src/lib.rs index b284ece4..e4d5cf7a 100644 --- a/crates/sct_validator/src/lib.rs +++ b/crates/sct_validator/src/lib.rs @@ -24,12 +24,11 @@ pub use sct::{extract_scts_from_cert, ParsedSct}; pub use verify::verify_sct_signature; use base64::prelude::*; -use der::Encode; +use der::{Decode, Encode}; use hashbrown::HashMap; use p256::ecdsa::VerifyingKey as P256VerifyingKey; use serde::Deserialize; use spki::SubjectPublicKeyInfoRef; -use x509_cert::der::Decode; /// Log list freshness period in seconds (70 days). /// If the log list is older than this, SCT validation auto-succeeds. @@ -447,8 +446,8 @@ impl SctValidator { let issuer_cert = x509_cert::Certificate::from_der(issuer_der) .map_err(|e| SctError::Other(format!("issuer: {e}")))?; let issuer_spki_der = issuer_cert - .tbs_certificate - .subject_public_key_info + .tbs_certificate() + .subject_public_key_info() .to_der() .map_err(|e| SctError::Other(format!("issuer SPKI: {e}")))?; diff --git a/crates/sct_validator/src/sct.rs b/crates/sct_validator/src/sct.rs index 077a0ede..9322f98b 100644 --- a/crates/sct_validator/src/sct.rs +++ b/crates/sct_validator/src/sct.rs @@ -5,9 +5,14 @@ use crate::error::SctError; use const_oid::AssociatedOid; -use der::{Decode, Encode}; -use x509_cert::ext::pkix::sct::{SignedCertificateTimestamp, SignedCertificateTimestampList}; -use x509_cert::Certificate; +use der::Decode; +use x509_cert::{ + ext::{ + pkix::sct::{SignedCertificateTimestamp, SignedCertificateTimestampList}, + Extension, + }, + Certificate, +}; /// A parsed SCT from a certificate. 
 #[derive(Clone, Debug)]
@@ -51,41 +56,121 @@ pub fn extract_scts_from_cert(leaf_der: &[u8]) -> Result<(Vec<ParsedSct>, Vec<u8>, u32), SctError> {
+ let mut sct_ext: Option<&Extension> = None;
+ let mut other_extensions: Vec<&Extension> = Vec::with_capacity(extensions.len().saturating_sub(1));
+ for ext in extensions {
+ if ext.extn_id == SignedCertificateTimestampList::OID {
+ if sct_ext.is_some() {
+ return Err(SctError::Other(
+ "certificate has multiple SCT extensions, expected 1".into(),
+ ));
+ }
+ sct_ext = Some(ext);
+ } else {
+ other_extensions.push(ext);
+ }
+ }
+ let sct_ext = sct_ext.ok_or(SctError::NoSctExtension)?;
+
+ let parsed_scts = parse_sct_extension(sct_ext)?;
 
- let ct_cert_der = tbs
- .to_der()
+ let tbs_der = encode_tbs_without_sct(&cert, &other_extensions)
 .map_err(|e| SctError::Other(format!("failed to re-serialize TBS: {e}")))?;
 
- Ok((parsed_scts, ct_cert_der, lifetime_days))
+ Ok((parsed_scts, tbs_der, lifetime_days))
 }
 
-fn find_and_parse_scts(
- extensions: &[x509_cert::ext::Extension],
-) -> Result<(usize, Vec<ParsedSct>), SctError> {
- // Per RFC 6962, all SCTs go in one extension. Reject certs with multiple.
- let sct_extensions: Vec<_> = extensions
- .iter()
- .enumerate()
- .filter(|(_, ext)| ext.extn_id == SignedCertificateTimestampList::OID)
- .collect();
-
- let (index, sct_ext) = match sct_extensions.as_slice() {
- [] => return Err(SctError::NoSctExtension),
- [(idx, ext)] => (*idx, *ext),
- _ => {
- return Err(SctError::Other(format!(
- "certificate has {} SCT extensions, expected 1",
- sct_extensions.len()
- )))
- }
+/// Rebuild the TBS DER with the SCT extension removed.
+///
+/// Works by re-encoding each TBS field individually via the public getter API
+/// introduced in x509-cert 0.3, then rebuilding the extensions [3] EXPLICIT
+/// wrapper without the SCT entry.
+fn encode_tbs_without_sct(
+ cert: &Certificate,
+ other_extensions: &[&Extension],
+) -> Result<Vec<u8>, der::Error> {
+ use der::{
+ asn1::{ContextSpecific, ContextSpecificRef},
+ Encode, TagMode, TagNumber,
 };
+ let tbs = cert.tbs_certificate();
+ let mut tbs_content = Vec::new();
+
+ // version [0] EXPLICIT INTEGER DEFAULT v1 — omit if v1.
+ if tbs.version() != x509_cert::certificate::Version::V1 {
+ let tagged = ContextSpecific {
+ tag_number: TagNumber(0),
+ tag_mode: TagMode::Explicit,
+ value: tbs.version(),
+ };
+ tagged.encode_to_vec(&mut tbs_content)?;
+ }
+ tbs.serial_number().encode_to_vec(&mut tbs_content)?;
+ tbs.signature().encode_to_vec(&mut tbs_content)?;
+ tbs.issuer().encode_to_vec(&mut tbs_content)?;
+ tbs.validity().encode_to_vec(&mut tbs_content)?;
+ tbs.subject().encode_to_vec(&mut tbs_content)?;
+ tbs.subject_public_key_info()
+ .encode_to_vec(&mut tbs_content)?;
+ if let Some(uid) = tbs.issuer_unique_id() {
+ // issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL
+ ContextSpecificRef {
+ tag_number: TagNumber(1),
+ tag_mode: TagMode::Implicit,
+ value: uid,
+ }
+ .encode_to_vec(&mut tbs_content)?;
+ }
+ if let Some(uid) = tbs.subject_unique_id() {
+ // subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL
+ ContextSpecificRef {
+ tag_number: TagNumber(2),
+ tag_mode: TagMode::Implicit,
+ value: uid,
+ }
+ .encode_to_vec(&mut tbs_content)?;
+ }
+ if !other_extensions.is_empty() {
+ // Encode extensions as SEQUENCE OF Extension.
+ let mut exts_items = Vec::new();
+ for ext in other_extensions {
+ exts_items.extend(ext.to_der()?);
+ }
+ // Wrap in SEQUENCE (= Extensions type).
+ let mut exts_seq = Vec::new();
+ der::Header::new(der::Tag::Sequence, der::Length::try_from(exts_items.len())?)
+ .encode_to_vec(&mut exts_seq)?;
+ exts_seq.extend(exts_items);
+ // Wrap in [3] EXPLICIT.
+ let exts_any = der::asn1::Any::from_der(&exts_seq)?;
+ let tagged = ContextSpecific {
+ tag_number: TagNumber(3),
+ tag_mode: TagMode::Explicit,
+ value: exts_any,
+ };
+ tagged.encode_to_vec(&mut tbs_content)?;
+ }
+
+ // Wrap tbs_content in outer SEQUENCE.
+ let mut tbs_der = Vec::new();
+ der::Header::new(
+ der::Tag::Sequence,
+ der::Length::try_from(tbs_content.len())?,
+ )
+ .encode_to_vec(&mut tbs_der)?;
+ tbs_der.extend(tbs_content);
+ Ok(tbs_der)
+}
+
+fn parse_sct_extension(sct_ext: &Extension) -> Result<Vec<ParsedSct>, SctError> {
 let sct_list = SignedCertificateTimestampList::from_der(sct_ext.extn_value.as_bytes())
 .map_err(|e| SctError::Other(format!("failed to parse SCT list DER: {e}")))?;
@@ -99,8 +184,7 @@ fn find_and_parse_scts(
 parsed_scts.push(convert_sct(&sct)?);
 }
 }
-
- Ok((index, parsed_scts))
+ Ok(parsed_scts)
 }
 
 fn convert_sct(sct: &SignedCertificateTimestamp) -> Result<ParsedSct, SctError> {
@@ -146,7 +230,7 @@ fn parse_signature_algorithm(
 }
 
 fn extract_lifetime_days(cert: &Certificate) -> Result<u32, SctError> {
- let validity = &cert.tbs_certificate.validity;
+ let validity = cert.tbs_certificate().validity();
 
 let not_before_secs = validity.not_before.to_unix_duration().as_secs();
 let not_after_secs = validity.not_after.to_unix_duration().as_secs();
@@ -161,3 +245,168 @@ fn extract_lifetime_days(cert: &Certificate) -> Result<u32, SctError> {
 Ok(lifetime_days)
 }
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use der::{
+ asn1::{BitString, ContextSpecific, ContextSpecificRef},
+ Decode as _, Encode as _, TagMode, TagNumber,
+ };
+ use x509_cert::Certificate;
+
+ /// Construct a Certificate DER identical to `cert` but with `issuerUniqueID`
+ /// and `subjectUniqueID` injected, to exercise the unique ID encoding paths
+ /// in `encode_tbs_without_sct`.
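+ /// (The rebuilt certificate's outer signature no longer matches its TBS;
+ /// these tests only exercise DER structure, not signature validity.)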
+ fn inject_unique_ids(cert: &Certificate) -> Certificate { + let tbs = cert.tbs_certificate(); + let issuer_uid = BitString::from_bytes(&[0xDE, 0xAD]).unwrap(); + let subject_uid = BitString::from_bytes(&[0xBE, 0xEF]).unwrap(); + + let mut content = Vec::new(); + ContextSpecific { + tag_number: TagNumber(0), + tag_mode: TagMode::Explicit, + value: tbs.version(), + } + .encode_to_vec(&mut content) + .unwrap(); + tbs.serial_number().encode_to_vec(&mut content).unwrap(); + tbs.signature().encode_to_vec(&mut content).unwrap(); + tbs.issuer().encode_to_vec(&mut content).unwrap(); + tbs.validity().encode_to_vec(&mut content).unwrap(); + tbs.subject().encode_to_vec(&mut content).unwrap(); + tbs.subject_public_key_info() + .encode_to_vec(&mut content) + .unwrap(); + ContextSpecificRef { + tag_number: TagNumber(1), + tag_mode: TagMode::Implicit, + value: &issuer_uid, + } + .encode_to_vec(&mut content) + .unwrap(); + ContextSpecificRef { + tag_number: TagNumber(2), + tag_mode: TagMode::Implicit, + value: &subject_uid, + } + .encode_to_vec(&mut content) + .unwrap(); + if let Some(exts) = tbs.extensions() { + let mut exts_items = Vec::new(); + for ext in exts { + exts_items.extend(ext.to_der().unwrap()); + } + let mut exts_seq = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(exts_items.len()).unwrap(), + ) + .encode_to_vec(&mut exts_seq) + .unwrap(); + exts_seq.extend(exts_items); + let exts_any = der::asn1::Any::from_der(&exts_seq).unwrap(); + ContextSpecific { + tag_number: TagNumber(3), + tag_mode: TagMode::Explicit, + value: exts_any, + } + .encode_to_vec(&mut content) + .unwrap(); + } + let mut tbs_der = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(content.len()).unwrap(), + ) + .encode_to_vec(&mut tbs_der) + .unwrap(); + tbs_der.extend(content); + + let mut cert_content = tbs_der; + cert.signature_algorithm() + .encode_to_vec(&mut cert_content) + .unwrap(); + cert.signature().encode_to_vec(&mut cert_content).unwrap(); + let mut cert_der = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(cert_content.len()).unwrap(), + ) + .encode_to_vec(&mut cert_der) + .unwrap(); + cert_der.extend(cert_content); + Certificate::from_der(&cert_der).unwrap() + } + + /// Golden-file regression test for `extract_scts_from_cert`. + /// + /// The golden file contains the expected TBS DER output after the SCT extension + /// is removed from `cloudflare.pem`. 
Re-generate with: + /// + /// ```sh + /// UPDATE_GOLDEN=1 cargo test -p sct_validator test_tbs_without_sct_golden + /// ``` + #[test] + fn test_tbs_without_sct_golden() { + use der::Encode as _; + use x509_cert::der::DecodePem as _; + + const GOLDEN: &str = "tests/golden/cloudflare-tbs-without-sct.der"; + + let cert = Certificate::load_pem_chain(include_bytes!("../tests/cloudflare.pem")) + .unwrap() + .remove(0); + let cert_der = cert.to_der().unwrap(); + let (_, tbs_der, _) = extract_scts_from_cert(&cert_der).unwrap(); + + let golden_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join(GOLDEN); + if std::env::var("UPDATE_GOLDEN").is_ok() { + std::fs::write(&golden_path, &tbs_der).expect("failed to write golden file"); + return; + } + let expected = + std::fs::read(&golden_path).expect("golden file missing — run with UPDATE_GOLDEN=1"); + assert_eq!( + tbs_der, expected, + "TBS DER mismatch — if intentional, re-run with UPDATE_GOLDEN=1" + ); + } + + #[test] + fn test_unique_ids_round_trip() { + let cert = Certificate::load_pem_chain(include_bytes!("../tests/cloudflare.pem")) + .unwrap() + .remove(0); + let cert_with_uids = inject_unique_ids(&cert); + let tbs = cert_with_uids.tbs_certificate(); + + // Confirm unique IDs parsed correctly from the injected cert. + assert!(tbs.issuer_unique_id().is_some()); + assert!(tbs.subject_unique_id().is_some()); + + // Use extract_scts_from_cert end-to-end so the single-pass logic is exercised. + let cert_der = cert_with_uids.to_der().unwrap(); + let (_, rebuilt_tbs_der, _) = extract_scts_from_cert(&cert_der).unwrap(); + let rebuilt = x509_cert::certificate::TbsCertificate::from_der(&rebuilt_tbs_der).unwrap(); + + // SCT extension must be gone. + assert!(rebuilt + .get_extension::() + .unwrap() + .is_none()); + + // Unique IDs must survive with correct IMPLICIT tags. + assert_eq!( + rebuilt.issuer_unique_id(), + tbs.issuer_unique_id(), + "issuerUniqueID was dropped or corrupted" + ); + assert_eq!( + rebuilt.subject_unique_id(), + tbs.subject_unique_id(), + "subjectUniqueID was dropped or corrupted" + ); + } +} diff --git a/crates/sct_validator/tests/golden/cloudflare-tbs-without-sct.der b/crates/sct_validator/tests/golden/cloudflare-tbs-without-sct.der new file mode 100644 index 00000000..ca2b56f3 Binary files /dev/null and b/crates/sct_validator/tests/golden/cloudflare-tbs-without-sct.der differ diff --git a/crates/signed_note/src/ed25519.rs b/crates/signed_note/src/ed25519.rs index 0e4377bd..b7f2f6d4 100644 --- a/crates/signed_note/src/ed25519.rs +++ b/crates/signed_note/src/ed25519.rs @@ -6,7 +6,7 @@ use ed25519_dalek::{ Signer as Ed25519Signer, SigningKey as Ed25519SigningKey, Verifier as Ed25519Verifier, VerifyingKey as Ed25519VerifyingKey, }; -use rand_core::CryptoRngCore; +use rand_core::CryptoRng; /// [`Ed25519NoteVerifier`] is the verifier for the ordinary (non-timestamped) Ed25519 signature type defined in . #[derive(Clone)] @@ -198,7 +198,7 @@ impl Ed25519NoteSigner { /// Generates a signer and verifier key pair for a named server. /// The signer key skey is private and must be kept secret. -pub fn generate_encoded_ed25519_key( +pub fn generate_encoded_ed25519_key( csprng: &mut R, name: &KeyName, ) -> (String, String) { diff --git a/crates/signed_note/src/lib.rs b/crates/signed_note/src/lib.rs index ed18607a..ac75526d 100644 --- a/crates/signed_note/src/lib.rs +++ b/crates/signed_note/src/lib.rs @@ -186,28 +186,17 @@ //! //! struct ZeroRng; //! -//! impl rand_core::RngCore for ZeroRng { -//! fn next_u32(&mut self) -> u32 { -//! 0 -//! 
} -//! -//! fn next_u64(&mut self) -> u64 { -//! 0 -//! } -//! -//! fn fill_bytes(&mut self, dest: &mut [u8]) { -//! for byte in dest.iter_mut() { -//! *byte = 0; -//! } -//! } -//! -//! fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand_core::Error> { -//! self.fill_bytes(dest); +//! impl rand_core::TryRng for ZeroRng { +//! type Error = core::convert::Infallible; +//! fn try_next_u32(&mut self) -> Result { Ok(0) } +//! fn try_next_u64(&mut self) -> Result { Ok(0) } +//! fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Self::Error> { +//! for byte in dest.iter_mut() { *byte = 0; } //! Ok(()) //! } //! } //! -//! impl rand_core::CryptoRng for ZeroRng {} +//! impl rand_core::TryCryptoRng for ZeroRng {} //! //! let (skey, _) = signed_note::generate_encoded_ed25519_key(&mut ZeroRng{}, &KeyName::new("EnochRoot".into()).unwrap()); //! let signer = Ed25519NoteSigner::new_from_encoded_key(&skey).unwrap(); @@ -708,7 +697,7 @@ impl Note { mod tests { use super::*; - use rand::rngs::OsRng; + use std::sync::LazyLock; static NAME: LazyLock = LazyLock::new(|| KeyName::new("EnochRoot".into()).unwrap()); @@ -729,7 +718,7 @@ mod tests { #[test] fn test_generate_key() { - let (skey, vkey) = generate_encoded_ed25519_key(&mut OsRng, &NAME); + let (skey, vkey) = generate_encoded_ed25519_key(&mut rand::rng(), &NAME); let signer = Ed25519NoteSigner::new_from_encoded_key(&skey).unwrap(); let verifier = Ed25519NoteVerifier::new_from_encoded_key(&vkey).unwrap(); @@ -739,7 +728,7 @@ mod tests { #[test] fn test_from_ed25519() { - let signing_key = ed25519_dalek::SigningKey::generate(&mut OsRng); + let signing_key = ed25519_dalek::SigningKey::generate(&mut rand::rng()); let pubkey = [ &[SignatureType::Ed25519 as u8], diff --git a/crates/static_ct_api/Cargo.toml b/crates/static_ct_api/Cargo.toml index f5aefb08..ff70fb34 100644 --- a/crates/static_ct_api/Cargo.toml +++ b/crates/static_ct_api/Cargo.toml @@ -38,6 +38,6 @@ signature.workspace = true signed_note.workspace = true thiserror.workspace = true tlog_tiles.workspace = true +spki.workspace = true x509-cert.workspace = true -x509-verify.workspace = true x509_util.workspace = true diff --git a/crates/static_ct_api/src/lib.rs b/crates/static_ct_api/src/lib.rs index 950ebac7..8ee982bd 100644 --- a/crates/static_ct_api/src/lib.rs +++ b/crates/static_ct_api/src/lib.rs @@ -20,7 +20,7 @@ pub enum StaticCTError { #[error(transparent)] Der(#[from] der::Error), #[error(transparent)] - X509(#[from] x509_verify::spki::Error), + X509(#[from] spki::Error), #[error(transparent)] Validation(#[from] x509_util::ValidationError), #[error("unexpected extension")] diff --git a/crates/static_ct_api/src/rfc6962.rs b/crates/static_ct_api/src/rfc6962.rs index b30f539f..9e8752eb 100644 --- a/crates/static_ct_api/src/rfc6962.rs +++ b/crates/static_ct_api/src/rfc6962.rs @@ -34,7 +34,9 @@ use serde_with::{base64::Base64, serde_as}; use sha2::{Digest, Sha256}; use tlog_tiles::UnixTimestamp; use x509_cert::{ - der::Encode, ext::pkix::ExtendedKeyUsage, impl_newtype, Certificate, TbsCertificate, + der::Encode, + ext::{pkix::ExtendedKeyUsage, Extension}, + impl_newtype, Certificate, TbsCertificate, }; use x509_util::{validate_chain_lax, CertPool, ValidationOptions}; @@ -106,8 +108,8 @@ pub fn partially_validate_chain( // reason for a CT log to reject a submission: . if require_server_auth_eku && !leaf - .tbs_certificate - .get::()? + .tbs_certificate() + .get_extension::()? 
.is_some_and(|(_, eku)| eku.0.contains(&ID_KP_SERVER_AUTH)) { return Err(StaticCTError::InvalidLeaf); @@ -132,12 +134,15 @@ pub fn partially_validate_chain( ( Some(PrecertData { issuer_key_hash: Sha256::digest( - issuer.tbs_certificate.subject_public_key_info.to_der()?, + issuer + .tbs_certificate() + .subject_public_key_info() + .to_der()?, ) .into(), pre_certificate: leaf.to_der()?, }), - build_precert_tbs(&leaf.tbs_certificate)?, + build_precert_tbs(leaf.tbs_certificate())?, ) } else { (None, leaf.to_der()?) @@ -181,7 +186,7 @@ impl_newtype!(CTPrecertPoison, Null); /// Returns whether or not the certificate contains the precertificate poison extension. fn is_precert(cert: &Certificate) -> Result { - match cert.tbs_certificate.get::()? { + match cert.tbs_certificate().get_extension::()? { Some((true, _)) => Ok(true), Some((false, _)) => Err(StaticCTError::InvalidCTPoison), None => Ok(false), @@ -190,7 +195,7 @@ fn is_precert(cert: &Certificate) -> Result { /// Returns whether or not the certificate is a precertificate signing certificate. fn is_precert_signing_cert(cert: &Certificate) -> Result { - match cert.tbs_certificate.get::()? { + match cert.tbs_certificate().get_extension::()? { Some((_, eku)) => { for usage in eku.0 { if usage == CT_PRECERT_SIGNING_CERT { @@ -219,21 +224,88 @@ fn is_precert_signing_cert(cert: &Certificate) -> Result { /// /// Returns an error if the certificate is not a valid precertificate. pub fn build_precert_tbs(tbs: &TbsCertificate) -> Result, StaticCTError> { - let mut tbs = tbs.clone(); + use der::{ + asn1::{ContextSpecific, ContextSpecificRef}, + Decode, Encode, TagMode, TagNumber, + }; - let exts = tbs - .extensions - .as_mut() - .ok_or(StaticCTError::InvalidCTPoison)?; + let extensions = tbs.extensions().ok_or(StaticCTError::InvalidCTPoison)?; // Remove CT poison extension (there must be exactly 1). - let ct_poison_idx = exts + let ct_poison_idx = extensions .iter() .position(|v| v.extn_id == CT_PRECERT_POISON) .ok_or(StaticCTError::InvalidCTPoison)?; - exts.remove(ct_poison_idx); - Ok(tbs.to_der()?) + let filtered_extensions: Vec<&Extension> = extensions + .iter() + .enumerate() + .filter(|(i, _)| *i != ct_poison_idx) + .map(|(_, ext)| ext) + .collect(); + + // Re-encode TBS field-by-field (all fields are private in x509-cert 0.3). 
+ let mut tbs_content = Vec::new(); + + if tbs.version() != x509_cert::certificate::Version::V1 { + let tagged = ContextSpecific { + tag_number: TagNumber(0), + tag_mode: TagMode::Explicit, + value: tbs.version(), + }; + tagged.encode_to_vec(&mut tbs_content)?; + } + tbs.serial_number().encode_to_vec(&mut tbs_content)?; + tbs.signature().encode_to_vec(&mut tbs_content)?; + tbs.issuer().encode_to_vec(&mut tbs_content)?; + tbs.validity().encode_to_vec(&mut tbs_content)?; + tbs.subject().encode_to_vec(&mut tbs_content)?; + tbs.subject_public_key_info() + .encode_to_vec(&mut tbs_content)?; + if let Some(uid) = tbs.issuer_unique_id() { + // issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL + ContextSpecificRef { + tag_number: TagNumber(1), + tag_mode: TagMode::Implicit, + value: uid, + } + .encode_to_vec(&mut tbs_content)?; + } + if let Some(uid) = tbs.subject_unique_id() { + // subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL + ContextSpecificRef { + tag_number: TagNumber(2), + tag_mode: TagMode::Implicit, + value: uid, + } + .encode_to_vec(&mut tbs_content)?; + } + if !filtered_extensions.is_empty() { + let mut exts_items = Vec::new(); + for ext in &filtered_extensions { + exts_items.extend((*ext).to_der()?); + } + let mut exts_seq = Vec::new(); + der::Header::new(der::Tag::Sequence, der::Length::try_from(exts_items.len())?) + .encode_to_vec(&mut exts_seq)?; + exts_seq.extend(exts_items); + let exts_any = der::asn1::Any::from_der(&exts_seq)?; + let tagged = ContextSpecific { + tag_number: TagNumber(3), + tag_mode: TagMode::Explicit, + value: exts_any, + }; + tagged.encode_to_vec(&mut tbs_content)?; + } + + let mut tbs_der = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(tbs_content.len())?, + ) + .encode_to_vec(&mut tbs_der)?; + tbs_der.extend(tbs_content); + Ok(tbs_der) } #[cfg(test)] @@ -242,7 +314,7 @@ mod tests { use chrono::prelude::*; use der::{asn1::OctetString, Decode}; use x509_cert::ext::Extension; - use x509_verify::x509_cert::Certificate; + use x509_cert::{Certificate, TbsCertificate}; fn parse_datetime(s: &str) -> UnixTimestamp { u64::try_from(DateTime::parse_from_rfc3339(s).unwrap().timestamp_millis()).unwrap() @@ -279,9 +351,8 @@ mod tests { test_is_precert!( remove_exts_from_precert, - wipe_extensions( - &mut Certificate::load_pem_chain(include_bytes!("../tests/precert-valid.pem")).unwrap() - [0] + &wipe_extensions( + &Certificate::load_pem_chain(include_bytes!("../tests/precert-valid.pem")).unwrap()[0] ), false, false @@ -289,9 +360,8 @@ mod tests { test_is_precert!( poison_non_critical, - make_poison_non_critical( - &mut Certificate::load_pem_chain(include_bytes!("../tests/precert-valid.pem")).unwrap() - [0] + &make_poison_non_critical( + &Certificate::load_pem_chain(include_bytes!("../tests/precert-valid.pem")).unwrap()[0] ), false, true @@ -299,9 +369,8 @@ mod tests { test_is_precert!( poison_non_null, - make_poison_non_null( - &mut Certificate::load_pem_chain(include_bytes!("../tests/precert-valid.pem")).unwrap() - [0] + &make_poison_non_null( + &Certificate::load_pem_chain(include_bytes!("../tests/precert-valid.pem")).unwrap()[0] ), false, true @@ -359,40 +428,307 @@ mod tests { // CT does not allow extra certs at the end of the chain. 
test_validate_chain!(unrelated_cert_after_chain_inc_root; "../../static_ct_api/tests/fake-ca-cert.pem"; "../tests/leaf-signed-by-fake-intermediate-cert.pem", "../tests/fake-intermediate-cert.pem", "../tests/fake-ca-cert.pem", "../tests/test-cert.pem"; None; None; false; true; true; 0); + /// Golden-file regression test for `build_precert_tbs`. + /// + /// The golden file contains the expected TBS DER output after the CT poison + /// extension is removed from the precertificate in `preissuer-chain.pem`. + /// Re-generate with: + /// + /// ```sh + /// UPDATE_GOLDEN=1 cargo test -p static_ct_api test_build_precert_tbs_golden + /// ``` + #[test] + fn test_build_precert_tbs_golden() { + const GOLDEN: &str = "tests/golden/preissuer-precert-tbs.der"; + + let precert_chain = + Certificate::load_pem_chain(include_bytes!("../tests/preissuer-chain.pem")).unwrap(); + let tbs_der = build_precert_tbs(precert_chain[0].tbs_certificate()).unwrap(); + + let golden_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join(GOLDEN); + if std::env::var("UPDATE_GOLDEN").is_ok() { + std::fs::write(&golden_path, &tbs_der).expect("failed to write golden file"); + return; + } + let expected = + std::fs::read(&golden_path).expect("golden file missing — run with UPDATE_GOLDEN=1"); + assert_eq!( + tbs_der, expected, + "TBS DER mismatch — if intentional, re-run with UPDATE_GOLDEN=1" + ); + } + #[test] fn test_build_precert_tbs() { let precert_chain = Certificate::load_pem_chain(include_bytes!("../tests/preissuer-chain.pem")).unwrap(); - let precert = &precert_chain[0].tbs_certificate; + let precert = precert_chain[0].tbs_certificate(); let der = build_precert_tbs(precert).unwrap(); let tbs = TbsCertificate::from_der(&der).unwrap(); // Ensure CT poison is removed. - assert!(precert.get::().unwrap().is_some()); - assert!(tbs.get::().unwrap().is_none()); + assert!(precert + .get_extension::() + .unwrap() + .is_some()); + assert!(tbs.get_extension::().unwrap().is_none()); + } + + /// Build a Certificate DER from the given `precert_tbs_der` (which contains a CT + /// poison extension) by wrapping it with a dummy signature, suitable for passing + /// to `build_precert_tbs`. Used to inject synthetic fields (e.g. unique IDs) that + /// don't appear in committed test fixtures. + fn make_cert_from_tbs_der(tbs_der: Vec) -> Certificate { + use der::{asn1::BitString, Encode}; + use x509_cert::der::Decode; + // Dummy AlgorithmIdentifier (sha256WithRSAEncryption OID, NULL params) and empty BIT STRING. + let sig_alg_der: &[u8] = &[ + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, + 0x00, + ]; + let sig_bs = BitString::from_bytes(&[]).unwrap(); + let mut cert_content = tbs_der; + cert_content.extend(sig_alg_der); + sig_bs.encode_to_vec(&mut cert_content).unwrap(); + let mut cert_der = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(cert_content.len()).unwrap(), + ) + .encode_to_vec(&mut cert_der) + .unwrap(); + cert_der.extend(cert_content); + Certificate::from_der(&cert_der).unwrap() + } + + /// Construct a TBS DER for a precertificate identical to `tbs` but with + /// `issuerUniqueID` and `subjectUniqueID` injected, for round-trip testing. 
+ fn inject_unique_ids_into_precert_tbs(tbs: &TbsCertificate) -> Vec { + use der::{ + asn1::{BitString, ContextSpecific, ContextSpecificRef}, + Encode, TagMode, TagNumber, + }; + let issuer_uid = BitString::from_bytes(&[0xDE, 0xAD]).unwrap(); + let subject_uid = BitString::from_bytes(&[0xBE, 0xEF]).unwrap(); + let mut content = Vec::new(); + // [0] EXPLICIT version (V3) + ContextSpecific { + tag_number: TagNumber(0), + tag_mode: TagMode::Explicit, + value: tbs.version(), + } + .encode_to_vec(&mut content) + .unwrap(); + tbs.serial_number().encode_to_vec(&mut content).unwrap(); + tbs.signature().encode_to_vec(&mut content).unwrap(); + tbs.issuer().encode_to_vec(&mut content).unwrap(); + tbs.validity().encode_to_vec(&mut content).unwrap(); + tbs.subject().encode_to_vec(&mut content).unwrap(); + tbs.subject_public_key_info() + .encode_to_vec(&mut content) + .unwrap(); + // [1] IMPLICIT issuerUniqueID + ContextSpecificRef { + tag_number: TagNumber(1), + tag_mode: TagMode::Implicit, + value: &issuer_uid, + } + .encode_to_vec(&mut content) + .unwrap(); + // [2] IMPLICIT subjectUniqueID + ContextSpecificRef { + tag_number: TagNumber(2), + tag_mode: TagMode::Implicit, + value: &subject_uid, + } + .encode_to_vec(&mut content) + .unwrap(); + // [3] EXPLICIT extensions (copy from original, including CT poison) + if let Some(exts) = tbs.extensions() { + let mut exts_items = Vec::new(); + for ext in exts { + exts_items.extend(ext.to_der().unwrap()); + } + let mut exts_seq = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(exts_items.len()).unwrap(), + ) + .encode_to_vec(&mut exts_seq) + .unwrap(); + exts_seq.extend(exts_items); + let exts_any = der::asn1::Any::from_der(&exts_seq).unwrap(); + ContextSpecific { + tag_number: TagNumber(3), + tag_mode: TagMode::Explicit, + value: exts_any, + } + .encode_to_vec(&mut content) + .unwrap(); + } + let mut tbs_der = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(content.len()).unwrap(), + ) + .encode_to_vec(&mut tbs_der) + .unwrap(); + tbs_der.extend(content); + tbs_der + } + + #[test] + fn test_build_precert_tbs_unique_ids_round_trip() { + let precert_chain = + Certificate::load_pem_chain(include_bytes!("../tests/preissuer-chain.pem")).unwrap(); + let precert_tbs = precert_chain[0].tbs_certificate(); + + let tbs_der = inject_unique_ids_into_precert_tbs(precert_tbs); + let cert = make_cert_from_tbs_der(tbs_der); + let tbs_with_uids = cert.tbs_certificate(); + + // Sanity-check that the injected unique IDs are present in the parsed input cert. + assert!(tbs_with_uids.issuer_unique_id().is_some()); + assert!(tbs_with_uids.subject_unique_id().is_some()); + + let rebuilt_der = build_precert_tbs(tbs_with_uids).unwrap(); + let rebuilt = TbsCertificate::from_der(&rebuilt_der).unwrap(); + + // CT poison must be stripped. + assert!(rebuilt + .get_extension::() + .unwrap() + .is_none()); + + // Unique IDs must survive the reconstruction with correct IMPLICIT tags. + assert_eq!( + rebuilt.issuer_unique_id(), + tbs_with_uids.issuer_unique_id(), + "issuerUniqueID was dropped or corrupted" + ); + assert_eq!( + rebuilt.subject_unique_id(), + tbs_with_uids.subject_unique_id(), + "subjectUniqueID was dropped or corrupted" + ); + } + + /// Rebuild a `Certificate` replacing its TBS extensions with `new_exts`. + /// Uses field-by-field DER re-encoding since x509-cert 0.3 fields are private. 
+ fn rebuild_cert_with_extensions( + cert: &Certificate, + new_exts: Option<&[Extension]>, + ) -> Certificate { + use der::{ + asn1::{ContextSpecific, ContextSpecificRef}, + Decode, Encode, TagMode, TagNumber, + }; + let tbs = cert.tbs_certificate(); + let mut tbs_content = Vec::new(); + + if tbs.version() != x509_cert::certificate::Version::V1 { + let tagged = ContextSpecific { + tag_number: TagNumber(0), + tag_mode: TagMode::Explicit, + value: tbs.version(), + }; + tagged.encode_to_vec(&mut tbs_content).unwrap(); + } + tbs.serial_number().encode_to_vec(&mut tbs_content).unwrap(); + tbs.signature().encode_to_vec(&mut tbs_content).unwrap(); + tbs.issuer().encode_to_vec(&mut tbs_content).unwrap(); + tbs.validity().encode_to_vec(&mut tbs_content).unwrap(); + tbs.subject().encode_to_vec(&mut tbs_content).unwrap(); + tbs.subject_public_key_info() + .encode_to_vec(&mut tbs_content) + .unwrap(); + if let Some(uid) = tbs.issuer_unique_id() { + ContextSpecificRef { + tag_number: TagNumber(1), + tag_mode: TagMode::Implicit, + value: uid, + } + .encode_to_vec(&mut tbs_content) + .unwrap(); + } + if let Some(uid) = tbs.subject_unique_id() { + ContextSpecificRef { + tag_number: TagNumber(2), + tag_mode: TagMode::Implicit, + value: uid, + } + .encode_to_vec(&mut tbs_content) + .unwrap(); + } + if let Some(exts) = new_exts { + if !exts.is_empty() { + let mut exts_items = Vec::new(); + for ext in exts { + exts_items.extend(ext.to_der().unwrap()); + } + let mut exts_seq = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(exts_items.len()).unwrap(), + ) + .encode_to_vec(&mut exts_seq) + .unwrap(); + exts_seq.extend(exts_items); + let exts_any = der::asn1::Any::from_der(&exts_seq).unwrap(); + let tagged = ContextSpecific { + tag_number: TagNumber(3), + tag_mode: TagMode::Explicit, + value: exts_any, + }; + tagged.encode_to_vec(&mut tbs_content).unwrap(); + } + } + let mut tbs_der = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(tbs_content.len()).unwrap(), + ) + .encode_to_vec(&mut tbs_der) + .unwrap(); + tbs_der.extend(&tbs_content); + + let mut cert_content = Vec::new(); + cert_content.extend(&tbs_der); + cert.signature_algorithm() + .encode_to_vec(&mut cert_content) + .unwrap(); + cert.signature().encode_to_vec(&mut cert_content).unwrap(); + let mut cert_der = Vec::new(); + der::Header::new( + der::Tag::Sequence, + der::Length::try_from(cert_content.len()).unwrap(), + ) + .encode_to_vec(&mut cert_der) + .unwrap(); + cert_der.extend(cert_content); + Certificate::from_der(&cert_der).unwrap() } - fn wipe_extensions(cert: &mut Certificate) -> &Certificate { - cert.tbs_certificate.extensions = None; - cert + fn wipe_extensions(cert: &Certificate) -> Certificate { + rebuild_cert_with_extensions(cert, None) } - fn make_poison_non_critical(cert: &mut Certificate) -> &Certificate { - cert.tbs_certificate.extensions = Some(vec![Extension { + fn make_poison_non_critical(cert: &Certificate) -> Certificate { + let exts = vec![Extension { extn_id: CT_PRECERT_POISON, critical: false, extn_value: OctetString::new(Null.to_der().unwrap()).unwrap(), - }]); - cert + }]; + rebuild_cert_with_extensions(cert, Some(&exts)) } - fn make_poison_non_null(cert: &mut Certificate) -> &Certificate { - cert.tbs_certificate.extensions = Some(vec![Extension { + fn make_poison_non_null(cert: &Certificate) -> Certificate { + let exts = vec![Extension { extn_id: CT_PRECERT_POISON, critical: true, extn_value: OctetString::new([]).unwrap(), - }]); - cert + }]; + 
rebuild_cert_with_extensions(cert, Some(&exts)) } } diff --git a/crates/static_ct_api/src/static_ct.rs b/crates/static_ct_api/src/static_ct.rs index 3d2667aa..323e6b59 100644 --- a/crates/static_ct_api/src/static_ct.rs +++ b/crates/static_ct_api/src/static_ct.rs @@ -280,12 +280,22 @@ impl LogEntry for StaticCTLogEntry { const REQUIRE_CHECKPOINT_TIMESTAMP: bool = true; type Pending = StaticCTPendingLogEntry; type ParseError = StaticCTError; + type Metadata = SequenceMetadata; + + fn make_metadata( + leaf_index: LeafIndex, + timestamp: UnixTimestamp, + _old_tree_size: u64, + _new_tree_size: u64, + ) -> Self::Metadata { + (leaf_index, timestamp) + } fn initial_entry() -> Option { None } - fn new(pending: StaticCTPendingLogEntry, metadata: SequenceMetadata) -> Self { + fn new(pending: StaticCTPendingLogEntry, metadata: Self::Metadata) -> Self { StaticCTLogEntry { inner: pending, leaf_index: metadata.0, diff --git a/crates/static_ct_api/tests/golden/preissuer-precert-tbs.der b/crates/static_ct_api/tests/golden/preissuer-precert-tbs.der new file mode 100644 index 00000000..cb0d6070 Binary files /dev/null and b/crates/static_ct_api/tests/golden/preissuer-precert-tbs.der differ diff --git a/crates/static_ct_api/tests/p521-leaf-cert.pem b/crates/static_ct_api/tests/p521-leaf-cert.pem new file mode 100644 index 00000000..dcfbf4ec --- /dev/null +++ b/crates/static_ct_api/tests/p521-leaf-cert.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICNjCCAZegAwIBAgIUFWgAUAMtB3VPx20AjU78l5fy3CcwCgYIKoZIzj0EAwQw +HTEbMBkGA1UEAwwSVGVzdCBQLTUyMSBSb290IENBMB4XDTI2MDQxMDE2NTUyNVoX +DTI3MDQxMDE2NTUyNVowGzEZMBcGA1UEAwwQdGVzdC5leGFtcGxlLmNvbTCBmzAQ +BgcqhkjOPQIBBgUrgQQAIwOBhgAEAYEXsb+9GabnmikwzkZKsV65Xg+sOEzhopLw +mdhRNCs1n+wlyDVLaDEzaJ6hCy6vLBBz3dCgx0TYb5XhDZRxaQDtAShYZKFga7GX +VjGrQGR7gUx9u1mBZ18htVfWHMiKXOA5dV0baGeogonlpM1yO0bX+fC3LczDJ4xx +XVh9KdtPvoL6o3QwcjAbBgNVHREEFDASghB0ZXN0LmV4YW1wbGUuY29tMBMGA1Ud +JQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBT0/6WofTDoPNW0LcUKwBOs9io/RjAf +BgNVHSMEGDAWgBS8lHghIhqWkWXQpqMXyXZHanClyDAKBggqhkjOPQQDBAOBjAAw +gYgCQgEOfYyHxBdX2HuMF4Jb/UIL3XAEygpfZR2C5LXXRnMpAM8Cbae1VZGZDRzU +W7wpgcI/ybtbeJ/msoxAAW9EeUkG9gJCAJ3/gCSDvuMxs8j6r1XSU+Auo8POC9pc +8hUboSqsg6ImkNdK4hfUnSxwmk9m1NPcf6QR7zaE0w57INcE3AWl/qDa +-----END CERTIFICATE----- diff --git a/crates/static_ct_api/tests/p521-root-cert.pem b/crates/static_ct_api/tests/p521-root-cert.pem new file mode 100644 index 00000000..559996d3 --- /dev/null +++ b/crates/static_ct_api/tests/p521-root-cert.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICFzCCAXigAwIBAgIUdVsoIPHv1PRmtSC3wOHG6oNQWggwCgYIKoZIzj0EAwQw +HTEbMBkGA1UEAwwSVGVzdCBQLTUyMSBSb290IENBMB4XDTI2MDQxMDE2NTUyNVoX +DTM2MDQwNzE2NTUyNVowHTEbMBkGA1UEAwwSVGVzdCBQLTUyMSBSb290IENBMIGb +MBAGByqGSM49AgEGBSuBBAAjA4GGAAQAxV9DAmxQrou7Uef5qRNgHpWlwJjLU5po +Px7QFuFhnflgrNEwniTCsrF0G/MLTDJSAFx9ktLTDRb9f7G1n2MFFFsAGqXtAxa9 +cl83dJVZQp4NULAphGwUe9B57+sSmNe1Lzqwhv/m3iNRBAD2Jdwx4wmPnHsq/KLf +W4pYEo7Pcj6A5yCjUzBRMB8GA1UdIwQYMBaAFLyUeCEiGpaRZdCmoxfJdkdqcKXI +MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLyUeCEiGpaRZdCmoxfJdkdqcKXI +MAoGCCqGSM49BAMEA4GMADCBiAJCASZ/7YzjIGLsIn8KTzjZakLJ3q12xBWdYpo8 +V7Lto4Hdw+HITe56mi/2dwXXiAuJ6agNnwk7AkYFKriPsvoqXoswAkIAqXzMGxAG +q3lTFHjaAuys8TqloeVvdpKWebsfeLeax3fr3aUXCx/jAORp6arNDh1RhKWEnFZj +ajby8XqhMasfAHs= +-----END CERTIFICATE----- diff --git a/crates/tlog_tiles/src/checkpoint.rs b/crates/tlog_tiles/src/checkpoint.rs index ae0c1c58..6fb4e280 100644 --- a/crates/tlog_tiles/src/checkpoint.rs +++ b/crates/tlog_tiles/src/checkpoint.rs @@ -37,7 +37,7 @@ use crate::{tlog::Hash, 
HashReader, TlogError, UnixTimestamp}; use base64::{prelude::BASE64_STANDARD, Engine}; use ed25519_dalek::{Signer, SigningKey as Ed25519SigningKey}; -use rand::{seq::SliceRandom, Rng}; +use rand::{seq::SliceRandom, Rng, RngExt}; use sha2::{Digest, Sha256}; use signed_note::{ Ed25519NoteVerifier, KeyName, Note, NoteError, NoteSignature, NoteVerifier, VerifierList, @@ -508,13 +508,13 @@ impl TreeWithTimestamp { /// Clients MUST ignore unknown signatures, and including some "grease" ones /// ensures they do. fn gen_grease_signatures(origin: &str, rng: &mut impl Rng) -> Vec { - let mut g1 = vec![0u8; 5 + rng.gen_range(0..100)]; + let mut g1 = vec![0u8; 5 + rng.random_range(0..100)]; rng.fill(&mut g1[..]); - let mut g2 = vec![0u8; 5 + rng.gen_range(0..100)]; + let mut g2 = vec![0u8; 5 + rng.random_range(0..100)]; let mut hasher = Sha256::new(); hasher.update(b"grease\n"); - hasher.update([rng.gen()]); + hasher.update([rng.random::()]); let h = hasher.finalize(); g2[..4].copy_from_slice(&h[..4]); rng.fill(&mut g2[4..]); @@ -547,8 +547,6 @@ fn gen_grease_signatures(origin: &str, rng: &mut impl Rng) -> Vec #[cfg(test)] mod tests { - use rand::rngs::OsRng; - use super::*; use crate::tlog::record_hash; @@ -643,8 +641,6 @@ mod tests { #[test] fn test_sign_verify() { - let mut rng = OsRng; - let origin = "example.com/origin"; let timestamp = 100; let tree_size = 4; @@ -652,10 +648,12 @@ mod tests { // Make a tree head and sign it let tree = TreeWithTimestamp::new(tree_size, record_hash(b"hello world"), timestamp); let signer = { - let sk = Ed25519SigningKey::generate(&mut rng); + let sk = Ed25519SigningKey::generate(&mut rand::rng()); Ed25519CheckpointSigner::new(KeyName::new("my-signer".into()).unwrap(), sk).unwrap() }; - let checkpoint = tree.sign(origin, &[], &[&signer], &mut rng).unwrap(); + let checkpoint = tree + .sign(origin, &[], &[&signer], &mut rand::rng()) + .unwrap(); // Now verify the checkpoint let verifier = signer.verifier(); diff --git a/crates/tlog_tiles/src/cosignature_v1.rs b/crates/tlog_tiles/src/cosignature_v1.rs index e0bfbdab..3d0253e1 100644 --- a/crates/tlog_tiles/src/cosignature_v1.rs +++ b/crates/tlog_tiles/src/cosignature_v1.rs @@ -146,13 +146,10 @@ mod tests { use crate::{open_checkpoint, record_hash, TreeWithTimestamp}; use super::*; - use rand::rngs::OsRng; use signed_note::VerifierList; #[test] fn test_cosignature_v1_sign_verify() { - let mut rng = OsRng; - let origin = "example.com/origin"; let timestamp = 100; let tree_size = 4; @@ -160,11 +157,13 @@ mod tests { // Make a tree head and sign it let tree = TreeWithTimestamp::new(tree_size, record_hash(b"hello world"), timestamp); let signer = { - let sk = Ed25519SigningKey::generate(&mut rng); + let sk = Ed25519SigningKey::generate(&mut rand::rng()); let name = KeyName::new("my-signer".into()).unwrap(); CosignatureV1CheckpointSigner::new(name, sk) }; - let checkpoint = tree.sign(origin, &[], &[&signer], &mut rng).unwrap(); + let checkpoint = tree + .sign(origin, &[], &[&signer], &mut rand::rng()) + .unwrap(); // Now verify the signed checkpoint let verifier = signer.verifier(); diff --git a/crates/tlog_tiles/src/entries.rs b/crates/tlog_tiles/src/entries.rs index a0906d30..7d76c873 100644 --- a/crates/tlog_tiles/src/entries.rs +++ b/crates/tlog_tiles/src/entries.rs @@ -1,7 +1,7 @@ use length_prefixed::{ReadLengthPrefixedBytesExt, WriteLengthPrefixedBytesExt}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sha2::{Digest, Sha256}; -use std::{io::Read, marker::PhantomData}; +use std::{fmt::Debug, io::Read, 
marker::PhantomData}; use crate::{Hash, PathElem, TlogError}; @@ -14,9 +14,13 @@ pub type UnixTimestamp = u64; /// Index of a leaf in the Merkle tree. pub type LeafIndex = u64; -/// Metadata from sequencing that can optionally be incorporated into a -/// `PendingLogEntry` to derive a `LogEntry`. This metadata is also transmitted -/// from the sequencing backend to the frontend to return to the caller. +/// Default sequence metadata type: `(LeafIndex, UnixTimestamp)`. +/// +/// Used by all tlog applications that don't need tree-size information. +/// Applications that need additional metadata (e.g. the IETF MTC worker, which +/// needs `old_tree_size` and `new_tree_size` to compute subtree signature keys +/// without enumeration) define their own type via the [`LogEntry::Metadata`] +/// associated type. pub type SequenceMetadata = (LeafIndex, UnixTimestamp); /// An opaque `PendingLogEntry` that can be passed around without requiring full @@ -56,12 +60,40 @@ pub trait LogEntry: core::fmt::Debug + Sized { /// The error type for [`Self::parse_from_tile_entry`] type ParseError: std::error::Error + Send + Sync + 'static; + /// The metadata produced by the sequencer for each entry. Transmitted from + /// the sequencing backend to the frontend as the response to an `add-entry` + /// request. + /// + /// Most applications use the default [`SequenceMetadata`] = `(LeafIndex, + /// UnixTimestamp)`. Applications that need additional sequencer-computed + /// values (e.g. tree sizes for subtree key lookup) define their own type. + type Metadata: Serialize + + DeserializeOwned + + Send + + Sync + + Clone + + Copy + + Debug + + Default + + 'static; + + /// Construct a [`Self::Metadata`] value from the sequencer's raw output. + /// + /// Called once per entry at sequencing time. The default implementation + /// for `SequenceMetadata` ignores `old_tree_size` and `new_tree_size`. + fn make_metadata( + leaf_index: LeafIndex, + timestamp: UnixTimestamp, + old_tree_size: u64, + new_tree_size: u64, + ) -> Self::Metadata; + /// Returns an optional initial entry to add into the log. This is used for /// the initial `null_entry` in Merkle Tree Certificates, but likely not /// useful anywhere else. fn initial_entry() -> Option; - fn new(pending: Self::Pending, metadata: SequenceMetadata) -> Self; + fn new(pending: Self::Pending, metadata: Self::Metadata) -> Self; /// Returns the Merkle tree leaf hash for this entry. For tlog-tiles, this is the Merkle Tree Hash /// (according to ) @@ -155,12 +187,22 @@ impl LogEntry for TlogTilesLogEntry { const REQUIRE_CHECKPOINT_TIMESTAMP: bool = false; type Pending = TlogTilesPendingLogEntry; type ParseError = TlogError; + type Metadata = SequenceMetadata; + + fn make_metadata( + leaf_index: LeafIndex, + timestamp: UnixTimestamp, + _old_tree_size: u64, + _new_tree_size: u64, + ) -> Self::Metadata { + (leaf_index, timestamp) + } fn initial_entry() -> Option { None } - fn new(pending: Self::Pending, _metadata: SequenceMetadata) -> Self { + fn new(pending: Self::Pending, _metadata: Self::Metadata) -> Self { Self { inner: pending } } diff --git a/crates/tlog_tiles/src/tlog.rs b/crates/tlog_tiles/src/tlog.rs index b6820329..74a52313 100644 --- a/crates/tlog_tiles/src/tlog.rs +++ b/crates/tlog_tiles/src/tlog.rs @@ -544,6 +544,60 @@ pub fn verify_subtree_inclusion_proof( verify_inclusion_proof(proof, n.hi - n.lo, n_hash, leaf_index - n.lo, leaf_hash) } +/// Evaluate a subtree inclusion proof, returning the expected subtree hash. 
+/// +/// Implements the "Evaluating a Subtree Inclusion Proof" procedure from +/// draft-ietf-plants-merkle-tree-certs §4.3.2. Given the proof hashes and +/// the leaf hash, it derives the subtree root without requiring it as input. +/// The caller can then verify the result against an external commitment such +/// as a cosignature. +/// +/// This is the complement of [`verify_subtree_inclusion_proof`], which takes +/// the expected root as input and checks equality. +/// +/// # Errors +/// +/// Returns an error if `leaf_index` is outside the subtree `n` or the proof +/// is malformed. +pub fn evaluate_subtree_inclusion_proof( + proof: &Proof, + n: &Subtree, + leaf_index: u64, + leaf_hash: Hash, +) -> Result { + let tree_size = n.hi - n.lo; + let index = leaf_index + .checked_sub(n.lo) + .ok_or(TlogError::InvalidProof)?; + if index >= tree_size { + return Err(TlogError::InvalidProof); + } + let mut f_n = index; + let mut s_n = tree_size - 1; + let mut r = leaf_hash; + for p in proof { + if s_n == 0 { + return Err(TlogError::InvalidProof); + } + if lsb_set(f_n) || f_n == s_n { + r = node_hash(*p, r); + while !lsb_set(f_n) { + f_n >>= 1; + s_n >>= 1; + } + } else { + r = node_hash(r, *p); + } + f_n >>= 1; + s_n >>= 1; + } + if s_n == 0 { + Ok(r) + } else { + Err(TlogError::InvalidProof) + } +} + /// Returns the proof that the tree of size `n` contains as a prefix all the /// records from the tree of smaller size `m`. /// diff --git a/crates/x509_util/Cargo.toml b/crates/x509_util/Cargo.toml index db09df5f..3b0223d2 100644 --- a/crates/x509_util/Cargo.toml +++ b/crates/x509_util/Cargo.toml @@ -10,11 +10,18 @@ repository.workspace = true description.workspace = true [dependencies] +const-oid.workspace = true der.workspace = true +p256.workspace = true +p384.workspace = true +p521.workspace = true +pkcs8.workspace = true +rsa.workspace = true sha2.workspace = true +signature.workspace = true +spki.workspace = true thiserror.workspace = true x509-cert.workspace = true -x509-verify.workspace = true [dev-dependencies] chrono.workspace = true diff --git a/crates/x509_util/src/lib.rs b/crates/x509_util/src/lib.rs index fc661c91..908acb60 100644 --- a/crates/x509_util/src/lib.rs +++ b/crates/x509_util/src/lib.rs @@ -5,12 +5,12 @@ use der::{Decode, Encode, Error as DerError}; use sha2::{Digest, Sha256}; +use signature::Verifier; use std::collections::{hash_map::Entry, HashMap}; use x509_cert::{ ext::pkix::{AuthorityKeyIdentifier, BasicConstraints, SubjectKeyIdentifier}, Certificate, }; -use x509_verify::VerifyingKey; /// Converts a vector of certificates into an array of DER-encoded certificates. /// @@ -59,12 +59,18 @@ impl CertPool { /// /// Returns an error if there are issues DER-encoding certificate extensions. pub fn find_potential_parents(&self, cert: &Certificate) -> Result<&[usize], DerError> { - if let Some((_, aki)) = cert.tbs_certificate.get::()? { + if let Some((_, aki)) = cert + .tbs_certificate() + .get_extension::()? + { if let Some(indexes) = self.by_subject_key_id.get(&aki.to_der()?) 
{ return Ok(indexes); } } - if let Some(indexes) = self.by_name.get(&cert.tbs_certificate.issuer.to_string()) { + if let Some(indexes) = self + .by_name + .get(&cert.tbs_certificate().issuer().to_string()) + { return Ok(indexes); } Ok(&[]) @@ -82,10 +88,13 @@ impl CertPool { let idx = self.certs.len(); e.insert(idx); self.by_name - .entry(cert.tbs_certificate.subject.to_string()) + .entry(cert.tbs_certificate().subject().to_string()) .or_default() .push(idx); - if let Some((_, ski)) = cert.tbs_certificate.get::()? { + if let Some((_, ski)) = cert + .tbs_certificate() + .get_extension::()? + { self.by_subject_key_id .entry(ski.to_der()?) .or_default() @@ -264,8 +273,8 @@ where // Check whether the leaf expiry date is within the acceptable range. let not_after = u64::try_from( - leaf.tbs_certificate - .validity + leaf.tbs_certificate() + .validity() .not_after .to_unix_duration() .as_millis(), @@ -335,7 +344,7 @@ where let Some(path) = find_path_to_root(current_cert, roots, validated_intermediates.len())? else { return Err(ValidationError::NoPathToTrustedRoot { - to_verify_issuer: current_cert.tbs_certificate.issuer.to_string(), + to_verify_issuer: current_cert.tbs_certificate().issuer().to_string(), } .into()); }; @@ -417,7 +426,7 @@ fn find_path_to_root( /// Verify that a cert is well-formed according to RFC 5280. fn check_well_formedness(cert: &Certificate) -> Result<(), ValidationError> { // Reject mismatched signature algorithms: https://github.com/google/certificate-transparency-go/pull/702. - if cert.signature_algorithm != cert.tbs_certificate.signature { + if cert.signature_algorithm() != cert.tbs_certificate().signature() { return Err(ValidationError::MismatchingSigAlg); } Ok(()) @@ -431,23 +440,99 @@ fn check_well_formedness(cert: &Certificate) -> Result<(), ValidationError> { /// root CA certificate, using the chain of intermediate CA certificates /// provided by the submitter. /// ``` +/// Verify that `issuer` signed `child` by dispatching on the signature algorithm OID. +/// +/// Supported algorithms: ECDSA P-256, ECDSA P-384, ECDSA P-521, RSA PKCS#1 v1.5. +/// Returns `false` for unsupported algorithms or on verification failure. fn is_link_valid(child: &Certificate, issuer: &Certificate) -> bool { - // Currently paths are built by comparing - // child.tbs_certificate.issuer.to_string() + // Note: chain links are discovered by comparing + // child.tbs_certificate().issuer().to_string() // to - // issuer.tbs_certificate.subject.to_string(). + // issuer.tbs_certificate().subject().to_string(). // When these are equal, there is a plausible link between these. This is NOT the actual // algorithm for determining whether a link is valid. A discussion on the correct algorithm can - // be found here + // be found here: // https://github.com/golang/go/issues/31440#issuecomment-537222858 // The short version is: many clients do byte-by-byte comparison. This to_string() comparison is // strictly laxer than that. Which is probably fine for MTC and (static) CT use cases. 
- // Verify the issuer's signature on the child cert - if let Ok(key) = VerifyingKey::try_from(issuer) { - key.verify_strict(child).is_ok() - } else { - false + use const_oid::db::rfc5912::{ + ECDSA_WITH_SHA_256, ECDSA_WITH_SHA_384, ECDSA_WITH_SHA_512, SHA_256_WITH_RSA_ENCRYPTION, + SHA_384_WITH_RSA_ENCRYPTION, SHA_512_WITH_RSA_ENCRYPTION, + }; + + let Ok(tbs_der) = child.tbs_certificate().to_der() else { + return false; + }; + let Some(sig_bytes) = child.signature().as_bytes() else { + return false; + }; + let Ok(spki_der) = issuer.tbs_certificate().subject_public_key_info().to_der() else { + return false; + }; + let sig_alg = child.signature_algorithm().oid; + + match sig_alg { + ECDSA_WITH_SHA_256 | ECDSA_WITH_SHA_384 | ECDSA_WITH_SHA_512 => { + let Ok(spki) = spki::SubjectPublicKeyInfoRef::try_from(spki_der.as_ref()) else { + return false; + }; + if let Ok(vk) = p256::ecdsa::VerifyingKey::try_from(spki) { + p256::ecdsa::DerSignature::try_from(sig_bytes) + .map(|sig| vk.verify(&tbs_der, &sig).is_ok()) + .unwrap_or(false) + } else if let Ok(spki) = spki::SubjectPublicKeyInfoRef::try_from(spki_der.as_ref()) { + if let Ok(vk) = p384::ecdsa::VerifyingKey::try_from(spki) { + p384::ecdsa::DerSignature::try_from(sig_bytes) + .map(|sig| vk.verify(&tbs_der, &sig).is_ok()) + .unwrap_or(false) + } else if let Ok(spki) = spki::SubjectPublicKeyInfoRef::try_from(spki_der.as_ref()) + { + if let Ok(vk) = p521::ecdsa::VerifyingKey::try_from(spki) { + p521::ecdsa::DerSignature::try_from(sig_bytes) + .map(|sig| vk.verify(&tbs_der, &sig).is_ok()) + .unwrap_or(false) + } else { + false + } + } else { + false + } + } else { + false + } + } + + SHA_256_WITH_RSA_ENCRYPTION | SHA_384_WITH_RSA_ENCRYPTION | SHA_512_WITH_RSA_ENCRYPTION => { + use rsa::{ + pkcs1v15::VerifyingKey as RsaVerifyingKey, pkcs8::DecodePublicKey, RsaPublicKey, + }; + use sha2::{Sha256, Sha384, Sha512}; + let Ok(rsa_key) = RsaPublicKey::from_public_key_der(&spki_der) else { + return false; + }; + match sig_alg { + SHA_256_WITH_RSA_ENCRYPTION => { + let vk = RsaVerifyingKey::::new(rsa_key); + rsa::pkcs1v15::Signature::try_from(sig_bytes) + .map(|sig| vk.verify(&tbs_der, &sig).is_ok()) + .unwrap_or(false) + } + SHA_384_WITH_RSA_ENCRYPTION => { + let vk = RsaVerifyingKey::::new(rsa_key); + rsa::pkcs1v15::Signature::try_from(sig_bytes) + .map(|sig| vk.verify(&tbs_der, &sig).is_ok()) + .unwrap_or(false) + } + _ => { + let vk = RsaVerifyingKey::::new(rsa_key); + rsa::pkcs1v15::Signature::try_from(sig_bytes) + .map(|sig| vk.verify(&tbs_der, &sig).is_ok()) + .unwrap_or(false) + } + } + } + _ => false, } } @@ -464,8 +549,8 @@ fn check_ca_basic_constraints( ) -> Result<(), ValidationError> { // Check the cert's basic constraints. if ca_cert - .tbs_certificate - .get::() + .tbs_certificate() + .get_extension::() .map_err(ValidationError::from)? .is_none_or(|(_, bc)| { // If the path length constraint is specified, check it. The @@ -504,7 +589,7 @@ mod tests { use super::*; use chrono::prelude::*; use der::DecodePem; - use x509_verify::x509_cert::Certificate; + use x509_cert::Certificate; fn parse_datetime(s: &str) -> UnixTimestamp { u64::try_from(DateTime::parse_from_rfc3339(s).unwrap().timestamp_millis()).unwrap() @@ -681,6 +766,12 @@ mod tests { "../../static_ct_api/tests/subleaf.misordered.chain"; false ); + test_validate_chain!( + valid_p521_chain; + "../../static_ct_api/tests/p521-root-cert.pem"; + "../../static_ct_api/tests/p521-leaf-cert.pem"; + None; None; false; 1; false + ); macro_rules! 
test_not_after { ($name:ident; $start:expr; $end:expr; $want_err:expr) => { @@ -703,7 +794,7 @@ mod tests { .unwrap(); // Get the subject DN before moving into pool - let subject_dn = root.tbs_certificate.subject.clone(); + let subject_dn = root.tbs_certificate().subject().clone(); // Create a pool with this root let pool = CertPool::new(vec![root]).unwrap(); @@ -714,7 +805,7 @@ mod tests { // Verify it's the same cert (compare subjects) assert_eq!( - found.unwrap().tbs_certificate.subject.to_string(), + found.unwrap().tbs_certificate().subject().to_string(), subject_dn.to_string() ); } @@ -732,7 +823,7 @@ mod tests { // Try to find by a different subject (use the leaf's issuer, which isn't in pool) let leaf = &pool.certs[0]; - let issuer_dn = &leaf.tbs_certificate.issuer; + let issuer_dn = &leaf.tbs_certificate().issuer(); // The issuer is not in the pool, so this should return None let not_found = pool.find_by_subject(issuer_dn);