diff --git a/.commitlintrc.yml b/.commitlintrc.yml new file mode 100644 index 0000000..320a584 --- /dev/null +++ b/.commitlintrc.yml @@ -0,0 +1,21 @@ +extends: + - '@commitlint/config-conventional' + +rules: + # Allow slightly longer subjects; we have descriptive messages. + header-max-length: [2, always, 100] + # Enforce lowercase type (feat, fix, ...) and allow common scopes. + type-enum: + - 2 + - always + - - build + - chore + - ci + - docs + - feat + - fix + - perf + - refactor + - revert + - style + - test \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d6b39d4..89cfb06 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,9 @@ env: jobs: test: + name: Test (${{ matrix.os }}) strategy: + fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} @@ -19,5 +21,54 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - - name: Run tests - run: cargo test --verbose + - run: cargo test --verbose + + features: + name: Feature matrix + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + targets: wasm32-unknown-unknown + - uses: Swatinem/rust-cache@v2 + - name: Build default + run: cargo build --verbose + - name: Build no_std (no default features) + run: cargo build --verbose --no-default-features + - name: Build with hash-idx + run: cargo build --verbose --features hash-idx + - name: Check wasm target + run: cargo check --verbose --target wasm32-unknown-unknown --no-default-features --features wasm + + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt, clippy + - uses: Swatinem/rust-cache@v2 + - run: cargo fmt --all -- --check + - run: cargo clippy --all-targets --all-features -- -D warnings + + docs: + name: Docs + runs-on: 
ubuntu-latest + env: + RUSTDOCFLAGS: -D warnings + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + - run: cargo doc --no-deps --all-features + + msrv: + name: MSRV (1.85) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@1.85.0 + - uses: Swatinem/rust-cache@v2 + - run: cargo check --all-features \ No newline at end of file diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml new file mode 100644 index 0000000..57918ab --- /dev/null +++ b/.github/workflows/commitlint.yml @@ -0,0 +1,16 @@ +name: Commitlint + +on: + pull_request: + types: [opened, reopened, edited, synchronize] + +jobs: + commitlint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: wagoid/commitlint-github-action@v6 + with: + configFile: .commitlintrc.yml \ No newline at end of file diff --git a/.github/workflows/release-plz.yml b/.github/workflows/release-plz.yml new file mode 100644 index 0000000..7a6e06c --- /dev/null +++ b/.github/workflows/release-plz.yml @@ -0,0 +1,49 @@ +name: Release-plz + +permissions: + pull-requests: write + contents: write + +on: + push: + branches: [main] + +jobs: + # Opens / updates the "release PR" that bumps versions and edits the CHANGELOG. + release-plz-pr: + name: Release-plz PR + runs-on: ubuntu-latest + if: ${{ github.repository_owner == 'spacesprotocol' }} + concurrency: + group: release-plz-${{ github.ref }} + cancel-in-progress: false + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: dtolnay/rust-toolchain@stable + - name: Run release-plz + uses: release-plz/action@v0.5 + with: + command: release-pr + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} + + # Tags + publishes to crates.io when a release commit lands on main. 
+ release-plz-release: + name: Release-plz publish + runs-on: ubuntu-latest + if: ${{ github.repository_owner == 'spacesprotocol' }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: dtolnay/rust-toolchain@stable + - name: Run release-plz + uses: release-plz/action@v0.5 + with: + command: release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..de22229 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,19 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [0.1.0] - 2026-04-16 + +Initial release on crates.io. + +### Features + +- Merkle-ized binary trie with MVCC concurrency (multi-reader, single-writer). +- Subtree accumulators with inclusion and exclusion proofs. +- `no_std` support (RISC0 zkVM compatible) via `default-features = false`. +- Optional wasm bindings behind the `wasm` feature. +- Optional sqlite-backed hash index sidecar behind the `hash-idx` feature for fast `prove` and `compute_root` on large trees. +- Snapshot iteration and rollback. \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 798108a..cdeadb9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,11 +2,25 @@ workspace = { members = ["example"] } [package] name = "spacedb" -version = "0.0.12" -edition = "2021" +version = "0.1.0" +edition = "2024" +rust-version = "1.85" description = "A cryptographically verifiable data store and universal accumulator for the Spaces protocol." 
repository = "https://github.com/spacesprotocol/spacedb" +homepage = "https://spacesprotocol.org" +documentation = "https://docs.rs/spacedb" +readme = "README.md" license = "Apache-2.0" +keywords = ["merkle", "trie", "accumulator", "verifiable", "no-std"] +categories = ["database-implementations", "cryptography", "data-structures"] +exclude = [ + "example/*", + "tests/data/*", + "target/*", + ".github/*", + ".commitlintrc.yml", + "release-plz.toml", +] [lib] crate-type = ["cdylib", "rlib"] @@ -35,3 +49,7 @@ std = ["libc", "hex", "bincode"] wasm = ["wasm-bindgen", "js-sys"] extras = [] hash-idx = ["std", "rusqlite"] + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/README.md b/README.md index e1194c7..f8466b5 100644 --- a/README.md +++ b/README.md @@ -95,6 +95,13 @@ Subtrees work in `no_std` environments utilizing the SHA256 accelerator when run spacedb = { version = "0.1", default-features = false } ``` +The `hash-idx` feature enables an optional sqlite-backed sidecar that accelerates `prove` and `compute_root` on large trees: + +```toml +[dependencies] +spacedb = { version = "0.1", features = ["hash-idx"] } +``` + ## Using Subtrees in wasm diff --git a/example/src/main.rs b/example/src/main.rs index e310884..52190f8 100644 --- a/example/src/main.rs +++ b/example/src/main.rs @@ -1,5 +1,5 @@ -use spacedb::{Result, db::Database }; use spacedb::tx::ProofType; +use spacedb::{db::Database, Result}; fn main() -> Result<()> { let db = Database::memory()?; @@ -29,7 +29,10 @@ fn main() -> Result<()> { let subtree = snapshot.prove(&keys_to_prove, ProofType::Standard)?; // Will have the exact same root as the snapshot - println!("Subtree root: {}", hex::encode(subtree.compute_root().unwrap())); + println!( + "Subtree root: {}", + hex::encode(subtree.compute_root().unwrap()) + ); // Prove inclusion assert!(subtree.contains(&db.hash("key0".as_bytes())).unwrap()); diff --git a/release-plz.toml b/release-plz.toml new file mode 
100644 index 0000000..328b872 --- /dev/null +++ b/release-plz.toml @@ -0,0 +1,56 @@ +[workspace] +# Changelog lives at repo root alongside Cargo.toml. +changelog_path = "CHANGELOG.md" +# Always regenerate the changelog from conventional commits. +changelog_update = true +# Open the release PR ready for review rather than as a draft. +pr_draft = false +# Title template for the release PR; uses conventional-commit style so commitlint accepts it. +pr_name = "chore(release): {{ version }}" + +[changelog] +header = """# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +""" + +body = """ +## [{{ version | trim_start_matches(pat=\"v\") }}]\ + {%- if release_link -%}\ + ({{ release_link }})\ + {% endif %} - {{ timestamp | date(format=\"%Y-%m-%d\") }} +{% for group, commits in commits | group_by(attribute=\"group\") %} +### {{ group | upper_first }} +{% for commit in commits %} +- {% if commit.scope %}*({{ commit.scope }})* {% endif %}\ + {% if commit.breaking %}[**breaking**] {% endif %}\ + {{ commit.message | upper_first }}\ +{% endfor %} +{% endfor %} +""" + +commit_parsers = [ + { message = "^feat", group = "Features" }, + { message = "^fix", group = "Bug Fixes" }, + { message = "^perf", group = "Performance" }, + { message = "^refactor", group = "Refactor" }, + { message = "^docs", group = "Documentation" }, + { message = "^test", group = "Tests" }, + { message = "^build", group = "Build" }, + { message = "^ci", group = "CI" }, + { message = "^chore\\(release\\)", skip = true }, + { message = "^chore", group = "Chore" }, + { message = "^revert", group = "Revert" }, +] + +[[package]] +name = "spacedb" +# Publish to crates.io on release commits. +publish = true +# Create a GitHub release alongside the tag. +git_release_enable = true +# Tag name template for releases (e.g. "v0.1.0"). 
+git_tag_name = "v{{ version }}" \ No newline at end of file diff --git a/src/db.rs b/src/db.rs index 5cb7284..af101a7 100644 --- a/src/db.rs +++ b/src/db.rs @@ -1,11 +1,10 @@ use crate::{ - Result, + Configuration, Hash, NodeHasher, Result, Sha256Hasher, fs::{FileBackend, StorageBackend}, node::NodeInner, tx::{ReadTransaction, WriteTransaction}, - Configuration, Hash, NodeHasher, Sha256Hasher, }; -use bincode::{config, error::DecodeError, Decode, Encode}; +use bincode::{Decode, Encode, config, error::DecodeError}; use sha2::{Digest as _, Sha256}; use std::{ fs::OpenOptions, @@ -47,6 +46,12 @@ pub struct Record { pub const EMPTY_RECORD: Record = Record { offset: 0, size: 0 }; +impl Default for DatabaseHeader { + fn default() -> Self { + Self::new() + } +} + impl DatabaseHeader { pub fn new() -> Self { Self { @@ -86,7 +91,7 @@ impl DatabaseHeader { let expected = hasher.finalize(); let actual = &bytes[len..len + 4]; - if &actual[..4] != &expected[..4] { + if actual[..4] != expected[..4] { return Err(DecodeError::Other("Checksum mismatch")); } @@ -94,8 +99,7 @@ impl DatabaseHeader { } pub(crate) fn len(&self) -> u64 { - - let chunks_required = (self.savepoint.len() + CHUNK_SIZE - 1) / CHUNK_SIZE; + let chunks_required = self.savepoint.len().div_ceil(CHUNK_SIZE); std::cmp::max(chunks_required * CHUNK_SIZE, HEADER_SIZE) } } @@ -173,9 +177,8 @@ impl Database { H::hash(data) } - pub(crate) fn recover_header( - file: &Box, - ) -> Result<(DatabaseHeader, bool)> { + #[allow(clippy::borrowed_box)] + pub(crate) fn recover_header(file: &Box) -> Result<(DatabaseHeader, bool)> { // Attempt to read from slot 0 let mut offset = 0; let bytes = file.read(offset, CHUNK_SIZE as usize)?; @@ -186,8 +189,7 @@ impl Database { // Didn't work, try backup offset = CHUNK_SIZE; let bytes = file.read(offset, CHUNK_SIZE as usize)?; - let header = DatabaseHeader::from_bytes(&bytes) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + let header = 
DatabaseHeader::from_bytes(&bytes).map_err(io::Error::other)?; Ok((header, true)) } @@ -211,8 +213,8 @@ impl Database { fn read_save_point(&self, record: Record) -> Result { let raw = self.file.read(record.offset, record.size as usize)?; - let (save_point, _) = bincode::decode_from_slice(&raw, config::standard()) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + let (save_point, _) = + bincode::decode_from_slice(&raw, config::standard()).map_err(io::Error::other)?; Ok(save_point) } @@ -296,7 +298,7 @@ impl Database { } // Sort by offset descending (most recent first) - index_files.sort_by(|a, b| b.0.cmp(&a.0)); + index_files.sort_by_key(|entry| core::cmp::Reverse(entry.0)); // Delete everything after the first `keep` for (_, path) in index_files.into_iter().skip(keep) { @@ -372,7 +374,9 @@ impl<'db, H: NodeHasher> Iterator for SnapshotIterator<'db, H> { type Item = Result>; fn next(&mut self) -> Option { match self.prev() { - Ok(Some(prev_savepoint)) => Some(Ok(ReadTransaction::new(self.db.clone(), prev_savepoint))), + Ok(Some(prev_savepoint)) => { + Some(Ok(ReadTransaction::new(self.db.clone(), prev_savepoint))) + } Ok(None) => None, Err(e) => Some(Err(e)), } @@ -394,9 +398,7 @@ impl SavePoint { pub fn len(&self) -> u64 { let meta_size = match &self.metadata { None => 0, - Some(m) => { - bincode::encode_to_vec(m, config::standard()).unwrap().len() - } + Some(m) => bincode::encode_to_vec(m, config::standard()).unwrap().len(), } as u64; let root_size = self.root.offset + self.root.size as u64; let save_point_size = self.previous_savepoint.offset + self.previous_savepoint.size as u64; diff --git a/src/encode.rs b/src/encode.rs index a907ec7..27ec034 100644 --- a/src/encode.rs +++ b/src/encode.rs @@ -1,8 +1,8 @@ +use crate::path::{BitLength, Path, PathSegment}; +use crate::subtree::{SubTreeNode, ValueOrHash}; use alloc::boxed::Box; use alloc::vec; use borsh::io::{Error as IoError, ErrorKind, Read, Write}; -use crate::path::{BitLength, Path, PathSegment}; 
-use crate::subtree::{SubTreeNode, ValueOrHash}; const NODE_LEAF: u8 = 0; const NODE_INTERNAL: u8 = 1; @@ -12,7 +12,10 @@ const LEAF_VALUE: u8 = 0; const LEAF_HASH: u8 = 1; /// Serializes a `SubTreeNode` into a writer. -pub(crate) fn serialize_node(node: &SubTreeNode, writer: &mut W) -> borsh::io::Result<()> { +pub(crate) fn serialize_node( + node: &SubTreeNode, + writer: &mut W, +) -> borsh::io::Result<()> { match node { SubTreeNode::Leaf { key, value_or_hash } => { writer.write_all(&[NODE_LEAF])?; @@ -29,7 +32,11 @@ pub(crate) fn serialize_node(node: &SubTreeNode, writer: &mut W) -> bo } } } - SubTreeNode::Internal { prefix, left, right } => { + SubTreeNode::Internal { + prefix, + left, + right, + } => { writer.write_all(&[NODE_INTERNAL])?; writer.write_all(prefix.as_bytes())?; serialize_node(left, writer)?; @@ -80,14 +87,18 @@ pub(crate) fn deserialize_node(reader: &mut R) -> borsh::io::Result { let mut bit_len = [0u8; 1]; reader.read_exact(&mut bit_len)?; - let byte_count = (bit_len[0] as usize + 7) / 8; + let byte_count = (bit_len[0] as usize).div_ceil(8); let mut seg = [0u8; 33]; seg[0] = bit_len[0]; reader.read_exact(&mut seg[1..byte_count + 1])?; let prefix = PathSegment(seg); let left = Box::new(deserialize_node(reader)?); let right = Box::new(deserialize_node(reader)?); - Ok(SubTreeNode::Internal { prefix, left, right }) + Ok(SubTreeNode::Internal { + prefix, + left, + right, + }) } NODE_HASH => { let mut hash = [0u8; 32]; diff --git a/src/fs.rs b/src/fs.rs index b08f225..c1ec102 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -1,7 +1,7 @@ // Uses flock on Unix and LockFile on Windows to ensure exclusive access to the database file. 
// based on https://github.com/cberner/redb/tree/master/src/tree_store/page_store/file_backend use crate::{ - db::{Record, SavePoint, EMPTY_RECORD, CHUNK_SIZE}, + db::{CHUNK_SIZE, EMPTY_RECORD, Record, SavePoint}, node::Node, }; use bincode::config; @@ -14,6 +14,9 @@ use std::{ pub trait StorageBackend: Sync + Send { fn len(&self) -> Result; + fn is_empty(&self) -> Result { + Ok(self.len()? == 0) + } fn set_len(&self, len: u64) -> Result<(), io::Error>; fn read(&self, offset: u64, len: usize) -> Result, io::Error>; fn sync_data(&self) -> Result<(), io::Error>; @@ -23,11 +26,8 @@ pub trait StorageBackend: Sync + Send { #[derive(Debug, Default)] pub struct MemoryBackend(RwLock>); -#[cfg(any(unix))] -use std::os::{ - fd::AsRawFd, - unix::fs::FileExt, -}; +#[cfg(unix)] +use std::os::{fd::AsRawFd, unix::fs::FileExt}; #[cfg(windows)] use std::os::windows::fs::FileExt; @@ -41,7 +41,7 @@ pub struct FileBackend { locked: bool, } -#[cfg(any(unix))] +#[cfg(unix)] impl FileBackend { pub fn new(file: File) -> Result { let fd = file.as_raw_fd(); @@ -54,7 +54,7 @@ impl FileBackend { "Database already open for writing", )) } else { - Err(err.into()) + Err(err) } } else { Ok(Self { file, locked: true }) @@ -62,11 +62,14 @@ impl FileBackend { } pub fn read_only(file: File) -> Self { - Self { file, locked: false } + Self { + file, + locked: false, + } } } -#[cfg(any(unix))] +#[cfg(unix)] impl Drop for FileBackend { fn drop(&mut self) { if self.locked { @@ -75,7 +78,7 @@ impl Drop for FileBackend { } } -#[cfg(any(unix))] +#[cfg(unix)] impl StorageBackend for FileBackend { fn len(&self) -> Result { Ok(self.file.metadata()?.len()) @@ -103,11 +106,17 @@ impl StorageBackend for FileBackend { #[cfg(windows)] impl FileBackend { pub fn new(file: File) -> Result { - Ok(Self { file, locked: false }) + Ok(Self { + file, + locked: false, + }) } pub fn read_only(file: File) -> Self { - Self { file, locked: false } + Self { + file, + locked: false, + } } } @@ -261,6 +270,9 @@ impl StorageBackend 
for MemoryBackend { } } +// Callers hold `Arc>`; borrowing through the `Box` +// avoids forcing an extra manual deref at each call site. +#[allow(clippy::borrowed_box)] pub struct WriteBuffer<'file, const SIZE: usize> { file: &'file Box, buffer: Box<[u8; SIZE]>, @@ -269,6 +281,7 @@ pub struct WriteBuffer<'file, const SIZE: usize> { } impl<'file, const SIZE: usize> WriteBuffer<'file, SIZE> { + #[allow(clippy::borrowed_box)] pub(crate) fn new(file: &'file Box, file_len: u64) -> Self { Self { file, @@ -308,7 +321,8 @@ impl<'file, const SIZE: usize> WriteBuffer<'file, SIZE> { self.buffer[remaining_len..CHUNK_SIZE as usize].fill(0); self.file.set_len(self.file_len + CHUNK_SIZE)?; - self.file.write(self.file_len, &self.buffer[0..CHUNK_SIZE as usize])?; + self.file + .write(self.file_len, &self.buffer[0..CHUNK_SIZE as usize])?; self.file_len += CHUNK_SIZE; } @@ -318,13 +332,8 @@ impl<'file, const SIZE: usize> WriteBuffer<'file, SIZE> { pub fn write_save_point(&mut self, save_point: &SavePoint) -> Result { let config = config::standard(); - let size = - bincode::encode_into_slice(save_point, &mut self.tail(), config).map_err(|e| { - io::Error::new( - io::ErrorKind::Other, - format!("Failed to encode save point: {}", e), - ) - })?; + let size = bincode::encode_into_slice(save_point, self.tail(), config) + .map_err(|e| io::Error::other(format!("Failed to encode save point: {}", e)))?; let record = Record { offset: self.file_len + self.len as u64, size: size as u32, @@ -350,12 +359,8 @@ impl<'file, const SIZE: usize> WriteBuffer<'file, SIZE> { let size = { let inner = node.inner.as_mut().unwrap(); - bincode::encode_into_slice(inner, &mut self.tail(), config).map_err(|e| { - io::Error::new( - io::ErrorKind::Other, - format!("Failed to encode node: {}", e), - ) - })? + bincode::encode_into_slice(inner, self.tail(), config) + .map_err(|e| io::Error::other(format!("Failed to encode node: {}", e)))? 
}; let node_id = Record { diff --git a/src/lib.rs b/src/lib.rs index a86a78b..cf69259 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -61,7 +61,7 @@ pub enum VerifyError { #[derive(Debug)] pub enum EncodeError { BufferTooSmall, - InvalidData(&'static str) + InvalidData(&'static str), } impl core::fmt::Display for Error { @@ -70,7 +70,7 @@ impl core::fmt::Display for Error { #[cfg(feature = "std")] Error::IO(err) => write!(f, "IO error: {}", err), Error::Verify(err) => write!(f, "Verification error: {}", err), - Error::Encode(err) => write!(f, "Encode error: {}", err) + Error::Encode(err) => write!(f, "Encode error: {}", err), } } } @@ -98,6 +98,12 @@ impl core::fmt::Display for EncodeError { } } +impl Default for Configuration { + fn default() -> Self { + Self::new() + } +} + impl Configuration { pub fn new() -> Self { Self { @@ -146,8 +152,8 @@ impl NodeHasher for Sha256Hasher { fn hash_leaf(key: &[u8], value_hash: &[u8]) -> Hash { let mut hasher = Sha256::new(); hasher.update([LEAF_TAG]); - hasher.update(&key); - hasher.update(&value_hash); + hasher.update(key); + hasher.update(value_hash); hasher.finalize().as_slice().try_into().unwrap() } diff --git a/src/node.rs b/src/node.rs index 4e187fd..187329f 100644 --- a/src/node.rs +++ b/src/node.rs @@ -1,13 +1,14 @@ use crate::{ - db::{Record, EMPTY_RECORD}, - path::{Path, PathSegment, PathSegmentInner}, Hash, + db::{EMPTY_RECORD, Record}, + path::{Path, PathSegment, PathSegmentInner}, }; use bincode::{ + Decode, Encode, de::Decoder, enc::Encoder, error::{DecodeError, EncodeError}, - impl_borrow_decode, Decode, Encode, + impl_borrow_decode, }; #[derive(Clone, Debug)] @@ -68,7 +69,7 @@ impl Node { #[inline] pub fn mem_size(&self) -> usize { - let base_size = std::mem::size_of_val(&self); + let base_size = std::mem::size_of_val(self); let inner_size = std::mem::size_of_val(&self.inner) + match &self.inner { Some(NodeInner::Leaf { value, .. 
}) => value.capacity(), @@ -132,7 +133,7 @@ impl Decode for NodeInner { } } -impl<'a> Encode for &'a mut NodeInner { +impl Encode for &mut NodeInner { fn encode(&self, encoder: &mut E) -> Result<(), EncodeError> { match self { NodeInner::Leaf { key, value } => { diff --git a/src/path.rs b/src/path.rs index 40edc13..ae6a932 100644 --- a/src/path.rs +++ b/src/path.rs @@ -188,7 +188,7 @@ impl> PathUtils for T { fn split_point(&self, start: usize, b: S) -> Option { let max_bit_len = core::cmp::min(self.bit_len(), b.bit_len()); let (src_start_byte, src_start_bit, seg_end_byte) = - (start / 8, start % 8, (max_bit_len + 7) / 8); + (start / 8, start % 8, max_bit_len.div_ceil(8)); let mut count = 0; // Aligned on byte boundary @@ -223,9 +223,9 @@ impl> PathUtils for T { let count = core::cmp::min(count as usize, max_bit_len); if count == max_bit_len { - return None; + None } else { - return Some(count); + Some(count) } } } @@ -275,12 +275,12 @@ impl> BitLength for Path { #[inline(always)] fn inner(&self) -> &[u8] { - &self.0.as_ref() + self.0.as_ref() } #[inline(always)] fn as_bytes(&self) -> &[u8] { - &self.0.as_ref() + self.0.as_ref() } } @@ -297,7 +297,7 @@ impl> BitLength for PathSegment { #[inline(always)] fn as_bytes(&self) -> &[u8] { - let byte_len = (self.bit_len() + 7) / 8; + let byte_len = self.bit_len().div_ceil(8); &self.0.as_ref()[..(byte_len + 1)] } } @@ -322,11 +322,10 @@ pub trait BitLength { fn as_bytes(&self) -> &[u8]; } - #[cfg(test)] mod tests { - use core::fmt::Display; use crate::path::{BitLength, Direction, PathSegment, PathUtils}; + use core::fmt::Display; impl> Display for PathSegment { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -343,10 +342,10 @@ mod tests { #[test] fn test_extend() { - let mut parent = PathSegment([0u8;33]); + let mut parent = PathSegment([0u8; 33]); parent.set_len(5); - let mut child = PathSegment([0u8;33]); + let mut child = PathSegment([0u8; 33]); child.set_len(10); child.0[1] = 0b1111_1010; @@ -356,13 
+355,13 @@ mod tests { #[test] fn test_extend_from_byte() { - let mut segment = PathSegment([0u8;33]); + let mut segment = PathSegment([0u8; 33]); segment.set_len(2); let inner = segment.as_mut_inner(); inner[0] = 0b1100_0000; - segment.extend_from_byte(0b1000_1000,3); + segment.extend_from_byte(0b1000_1000, 3); assert_eq!(segment.to_string(), "11100"); segment.extend_from_byte(0b1111_1111, 8); @@ -378,7 +377,10 @@ mod tests { assert_eq!(segment.to_string(), "111001111111100111111110000"); segment.set_len(segment.bit_len() + 2); - assert_eq!(segment.to_string(), "11100111111110011111111000000", - "trailing bits must be cleared"); + assert_eq!( + segment.to_string(), + "11100111111110011111111000000", + "trailing bits must be cleared" + ); } } diff --git a/src/subtree.rs b/src/subtree.rs index 0741016..9d62c9b 100644 --- a/src/subtree.rs +++ b/src/subtree.rs @@ -1,11 +1,14 @@ use crate::{ + Hash, NodeHasher, Result, VerifyError, path::{BitLength, Direction, Path, PathSegment, PathSegmentInner, PathUtils}, - Result, Hash, NodeHasher, VerifyError }; use alloc::{boxed::Box, vec, vec::Vec}; +use borsh::{ + BorshDeserialize, BorshSerialize, + io::{Read, Write}, +}; use core::marker::PhantomData; -use borsh::{BorshDeserialize, BorshSerialize, io::{Read, Write}}; #[derive(Clone, Debug)] pub struct SubTree { @@ -13,7 +16,7 @@ pub struct SubTree { pub _marker: PhantomData, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub enum SubTreeNode { Leaf { key: Path, @@ -25,6 +28,7 @@ pub enum SubTreeNode { right: Box, }, Hash(Hash), + #[default] None, } @@ -43,11 +47,15 @@ impl SubTree { } pub fn to_vec(&self) -> Result> { - borsh::to_vec(self).map_err(|_| crate::Error::Encode(crate::EncodeError::InvalidData("serialization failed"))) + borsh::to_vec(self).map_err(|_| { + crate::Error::Encode(crate::EncodeError::InvalidData("serialization failed")) + }) } pub fn from_slice(buf: &[u8]) -> Result { - borsh::from_slice(buf).map_err(|_| 
crate::Error::Encode(crate::EncodeError::InvalidData("deserialization failed"))) + borsh::from_slice(buf).map_err(|_| { + crate::Error::Encode(crate::EncodeError::InvalidData("deserialization failed")) + }) } pub fn compute_root(&self) -> Result { @@ -64,10 +72,7 @@ impl SubTree { #[inline(always)] pub fn is_empty(&self) -> bool { - match self.root { - SubTreeNode::None => true, - _ => false, - } + matches!(self.root, SubTreeNode::None) } /// Inserts a key-value pair. Returns error if key already exists. @@ -94,7 +99,10 @@ impl SubTree { let mut depth = 0; loop { match node { - SubTreeNode::Leaf { key: node_key, value_or_hash: existing } => { + SubTreeNode::Leaf { + key: node_key, + value_or_hash: existing, + } => { // Same key - replace value if key.0 == node_key.0 { let old = core::mem::replace(existing, value_or_hash); @@ -104,7 +112,7 @@ impl SubTree { // A split point must exist: compress common path into an internal node let point = node_key.split_point(0, key).unwrap(); let prefix = PathSegment::from_path(*node_key, depth, point); - let depth = depth + prefix.bit_len() as usize; + let depth = depth + prefix.bit_len(); let direction = key.direction(depth); let current_node = core::mem::take(node); let new_node = SubTreeNode::Leaf { key, value_or_hash }; @@ -126,7 +134,7 @@ impl SubTree { } => { let point = key.split_point(depth, *prefix); if point.is_none() { - depth = depth + prefix.bit_len() as usize; + depth += prefix.bit_len(); match key.direction(depth) { Direction::Right => node = right, Direction::Left => node = left, @@ -147,7 +155,7 @@ impl SubTree { right: core::mem::take(right), }; - depth = depth + parent_prefix.bit_len(); + depth += parent_prefix.bit_len(); let new_node = SubTreeNode::Leaf { key, value_or_hash }; let (lefty, righty) = match key.direction(depth) { @@ -193,7 +201,7 @@ impl SubTree { if key.split_point(depth, *prefix).is_some() { return Ok(false); } - depth = depth + prefix.bit_len() as usize; + depth += prefix.bit_len(); match 
key.direction(depth) { Direction::Left => node = left, Direction::Right => node = right, @@ -220,13 +228,17 @@ impl SubTree { fn delete_node(node: SubTreeNode, key: &Path<&Hash>, depth: usize) -> Result { match node { - SubTreeNode::Leaf { key : node_key, .. } => { + SubTreeNode::Leaf { key: node_key, .. } => { if node_key.0 != *key.0 { return Err(VerifyError::KeyNotFound.into()); } Ok(SubTreeNode::None) } - SubTreeNode::Internal { prefix, left, right } => { + SubTreeNode::Internal { + prefix, + left, + right, + } => { let depth = depth + prefix.bit_len(); match key.direction(depth) { Direction::Right => { @@ -235,11 +247,9 @@ impl SubTree { SubTreeNode::None => { // Right subtree was deleted, move left subtree up Ok(Self::lift_node(prefix, *left, Direction::Left)?) - }, - SubTreeNode::Hash(_) => { - return Err(VerifyError::IncompleteProof.into()) - }, - other => { + } + SubTreeNode::Hash(_) => Err(VerifyError::IncompleteProof.into()), + other => { // Right node was updated Ok(SubTreeNode::Internal { prefix, @@ -255,11 +265,9 @@ impl SubTree { SubTreeNode::None => { // left subtree was deleted, move right subtree up Ok(Self::lift_node(prefix, *right, Direction::Right)?) - }, - SubTreeNode::Hash(_) => { - return Err(VerifyError::IncompleteProof.into()) - }, - other => { + } + SubTreeNode::Hash(_) => Err(VerifyError::IncompleteProof.into()), + other => { // left node was updated Ok(SubTreeNode::Internal { prefix, @@ -276,17 +284,23 @@ impl SubTree { } } - fn lift_node(mut parent_prefix: PathSegment, node: SubTreeNode, direction: Direction) -> Result { + fn lift_node( + mut parent_prefix: PathSegment, + node: SubTreeNode, + direction: Direction, + ) -> Result { match node { - SubTreeNode::Leaf { .. } => { - Ok(node.clone()) - } - SubTreeNode::Internal { prefix, left, right } => { + SubTreeNode::Leaf { .. 
} => Ok(node.clone()), + SubTreeNode::Internal { + prefix, + left, + right, + } => { match direction { Direction::Left => parent_prefix.extend_from_byte(0, 1), - Direction::Right => parent_prefix.extend_from_byte(0b1000_0000, 1) + Direction::Right => parent_prefix.extend_from_byte(0b1000_0000, 1), } - parent_prefix.extend(prefix.clone()); + parent_prefix.extend(prefix); Ok(SubTreeNode::Internal { prefix: parent_prefix, @@ -294,12 +308,8 @@ impl SubTree { right, }) } - SubTreeNode::Hash(_) => { - Err(VerifyError::IncompleteProof.into()) - } - SubTreeNode::None => { - Err(VerifyError::IncompleteProof.into()) - } + SubTreeNode::Hash(_) => Err(VerifyError::IncompleteProof.into()), + SubTreeNode::None => Err(VerifyError::IncompleteProof.into()), } } @@ -321,10 +331,8 @@ impl SubTree { let right_hash = Self::hash_node(right)?; Ok(H::hash_internal(prefix.as_bytes(), &left_hash, &right_hash)) } - SubTreeNode::Hash(hash) => Ok(hash.clone()), - SubTreeNode::None => { - return Err(VerifyError::IncompleteProof.into()) - } + SubTreeNode::Hash(hash) => Ok(*hash), + SubTreeNode::None => Err(VerifyError::IncompleteProof.into()), } } @@ -371,7 +379,7 @@ impl SubTree { return Ok(SubTree::::empty()); } - let mut key_paths: Vec> = keys.iter().map(|k| Path(k)).collect(); + let mut key_paths: Vec> = keys.iter().map(Path).collect(); key_paths.sort_by(|a, b| a.0.cmp(b.0)); let info = Self::prove_node(&self.root, key_paths.as_slice(), 0, proof_type)?; @@ -388,7 +396,10 @@ impl SubTree { proof_type: ProofType, ) -> Result { match node { - SubTreeNode::Leaf { key: node_key, value_or_hash } => { + SubTreeNode::Leaf { + key: node_key, + value_or_hash, + } => { let include_value = keys.iter().any(|k| *k.0 == node_key.0); let new_value_or_hash = match value_or_hash { ValueOrHash::Value(value) => { @@ -399,17 +410,21 @@ impl SubTree { } } // If already a hash, keep it as is - ValueOrHash::Hash(hash) => ValueOrHash::Hash(hash.clone()), + ValueOrHash::Hash(hash) => ValueOrHash::Hash(*hash), }; 
Ok(ProveNodeInfo { node: SubTreeNode::Leaf { - key: node_key.clone(), + key: *node_key, value_or_hash: new_value_or_hash, }, value_node: include_value, }) } - SubTreeNode::Internal { prefix, left, right } => { + SubTreeNode::Internal { + prefix, + left, + right, + } => { // Exclude keys that don't match this prefix let end = keys.partition_point(|key| key.split_point(depth, *prefix).is_none()); let keys = &keys[..end]; @@ -432,13 +447,13 @@ impl SubTree { // For extended proofs, include sibling leaf structure (with hashed values) if proof_type == ProofType::Extended { - if left_info.is_none() && right_info.as_ref().map_or(false, |r| r.value_node) { + if left_info.is_none() && right_info.as_ref().is_some_and(|r| r.value_node) { left_info = Some(ProveNodeInfo { node: Self::hash_node_extended(left)?, value_node: false, }); } - if right_info.is_none() && left_info.as_ref().map_or(false, |l| l.value_node) { + if right_info.is_none() && left_info.as_ref().is_some_and(|l| l.value_node) { right_info = Some(ProveNodeInfo { node: Self::hash_node_extended(right)?, value_node: false, @@ -467,7 +482,7 @@ impl SubTree { Ok(ProveNodeInfo { node: SubTreeNode::Internal { - prefix: prefix.clone(), + prefix: *prefix, left: Box::new(left_info.unwrap().node), right: Box::new(right_info.unwrap().node), }, @@ -485,23 +500,27 @@ impl SubTree { SubTreeNode::Leaf { key, value_or_hash } => { let hash = match value_or_hash { ValueOrHash::Value(value) => H::hash(value), - ValueOrHash::Hash(h) => h.clone(), + ValueOrHash::Hash(h) => *h, }; Ok(SubTreeNode::Leaf { - key: key.clone(), + key: *key, value_or_hash: ValueOrHash::Hash(hash), }) } - SubTreeNode::Internal { prefix, left, right } => { + SubTreeNode::Internal { + prefix, + left, + right, + } => { let left_hash = Self::hash_node(left)?; let right_hash = Self::hash_node(right)?; Ok(SubTreeNode::Internal { - prefix: prefix.clone(), + prefix: *prefix, left: Box::new(SubTreeNode::Hash(left_hash)), right: Box::new(SubTreeNode::Hash(right_hash)), 
}) } - SubTreeNode::Hash(h) => Ok(SubTreeNode::Hash(h.clone())), + SubTreeNode::Hash(h) => Ok(SubTreeNode::Hash(*h)), SubTreeNode::None => Err(VerifyError::IncompleteProof.into()), } } @@ -548,15 +567,19 @@ impl SubTree { SubTreeNode::Leaf { key, .. } => { // Check if leaf matches prefix let key_path = Path(&key.0); - for i in depth..prefix.len() { + for (i, &want) in prefix.iter().enumerate().skip(depth) { let key_bit = matches!(key_path.direction(i), Direction::Right); - if key_bit != prefix[i] { + if key_bit != want { return None; } } Self::hash_node(node).ok() } - SubTreeNode::Internal { prefix: seg, left, right } => { + SubTreeNode::Internal { + prefix: seg, + left, + right, + } => { let seg_len = seg.bit_len(); let mut current_depth = depth; @@ -598,13 +621,13 @@ impl SubTree { // First, navigate to the prefix if prefix_idx < prefix.len() { match node { - SubTreeNode::None | SubTreeNode::Hash(_) => return, + SubTreeNode::None | SubTreeNode::Hash(_) => (), SubTreeNode::Leaf { key, .. 
} => { // Check if leaf matches remaining prefix let key_path = Path(&key.0); - for i in prefix_idx..prefix.len() { + for (i, &want) in prefix.iter().enumerate().skip(prefix_idx) { let key_bit = matches!(key_path.direction(i), Direction::Right); - if key_bit != prefix[i] { + if key_bit != want { return; } } @@ -620,7 +643,11 @@ impl SubTree { result[bucket] = Some(hash); } } - SubTreeNode::Internal { prefix: seg, left, right } => { + SubTreeNode::Internal { + prefix: seg, + left, + right, + } => { let seg_len = seg.bit_len(); let mut current_prefix_idx = prefix_idx; @@ -630,8 +657,13 @@ impl SubTree { // Prefix consumed within segment - collect from here let remaining_seg_bits = seg_len - i; Self::collect_from_segment_point( - node, seg, i, remaining_seg_bits, - bucket_bits, target_bits, result + node, + seg, + i, + remaining_seg_bits, + bucket_bits, + target_bits, + result, ); return; } @@ -646,16 +678,30 @@ impl SubTree { if current_prefix_idx >= prefix.len() { // Prefix exactly consumed - collect buckets from children Self::collect_bucket_hashes(left, bucket_bits << 1, 0, target_bits, result); - Self::collect_bucket_hashes(right, (bucket_bits << 1) | 1, 0, target_bits, result); + Self::collect_bucket_hashes( + right, + (bucket_bits << 1) | 1, + 0, + target_bits, + result, + ); } else if prefix[current_prefix_idx] { Self::collect_bucket_hashes_at_prefix( - right, prefix, current_prefix_idx + 1, - bucket_bits, target_bits, result + right, + prefix, + current_prefix_idx + 1, + bucket_bits, + target_bits, + result, ); } else { Self::collect_bucket_hashes_at_prefix( - left, prefix, current_prefix_idx + 1, - bucket_bits, target_bits, result + left, + prefix, + current_prefix_idx + 1, + bucket_bits, + target_bits, + result, ); } } @@ -700,7 +746,13 @@ impl SubTree { if let SubTreeNode::Internal { left, right, .. 
} = node { let remaining_bits = target_bits - bits_collected; Self::collect_bucket_hashes(left, current_bucket << 1, 0, remaining_bits, result); - Self::collect_bucket_hashes(right, (current_bucket << 1) | 1, 0, remaining_bits, result); + Self::collect_bucket_hashes( + right, + (current_bucket << 1) | 1, + 0, + remaining_bits, + result, + ); } } } @@ -738,7 +790,11 @@ impl SubTree { result[bucket] = Some(hash); } } - SubTreeNode::Internal { prefix, left, right } => { + SubTreeNode::Internal { + prefix, + left, + right, + } => { let seg_len = prefix.bit_len(); let mut current_prefix = prefix_bits; let mut current_depth = depth; @@ -813,9 +869,9 @@ impl SubTree { SubTreeNode::Leaf { key, .. } => { // Check if leaf's key matches the prefix let key_path = Path(&key.0); - for i in depth..prefix.len() { + for (i, &want) in prefix.iter().enumerate().skip(depth) { let key_bit = matches!(key_path.direction(i), Direction::Right); - if key_bit != prefix[i] { + if key_bit != want { // Leaf doesn't match prefix - hash it let hash = Self::hash_node(node)?; return Ok(SubTreeNode::Hash(hash)); @@ -825,7 +881,11 @@ impl SubTree { Ok(node.clone()) } - SubTreeNode::Internal { prefix: seg, left, right } => { + SubTreeNode::Internal { + prefix: seg, + left, + right, + } => { let seg_len = seg.bit_len(); let mut current_depth = depth; @@ -857,7 +917,7 @@ impl SubTree { let left_hash = Self::hash_node(left)?; let right_node = Self::extract_prefix_node(right, prefix, current_depth + 1)?; Ok(SubTreeNode::Internal { - prefix: seg.clone(), + prefix: *seg, left: Box::new(SubTreeNode::Hash(left_hash)), right: Box::new(right_node), }) @@ -866,7 +926,7 @@ impl SubTree { let right_hash = Self::hash_node(right)?; let left_node = Self::extract_prefix_node(left, prefix, current_depth + 1)?; Ok(SubTreeNode::Internal { - prefix: seg.clone(), + prefix: *seg, left: Box::new(left_node), right: Box::new(SubTreeNode::Hash(right_hash)), }) @@ -919,8 +979,14 @@ impl SubTree { // Two leaves ( - 
SubTreeNode::Leaf { key: k1, value_or_hash: v1 }, - SubTreeNode::Leaf { key: k2, value_or_hash: v2 }, + SubTreeNode::Leaf { + key: k1, + value_or_hash: v1, + }, + SubTreeNode::Leaf { + key: k2, + value_or_hash: v2, + }, ) => { if k1.0 == k2.0 { // Same key - prefer value over hash @@ -953,8 +1019,16 @@ impl SubTree { // Two internal nodes ( - SubTreeNode::Internal { prefix: p1, left: l1, right: r1 }, - SubTreeNode::Internal { prefix: p2, left: l2, right: r2 }, + SubTreeNode::Internal { + prefix: p1, + left: l1, + right: r1, + }, + SubTreeNode::Internal { + prefix: p2, + left: l2, + right: r2, + }, ) => { // Prefixes must match for valid merge if p1.0 != p2.0 { @@ -996,9 +1070,9 @@ impl SubTree { SubTreeNode::Leaf { key, value_or_hash } => { // Check if leaf matches prefix let key_path = Path(&key.0); - for i in depth..prefix.len() { + for (i, &want) in prefix.iter().enumerate().skip(depth) { let key_bit = matches!(key_path.direction(i), Direction::Right); - if key_bit != prefix[i] { + if key_bit != want { return; // Doesn't match } } @@ -1010,7 +1084,11 @@ impl SubTree { result.push((key.0, value_hash)); } - SubTreeNode::Internal { prefix: seg, left, right } => { + SubTreeNode::Internal { + prefix: seg, + left, + right, + } => { let seg_len = seg.bit_len(); let mut current_depth = depth; @@ -1138,10 +1216,15 @@ impl<'a, H: NodeHasher> DiffSession<'a, H> { let request = self.current_request.take(); match (request, response) { - (Some(DiffRequest::BucketHashes { prefix, bits }), DiffResponse::BucketHashes(remote_hashes)) => { + ( + Some(DiffRequest::BucketHashes { prefix, bits }), + DiffResponse::BucketHashes(remote_hashes), + ) => { let local_hashes = self.local.bucket_hashes_at_prefix(&prefix, bits); - for (i, (local_h, remote_h)) in local_hashes.iter().zip(remote_hashes.iter()).enumerate() { + for (i, (local_h, remote_h)) in + local_hashes.iter().zip(remote_hashes.iter()).enumerate() + { if local_h != remote_h { let new_prefix = extend_prefix(&prefix, i, bits); if 
new_prefix.len() >= self.target_depth { @@ -1207,12 +1290,6 @@ impl BorshDeserialize for SubTree { } } -impl Default for SubTreeNode { - fn default() -> Self { - SubTreeNode::None - } -} - pub struct SubtreeIter<'a> { stack: Vec<(&'a SubTreeNode, usize)>, } @@ -1222,10 +1299,7 @@ impl<'a> Iterator for SubtreeIter<'a> { fn next(&mut self) -> Option { loop { - let (node, depth) = match self.stack.pop() { - Some(x) => x, - None => return None, - }; + let (node, depth) = self.stack.pop()?; match node { SubTreeNode::Leaf { key, value_or_hash } => { @@ -1305,7 +1379,13 @@ impl<'a> Iterator for SubtreeIterMut<'a> { impl SubTreeNode { pub fn is_value_leaf(&self) -> bool { - matches!(self, SubTreeNode::Leaf { value_or_hash: ValueOrHash::Value(_), ..}) + matches!( + self, + SubTreeNode::Leaf { + value_or_hash: ValueOrHash::Value(_), + .. + } + ) } } diff --git a/src/tx.rs b/src/tx.rs index 74393bf..5c2afae 100644 --- a/src/tx.rs +++ b/src/tx.rs @@ -1,15 +1,19 @@ -use crate::{Result, path::{Direction, Path}, subtree::ValueOrHash, Error}; +use crate::{ + Error, Result, + path::{Direction, Path}, + subtree::ValueOrHash, +}; use bincode::config; use core::marker::PhantomData; use std::{io, sync::MutexGuard}; use crate::{ - db::{Database, Record, SavePoint, EMPTY_RECORD, CHUNK_SIZE}, + Hash, NodeHasher, + db::{CHUNK_SIZE, Database, EMPTY_RECORD, Record, SavePoint}, node::{Node, NodeInner}, path::{BitLength, PathUtils}, subtree::{SubTree, SubTreeNode}, - Hash, NodeHasher, }; use crate::{db::DatabaseHeader, fs::WriteBuffer}; @@ -42,7 +46,9 @@ pub struct HashIndex { #[cfg(feature = "hash-idx")] impl Clone for HashIndex { fn clone(&self) -> Self { - Self { conn: self.conn.clone() } + Self { + conn: self.conn.clone(), + } } } @@ -89,7 +95,9 @@ impl ReadTransaction { }; #[cfg(feature = "hash-idx")] - { let _ = tx.load_hash_index(); } + { + let _ = tx.load_hash_index(); + } tx } @@ -110,7 +118,7 @@ impl ReadTransaction { pub fn metadata(&self) -> &[u8] { match &self.savepoint.metadata { 
None => &[], - Some(meta) => meta.as_slice() + Some(meta) => meta.as_slice(), } } @@ -121,7 +129,10 @@ impl ReadTransaction { let path = std::path::Path::new(db_path); let stem = path.file_stem()?.to_str()?; let parent = path.parent().unwrap_or(std::path::Path::new(".")); - let idx_path = parent.join(format!("{}.{}.hidx.sqlite", stem, self.savepoint.root.offset)); + let idx_path = parent.join(format!( + "{}.{}.hidx.sqlite", + stem, self.savepoint.root.offset + )); idx_path.to_str().map(|s| s.to_string()) } @@ -135,22 +146,30 @@ impl ReadTransaction { return Ok(()); } - let idx_path = self.hash_index_path() - .ok_or_else(|| io::Error::new(io::ErrorKind::Unsupported, "Cannot build hash index for in-memory database"))?; + let idx_path = self.hash_index_path().ok_or_else(|| { + io::Error::new( + io::ErrorKind::Unsupported, + "Cannot build hash index for in-memory database", + ) + })?; // Compute fingerprint: hash of root node's raw bytes - let root_raw = self.db.file.read(self.savepoint.root.offset, self.savepoint.root.size as usize)?; + let root_raw = self.db.file.read( + self.savepoint.root.offset, + self.savepoint.root.size as usize, + )?; let fingerprint = H::hash(&root_raw); // Skip if a valid index already exists if std::path::Path::new(&idx_path).exists() { if let Ok(existing) = rusqlite::Connection::open_with_flags( &idx_path, - rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX, + rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY + | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX, ) { - let stored: core::result::Result, _> = existing.query_row( - "SELECT fingerprint FROM meta LIMIT 1", [], |row| row.get(0), - ); + let stored: core::result::Result, _> = + existing + .query_row("SELECT fingerprint FROM meta LIMIT 1", [], |row| row.get(0)); if let Ok(stored) = stored { if stored.len() == 32 && stored == fingerprint { return Ok(()); @@ -161,8 +180,7 @@ impl ReadTransaction { // Create the sqlite sidecar let _ = 
std::fs::remove_file(&idx_path); - let conn = rusqlite::Connection::open(&idx_path) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + let conn = rusqlite::Connection::open(&idx_path).map_err(io::Error::other)?; conn.execute_batch( "PRAGMA journal_mode = OFF; @@ -170,8 +188,9 @@ impl ReadTransaction { PRAGMA cache_size = -65536; PRAGMA page_size = 4096; CREATE TABLE hashes (offset INTEGER PRIMARY KEY, value BLOB NOT NULL) WITHOUT ROWID; - CREATE TABLE meta (fingerprint BLOB NOT NULL);" - ).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + CREATE TABLE meta (fingerprint BLOB NOT NULL);", + ) + .map_err(io::Error::other)?; // Walk tree bottom-up, flushing in batches let batch_size = 500_000; @@ -188,7 +207,8 @@ impl ReadTransaction { conn.execute( "INSERT INTO meta (fingerprint) VALUES (?1)", [&fingerprint[..]], - ).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + ) + .map_err(io::Error::other)?; // Auto-load the index we just built let _ = self.load_hash_index(); @@ -202,22 +222,18 @@ impl ReadTransaction { } #[cfg(feature = "hash-idx")] - fn flush_index_batch( - buffer: &[(u64, Hash)], - conn: &rusqlite::Connection, - ) -> Result<()> { - conn.execute_batch("BEGIN") - .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + fn flush_index_batch(buffer: &[(u64, Hash)], conn: &rusqlite::Connection) -> Result<()> { + conn.execute_batch("BEGIN").map_err(io::Error::other)?; { - let mut stmt = conn.prepare_cached("INSERT OR REPLACE INTO hashes (offset, value) VALUES (?1, ?2)") - .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + let mut stmt = conn + .prepare_cached("INSERT OR REPLACE INTO hashes (offset, value) VALUES (?1, ?2)") + .map_err(io::Error::other)?; for (offset, v) in buffer { stmt.execute(rusqlite::params![offset, &v[..]]) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + .map_err(io::Error::other)?; } } - conn.execute_batch("COMMIT") - .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + 
conn.execute_batch("COMMIT").map_err(io::Error::other)?; Ok(()) } @@ -241,7 +257,11 @@ impl ReadTransaction { } Ok(hash) } - NodeInner::Internal { prefix, left, right } => { + NodeInner::Internal { + prefix, + left, + right, + } => { let left_hash = self.build_index_node(left.id, buffer, conn, batch_size)?; let right_hash = self.build_index_node(right.id, buffer, conn, batch_size)?; let hash = H::hash_internal(prefix.as_bytes(), &left_hash, &right_hash); @@ -276,31 +296,35 @@ impl ReadTransaction { let conn = rusqlite::Connection::open_with_flags( &idx_path, rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX, - ).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + ) + .map_err(io::Error::other)?; // Validate fingerprint - let root_raw = self.db.file.read(self.savepoint.root.offset, self.savepoint.root.size as usize)?; + let root_raw = self.db.file.read( + self.savepoint.root.offset, + self.savepoint.root.size as usize, + )?; let expected_fingerprint = H::hash(&root_raw); - let stored: Vec = conn.query_row( - "SELECT fingerprint FROM meta LIMIT 1", - [], - |row| row.get(0), - ).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + let stored: Vec = conn + .query_row("SELECT fingerprint FROM meta LIMIT 1", [], |row| row.get(0)) + .map_err(io::Error::other)?; if stored.len() != 32 || stored != expected_fingerprint { return Ok(false); } - self.hash_index = Some(HashIndex { conn: Arc::new(Mutex::new(conn)) }); + self.hash_index = Some(HashIndex { + conn: Arc::new(Mutex::new(conn)), + }); Ok(true) } /// Exports the current snapshot to a new database file. pub fn export(&self, path: &str) -> Result<()> { - use std::fs::OpenOptions; - use crate::fs::FileBackend; use crate::db::{DatabaseHeader, HEADER_SIZE}; + use crate::fs::FileBackend; + use std::fs::OpenOptions; let file = OpenOptions::new() .read(true) @@ -367,7 +391,11 @@ impl ReadTransaction { let mut node = Node::from_leaf(key, value); Ok(buffer.write_node(&mut node)?) 
} - NodeInner::Internal { prefix, left, right } => { + NodeInner::Internal { + prefix, + left, + right, + } => { let new_left = if left.id == EMPTY_RECORD { EMPTY_RECORD } else { @@ -392,7 +420,7 @@ impl ReadTransaction { pub fn get(&mut self, key: &Hash) -> Result>> { if self.is_empty() { - return Ok(None) + return Ok(None); } let mut node = self.cache.node.take().unwrap(); @@ -409,7 +437,7 @@ impl ReadTransaction { let mut n = self.cache.node.take().unwrap(); let h = { let entry = Self::hash_node(&self.db, &mut self.cache, &mut n, &self.hash_index)?; - entry.node.hash_cache.clone().unwrap() + entry.node.hash_cache.unwrap() }; self.cache.node = Some(n); Ok(h) @@ -421,10 +449,18 @@ impl ReadTransaction { } let mut node = self.cache.node.take().unwrap(); - let mut key_paths = keys.iter().map(|k| Path(k)).collect::>(); + let mut key_paths = keys.iter().map(Path).collect::>(); key_paths.sort(); - match Self::prove_nodes(&self.db, &mut self.cache, &mut node, key_paths.as_slice(), 0, proof_type, &self.hash_index) { + match Self::prove_nodes( + &self.db, + &mut self.cache, + &mut node, + key_paths.as_slice(), + 0, + proof_type, + &self.hash_index, + ) { Ok(info) => { self.cache.node = Some(node); Ok(SubTree:: { @@ -471,7 +507,7 @@ impl ReadTransaction { }; Ok(SubTreeNodeInfo { node: SubTreeNode::Leaf { - key: node_key.clone(), + key: *node_key, value_or_hash, }, value_node: include_value, @@ -492,23 +528,49 @@ impl ReadTransaction { let split = keys.partition_point(|key| key.direction(depth) == Direction::Left); let (left_keys, right_keys) = keys.split_at(split); - let mut left_subtree = if left_keys.is_empty() { None } else { - Some(Self::prove_nodes(db, cache, left, left_keys, depth + 1, proof_type, hash_index)?) + let mut left_subtree = if left_keys.is_empty() { + None + } else { + Some(Self::prove_nodes( + db, + cache, + left, + left_keys, + depth + 1, + proof_type, + hash_index, + )?) 
}; - let mut right_subtree = if right_keys.is_empty() { None } else { - Some(Self::prove_nodes(db, cache, right, right_keys, depth + 1, proof_type, hash_index)?) + let mut right_subtree = if right_keys.is_empty() { + None + } else { + Some(Self::prove_nodes( + db, + cache, + right, + right_keys, + depth + 1, + proof_type, + hash_index, + )?) }; // Include extended hash of the sibling if its subtree isn't already part of the proof - if proof_type == ProofType::Extended && left_subtree.is_none() && - right_subtree.is_some() && right_subtree.as_ref().unwrap().value_node { + if proof_type == ProofType::Extended + && left_subtree.is_none() + && right_subtree.is_some() + && right_subtree.as_ref().unwrap().value_node + { left_subtree = Some(SubTreeNodeInfo { node: Self::hash_node_extended(db, cache, left, hash_index)?, value_node: false, }) } - if proof_type == ProofType::Extended && right_subtree.is_none() && - left_subtree.is_some() && left_subtree.as_ref().unwrap().value_node { + if proof_type == ProofType::Extended + && right_subtree.is_none() + && left_subtree.is_some() + && left_subtree.as_ref().unwrap().value_node + { right_subtree = Some(SubTreeNodeInfo { node: Self::hash_node_extended(db, cache, right, hash_index)?, value_node: false, @@ -518,7 +580,7 @@ impl ReadTransaction { // If extended hashes aren't needed, include basic ones if left_subtree.is_none() { let left_entry = Self::hash_node(db, cache, left, hash_index)?; - let left_hash = left_entry.node.hash_cache.clone().unwrap(); + let left_hash = left_entry.node.hash_cache.unwrap(); left_subtree = Some(SubTreeNodeInfo { node: SubTreeNode::Hash(left_hash), value_node: false, @@ -526,7 +588,7 @@ impl ReadTransaction { } if right_subtree.is_none() { let right_entry = Self::hash_node(db, cache, right, hash_index)?; - let right_hash = right_entry.node.hash_cache.clone().unwrap(); + let right_hash = right_entry.node.hash_cache.unwrap(); right_subtree = Some(SubTreeNodeInfo { node: SubTreeNode::Hash(right_hash), 
value_node: false, @@ -534,12 +596,12 @@ impl ReadTransaction { } // if left and right subtrees are value leafs, we need to include the sibling of this node - let value_node = left_subtree.as_ref().unwrap().value_node && - right_subtree.as_ref().unwrap().value_node; + let value_node = left_subtree.as_ref().unwrap().value_node + && right_subtree.as_ref().unwrap().value_node; Ok(SubTreeNodeInfo { node: SubTreeNode::Internal { - prefix: prefix.clone(), + prefix: *prefix, left: Box::new(left_subtree.unwrap().node), right: Box::new(right_subtree.unwrap().node), }, @@ -549,7 +611,6 @@ impl ReadTransaction { } } - fn hash_node<'c>( db: &Database, cache: &mut Cache, @@ -563,7 +624,7 @@ impl ReadTransaction { // Check hash index sidecar before loading/recursing #[cfg(feature = "hash-idx")] if node.id != EMPTY_RECORD { - if let Some(ref idx) = hash_index { + if let Some(idx) = hash_index { let conn = idx.conn.lock().expect("hash index lock"); let result: core::result::Result, _> = conn.query_row( "SELECT value FROM hashes WHERE offset = ?1", @@ -621,7 +682,7 @@ impl ReadTransaction { NodeInner::Leaf { key, value } => { let hash = H::hash(value); Ok(SubTreeNode::Leaf { - key: key.clone(), + key: *key, value_or_hash: ValueOrHash::Hash(hash), }) } @@ -630,10 +691,18 @@ impl ReadTransaction { left, right, } => { - let left_hash = Self::hash_node(db, cache, left, hash_index)?.node.hash_cache.as_ref().unwrap().clone(); - let right_hash = Self::hash_node(db, cache, right, hash_index)?.node.hash_cache.as_ref().unwrap().clone(); + let left_hash = *Self::hash_node(db, cache, left, hash_index)? + .node + .hash_cache + .as_ref() + .unwrap(); + let right_hash = *Self::hash_node(db, cache, right, hash_index)? 
+ .node + .hash_cache + .as_ref() + .unwrap(); Ok(SubTreeNode::Internal { - prefix: prefix.clone(), + prefix: *prefix, left: Box::new(SubTreeNode::Hash(left_hash)), right: Box::new(SubTreeNode::Hash(right_hash)), }) @@ -641,10 +710,10 @@ impl ReadTransaction { } } - fn get_node<'c>( + fn get_node( db: &Database, cache: &mut Cache, - node: &'c mut Node, + node: &mut Node, key: Path<&Hash>, depth: usize, ) -> Result>> { @@ -657,7 +726,7 @@ impl ReadTransaction { if node_key.0 == *key.0 { return Ok(Some(value.clone())); } - return Ok(None); + Ok(None) } NodeInner::Internal { prefix, @@ -677,7 +746,6 @@ impl ReadTransaction { } } - impl<'db, H: NodeHasher> WriteTransaction<'db, H> { pub(crate) fn new(db: &'db Database) -> Self { let head = db.header.lock().unwrap(); @@ -697,7 +765,7 @@ impl<'db, H: NodeHasher> WriteTransaction<'db, H> { pub fn metadata(&mut self, metadata: Vec) -> Result<()> { if metadata.len() > 512 { - return Err(io::Error::new(io::ErrorKind::Other, "metadata must not exceed 512 bytes").into()); + return Err(io::Error::other("metadata must not exceed 512 bytes").into()); } self.metadata = Some(metadata); @@ -831,16 +899,12 @@ impl<'db, H: NodeHasher> WriteTransaction<'db, H> { Ok(Node::from_internal(prefix, Box::new(left), Box::new(right))) } - fn delete_node( - &mut self, - node: Node, - key: Path, - depth: usize, - ) -> Result> { + fn delete_node(&mut self, node: Node, key: Path, depth: usize) -> Result> { let inner = self.read_inner(node)?; - return match inner { + match inner { NodeInner::Leaf { - key: node_key, value + key: node_key, + value, } => { if node_key != key { let node = Node::from_leaf(node_key, value); @@ -864,31 +928,35 @@ impl<'db, H: NodeHasher> WriteTransaction<'db, H> { let left_subtree = self.read_inner(*left)?; Ok(Some(self.lift_node(prefix, left_subtree, Direction::Left))) } - Some(right_subtree) => { - Ok(Some( - Node::from_internal(prefix, left, Box::new(right_subtree)) - )) - } + Some(right_subtree) => 
Ok(Some(Node::from_internal( + prefix, + left, + Box::new(right_subtree), + ))), } } Direction::Left => { let left_subtree = self.delete_node(*left, key, depth + 1)?; - return match left_subtree { + match left_subtree { None => { // Left subtree was deleted, move right subtree up let right_subtree = self.read_inner(*right)?; - Ok(Some(self.lift_node(prefix, right_subtree, Direction::Right))) + Ok(Some(self.lift_node( + prefix, + right_subtree, + Direction::Right, + ))) } - Some(left_subtree) => { - Ok(Some( - Node::from_internal(prefix, Box::new(left_subtree), right) - )) - } - }; + Some(left_subtree) => Ok(Some(Node::from_internal( + prefix, + Box::new(left_subtree), + right, + ))), + } } } } - }; + } } #[inline(always)] @@ -899,21 +967,20 @@ impl<'db, H: NodeHasher> WriteTransaction<'db, H> { direction: Direction, ) -> Node { match node { - NodeInner::Leaf { key: leaf_key, value: leaf_value } => { - Node::from_leaf(leaf_key, leaf_value) - } + NodeInner::Leaf { + key: leaf_key, + value: leaf_value, + } => Node::from_leaf(leaf_key, leaf_value), NodeInner::Internal { - prefix: - child_prefix, + prefix: child_prefix, left: child_left, - right: child_right + right: child_right, } => { - // Since this node is being lifted one level append a single bit // based on its direction match direction { Direction::Left => parent_prefix.extend_from_byte(0, 1), - Direction::Right => parent_prefix.extend_from_byte(0b1000_0000, 1) + Direction::Right => parent_prefix.extend_from_byte(0b1000_0000, 1), } // Extend the parent's prefix with the node prefix being lifted @@ -939,11 +1006,7 @@ impl<'db, H: NodeHasher> WriteTransaction<'db, H> { }) } - fn write_all( - &mut self, - buf: &mut WriteBuffer, - node: &mut Node, - ) -> Result { + fn write_all(&mut self, buf: &mut WriteBuffer, node: &mut Node) -> Result { match &mut node.inner { Some(NodeInner::Leaf { .. 
}) => { node.id = buf.write_node(node)?; @@ -964,7 +1027,6 @@ impl<'db, H: NodeHasher> WriteTransaction<'db, H> { Ok(node.id) } - pub fn commit(mut self) -> Result<()> { if self.state.is_none() && self.metadata.is_none() { return Ok(()); @@ -988,9 +1050,7 @@ impl<'db, H: NodeHasher> WriteTransaction<'db, H> { let root = match self.state.take() { None => self.header.savepoint.root, - Some(mut state) => { - self.write_all(&mut buf, &mut state)? - } + Some(mut state) => self.write_all(&mut buf, &mut state)?, }; let previous_savepoint = buf.write_save_point(&self.header.savepoint)?; @@ -1027,7 +1087,7 @@ impl KeyIterator { } } -impl<'db, H: NodeHasher> Iterator for KeyIterator { +impl Iterator for KeyIterator { type Item = Result<(Hash, Vec)>; fn next(&mut self) -> Option { @@ -1045,7 +1105,7 @@ impl<'db, H: NodeHasher> Iterator for KeyIterator { self.next() } }, - Err(e) => Some(Err(Error::from(e))), + Err(e) => Some(Err(e)), } } } @@ -1077,7 +1137,11 @@ impl Cache { self.len > self.max_len } - fn load_node<'c, H: NodeHasher>(&mut self, db: &Database, node: &'c mut Node) -> Result> { + fn load_node<'c, H: NodeHasher>( + &mut self, + db: &Database, + node: &'c mut Node, + ) -> Result> { if node.inner.is_some() { return Ok(CacheEntry { node, clean: false }); } @@ -1110,10 +1174,14 @@ mod tests { fn test_extended_proofs() { let db = Database::memory().unwrap(); let tx = db.begin_write().unwrap(); - tx.insert([0b1000_0000u8; 32], vec![1]).unwrap() - .insert([0b1100_0000u8; 32], vec![2]).unwrap() - .insert([0b0000_0000u8; 32], vec![3]).unwrap() - .commit().unwrap(); + tx.insert([0b1000_0000u8; 32], vec![1]) + .unwrap() + .insert([0b1100_0000u8; 32], vec![2]) + .unwrap() + .insert([0b0000_0000u8; 32], vec![3]) + .unwrap() + .commit() + .unwrap(); let mut snapshot = db.begin_read().unwrap(); let standard_subtree = snapshot.prove(&[[0u8; 32]], ProofType::Standard).unwrap(); @@ -1121,9 +1189,12 @@ mod tests { match standard_subtree.root { SubTreeNode::Internal { left, right, 
.. } => { assert!(left.is_value_leaf(), "expected a value leaf on left"); - assert!(matches!(*right, SubTreeNode::Hash(_)), "expected a hash node on the right"); + assert!( + matches!(*right, SubTreeNode::Hash(_)), + "expected a hash node on the right" + ); } - _ => panic!("invalid result") + _ => panic!("invalid result"), } let extended_subtree = snapshot.prove(&[[0u8; 32]], ProofType::Extended).unwrap(); @@ -1132,14 +1203,24 @@ mod tests { assert!(left.is_value_leaf(), "expected a value leaf on left"); // Extended proof includes the sibling with terminal child hashes if any match *right { - SubTreeNode::Internal { left: left_left, right: left_right, .. } => { - assert!(matches!(*left_left, SubTreeNode::Hash(_)), "expected a hash node"); - assert!(matches!(*left_right, SubTreeNode::Hash(_)), "expected a hash node"); + SubTreeNode::Internal { + left: left_left, + right: left_right, + .. + } => { + assert!( + matches!(*left_left, SubTreeNode::Hash(_)), + "expected a hash node" + ); + assert!( + matches!(*left_right, SubTreeNode::Hash(_)), + "expected a hash node" + ); } - _ => panic!("expected internal node") + _ => panic!("expected internal node"), } } - _ => panic!("invalid result") + _ => panic!("invalid result"), } } } diff --git a/src/wasm.rs b/src/wasm.rs index a3e1da5..8b7c7de 100644 --- a/src/wasm.rs +++ b/src/wasm.rs @@ -1,14 +1,11 @@ #[cfg(feature = "wasm")] mod wasm_api { use alloc::format; - use wasm_bindgen::prelude::{wasm_bindgen, JsValue}; use js_sys::{Array, Uint8Array}; + use wasm_bindgen::prelude::{JsValue, wasm_bindgen}; - use crate::{ - subtree::SubTree as NativeSubTree, - Sha256Hasher, - }; use crate::subtree::ValueOrHash; + use crate::{Sha256Hasher, subtree::SubTree as NativeSubTree}; #[wasm_bindgen] pub struct SubTree { @@ -27,20 +24,22 @@ mod wasm_api { let buf = array.to_vec(); NativeSubTree::from_slice(&buf) .map(|inner| SubTree { inner }) - .map_err(|err| JsValue::from_str(&format!("Deserialization error: {:?}", err))) - } - None => { - 
Ok(SubTree { - inner: NativeSubTree::empty(), - }) + .map_err(|err| { + JsValue::from_str(&format!("Deserialization error: {:?}", err)) + }) } + None => Ok(SubTree { + inner: NativeSubTree::empty(), + }), } } /// Serializes the SubTree to a Uint8Array. #[wasm_bindgen] pub fn to_bytes(&self) -> Result { - let bytes = self.inner.to_vec() + let bytes = self + .inner + .to_vec() .map_err(|err| JsValue::from_str(&format!("Serialization error: {:?}", err)))?; Ok(Uint8Array::from(&bytes[..])) } diff --git a/tests/integration_test.rs b/tests/integration_test.rs index 9ce9670..24621d7 100644 --- a/tests/integration_test.rs +++ b/tests/integration_test.rs @@ -1,7 +1,11 @@ -use std::collections::HashSet; -use spacedb::{db::Database, subtree::{SubTree, ValueOrHash}, NodeHasher, Sha256Hasher, Hash}; -use spacedb::tx::{ProofType, ReadTransaction}; use rand::{Rng, SeedableRng, rngs::StdRng}; +use spacedb::tx::{ProofType, ReadTransaction}; +use spacedb::{ + Hash, NodeHasher, Sha256Hasher, + db::Database, + subtree::{SubTree, ValueOrHash}, +}; +use std::collections::HashSet; #[test] fn it_proves_non_existence_single_key_opposite_path() { @@ -14,9 +18,12 @@ fn it_proves_non_existence_single_key_opposite_path() { k }; - db.begin_write().unwrap() - .insert(key_with_1, vec![1, 2, 3]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(key_with_1, vec![1, 2, 3]) + .unwrap() + .commit() + .unwrap(); // Try to prove a key starting with bit 0 (0b0xxx_xxxx) let key_with_0 = { @@ -35,11 +42,11 @@ fn it_proves_non_existence_single_key_opposite_path() { assert_eq!(subtree.compute_root().unwrap(), tree_root); // contains should return false for the non-existent key (not error) - assert_eq!(subtree.contains(&key_with_0).unwrap(), false); + assert!(!subtree.contains(&key_with_0).unwrap()); // The existing key is still visible in the proof (the leaf node contains its key) // but the value is hashed since we didn't ask for it - assert_eq!(subtree.contains(&key_with_1).unwrap(), 
true); + assert!(subtree.contains(&key_with_1).unwrap()); } #[test] @@ -57,19 +64,20 @@ fn it_proves_non_existence_when_key_diverges_at_prefix() { write.commit().unwrap(); // Prove a key starting with bit 0 (completely different subtree) - let non_existent = [0u8; 32]; // all zeros + let non_existent = [0u8; 32]; // all zeros let mut snapshot = db.begin_read().unwrap(); - let proof = snapshot.prove(&[non_existent], ProofType::Standard).unwrap(); + let proof = snapshot + .prove(&[non_existent], ProofType::Standard) + .unwrap(); let result = proof.contains(&non_existent); assert!(result.is_ok()); - assert_eq!(result.unwrap(), false); + assert!(!result.unwrap()); } #[test] fn subtree_borsh_serialization_roundtrip() { - let db = Database::memory().unwrap(); let mut write = db.begin_write().unwrap(); @@ -82,14 +90,17 @@ fn subtree_borsh_serialization_roundtrip() { write.commit().unwrap(); // Create a proof for some keys - let keys_to_prove: Vec = (0u8..5).map(|i| { - let mut k = [0u8; 32]; - k[0] = i; - k - }).collect(); + let keys_to_prove: Vec = (0u8..5) + .map(|i| { + let mut k = [0u8; 32]; + k[0] = i; + k + }) + .collect(); let mut snapshot = db.begin_read().unwrap(); - let subtree: SubTree = snapshot.prove(&keys_to_prove, ProofType::Standard).unwrap(); + let subtree: SubTree = + snapshot.prove(&keys_to_prove, ProofType::Standard).unwrap(); let original_root = subtree.compute_root().unwrap(); // Serialize and deserialize @@ -127,21 +138,29 @@ fn subtree_prove_creates_smaller_proof() { // Get a proof for all 10 keys from the main tree let mut snapshot = db.begin_read().unwrap(); - let all_keys: Vec = (0u8..10).map(|i| { - let mut k = [0u8; 32]; - k[0] = i; - k - }).collect(); - let full_subtree: SubTree = snapshot.prove(&all_keys, spacedb::tx::ProofType::Standard).unwrap(); + let all_keys: Vec = (0u8..10) + .map(|i| { + let mut k = [0u8; 32]; + k[0] = i; + k + }) + .collect(); + let full_subtree: SubTree = snapshot + .prove(&all_keys, 
spacedb::tx::ProofType::Standard) + .unwrap(); let full_root = full_subtree.compute_root().unwrap(); // Now create a smaller proof from the subtree for just 2 keys - let subset_keys: Vec = (0u8..2).map(|i| { - let mut k = [0u8; 32]; - k[0] = i; - k - }).collect(); - let smaller_subtree = full_subtree.prove(&subset_keys, ProofType::Standard).unwrap(); + let subset_keys: Vec = (0u8..2) + .map(|i| { + let mut k = [0u8; 32]; + k[0] = i; + k + }) + .collect(); + let smaller_subtree = full_subtree + .prove(&subset_keys, ProofType::Standard) + .unwrap(); // Root should still match assert_eq!(smaller_subtree.compute_root().unwrap(), full_root); @@ -161,9 +180,12 @@ fn subtree_prove_creates_smaller_proof() { // Serialized size should be smaller (fewer values included) let full_serialized = borsh::to_vec(&full_subtree).unwrap(); let smaller_serialized = borsh::to_vec(&smaller_subtree).unwrap(); - assert!(smaller_serialized.len() < full_serialized.len(), + assert!( + smaller_serialized.len() < full_serialized.len(), "smaller proof should serialize to fewer bytes: {} vs {}", - smaller_serialized.len(), full_serialized.len()); + smaller_serialized.len(), + full_serialized.len() + ); } #[test] @@ -175,7 +197,10 @@ fn subtree_prove_empty_subtree() { let result = empty.prove(&[key], ProofType::Standard).unwrap(); assert!(result.is_empty()); - assert_eq!(result.compute_root().unwrap(), empty.compute_root().unwrap()); + assert_eq!( + result.compute_root().unwrap(), + empty.compute_root().unwrap() + ); } #[test] @@ -192,20 +217,26 @@ fn subtree_prove_nonexistent_keys() { write.commit().unwrap(); let mut snapshot = db.begin_read().unwrap(); - let all_keys: Vec = (0u8..5).map(|i| { - let mut k = [0u8; 32]; - k[0] = i * 2; - k - }).collect(); - let subtree: SubTree = snapshot.prove(&all_keys, spacedb::tx::ProofType::Standard).unwrap(); + let all_keys: Vec = (0u8..5) + .map(|i| { + let mut k = [0u8; 32]; + k[0] = i * 2; + k + }) + .collect(); + let subtree: SubTree = snapshot + 
.prove(&all_keys, spacedb::tx::ProofType::Standard) + .unwrap(); let original_root = subtree.compute_root().unwrap(); // Prove keys that don't exist (odd numbers) - let nonexistent: Vec = (0u8..3).map(|i| { - let mut k = [0u8; 32]; - k[0] = i * 2 + 1; // 1, 3, 5 - k - }).collect(); + let nonexistent: Vec = (0u8..3) + .map(|i| { + let mut k = [0u8; 32]; + k[0] = i * 2 + 1; // 1, 3, 5 + k + }) + .collect(); let proof = subtree.prove(&nonexistent, ProofType::Standard).unwrap(); // Root should still match @@ -213,7 +244,10 @@ fn subtree_prove_nonexistent_keys() { // Non-existent keys should return false (not error) for key in &nonexistent { - assert!(!proof.contains(key).unwrap(), "nonexistent key should return false"); + assert!( + !proof.contains(key).unwrap(), + "nonexistent key should return false" + ); } } @@ -235,7 +269,9 @@ fn subtree_prove_through_hash_node_fails() { let mut snapshot = db.begin_read().unwrap(); // Only prove key_left - key_right becomes a hash node - let partial_subtree: SubTree = snapshot.prove(&[key_left], spacedb::tx::ProofType::Standard).unwrap(); + let partial_subtree: SubTree = snapshot + .prove(&[key_left], spacedb::tx::ProofType::Standard) + .unwrap(); // Now try to prove key_right from the partial subtree - should fail let result = partial_subtree.prove(&[key_right], ProofType::Standard); @@ -248,16 +284,23 @@ fn subtree_prove_duplicate_keys() { let db = Database::memory().unwrap(); let key = [42u8; 32]; - db.begin_write().unwrap() - .insert(key, vec![1, 2, 3]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(key, vec![1, 2, 3]) + .unwrap() + .commit() + .unwrap(); let mut snapshot = db.begin_read().unwrap(); - let subtree: SubTree = snapshot.prove(&[key], spacedb::tx::ProofType::Standard).unwrap(); + let subtree: SubTree = snapshot + .prove(&[key], spacedb::tx::ProofType::Standard) + .unwrap(); let original_root = subtree.compute_root().unwrap(); // Prove with duplicate keys - let proof = subtree.prove(&[key, 
key, key], ProofType::Standard).unwrap(); + let proof = subtree + .prove(&[key, key, key], ProofType::Standard) + .unwrap(); assert_eq!(proof.compute_root().unwrap(), original_root); assert!(proof.contains(&key).unwrap()); } @@ -269,11 +312,13 @@ fn subtree_prove_order_independence() { let db = Database::memory().unwrap(); let mut write = db.begin_write().unwrap(); - let keys: Vec = (0u8..5).map(|i| { - let mut k = [0u8; 32]; - k[0] = i; - k - }).collect(); + let keys: Vec = (0u8..5) + .map(|i| { + let mut k = [0u8; 32]; + k[0] = i; + k + }) + .collect(); for (i, key) in keys.iter().enumerate() { write = write.insert(*key, vec![i as u8]).unwrap(); @@ -281,13 +326,19 @@ fn subtree_prove_order_independence() { write.commit().unwrap(); let mut snapshot = db.begin_read().unwrap(); - let subtree: SubTree = snapshot.prove(&keys, spacedb::tx::ProofType::Standard).unwrap(); + let subtree: SubTree = snapshot + .prove(&keys, spacedb::tx::ProofType::Standard) + .unwrap(); // Prove in forward order - let proof_forward = subtree.prove(&[keys[0], keys[1], keys[2]], ProofType::Standard).unwrap(); + let proof_forward = subtree + .prove(&[keys[0], keys[1], keys[2]], ProofType::Standard) + .unwrap(); // Prove in reverse order - let proof_reverse = subtree.prove(&[keys[2], keys[1], keys[0]], ProofType::Standard).unwrap(); + let proof_reverse = subtree + .prove(&[keys[2], keys[1], keys[0]], ProofType::Standard) + .unwrap(); // Both should produce same root assert_eq!( @@ -298,7 +349,10 @@ fn subtree_prove_order_independence() { // Both should have same serialized form let ser_forward = borsh::to_vec(&proof_forward).unwrap(); let ser_reverse = borsh::to_vec(&proof_reverse).unwrap(); - assert_eq!(ser_forward, ser_reverse, "order should not affect proof structure"); + assert_eq!( + ser_forward, ser_reverse, + "order should not affect proof structure" + ); } #[test] @@ -308,11 +362,13 @@ fn subtree_prove_chained() { let db = Database::memory().unwrap(); let mut write = 
db.begin_write().unwrap(); - let keys: Vec = (0u8..10).map(|i| { - let mut k = [0u8; 32]; - k[0] = i; - k - }).collect(); + let keys: Vec = (0u8..10) + .map(|i| { + let mut k = [0u8; 32]; + k[0] = i; + k + }) + .collect(); for (i, key) in keys.iter().enumerate() { write = write.insert(*key, vec![i as u8; 50]).unwrap(); @@ -321,10 +377,14 @@ fn subtree_prove_chained() { let mut snapshot = db.begin_read().unwrap(); let original_root = snapshot.compute_root().unwrap(); - let full_subtree: SubTree = snapshot.prove(&keys, spacedb::tx::ProofType::Standard).unwrap(); + let full_subtree: SubTree = snapshot + .prove(&keys, spacedb::tx::ProofType::Standard) + .unwrap(); // First prove: 10 keys -> 5 keys - let proof1 = full_subtree.prove(&keys[0..5], ProofType::Standard).unwrap(); + let proof1 = full_subtree + .prove(&keys[0..5], ProofType::Standard) + .unwrap(); assert_eq!(proof1.compute_root().unwrap(), original_root); // Second prove: 5 keys -> 2 keys @@ -361,26 +421,38 @@ fn subtree_prove_extended_includes_sibling_leaves() { let mut key_b = [0xFFu8; 32]; key_b[31] = 0b1111_1111; // ends in 1 - db.begin_write().unwrap() - .insert(key_a, vec![0xAA]).unwrap() - .insert(key_b, vec![0xBB]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(key_a, vec![0xAA]) + .unwrap() + .insert(key_b, vec![0xBB]) + .unwrap() + .commit() + .unwrap(); let mut snapshot = db.begin_read().unwrap(); - let full_subtree: SubTree = snapshot.prove(&[key_a, key_b], spacedb::tx::ProofType::Standard).unwrap(); + let full_subtree: SubTree = snapshot + .prove(&[key_a, key_b], spacedb::tx::ProofType::Standard) + .unwrap(); let original_root = full_subtree.compute_root().unwrap(); // Standard proof of key_a - key_b becomes Hash node let standard_proof = full_subtree.prove(&[key_a], ProofType::Standard).unwrap(); assert_eq!(standard_proof.compute_root().unwrap(), original_root); - assert!(standard_proof.contains(&key_b).is_err(), "standard proof should have hash node for sibling"); + 
assert!( + standard_proof.contains(&key_b).is_err(), + "standard proof should have hash node for sibling" + ); // Extended proof of key_a - key_b should be a leaf with hashed value let extended_proof = full_subtree.prove(&[key_a], ProofType::Extended).unwrap(); assert_eq!(extended_proof.compute_root().unwrap(), original_root); // In extended proof, sibling leaf structure is preserved (key visible, value hashed) // So contains should return true (key is there) but value is hashed - assert!(extended_proof.contains(&key_b).unwrap(), "extended proof should preserve sibling leaf key"); + assert!( + extended_proof.contains(&key_b).unwrap(), + "extended proof should preserve sibling leaf key" + ); } #[test] @@ -391,12 +463,17 @@ fn subtree_prove_single_key_tree() { let key = [0x42u8; 32]; let value = vec![1, 2, 3, 4, 5]; - db.begin_write().unwrap() - .insert(key, value.clone()).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(key, value.clone()) + .unwrap() + .commit() + .unwrap(); let mut snapshot = db.begin_read().unwrap(); - let subtree: SubTree = snapshot.prove(&[key], spacedb::tx::ProofType::Standard).unwrap(); + let subtree: SubTree = snapshot + .prove(&[key], spacedb::tx::ProofType::Standard) + .unwrap(); let original_root = subtree.compute_root().unwrap(); // Prove the same key @@ -430,11 +507,13 @@ fn mixed_existence_proof() { let tree_root = snapshot.compute_root().unwrap(); // Prove a mix of existing (0, 2, 4) and non-existing (1, 3, 5) keys - let keys_to_prove: Vec = (0u8..6).map(|i| { - let mut k = [0u8; 32]; - k[0] = i; - k - }).collect(); + let keys_to_prove: Vec = (0u8..6) + .map(|i| { + let mut k = [0u8; 32]; + k[0] = i; + k + }) + .collect(); let subtree = snapshot.prove(&keys_to_prove, ProofType::Standard).unwrap(); @@ -472,10 +551,14 @@ fn adjacent_keys_differ_by_one_bit() { k }; - db.begin_write().unwrap() - .insert(key_a, vec![0xAA]).unwrap() - .insert(key_b, vec![0xBB]).unwrap() - .commit().unwrap(); + db.begin_write() + 
.unwrap() + .insert(key_a, vec![0xAA]) + .unwrap() + .insert(key_b, vec![0xBB]) + .unwrap() + .commit() + .unwrap(); let mut snapshot = db.begin_read().unwrap(); let tree_root = snapshot.compute_root().unwrap(); @@ -485,7 +568,10 @@ fn adjacent_keys_differ_by_one_bit() { assert_eq!(subtree.compute_root().unwrap(), tree_root); assert!(subtree.contains(&key_a).unwrap()); // key_b is a sibling hash node - we can't prove it exists without its own proof - assert!(subtree.contains(&key_b).is_err(), "key_b should be incomplete proof"); + assert!( + subtree.contains(&key_b).is_err(), + "key_b should be incomplete proof" + ); // Prove only key_b let mut snapshot = db.begin_read().unwrap(); @@ -493,11 +579,16 @@ fn adjacent_keys_differ_by_one_bit() { assert_eq!(subtree.compute_root().unwrap(), tree_root); assert!(subtree.contains(&key_b).unwrap()); // key_a is a sibling hash node - assert!(subtree.contains(&key_a).is_err(), "key_a should be incomplete proof"); + assert!( + subtree.contains(&key_a).is_err(), + "key_a should be incomplete proof" + ); // Prove both keys together let mut snapshot = db.begin_read().unwrap(); - let subtree = snapshot.prove(&[key_a, key_b], ProofType::Standard).unwrap(); + let subtree = snapshot + .prove(&[key_a, key_b], ProofType::Standard) + .unwrap(); assert_eq!(subtree.compute_root().unwrap(), tree_root); assert!(subtree.contains(&key_a).unwrap()); assert!(subtree.contains(&key_b).unwrap()); @@ -523,7 +614,7 @@ fn it_works_with_empty_trees() { "empty subtree must return zero hash" ); - assert_eq!(subtree.contains(&foo).unwrap(), false) + assert!(!subtree.contains(&foo).unwrap()) } #[test] @@ -533,7 +624,7 @@ fn it_inserts_into_tree() { let key = db.hash(&[]); let value = "some data".as_bytes().to_vec(); - tx.insert(key.clone(), value.clone()).unwrap().commit().unwrap(); + tx.insert(key, value.clone()).unwrap().commit().unwrap(); let mut tree = db.begin_read().unwrap(); @@ -559,10 +650,10 @@ fn it_inserts_many_items_into_tree() { let mut keys = 
Vec::new(); for i in 0..100 { let key = Sha256Hasher::hash(format!("key{}", i).as_bytes()); - keys.push(key.clone()); + keys.push(key); let value = format!("data{}", i).as_bytes().to_vec(); - tx = tx.insert(key.clone(), value.clone()).unwrap(); + tx = tx.insert(key, value.clone()).unwrap(); subtree.insert(key, ValueOrHash::Value(value)).unwrap(); } @@ -624,13 +715,17 @@ fn it_should_iterate_over_tree() { fn it_returns_none_when_key_not_exists() { let db = Database::memory().unwrap(); let mut snapshot = db.begin_read().unwrap(); - assert_eq!(snapshot.get(&[0u8; 32]).unwrap(), None, "empty tree should return none"); + assert_eq!( + snapshot.get(&[0u8; 32]).unwrap(), + None, + "empty tree should return none" + ); let mut tx = db.begin_write().unwrap(); let key = db.hash(&[]); let value = "some data".as_bytes().to_vec(); - tx = tx.insert(key.clone(), value.clone()).unwrap(); + tx = tx.insert(key, value.clone()).unwrap(); tx.commit().unwrap(); let mut tree = db.begin_read().unwrap(); @@ -731,7 +826,10 @@ fn it_should_delete_elements_from_subtree() { let root_with_entire_sample_size = db.begin_read().unwrap().compute_root().unwrap(); assert_ne!(expected_root_after_deletion, root_with_entire_sample_size); - let key_hashes: Vec = keys_to_delete.iter().map(|k: &u32| u32_to_key(*k)).collect(); + let key_hashes: Vec = keys_to_delete + .iter() + .map(|k: &u32| u32_to_key(*k)) + .collect(); let mut snapshot = db.begin_read().unwrap(); let mut subtree = snapshot.prove(&key_hashes, ProofType::Extended).unwrap(); @@ -765,13 +863,15 @@ fn it_should_store_metadata() { let snapshot = db.begin_read().unwrap(); assert_eq!(snapshot.metadata(), "snapshot 1".as_bytes()); - let snapshots: Vec> = db.iter() - .map(|s| s.unwrap()).collect(); + let snapshots: Vec> = db.iter().map(|s| s.unwrap()).collect(); assert_eq!(snapshots.len(), 2); for (index, snapshot) in snapshots.iter().rev().enumerate() { - assert_eq!(String::from_utf8_lossy(snapshot.metadata()), format!("snapshot {}", index)); + 
assert_eq!( + String::from_utf8_lossy(snapshot.metadata()), + format!("snapshot {}", index) + ); } } @@ -784,7 +884,10 @@ fn it_should_rollback() -> spacedb::Result<()> { for snapshot_index in 0..snapshots_len { let mut tx = db.begin_write()?; for entry in 0..items_per_snapshot { - tx = tx.insert(u32_to_key((snapshot_index * entry) as u32), entry.to_be_bytes().to_vec())?; + tx = tx.insert( + u32_to_key((snapshot_index * entry) as u32), + entry.to_be_bytes().to_vec(), + )?; } tx.commit()?; } @@ -793,7 +896,11 @@ fn it_should_rollback() -> spacedb::Result<()> { for snapshot in db.iter() { roots.push(snapshot?.compute_root()?) } - assert_eq!(roots.len(), snapshots_len, "expected roots == snapshots len"); + assert_eq!( + roots.len(), + snapshots_len, + "expected roots == snapshots len" + ); // try rolling back latest snapshot let snapshot = db.begin_read()?; @@ -802,16 +909,24 @@ fn it_should_rollback() -> spacedb::Result<()> { // confirm we still have the same snapshot let mut snapshot = db.begin_read()?; - assert_eq!(&snapshot.compute_root()?, roots.first().unwrap(), "bad roots"); + assert_eq!( + &snapshot.compute_root()?, + roots.first().unwrap(), + "bad roots" + ); // rollback the 6th snapshot - db.iter().skip(5).next().unwrap()?.rollback()?; + db.iter().nth(5).unwrap()?.rollback()?; let snapshots_len = snapshots_len - 5; assert_eq!(db.iter().count(), snapshots_len, "snapshot count mismatch"); // db should now point to the snapshot we just rolled back let mut snapshot = db.begin_read()?; - assert_eq!(&snapshot.compute_root()?, roots.iter().skip(5).next().unwrap(), "bad roots"); + assert_eq!( + &snapshot.compute_root()?, + roots.get(5).unwrap(), + "bad roots" + ); Ok(()) } @@ -831,8 +946,12 @@ fn subtree_merge_disjoint_proofs() { let original_root = snapshot.compute_root().unwrap(); // Create two separate proofs for each key - let proof1: SubTree = snapshot.prove(&[key1], spacedb::tx::ProofType::Standard).unwrap(); - let proof2: SubTree = snapshot.prove(&[key2], 
spacedb::tx::ProofType::Standard).unwrap(); + let proof1: SubTree = snapshot + .prove(&[key1], spacedb::tx::ProofType::Standard) + .unwrap(); + let proof2: SubTree = snapshot + .prove(&[key2], spacedb::tx::ProofType::Standard) + .unwrap(); // Verify each proof individually assert_eq!(proof1.compute_root().unwrap(), original_root); @@ -873,8 +992,12 @@ fn subtree_merge_overlapping_proofs() { let original_root = snapshot.compute_root().unwrap(); // Create overlapping proofs - let proof12: SubTree = snapshot.prove(&[key1, key2], spacedb::tx::ProofType::Standard).unwrap(); - let proof23: SubTree = snapshot.prove(&[key2, key3], spacedb::tx::ProofType::Standard).unwrap(); + let proof12: SubTree = snapshot + .prove(&[key1, key2], spacedb::tx::ProofType::Standard) + .unwrap(); + let proof23: SubTree = snapshot + .prove(&[key2, key3], spacedb::tx::ProofType::Standard) + .unwrap(); // Merge them let merged = proof12.merge(proof23).unwrap(); @@ -889,12 +1012,17 @@ fn subtree_merge_overlapping_proofs() { #[test] fn subtree_merge_with_empty() { let db = Database::memory().unwrap(); - db.begin_write().unwrap() - .insert([1u8; 32], vec![1, 2, 3]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert([1u8; 32], vec![1, 2, 3]) + .unwrap() + .commit() + .unwrap(); let mut snapshot = db.begin_read().unwrap(); - let subtree: SubTree = snapshot.prove(&[[1u8; 32]], spacedb::tx::ProofType::Standard).unwrap(); + let subtree: SubTree = snapshot + .prove(&[[1u8; 32]], spacedb::tx::ProofType::Standard) + .unwrap(); let original_root = subtree.compute_root().unwrap(); let empty: SubTree = SubTree::empty(); @@ -910,13 +1038,20 @@ fn subtree_merge_with_empty() { #[test] fn subtree_merge_identical_proofs() { let db = Database::memory().unwrap(); - db.begin_write().unwrap() - .insert([1u8; 32], vec![1, 2, 3]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert([1u8; 32], vec![1, 2, 3]) + .unwrap() + .commit() + .unwrap(); let mut snapshot = 
db.begin_read().unwrap(); - let proof1: SubTree = snapshot.prove(&[[1u8; 32]], spacedb::tx::ProofType::Standard).unwrap(); - let proof2: SubTree = snapshot.prove(&[[1u8; 32]], spacedb::tx::ProofType::Standard).unwrap(); + let proof1: SubTree = snapshot + .prove(&[[1u8; 32]], spacedb::tx::ProofType::Standard) + .unwrap(); + let proof2: SubTree = snapshot + .prove(&[[1u8; 32]], spacedb::tx::ProofType::Standard) + .unwrap(); let original_root = proof1.compute_root().unwrap(); // Merging identical proofs should work @@ -929,20 +1064,30 @@ fn subtree_merge_identical_proofs() { fn subtree_merge_mismatched_roots_fails() { // Create two different databases with different data let db1 = Database::memory().unwrap(); - db1.begin_write().unwrap() - .insert([1u8; 32], vec![1, 2, 3]).unwrap() - .commit().unwrap(); + db1.begin_write() + .unwrap() + .insert([1u8; 32], vec![1, 2, 3]) + .unwrap() + .commit() + .unwrap(); let db2 = Database::memory().unwrap(); - db2.begin_write().unwrap() - .insert([2u8; 32], vec![4, 5, 6]).unwrap() - .commit().unwrap(); + db2.begin_write() + .unwrap() + .insert([2u8; 32], vec![4, 5, 6]) + .unwrap() + .commit() + .unwrap(); let mut snapshot1 = db1.begin_read().unwrap(); let mut snapshot2 = db2.begin_read().unwrap(); - let proof1: SubTree = snapshot1.prove(&[[1u8; 32]], spacedb::tx::ProofType::Standard).unwrap(); - let proof2: SubTree = snapshot2.prove(&[[2u8; 32]], spacedb::tx::ProofType::Standard).unwrap(); + let proof1: SubTree = snapshot1 + .prove(&[[1u8; 32]], spacedb::tx::ProofType::Standard) + .unwrap(); + let proof2: SubTree = snapshot2 + .prove(&[[2u8; 32]], spacedb::tx::ProofType::Standard) + .unwrap(); // These have different roots, merge should fail assert!(proof1.merge(proof2).is_err()); @@ -967,10 +1112,9 @@ fn subtree_bucket_hashes_basic() { write.commit().unwrap(); let mut snapshot = db.begin_read().unwrap(); - let subtree: SubTree = snapshot.prove( - &[key00, key01, key10], - spacedb::tx::ProofType::Standard - ).unwrap(); + let 
subtree: SubTree = snapshot + .prove(&[key00, key01, key10], spacedb::tx::ProofType::Standard) + .unwrap(); // Get bucket hashes with 2 bits (4 buckets) let hashes = subtree.bucket_hashes(2); @@ -1000,10 +1144,9 @@ fn subtree_bucket_hashes_single_bit() { write.commit().unwrap(); let mut snapshot = db.begin_read().unwrap(); - let subtree: SubTree = snapshot.prove( - &[key0a, key0b, key1], - spacedb::tx::ProofType::Standard - ).unwrap(); + let subtree: SubTree = snapshot + .prove(&[key0a, key0b, key1], spacedb::tx::ProofType::Standard) + .unwrap(); // Get bucket hashes with 1 bit (2 buckets) let hashes = subtree.bucket_hashes(1); @@ -1035,16 +1178,15 @@ fn subtree_get_prefix_basic() { write.commit().unwrap(); let mut snapshot = db.begin_read().unwrap(); - let subtree: SubTree = snapshot.prove( - &[key00, key01, key10], - spacedb::tx::ProofType::Standard - ).unwrap(); + let subtree: SubTree = snapshot + .prove(&[key00, key01, key10], spacedb::tx::ProofType::Standard) + .unwrap(); // Get subtree for prefix "0" (should contain key00 and key01) let prefix_0 = subtree.get_prefix(&[false]).unwrap(); assert!(prefix_0.contains(&key00).unwrap()); assert!(prefix_0.contains(&key01).unwrap()); - assert!(!prefix_0.contains(&key10).is_ok() || !prefix_0.contains(&key10).unwrap()); + assert!(prefix_0.contains(&key10).is_err() || !prefix_0.contains(&key10).unwrap()); // Get subtree for prefix "1" (should contain key10) let prefix_1 = subtree.get_prefix(&[true]).unwrap(); @@ -1058,15 +1200,17 @@ fn subtree_get_prefix_basic() { #[test] fn subtree_get_prefix_no_match() { let db = Database::memory().unwrap(); - db.begin_write().unwrap() - .insert([0x00u8; 32], vec![1]).unwrap() // starts with 0 - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert([0x00u8; 32], vec![1]) + .unwrap() // starts with 0 + .commit() + .unwrap(); let mut snapshot = db.begin_read().unwrap(); - let subtree: SubTree = snapshot.prove( - &[[0x00u8; 32]], - spacedb::tx::ProofType::Standard - ).unwrap(); + 
let subtree: SubTree = snapshot + .prove(&[[0x00u8; 32]], spacedb::tx::ProofType::Standard) + .unwrap(); let original_root = subtree.compute_root().unwrap(); // Get prefix "1" - no keys start with 1 @@ -1092,40 +1236,61 @@ fn subtree_bucket_hashes_sync_scenario() { // Bob has an extra key let bob_only = [0x80u8; 32]; - alice_db.begin_write().unwrap() - .insert(shared1, vec![1]).unwrap() - .insert(shared2, vec![2]).unwrap() - .commit().unwrap(); - - bob_db.begin_write().unwrap() - .insert(shared1, vec![1]).unwrap() - .insert(shared2, vec![2]).unwrap() - .insert(bob_only, vec![3]).unwrap() - .commit().unwrap(); + alice_db + .begin_write() + .unwrap() + .insert(shared1, vec![1]) + .unwrap() + .insert(shared2, vec![2]) + .unwrap() + .commit() + .unwrap(); + + bob_db + .begin_write() + .unwrap() + .insert(shared1, vec![1]) + .unwrap() + .insert(shared2, vec![2]) + .unwrap() + .insert(bob_only, vec![3]) + .unwrap() + .commit() + .unwrap(); let mut alice_snapshot = alice_db.begin_read().unwrap(); let mut bob_snapshot = bob_db.begin_read().unwrap(); - let alice_tree: SubTree = alice_snapshot.prove( - &[shared1, shared2], - spacedb::tx::ProofType::Standard - ).unwrap(); + let alice_tree: SubTree = alice_snapshot + .prove(&[shared1, shared2], spacedb::tx::ProofType::Standard) + .unwrap(); - let bob_tree: SubTree = bob_snapshot.prove( - &[shared1, shared2, bob_only], - spacedb::tx::ProofType::Standard - ).unwrap(); + let bob_tree: SubTree = bob_snapshot + .prove( + &[shared1, shared2, bob_only], + spacedb::tx::ProofType::Standard, + ) + .unwrap(); // Compare bucket hashes at 2 bits let alice_hashes = alice_tree.bucket_hashes(2); let bob_hashes = bob_tree.bucket_hashes(2); // Buckets 00 and 01 should match (shared keys) - assert_eq!(alice_hashes[0b00], bob_hashes[0b00], "bucket 00 should match"); - assert_eq!(alice_hashes[0b01], bob_hashes[0b01], "bucket 01 should match"); + assert_eq!( + alice_hashes[0b00], bob_hashes[0b00], + "bucket 00 should match" + ); + assert_eq!( + 
alice_hashes[0b01], bob_hashes[0b01], + "bucket 01 should match" + ); // Bucket 10 should differ (Bob has extra key) - assert_ne!(alice_hashes[0b10], bob_hashes[0b10], "bucket 10 should differ"); + assert_ne!( + alice_hashes[0b10], bob_hashes[0b10], + "bucket 10 should differ" + ); // Alice can now request bucket 10 from Bob let bob_prefix_10 = bob_tree.get_prefix(&[true, false]).unwrap(); @@ -1134,7 +1299,7 @@ fn subtree_bucket_hashes_sync_scenario() { #[test] fn subtree_sync_100k_keys_80_differ() { - use spacedb::subtree::{ValueOrHash, DiffSession, DiffRequest, DiffResponse}; + use spacedb::subtree::{DiffRequest, DiffResponse, DiffSession, ValueOrHash}; fn make_key(n: u32) -> Hash { Sha256Hasher::hash(&n.to_le_bytes()) @@ -1143,14 +1308,18 @@ fn subtree_sync_100k_keys_80_differ() { // Alice: 100k keys, Bob: 100k + 80 extra + 1 modified let mut alice: SubTree = SubTree::empty(); for i in 0..100_000u32 { - alice.insert(make_key(i), ValueOrHash::Value(vec![(i % 256) as u8])).unwrap(); + alice + .insert(make_key(i), ValueOrHash::Value(vec![(i % 256) as u8])) + .unwrap(); } let mut bob = alice.clone(); for i in 100_000..100_080u32 { - bob.insert(make_key(i), ValueOrHash::Value(vec![0xBB])).unwrap(); + bob.insert(make_key(i), ValueOrHash::Value(vec![0xBB])) + .unwrap(); } - bob.update(make_key(100), ValueOrHash::Value(vec![0xCC])).unwrap(); // modify one + bob.update(make_key(100), ValueOrHash::Value(vec![0xCC])) + .unwrap(); // modify one let bob_root = bob.compute_root().unwrap(); assert_ne!(alice.compute_root().unwrap(), bob_root); @@ -1194,12 +1363,14 @@ fn compare_encoding_sizes() { } write.commit().unwrap(); - let all_keys: Vec = (0u8..50).map(|i| { - let mut k = [0u8; 32]; - k[0] = i; - k[1] = i.wrapping_mul(37); - k - }).collect(); + let all_keys: Vec = (0u8..50) + .map(|i| { + let mut k = [0u8; 32]; + k[0] = i; + k[1] = i.wrapping_mul(37); + k + }) + .collect(); let mut snapshot = db.begin_read().unwrap(); @@ -1208,10 +1379,14 @@ fn compare_encoding_sizes() { 
// Partial proof (mix of values and hash nodes) let partial_keys: Vec = all_keys[..10].to_vec(); - let partial_proof = full_proof.prove(&partial_keys, spacedb::subtree::ProofType::Standard).unwrap(); + let partial_proof = full_proof + .prove(&partial_keys, spacedb::subtree::ProofType::Standard) + .unwrap(); // Single leaf - let single_proof = full_proof.prove(&all_keys[..1], spacedb::subtree::ProofType::Standard).unwrap(); + let single_proof = full_proof + .prove(&all_keys[..1], spacedb::subtree::ProofType::Standard) + .unwrap(); for (label, subtree) in [ ("full (50 keys)", &full_proof), @@ -1224,7 +1399,10 @@ fn compare_encoding_sizes() { // Verify round-trip let deserialized: SubTree = borsh::from_slice(&bytes).unwrap(); - assert_eq!(deserialized.compute_root().unwrap(), subtree.compute_root().unwrap()); + assert_eq!( + deserialized.compute_root().unwrap(), + subtree.compute_root().unwrap() + ); } } @@ -1337,7 +1515,10 @@ fn read_only_while_writer_holds_db() { let mut writer_snapshot = db.begin_read().unwrap(); let writer_root = writer_snapshot.compute_root().unwrap(); - assert_eq!(reader_root, writer_root, "reader should see the same root as writer"); + assert_eq!( + reader_root, writer_root, + "reader should see the same root as writer" + ); for i in 0u8..10 { let mut k = [0u8; 32]; @@ -1374,9 +1555,12 @@ fn reset_to_empty() { } write.commit().unwrap(); - db.begin_write().unwrap() - .insert([0xFFu8; 32], vec![0xFF]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert([0xFFu8; 32], vec![0xFF]) + .unwrap() + .commit() + .unwrap(); assert_eq!(db.iter().count(), 2, "should have 2 snapshots"); assert_ne!(db.begin_read().unwrap().compute_root().unwrap(), empty_root); @@ -1391,9 +1575,12 @@ fn reset_to_empty() { assert_eq!(db.iter().count(), 0, "should have 0 snapshots after reset"); // Should be able to write again after reset - db.begin_write().unwrap() - .insert([0x42u8; 32], vec![1, 2, 3]).unwrap() - .commit().unwrap(); + db.begin_write() + 
.unwrap() + .insert([0x42u8; 32], vec![1, 2, 3]) + .unwrap() + .commit() + .unwrap(); let mut snapshot = db.begin_read().unwrap(); assert_eq!(snapshot.get(&[0x42u8; 32]).unwrap(), Some(vec![1, 2, 3])); @@ -1434,12 +1621,18 @@ fn hash_index_prove_matches_without_index() { assert!(snapshot.load_hash_index().unwrap(), "should load index"); let root_with = snapshot.compute_root().unwrap(); - assert_eq!(root_with, root_without, "root must match with and without index"); + assert_eq!( + root_with, root_without, + "root must match with and without index" + ); let proof_with = snapshot.prove(&prove_keys, ProofType::Standard).unwrap(); let proof_root_with = proof_with.compute_root().unwrap(); assert_eq!(proof_root_with, proof_root_without, "proof root must match"); - assert_eq!(proof_root_with, root_with, "proof root must equal tree root"); + assert_eq!( + proof_root_with, root_with, + "proof root must equal tree root" + ); // Cleanup let _ = std::fs::remove_file(&dir); @@ -1496,43 +1689,61 @@ fn hash_index_rollback_deletes_stale_index() { let db = Database::open(db_path).unwrap(); // Snapshot 1 - db.begin_write().unwrap() - .insert(Sha256Hasher::hash(b"a"), vec![1]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(Sha256Hasher::hash(b"a"), vec![1]) + .unwrap() + .commit() + .unwrap(); let snapshot1 = db.begin_read().unwrap(); // Snapshot 2 - db.begin_write().unwrap() - .insert(Sha256Hasher::hash(b"b"), vec![2]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(Sha256Hasher::hash(b"b"), vec![2]) + .unwrap() + .commit() + .unwrap(); let mut snapshot2 = db.begin_read().unwrap(); // Build index for snapshot 2 snapshot2.build_hash_index().unwrap(); // Verify index file exists - let stem = std::path::Path::new(db_path).file_stem().unwrap().to_str().unwrap(); + let stem = std::path::Path::new(db_path) + .file_stem() + .unwrap() + .to_str() + .unwrap(); let parent = std::path::Path::new(db_path).parent().unwrap(); - let 
index_exists_before = std::fs::read_dir(parent).unwrap() + let index_exists_before = std::fs::read_dir(parent) + .unwrap() .filter_map(|e| e.ok()) .any(|e| { let name = e.file_name().to_str().unwrap_or("").to_string(); name.starts_with(&format!("{}.", stem)) && name.ends_with(".hidx.sqlite") }); - assert!(index_exists_before, "index file should exist before rollback"); + assert!( + index_exists_before, + "index file should exist before rollback" + ); // Rollback to snapshot 1 snapshot1.rollback().unwrap(); // Index for snapshot 2 should be deleted - let index_exists_after = std::fs::read_dir(parent).unwrap() + let index_exists_after = std::fs::read_dir(parent) + .unwrap() .filter_map(|e| e.ok()) .any(|e| { let name = e.file_name().to_str().unwrap_or("").to_string(); name.starts_with(&format!("{}.", stem)) && name.ends_with(".hidx.sqlite") }); - assert!(!index_exists_after, "index file should be deleted after rollback"); + assert!( + !index_exists_after, + "index file should be deleted after rollback" + ); let _ = std::fs::remove_file(&dir); } @@ -1547,23 +1758,34 @@ fn hash_index_reset_deletes_all_indexes() { let db = Database::open(db_path).unwrap(); // Create two snapshots with indexes - db.begin_write().unwrap() - .insert(Sha256Hasher::hash(b"a"), vec![1]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(Sha256Hasher::hash(b"a"), vec![1]) + .unwrap() + .commit() + .unwrap(); db.begin_read().unwrap().build_hash_index().unwrap(); - db.begin_write().unwrap() - .insert(Sha256Hasher::hash(b"b"), vec![2]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(Sha256Hasher::hash(b"b"), vec![2]) + .unwrap() + .commit() + .unwrap(); db.begin_read().unwrap().build_hash_index().unwrap(); // Reset db.reset().unwrap(); // All index files should be gone - let stem = std::path::Path::new(db_path).file_stem().unwrap().to_str().unwrap(); + let stem = std::path::Path::new(db_path) + .file_stem() + .unwrap() + .to_str() + .unwrap(); let 
parent = std::path::Path::new(db_path).parent().unwrap(); - let any_index = std::fs::read_dir(parent).unwrap() + let any_index = std::fs::read_dir(parent) + .unwrap() .filter_map(|e| e.ok()) .any(|e| { let name = e.file_name().to_str().unwrap_or("").to_string(); @@ -1582,11 +1804,16 @@ fn hash_index_prune_keeps_n_most_recent() { let db_path = dir.to_str().unwrap(); let db = Database::open(db_path).unwrap(); - let stem = std::path::Path::new(db_path).file_stem().unwrap().to_str().unwrap(); + let stem = std::path::Path::new(db_path) + .file_stem() + .unwrap() + .to_str() + .unwrap(); let parent = std::path::Path::new(db_path).parent().unwrap(); let count_indexes = || -> usize { - std::fs::read_dir(parent).unwrap() + std::fs::read_dir(parent) + .unwrap() .filter_map(|e| e.ok()) .filter(|e| { let name = e.file_name().to_str().unwrap_or("").to_string(); @@ -1599,9 +1826,12 @@ fn hash_index_prune_keeps_n_most_recent() { for i in 0u8..4 { let mut key = [0u8; 32]; key[0] = i; - db.begin_write().unwrap() - .insert(key, vec![i]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(key, vec![i]) + .unwrap() + .commit() + .unwrap(); db.begin_read().unwrap().build_hash_index().unwrap(); } assert_eq!(count_indexes(), 4, "should have 4 index files"); @@ -1612,7 +1842,11 @@ fn hash_index_prune_keeps_n_most_recent() { // Prune to keep 0 db.prune_hash_indexes(0); - assert_eq!(count_indexes(), 0, "should have 0 index files after prune(0)"); + assert_eq!( + count_indexes(), + 0, + "should have 0 index files after prune(0)" + ); let _ = std::fs::remove_file(&dir); } @@ -1627,15 +1861,21 @@ fn hash_index_fingerprint_mismatch_after_rollback_and_new_writes() { let db = Database::open(db_path).unwrap(); // Snapshot 1 - db.begin_write().unwrap() - .insert(Sha256Hasher::hash(b"x"), vec![1]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(Sha256Hasher::hash(b"x"), vec![1]) + .unwrap() + .commit() + .unwrap(); let snap1 = db.begin_read().unwrap(); // 
Snapshot 2 - db.begin_write().unwrap() - .insert(Sha256Hasher::hash(b"y"), vec![2]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(Sha256Hasher::hash(b"y"), vec![2]) + .unwrap() + .commit() + .unwrap(); // Build index for snapshot 2 db.begin_read().unwrap().build_hash_index().unwrap(); @@ -1644,13 +1884,19 @@ fn hash_index_fingerprint_mismatch_after_rollback_and_new_writes() { snap1.rollback().unwrap(); // New writes — new snapshot may reuse the old root offset - db.begin_write().unwrap() - .insert(Sha256Hasher::hash(b"z"), vec![3]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert(Sha256Hasher::hash(b"z"), vec![3]) + .unwrap() + .commit() + .unwrap(); // The old index was already cleaned up by rollback, so load should return false let mut snapshot = db.begin_read().unwrap(); - assert!(!snapshot.load_hash_index().unwrap(), "should not load stale index"); + assert!( + !snapshot.load_hash_index().unwrap(), + "should not load stale index" + ); let _ = std::fs::remove_file(&dir); Database::::cleanup_hash_indexes(&Some(db_path.to_string()), 0); @@ -1660,12 +1906,18 @@ fn hash_index_fingerprint_mismatch_after_rollback_and_new_writes() { #[test] fn hash_index_memory_db_returns_error() { let db = Database::memory().unwrap(); - db.begin_write().unwrap() - .insert([1u8; 32], vec![1]).unwrap() - .commit().unwrap(); + db.begin_write() + .unwrap() + .insert([1u8; 32], vec![1]) + .unwrap() + .commit() + .unwrap(); let mut snapshot = db.begin_read().unwrap(); - assert!(snapshot.build_hash_index().is_err(), "should error for in-memory db"); + assert!( + snapshot.build_hash_index().is_err(), + "should error for in-memory db" + ); } #[cfg(feature = "hash-idx")] @@ -1721,9 +1973,14 @@ fn hash_index_auto_build_on_commit() { let mut snapshot = db.begin_read().unwrap(); // Verify the index file exists - let stem = std::path::Path::new(db_path).file_stem().unwrap().to_str().unwrap(); + let stem = std::path::Path::new(db_path) + .file_stem() 
+ .unwrap() + .to_str() + .unwrap(); let parent = std::path::Path::new(db_path).parent().unwrap(); - let has_index = std::fs::read_dir(parent).unwrap() + let has_index = std::fs::read_dir(parent) + .unwrap() .filter_map(|e| e.ok()) .any(|e| { let name = e.file_name().to_str().unwrap_or("").to_string(); @@ -1732,7 +1989,9 @@ fn hash_index_auto_build_on_commit() { assert!(has_index, "auto-built index file should exist after commit"); // Verify prove works and roots match - let keys: Vec<_> = (0u32..3).map(|i| Sha256Hasher::hash(&i.to_le_bytes())).collect(); + let keys: Vec<_> = (0u32..3) + .map(|i| Sha256Hasher::hash(&i.to_le_bytes())) + .collect(); let root = snapshot.compute_root().unwrap(); let proof = snapshot.prove(&keys, ProofType::Standard).unwrap(); assert_eq!(proof.compute_root().unwrap(), root);