diff --git a/docs/release-notes/release-notes-0.8.0.md b/docs/release-notes/release-notes-0.8.0.md index 65e5e64b6..f5f2baa36 100644 --- a/docs/release-notes/release-notes-0.8.0.md +++ b/docs/release-notes/release-notes-0.8.0.md @@ -300,6 +300,11 @@ ## Performance Improvements +- [PR#2045](https://github.com/lightninglabs/taproot-assets/pull/2045) + Added a raw proof append path that appends pre-encoded proofs directly to an + encoded proof file blob in O(1) space, plus benchmark coverage for append + performance. + ## Deprecations # Technical and Architectural Updates diff --git a/proof/append.go b/proof/append.go index 16d0b28cc..84c02e0a7 100644 --- a/proof/append.go +++ b/proof/append.go @@ -84,22 +84,13 @@ type TransitionParams struct { func AppendTransition(blob Blob, params *TransitionParams, vCtx VerifierCtx, opts ...GenOption) (Blob, *Proof, error) { - // Decode the proof blob into a proper file structure first. - f := NewEmptyFile(V0) - if err := f.Decode(bytes.NewReader(blob)); err != nil { - return nil, nil, fmt.Errorf("error decoding proof file: %w", - err) - } - - // Cannot add a transition to an empty proof file. - if f.IsEmpty() { - return nil, nil, fmt.Errorf("invalid empty proof file") - } - - lastProof, err := f.LastProof() + // Read only the last proof from the blob to obtain the previous + // outpoint and append metadata, while avoiding allocation of the full + // proof chain. + lastProof, _, appendHint, err := lastProofFromBlobWithHint(blob) if err != nil { - return nil, nil, fmt.Errorf("error fetching last proof: %w", - err) + return nil, nil, fmt.Errorf("error reading last proof from "+ + "blob: %w", err) } lastPrevOut := wire.OutPoint{ @@ -114,9 +105,20 @@ func AppendTransition(blob Blob, params *TransitionParams, vCtx VerifierCtx, "proof: %w", err) } - // Before we encode and return the proof, we want to validate it. For - // that we need to start at the beginning. + // Before we return the proof we want to validate the full chain. 
For + // that we still need to decode the entire file into memory. ctx := context.Background() + + f := NewEmptyFile(V0) + if err := f.Decode(bytes.NewReader(blob)); err != nil { + return nil, nil, fmt.Errorf("error decoding proof file: %w", + err) + } + + if f.IsEmpty() { + return nil, nil, fmt.Errorf("invalid empty proof file") + } + if err := f.AppendProof(*newProof); err != nil { return nil, nil, fmt.Errorf("error appending proof: %w", err) } @@ -126,14 +128,23 @@ func AppendTransition(blob Blob, params *TransitionParams, vCtx VerifierCtx, return nil, nil, fmt.Errorf("error verifying proof: %w", err) } - // Encode the full file again, with the new proof appended. - var buf bytes.Buffer - if err := f.Encode(&buf); err != nil { - return nil, nil, fmt.Errorf("error encoding proof file: %w", + // Encode the new proof bytes so we can use the streaming append to + // avoid re-encoding the entire file. + newProofBytes, err := newProof.Bytes() + if err != nil { + return nil, nil, fmt.Errorf("error encoding new proof: %w", + err) + } + + result, err := appendRawProofToBlobWithHint( + blob, newProofBytes, appendHint, + ) + if err != nil { + return nil, nil, fmt.Errorf("error appending proof to blob: %w", err) } - return buf.Bytes(), newProof, nil + return result, newProof, nil } // UpdateTransitionProof computes a new transaction merkle proof from the given diff --git a/proof/file.go b/proof/file.go index 0c222de89..6340a35bf 100644 --- a/proof/file.go +++ b/proof/file.go @@ -72,6 +72,15 @@ type hashedProof struct { hash [sha256.Size]byte } +// blobAppendHint contains enough metadata to append a raw proof to an encoded +// proof file blob without re-scanning existing proof entries. +type blobAppendHint struct { + count uint64 + countOffset int64 + oldCountSize int64 + lastHash [sha256.Size]byte +} + // File represents a proof file comprised of proofs for all of an asset's state // transitions back to its genesis state. 
type File struct { @@ -477,6 +486,291 @@ func hashProof(proofBytes []byte, prevHash [32]byte) [32]byte { return *(*[32]byte)(h.Sum(nil)) } +// LastProofFromBlob decodes only the last proof entry from an encoded proof +// file blob without loading all proofs into memory. It returns the decoded +// last proof and its raw bytes. +func LastProofFromBlob(blob Blob) (*Proof, []byte, error) { + p, rawProof, _, err := lastProofFromBlobWithHint(blob) + return p, rawProof, err +} + +// lastProofFromBlobWithHint decodes only the last proof entry from an encoded +// proof file blob and also returns append metadata derived from the same scan. +func lastProofFromBlobWithHint(blob Blob) (*Proof, []byte, *blobAppendHint, + error) { + + const fixedHeaderSize = PrefixMagicBytesLength + 4 + + if len(blob) < fixedHeaderSize { + return nil, nil, nil, fmt.Errorf( + "blob too short to be a valid proof file", + ) + } + + if !IsProofFile(blob) { + return nil, nil, nil, fmt.Errorf("blob is not a valid proof file") + } + + r := bytes.NewReader(blob) + + if _, err := r.Seek(fixedHeaderSize, io.SeekStart); err != nil { + return nil, nil, nil, fmt.Errorf( + "seeking past fixed header: %w", err, + ) + } + + var tlvBuf [8]byte + count, err := tlv.ReadVarInt(r, &tlvBuf) + if err != nil { + return nil, nil, nil, fmt.Errorf("reading proof count: %w", err) + } + + if count == 0 { + return nil, nil, nil, ErrEmptyProofFile + } + + if count > FileMaxNumProofs { + return nil, nil, nil, fmt.Errorf("%w: too many proofs in file", + ErrProofFileInvalid) + } + + // Skip all proof bytes except the last one, while keeping track of the + // previous stored hash so we can verify the last proof hash. 
+ var ( + lastProofBytes []byte + prevHash [sha256.Size]byte + lastHash [sha256.Size]byte + ) + for i := uint64(0); i < count; i++ { + numProofBytes, err := tlv.ReadVarInt(r, &tlvBuf) + if err != nil { + return nil, nil, nil, fmt.Errorf( + "reading proof length (idx=%d): %w", i, err, + ) + } + + if numProofBytes > FileMaxProofSizeBytes { + return nil, nil, nil, fmt.Errorf( + "%w: proof in file too large", + ErrProofFileInvalid, + ) + } + + if i < count-1 { + // Skip proof bytes and read the stored chained hash for all + // but the last proof. + if _, err := r.Seek( + int64(numProofBytes), io.SeekCurrent, + ); err != nil { + return nil, nil, nil, fmt.Errorf( + "skipping proof bytes (idx=%d): %w", i, err, + ) + } + + if _, err := io.ReadFull( + r, prevHash[:], + ); err != nil { + return nil, nil, nil, fmt.Errorf( + "reading hash (idx=%d): %w", i, err, + ) + } + } else { + proofBytes := make([]byte, numProofBytes) + if _, err := io.ReadFull(r, proofBytes); err != nil { + return nil, nil, nil, fmt.Errorf( + "reading proof bytes (idx=%d): %w", i, err, + ) + } + + // For the last proof, read and verify the stored hash. + var storedHash [sha256.Size]byte + if _, err := io.ReadFull( + r, storedHash[:], + ); err != nil { + return nil, nil, nil, fmt.Errorf( + "reading last proof hash: %w", err, + ) + } + computedHash := hashProof(proofBytes, prevHash) + if storedHash != computedHash { + return nil, nil, nil, ErrInvalidChecksum + } + + lastProofBytes = proofBytes + lastHash = storedHash + } + } + + p := &Proof{} + if err := p.Decode(bytes.NewReader(lastProofBytes)); err != nil { + return nil, nil, nil, fmt.Errorf("decoding last proof: %w", err) + } + + hint := &blobAppendHint{ + count: count, + countOffset: fixedHeaderSize, + oldCountSize: int64(tlv.VarIntSize(count)), + lastHash: lastHash, + } + + return p, lastProofBytes, hint, nil +} + +// blobAppendHintFromBlob scans an encoded proof blob and returns metadata +// needed for appending a raw proof. 
For non-empty files this verifies chained +// hashes while reading. +func blobAppendHintFromBlob(blob Blob) (*blobAppendHint, error) { + const fixedHeaderSize = PrefixMagicBytesLength + 4 + + if len(blob) < fixedHeaderSize { + return nil, fmt.Errorf("blob too short to be a valid proof file") + } + + if !IsProofFile(blob) { + return nil, fmt.Errorf("blob is not a valid proof file") + } + + r := bytes.NewReader(blob) + if _, err := r.Seek(fixedHeaderSize, io.SeekStart); err != nil { + return nil, fmt.Errorf("seeking past fixed header: %w", err) + } + + var tlvBuf [8]byte + count, err := tlv.ReadVarInt(r, &tlvBuf) + if err != nil { + return nil, fmt.Errorf("reading proof count: %w", err) + } + + if count > FileMaxNumProofs { + return nil, fmt.Errorf("%w: too many proofs in file", + ErrProofFileInvalid) + } + + var ( + prevHash [sha256.Size]byte + lastHash [sha256.Size]byte + ) + + for i := uint64(0); i < count; i++ { + numProofBytes, err := tlv.ReadVarInt(r, &tlvBuf) + if err != nil { + return nil, fmt.Errorf("reading proof length "+ + "(idx=%d): %w", i, err) + } + + if numProofBytes > FileMaxProofSizeBytes { + return nil, fmt.Errorf("%w: proof in file too large", + ErrProofFileInvalid) + } + + proofBytes := make([]byte, numProofBytes) + if _, err := io.ReadFull(r, proofBytes); err != nil { + return nil, fmt.Errorf("reading proof bytes (idx=%d): %w", + i, err) + } + + var storedHash [sha256.Size]byte + if _, err := io.ReadFull(r, storedHash[:]); err != nil { + return nil, fmt.Errorf("reading proof hash "+ + "(idx=%d): %w", i, err) + } + + computedHash := hashProof(proofBytes, prevHash) + if storedHash != computedHash { + return nil, ErrInvalidChecksum + } + + lastHash = storedHash + prevHash = storedHash + } + + return &blobAppendHint{ + count: count, + countOffset: fixedHeaderSize, + oldCountSize: int64(tlv.VarIntSize(count)), + lastHash: lastHash, + }, nil +} + +// AppendRawProofToBlob appends a pre-encoded proof to an existing encoded +// proof file blob without 
decoding the entire file into memory. It operates +// in O(1) space, and O(N) time in the number of existing proofs, since it +// must walk all existing entries to find the last hash by: +// +// 1. Reading the proof count from the file header. +// 2. Skipping through all existing proof entries to reach the last hash. +// 3. Computing the new proof's chained hash using the last hash as prevHash. +// 4. Appending the new proof entry to the blob. +// 5. Patching the proof count field in the header in-place. +// +// If incrementing the proof count causes the varint encoding to grow (e.g. +// crossing the 253-proof boundary), the header bytes are shifted accordingly. +func AppendRawProofToBlob(blob Blob, newProofBytes []byte) (Blob, error) { + hint, err := blobAppendHintFromBlob(blob) + if err != nil { + return nil, err + } + + return appendRawProofToBlobWithHint(blob, newProofBytes, hint) +} + +// appendRawProofToBlobWithHint appends a pre-encoded proof to an encoded proof +// file blob using append metadata gathered from an earlier scan. +func appendRawProofToBlobWithHint(blob Blob, newProofBytes []byte, + hint *blobAppendHint) (Blob, error) { + + if hint == nil { + return nil, fmt.Errorf("missing blob append hint") + } + + newCount := hint.count + 1 + if newCount > FileMaxNumProofs { + return nil, fmt.Errorf("%w: too many proofs in file", + ErrProofFileInvalid) + } + + newCountSize := int64(tlv.VarIntSize(newCount)) + newHash := hashProof(newProofBytes, hint.lastHash) + + // Encode the new proof entry: varint(len) + bytes + hash. + var tlvBuf [8]byte + var entryBuf bytes.Buffer + if err := tlv.WriteVarInt( + &entryBuf, uint64(len(newProofBytes)), &tlvBuf, + ); err != nil { + return nil, fmt.Errorf("encoding new proof length: %w", err) + } + entryBuf.Write(newProofBytes) + entryBuf.Write(newHash[:]) + + // Build the result blob. If the count varint size is unchanged we can + // patch the count field in-place and simply append the new entry. 
+ // Otherwise we need to splice in the wider count encoding. + result := make( + []byte, int64(len(blob))+newCountSize-hint.oldCountSize, + int64(len(blob))+newCountSize-hint.oldCountSize+ + int64(entryBuf.Len()), + ) + copy(result, blob[:hint.countOffset]) + + // Write the updated count at the count offset. + var countBuf bytes.Buffer + if err := tlv.WriteVarInt(&countBuf, newCount, &tlvBuf); err != nil { + return nil, fmt.Errorf("encoding updated proof count: %w", err) + } + copy(result[hint.countOffset:], countBuf.Bytes()) + + // Copy the rest of the original blob (all proof entries) after the + // count field. + proofDataStart := hint.countOffset + hint.oldCountSize + copy(result[hint.countOffset+newCountSize:], blob[proofDataStart:]) + + // Append the new proof entry. + result = append(result, entryBuf.Bytes()...) + + return result, nil +} + // AssetSnapshot commits to the result of a valid proof within a proof file. // This represents the state of an asset's lineage at a given point in time. type AssetSnapshot struct { diff --git a/proof/file_append_test.go b/proof/file_append_test.go new file mode 100644 index 000000000..47d6e90dd --- /dev/null +++ b/proof/file_append_test.go @@ -0,0 +1,540 @@ +package proof + +import ( + "bytes" + "crypto/sha256" + "strconv" + "testing" + + "github.com/lightninglabs/taproot-assets/asset" + "github.com/stretchr/testify/require" +) + +// buildProofChain creates a proof file with n genesis proofs appended +// sequentially. Each proof is independently generated to simulate a realistic +// chain of distinct proofs. 
+func buildProofChain(t testing.TB, n int) (*File, []Proof) { + t.Helper() + + proofs := make([]Proof, n) + for i := range proofs { + amt := uint64(i + 1) + proofs[i], _ = genRandomGenesisWithProof( + t, asset.Normal, &amt, nil, true, nil, nil, nil, nil, + asset.V0, + ) + } + + f := NewEmptyFile(V0) + for i := range proofs { + require.NoError(t, f.AppendProof(proofs[i])) + } + + return f, proofs +} + +// encodeFile encodes the file to a byte slice. +func encodeFile(t testing.TB, f *File) []byte { + t.Helper() + + var buf bytes.Buffer + require.NoError(t, f.Encode(&buf)) + + return buf.Bytes() +} + +// TestFileAppendProofChainIntegrity verifies that after appending proofs one +// by one the chained hashes remain consistent with a file built all at once. +func TestFileAppendProofChainIntegrity(t *testing.T) { + t.Parallel() + + const numProofs = 10 + + proofs := make([]Proof, numProofs) + for i := range proofs { + amt := uint64(i + 1) + proofs[i], _ = genRandomGenesisWithProof( + t, asset.Normal, &amt, nil, true, nil, nil, nil, nil, + asset.V0, + ) + } + + // Build the reference file from all proofs at once. + reference, err := NewFile(V0, proofs...) + require.NoError(t, err) + + // Build the same file by appending one proof at a time. + incremental := NewEmptyFile(V0) + for i := range proofs { + require.NoError(t, incremental.AppendProof(proofs[i])) + } + + require.Equal(t, reference.NumProofs(), incremental.NumProofs()) + + // Every stored hash must match between the two files. + for i := range reference.proofs { + require.Equal( + t, reference.proofs[i].hash, incremental.proofs[i].hash, + "hash mismatch at index %d", i, + ) + require.Equal( + t, reference.proofs[i].proofBytes, + incremental.proofs[i].proofBytes, + "proof bytes mismatch at index %d", i, + ) + } +} + +// TestFileAppendRawProofChainIntegrity verifies that AppendProofRaw produces +// the same chained hashes as AppendProof for the same proof bytes. 
+func TestFileAppendRawProofChainIntegrity(t *testing.T) { + t.Parallel() + + const numProofs = 5 + + proofs := make([]Proof, numProofs) + for i := range proofs { + amt := uint64(i + 1) + proofs[i], _ = genRandomGenesisWithProof( + t, asset.Normal, &amt, nil, true, nil, nil, nil, nil, + asset.V0, + ) + } + + typed := NewEmptyFile(V0) + raw := NewEmptyFile(V0) + + for i := range proofs { + require.NoError(t, typed.AppendProof(proofs[i])) + + proofBytes, err := proofs[i].Bytes() + require.NoError(t, err) + require.NoError(t, raw.AppendProofRaw(proofBytes)) + } + + require.Equal(t, typed.NumProofs(), raw.NumProofs()) + for i := range typed.proofs { + require.Equal( + t, typed.proofs[i].hash, raw.proofs[i].hash, + "hash mismatch at index %d", i, + ) + } +} + +// TestFileAppendHashChain verifies the chained hash invariant explicitly: +// each proof's hash must equal SHA256(prev_hash || proof_bytes). +func TestFileAppendHashChain(t *testing.T) { + t.Parallel() + + f, _ := buildProofChain(t, 8) + + var prevHash [sha256.Size]byte + for i, hp := range f.proofs { + expected := hashProof(hp.proofBytes, prevHash) + require.Equal( + t, expected, hp.hash, + "chained hash invariant broken at index %d", i, + ) + prevHash = hp.hash + } +} + +// TestFileAppendEncodeDecode verifies that a file built by sequential appends +// survives a full encode/decode round-trip with all hashes intact. 
+func TestFileAppendEncodeDecode(t *testing.T) { + t.Parallel() + + const numProofs = 15 + + f, _ := buildProofChain(t, numProofs) + + blob := encodeFile(t, f) + + decoded := NewEmptyFile(V0) + require.NoError(t, decoded.Decode(bytes.NewReader(blob))) + + require.Equal(t, numProofs, decoded.NumProofs()) + for i := range f.proofs { + require.Equal( + t, f.proofs[i].hash, decoded.proofs[i].hash, + "hash mismatch at index %d after round-trip", i, + ) + require.Equal( + t, f.proofs[i].proofBytes, decoded.proofs[i].proofBytes, + "proof bytes mismatch at index %d after round-trip", i, + ) + } +} + +// TestFileAppendToExistingBlob verifies that appending a proof to an already +// encoded blob (the pattern used by AppendTransition) yields the same result +// as building the file from scratch. +func TestFileAppendToExistingBlob(t *testing.T) { + t.Parallel() + + const numExisting = 5 + + // Build a file with numExisting proofs and encode it. + existing, proofs := buildProofChain(t, numExisting) + blob := encodeFile(t, existing) + + // Generate the new proof to append. + amt := uint64(numExisting + 1) + newProof, _ := genRandomGenesisWithProof( + t, asset.Normal, &amt, nil, true, nil, nil, nil, nil, asset.V0, + ) + + // Append via decode → AppendProof → encode (current approach). + f := NewEmptyFile(V0) + require.NoError(t, f.Decode(bytes.NewReader(blob))) + require.NoError(t, f.AppendProof(newProof)) + + got := encodeFile(t, f) + + // Build the reference by constructing the full file from scratch. + allProofs := append(proofs, newProof) //nolint:gocritic + reference, err := NewFile(V0, allProofs...) + require.NoError(t, err) + + want := encodeFile(t, reference) + + require.Equal(t, want, got) +} + +// TestFileAppendUnknownVersionRejected ensures AppendProof and AppendProofRaw +// both reject files with an unrecognised version. 
+func TestFileAppendUnknownVersionRejected(t *testing.T) { + t.Parallel() + + amt := uint64(1) + p, _ := genRandomGenesisWithProof( + t, asset.Normal, &amt, nil, true, nil, nil, nil, nil, asset.V0, + ) + pBytes, err := p.Bytes() + require.NoError(t, err) + + f := NewEmptyFile(Version(255)) + + require.ErrorIs(t, f.AppendProof(p), ErrUnknownVersion) + require.ErrorIs(t, f.AppendProofRaw(pBytes), ErrUnknownVersion) +} + +// TestFileAppendFirstProofUsesZeroPrevHash verifies that the first proof in a +// file is hashed against the all-zero previous hash. +func TestFileAppendFirstProofUsesZeroPrevHash(t *testing.T) { + t.Parallel() + + amt := uint64(42) + p, _ := genRandomGenesisWithProof( + t, asset.Normal, &amt, nil, true, nil, nil, nil, nil, asset.V0, + ) + + f := NewEmptyFile(V0) + require.NoError(t, f.AppendProof(p)) + + pBytes, err := p.Bytes() + require.NoError(t, err) + + var zeroPrevHash [sha256.Size]byte + expected := hashProof(pBytes, zeroPrevHash) + + require.Equal(t, expected, f.proofs[0].hash) +} + +// TestAppendRawProofToBlobMatchesFullRoundTrip verifies that +// AppendRawProofToBlob produces a blob identical to the full +// decode→AppendProof→encode round-trip. +func TestAppendRawProofToBlobMatchesFullRoundTrip(t *testing.T) { + t.Parallel() + + const numExisting = 7 + + existing, _ := buildProofChain(t, numExisting) + blob := encodeFile(t, existing) + + amt := uint64(numExisting + 1) + newProof, _ := genRandomGenesisWithProof( + t, asset.Normal, &amt, nil, true, nil, nil, nil, nil, asset.V0, + ) + newProofBytes, err := newProof.Bytes() + require.NoError(t, err) + + // Streaming path. + streamBlob, err := AppendRawProofToBlob(blob, newProofBytes) + require.NoError(t, err) + + // Full round-trip path. 
+ f := NewEmptyFile(V0) + require.NoError(t, f.Decode(bytes.NewReader(blob))) + require.NoError(t, f.AppendProofRaw(newProofBytes)) + fullBlob := encodeFile(t, f) + + require.Equal(t, fullBlob, []byte(streamBlob)) +} + +// TestAppendRawProofToBlobCountBoundary verifies that AppendRawProofToBlob +// correctly handles the varint size boundary at 253 proofs (where the count +// encoding grows from 1 byte to 3 bytes). +func TestAppendRawProofToBlobCountBoundary(t *testing.T) { + t.Parallel() + + // Build a file with exactly 252 proofs (one below the 1-byte varint + // limit of 0xfd=253). + const numExisting = 252 + + existing, _ := buildProofChain(t, numExisting) + blob := encodeFile(t, existing) + + amt := uint64(numExisting + 1) + newProof, _ := genRandomGenesisWithProof( + t, asset.Normal, &amt, nil, true, nil, nil, nil, nil, asset.V0, + ) + newProofBytes, err := newProof.Bytes() + require.NoError(t, err) + + // Streaming append crosses the 1→3 byte varint boundary. + streamBlob, err := AppendRawProofToBlob(blob, newProofBytes) + require.NoError(t, err) + + // Full round-trip for reference. + f := NewEmptyFile(V0) + require.NoError(t, f.Decode(bytes.NewReader(blob))) + require.NoError(t, f.AppendProofRaw(newProofBytes)) + fullBlob := encodeFile(t, f) + + require.Equal(t, fullBlob, []byte(streamBlob)) + + // Decode the streaming result and verify it has 253 proofs. + decoded := NewEmptyFile(V0) + require.NoError(t, decoded.Decode(bytes.NewReader(streamBlob))) + require.Equal(t, numExisting+1, decoded.NumProofs()) +} + +// TestAppendRawProofToBlobWithHintMatchesFullRoundTrip verifies that appending +// with metadata from LastProofFromBlob yields identical bytes to the full +// decode→append→encode path. 
+func TestAppendRawProofToBlobWithHintMatchesFullRoundTrip(t *testing.T) { + t.Parallel() + + const numExisting = 9 + + existing, _ := buildProofChain(t, numExisting) + blob := encodeFile(t, existing) + + amt := uint64(numExisting + 1) + newProof, _ := genRandomGenesisWithProof( + t, asset.Normal, &amt, nil, true, nil, nil, nil, nil, asset.V0, + ) + newProofBytes, err := newProof.Bytes() + require.NoError(t, err) + + _, _, hint, err := lastProofFromBlobWithHint(blob) + require.NoError(t, err) + + streamBlob, err := appendRawProofToBlobWithHint( + blob, newProofBytes, hint, + ) + require.NoError(t, err) + + f := NewEmptyFile(V0) + require.NoError(t, f.Decode(bytes.NewReader(blob))) + require.NoError(t, f.AppendProofRaw(newProofBytes)) + fullBlob := encodeFile(t, f) + + require.Equal(t, fullBlob, []byte(streamBlob)) +} + +// BenchmarkFileAppendProof measures the time and allocations for appending a +// single proof to files of increasing size. This establishes the baseline for +// the current O(1)-in-memory append but O(n) encode/decode round-trip that +// AppendTransition performs. +func BenchmarkFileAppendProof(b *testing.B) { + sizes := []int{10, 100, 1_000, 10_000} + + for _, n := range sizes { + n := n + b.Run( + // nolint:forbidigo + func() string { + if n < 1000 { + return "proofs=" + strconv.Itoa(n) + } + return "proofs=" + strconv.Itoa(n/1000) + "k" + }(), + func(b *testing.B) { + f, _ := buildProofChain(b, n) + + amt := uint64(n + 1) + newProof, _ := genRandomGenesisWithProof( + b, asset.Normal, &amt, nil, true, nil, + nil, nil, nil, asset.V0, + ) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Clone the file so each iteration + // starts from the same state. 
+ clone := &File{ + Version: f.Version, + proofs: make( + []*hashedProof, + len(f.proofs), + ), + } + copy(clone.proofs, f.proofs) + + if err := clone.AppendProof( + newProof, + ); err != nil { + b.Fatal(err) + } + } + }, + ) + } +} + +// BenchmarkAppendTransitionFullRoundTrip measures the full cost of the current +// AppendTransition pattern: decode entire blob → append → encode entire blob. +// This is the hot path that needs to be optimised. +func BenchmarkAppendTransitionFullRoundTrip(b *testing.B) { + sizes := []int{10, 100, 1_000, 5_000} + + for _, n := range sizes { + n := n + b.Run( + func() string { + if n < 1000 { + return "proofs=" + strconv.Itoa(n) + } + return "proofs=" + strconv.Itoa(n/1000) + "k" + }(), + func(b *testing.B) { + f, _ := buildProofChain(b, n) + blob := encodeFile(b, f) + + amt := uint64(n + 1) + newProof, _ := genRandomGenesisWithProof( + b, asset.Normal, &amt, nil, true, nil, + nil, nil, nil, asset.V0, + ) + newProofBytes, err := newProof.Bytes() + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + // Simulate the current approach used + // by AppendTransition. + decoded := NewEmptyFile(V0) + if err := decoded.Decode( + bytes.NewReader(blob), + ); err != nil { + b.Fatal(err) + } + + if err := decoded.AppendProofRaw( + newProofBytes, + ); err != nil { + b.Fatal(err) + } + + var out bytes.Buffer + if err := decoded.Encode(&out); err != nil { + b.Fatal(err) + } + } + }, + ) + } +} + +// BenchmarkStreamingAppend measures the cost of AppendRawProofToBlob, which +// implements the O(1) streaming append path. Results should be compared +// against BenchmarkAppendTransitionFullRoundTrip to quantify the improvement. 
+func BenchmarkStreamingAppend(b *testing.B) { + sizes := []int{10, 100, 1_000, 5_000} + + for _, n := range sizes { + n := n + b.Run( + func() string { + if n < 1000 { + return "proofs=" + strconv.Itoa(n) + } + return "proofs=" + strconv.Itoa(n/1000) + "k" + }(), + func(b *testing.B) { + f, _ := buildProofChain(b, n) + blob := encodeFile(b, f) + + amt := uint64(n + 1) + newProof, _ := genRandomGenesisWithProof( + b, asset.Normal, &amt, nil, true, nil, + nil, nil, nil, asset.V0, + ) + newProofBytes, err := newProof.Bytes() + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _, err := AppendRawProofToBlob( + blob, newProofBytes, + ) + if err != nil { + b.Fatal(err) + } + } + }, + ) + } +} + +// BenchmarkFileEncodeDecode measures encode/decode throughput for files of +// increasing size, providing context for how much of the round-trip cost comes +// from serialisation alone. +func BenchmarkFileEncodeDecode(b *testing.B) { + sizes := []int{10, 100, 1_000, 10_000} + + for _, n := range sizes { + n := n + b.Run( + func() string { + if n < 1000 { + return "proofs=" + strconv.Itoa(n) + } + return "proofs=" + strconv.Itoa(n/1000) + "k" + }(), + func(b *testing.B) { + f, _ := buildProofChain(b, n) + blob := encodeFile(b, f) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + decoded := NewEmptyFile(V0) + if err := decoded.Decode( + bytes.NewReader(blob), + ); err != nil { + b.Fatal(err) + } + + var out bytes.Buffer + if err := decoded.Encode(&out); err != nil { + b.Fatal(err) + } + } + }, + ) + } +}