diff --git a/admin/commands/storage/read_range_cluster_blocks.go b/admin/commands/storage/read_range_cluster_blocks.go index b0e41b86fe8..b83a07e584d 100644 --- a/admin/commands/storage/read_range_cluster_blocks.go +++ b/admin/commands/storage/read_range_cluster_blocks.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/admin/commands" "github.com/onflow/flow-go/cmd/util/cmd/read-light-block" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" ) @@ -22,14 +23,12 @@ const Max_Range_Cluster_Block_Limit = uint64(10001) type ReadRangeClusterBlocksCommand struct { db storage.DB - headers *store.Headers payloads *store.ClusterPayloads } -func NewReadRangeClusterBlocksCommand(db storage.DB, headers *store.Headers, payloads *store.ClusterPayloads) commands.AdminCommand { +func NewReadRangeClusterBlocksCommand(db storage.DB, payloads *store.ClusterPayloads) commands.AdminCommand { return &ReadRangeClusterBlocksCommand{ db: db, - headers: headers, payloads: payloads, } } @@ -51,8 +50,12 @@ func (c *ReadRangeClusterBlocksCommand) Handler(ctx context.Context, req *admin. return nil, admin.NewInvalidAdminReqErrorf("getting for more than %v blocks at a time might have an impact to node's performance and is not allowed", Max_Range_Cluster_Block_Limit) } + clusterHeaders, err := store.NewClusterHeaders(metrics.NewNoopCollector(), c.db, flow.ChainID(chainID)) + if err != nil { + return nil, err + } clusterBlocks := store.NewClusterBlocks( - c.db, flow.ChainID(chainID), c.headers, c.payloads, + c.db, flow.ChainID(chainID), clusterHeaders, c.payloads, ) lights, err := read.ReadClusterLightBlockByHeightRange(clusterBlocks, reqData.startHeight, reqData.endHeight) diff --git a/cmd/collection/main.go b/cmd/collection/main.go index 08fd542449b..5bcaa9b44f1 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -235,8 +235,8 @@ func main() { }). 
AdminCommand("read-range-cluster-blocks", func(conf *cmd.NodeConfig) commands.AdminCommand { clusterPayloads := store.NewClusterPayloads(&metrics.NoopCollector{}, conf.ProtocolDB) - headers := store.NewHeaders(&metrics.NoopCollector{}, conf.ProtocolDB) - return storageCommands.NewReadRangeClusterBlocksCommand(conf.ProtocolDB, headers, clusterPayloads) + // defer construction of Headers since the cluster's ChainID is provided by the command + return storageCommands.NewReadRangeClusterBlocksCommand(conf.ProtocolDB, clusterPayloads) }). Module("follower distributor", func(node *cmd.NodeConfig) error { followerDistributor = pubsub.NewFollowerDistributor() diff --git a/cmd/scaffold.go b/cmd/scaffold.go index 142f2d55c6d..a44f9f86754 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -1186,8 +1186,45 @@ func (fnb *FlowNodeBuilder) initStorageLockManager() error { return nil } +// determineChainID attempts to determine the chain this node is running on +// directly from the database or root snapshot, before storage interfaces have been initialized. +// No errors expected during normal operation. 
+func (fnb *FlowNodeBuilder) determineChainID() error { + bootstrapped, err := badgerState.IsBootstrapped(fnb.ProtocolDB) + if err != nil { + return err + } + if bootstrapped { + chainID, err := badgerState.GetChainID(fnb.ProtocolDB) + if err != nil { + return err + } + fnb.RootChainID = chainID + } else { + // if no root snapshot is configured, attempt to load the file from disk + var rootSnapshot = fnb.RootSnapshot + if rootSnapshot == nil { + fnb.Logger.Info().Msgf("loading root protocol state snapshot from disk") + rootSnapshot, err = loadRootProtocolSnapshot(fnb.BaseConfig.BootstrapDir) + if err != nil { + return fmt.Errorf("failed to read protocol snapshot from disk: %w", err) + } + } + // retrieve ChainID from the snapshot + sealingSegment, err := rootSnapshot.SealingSegment() + if err != nil { + return fmt.Errorf("failed to read ChainID from root snapshot: %w", err) + } + fnb.RootChainID = sealingSegment.Highest().ChainID + } + return nil +} + func (fnb *FlowNodeBuilder) initStorage() error { - headers := store.NewHeaders(fnb.Metrics.Cache, fnb.ProtocolDB) + headers, err := store.NewHeaders(fnb.Metrics.Cache, fnb.ProtocolDB, fnb.RootChainID) + if err != nil { + return err + } guarantees := store.NewGuarantees(fnb.Metrics.Cache, fnb.ProtocolDB, fnb.BaseConfig.guaranteesCacheSize, store.DefaultCacheSize) seals := store.NewSeals(fnb.Metrics.Cache, fnb.ProtocolDB) @@ -1457,7 +1494,6 @@ func (fnb *FlowNodeBuilder) setRootSnapshot(rootSnapshot protocol.Snapshot) erro return fmt.Errorf("failed to read root QC: %w", err) } - fnb.RootChainID = fnb.FinalizedRootBlock.ChainID fnb.SporkID = fnb.RootSnapshot.Params().SporkID() return nil @@ -2081,16 +2117,20 @@ func (fnb *FlowNodeBuilder) onStart() error { return err } - if err := fnb.initStorage(); err != nil { - return err - } - for _, f := range fnb.preInitFns { if err := fnb.handlePreInit(f); err != nil { return err } } + if err := fnb.determineChainID(); err != nil { + return err + } + + if err := 
fnb.initStorage(); err != nil { + return err + } + if err := fnb.initState(); err != nil { return err } diff --git a/cmd/util/cmd/common/storage.go b/cmd/util/cmd/common/storage.go index 13cd2170f98..da38662d9a9 100644 --- a/cmd/util/cmd/common/storage.go +++ b/cmd/util/cmd/common/storage.go @@ -5,6 +5,7 @@ import ( "github.com/rs/zerolog/log" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" storagebadger "github.com/onflow/flow-go/storage/badger" @@ -53,9 +54,14 @@ func IsPebbleFolder(dataDir string) (bool, error) { return pebblestorage.IsPebbleFolder(dataDir) } -func InitStorages(db storage.DB) *store.All { +// InitStorages initializes the common storage abstractions used by all node roles (with default cache sizes +// suitable for mainnet). However, no metrics are collected (if you need metrics, use [store.InitAll] directly). +// The chain ID indicates which Flow network the node is operating on, referencing the ID of the blockchain +// built by the main consensus, i.e. security nodes (not the chains built by collector clusters). +// No errors are expected during normal operations. +func InitStorages(db storage.DB, chainID flow.ChainID) (*store.All, error) { metrics := &metrics.NoopCollector{} - return store.InitAll(metrics, db) + return store.InitAll(metrics, db, chainID) } // WithStorage runs the given function with the storage depending on the flags. 
diff --git a/cmd/util/cmd/exec-data-json-export/block_exporter.go b/cmd/util/cmd/exec-data-json-export/block_exporter.go index 32efa2e78a5..f00419b0080 100644 --- a/cmd/util/cmd/exec-data-json-export/block_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/block_exporter.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" ) @@ -37,9 +38,16 @@ func ExportBlocks(blockID flow.Identifier, dbPath string, outputPath string) (fl // traverse backward from the given block (parent block) and fetch by blockHash err := common.WithStorage(dbPath, func(db storage.DB) error { + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } cacheMetrics := &metrics.NoopCollector{} - headers := store.NewHeaders(cacheMetrics, db) + headers, err := store.NewHeaders(cacheMetrics, db, chainID) + if err != nil { + return err + } index := store.NewIndex(cacheMetrics, db) guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize, store.DefaultCacheSize) seals := store.NewSeals(cacheMetrics, db) diff --git a/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go b/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go index 3e3d6971a51..b59415077fb 100644 --- a/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" "github.com/onflow/flow-go/storage/store" @@ -26,9 +27,16 @@ func ExportDeltaSnapshots(blockID flow.Identifier, 
dbPath string, outputPath str // traverse backward from the given block (parent block) and fetch by blockHash return common.WithStorage(dbPath, func(db storage.DB) error { + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } cacheMetrics := &metrics.NoopCollector{} - headers := store.NewHeaders(cacheMetrics, db) + headers, err := store.NewHeaders(cacheMetrics, db, chainID) + if err != nil { + return err + } activeBlockID := blockID outputFile := filepath.Join(outputPath, "delta.jsonl") diff --git a/cmd/util/cmd/exec-data-json-export/event_exporter.go b/cmd/util/cmd/exec-data-json-export/event_exporter.go index 600f4d45af3..d95d8d77162 100644 --- a/cmd/util/cmd/exec-data-json-export/event_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/event_exporter.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" ) @@ -30,9 +31,16 @@ func ExportEvents(blockID flow.Identifier, dbPath string, outputPath string) err // traverse backward from the given block (parent block) and fetch by blockHash return common.WithStorage(dbPath, func(db storage.DB) error { + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } cacheMetrics := &metrics.NoopCollector{} - headers := store.NewHeaders(cacheMetrics, db) + headers, err := store.NewHeaders(cacheMetrics, db, chainID) + if err != nil { + return err + } events := store.NewEvents(cacheMetrics, db) activeBlockID := blockID diff --git a/cmd/util/cmd/exec-data-json-export/result_exporter.go b/cmd/util/cmd/exec-data-json-export/result_exporter.go index 6afefd7ee9a..655727b644f 100644 --- a/cmd/util/cmd/exec-data-json-export/result_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/result_exporter.go @@ -11,6 +11,7 @@ import ( 
"github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" ) @@ -28,9 +29,16 @@ func ExportResults(blockID flow.Identifier, dbPath string, outputPath string) er // traverse backward from the given block (parent block) and fetch by blockHash return common.WithStorage(dbPath, func(db storage.DB) error { + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } cacheMetrics := &metrics.NoopCollector{} - headers := store.NewHeaders(cacheMetrics, db) + headers, err := store.NewHeaders(cacheMetrics, db, chainID) + if err != nil { + return err + } results := store.NewExecutionResults(cacheMetrics, db) activeBlockID := blockID diff --git a/cmd/util/cmd/exec-data-json-export/transaction_exporter.go b/cmd/util/cmd/exec-data-json-export/transaction_exporter.go index 5c8f937437d..e0edeeccbd0 100644 --- a/cmd/util/cmd/exec-data-json-export/transaction_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/transaction_exporter.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage/store" ) @@ -48,6 +49,11 @@ func ExportExecutedTransactions(blockID flow.Identifier, dbPath string, outputPa } defer db.Close() + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } + cacheMetrics := &metrics.NoopCollector{} index := store.NewIndex(cacheMetrics, db) guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize, store.DefaultCacheSize) @@ -55,7 +61,10 @@ func ExportExecutedTransactions(blockID flow.Identifier, dbPath string, outputPa results := store.NewExecutionResults(cacheMetrics, db) receipts := 
store.NewExecutionReceipts(cacheMetrics, db, results, store.DefaultCacheSize) transactions := store.NewTransactions(cacheMetrics, db) - headers := store.NewHeaders(cacheMetrics, db) + headers, err := store.NewHeaders(cacheMetrics, db, chainID) + if err != nil { + return err + } payloads := store.NewPayloads(db, index, guarantees, seals, receipts, results) blocks := store.NewBlocks(db, headers, payloads) collections := store.NewCollections(db, transactions) diff --git a/cmd/util/cmd/export-json-transactions/cmd.go b/cmd/util/cmd/export-json-transactions/cmd.go index c8ba51cf2b5..70313b34025 100644 --- a/cmd/util/cmd/export-json-transactions/cmd.go +++ b/cmd/util/cmd/export-json-transactions/cmd.go @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/cmd/util/cmd/export-json-transactions/transactions" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" ) @@ -67,7 +68,14 @@ func ExportTransactions(lockManager lockctx.Manager, dataDir string, outputDir s // init dependencies return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } + storages, err := common.InitStorages(db, chainID) + if err != nil { + return err + } state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { diff --git a/cmd/util/cmd/export-json-transactions/transactions/range_test.go b/cmd/util/cmd/export-json-transactions/transactions/range_test.go index 9d85f5c6232..a086ba759dc 100644 --- a/cmd/util/cmd/export-json-transactions/transactions/range_test.go +++ b/cmd/util/cmd/export-json-transactions/transactions/range_test.go @@ -60,7 +60,8 @@ func TestFindBlockTransactions(t *testing.T) { ) // prepare dependencies - storages := common.InitStorages(db) + storages, err := common.InitStorages(db, flow.Emulator) + require.NoError(t, err) payloads, collections := 
storages.Payloads, storages.Collections snap4 := &mock.Snapshot{} snap4.On("Head").Return(b1.ToHeader(), nil) @@ -73,7 +74,7 @@ func TestFindBlockTransactions(t *testing.T) { // store into database p1 := unittest.ProposalFromBlock(b1) p2 := unittest.ProposalFromBlock(b2) - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := storages.Blocks.BatchStore(lctx, rw, p1) if err != nil { diff --git a/cmd/util/cmd/find-inconsistent-result/cmd.go b/cmd/util/cmd/find-inconsistent-result/cmd.go index 16636dd5fc9..661ec9b0e40 100644 --- a/cmd/util/cmd/find-inconsistent-result/cmd.go +++ b/cmd/util/cmd/find-inconsistent-result/cmd.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/block_iterator/latest" "github.com/onflow/flow-go/state/protocol" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" ) @@ -95,7 +96,14 @@ func findFirstMismatch(datadir string, startHeight, endHeight uint64, lockManage func createStorages(db storage.DB, lockManager lockctx.Manager) ( storage.Headers, storage.ExecutionResults, storage.Seals, protocol.State, error) { - storages := common.InitStorages(db) + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return nil, nil, nil, nil, err + } + storages, err := common.InitStorages(db, chainID) + if err != nil { + return nil, nil, nil, nil, err + } state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { return nil, nil, nil, nil, fmt.Errorf("could not open protocol state: %v", err) diff --git a/cmd/util/cmd/read-badger/cmd/blocks.go b/cmd/util/cmd/read-badger/cmd/blocks.go index 1e60bf85487..d84235eb831 100644 --- a/cmd/util/cmd/read-badger/cmd/blocks.go +++ 
b/cmd/util/cmd/read-badger/cmd/blocks.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" ) @@ -28,8 +29,15 @@ var blocksCmd = &cobra.Command{ Short: "get a block by block ID or height", RunE: func(cmd *cobra.Command, args []string) error { return common.WithStorage(flagDatadir, func(db storage.DB) error { - cacheMetrics := &metrics.NoopCollector{} - headers := store.NewHeaders(cacheMetrics, db) + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } + cacheMetrics := metrics.NewNoopCollector() + headers, err := store.NewHeaders(cacheMetrics, db, chainID) + if err != nil { + return err + } index := store.NewIndex(cacheMetrics, db) guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize, store.DefaultCacheSize) seals := store.NewSeals(cacheMetrics, db) @@ -39,7 +47,6 @@ var blocksCmd = &cobra.Command{ blocks := store.NewBlocks(db, headers, payloads) var block *flow.Block - var err error if flagBlockID != "" { log.Info().Msgf("got flag block id: %s", flagBlockID) diff --git a/cmd/util/cmd/read-badger/cmd/cluster_blocks.go b/cmd/util/cmd/read-badger/cmd/cluster_blocks.go index 6d094fd10ae..758917d2869 100644 --- a/cmd/util/cmd/read-badger/cmd/cluster_blocks.go +++ b/cmd/util/cmd/read-badger/cmd/cluster_blocks.go @@ -34,13 +34,16 @@ var clusterBlocksCmd = &cobra.Command{ return common.WithStorage(flagDatadir, func(db storage.DB) error { metrics := metrics.NewNoopCollector() - headers := store.NewHeaders(metrics, db) - clusterPayloads := store.NewClusterPayloads(metrics, db) - // get chain id log.Info().Msgf("got flag chain name: %s", flagChainName) chainID := flow.ChainID(flagChainName) - clusterBlocks := store.NewClusterBlocks(db, chainID, headers, clusterPayloads) + + 
clusterHeaders, err := store.NewClusterHeaders(metrics, db, chainID) + if err != nil { + return err + } + clusterPayloads := store.NewClusterPayloads(metrics, db) + clusterBlocks := store.NewClusterBlocks(db, chainID, clusterHeaders, clusterPayloads) if flagClusterBlockID != "" && flagHeight != 0 { return fmt.Errorf("provide either a --id or --height and not both") diff --git a/cmd/util/cmd/read-badger/cmd/collections.go b/cmd/util/cmd/read-badger/cmd/collections.go index b59817cad8e..4f515f15661 100644 --- a/cmd/util/cmd/read-badger/cmd/collections.go +++ b/cmd/util/cmd/read-badger/cmd/collections.go @@ -8,7 +8,9 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagCollectionID string @@ -28,7 +30,8 @@ var collectionsCmd = &cobra.Command{ Short: "get collection by collection or transaction ID", RunE: func(cmd *cobra.Command, args []string) error { return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + transactions := store.NewTransactions(metrics.NewNoopCollector(), db) + collections := store.NewCollections(db, transactions) if flagCollectionID != "" { log.Info().Msgf("got flag collection id: %s", flagCollectionID) @@ -41,7 +44,7 @@ var collectionsCmd = &cobra.Command{ // get only the light collection if specified if flagLightCollection { - light, err := storages.Collections.LightByID(collectionID) + light, err := collections.LightByID(collectionID) if err != nil { return fmt.Errorf("could not get collection with id %v: %w", collectionID, err) } @@ -50,7 +53,7 @@ var collectionsCmd = &cobra.Command{ } // otherwise get the full collection - fullCollection, err := storages.Collections.ByID(collectionID) + fullCollection, err := collections.ByID(collectionID) if err != nil { return fmt.Errorf("could not get collection: %w", err) } @@ 
-66,7 +69,7 @@ var collectionsCmd = &cobra.Command{ } log.Info().Msgf("getting collections by transaction id: %v", transactionID) - light, err := storages.Collections.LightByTransactionID(transactionID) + light, err := collections.LightByTransactionID(transactionID) if err != nil { return fmt.Errorf("could not get collections for transaction id %v: %w", transactionID, err) } diff --git a/cmd/util/cmd/read-badger/cmd/epoch_commit.go b/cmd/util/cmd/read-badger/cmd/epoch_commit.go index 23deb37105b..d6cd8d4ef22 100644 --- a/cmd/util/cmd/read-badger/cmd/epoch_commit.go +++ b/cmd/util/cmd/read-badger/cmd/epoch_commit.go @@ -8,7 +8,9 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagEpochCommitID string @@ -25,7 +27,7 @@ var epochCommitCmd = &cobra.Command{ Short: "get epoch commit by ID", RunE: func(cmd *cobra.Command, args []string) error { return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + epochCommits := store.NewEpochCommits(metrics.NewNoopCollector(), db) log.Info().Msgf("got flag commit id: %s", flagEpochCommitID) commitID, err := flow.HexStringToIdentifier(flagEpochCommitID) @@ -34,7 +36,7 @@ var epochCommitCmd = &cobra.Command{ } log.Info().Msgf("getting epoch commit by id: %v", commitID) - epochCommit, err := storages.EpochCommits.ByID(commitID) + epochCommit, err := epochCommits.ByID(commitID) if err != nil { return fmt.Errorf("could not get epoch commit with id: %v: %w", commitID, err) } diff --git a/cmd/util/cmd/read-badger/cmd/epoch_protocol_state.go b/cmd/util/cmd/read-badger/cmd/epoch_protocol_state.go index 0a7922e4cf3..e2a6a6360d3 100644 --- a/cmd/util/cmd/read-badger/cmd/epoch_protocol_state.go +++ b/cmd/util/cmd/read-badger/cmd/epoch_protocol_state.go @@ -8,7 +8,9 @@ import ( 
"github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) func init() { @@ -23,7 +25,11 @@ var epochProtocolStateCmd = &cobra.Command{ Short: "get epoch protocol state by block ID", RunE: func(cmd *cobra.Command, args []string) error { return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + metrics := metrics.NewNoopCollector() + setups := store.NewEpochSetups(metrics, db) + epochCommits := store.NewEpochCommits(metrics, db) + epochProtocolStateEntries := store.NewEpochProtocolStateEntries(metrics, setups, epochCommits, db, + store.DefaultEpochProtocolStateCacheSize, store.DefaultProtocolStateIndexCacheSize) log.Info().Msgf("got flag block id: %s", flagBlockID) blockID, err := flow.HexStringToIdentifier(flagBlockID) @@ -32,7 +38,7 @@ var epochProtocolStateCmd = &cobra.Command{ } log.Info().Msgf("getting protocol state by block id: %v", blockID) - protocolState, err := storages.EpochProtocolStateEntries.ByBlockID(blockID) + protocolState, err := epochProtocolStateEntries.ByBlockID(blockID) if err != nil { return fmt.Errorf("could not get protocol state for block id: %v: %w", blockID, err) } diff --git a/cmd/util/cmd/read-badger/cmd/guarantees.go b/cmd/util/cmd/read-badger/cmd/guarantees.go index 14adf48588a..8f0ba38a8dd 100644 --- a/cmd/util/cmd/read-badger/cmd/guarantees.go +++ b/cmd/util/cmd/read-badger/cmd/guarantees.go @@ -8,7 +8,9 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) func init() { @@ -23,7 +25,7 @@ var guaranteesCmd = &cobra.Command{ Short: "get guarantees by collection ID", RunE: func(cmd *cobra.Command, args []string) error { return common.WithStorage(flagDatadir, 
func(db storage.DB) error { - storages := common.InitStorages(db) + guarantees := store.NewGuarantees(metrics.NewNoopCollector(), db, store.DefaultCacheSize, store.DefaultCacheSize) log.Info().Msgf("got flag collection id: %s", flagCollectionID) collectionID, err := flow.HexStringToIdentifier(flagCollectionID) @@ -32,7 +34,7 @@ var guaranteesCmd = &cobra.Command{ } log.Info().Msgf("getting guarantee by collection id: %v", collectionID) - guarantee, err := storages.Guarantees.ByCollectionID(collectionID) + guarantee, err := guarantees.ByCollectionID(collectionID) if err != nil { return fmt.Errorf("could not get guarantee for collection id: %v: %w", collectionID, err) } diff --git a/cmd/util/cmd/read-badger/cmd/protocol_kvstore.go b/cmd/util/cmd/read-badger/cmd/protocol_kvstore.go index d434d6e1aaa..dffbebd0629 100644 --- a/cmd/util/cmd/read-badger/cmd/protocol_kvstore.go +++ b/cmd/util/cmd/read-badger/cmd/protocol_kvstore.go @@ -10,7 +10,9 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagDecodeData bool @@ -30,7 +32,7 @@ var protocolStateKVStore = &cobra.Command{ Short: "get protocol state kvstore by block ID", RunE: func(cmd *cobra.Command, args []string) error { return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + protocolKVStore := store.NewProtocolKVStore(metrics.NewNoopCollector(), db, store.DefaultProtocolKVStoreCacheSize, store.DefaultProtocolKVStoreByBlockIDCacheSize) log.Info().Msgf("got flag block id: %s", flagBlockID) blockID, err := flow.HexStringToIdentifier(flagBlockID) @@ -39,7 +41,7 @@ var protocolStateKVStore = &cobra.Command{ } log.Info().Msgf("getting protocol state kvstore by block id: %v", blockID) - protocolState, err := storages.ProtocolKVStore.ByBlockID(blockID) + protocolState, err := 
protocolKVStore.ByBlockID(blockID) if err != nil { return fmt.Errorf("could not get protocol state kvstore for block id: %v: %w", blockID, err) } diff --git a/cmd/util/cmd/read-badger/cmd/seals.go b/cmd/util/cmd/read-badger/cmd/seals.go index f6ebaa2e180..842f9b5db6e 100644 --- a/cmd/util/cmd/read-badger/cmd/seals.go +++ b/cmd/util/cmd/read-badger/cmd/seals.go @@ -8,7 +8,9 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagSealID string @@ -25,7 +27,7 @@ var sealsCmd = &cobra.Command{ Short: "get seals by block or seal ID", RunE: func(cmd *cobra.Command, args []string) error { return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + seals := store.NewSeals(metrics.NewNoopCollector(), db) if flagSealID != "" && flagBlockID != "" { return fmt.Errorf("provide one of the flags --id or --block-id") @@ -39,7 +41,7 @@ var sealsCmd = &cobra.Command{ } log.Info().Msgf("getting seal by id: %v", sealID) - seal, err := storages.Seals.ByID(sealID) + seal, err := seals.ByID(sealID) if err != nil { return fmt.Errorf("could not get seal with id: %v: %w", sealID, err) } @@ -56,7 +58,7 @@ var sealsCmd = &cobra.Command{ } log.Info().Msgf("getting seal by block id: %v", blockID) - seal, err := storages.Seals.FinalizedSealForBlock(blockID) + seal, err := seals.FinalizedSealForBlock(blockID) if err != nil { return fmt.Errorf("could not get seal for block id: %v: %w", blockID, err) } diff --git a/cmd/util/cmd/read-badger/cmd/transaction_results.go b/cmd/util/cmd/read-badger/cmd/transaction_results.go index 44285007cc7..055969b18fa 100644 --- a/cmd/util/cmd/read-badger/cmd/transaction_results.go +++ b/cmd/util/cmd/read-badger/cmd/transaction_results.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" 
"github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" ) @@ -25,11 +26,18 @@ var transactionResultsCmd = &cobra.Command{ Short: "get transaction-result by block ID", RunE: func(cmd *cobra.Command, args []string) error { return common.WithStorage(flagDatadir, func(db storage.DB) error { + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } transactionResults, err := store.NewTransactionResults(metrics.NewNoopCollector(), db, 1) if err != nil { return err } - storages := common.InitStorages(db) + storages, err := common.InitStorages(db, chainID) + if err != nil { + return err + } log.Info().Msgf("got flag block id: %s", flagBlockID) blockID, err := flow.HexStringToIdentifier(flagBlockID) if err != nil { diff --git a/cmd/util/cmd/read-badger/cmd/transactions.go b/cmd/util/cmd/read-badger/cmd/transactions.go index c3e123d4808..ba286e1e510 100644 --- a/cmd/util/cmd/read-badger/cmd/transactions.go +++ b/cmd/util/cmd/read-badger/cmd/transactions.go @@ -8,7 +8,9 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) func init() { @@ -23,7 +25,7 @@ var transactionsCmd = &cobra.Command{ Short: "get transaction by ID", RunE: func(cmd *cobra.Command, args []string) error { return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + transactions := store.NewTransactions(metrics.NewNoopCollector(), db) log.Info().Msgf("got flag transaction id: %s", flagTransactionID) transactionID, err := flow.HexStringToIdentifier(flagTransactionID) @@ -32,7 +34,7 @@ var transactionsCmd = &cobra.Command{ } log.Info().Msgf("getting transaction by id: %v", transactionID) - tx, err := 
storages.Transactions.ByID(transactionID) + tx, err := transactions.ByID(transactionID) if err != nil { return fmt.Errorf("could not get transaction with id: %v: %w", transactionID, err) } diff --git a/cmd/util/cmd/read-light-block/read_light_block_test.go b/cmd/util/cmd/read-light-block/read_light_block_test.go index 84e5abfc002..17fc2727d34 100644 --- a/cmd/util/cmd/read-light-block/read_light_block_test.go +++ b/cmd/util/cmd/read-light-block/read_light_block_test.go @@ -52,11 +52,12 @@ func TestReadClusterRange(t *testing.T) { }) require.NoError(t, err) } - + clusterHeaders, err := store.NewClusterHeaders(metrics.NewNoopCollector(), db, blocks[0].ChainID) + require.NoError(t, err) clusterBlocks := store.NewClusterBlocks( db, blocks[0].ChainID, - store.NewHeaders(metrics.NewNoopCollector(), db), + clusterHeaders, store.NewClusterPayloads(metrics.NewNoopCollector(), db), ) diff --git a/cmd/util/cmd/read-protocol-state/cmd/blocks.go b/cmd/util/cmd/read-protocol-state/cmd/blocks.go index 4c2ef0d13bd..1027baef52c 100644 --- a/cmd/util/cmd/read-protocol-state/cmd/blocks.go +++ b/cmd/util/cmd/read-protocol-state/cmd/blocks.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" ) @@ -150,7 +151,14 @@ func (r *Reader) IsExecuted(blockID flow.Identifier) (bool, error) { func runE(*cobra.Command, []string) error { lockManager := storage.MakeSingletonLockManager() return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } + storages, err := common.InitStorages(db, chainID) + if err != nil { + return err + } state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { diff --git a/cmd/util/cmd/read-protocol-state/cmd/snapshot.go 
b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go index 921fc26ba9b..75c7f5a7cff 100644 --- a/cmd/util/cmd/read-protocol-state/cmd/snapshot.go +++ b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go @@ -11,6 +11,7 @@ import ( commonFuncs "github.com/onflow/flow-go/cmd/util/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/storage" ) @@ -56,7 +57,14 @@ func init() { func runSnapshotE(*cobra.Command, []string) error { lockManager := storage.MakeSingletonLockManager() return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } + storages, err := common.InitStorages(db, chainID) + if err != nil { + return err + } state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { return fmt.Errorf("could not init protocol state") diff --git a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go index fa39fc67f80..0a196fd37f2 100644 --- a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go +++ b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/state/protocol" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/storage/operation/pebbleimpl" @@ -59,7 +60,14 @@ func runE(*cobra.Command, []string) error { } return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + 
} + storages, err := common.InitStorages(db, chainID) + if err != nil { + return err + } state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { return fmt.Errorf("could not open protocol states: %w", err) @@ -71,15 +79,14 @@ func runE(*cobra.Command, []string) error { if err != nil { return err } - commits := store.NewCommits(metrics, db) - results := store.NewExecutionResults(metrics, db) - receipts := store.NewExecutionReceipts(metrics, db, results, badger.DefaultCacheSize) + commits := storages.Commits + results := storages.Results + receipts := storages.Receipts myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) - headers := store.NewHeaders(metrics, db) + headers := storages.Headers events := store.NewEvents(metrics, db) serviceEvents := store.NewServiceEvents(metrics, db) - transactions := store.NewTransactions(metrics, db) - collections := store.NewCollections(db, transactions) + collections := storages.Collections // require the chunk data pack data must exist before returning the storage module chunkDataPacksPebbleDB, err := storagepebble.ShouldOpenDefaultPebbleDB( log.Logger.With().Str("pebbledb", "cdp").Logger(), flagChunkDataPackDir) diff --git a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go index 386acefd145..228cf6c35ad 100644 --- a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go +++ b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go @@ -39,7 +39,8 @@ func TestReExecuteBlock(t *testing.T) { // create all modules metrics := &metrics.NoopCollector{} - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) headers := all.Headers blocks := all.Blocks txResults, err := store.NewTransactionResults(metrics, db, store.DefaultCacheSize) @@ -199,7 +200,8 @@ func TestReExecuteBlockWithDifferentResult(t 
*testing.T) { // create all modules metrics := &metrics.NoopCollector{} - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) headers := all.Headers blocks := all.Blocks commits := store.NewCommits(metrics, db) diff --git a/cmd/util/cmd/snapshot/cmd.go b/cmd/util/cmd/snapshot/cmd.go index b384c81220f..6a77f452094 100644 --- a/cmd/util/cmd/snapshot/cmd.go +++ b/cmd/util/cmd/snapshot/cmd.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/engine/common/rpc/convert" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" ) @@ -47,7 +48,14 @@ func runE(*cobra.Command, []string) error { lockManager := storage.MakeSingletonLockManager() return common.WithStorage(flagDatadir, func(db storage.DB) error { - storages := common.InitStorages(db) + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return err + } + storages, err := common.InitStorages(db, chainID) + if err != nil { + return err + } state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { return fmt.Errorf("could not open protocol state: %w", err) diff --git a/cmd/util/cmd/storehouse-checkpoint-validator/cmd.go b/cmd/util/cmd/storehouse-checkpoint-validator/cmd.go index c92455f4303..d0d72aec44c 100644 --- a/cmd/util/cmd/storehouse-checkpoint-validator/cmd.go +++ b/cmd/util/cmd/storehouse-checkpoint-validator/cmd.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow-go/engine/execution/storehouse" "github.com/onflow/flow-go/module/metrics" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage/operation/pebbleimpl" pebblestorage "github.com/onflow/flow-go/storage/pebble" "github.com/onflow/flow-go/storage/store" @@ -94,7 +95,14 @@ func runE(*cobra.Command, []string) error { // Initialize storage components metricsCollector := &metrics.NoopCollector{} - storages := 
store.InitAll(metricsCollector, protocolDB) + chainID, err := badgerstate.GetChainID(protocolDB) + if err != nil { + return err + } + storages, err := store.InitAll(metricsCollector, protocolDB, chainID) + if err != nil { + return err + } // Validate checkpoint ctx := context.Background() diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go b/cmd/util/cmd/verify-evm-offchain-replay/verify.go index 96eeeb3c9a3..740f64b450d 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/verify.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -16,6 +16,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + badgerstate "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/store" ) @@ -144,10 +145,17 @@ func initStorages(db storage.DB, executionDataDir string) ( io.Closer, error, ) { - storages := common.InitStorages(db) + chainID, err := badgerstate.GetChainID(db) + if err != nil { + return nil, nil, nil, err + } + storages, err := common.InitStorages(db, chainID) + if err != nil { + return nil, nil, nil, err + } datastoreDir := filepath.Join(executionDataDir, "blobstore") - err := os.MkdirAll(datastoreDir, 0700) + err = os.MkdirAll(datastoreDir, 0700) if err != nil { return nil, nil, nil, err } diff --git a/consensus/integration/nodes_test.go b/consensus/integration/nodes_test.go index 165d2450933..e6d356b8396 100644 --- a/consensus/integration/nodes_test.go +++ b/consensus/integration/nodes_test.go @@ -387,7 +387,8 @@ func createNode( db := pebbleimpl.ToDB(pdb) lockManager := fstorage.NewTestingLockManager() - headersDB := store.NewHeaders(metricsCollector, db) + headersDB, err := store.NewHeaders(metricsCollector, db, rootSnapshot.Params().ChainID()) + require.NoError(t, err) guaranteesDB := store.NewGuarantees(metricsCollector, db, store.DefaultCacheSize, store.DefaultCacheSize) 
sealsDB := store.NewSeals(metricsCollector, db) indexDB := store.NewIndex(metricsCollector, db) diff --git a/consensus/recovery/protocol/state_test.go b/consensus/recovery/protocol/state_test.go index 790aa8751a4..091be4b159c 100644 --- a/consensus/recovery/protocol/state_test.go +++ b/consensus/recovery/protocol/state_test.go @@ -46,7 +46,8 @@ func TestSaveBlockAsReplica(t *testing.T) { require.NoError(t, err) metrics := metrics.NewNoopCollector() - headers := store.NewHeaders(metrics, db) + headers, err := store.NewHeaders(metrics, db, flow.Emulator) + require.NoError(t, err) finalized, pending, err := recovery.FindLatest(state, headers) require.NoError(t, err) require.Equal(t, b0.ID(), finalized.ID(), "recover find latest returns inconsistent finalized block") diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 1b77dc19518..d1f8345f243 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -161,9 +161,9 @@ func (suite *Suite) RunTest( ) { unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { db := pebbleimpl.ToDB(pdb) - all := store.InitAll(metrics.NewNoopCollector(), db) + all, err := store.InitAll(metrics.NewNoopCollector(), db, flow.Emulator) + require.NoError(suite.T(), err) - var err error suite.backend, err = backend.New(backend.Params{ State: suite.state, CollectionRPC: suite.collClient, @@ -653,7 +653,8 @@ func (suite *Suite) TestGetSealedTransaction() { unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { lockManager := storage.NewTestingLockManager() db := pebbleimpl.ToDB(pdb) - all := store.InitAll(metrics.NewNoopCollector(), db) + all, err := store.InitAll(metrics.NewNoopCollector(), db, flow.Emulator) + require.NoError(suite.T(), err) enIdentities := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) enNodeIDs := enIdentities.NodeIDs() @@ -873,7 +874,8 @@ func (suite *Suite) TestGetTransactionResult() { unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { 
lockManager := storage.NewTestingLockManager() db := pebbleimpl.ToDB(pdb) - all := store.InitAll(metrics.NewNoopCollector(), db) + all, err := store.InitAll(metrics.NewNoopCollector(), db, flow.Emulator) + require.NoError(suite.T(), err) originID := unittest.IdentifierFixture() *suite.state = protocol.State{} @@ -896,7 +898,7 @@ func (suite *Suite) TestGetTransactionResult() { // specifically for this test we will consider that sealed block is far behind finalized, so we get EXECUTED status suite.sealedSnapshot.On("Head").Return(sealedBlock, nil) - err := unittest.WithLocks(suite.T(), suite.lockManager, []string{ + err = unittest.WithLocks(suite.T(), suite.lockManager, []string{ storage.LockInsertBlock, storage.LockFinalizeBlock, }, func(lctx lockctx.Context) error { @@ -1210,7 +1212,8 @@ func (suite *Suite) TestExecuteScript() { unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { lockManager := storage.NewTestingLockManager() db := pebbleimpl.ToDB(pdb) - all := store.InitAll(metrics.NewNoopCollector(), db) + all, err := store.InitAll(metrics.NewNoopCollector(), db, flow.Emulator) + require.NoError(suite.T(), err) identities := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) suite.sealedSnapshot.On("Identities", mock.Anything).Return(identities, nil) suite.finalSnapshot.On("Identities", mock.Anything).Return(identities, nil) @@ -1227,7 +1230,6 @@ func (suite *Suite) TestExecuteScript() { identities.NodeIDs(), ) - var err error suite.backend, err = backend.New(backend.Params{ State: suite.state, CollectionRPC: suite.collClient, diff --git a/engine/access/ingestion/collections/indexer_test.go b/engine/access/ingestion/collections/indexer_test.go index 294ac3c034a..c677e5c41a1 100644 --- a/engine/access/ingestion/collections/indexer_test.go +++ b/engine/access/ingestion/collections/indexer_test.go @@ -392,7 +392,8 @@ func newBlockchain(t *testing.T, pdb *pebble.DB) *blockchain { db := pebbleimpl.ToDB(pdb) lockManager := 
storage.NewTestingLockManager() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) transactions := store.NewTransactions(metrics, db) collections := store.NewCollections(db, transactions) diff --git a/engine/access/rpc/backend/transactions/transactions_functional_test.go b/engine/access/rpc/backend/transactions/transactions_functional_test.go index f43834e4ea5..bd6855d5405 100644 --- a/engine/access/rpc/backend/transactions/transactions_functional_test.go +++ b/engine/access/rpc/backend/transactions/transactions_functional_test.go @@ -121,7 +121,8 @@ func (s *TransactionsFunctionalSuite) SetupTest() { s.db = pebbleimpl.ToDB(pdb) // Instantiate storages - all := store.InitAll(metrics, s.db) + all, err := store.InitAll(metrics, s.db, flow.Emulator) + s.Require().NoError(err) s.blocks = all.Blocks s.collections = all.Collections @@ -135,7 +136,7 @@ func (s *TransactionsFunctionalSuite) SetupTest() { s.reporter = syncmock.NewIndexReporter(s.T()) reporter := index.NewReporter() - err := reporter.Initialize(s.reporter) + err = reporter.Initialize(s.reporter) s.Require().NoError(err) s.eventsIndex = index.NewEventsIndex(reporter, s.events) diff --git a/engine/collection/compliance/engine_test.go b/engine/collection/compliance/engine_test.go index ef050881b46..bbd4db9ac82 100644 --- a/engine/collection/compliance/engine_test.go +++ b/engine/collection/compliance/engine_test.go @@ -19,6 +19,7 @@ import ( netint "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" mocknetwork "github.com/onflow/flow-go/network/mock" + clusterstate "github.com/onflow/flow-go/state/cluster" protocol "github.com/onflow/flow-go/state/protocol/mock" storerr "github.com/onflow/flow-go/storage" storage "github.com/onflow/flow-go/storage/mock" @@ -79,7 +80,7 @@ func (cs *EngineSuite) SetupTest() { cs.protoState = &protocol.State{} cs.protoState.On("Final").Return(protoSnapshot) - cs.clusterID = 
"cluster-id" + cs.clusterID = clusterstate.CanonicalClusterID(0, unittest.IdentifierListFixture(1)) clusterParams := &protocol.Params{} clusterParams.On("ChainID").Return(cs.clusterID, nil) diff --git a/engine/collection/epochmgr/engine.go b/engine/collection/epochmgr/engine.go index c0bc2d87a81..5db5e9624d0 100644 --- a/engine/collection/epochmgr/engine.go +++ b/engine/collection/epochmgr/engine.go @@ -288,7 +288,7 @@ func (e *Engine) Done() <-chan struct{} { // Error returns: // - ErrNotAuthorizedForEpoch if this node is not authorized in the epoch. func (e *Engine) createEpochComponents(epoch protocol.CommittedEpoch) (*EpochComponents, error) { - state, prop, sync, hot, voteAggregator, timeoutAggregator, messageHub, err := e.factory.Create(epoch) + state, prop, sync, hot, voteAggregator, timeoutAggregator, messageHub, err := e.factory.Create(epoch, e.state.Params().ChainID()) if err != nil { return nil, fmt.Errorf("could not setup requirements for epoch (%d): %w", epoch.Counter(), err) } diff --git a/engine/collection/epochmgr/engine_test.go b/engine/collection/epochmgr/engine_test.go index e5309df3f82..0c4480e12d9 100644 --- a/engine/collection/epochmgr/engine_test.go +++ b/engine/collection/epochmgr/engine_test.go @@ -108,35 +108,35 @@ type Suite struct { // MockFactoryCreate mocks the epoch factory to create epoch components for the given epoch. func (suite *Suite) MockFactoryCreate(arg any) { - suite.factory.On("Create", arg). + suite.factory.On("Create", arg, mock.Anything). Run(func(args mock.Arguments) { epoch, ok := args.Get(0).(realprotocol.CommittedEpoch) suite.Require().Truef(ok, "invalid type %T", args.Get(0)) suite.components[epoch.Counter()] = newMockComponents(suite.T()) }). 
Return( - func(epoch realprotocol.CommittedEpoch) realcluster.State { + func(epoch realprotocol.CommittedEpoch, consensusChainID flow.ChainID) realcluster.State { return suite.ComponentsForEpoch(epoch).state }, - func(epoch realprotocol.CommittedEpoch) component.Component { + func(epoch realprotocol.CommittedEpoch, consensusChainID flow.ChainID) component.Component { return suite.ComponentsForEpoch(epoch).prop }, - func(epoch realprotocol.CommittedEpoch) realmodule.ReadyDoneAware { + func(epoch realprotocol.CommittedEpoch, consensusChainID flow.ChainID) realmodule.ReadyDoneAware { return suite.ComponentsForEpoch(epoch).sync }, - func(epoch realprotocol.CommittedEpoch) realmodule.HotStuff { + func(epoch realprotocol.CommittedEpoch, consensusChainID flow.ChainID) realmodule.HotStuff { return suite.ComponentsForEpoch(epoch).hotstuff }, - func(epoch realprotocol.CommittedEpoch) hotstuff.VoteAggregator { + func(epoch realprotocol.CommittedEpoch, consensusChainID flow.ChainID) hotstuff.VoteAggregator { return suite.ComponentsForEpoch(epoch).voteAggregator }, - func(epoch realprotocol.CommittedEpoch) hotstuff.TimeoutAggregator { + func(epoch realprotocol.CommittedEpoch, consensusChainID flow.ChainID) hotstuff.TimeoutAggregator { return suite.ComponentsForEpoch(epoch).timeoutAggregator }, - func(epoch realprotocol.CommittedEpoch) component.Component { + func(epoch realprotocol.CommittedEpoch, consensusChainID flow.ChainID) component.Component { return suite.ComponentsForEpoch(epoch).messageHub }, - func(epoch realprotocol.CommittedEpoch) error { return nil }, + func(epoch realprotocol.CommittedEpoch, consensusChainID flow.ChainID) error { return nil }, ).Maybe() } @@ -164,6 +164,9 @@ func (suite *Suite) SetupTest() { suite.state.On("Final").Return(suite.snap) suite.state.On("AtBlockID", suite.header.ID()).Return(suite.snap).Maybe() + params := protocol.NewParams(suite.T()) + params.On("ChainID").Return(flow.ChainID("Consensus-Chain-ID")).Maybe() + 
suite.state.On("Params").Return(params) suite.snap.On("Epochs").Return(suite.epochQuery) suite.snap.On("Head").Return( func() *flow.Header { return suite.header }, @@ -274,7 +277,7 @@ func (suite *Suite) MockAsUnauthorizedNode(forEpoch uint64) { suite.factory = epochmgr.NewEpochComponentsFactory(suite.T()) suite.factory. - On("Create", mock.MatchedBy(unauthorizedMatcher)). + On("Create", mock.MatchedBy(unauthorizedMatcher), mock.Anything). Return(nil, nil, nil, nil, nil, nil, nil, ErrNotAuthorizedForEpoch) suite.MockFactoryCreate(mock.MatchedBy(authorizedMatcher)) diff --git a/engine/collection/epochmgr/factories/cluster_state.go b/engine/collection/epochmgr/factories/cluster_state.go index 9548f033943..5046a288540 100644 --- a/engine/collection/epochmgr/factories/cluster_state.go +++ b/engine/collection/epochmgr/factories/cluster_state.go @@ -5,6 +5,7 @@ import ( "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" clusterkv "github.com/onflow/flow-go/state/cluster/badger" "github.com/onflow/flow-go/storage" @@ -33,38 +34,46 @@ func NewClusterStateFactory( return factory, nil } -func (f *ClusterStateFactory) Create(stateRoot *clusterkv.StateRoot) ( +func (f *ClusterStateFactory) Create(stateRoot *clusterkv.StateRoot, consensusChainID flow.ChainID) ( *clusterkv.MutableState, *store.Headers, storage.ClusterPayloads, storage.ClusterBlocks, + *store.Headers, error, ) { - headers := store.NewHeaders(f.metrics, f.db) - payloads := store.NewClusterPayloads(f.metrics, f.db) - blocks := store.NewClusterBlocks(f.db, stateRoot.ClusterID(), headers, payloads) + clusterHeaders, err := store.NewClusterHeaders(f.metrics, f.db, stateRoot.ClusterID()) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("failed to create storage abstraction for cluster headers: %w", err) + } + clusterPayloads := store.NewClusterPayloads(f.metrics, f.db) + clusterBlocks := store.NewClusterBlocks(f.db, stateRoot.ClusterID(), 
clusterHeaders, clusterPayloads) + consensusHeaders, err := store.NewHeaders(f.metrics, f.db, consensusChainID) // for reference blocks + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("failed to create storage abstraction for consensus headers: %w", err) + } isBootStrapped, err := clusterkv.IsBootstrapped(f.db, stateRoot.ClusterID()) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("could not check cluster state db: %w", err) + return nil, nil, nil, nil, nil, fmt.Errorf("could not check cluster state db: %w", err) } var clusterState *clusterkv.State if isBootStrapped { - clusterState, err = clusterkv.OpenState(f.db, f.tracer, headers, payloads, stateRoot.ClusterID(), stateRoot.EpochCounter()) + clusterState, err = clusterkv.OpenState(f.db, f.tracer, clusterHeaders, clusterPayloads, stateRoot.ClusterID(), stateRoot.EpochCounter()) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("could not open cluster state: %w", err) + return nil, nil, nil, nil, nil, fmt.Errorf("could not open cluster state: %w", err) } } else { clusterState, err = clusterkv.Bootstrap(f.db, f.lockManager, stateRoot) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("could not bootstrap cluster state: %w", err) + return nil, nil, nil, nil, nil, fmt.Errorf("could not bootstrap cluster state: %w", err) } } - mutableState, err := clusterkv.NewMutableState(clusterState, f.lockManager, f.tracer, headers, payloads) + mutableState, err := clusterkv.NewMutableState(clusterState, f.lockManager, f.tracer, clusterHeaders, clusterPayloads, consensusHeaders) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("could create mutable cluster state: %w", err) + return nil, nil, nil, nil, nil, fmt.Errorf("could not create mutable cluster state: %w", err) } - return mutableState, headers, payloads, blocks, err + return mutableState, clusterHeaders, clusterPayloads, clusterBlocks, consensusHeaders, nil } diff --git a/engine/collection/epochmgr/factories/epoch.go 
b/engine/collection/epochmgr/factories/epoch.go index c55224a62bc..7bb81160fa6 100644 --- a/engine/collection/epochmgr/factories/epoch.go +++ b/engine/collection/epochmgr/factories/epoch.go @@ -5,6 +5,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/engine/collection/epochmgr" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/mempool/epochs" @@ -56,6 +57,7 @@ func NewEpochComponentsFactory( func (factory *EpochComponentsFactory) Create( epoch protocol.CommittedEpoch, + consensusChainID flow.ChainID, ) ( state cluster.State, compliance component.Component, @@ -107,7 +109,7 @@ func (factory *EpochComponentsFactory) Create( return } var mutableState *badger.MutableState - mutableState, headers, payloads, blocks, err = factory.state.Create(stateRoot) + mutableState, headers, payloads, blocks, _, err = factory.state.Create(stateRoot, consensusChainID) state = mutableState if err != nil { err = fmt.Errorf("could not create cluster state: %w", err) diff --git a/engine/collection/epochmgr/factory.go b/engine/collection/epochmgr/factory.go index c6370674e51..fec38261b38 100644 --- a/engine/collection/epochmgr/factory.go +++ b/engine/collection/epochmgr/factory.go @@ -2,6 +2,7 @@ package epochmgr import ( "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/state/cluster" @@ -18,7 +19,7 @@ type EpochComponentsFactory interface { // a given epoch counter. // // Must return ErrNotAuthorizedForEpoch if this node is not authorized in the epoch. 
- Create(epoch protocol.CommittedEpoch) ( + Create(epoch protocol.CommittedEpoch, consensusChainID flow.ChainID) ( state cluster.State, proposal component.Component, sync module.ReadyDoneAware, diff --git a/engine/collection/epochmgr/mock/epoch_components_factory.go b/engine/collection/epochmgr/mock/epoch_components_factory.go index 4e58c01c41a..171c88676f6 100644 --- a/engine/collection/epochmgr/mock/epoch_components_factory.go +++ b/engine/collection/epochmgr/mock/epoch_components_factory.go @@ -6,6 +6,8 @@ import ( component "github.com/onflow/flow-go/module/component" cluster "github.com/onflow/flow-go/state/cluster" + flow "github.com/onflow/flow-go/model/flow" + hotstuff "github.com/onflow/flow-go/consensus/hotstuff" mock "github.com/stretchr/testify/mock" @@ -20,9 +22,9 @@ type EpochComponentsFactory struct { mock.Mock } -// Create provides a mock function with given fields: epoch -func (_m *EpochComponentsFactory) Create(epoch protocol.CommittedEpoch) (cluster.State, component.Component, module.ReadyDoneAware, module.HotStuff, hotstuff.VoteAggregator, hotstuff.TimeoutAggregator, component.Component, error) { - ret := _m.Called(epoch) +// Create provides a mock function with given fields: epoch, consensusChainID +func (_m *EpochComponentsFactory) Create(epoch protocol.CommittedEpoch, consensusChainID flow.ChainID) (cluster.State, component.Component, module.ReadyDoneAware, module.HotStuff, hotstuff.VoteAggregator, hotstuff.TimeoutAggregator, component.Component, error) { + ret := _m.Called(epoch, consensusChainID) if len(ret) == 0 { panic("no return value specified for Create") @@ -36,67 +38,67 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.CommittedEpoch) (cluster var r5 hotstuff.TimeoutAggregator var r6 component.Component var r7 error - if rf, ok := ret.Get(0).(func(protocol.CommittedEpoch) (cluster.State, component.Component, module.ReadyDoneAware, module.HotStuff, hotstuff.VoteAggregator, hotstuff.TimeoutAggregator, component.Component, 
error)); ok { - return rf(epoch) + if rf, ok := ret.Get(0).(func(protocol.CommittedEpoch, flow.ChainID) (cluster.State, component.Component, module.ReadyDoneAware, module.HotStuff, hotstuff.VoteAggregator, hotstuff.TimeoutAggregator, component.Component, error)); ok { + return rf(epoch, consensusChainID) } - if rf, ok := ret.Get(0).(func(protocol.CommittedEpoch) cluster.State); ok { - r0 = rf(epoch) + if rf, ok := ret.Get(0).(func(protocol.CommittedEpoch, flow.ChainID) cluster.State); ok { + r0 = rf(epoch, consensusChainID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(cluster.State) } } - if rf, ok := ret.Get(1).(func(protocol.CommittedEpoch) component.Component); ok { - r1 = rf(epoch) + if rf, ok := ret.Get(1).(func(protocol.CommittedEpoch, flow.ChainID) component.Component); ok { + r1 = rf(epoch, consensusChainID) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(component.Component) } } - if rf, ok := ret.Get(2).(func(protocol.CommittedEpoch) module.ReadyDoneAware); ok { - r2 = rf(epoch) + if rf, ok := ret.Get(2).(func(protocol.CommittedEpoch, flow.ChainID) module.ReadyDoneAware); ok { + r2 = rf(epoch, consensusChainID) } else { if ret.Get(2) != nil { r2 = ret.Get(2).(module.ReadyDoneAware) } } - if rf, ok := ret.Get(3).(func(protocol.CommittedEpoch) module.HotStuff); ok { - r3 = rf(epoch) + if rf, ok := ret.Get(3).(func(protocol.CommittedEpoch, flow.ChainID) module.HotStuff); ok { + r3 = rf(epoch, consensusChainID) } else { if ret.Get(3) != nil { r3 = ret.Get(3).(module.HotStuff) } } - if rf, ok := ret.Get(4).(func(protocol.CommittedEpoch) hotstuff.VoteAggregator); ok { - r4 = rf(epoch) + if rf, ok := ret.Get(4).(func(protocol.CommittedEpoch, flow.ChainID) hotstuff.VoteAggregator); ok { + r4 = rf(epoch, consensusChainID) } else { if ret.Get(4) != nil { r4 = ret.Get(4).(hotstuff.VoteAggregator) } } - if rf, ok := ret.Get(5).(func(protocol.CommittedEpoch) hotstuff.TimeoutAggregator); ok { - r5 = rf(epoch) + if rf, ok := 
ret.Get(5).(func(protocol.CommittedEpoch, flow.ChainID) hotstuff.TimeoutAggregator); ok { + r5 = rf(epoch, consensusChainID) } else { if ret.Get(5) != nil { r5 = ret.Get(5).(hotstuff.TimeoutAggregator) } } - if rf, ok := ret.Get(6).(func(protocol.CommittedEpoch) component.Component); ok { - r6 = rf(epoch) + if rf, ok := ret.Get(6).(func(protocol.CommittedEpoch, flow.ChainID) component.Component); ok { + r6 = rf(epoch, consensusChainID) } else { if ret.Get(6) != nil { r6 = ret.Get(6).(component.Component) } } - if rf, ok := ret.Get(7).(func(protocol.CommittedEpoch) error); ok { - r7 = rf(epoch) + if rf, ok := ret.Get(7).(func(protocol.CommittedEpoch, flow.ChainID) error); ok { + r7 = rf(epoch, consensusChainID) } else { r7 = ret.Error(7) } diff --git a/engine/collection/message_hub/message_hub_test.go b/engine/collection/message_hub/message_hub_test.go index cb67a12d918..86e89e62005 100644 --- a/engine/collection/message_hub/message_hub_test.go +++ b/engine/collection/message_hub/message_hub_test.go @@ -73,7 +73,7 @@ func (s *MessageHubSuite) SetupTest() { unittest.WithInitialWeight(1000), ) s.myID = s.cluster[0].NodeID - s.clusterID = "cluster-id" + s.clusterID = clusterint.CanonicalClusterID(0, unittest.IdentifierListFixture(1)) s.head = unittest.ClusterBlockFixture() s.payloads = storage.NewClusterPayloads(s.T()) diff --git a/engine/collection/test/cluster_switchover_test.go b/engine/collection/test/cluster_switchover_test.go index 8eb42d61314..2d2325918fc 100644 --- a/engine/collection/test/cluster_switchover_test.go +++ b/engine/collection/test/cluster_switchover_test.go @@ -377,7 +377,7 @@ func (tc *ClusterSwitchoverTestCase) BlockInEpoch(epochCounter uint64) *flow.Hea func (tc *ClusterSwitchoverTestCase) SubmitTransactionToCluster( epochCounter uint64, // the epoch we are submitting the transacting w.r.t. 
clustering flow.ClusterList, // the clustering for the epoch - clusterIndex uint, // the index of the cluster we are targetting + clusterIndex uint, // the index of the cluster we are targeting ) { clusterMembers := clustering[int(clusterIndex)] diff --git a/engine/common/follower/integration_test.go b/engine/common/follower/integration_test.go index 486f1b236a0..e9d71d97f78 100644 --- a/engine/common/follower/integration_test.go +++ b/engine/common/follower/integration_test.go @@ -51,7 +51,8 @@ func TestFollowerHappyPath(t *testing.T) { tracer := trace.NewNoopTracer() log := unittest.Logger() consumer := events.NewNoop() - all := store.InitAll(metrics, pebbleimpl.ToDB(pdb)) + all, err := store.InitAll(metrics, pebbleimpl.ToDB(pdb), flow.Emulator) + require.NoError(t, err) // bootstrap root snapshot state, err := pbadger.Bootstrap( diff --git a/engine/execution/pruner/core_test.go b/engine/execution/pruner/core_test.go index a50811582b1..2e28c708cf8 100644 --- a/engine/execution/pruner/core_test.go +++ b/engine/execution/pruner/core_test.go @@ -33,7 +33,8 @@ func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { db := pebbleimpl.ToDB(pdb) ctx, cancel := context.WithCancel(context.Background()) metrics := metrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) headers := all.Headers blockstore := all.Blocks results := all.Results @@ -48,7 +49,7 @@ func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { // indexed by height chunks := make([]*verification.VerifiableChunkData, lastFinalizedHeight+2) parentID := genesis.ID() - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { // By convention, root block has no proposer signature 
- implementation has to handle this edge case return blockstore.BatchStore(lctx, rw, &flow.Proposal{Block: *genesis, ProposerSigData: nil}) diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index 7b24125f9db..5f28e471f76 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -236,7 +236,8 @@ func CompleteStateFixture( pdb := unittest.TypedPebbleDB(t, publicDBDir, pebble.Open) db := pebbleimpl.ToDB(pdb) lockManager := storage.NewTestingLockManager() - s := store.InitAll(metric, db) + s, err := store.InitAll(metric, db, rootSnapshot.Params().ChainID()) + require.NoError(t, err) secretsDB := unittest.TypedBadgerDB(t, secretsDBDir, storagebadger.InitSecret) consumer := events.NewDistributor() @@ -558,7 +559,8 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ide receipts := store.NewExecutionReceipts(node.Metrics, db, results, storagebadger.DefaultCacheSize) myReceipts := store.NewMyExecutionReceipts(node.Metrics, db, receipts) versionBeacons := store.NewVersionBeacons(db) - headersStorage := store.NewHeaders(node.Metrics, db) + headersStorage, err := store.NewHeaders(node.Metrics, db, chainID) + require.NoError(t, err) checkAuthorizedAtBlock := func(blockID flow.Identifier) (bool, error) { return protocol.IsNodeAuthorizedAt(node.State.AtBlockID(blockID), node.Me.NodeID()) diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go index 7f43d90db41..9bdb0f6b3fb 100644 --- a/engine/verification/verifier/verifiers.go +++ b/engine/verification/verifier/verifiers.go @@ -241,7 +241,10 @@ func initStorages( return nil, nil, nil, nil, nil, fmt.Errorf("could not init storage database: %w", err) } - storages := common.InitStorages(db) + storages, err := common.InitStorages(db, chainID) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("could not init common storage abstractions: %w", err) + } state, err := common.OpenProtocolState(lockManager, db, storages) if err 
!= nil { return nil, nil, nil, nil, nil, fmt.Errorf("could not open protocol state: %w", err) diff --git a/integration/testnet/container.go b/integration/testnet/container.go index b32af817506..30f4b3809bf 100644 --- a/integration/testnet/container.go +++ b/integration/testnet/container.go @@ -385,7 +385,10 @@ func (c *Container) OpenState() (*state.State, error) { } metrics := metrics.NewNoopCollector() index := store.NewIndex(metrics, db) - headers := store.NewHeaders(metrics, db) + headers, err := store.NewHeaders(metrics, db, c.net.Root().ChainID) + if err != nil { + return nil, err + } seals := store.NewSeals(metrics, db) results := store.NewExecutionResults(metrics, db) receipts := store.NewExecutionReceipts(metrics, db, results, store.DefaultCacheSize) diff --git a/integration/tests/access/cohort4/access_test.go b/integration/tests/access/cohort4/access_test.go index 5f059783f2c..eabf87b892f 100644 --- a/integration/tests/access/cohort4/access_test.go +++ b/integration/tests/access/cohort4/access_test.go @@ -154,7 +154,7 @@ func (s *AccessSuite) runTestSignerIndicesDecoding() { err = container.WaitForContainerStopped(5 * time.Second) require.NoError(s.T(), err) - // open state to build a block singer decoder + // open state to build a block signer decoder state, err := container.OpenState() require.NoError(s.T(), err) diff --git a/integration/tests/access/cohort4/execution_data_pruning_test.go b/integration/tests/access/cohort4/execution_data_pruning_test.go index edf899c921f..084168b1fda 100644 --- a/integration/tests/access/cohort4/execution_data_pruning_test.go +++ b/integration/tests/access/cohort4/execution_data_pruning_test.go @@ -163,7 +163,8 @@ func (s *ExecutionDataPruningSuite) TestHappyPath() { // setup storage objects needed to get the execution data id db, err := accessNode.DB() require.NoError(s.T(), err, "could not open db") - anHeaders := store.NewHeaders(metrics, db) + anHeaders, err := store.NewHeaders(metrics, db, flow.Localnet) + 
require.NoError(s.T(), err) anResults := store.NewExecutionResults(metrics, db) anSeals := store.NewSeals(metrics, db) diff --git a/model/flow/chain.go b/model/flow/chain.go index 2ac2b219c1b..9d19a401ba9 100644 --- a/model/flow/chain.go +++ b/model/flow/chain.go @@ -98,7 +98,7 @@ type chainImpl interface { chain() ChainID } -// monotonicImpl is a simple implementation of adress generation +// monotonicImpl is a simple implementation of address generation // where addresses are simply the index of the account. type monotonicImpl struct{} diff --git a/module/block_iterator/iterator_test.go b/module/block_iterator/iterator_test.go index ee5a6115f03..8199a272fbd 100644 --- a/module/block_iterator/iterator_test.go +++ b/module/block_iterator/iterator_test.go @@ -21,9 +21,9 @@ func TestIterateHeight(t *testing.T) { lockManager := storage.NewTestingLockManager() dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { // create blocks with siblings - b1 := &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}} - b2 := &flow.Header{HeaderBody: flow.HeaderBody{Height: 2}} - b3 := &flow.Header{HeaderBody: flow.HeaderBody{Height: 3}} + b1 := &flow.Header{HeaderBody: flow.HeaderBody{Height: 1, ChainID: flow.Emulator}} + b2 := &flow.Header{HeaderBody: flow.HeaderBody{Height: 2, ChainID: flow.Emulator}} + b3 := &flow.Header{HeaderBody: flow.HeaderBody{Height: 3, ChainID: flow.Emulator}} bs := []*flow.Header{b1, b2, b3} // index height @@ -41,7 +41,8 @@ func TestIterateHeight(t *testing.T) { // create iterator // b0 is the root block, iterate from b1 to b3 iterRange := module.IteratorRange{Start: b1.Height, End: b3.Height} - headers := store.NewHeaders(&metrics.NoopCollector{}, db) + headers, err := store.NewHeaders(&metrics.NoopCollector{}, db, flow.Emulator) + require.NoError(t, err) getBlockIDByIndex := func(height uint64) (flow.Identifier, bool, error) { blockID, err := headers.BlockIDByHeight(height) if err != nil { diff --git a/module/builder/collection/builder.go 
b/module/builder/collection/builder.go index 1fa49d6154a..886af075ace 100644 --- a/module/builder/collection/builder.go +++ b/module/builder/collection/builder.go @@ -44,7 +44,13 @@ type Builder struct { bySealingRateLimiterConfig module.ReadonlySealingLagRateLimiterConfig log zerolog.Logger clusterEpoch uint64 // the operating epoch for this cluster - // cache of values about the operating epoch which never change + + // cache of values about the operating epoch which never change: + // We can't specify the height of the epoch's first consensus block (height ON MAIN CHAIN) during which this cluster is + // active, because the builder is typically _instantiated_ before the epoch starts. However, the builder should only be + // called once the epoch has started, i.e. consensus has finalized the first block in the epoch. Consequently, we + // retrieve the epoch's first height on the first call of the builder, and cache it. + epochFirstHeight *uint64 epochFinalHeight *uint64 // last height of this cluster's operating epoch (nil if epoch not ended) epochFinalID *flow.Identifier // ID of last block in this cluster's operating epoch (nil if epoch not ended) } @@ -285,6 +291,28 @@ func (b *Builder) getBlockBuildContext(parentID flow.Identifier) (*blockBuildCon ctx.refChainFinalizedHeight = mainChainFinalizedHeader.Height ctx.refChainFinalizedID = mainChainFinalizedHeader.ID() + // We can't specify the height of the epoch's first consensus block (height ON MAIN CHAIN) during which this cluster is + // active, because the builder is typically _instantiated_ before the epoch starts. However, the builder should only be + // called once the epoch has started, i.e. consensus has finalized the first block in the epoch. Consequently, we + // retrieve the epoch's first height on the first call of the builder, and cache it for future calls. 
+ r := b.db.Reader() + if b.epochFirstHeight == nil { + var refEpochFirstHeight uint64 + err = operation.RetrieveEpochFirstHeight(r, b.clusterEpoch, &refEpochFirstHeight) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + // can be missing if we joined (dynamic bootstrapped) in the middle of an epoch. + // 0 means FinalizedAncestryLookup will not be bounded by the epoch start, + // but only by which cluster blocks we have available. + refEpochFirstHeight = 0 + } else { + return nil, fmt.Errorf("unexpected failure to retrieve first height of operating epoch: %w", err) + } + } + b.epochFirstHeight = &refEpochFirstHeight + } + ctx.refEpochFirstHeight = *b.epochFirstHeight + // if the epoch has ended and the final block is cached, use the cached values if b.epochFinalHeight != nil && b.epochFinalID != nil { ctx.refEpochFinalID = b.epochFinalID @@ -292,13 +320,12 @@ func (b *Builder) getBlockBuildContext(parentID flow.Identifier) (*blockBuildCon return ctx, nil } - r := b.db.Reader() - var refEpochFinalHeight uint64 var refEpochFinalID flow.Identifier err = operation.RetrieveEpochLastHeight(r, b.clusterEpoch, &refEpochFinalHeight) if err != nil { + // If the epoch has not yet ended, the final height is not available if errors.Is(err, storage.ErrNotFound) { return ctx, nil } @@ -379,7 +406,7 @@ func (b *Builder) populateFinalizedAncestryLookup(lctx lockctx.Proof, ctx *block // the finalized cluster blocks which could possibly contain any conflicting transactions var clusterBlockIDs []flow.Identifier - start, end := findRefHeightSearchRangeForConflictingClusterBlocks(minRefHeight, maxRefHeight) + start, end := findRefHeightSearchRangeForConflictingClusterBlocks(minRefHeight, maxRefHeight, ctx) err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, b.db.Reader(), start, end, &clusterBlockIDs) if err != nil { return fmt.Errorf("could not lookup finalized cluster blocks by reference height range [%d,%d]: %w", start, end, err) @@ -609,19 +636,35 @@ func 
(b *Builder) buildHeader( }, nil } -// findRefHeightSearchRangeForConflictingClusterBlocks computes the range of reference -// block heights of ancestor blocks which could possibly contain transactions -// duplicating those in our collection under construction, based on the range of -// reference heights of transactions in the collection under construction. +// findRefHeightSearchRangeForConflictingClusterBlocks computes the range of reference block heights of ancestor blocks +// which could possibly contain transactions duplicating those in our collection under construction, based on the range +// of reference heights of transactions in the collection under construction. +// Input range is the (inclusive) range of reference heights of transactions eligible for inclusion in the collection +// under construction. Output range is the (inclusive) range of reference heights which need to be searched in order to +// avoid transaction repeats. +// +// Within a single epoch, we have argued that for a set of transactions, with `minRefHeight` (`maxRefHeight`) being +// the smallest (largest) reference block height, we only need to inspect collections with reference block heights +// c ∈ (minRefHeight-E, maxRefHeight]. Note that the lower bound is exclusive, while the upper bound is inclusive, +// which we transform to an inclusive range: +// +// c ∈ (minRefHeight-E, maxRefHeight] +// ⇔ c ∈ [minRefHeight-E+1, maxRefHeight] +// +// In order to take epoch boundaries into account, we note: A collector cluster is only responsible for transactions whose +// reference blocks are within the cluster's operating epoch. Thus, we can bound the lower end of the search range by the +// height of the first block in the epoch. Formally, we only need to inspect collections with reference block height // -// Input range is the (inclusive) range of reference heights of transactions included -// in the collection under construction. 
Output range is the (inclusive) range of -// reference heights which need to be searched. -func findRefHeightSearchRangeForConflictingClusterBlocks(minRefHeight, maxRefHeight uint64) (start, end uint64) { - start = minRefHeight - flow.DefaultTransactionExpiry + 1 - if start > minRefHeight { - start = 0 // overflow check +// c ∈ [max{minRefHeight-E+1, epochFirstHeight}, maxRefHeight] +func findRefHeightSearchRangeForConflictingClusterBlocks(minRefHeight, maxRefHeight uint64, ctx *blockBuildContext) (start, end uint64) { + // in order to avoid underflow, we rewrite the lower-bound equation entirely without subtraction: + // max{minRefHeight-E+1, epochFirstHeight} == epochFirstHeight + // ⇔ minRefHeight - E + 1 ≤ epochFirstHeight + // ⇔ minRefHeight - E < epochFirstHeight + // ⇔ minRefHeight < epochFirstHeight + E + if minRefHeight < ctx.refEpochFirstHeight+flow.DefaultTransactionExpiry { + return ctx.refEpochFirstHeight, maxRefHeight } - end = maxRefHeight - return start, end + // We reach the following line only if minRefHeight-E+1 > epochFirstHeight ≥ 0. Hence, an underflow is impossible. + return minRefHeight + 1 - flow.DefaultTransactionExpiry, maxRefHeight } diff --git a/module/builder/collection/builder_test.go b/module/builder/collection/builder_test.go index a63e3583e81..ca0bb782210 100644 --- a/module/builder/collection/builder_test.go +++ b/module/builder/collection/builder_test.go @@ -39,18 +39,7 @@ import ( ) var signer = func(*flow.Header) ([]byte, error) { return unittest.SignatureFixture(), nil } -var setter = func(h *flow.HeaderBodyBuilder) error { - h.WithHeight(42). - WithChainID(flow.Emulator). - WithParentID(unittest.IdentifierFixture()). - WithView(1337). - WithParentView(1336). - WithParentVoterIndices(unittest.SignerIndicesFixture(4)). - WithParentVoterSigData(unittest.QCSigDataFixture()). 
- WithProposerID(unittest.IdentifierFixture()) - - return nil -} +var setter func(*flow.HeaderBodyBuilder) error type BuilderSuite struct { suite.Suite @@ -62,9 +51,9 @@ type BuilderSuite struct { chainID flow.ChainID epochCounter uint64 - headers storage.Headers - payloads storage.ClusterPayloads - blocks storage.Blocks + clusterHeaders storage.Headers + clusterPayloads storage.ClusterPayloads + consensusHeaders storage.Headers state cluster.MutableState @@ -84,6 +73,18 @@ func (suite *BuilderSuite) SetupTest() { suite.genesis, err = unittest.ClusterBlock.Genesis() require.NoError(suite.T(), err) suite.chainID = suite.genesis.ChainID + setter = func(h *flow.HeaderBodyBuilder) error { + h.WithHeight(42). + WithChainID(suite.chainID). + WithParentID(unittest.IdentifierFixture()). + WithView(1337). + WithParentView(1336). + WithParentVoterIndices(unittest.SignerIndicesFixture(4)). + WithParentVoterSigData(unittest.QCSigDataFixture()). + WithProposerID(unittest.IdentifierFixture()) + + return nil + } suite.pool = herocache.NewTransactions(1000, unittest.Logger(), metrics.NewNoopCollector()) @@ -95,12 +96,14 @@ func (suite *BuilderSuite) SetupTest() { tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := store.InitAll(metrics, suite.db) + all, err := store.InitAll(metrics, suite.db, flow.Emulator) + require.NoError(suite.T(), err) consumer := events.NewNoop() - suite.headers = all.Headers - suite.blocks = all.Blocks - suite.payloads = store.NewClusterPayloads(metrics, suite.db) + suite.clusterHeaders, err = store.NewClusterHeaders(metrics, suite.db, suite.chainID) + require.NoError(suite.T(), err) + suite.clusterPayloads = store.NewClusterPayloads(metrics, suite.db) + suite.consensusHeaders = all.Headers // just bootstrap with a genesis block, we'll use this as reference root, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) @@ -132,7 +135,7 @@ func (suite *BuilderSuite) SetupTest() { clusterState, err := 
clusterkv.Bootstrap(suite.db, suite.lockManager, clusterStateRoot) suite.Require().NoError(err) - suite.state, err = clusterkv.NewMutableState(clusterState, suite.lockManager, tracer, suite.headers, suite.payloads) + suite.state, err = clusterkv.NewMutableState(clusterState, suite.lockManager, tracer, suite.clusterHeaders, suite.clusterPayloads, suite.consensusHeaders) suite.Require().NoError(err) state, err := pbadger.Bootstrap( @@ -182,9 +185,9 @@ func (suite *BuilderSuite) SetupTest() { metrics, suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -602,9 +605,9 @@ func (suite *BuilderSuite) TestBuildOn_LargeHistory() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -698,9 +701,9 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionSize() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -731,9 +734,9 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionByteSize() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -764,9 +767,9 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionTotalGas() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -823,9 
+826,9 @@ func (suite *BuilderSuite) TestBuildOn_ExpiredTransaction() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -879,9 +882,9 @@ func (suite *BuilderSuite) TestBuildOn_EmptyMempool() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -920,9 +923,9 @@ func (suite *BuilderSuite) TestBuildOn_NoRateLimiting() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -945,7 +948,7 @@ func (suite *BuilderSuite) TestBuildOn_NoRateLimiting() { parentID := suite.genesis.ID() setter := func(h *flow.HeaderBodyBuilder) error { h.WithHeight(1). - WithChainID(flow.Emulator). + WithChainID(suite.chainID). WithParentID(parentID). WithView(1337). WithParentView(1336). @@ -987,9 +990,9 @@ func (suite *BuilderSuite) TestBuildOn_RateLimitNonPayer() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -1017,7 +1020,7 @@ func (suite *BuilderSuite) TestBuildOn_RateLimitNonPayer() { // since rate limiting does not apply to non-payer keys, we should fill all collections in 10 blocks parentID := suite.genesis.ID() setter := func(h *flow.HeaderBodyBuilder) error { - h.WithChainID(flow.Emulator). + h.WithChainID(suite.chainID). WithParentID(parentID). WithView(1337). WithParentView(1336). 
@@ -1055,9 +1058,9 @@ func (suite *BuilderSuite) TestBuildOn_HighRateLimit() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -1079,7 +1082,7 @@ func (suite *BuilderSuite) TestBuildOn_HighRateLimit() { // rate-limiting should be applied, resulting in half-full collections (5/10) parentID := suite.genesis.ID() setter := func(h *flow.HeaderBodyBuilder) error { - h.WithChainID(flow.Emulator). + h.WithChainID(suite.chainID). WithParentID(parentID). WithView(1337). WithParentView(1336). @@ -1119,9 +1122,9 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionSizeRateLimiting() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -1163,7 +1166,7 @@ func (suite *BuilderSuite) TestBuildOn_MaxCollectionSizeRateLimiting() { // rate-limiting should be applied, resulting in minimum collection size. parentID := suite.genesis.ID() setter := func(h *flow.HeaderBodyBuilder) error { - h.WithChainID(flow.Emulator). + h.WithChainID(suite.chainID). WithParentID(parentID). WithView(1337). WithParentView(1336). @@ -1200,9 +1203,9 @@ func (suite *BuilderSuite) TestBuildOn_LowRateLimit() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -1225,7 +1228,7 @@ func (suite *BuilderSuite) TestBuildOn_LowRateLimit() { // having one transaction and empty collections otherwise parentID := suite.genesis.ID() setter := func(h *flow.HeaderBodyBuilder) error { - h.WithChainID(flow.Emulator). + h.WithChainID(suite.chainID). 
WithParentID(parentID). WithView(1337). WithParentView(1336). @@ -1266,9 +1269,9 @@ func (suite *BuilderSuite) TestBuildOn_UnlimitedPayer() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -1290,7 +1293,7 @@ func (suite *BuilderSuite) TestBuildOn_UnlimitedPayer() { // rate-limiting should not be applied, since the payer is marked as unlimited parentID := suite.genesis.ID() setter := func(h *flow.HeaderBodyBuilder) error { - h.WithChainID(flow.Emulator). + h.WithChainID(suite.chainID). WithParentID(parentID). WithView(1337). WithParentView(1336). @@ -1331,9 +1334,9 @@ func (suite *BuilderSuite) TestBuildOn_RateLimitDryRun() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -1355,7 +1358,7 @@ func (suite *BuilderSuite) TestBuildOn_RateLimitDryRun() { // rate-limiting should not be applied, since dry-run setting is enabled parentID := suite.genesis.ID() setter := func(h *flow.HeaderBodyBuilder) error { - h.WithChainID(flow.Emulator). + h.WithChainID(suite.chainID). WithParentID(parentID). WithView(1337). WithParentView(1336). 
@@ -1394,9 +1397,9 @@ func (suite *BuilderSuite) TestBuildOn_SystemTxAlwaysIncluded() { metrics.NewNoopCollector(), suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, @@ -1488,10 +1491,12 @@ func benchmarkBuildOn(b *testing.B, size int) { metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() - all := store.InitAll(metrics, suite.db) - suite.headers = all.Headers - suite.blocks = all.Blocks - suite.payloads = store.NewClusterPayloads(metrics, suite.db) + all, err := store.InitAll(metrics, suite.db, flow.Emulator) + require.NoError(b, err) + suite.clusterHeaders, err = store.NewClusterHeaders(metrics, suite.db, suite.chainID) + require.NoError(b, err) + suite.clusterPayloads = store.NewClusterPayloads(metrics, suite.db) + suite.consensusHeaders = all.Headers qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(suite.genesis.ID())) stateRoot, err := clusterkv.NewStateRoot(suite.genesis, qc, suite.epochCounter) @@ -1499,7 +1504,7 @@ func benchmarkBuildOn(b *testing.B, size int) { state, err := clusterkv.Bootstrap(suite.db, suite.lockManager, stateRoot) assert.NoError(b, err) - suite.state, err = clusterkv.NewMutableState(state, suite.lockManager, tracer, suite.headers, suite.payloads) + suite.state, err = clusterkv.NewMutableState(state, suite.lockManager, tracer, suite.clusterHeaders, suite.clusterPayloads, suite.consensusHeaders) assert.NoError(b, err) // add some transactions to transaction pool @@ -1517,9 +1522,9 @@ func benchmarkBuildOn(b *testing.B, size int) { metrics, suite.protoState, suite.state, - suite.headers, - suite.headers, - suite.payloads, + suite.consensusHeaders, + suite.clusterHeaders, + suite.clusterPayloads, suite.pool, unittest.Logger(), suite.epochCounter, diff --git a/module/executiondatasync/optimistic_sync/pipeline/pipeline_functional_test.go 
b/module/executiondatasync/optimistic_sync/pipeline/pipeline_functional_test.go index c7da63bec2f..bd200786d0e 100644 --- a/module/executiondatasync/optimistic_sync/pipeline/pipeline_functional_test.go +++ b/module/executiondatasync/optimistic_sync/pipeline/pipeline_functional_test.go @@ -124,7 +124,8 @@ func (p *PipelineFunctionalSuite) SetupTest() { p.Require().NoError(err) // store and index the root header - p.headers = store.NewHeaders(p.metrics, p.db) + p.headers, err = store.NewHeaders(p.metrics, p.db, g.ChainID()) + p.Require().NoError(err) err = unittest.WithLock(t, p.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { diff --git a/module/finalizedreader/finalizedreader_test.go b/module/finalizedreader/finalizedreader_test.go index cd6bd194878..5788850e121 100644 --- a/module/finalizedreader/finalizedreader_test.go +++ b/module/finalizedreader/finalizedreader_test.go @@ -7,6 +7,7 @@ import ( "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" @@ -20,14 +21,15 @@ func TestFinalizedReader(t *testing.T) { lockManager := storage.NewTestingLockManager() // prepare the storage.Headers instance metrics := metrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) blocks := all.Blocks headers := all.Headers proposal := unittest.ProposalFixture() block := proposal.Block // store `block` - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return blocks.BatchStore(lctx, rw, 
proposal) }) diff --git a/module/finalizer/collection/finalizer_test.go b/module/finalizer/collection/finalizer_test.go index afc72b00168..436e7e63a82 100644 --- a/module/finalizer/collection/finalizer_test.go +++ b/module/finalizer/collection/finalizer_test.go @@ -25,7 +25,7 @@ import ( func TestFinalizer(t *testing.T) { // reference block on the main consensus chain - refBlock := unittest.ClusterBlockFixture() + refBlock := unittest.BlockFixture() // genesis block for the cluster chain genesis, err := unittest.ClusterBlock.Genesis() require.NoError(t, err) @@ -40,7 +40,7 @@ func TestFinalizer(t *testing.T) { state, err := cluster.Bootstrap(db, lockManager, stateRoot) require.NoError(t, err) - err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return operation.InsertHeader(lctx, rw, refBlock.ID(), refBlock.ToHeader()) }) diff --git a/module/finalizer/consensus/finalizer_test.go b/module/finalizer/consensus/finalizer_test.go index 965654328dc..baf7d776b2f 100644 --- a/module/finalizer/consensus/finalizer_test.go +++ b/module/finalizer/consensus/finalizer_test.go @@ -105,9 +105,11 @@ func TestMakeFinalValidChain(t *testing.T) { // initialize the finalizer with the dependencies and make the call metrics := metrics.NewNoopCollector() + headers, err := store.NewHeaders(metrics, pebbleimpl.ToDB(pdb), flow.Emulator) + require.NoError(t, err) fin := Finalizer{ dbReader: pebbleimpl.ToDB(pdb).Reader(), - headers: store.NewHeaders(metrics, pebbleimpl.ToDB(pdb)), + headers: headers, state: state, tracer: trace.NewNoopTracer(), cleanup: LogCleanup(&list), @@ -175,9 +177,11 @@ func TestMakeFinalInvalidHeight(t *testing.T) { // initialize the finalizer with the dependencies and make the call metrics := metrics.NewNoopCollector() + headers, 
err := store.NewHeaders(metrics, pebbleimpl.ToDB(pdb), flow.Emulator) + require.NoError(t, err) fin := Finalizer{ dbReader: pebbleimpl.ToDB(pdb).Reader(), - headers: store.NewHeaders(metrics, pebbleimpl.ToDB(pdb)), + headers: headers, state: state, tracer: trace.NewNoopTracer(), cleanup: LogCleanup(&list), @@ -234,9 +238,11 @@ func TestMakeFinalDuplicate(t *testing.T) { // initialize the finalizer with the dependencies and make the call metrics := metrics.NewNoopCollector() + headers, err := store.NewHeaders(metrics, pebbleimpl.ToDB(pdb), flow.Emulator) + require.NoError(t, err) fin := Finalizer{ dbReader: pebbleimpl.ToDB(pdb).Reader(), - headers: store.NewHeaders(metrics, pebbleimpl.ToDB(pdb)), + headers: headers, state: state, tracer: trace.NewNoopTracer(), cleanup: LogCleanup(&list), diff --git a/network/channels/errors_test.go b/network/channels/errors_test.go index 56dd23cd09b..b2c0aace33f 100644 --- a/network/channels/errors_test.go +++ b/network/channels/errors_test.go @@ -1,4 +1,4 @@ -package channels +package channels_test import ( "fmt" @@ -7,40 +7,47 @@ import ( "github.com/stretchr/testify/assert" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/state/cluster" + "github.com/onflow/flow-go/utils/unittest" ) // TestInvalidTopicErrRoundTrip ensures correct error formatting for InvalidTopicErr. func TestInvalidTopicErrRoundTrip(t *testing.T) { - topic := Topic("invalid-topic") + topic := channels.Topic("invalid-topic") wrapErr := fmt.Errorf("this err should be wrapped with topic to add context") - err := NewInvalidTopicErr(topic, wrapErr) + err := channels.NewInvalidTopicErr(topic, wrapErr) // tests the error message formatting. expectedErrMsg := fmt.Errorf("invalid topic %s: %w", topic, wrapErr).Error() assert.Equal(t, expectedErrMsg, err.Error(), "the error message should be correctly formatted") // tests the IsErrActiveClusterIDsNotSet function. 
- assert.True(t, IsInvalidTopicErr(err), "IsInvalidTopicErr should return true for InvalidTopicErr error") + assert.True(t, channels.IsInvalidTopicErr(err), "IsInvalidTopicErr should return true for InvalidTopicErr error") // test IsErrActiveClusterIDsNotSet with a different error type. dummyErr := fmt.Errorf("dummy error") - assert.False(t, IsInvalidTopicErr(dummyErr), "IsInvalidTopicErr should return false for non-IsInvalidTopicErr error") + assert.False(t, channels.IsInvalidTopicErr(dummyErr), "IsInvalidTopicErr should return false for non-IsInvalidTopicErr error") } // TestUnknownClusterIDErrRoundTrip ensures correct error formatting for UnknownClusterIDErr. func TestUnknownClusterIDErrRoundTrip(t *testing.T) { - clusterId := flow.ChainID("cluster-id") - activeClusterIds := flow.ChainIDList{"active", "cluster", "ids"} - err := NewUnknownClusterIdErr(clusterId, activeClusterIds) + clusterId := cluster.CanonicalClusterID(10, unittest.IdentifierListFixture(1)) + activeClusterIds := flow.ChainIDList{ + cluster.CanonicalClusterID(3, unittest.IdentifierListFixture(1)), + cluster.CanonicalClusterID(3, unittest.IdentifierListFixture(1)), + cluster.CanonicalClusterID(3, unittest.IdentifierListFixture(1)), + } + err := channels.NewUnknownClusterIdErr(clusterId, activeClusterIds) // tests the error message formatting. expectedErrMsg := fmt.Errorf("cluster ID %s not found in active cluster IDs list %s", clusterId, activeClusterIds).Error() assert.Equal(t, expectedErrMsg, err.Error(), "the error message should be correctly formatted") // tests the IsErrActiveClusterIDsNotSet function. - assert.True(t, IsUnknownClusterIDErr(err), "IsUnknownClusterIDErr should return true for UnknownClusterIDErr error") + assert.True(t, channels.IsUnknownClusterIDErr(err), "IsUnknownClusterIDErr should return true for UnknownClusterIDErr error") // test IsErrActiveClusterIDsNotSet with a different error type. 
dummyErr := fmt.Errorf("dummy error") - assert.False(t, IsUnknownClusterIDErr(dummyErr), "IsUnknownClusterIDErr should return false for non-UnknownClusterIDErr error") + assert.False(t, channels.IsUnknownClusterIDErr(dummyErr), "IsUnknownClusterIDErr should return false for non-UnknownClusterIDErr error") } diff --git a/state/cluster/badger/mutator.go b/state/cluster/badger/mutator.go index c27a02ae955..b22ac882fc8 100644 --- a/state/cluster/badger/mutator.go +++ b/state/cluster/badger/mutator.go @@ -22,21 +22,23 @@ import ( type MutableState struct { *State - lockManager lockctx.Manager - tracer module.Tracer - headers storage.Headers - payloads storage.ClusterPayloads + lockManager lockctx.Manager + tracer module.Tracer + clusterHeaders storage.Headers + clusterPayloads storage.ClusterPayloads + consensusHeaders storage.Headers } var _ clusterstate.MutableState = (*MutableState)(nil) -func NewMutableState(state *State, lockManager lockctx.Manager, tracer module.Tracer, headers storage.Headers, payloads storage.ClusterPayloads) (*MutableState, error) { +func NewMutableState(state *State, lockManager lockctx.Manager, tracer module.Tracer, clusterHeaders storage.Headers, clusterPayloads storage.ClusterPayloads, consensusHeaders storage.Headers) (*MutableState, error) { mutableState := &MutableState{ - State: state, - lockManager: lockManager, - tracer: tracer, - headers: headers, - payloads: payloads, + State: state, + lockManager: lockManager, + tracer: tracer, + clusterHeaders: clusterHeaders, + clusterPayloads: clusterPayloads, + consensusHeaders: consensusHeaders, } return mutableState, nil } @@ -165,7 +167,7 @@ func (m *MutableState) checkHeaderValidity(candidate *cluster.Block) error { } // get the header of the parent of the new block - parent, err := m.headers.ByBlockID(candidate.ParentID) + parent, err := m.clusterHeaders.ByBlockID(candidate.ParentID) if err != nil { return irrecoverable.NewExceptionf("could not retrieve latest finalized header: %w", err) } 
@@ -196,7 +198,7 @@ func (m *MutableState) checkConnectsToFinalizedState(ctx extendContext) error { // start with the extending block's parent for parentID != finalizedID { // get the parent of current block - ancestor, err := m.headers.ByBlockID(parentID) + ancestor, err := m.clusterHeaders.ByBlockID(parentID) if err != nil { return irrecoverable.NewExceptionf("could not get parent which must be known (%x): %w", parentID, err) } @@ -223,8 +225,8 @@ func (m *MutableState) checkConnectsToFinalizedState(ctx extendContext) error { func (m *MutableState) checkPayloadReferenceBlock(ctx extendContext) error { payload := ctx.candidate.Payload - // 1 - the reference block must be known - refBlock, err := m.headers.ByBlockID(payload.ReferenceBlockID) + // 1 - the reference block must be known, and it must be part of the main consensus chain + refBlock, err := m.consensusHeaders.ByBlockID(payload.ReferenceBlockID) if err != nil { if errors.Is(err, storage.ErrNotFound) { return state.NewUnverifiableExtensionError("cluster block references unknown reference block (id=%x)", payload.ReferenceBlockID) @@ -237,7 +239,7 @@ func (m *MutableState) checkPayloadReferenceBlock(ctx extendContext) error { // a reference block which is above the finalized boundary can't be verified yet return state.NewUnverifiableExtensionError("reference block is above finalized boundary (%d>%d)", refBlock.Height, ctx.finalizedConsensusHeight) } else { - storedBlockIDForHeight, err := m.headers.BlockIDByHeight(refBlock.Height) + storedBlockIDForHeight, err := m.consensusHeaders.BlockIDByHeight(refBlock.Height) if err != nil { return irrecoverable.NewExceptionf("could not look up block ID for finalized height: %w", err) } @@ -248,9 +250,6 @@ func (m *MutableState) checkPayloadReferenceBlock(ctx extendContext) error { } } - // TODO ensure the reference block is part of the main chain https://github.com/onflow/flow-go/issues/4204 - _ = refBlock - // 3 - the reference block must be within the cluster's 
operating epoch if refBlock.Height < ctx.epochFirstHeight { return state.NewInvalidExtensionErrorf("invalid reference block is before operating epoch for cluster, height %d<%d", refBlock.Height, ctx.epochFirstHeight) @@ -291,7 +290,7 @@ func (m *MutableState) checkPayloadTransactions(lctx lockctx.Proof, ctx extendCo minRefHeight := uint64(math.MaxUint64) maxRefHeight := uint64(0) for _, flowTx := range payload.Collection.Transactions { - refBlock, err := m.headers.ByBlockID(flowTx.ReferenceBlockID) + refBlock, err := m.consensusHeaders.ByBlockID(flowTx.ReferenceBlockID) if errors.Is(err, storage.ErrNotFound) { // Reject collection if it contains a transaction with an unknown reference block, because we cannot verify its validity. return state.NewUnverifiableExtensionError("collection contains tx (tx_id=%x) with unknown reference block (block_id=%x): %w", flowTx.ID(), flowTx.ReferenceBlockID, err) @@ -385,8 +384,8 @@ func (m *MutableState) checkPayloadTransactions(lctx lockctx.Proof, ctx extendCo // No errors are expected during normal operation. 
func (m *MutableState) checkDupeTransactionsInUnfinalizedAncestry(block *cluster.Block, includedTransactions map[flow.Identifier]struct{}, finalHeight uint64) ([]flow.Identifier, error) { var duplicateTxIDs []flow.Identifier - err := fork.TraverseBackward(m.headers, block.ParentID, func(ancestor *flow.Header) error { - payload, err := m.payloads.ByBlockID(ancestor.ID()) + err := fork.TraverseBackward(m.clusterHeaders, block.ParentID, func(ancestor *flow.Header) error { + payload, err := m.clusterPayloads.ByBlockID(ancestor.ID()) if err != nil { return fmt.Errorf("could not retrieve ancestor payload: %w", err) } @@ -457,7 +456,7 @@ func (m *MutableState) checkDupeTransactionsInFinalizedAncestry( for _, blockID := range clusterBlockIDs { // TODO: could add LightByBlockID and retrieve only tx IDs - payload, err := m.payloads.ByBlockID(blockID) + payload, err := m.clusterPayloads.ByBlockID(blockID) if err != nil { return nil, fmt.Errorf("could not retrieve cluster payload (block_id=%x) to de-duplicate: %w", blockID, err) } @@ -485,7 +484,7 @@ func (m *MutableState) checkDupeTransactionsInFinalizedAncestry( // extension here. Hence, a higher block may have been finalized just now and returned by the // database search. However, all newer finalized blocks have height > `finalClusterHeight`, i.e. // a height outside the range this function scans. 
- header, err := m.headers.ByBlockID(blockID) + header, err := m.clusterHeaders.ByBlockID(blockID) if err != nil { return nil, fmt.Errorf("could not retrieve header by block_id=%x: %w", blockID, err) } diff --git a/state/cluster/badger/mutator_test.go b/state/cluster/badger/mutator_test.go index cbf68967be3..8450ed629fb 100644 --- a/state/cluster/badger/mutator_test.go +++ b/state/cluster/badger/mutator_test.go @@ -68,7 +68,10 @@ func (suite *MutatorSuite) SetupTest() { metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := store.InitAll(metrics, suite.db) + all, err := store.InitAll(metrics, suite.db, flow.Emulator) + require.NoError(suite.T(), err) + clusterHeaders, err := store.NewClusterHeaders(metrics, suite.db, suite.chainID) + require.NoError(suite.T(), err) colPayloads := store.NewClusterPayloads(metrics, suite.db) // just bootstrap with a genesis block, we'll use this as reference @@ -135,7 +138,7 @@ func (suite *MutatorSuite) SetupTest() { suite.NoError(err) clusterState, err := Bootstrap(suite.db, suite.lockManager, clusterStateRoot) suite.Assert().Nil(err) - suite.state, err = NewMutableState(clusterState, suite.lockManager, tracer, all.Headers, colPayloads) + suite.state, err = NewMutableState(clusterState, suite.lockManager, tracer, clusterHeaders, colPayloads, all.Headers) suite.Assert().Nil(err) } @@ -444,8 +447,6 @@ func (suite *MutatorSuite) TestExtend_WithExpiredReferenceBlock() { } func (suite *MutatorSuite) TestExtend_WithReferenceBlockFromClusterChain() { - // TODO skipping as this isn't implemented yet - unittest.SkipUnless(suite.T(), unittest.TEST_TODO, "skipping as this isn't implemented yet") // set genesis from cluster chain as reference block proposal := suite.ProposalWithParentAndPayload(suite.genesis, *model.NewEmptyPayload(suite.genesis.ID())) err := suite.state.Extend(&proposal) diff --git a/state/cluster/badger/snapshot_test.go b/state/cluster/badger/snapshot_test.go index 
76db04a71f5..767bf834ed3 100644 --- a/state/cluster/badger/snapshot_test.go +++ b/state/cluster/badger/snapshot_test.go @@ -56,8 +56,11 @@ func (suite *SnapshotSuite) SetupTest() { metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() - all := store.InitAll(metrics, suite.db) + all, err := store.InitAll(metrics, suite.db, flow.Emulator) + require.NoError(suite.T(), err) colPayloads := store.NewClusterPayloads(metrics, suite.db) + clusterHeaders, err := store.NewClusterHeaders(metrics, suite.db, suite.chainID) + require.NoError(suite.T(), err) root := unittest.RootSnapshotFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) suite.epochCounter = root.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry.EpochCounter() @@ -84,7 +87,7 @@ func (suite *SnapshotSuite) SetupTest() { suite.Require().NoError(err) clusterState, err := Bootstrap(suite.db, suite.lockManager, clusterStateRoot) suite.Require().NoError(err) - suite.state, err = NewMutableState(clusterState, suite.lockManager, tracer, all.Headers, colPayloads) + suite.state, err = NewMutableState(clusterState, suite.lockManager, tracer, clusterHeaders, colPayloads, all.Headers) suite.Require().NoError(err) } diff --git a/state/cluster/root_block.go b/state/cluster/root_block.go index 7b408dc8d82..1c64a26f51c 100644 --- a/state/cluster/root_block.go +++ b/state/cluster/root_block.go @@ -2,15 +2,29 @@ package cluster import ( "fmt" + "regexp" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" ) +// ClusterChainPrefix is the prefix used for all Collection Cluster chains. +// Clusters change every epoch, resulting in a new ChainID every epoch. +// Cluster ChainIDs are generated by [CanonicalClusterID] and verified by [IsCanonicalClusterID]. +const ClusterChainPrefix string = "cluster" + // CanonicalClusterID returns the canonical chain ID for the given cluster in // the given epoch. 
func CanonicalClusterID(epoch uint64, participants flow.IdentifierList) flow.ChainID { - return flow.ChainID(fmt.Sprintf("cluster-%d-%s", epoch, participants.ID())) + return flow.ChainID(fmt.Sprintf(ClusterChainPrefix+"-%d-%s", epoch, participants.ID())) +} + +var clusterChainFormat = regexp.MustCompile("^" + ClusterChainPrefix + `-\d+-[0-9a-f]{64}$`) + +// IsCanonicalClusterID returns true if the chain ID matches the standard format +// for a collection cluster during an epoch, rather than a full network. +func IsCanonicalClusterID(c flow.ChainID) bool { + return clusterChainFormat.MatchString(string(c)) } // CanonicalRootBlock returns the canonical root block for the given diff --git a/state/cluster/state_test.go b/state/cluster/state_test.go new file mode 100644 index 00000000000..bde7a27c20e --- /dev/null +++ b/state/cluster/state_test.go @@ -0,0 +1,28 @@ +package cluster_test + +import ( + "math/rand/v2" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/cluster" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestIsCanonicalClusterID verifies that cluster ChainIDs generated +// by [CanonicalClusterID] are accepted by [IsCanonicalClusterID], and +// that the standard consensus chainIDs are not accepted. 
+func TestIsCanonicalClusterID(t *testing.T) { + for _, chainID := range flow.AllChainIDs() { + require.False(t, cluster.IsCanonicalClusterID(chainID)) + } + for n := range 100 { + epoch := rand.Uint64() + clusterID := cluster.CanonicalClusterID(epoch, unittest.IdentifierListFixture(n)) + require.True(t, cluster.IsCanonicalClusterID(clusterID)) + require.True(t, strings.HasPrefix(string(clusterID), cluster.ClusterChainPrefix)) + } +} diff --git a/state/protocol/badger/mutator_test.go b/state/protocol/badger/mutator_test.go index eb9c7a2df07..aa73691926f 100644 --- a/state/protocol/badger/mutator_test.go +++ b/state/protocol/badger/mutator_test.go @@ -89,7 +89,8 @@ func TestExtendValid(t *testing.T) { tracer := trace.NewNoopTracer() db := pebbleimpl.ToDB(pdb) log := zerolog.Nop() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) distributor := events.NewDistributor() consumer := mockprotocol.NewConsumer(t) @@ -919,7 +920,8 @@ func TestExtendEpochTransitionValid(t *testing.T) { tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := store.InitAll(mmetrics.NewNoopCollector(), db) + all, err := store.InitAll(mmetrics.NewNoopCollector(), db, flow.Emulator) + require.NoError(t, err) protoState, err := protocol.Bootstrap( metrics, db, @@ -2802,7 +2804,8 @@ func TestExtendInvalidSealsInBlock(t *testing.T) { tracer := trace.NewNoopTracer() log := zerolog.Nop() db := pebbleimpl.ToDB(pdb) - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) // create a event consumer to test epoch transition events distributor := events.NewDistributor() @@ -3466,7 +3469,8 @@ func TestHeaderInvalidTimestamp(t *testing.T) { tracer := trace.NewNoopTracer() log := zerolog.Nop() db := pebbleimpl.ToDB(pdb) - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) // create a event consumer to test epoch 
transition events distributor := events.NewDistributor() diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index cf0ae743c48..b5a1e251275 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -977,10 +977,11 @@ func newState( } // IsBootstrapped returns whether the database contains a bootstrapped state +// No errors expected during normal operation. Any error is a symptom of a bug or state corruption. func IsBootstrapped(db storage.DB) (bool, error) { var finalized uint64 err := operation.RetrieveFinalizedHeight(db.Reader(), &finalized) - if errors.Is(err, storage.ErrNotFound) { + if errors.Is(err, operation.IncompleteStateError) { return false, nil } if err != nil { @@ -989,6 +990,61 @@ func IsBootstrapped(db storage.DB) (bool, error) { return true, nil } +// GetChainID retrieves the consensus chainID from the latest finalized block in the database. This +// function reads directly from the database, without instantiating high-level storage abstractions +// or the protocol state struct. +// +// During bootstrapping, the latest finalized block and its height are indexed and thereafter the +// latest finalized height is only updated (but never removed). Hence, for a properly bootstrapped node, +// this function should _always_ return a proper value (constant throughout the lifetime of the node). +// +// Note: This function should only be called on properly bootstrapped nodes. If the state is corrupted +// or the node is not properly bootstrapped, this function may return [operation.IncompleteStateError]. +// The reason for not returning [storage.ErrNotFound] directly is to avoid confusion between an often +// benign [storage.ErrNotFound] and failed reads of quantities that the protocol mandates to be present. +// +// No error returns expected during normal operations. 
+func GetChainID(db storage.DB) (flow.ChainID, error) { + h, err := GetLatestFinalizedHeader(db) + if err != nil { + return "", fmt.Errorf("failed to determine chain ID: %w", err) + } + return h.ChainID, nil +} + +// GetLatestFinalizedHeader retrieves the header of the latest finalized block. This function reads directly +// from the database, without instantiating high-level storage abstractions or the protocol state struct. +// +// During bootstrapping, the latest finalized block and its height are indexed and thereafter the latest +// finalized height is only updated (but never removed). Hence, for a properly bootstrapped node, this +// function should _always_ return a proper value. +// +// Note: This function should only be called on properly bootstrapped nodes. If the state is corrupted +// or the node is not properly bootstrapped, this function may return [operation.IncompleteStateError]. +// The reason for not returning [storage.ErrNotFound] directly is to avoid confusion between an often +// benign [storage.ErrNotFound] and failed reads of quantities that the protocol mandates to be present. +// +// No error returns are expected during normal operations. 
+func GetLatestFinalizedHeader(db storage.DB) (*flow.Header, error) { + var finalized uint64 + r := db.Reader() + err := operation.RetrieveFinalizedHeight(r, &finalized) + if err != nil { + return nil, fmt.Errorf("could not retrieve latest finalized height: %w", err) + } + var id flow.Identifier + err = operation.LookupBlockHeight(r, finalized, &id) + if err != nil { + return nil, fmt.Errorf("could not retrieve blockID of finalized block at height %d: %w", finalized, operation.IncompleteStateError) + } + var header flow.Header + err = operation.RetrieveHeader(r, id, &header) + if err != nil { + return nil, fmt.Errorf("could not retrieve latest finalized block %x: %w", id, operation.IncompleteStateError) + } + return &header, nil +} + // updateEpochMetrics update the `consensus_compliance_current_epoch_counter` and the // `consensus_compliance_current_epoch_phase` metric func updateEpochMetrics(metrics module.ComplianceMetrics, snap protocol.Snapshot) error { diff --git a/state/protocol/badger/state_test.go b/state/protocol/badger/state_test.go index 83c05b1e40c..2c642dfe292 100644 --- a/state/protocol/badger/state_test.go +++ b/state/protocol/badger/state_test.go @@ -55,7 +55,8 @@ func TestBootstrapAndOpen(t *testing.T) { epoch.DKGPhase1FinalView(), epoch.DKGPhase2FinalView(), epoch.DKGPhase3FinalView()).Once() noopMetrics := new(metrics.NoopCollector) - all := store.InitAll(noopMetrics, db) + all, err := store.InitAll(noopMetrics, db, flow.Emulator) + require.NoError(t, err) // protocol state has been bootstrapped, now open a protocol state with the database state, err := bprotocol.OpenState( complianceMetrics, @@ -135,7 +136,8 @@ func TestBootstrapAndOpen_EpochCommitted(t *testing.T) { complianceMetrics.On("BlockSealed", testmock.Anything).Once() noopMetrics := new(metrics.NoopCollector) - all := store.InitAll(noopMetrics, db) + all, err := store.InitAll(noopMetrics, db, flow.Emulator) + require.NoError(t, err) state, err := bprotocol.OpenState( 
complianceMetrics, db, @@ -850,7 +852,8 @@ func bootstrap(t *testing.T, rootSnapshot protocol.Snapshot, f func(*bprotocol.S db := pebbleimpl.ToDB(pdb) lockManager := storage.NewTestingLockManager() defer db.Close() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) state, err := bprotocol.Bootstrap( metrics, db, diff --git a/state/protocol/util/testing.go b/state/protocol/util/testing.go index cc0ba5f0a79..b74ef4037d3 100644 --- a/state/protocol/util/testing.go +++ b/state/protocol/util/testing.go @@ -70,7 +70,8 @@ func RunWithBootstrapState(t testing.TB, rootSnapshot protocol.Snapshot, f func( lockManager := storage.NewTestingLockManager() db := pebbleimpl.ToDB(pdb) metrics := mmetrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) state, err := pbadger.Bootstrap( metrics, db, @@ -100,7 +101,8 @@ func RunWithFullProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f fu tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) state, err := pbadger.Bootstrap( metrics, db, @@ -144,7 +146,8 @@ func RunWithFullProtocolStateAndMetrics(t testing.TB, rootSnapshot protocol.Snap tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := store.InitAll(mmetrics.NewNoopCollector(), db) + all, err := store.InitAll(mmetrics.NewNoopCollector(), db, flow.Emulator) + require.NoError(t, err) state, err := pbadger.Bootstrap( metrics, db, @@ -190,7 +193,8 @@ func RunWithFullProtocolStateAndValidator(t testing.TB, rootSnapshot protocol.Sn tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) state, err := 
pbadger.Bootstrap( metrics, db, @@ -234,7 +238,8 @@ func RunWithFollowerProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) state, err := pbadger.Bootstrap( metrics, db, @@ -274,7 +279,8 @@ func RunWithFullProtocolStateAndConsumer(t testing.TB, rootSnapshot protocol.Sna metrics := mmetrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) state, err := pbadger.Bootstrap( metrics, db, @@ -317,7 +323,8 @@ func RunWithFullProtocolStateAndMetricsAndConsumer(t testing.TB, rootSnapshot pr db := pebbleimpl.ToDB(pdb) tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := store.InitAll(mmetrics.NewNoopCollector(), db) + all, err := store.InitAll(mmetrics.NewNoopCollector(), db, flow.Emulator) + require.NoError(t, err) state, err := pbadger.Bootstrap( metrics, db, @@ -372,7 +379,8 @@ func RunWithFollowerProtocolStateAndHeaders(t testing.TB, rootSnapshot protocol. 
tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) state, err := pbadger.Bootstrap( metrics, db, @@ -413,7 +421,8 @@ func RunWithFullProtocolStateAndMutator(t testing.TB, rootSnapshot protocol.Snap tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) state, err := pbadger.Bootstrap( metrics, db, diff --git a/storage/badger/all.go b/storage/badger/all.go index 4a8e4dfb3c2..01659b51615 100644 --- a/storage/badger/all.go +++ b/storage/badger/all.go @@ -3,15 +3,20 @@ package badger import ( "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation/badgerimpl" "github.com/onflow/flow-go/storage/store" ) -func InitAll(metrics module.CacheMetrics, db *badger.DB) *storage.All { +// Deprecated: use [store.InitAll] instead. +func InitAll(metrics module.CacheMetrics, db *badger.DB, chainID flow.ChainID) (*storage.All, error) { sdb := badgerimpl.ToDB(db) - headers := store.NewHeaders(metrics, sdb) + headers, err := store.NewHeaders(metrics, sdb, chainID) + if err != nil { + return nil, err + } guarantees := store.NewGuarantees(metrics, sdb, DefaultCacheSize, DefaultCacheSize) seals := store.NewSeals(metrics, sdb) index := store.NewIndex(metrics, sdb) @@ -47,5 +52,5 @@ func InitAll(metrics module.CacheMetrics, db *badger.DB) *storage.All { Receipts: receipts, Transactions: transactions, Collections: collections, - } + }, nil } diff --git a/storage/blocks.go b/storage/blocks.go index dcd82dec013..e5f50eb5c46 100644 --- a/storage/blocks.go +++ b/storage/blocks.go @@ -23,45 +23,40 @@ type Blocks interface { // the subsequent database write in order to prevent
accidental state corruption. Therefore, the caller must // acquire [storage.LockInsertBlock] and hold it until the database write has been committed. // - // Expected error returns during normal operations: + // Expected errors during normal operations: // - [storage.ErrAlreadyExists] if some block with the same ID has already been stored + // - [storage.ErrWrongChain] if the block is part of a different chain than this storage was initialized with BatchStore(lctx lockctx.Proof, rw ReaderBatchWriter, proposal *flow.Proposal) error // ByID returns the block with the given hash. It is available for all incorporated blocks (validated blocks // that have been appended to any of the known forks) no matter whether the block has been finalized or not. // - // Error returns: - // - storage.ErrNotFound if no block with the corresponding ID was found - // - generic error in case of unexpected failure from the database layer, or failure - // to decode an existing database value + // Expected errors during normal operations: + // - [storage.ErrNotFound] if no block with the corresponding ID was found + // - [storage.ErrWrongChain] if a block with that ID exists but on a different chain, such as a cluster chain ByID(blockID flow.Identifier) (*flow.Block, error) // ProposalByID returns the block with the given ID, along with the proposer's signature on it. // It is available for all incorporated blocks (validated blocks that have been appended to any // of the known forks) no matter whether the block has been finalized or not. 
// - // Error returns: - // - storage.ErrNotFound if no block with the corresponding ID was found - // - generic error in case of unexpected failure from the database layer, or failure - // to decode an existing database value + // Expected errors during normal operations: + // - [storage.ErrNotFound] if no block with the corresponding ID was found + // - [storage.ErrWrongChain] if a block with that ID exists but on a different chain, such as a cluster chain ProposalByID(blockID flow.Identifier) (*flow.Proposal, error) // ByHeight returns the block at the given height. It is only available // for finalized blocks. // - // Error returns: - // - storage.ErrNotFound if no block for the corresponding height was found - // - generic error in case of unexpected failure from the database layer, or failure - // to decode an existing database value + // Expected errors during normal operations: + // - [storage.ErrNotFound] if no block for the corresponding height was found ByHeight(height uint64) (*flow.Block, error) // ProposalByHeight returns the block at the given height, along with the proposer's // signature on it. It is only available for finalized blocks. // - // Error returns: - // - storage.ErrNotFound if no block proposal for the corresponding height was found - // - generic error in case of unexpected failure from the database layer, or failure - // to decode an existing database value + // Expected errors during normal operations: + // - [storage.ErrNotFound] if no block proposal for the corresponding height was found ProposalByHeight(height uint64) (*flow.Proposal, error) // ByView returns the block with the given view. It is only available for certified blocks. @@ -70,13 +65,13 @@ type Blocks interface { // even for non-finalized blocks. // // Expected errors during normal operations: - // - `storage.ErrNotFound` if no certified block is known at given view. + // - [storage.ErrNotFound] if no certified block is known at given view. 
ByView(view uint64) (*flow.Block, error) // ProposalByView returns the block proposal with the given view. It is only available for certified blocks. // // Expected errors during normal operations: - // - `storage.ErrNotFound` if no certified block is known at given view. + // - [storage.ErrNotFound] if no certified block is known at given view. ProposalByView(view uint64) (*flow.Proposal, error) // ByCollectionID returns the block for the given [flow.CollectionGuarantee] ID. @@ -86,10 +81,8 @@ type Blocks interface { // finality. // CAUTION: this method is not backed by a cache and therefore comparatively slow! // - // Error returns: - // - storage.ErrNotFound if the collection ID was not found - // - generic error in case of unexpected failure from the database layer, or failure - // to decode an existing database value + // Expected errors during normal operations: + // - [storage.ErrNotFound] if the collection ID was not found ByCollectionID(collID flow.Identifier) (*flow.Block, error) // BlockIDByCollectionID returns the block ID for the finalized block which includes the guarantee for the given collection @@ -100,10 +93,8 @@ type Blocks interface { // finality. // CAUTION: this method is not backed by a cache and therefore comparatively slow! // - // Error returns: - // - storage.ErrNotFound if no FINALIZED block exists containing the expected collection guarantee - // - generic error in case of unexpected failure from the database layer, or failure - // to decode an existing database value + // Expected errors during normal operations: + // - [storage.ErrNotFound] if no FINALIZED block exists containing the expected collection guarantee BlockIDByCollectionID(collID flow.Identifier) (flow.Identifier, error) // BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees. 
diff --git a/storage/cluster_blocks.go b/storage/cluster_blocks.go index 6827b8f4b57..2012454b6d3 100644 --- a/storage/cluster_blocks.go +++ b/storage/cluster_blocks.go @@ -17,7 +17,8 @@ type ClusterBlocks interface { // of the known forks) no matter whether the collection has been finalized or not. // // Error returns: - // - storage.ErrNotFound if the block ID was not found + // - [storage.ErrNotFound] if the block ID was not found + // - [storage.ErrWrongChain] if the block header exists in the database but is part of a different chain than expected // - generic error in case of unexpected failure from the database layer, or failure // to decode an existing database value ProposalByID(blockID flow.Identifier) (*cluster.Proposal, error) @@ -26,7 +27,7 @@ type ClusterBlocks interface { // signature on it. It is only available for finalized collections. // // Error returns: - // - storage.ErrNotFound if the block height or block ID was not found + // - [storage.ErrNotFound] if the block height or block ID was not found // - generic error in case of unexpected failure from the database layer, or failure // to decode an existing database value ProposalByHeight(height uint64) (*cluster.Proposal, error) diff --git a/storage/errors.go b/storage/errors.go index b3d81d9709c..82ee6fea57c 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -31,6 +31,14 @@ var ( // ErrNotBootstrapped is returned when the database has not been bootstrapped. ErrNotBootstrapped = errors.New("pebble database not bootstrapped") + + // ErrWrongChain is returned when data from a specific chain (consensus or cluster) + // is expected to be read or inserted, but the actual chainID does not match. + ErrWrongChain = errors.New("data is not part of the expected chain") + + // ErrNotAvailableForClusterConsensus is returned when a method defined for main consensus storage + // is not implemented by cluster consensus storage. 
+ ErrNotAvailableForClusterConsensus = errors.New("method not available for cluster chain") ) // InvalidDKGStateTransitionError is a sentinel error that is returned in case an invalid state transition is attempted. diff --git a/storage/headers.go b/storage/headers.go index 045f2e39710..14f7c5c7c20 100644 --- a/storage/headers.go +++ b/storage/headers.go @@ -4,12 +4,20 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// Headers represents persistent storage for blocks. +// Headers represents persistent storage for block headers on a specific chain. +// There may be several instances active on a single node for different chains, +// including the main consensus chain and multiple epoch-specific cluster chains. +// For example, Collector nodes store headers both for the main chain (via a consensus follower) +// and for their own cluster consensus. +// Users of the interface should be careful to use the correct instance with the appropriate chainID, +// as otherwise, retrieving or storing will fail with [storage.ErrWrongChain]. +// NOTE: instances for cluster consensus do not currently support ByView() lookup. type Headers interface { // ByBlockID returns the header with the given ID. It is available for finalized blocks and those pending finalization. // Error returns: // - [storage.ErrNotFound] if no block header with the given ID exists + // - [storage.ErrWrongChain] if the block header exists in the database but is part of a different chain than expected ByBlockID(blockID flow.Identifier) (*flow.Header, error) // ByHeight returns the block with the given number. It is only available for finalized blocks. @@ -17,13 +25,14 @@ type Headers interface { // - [storage.ErrNotFound] if no finalized block is known at the given height ByHeight(height uint64) (*flow.Header, error) - // ByView returns the block with the given view. It is only available for certified blocks. + // ByView returns the block with the given view. 
It is only available for certified blocks on a consensus chain. // Certified blocks are the blocks that have received QC. Hotstuff guarantees that for each view, // at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique // even for non-finalized blocks. // // Expected errors during normal operations: // - [storage.ErrNotFound] if no certified block is known at given view. + // - [storage.ErrNotAvailableForClusterConsensus] if called on a cluster Headers instance (created by store.NewClusterHeaders) ByView(view uint64) (*flow.Header, error) // Exists returns true if a header with the given ID has been stored. @@ -42,11 +51,13 @@ type Headers interface { // // Expected error returns during normal operations: // - [storage.ErrNotFound] if no block with the given parentID is known + // - [storage.ErrWrongChain] if the parent is part of a different chain than expected ByParentID(parentID flow.Identifier) ([]*flow.Header, error) // ProposalByBlockID returns the header with the given ID, along with the corresponding proposer signature. // It is available for finalized blocks and those pending finalization. // Error returns: // - [storage.ErrNotFound] if no block header or proposer signature with the given blockID exists + // - [storage.ErrWrongChain] if the block header exists in the database but is part of a different chain than expected ProposalByBlockID(blockID flow.Identifier) (*flow.ProposalHeader, error) } diff --git a/storage/operation/cluster.go b/storage/operation/cluster.go index bf8765c322f..f91d2c3a275 100644 --- a/storage/operation/cluster.go +++ b/storage/operation/cluster.go @@ -215,8 +215,8 @@ func LookupClusterBlocksByReferenceHeightRange(lctx lockctx.Proof, r storage.Rea func InsertClusterBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, proposal *cluster.Proposal) error { // We need to enforce that each cluster block is inserted and indexed exactly once (no overwriting allowed): // 1. 
We check that the lock [storage.LockInsertOrFinalizeClusterBlock] for cluster block insertion is held. - // 2. When calling `operation.InsertHeader`, we append the storage operations for inserting the header to the - // provided write batch. Note that `operation.InsertHeader` checks whether the header already exists, + // 2. When calling `operation.InsertClusterHeader `, we append the storage operations for inserting the header to the + // provided write batch. Note that `operation.InsertClusterHeader` checks whether the header already exists, // returning [storage.ErrAlreadyExists] if so. // 3. We append all other storage indexing operations to the same write batch, without additional existence // checks. This is safe, because this is the only place where these indexes are created, and we always @@ -224,7 +224,7 @@ func InsertClusterBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, propos // that the header did not exist before, we also know that none of the other indexes existed before either // 4. We require that the caller holds the lock until the write batch has been committed. // Thereby, we guarantee that no other thread can write data about the same block concurrently. - // When these constraints are met, we know that no overwrites occurred because `InsertHeader` + // When these constraints are met, we know that no overwrites occurred because `InsertClusterHeader` // includes guarantees that the key `blockID` has not yet been used before. if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { // 1. check lock return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock) @@ -234,7 +234,7 @@ func InsertClusterBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, propos // Hence, two different blocks having the same key is practically impossible. blockID := proposal.Block.ID() // 2. 
Store the block header; errors with [storage.ErrAlreadyExists] if some entry for `blockID` already exists - err := InsertHeader(lctx, rw, blockID, proposal.Block.ToHeader()) + err := InsertClusterHeader(lctx, rw, blockID, proposal.Block.ToHeader()) if err != nil { return fmt.Errorf("could not insert cluster block header: %w", err) } diff --git a/storage/operation/cluster_test.go b/storage/operation/cluster_test.go index 9cf4839b2af..a922a115b24 100644 --- a/storage/operation/cluster_test.go +++ b/storage/operation/cluster_test.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" + clusterstate "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" "github.com/onflow/flow-go/storage/operation/dbtest" @@ -20,7 +21,7 @@ func TestClusterHeights(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() var ( - clusterID flow.ChainID = "cluster" + clusterID flow.ChainID = clusterstate.CanonicalClusterID(0, unittest.IdentifierListFixture(1)) height uint64 = 42 expected = unittest.IdentifierFixture() err error @@ -80,7 +81,7 @@ func TestClusterHeights(t *testing.T) { // First writing all three is important to detect bugs, where the logic ignores the cluster ID // and only memorizes the latest block stored for a given height (irrespective of cluster ID). 
clusterBlockIDs := unittest.IdentifierListFixture(3) - clusterIDs := []flow.ChainID{"cluster-0", "cluster-1", "cluster-2"} + clusterIDs := []flow.ChainID{"cluster-0-00", "cluster-1-ff", "cluster-2-02"} var actual flow.Identifier for i := 0; i < len(clusterBlockIDs); i++ { err = operation.LookupClusterBlockHeight(db.Reader(), clusterIDs[i], height, &actual) @@ -107,7 +108,7 @@ func Test_RetrieveClusterFinalizedHeight(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() var ( - clusterID flow.ChainID = "cluster" + clusterID flow.ChainID = clusterstate.CanonicalClusterID(0, unittest.IdentifierListFixture(1)) err error ) @@ -154,7 +155,7 @@ func Test_RetrieveClusterFinalizedHeight(t *testing.T) { // First writing all three is important to detect bugs, where the logic ignores the cluster ID // and only memorizes the last value stored (irrespective of cluster ID). clusterFinalizedHeights := []uint64{117, 11, 791} - clusterIDs := []flow.ChainID{"cluster-0", "cluster-1", "cluster-2"} + clusterIDs := []flow.ChainID{"cluster-0-00", "cluster-1-ff", "cluster-2-02"} var actual uint64 for i := 0; i < len(clusterFinalizedHeights); i++ { err = operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterIDs[i], &actual) diff --git a/storage/operation/headers.go b/storage/operation/headers.go index 83f17bc9106..a0cf8d0d3bb 100644 --- a/storage/operation/headers.go +++ b/storage/operation/headers.go @@ -13,19 +13,51 @@ import ( // InsertHeader inserts a block header into the database. // // CAUTION: +// - This function must ONLY be used for storing headers of main consensus, NOT CLUSTER consensus. // - The caller must ensure that headerID is a collision-resistant hash of the provided header! // Otherwise, data corruption may occur. 
-// - The caller must acquire one (but not both) of the following locks and hold it until the database -// write has been committed: either [storage.LockInsertBlock] or [storage.LockInsertOrFinalizeClusterBlock]. +// - The caller must acquire the following lock and hold it until the database +// write has been committed: [storage.LockInsertBlock]. // // It returns [storage.ErrAlreadyExists] if the header already exists, i.e. we only insert a new header once. // This error allows the caller to detect duplicate inserts. If the header is stored along with other parts // of the block in the same batch, similar duplication checks can be skipped for storing other parts of the block. // No other error returns are expected during normal operation. func InsertHeader(lctx lockctx.Proof, rw storage.ReaderBatchWriter, headerID flow.Identifier, header *flow.Header) error { - held := lctx.HoldsLock(storage.LockInsertBlock) || lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) + held := lctx.HoldsLock(storage.LockInsertBlock) if !held { - return fmt.Errorf("missing required lock: %s or %s", storage.LockInsertBlock, storage.LockInsertOrFinalizeClusterBlock) + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + + key := MakePrefix(codeHeader, headerID) + exist, err := KeyExists(rw.GlobalReader(), key) + if err != nil { + return err + } + if exist { + return fmt.Errorf("header already exists: %w", storage.ErrAlreadyExists) + } + + return UpsertByKey(rw.Writer(), key, header) +} + +// InsertClusterHeader inserts a cluster block header into the database. +// +// CAUTION: +// - This function must ONLY be used for storing headers produced by CLUSTER consensus. +// - The caller must ensure that headerID is a collision-resistant hash of the provided header! +// Otherwise, data corruption may occur. +// - The caller must acquire the following lock and hold it until the database +// write has been committed: [storage.LockInsertOrFinalizeClusterBlock]. 
+// +// It returns [storage.ErrAlreadyExists] if the header already exists, i.e. we only insert a new header once. +// This error allows the caller to detect duplicate inserts. If the header is stored along with other parts +// of the block in the same batch, similar duplication checks can be skipped for storing other parts of the block. +// No other error returns are expected during normal operation. +func InsertClusterHeader(lctx lockctx.Proof, rw storage.ReaderBatchWriter, headerID flow.Identifier, header *flow.Header) error { + held := lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) + if !held { + return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock) } key := MakePrefix(codeHeader, headerID) diff --git a/storage/operation/headers_test.go b/storage/operation/headers_test.go index 763675e5fab..99c92aa5111 100644 --- a/storage/operation/headers_test.go +++ b/storage/operation/headers_test.go @@ -15,19 +15,20 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) +var expected = &flow.Header{ + HeaderBody: flow.HeaderBody{ + View: 1337, + Timestamp: uint64(time.Now().UnixMilli()), + ParentID: flow.Identifier{0x11}, + ParentVoterIndices: []byte{0x44}, + ParentVoterSigData: []byte{0x88}, + ProposerID: flow.Identifier{0x33}, + }, + PayloadHash: flow.Identifier{0x22}, +} + func TestHeaderInsertCheckRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { - expected := &flow.Header{ - HeaderBody: flow.HeaderBody{ - View: 1337, - Timestamp: uint64(time.Now().UnixMilli()), - ParentID: flow.Identifier{0x11}, - ParentVoterIndices: []byte{0x44}, - ParentVoterSigData: []byte{0x88}, - ProposerID: flow.Identifier{0x33}, - }, - PayloadHash: flow.Identifier{0x22}, - } blockID := expected.ID() lockManager := storage.NewTestingLockManager() @@ -47,6 +48,67 @@ func TestHeaderInsertCheckRetrieve(t *testing.T) { }) } +func TestClusterHeaderInsertCheckRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, 
db storage.DB) { + blockID := expected.ID() + + lockManager := storage.NewTestingLockManager() + + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterHeader(lctx, rw, expected.ID(), expected) + }) + }) + require.NoError(t, err) + + var actual flow.Header + err = operation.RetrieveHeader(db.Reader(), blockID, &actual) + require.NoError(t, err) + + assert.Equal(t, *expected, actual) + }) +} + +func TestHeaderInsertWrongLock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + // without any locks + err := unittest.WithLocks(t, lockManager, []string{}, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, expected.ID(), expected) + }) + }) + require.Error(t, err) + // wrong lock + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, expected.ID(), expected) + }) + }) + require.Error(t, err) + }) +} + +func TestClusterHeaderInsertWrongLock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + // without any locks + err := unittest.WithLocks(t, lockManager, []string{}, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterHeader(lctx, rw, expected.ID(), expected) + }) + }) + require.Error(t, err) + // wrong lock + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + 
return operation.InsertClusterHeader(lctx, rw, expected.ID(), expected) + }) + }) + require.Error(t, err) + }) +} + func TestHeaderIDIndexByCollectionID(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { diff --git a/storage/operation/heights.go b/storage/operation/heights.go index 5c1e9c4ce8a..c2bacf34d7d 100644 --- a/storage/operation/heights.go +++ b/storage/operation/heights.go @@ -6,9 +6,21 @@ import ( "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" ) +var ( + // IncompleteStateError indicates that some information cannot be retrieved from the database, + // which the protocol mandates to be present. This can be a symptom of a corrupted state + // or an incorrectly / incompletely bootstrapped node. In most cases, this is an exception. + // + // ATTENTION: in most cases, [IncompleteStateError] error is a symptom of a corrupted state + // or an incorrectly / incompletely bootstrapped node. Typically, this is an unexpected exception + // and should not be checked for the same way as benign sentinel errors. + IncompleteStateError = errors.New("data required by protocol is missing in database") +) + // UpsertFinalizedHeight upserts the finalized height index, overwriting the current value. // Updates to this index must strictly increase the finalized height. // To enforce this, the caller must check the current finalized height while holding [storage.LockFinalizeBlock]. @@ -19,8 +31,30 @@ func UpsertFinalizedHeight(lctx lockctx.Proof, w storage.Writer, height uint64) return UpsertByKey(w, MakePrefix(codeFinalizedHeight), height) } +// RetrieveFinalizedHeight reads height of the latest finalized block directly from the database. +// +// During bootstrapping, the latest finalized block and its height are indexed and thereafter the +// latest finalized height is only updated (but never removed). 
Hence, for a properly bootstrapped +// node, this function should _always_ return a proper value. +// +// CAUTION: This function should only be called on properly bootstrapped nodes. If the state is +// corrupted or the node is not properly bootstrapped, this function may return [IncompleteStateError]. +// The reason for not returning [storage.ErrNotFound] directly is to avoid confusion between an often +// benign [storage.ErrNotFound] and failed reads of quantities that the protocol mandates to be present. +// +// No error returns are expected during normal operations. func RetrieveFinalizedHeight(r storage.Reader, height *uint64) error { - return RetrieveByKey(r, MakePrefix(codeFinalizedHeight), height) + var h uint64 + err := RetrieveByKey(r, MakePrefix(codeFinalizedHeight), &h) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + // mask the lower-level error to prevent confusion with the often benign `storage.ErrNotFound`: + return fmt.Errorf("latest finalized height could not be read, which should never happen for bootstrapped nodes: %w", IncompleteStateError) + } + return irrecoverable.NewExceptionf("latest finalized height could not be read, which should never happen for bootstrapped nodes: %w", err) + } + *height = h + return nil } // UpsertSealedHeight upserts the latest sealed height, OVERWRITING the current value. @@ -33,8 +67,30 @@ func UpsertSealedHeight(lctx lockctx.Proof, w storage.Writer, height uint64) err return UpsertByKey(w, MakePrefix(codeSealedHeight), height) } +// RetrieveSealedHeight reads height of the latest sealed block directly from the database. +// +// During bootstrapping, the latest sealed block and its height are indexed and thereafter the +// latest sealed height is only updated (but never removed). Hence, for a properly bootstrapped +// node, this function should _always_ return a proper value. +// +// CAUTION: This function should only be called on properly bootstrapped nodes. 
If the state is +// corrupted or the node is not properly bootstrapped, this function may return [IncompleteStateError]. +// The reason for not returning [storage.ErrNotFound] directly is to avoid confusion between an often +// benign [storage.ErrNotFound] and failed reads of quantities that the protocol mandates to be present. +// +// No error returns are expected during normal operations. func RetrieveSealedHeight(r storage.Reader, height *uint64) error { - return RetrieveByKey(r, MakePrefix(codeSealedHeight), height) + var h uint64 + err := RetrieveByKey(r, MakePrefix(codeSealedHeight), &h) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + // mask the lower-level error to prevent confusion with the often benign `storage.ErrNotFound`: + return fmt.Errorf("latest sealed height could not be read, which should never happen for bootstrapped nodes: %w", IncompleteStateError) + } + return irrecoverable.NewExceptionf("latest sealed height could not be read, which should never happen for bootstrapped nodes: %w", err) + } + *height = h + return nil } // InsertEpochFirstHeight inserts the height of the first block in the given epoch. diff --git a/storage/operation/heights_test.go b/storage/operation/heights_test.go index 8b29f603028..5bc519aacd8 100644 --- a/storage/operation/heights_test.go +++ b/storage/operation/heights_test.go @@ -14,6 +14,30 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) +// TestNonBootstrappedRetrieveFinalizedHeight tests that retrieving the finalized height from a non-bootstrapped +// database returns [operation.IncompleteStateError], which is not a [storage.ErrNotFound]. This separate error +// avoids accidental confusion with the very common and typically benign sentinel [storage.ErrNotFound]. 
+func TestNonBootstrappedRetrieveFinalizedHeight(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + var retrieved uint64 + err := operation.RetrieveFinalizedHeight(db.Reader(), &retrieved) + require.ErrorIs(t, err, operation.IncompleteStateError) + require.NotErrorIs(t, err, storage.ErrNotFound) + }) +} + +// TestNonBootstrappedRetrieveSealedHeight tests that retrieving the sealed height from a non-bootstrapped +// database returns [operation.IncompleteStateError], which is not a [storage.ErrNotFound]. This separate error +// avoids accidental confusion with the very common and typically benign sentinel [storage.ErrNotFound]. +func TestNonBootstrappedRetrieveSealedHeight(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + var retrieved uint64 + err := operation.RetrieveSealedHeight(db.Reader(), &retrieved) + require.ErrorIs(t, err, operation.IncompleteStateError) + require.NotErrorIs(t, err, storage.ErrNotFound) + }) +} + func TestFinalizedInsertUpdateRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() diff --git a/storage/store/blocks.go b/storage/store/blocks.go index 738eed63f04..617deb2b7e4 100644 --- a/storage/store/blocks.go +++ b/storage/store/blocks.go @@ -6,6 +6,7 @@ import ( "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" ) @@ -39,6 +40,7 @@ func NewBlocks(db storage.DB, headers *Headers, payloads *Payloads) *Blocks { // // Expected error returns during normal operations: // - [storage.ErrAlreadyExists] if some block with the same ID has already been stored +// - [storage.ErrWrongChain] if the block is part of a different chain than this storage was initialized with func (b *Blocks) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, proposal *flow.Proposal) error { 
blockID := proposal.Block.ID() err := b.headers.storeTx(lctx, rw, blockID, proposal.Block.ToHeader(), proposal.ProposerSigData) @@ -56,6 +58,7 @@ func (b *Blocks) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, pr // finalized and pending blocks. // Expected error returns during normal operations: // - [storage.ErrNotFound] if no block is found +// - [storage.ErrWrongChain] if a block with that ID exists but on a different chain, such as a cluster chain func (b *Blocks) retrieve(blockID flow.Identifier) (*flow.Block, error) { header, err := b.headers.retrieveTx(blockID) if err != nil { @@ -63,7 +66,7 @@ func (b *Blocks) retrieve(blockID flow.Identifier) (*flow.Block, error) { } payload, err := b.payloads.retrieveTx(blockID) if err != nil { - return nil, fmt.Errorf("could not retrieve payload: %w", err) + return nil, irrecoverable.NewExceptionf("could not retrieve payload for block %x: %w", blockID, err) } untrustedBlock := flow.UntrustedBlock{ HeaderBody: header.HeaderBody, @@ -86,8 +89,9 @@ func (b *Blocks) retrieve(blockID flow.Identifier) (*flow.Block, error) { // retrieveProposal returns the proposal with the given block ID. // It is available for finalized and pending blocks. 
-// Expected error returns during normal operations: +// Expected errors during normal operations: // - [storage.ErrNotFound] if no block is found +// - [storage.ErrWrongChain] if a block with that ID exists but on a different chain, such as a cluster chain func (b *Blocks) retrieveProposal(blockID flow.Identifier) (*flow.Proposal, error) { block, err := b.retrieve(blockID) if err != nil { @@ -95,7 +99,7 @@ func (b *Blocks) retrieveProposal(blockID flow.Identifier) (*flow.Proposal, erro } sig, err := b.headers.sigs.retrieveTx(blockID) if err != nil { - return nil, fmt.Errorf("could not retrieve proposer signature: %w", err) + return nil, irrecoverable.NewExceptionf("could not retrieve proposer signature for block %x: %w", blockID, err) } untrustedProposal := flow.UntrustedProposal{ @@ -121,10 +125,9 @@ func (b *Blocks) retrieveProposal(blockID flow.Identifier) (*flow.Proposal, erro // ByID returns the block with the given hash. It is available for all incorporated blocks (validated blocks // that have been appended to any of the known forks) no matter whether the block has been finalized or not. // -// Error returns: -// - storage.ErrNotFound if no block with the corresponding ID was found -// - generic error in case of unexpected failure from the database layer, or failure -// to decode an existing database value +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no block with the corresponding ID was found +// - [storage.ErrWrongChain] if a block with that ID exists but on a different chain, such as a cluster chain func (b *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error) { return b.retrieve(blockID) } @@ -133,10 +136,9 @@ func (b *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error) { // It is available for all incorporated blocks (validated blocks that have been appended to any // of the known forks) no matter whether the block has been finalized or not. 
// -// Error returns: -// - storage.ErrNotFound if no block with the corresponding ID was found -// - generic error in case of unexpected failure from the database layer, or failure -// to decode an existing database value +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no block with the corresponding ID was found +// - [storage.ErrWrongChain] if a block with that ID exists but on a different chain, such as a cluster chain func (b *Blocks) ProposalByID(blockID flow.Identifier) (*flow.Proposal, error) { return b.retrieveProposal(blockID) } @@ -146,55 +148,71 @@ func (b *Blocks) ProposalByID(blockID flow.Identifier) (*flow.Proposal, error) { // at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique // even for non-finalized blocks. // Expected errors during normal operations: -// - `storage.ErrNotFound` if no certified block is known at given view. +// - [storage.ErrNotFound] if no certified block is known at given view. func (b *Blocks) ByView(view uint64) (*flow.Block, error) { blockID, err := b.headers.BlockIDByView(view) if err != nil { return nil, err } - return b.ByID(blockID) + block, err := b.ByID(blockID) + if err != nil { + // failure to retrieve a block that has been indexed indicates state corruption + return nil, irrecoverable.NewExceptionf("could not retrieve indexed block for view %d: %w", view, err) + } + return block, nil } // ProposalByView returns the block proposal with the given view. It is only available for certified blocks. // // Expected errors during normal operations: -// - `storage.ErrNotFound` if no certified block is known at given view. +// - [storage.ErrNotFound] if no certified block is known at given view. 
func (b *Blocks) ProposalByView(view uint64) (*flow.Proposal, error) { blockID, err := b.headers.BlockIDByView(view) if err != nil { return nil, err } - return b.retrieveProposal(blockID) + proposal, err := b.retrieveProposal(blockID) + if err != nil { + // not being able to retrieve a proposal indexed by view indicates state corruption + return nil, irrecoverable.NewExceptionf("could not retrieve proposal for view %d: %w", view, err) + } + return proposal, nil } // ByHeight returns the block at the given height. It is only available // for finalized blocks. // -// Error returns: -// - storage.ErrNotFound if no block for the corresponding height was found -// - generic error in case of unexpected failure from the database layer, or failure -// to decode an existing database value +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no block for the corresponding height was found func (b *Blocks) ByHeight(height uint64) (*flow.Block, error) { blockID, err := b.headers.retrieveIdByHeightTx(height) if err != nil { return nil, err } - return b.retrieve(blockID) + block, err := b.retrieve(blockID) + if err != nil { + // failure to retrieve a block that has been indexed indicates state corruption + return nil, irrecoverable.NewExceptionf("could not retrieve indexed block for height %d: %w", height, err) + } + return block, nil } // ProposalByHeight returns the block at the given height, along with the proposer's // signature on it. It is only available for finalized blocks. 
// -// Error returns: -// - storage.ErrNotFound if no block proposal for the corresponding height was found -// - generic error in case of unexpected failure from the database layer, or failure -// to decode an existing database value +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no block proposal for the corresponding height was found func (b *Blocks) ProposalByHeight(height uint64) (*flow.Proposal, error) { blockID, err := b.headers.retrieveIdByHeightTx(height) if err != nil { return nil, err } - return b.retrieveProposal(blockID) + proposal, err := b.retrieveProposal(blockID) + if err != nil { + // failure to retrieve a block that has been indexed indicates state corruption + return nil, irrecoverable.NewExceptionf("could not retrieve indexed proposal for height %d: %w", height, err) + } + return proposal, nil } // ByCollectionID returns the block for the given [flow.CollectionGuarantee] ID. @@ -204,16 +222,19 @@ func (b *Blocks) ProposalByHeight(height uint64) (*flow.Proposal, error) { // finality. // CAUTION: this method is not backed by a cache and therefore comparatively slow! 
// -// Error returns: -// - storage.ErrNotFound if the collection ID was not found -// - generic error in case of unexpected failure from the database layer, or failure -// to decode an existing database value +// Expected errors during normal operations: +// - [storage.ErrNotFound] if the collection ID was not found func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { blockID, err := b.BlockIDByCollectionID(collID) if err != nil { return nil, err } - return b.ByID(blockID) + block, err := b.ByID(blockID) + if err != nil { + // failure to retrieve a block that has been indexed indicates state corruption + return nil, irrecoverable.NewExceptionf("could not retrieve indexed block %x for collection id %x: %w", blockID, collID, err) + } + return block, nil } // BlockIDByCollectionID returns the block ID for the finalized block which includes the guarantee for the @@ -226,10 +247,8 @@ func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { // finality. // CAUTION: this method is not backed by a cache and therefore comparatively slow! // -// Error returns: -// - storage.ErrNotFound if no FINALIZED block exists containing the expected collection guarantee -// - generic error in case of unexpected failure from the database layer, or failure -// to decode an existing database value +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no FINALIZED block exists containing the expected collection guarantee func (b *Blocks) BlockIDByCollectionID(collID flow.Identifier) (flow.Identifier, error) { guarantee, err := b.payloads.guarantees.ByCollectionID(collID) if err != nil { @@ -257,7 +276,7 @@ func (b *Blocks) BlockIDByCollectionID(collID flow.Identifier) (flow.Identifier, // Hence, this function should be treated as a temporary solution, which requires generalization // (one-to-many mapping) for soft finality and the mature protocol. 
// -// Expected error returns during normal operations: +// Expected errors during normal operations: // - [storage.ErrAlreadyExists] if any collection guarantee is already indexed func (b *Blocks) BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, guaranteeIDs []flow.Identifier) error { return operation.BatchIndexBlockContainingCollectionGuarantees(lctx, rw, blockID, guaranteeIDs) diff --git a/storage/store/blocks_test.go b/storage/store/blocks_test.go index f88ceeb977b..b4edd40aacb 100644 --- a/storage/store/blocks_test.go +++ b/storage/store/blocks_test.go @@ -6,6 +6,7 @@ import ( "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" @@ -19,11 +20,19 @@ func TestBlockStoreAndRetrieve(t *testing.T) { lockManager := storage.NewTestingLockManager() cacheMetrics := &metrics.NoopCollector{} // verify after storing a block should be able to retrieve it back - blocks := store.InitAll(cacheMetrics, db).Blocks + all, err := store.InitAll(cacheMetrics, db, flow.Emulator) + require.NoError(t, err) + blocks := all.Blocks block := unittest.FullBlockFixture() prop := unittest.ProposalFromBlock(block) - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + // Test that retrieving a nonexistent block by ID returns ErrNotFound + _, err = blocks.ByID(block.ID()) + require.ErrorIs(t, err, storage.ErrNotFound) + _, err = blocks.ProposalByID(block.ID()) + require.ErrorIs(t, err, storage.ErrNotFound) + + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return blocks.BatchStore(lctx, rw, prop) }) @@ -46,7 +55,9 @@ func TestBlockStoreAndRetrieve(t *testing.T) { // 
verify after a restart, the block stored in the database is the same // as the original - blocksAfterRestart := store.InitAll(cacheMetrics, db).Blocks + allAfterRestart, err := store.InitAll(cacheMetrics, db, flow.Emulator) + require.NoError(t, err) + blocksAfterRestart := allAfterRestart.Blocks receivedAfterRestart, err := blocksAfterRestart.ByID(block.ID()) require.NoError(t, err) require.Equal(t, *block, *receivedAfterRestart) @@ -57,12 +68,14 @@ func TestBlockIndexByHeightAndRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() cacheMetrics := &metrics.NoopCollector{} - blocks := store.InitAll(cacheMetrics, db).Blocks + all, err := store.InitAll(cacheMetrics, db, flow.Emulator) + require.NoError(t, err) + blocks := all.Blocks block := unittest.FullBlockFixture() prop := unittest.ProposalFromBlock(block) // First store the block - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return blocks.BatchStore(lctx, rw, prop) }) @@ -100,9 +113,13 @@ func TestBlockIndexByHeightAndRetrieve(t *testing.T) { // Test that retrieving by non-existent height returns ErrNotFound _, err = blocks.ByHeight(block.Height + 1000) require.ErrorIs(t, err, storage.ErrNotFound) + _, err = blocks.ProposalByHeight(block.Height + 1000) + require.ErrorIs(t, err, storage.ErrNotFound) // Verify after a restart, the block indexed by height is still retrievable - blocksAfterRestart := store.InitAll(cacheMetrics, db).Blocks + allAfterRestart, err := store.InitAll(cacheMetrics, db, flow.Emulator) + require.NoError(t, err) + blocksAfterRestart := allAfterRestart.Blocks receivedAfterRestart, err := blocksAfterRestart.ByHeight(block.Height) require.NoError(t, err) require.Equal(t, *block, 
*receivedAfterRestart) @@ -113,12 +130,14 @@ func TestBlockIndexByViewAndRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() cacheMetrics := &metrics.NoopCollector{} - blocks := store.InitAll(cacheMetrics, db).Blocks + all, err := store.InitAll(cacheMetrics, db, flow.Emulator) + require.NoError(t, err) + blocks := all.Blocks block := unittest.FullBlockFixture() prop := unittest.ProposalFromBlock(block) // First store the block and index by view - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { err := blocks.BatchStore(lctx, rw, prop) if err != nil { @@ -151,11 +170,55 @@ func TestBlockIndexByViewAndRetrieve(t *testing.T) { // Test that retrieving by non-existent view returns ErrNotFound _, err = blocks.ByView(block.View + 1000) require.ErrorIs(t, err, storage.ErrNotFound) + _, err = blocks.ProposalByView(block.View + 1000) + require.ErrorIs(t, err, storage.ErrNotFound) // Verify after a restart, the block indexed by view is still retrievable - blocksAfterRestart := store.InitAll(cacheMetrics, db).Blocks + allAfterRestart, err := store.InitAll(cacheMetrics, db, flow.Emulator) + require.NoError(t, err) + blocksAfterRestart := allAfterRestart.Blocks receivedAfterRestart, err := blocksAfterRestart.ByView(block.View) require.NoError(t, err) require.Equal(t, *block, *receivedAfterRestart) }) } + +func TestBlockRetrieveWrongChain(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + cacheMetrics := &metrics.NoopCollector{} + all, err := store.InitAll(cacheMetrics, db, flow.Emulator) + require.NoError(t, err) + blocks := all.Blocks + + // insert and finalize a block on a cluster chain + clusterBlock := 
unittest.ClusterBlockFixture() + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(clusterBlock)) + if err != nil { + return err + } + err = operation.IndexClusterBlockHeight(lctx, rw, clusterBlock.ChainID, clusterBlock.Height, clusterBlock.ID()) + if err != nil { + return err + } + return operation.BootstrapClusterFinalizedHeight(lctx, rw, clusterBlock.ChainID, clusterBlock.Height) + }) + }) + require.NoError(t, err) + + // error should reflect that the block ID exists on a cluster chain + _, err = blocks.ByID(clusterBlock.ID()) + require.ErrorIs(t, err, storage.ErrWrongChain) + _, err = blocks.ProposalByID(clusterBlock.ID()) + require.ErrorIs(t, err, storage.ErrWrongChain) + + // However, height index is chain-specific, so should simply reflect + // that the height does not exist on the consensus chain + _, err = blocks.ByHeight(clusterBlock.Height) + require.ErrorIs(t, err, storage.ErrNotFound) + _, err = blocks.ProposalByHeight(clusterBlock.Height) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} diff --git a/storage/store/cluster_blocks.go b/storage/store/cluster_blocks.go index 5592a5fd238..7db63e385e2 100644 --- a/storage/store/cluster_blocks.go +++ b/storage/store/cluster_blocks.go @@ -5,6 +5,7 @@ import ( "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" ) @@ -29,18 +30,26 @@ func NewClusterBlocks(db storage.DB, chainID flow.ChainID, headers *Headers, pay return b } +// ProposalByID returns the collection with the given ID, along with the proposer's signature on it. 
+// It is available for all incorporated collections (validated blocks that have been appended to any
+// of the known forks) no matter whether the collection has been finalized or not.
+//
+// Expected errors during normal operations:
+// - [storage.ErrNotFound] if the block ID was not found
+// - [storage.ErrWrongChain] if the block header exists in the database but is part of a different chain than expected
 func (b *ClusterBlocks) ProposalByID(blockID flow.Identifier) (*cluster.Proposal, error) {
 	header, err := b.headers.ByBlockID(blockID)
 	if err != nil {
 		return nil, fmt.Errorf("could not get header: %w", err)
 	}
+	// further data not being retrievable indicates state corruption
 	payload, err := b.payloads.ByBlockID(blockID)
 	if err != nil {
-		return nil, fmt.Errorf("could not retrieve payload: %w", err)
+		return nil, irrecoverable.NewExceptionf("could not retrieve payload: %w", err)
 	}
 	sig, err := b.headers.sigs.ByBlockID(blockID)
 	if err != nil {
-		return nil, fmt.Errorf("could not retrieve proposer signature: %w", err)
+		return nil, irrecoverable.NewExceptionf("could not retrieve proposer signature: %w", err)
 	}
 	untrustedBlock := cluster.UntrustedBlock{
 		HeaderBody: header.HeaderBody,
@@ -80,11 +89,21 @@ func (b *ClusterBlocks) ProposalByID(blockID flow.Identifier) (*cluster.Proposal
 	return proposal, nil
 }
 
+// ProposalByHeight returns the collection at the given height, along with the proposer's
+// signature on it. It is only available for finalized collections.
+//
+// Expected errors during normal operations:
+// - [storage.ErrNotFound] if no finalized cluster block is known at the given height
 func (b *ClusterBlocks) ProposalByHeight(height uint64) (*cluster.Proposal, error) {
 	var blockID flow.Identifier
 	err := operation.LookupClusterBlockHeight(b.db.Reader(), b.chainID, height, &blockID)
 	if err != nil {
 		return nil, fmt.Errorf("could not look up block: %w", err)
 	}
-	return b.ProposalByID(blockID)
+	proposal, err := b.ProposalByID(blockID)
+	if err != nil {
+		// failure to retrieve a proposal that has been indexed indicates state corruption
+		return nil, irrecoverable.NewExceptionf("could not retrieve proposal for id %x: %w", blockID, err)
+	}
+	return proposal, nil
 }
diff --git a/storage/store/cluster_blocks_test.go b/storage/store/cluster_blocks_test.go
index 1f9377348d3..f493bdc1a23 100644
--- a/storage/store/cluster_blocks_test.go
+++ b/storage/store/cluster_blocks_test.go
@@ -52,10 +52,12 @@ func TestClusterBlocks(t *testing.T) {
 		require.NoError(t, err)
 	}
 
+	clusterHeaders, err := NewClusterHeaders(metrics.NewNoopCollector(), db, blocks[0].ChainID)
+	require.NoError(t, err)
 	clusterBlocks := NewClusterBlocks(
 		db,
 		blocks[0].ChainID,
-		NewHeaders(metrics.NewNoopCollector(), db),
+		clusterHeaders,
 		NewClusterPayloads(metrics.NewNoopCollector(), db),
 	)
diff --git a/storage/store/guarantees_test.go b/storage/store/guarantees_test.go
index de27ab3f5ab..c3e24160dcd 100644
--- a/storage/store/guarantees_test.go
+++ b/storage/store/guarantees_test.go
@@ -26,7 +26,8 @@ func TestGuaranteeStoreRetrieve(t *testing.T) {
 	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
 		lockManager := storage.NewTestingLockManager()
 		metrics := metrics.NewNoopCollector()
-		all := store.InitAll(metrics, db)
+		all, err := store.InitAll(metrics, db, flow.Emulator)
+		require.NoError(t, err)
 		blocks := all.Blocks
 		guarantees := all.Guarantees
 
@@ -38,7 +39,7 @@ func TestGuaranteeStoreRetrieve(t *testing.T) {
 		proposal := unittest.ProposalFromBlock(block)
 
 		// 
attempt to retrieve (still) unknown guarantee - _, err := s.ByCollectionID(guarantee1.ID()) + _, err = s.ByCollectionID(guarantee1.CollectionID) require.ErrorIs(t, err, storage.ErrNotFound) // store guarantee @@ -92,7 +93,8 @@ func TestStoreDuplicateGuarantee(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) blocks := all.Blocks store1 := all.Guarantees expected := unittest.CollectionGuaranteeFixture() @@ -100,7 +102,7 @@ func TestStoreDuplicateGuarantee(t *testing.T) { proposal := unittest.ProposalFromBlock(block) // store guarantee - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return blocks.BatchStore(lctx, rw, proposal) }) @@ -131,14 +133,15 @@ func TestStoreConflictingGuarantee(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) blocks := all.Blocks store1 := all.Guarantees expected := unittest.CollectionGuaranteeFixture() block := unittest.BlockWithGuaranteesFixture([]*flow.CollectionGuarantee{expected}) proposal := unittest.ProposalFromBlock(block) - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return blocks.BatchStore(lctx, rw, proposal) }) diff --git 
a/storage/store/headers.go b/storage/store/headers.go index 8fd1210d76d..f44e6bcc7a5 100644 --- a/storage/store/headers.go +++ b/storage/store/headers.go @@ -8,7 +8,9 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation" ) @@ -21,34 +23,90 @@ type Headers struct { heightCache *Cache[uint64, flow.Identifier] viewCache *Cache[uint64, flow.Identifier] sigs *proposalSignatures + chainID flow.ChainID } var _ storage.Headers = (*Headers)(nil) -// NewHeaders creates a Headers instance, which stores block headers. -// It supports storing, caching and retrieving by block ID and the additionally indexed by header height. -func NewHeaders(collector module.CacheMetrics, db storage.DB) *Headers { +// NewHeaders creates a Headers instance, which manages block headers of the main consensus (not cluster consensus). +// It supports storing, caching and retrieving by block ID, and additionally indexes by header height and view. +// Must be initialized with a non-cluster chainID; see [flow.AllChainIDs] and [cluster.IsCanonicalClusterID]. +// No errors are expected during normal operations. 
+func NewHeaders(collector module.CacheMetrics, db storage.DB, chainID flow.ChainID) (*Headers, error) {
+	if cluster.IsCanonicalClusterID(chainID) {
+		return nil, irrecoverable.NewExceptionf("NewHeaders called on cluster chain ID %s - use NewClusterHeaders instead", chainID)
+	}
 	storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, header *flow.Header) error {
+		if header.ChainID != chainID {
+			return fmt.Errorf("expected chain ID %v, got %v: %w", chainID, header.ChainID, storage.ErrWrongChain)
+		}
+		if !lctx.HoldsLock(storage.LockInsertBlock) {
+			return fmt.Errorf("missing lock: %v", storage.LockInsertBlock)
+		}
 		return operation.InsertHeader(lctx, rw, blockID, header)
 	}
-
-	retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.Header, error) {
-		var header flow.Header
-		err := operation.RetrieveHeader(r, blockID, &header)
-		return &header, err
-	}
-
 	retrieveHeight := func(r storage.Reader, height uint64) (flow.Identifier, error) {
 		var id flow.Identifier
 		err := operation.LookupBlockHeight(r, height, &id)
 		return id, err
 	}
-
 	retrieveView := func(r storage.Reader, view uint64) (flow.Identifier, error) {
 		var id flow.Identifier
 		err := operation.LookupCertifiedBlockByView(r, view, &id)
 		return id, err
 	}
+	return newHeaders(collector, db, chainID, storeWithLock, retrieveHeight, retrieveView), nil
+}
+
+// NewClusterHeaders creates a Headers instance for a collection cluster chain, which stores block headers for cluster blocks.
+// It supports storing, caching and retrieving by block ID, and additionally an index by header height.
+// It does NOT support retrieving by view.
+// Must be initialized with a valid cluster chain ID; see [cluster.IsCanonicalClusterID].
+// No errors are expected during normal operations.
+func NewClusterHeaders(collector module.CacheMetrics, db storage.DB, chainID flow.ChainID) (*Headers, error) { + if !cluster.IsCanonicalClusterID(chainID) { + return nil, irrecoverable.NewExceptionf("NewClusterHeaders called on non-cluster chain ID %s - use NewHeaders instead", chainID) + } + storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, header *flow.Header) error { + if header.ChainID != chainID { + return fmt.Errorf("expected chain ID %v, got %v: %w", chainID, header.ChainID, storage.ErrWrongChain) + } + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock) + } + return operation.InsertClusterHeader(lctx, rw, blockID, header) + } + retrieveHeight := func(r storage.Reader, height uint64) (flow.Identifier, error) { + var id flow.Identifier + err := operation.LookupClusterBlockHeight(r, chainID, height, &id) + return id, err + } + retrieveView := func(r storage.Reader, view uint64) (flow.Identifier, error) { + return flow.ZeroID, storage.ErrNotAvailableForClusterConsensus + } + return newHeaders(collector, db, chainID, storeWithLock, retrieveHeight, retrieveView), nil +} + +// newHeaders contains shared logic for Header storage, including storing and retrieving by block ID +func newHeaders(collector module.CacheMetrics, + db storage.DB, + chainID flow.ChainID, + storeWithLock storeWithLockFunc[flow.Identifier, *flow.Header], + retrieveHeight retrieveFunc[uint64, flow.Identifier], + retrieveView retrieveFunc[uint64, flow.Identifier], +) *Headers { + retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.Header, error) { + var header flow.Header + err := operation.RetrieveHeader(r, blockID, &header) + if err != nil { + return nil, err + } + // raise an error when the retrieved header is for a different chain than expected + if header.ChainID != chainID { + return nil, fmt.Errorf("expected chain ID %v, got %v: %w", 
chainID, header.ChainID, storage.ErrWrongChain) + } + return &header, nil + } h := &Headers{ db: db, @@ -65,7 +123,8 @@ func NewHeaders(collector module.CacheMetrics, db storage.DB) *Headers { withLimit[uint64, flow.Identifier](4*flow.DefaultTransactionExpiry), withRetrieve(retrieveView)), - sigs: newProposalSignatures(collector, db), + sigs: newProposalSignatures(collector, db), + chainID: chainID, } return h @@ -83,6 +142,7 @@ func NewHeaders(collector module.CacheMetrics, db storage.DB) *Headers { // It returns [storage.ErrAlreadyExists] if the header already exists, i.e. we only insert a new header once. // This error allows the caller to detect duplicate inserts. If the header is stored along with other parts // of the block in the same batch, similar duplication checks can be skipped for storing other parts of the block. +// Returns [storage.ErrWrongChain] if the header's ChainID does not match the one used when initializing the storage. // No other errors are expected during normal operation. func (h *Headers) storeTx( lctx lockctx.Proof, @@ -99,6 +159,10 @@ func (h *Headers) storeTx( return h.sigs.storeTx(lctx, rw, blockID, proposalSig) } +// retrieveTx returns the header with the given ID. +// Expected error returns during normal operations: +// - [storage.ErrNotFound] if no block header with the given ID exists +// - [storage.ErrWrongChain] if the block header exists in the database but is part of a different chain than expected func (h *Headers) retrieveTx(blockID flow.Identifier) (*flow.Header, error) { val, err := h.cache.Get(h.db.Reader(), blockID) if err != nil { @@ -107,6 +171,11 @@ func (h *Headers) retrieveTx(blockID flow.Identifier) (*flow.Header, error) { return val, nil } +// retrieveProposalTx returns a proposal header of the block with the given ID. +// Essentially, this is the header, along with the proposer's signature. 
+// Expected error returns during normal operations:
+// - [storage.ErrNotFound] if no block header with the given ID exists
+// - [storage.ErrWrongChain] if the block header exists in the database but is part of a different chain than expected
 func (h *Headers) retrieveProposalTx(blockID flow.Identifier) (*flow.ProposalHeader, error) {
 	header, err := h.cache.Get(h.db.Reader(), blockID)
 	if err != nil {
@@ -114,13 +183,19 @@ func (h *Headers) retrieveProposalTx(blockID flow.Identifier) (*flow.ProposalHea
 	}
 	sig, err := h.sigs.retrieveTx(blockID)
 	if err != nil {
-		return nil, fmt.Errorf("could not retrieve proposer signature for id %x: %w", blockID, err)
+		// a missing proposer signature implies state corruption
+		return nil, irrecoverable.NewExceptionf("could not retrieve proposer signature for id %x: %w", blockID, err)
 	}
 	return &flow.ProposalHeader{Header: header, ProposerSigData: sig}, nil
 }
 
-// results in [storage.ErrNotFound] for unknown height
+// retrieveIdByHeightTx returns the block ID for the given finalized height.
+// Expected error returns during normal operations:
+// - [storage.ErrNotFound] if no finalized block is known at the given height (for the chain this Headers instance is bound to)
 func (h *Headers) retrieveIdByHeightTx(height uint64) (flow.Identifier, error) {
+	// This method can only return IDs for the desired chain. This is because the height cache is populated
+	// on retrieval, using a chain-specific database index `height` -> `ID`. Only blocks of the respective chain
+	// are added to the index. Hence, only blocks of the respective chain are put into the cache on retrieval.
 	blockID, err := h.heightCache.Get(h.db.Reader(), height)
 	if err != nil {
 		return flow.ZeroID, fmt.Errorf("failed to retrieve block ID for height %d: %w", height, err)
 	}
@@ -131,6 +206,7 @@ func (h *Headers) retrieveIdByHeightTx(height uint64) (flow.Identifier, error) {
 // ByBlockID returns the header with the given ID. It is available for finalized blocks and those pending finalization.
// Error returns: // - [storage.ErrNotFound] if no block header with the given ID exists +// - [storage.ErrWrongChain] if the block header exists in the database but is part of a different chain than expected func (h *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error) { return h.retrieveTx(blockID) } @@ -139,6 +215,7 @@ func (h *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error) { // It is available for finalized blocks and those pending finalization. // Error returns: // - [storage.ErrNotFound] if no block header or proposer signature with the given blockID exists +// - [storage.ErrWrongChain] if the block header exists in the database but is part of a different chain than expected func (h *Headers) ProposalByBlockID(blockID flow.Identifier) (*flow.ProposalHeader, error) { return h.retrieveProposalTx(blockID) } @@ -149,39 +226,53 @@ func (h *Headers) ProposalByBlockID(blockID flow.Identifier) (*flow.ProposalHead func (h *Headers) ByHeight(height uint64) (*flow.Header, error) { blockID, err := h.retrieveIdByHeightTx(height) if err != nil { - return nil, err + return nil, fmt.Errorf("could not retrieve header for height %d: %w", height, err) } - return h.retrieveTx(blockID) + header, err := h.retrieveTx(blockID) + if err != nil { + // any error here implies state corruption, since the block indicated by the height index was unavailable + return nil, irrecoverable.NewExceptionf("could not retrieve indexed header %x for height %d: %w", blockID, height, err) + } + return header, nil } -// ByView returns the block with the given view. It is only available for certified blocks. +// ByView returns the block with the given view. It is only available for certified blocks on a consensus chain. // Certified blocks are the blocks that have received QC. Hotstuff guarantees that for each view, // at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique // even for non-finalized blocks. 
// // Expected errors during normal operations: // - [storage.ErrNotFound] if no certified block is known at given view. +// - [storage.ErrNotAvailableForClusterConsensus] if called on a cluster Headers instance (created by [NewClusterHeaders]) func (h *Headers) ByView(view uint64) (*flow.Header, error) { blockID, err := h.viewCache.Get(h.db.Reader(), view) if err != nil { return nil, err } - return h.retrieveTx(blockID) + header, err := h.retrieveTx(blockID) + if err != nil { + // any error here implies state corruption, since the block indicated by the view index was unavailable + return nil, irrecoverable.NewExceptionf("could not retrieve indexed header %x for view %d: %w", blockID, view, err) + } + return header, nil } -// Exists returns true if a header with the given ID has been stored. +// Exists returns true if a header with the given ID has been stored on the appropriate chain. // No errors are expected during normal operation. func (h *Headers) Exists(blockID flow.Identifier) (bool, error) { - // if the block is in the cache, return true + // if the block is in the cache, return true (blocks on a different chain are never cached) if ok := h.cache.IsCached(blockID); ok { return ok, nil } - // otherwise, check badger store - exists, err := operation.BlockExists(h.db.Reader(), blockID) + // otherwise, try retrieve the header and check the ChainID is correct + _, err := h.retrieveTx(blockID) if err != nil { + if errors.Is(err, storage.ErrNotFound) || errors.Is(err, storage.ErrWrongChain) { + return false, nil + } return false, fmt.Errorf("could not check existence: %w", err) } - return exists, nil + return true, nil } // BlockIDByHeight returns the block ID that is finalized at the given height. 
It is an optimized @@ -202,21 +293,20 @@ func (h *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error) { // // Expected error returns during normal operations: // - [storage.ErrNotFound] if no block with the given parentID is known +// - [storage.ErrWrongChain] if the parent is part of a different chain than expected func (h *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) { + // first check the parent exists on the correct chain + _, err := h.retrieveTx(parentID) + if err != nil { + return nil, fmt.Errorf("could not check existence of parent %x: %w", parentID, err) + } var blockIDs flow.IdentifierList - err := operation.RetrieveBlockChildren(h.db.Reader(), parentID, &blockIDs) + err = operation.RetrieveBlockChildren(h.db.Reader(), parentID, &blockIDs) if err != nil { // if not found error is returned, there are two possible reasons: - // 1. the parent block does not exist, in which case we should return not found error + // 1. the parent block does not exist - has already been ruled out above // 2. 
the parent block exists but has no children, in which case we should return empty list if errors.Is(err, storage.ErrNotFound) { - exists, err := h.Exists(parentID) - if err != nil { - return nil, fmt.Errorf("could not check existence of parent %x: %w", parentID, err) - } - if !exists { - return nil, fmt.Errorf("cannot retrieve children of unknown block %x: %w", parentID, storage.ErrNotFound) - } // parent exists but has no children return []*flow.Header{}, nil } @@ -226,7 +316,8 @@ func (h *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) { for _, blockID := range blockIDs { header, err := h.ByBlockID(blockID) if err != nil { - return nil, fmt.Errorf("could not retrieve child (%x): %w", blockID, err) + // failure to retrieve an indexed child indicates state corruption + return nil, irrecoverable.NewExceptionf("could not retrieve indexed block %x: %w", blockID, err) } headers = append(headers, header) } @@ -235,7 +326,8 @@ func (h *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) { // BlockIDByView returns the block ID that is certified at the given view. It is an optimized // version of `ByView` that skips retrieving the block. Expected errors during normal operations: -// - `[storage.ErrNotFound] if no certified block is known at given view. +// - [storage.ErrNotFound] if no certified block is known at given view. +// - [storage.ErrNotAvailableForClusterConsensus] if called on a cluster Headers instance (created by [NewClusterHeaders]) // // NOTE: this method is not available until next spork (mainnet27) or a migration that builds the index. func (h *Headers) BlockIDByView(view uint64) (flow.Identifier, error) { @@ -246,6 +338,7 @@ func (h *Headers) BlockIDByView(view uint64) (flow.Identifier, error) { return blockID, nil } +// Deprecated: Undocumented, hence unsafe for public use. 
func (h *Headers) FindHeaders(filter func(header *flow.Header) bool) ([]flow.Header, error) { blocks := make([]flow.Header, 0, 1) err := operation.FindHeaders(h.db.Reader(), filter, &blocks) diff --git a/storage/store/headers_test.go b/storage/store/headers_test.go index bc4f8ceb12f..9ace00b1da4 100644 --- a/storage/store/headers_test.go +++ b/storage/store/headers_test.go @@ -6,6 +6,7 @@ import ( "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/storage/operation" "github.com/onflow/flow-go/storage/operation/dbtest" "github.com/onflow/flow-go/storage/store" @@ -21,7 +22,8 @@ func TestHeaderStoreRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) headers := all.Headers blocks := all.Blocks @@ -29,7 +31,7 @@ func TestHeaderStoreRetrieve(t *testing.T) { block := proposal.Block // store block which will also store header - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return blocks.BatchStore(lctx, rw, proposal) }) @@ -48,6 +50,14 @@ func TestHeaderStoreRetrieve(t *testing.T) { actual, err := headers.ByHeight(block.Height) require.NoError(t, err) require.Equal(t, block.ToHeader(), actual) + // retrieve by ID + actual, err = headers.ByBlockID(block.ID()) + require.NoError(t, err) + require.Equal(t, block.ToHeader(), actual) + // retrieve with proposer signature + headerProp, err := headers.ProposalByBlockID(block.ID()) + require.NoError(t, err) + require.Equal(t, proposal.ProposalHeader(), headerProp) }) } @@ -55,7 +65,8 
@@ func TestHeaderIndexByViewAndRetrieve(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) headers := all.Headers blocks := all.Blocks @@ -63,7 +74,7 @@ func TestHeaderIndexByViewAndRetrieve(t *testing.T) { block := proposal.Block // store block which will also store header - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return blocks.BatchStore(lctx, rw, proposal) }) @@ -82,18 +93,28 @@ func TestHeaderIndexByViewAndRetrieve(t *testing.T) { actual, err := headers.ByView(block.View) require.NoError(t, err) require.Equal(t, block.ToHeader(), actual) + + // verify error sentinel of cluster Headers ByView + clusterChainID := cluster.CanonicalClusterID(0, unittest.IdentifierListFixture(1)) + clusterHeaders, err := store.NewClusterHeaders(metrics, db, clusterChainID) + require.NoError(t, err) + _, err = clusterHeaders.ByView(block.View) + require.ErrorIs(t, err, storage.ErrNotAvailableForClusterConsensus) + _, err = clusterHeaders.ByView(block.View + 1) + require.ErrorIs(t, err, storage.ErrNotAvailableForClusterConsensus) }) } func TestHeaderRetrieveWithoutStore(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { metrics := metrics.NewNoopCollector() - headers := store.NewHeaders(metrics, db) + headers, err := store.NewHeaders(metrics, db, flow.Emulator) + require.NoError(t, err) header := unittest.BlockHeaderFixture() // retrieve header by height, should err as not store before height - _, err := headers.ByHeight(header.Height) + _, err = headers.ByHeight(header.Height) require.ErrorIs(t, err, 
storage.ErrNotFound) }) } @@ -102,11 +123,13 @@ func TestHeaderRetrieveWithoutStore(t *testing.T) { // 1. a known parent with no children should return an empty list; // 2. a known parent with 3 children should return the headers of those children; // 3. an unknown parent should return [storage.ErrNotFound]. +// 4. a known parent on a different chain should return [storage.ErrWrongChain]. func TestHeadersByParentID(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) headers := all.Headers blocks := all.Blocks @@ -115,7 +138,7 @@ func TestHeadersByParentID(t *testing.T) { parentBlock := parentProposal.Block // Store parent block - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return blocks.BatchStore(lctx, rw, parentProposal) }) @@ -158,6 +181,17 @@ func TestHeadersByParentID(t *testing.T) { nonExistentParent := unittest.IdentifierFixture() _, err = headers.ByParentID(nonExistentParent) require.ErrorIs(t, err, storage.ErrNotFound) + + // Test case 4: parent on a different chain should return ErrWrongChain + clusterBlock := unittest.ClusterBlockFixture() + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(clusterBlock)) + }) + }) + require.NoError(t, err) + _, err = headers.ByParentID(clusterBlock.ID()) + require.ErrorIs(t, err, storage.ErrWrongChain) }) } @@ -172,7 +206,8 @@ func 
TestHeadersByParentIDChainStructure(t *testing.T) { dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) headers := all.Headers blocks := all.Blocks @@ -209,7 +244,6 @@ func TestHeadersByParentIDChainStructure(t *testing.T) { require.Len(t, children, 1) require.Equal(t, child.ToHeader(), children[0]) - // Test that child1 returns its direct children (grandchild1, grandchild2) // Test that child returns its direct children (grandchild1, grandchild2) grandchildren, err := headers.ByParentID(child.ID()) require.NoError(t, err) @@ -226,3 +260,108 @@ func TestHeadersByParentIDChainStructure(t *testing.T) { require.Empty(t, children) }) } + +// TestHeadersStoreWrongChainID tests that attempting to store a block with a different chainID than +// expected returns the appropriate sentinel error. 
+func TestHeadersStoreWrongChainID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) + blocks := all.Blocks + // the underlying Headers has chainID flow.Emulator + + clusterChain := cluster.CanonicalClusterID(0, unittest.IdentifierListFixture(1)) + clusterChain2 := cluster.CanonicalClusterID(0, unittest.IdentifierListFixture(1)) + + // A [flow.Proposal] with a mismatched chain should not be stored + for _, invalidChainID := range []flow.ChainID{clusterChain, clusterChain2, flow.Localnet, flow.Testnet, flow.ChainID("invalid-chain")} { + invalidBlock := unittest.BlockFixture() + invalidBlock.ChainID = invalidChainID + invalidProposal := unittest.ProposalFromBlock(invalidBlock) + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, invalidProposal) + }) + }) + require.ErrorIs(t, err, storage.ErrWrongChain) + } + }) +} + +// TestHeadersRetrieveWrongChainID tests that methods of Headers throw an appropriate sentinel error +// when attempting to retrieve data that does not match the expected chain. 
+func TestHeadersRetrieveWrongChainID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) + headers := all.Headers + blocks := all.Blocks + + clusterChain := cluster.CanonicalClusterID(0, unittest.IdentifierListFixture(1)) + clusterChain2 := cluster.CanonicalClusterID(0, unittest.IdentifierListFixture(1)) + clusterHeaders, err := store.NewClusterHeaders(metrics, db, clusterChain) + require.NoError(t, err) + + // Cluster Headers should not be able to retrieve a stored header for a different chain. + // 1. store and index a block on main consensus chain + proposal := unittest.ProposalFixture() + block := proposal.Block + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, proposal) + }) + }) + require.NoError(t, err) + err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, block.Height, block.ID()) + }) + }) + require.NoError(t, err) + + // 2. check we can retrieve header by height and ID using the correct header storage instance + actual, err := headers.ByHeight(block.Height) + require.NoError(t, err) + require.Equal(t, block.ToHeader(), actual) + actual, err = headers.ByBlockID(block.ID()) + require.NoError(t, err) + require.Equal(t, block.ToHeader(), actual) + headerProp, err := headers.ProposalByBlockID(block.ID()) + require.NoError(t, err) + require.Equal(t, proposal.ProposalHeader(), headerProp) + + // 3. 
clusterHeaders should not be able to retrieve that block by height or ID + _, err = clusterHeaders.ByHeight(block.Height) + require.ErrorIs(t, err, storage.ErrNotFound) // there are no finalized cluster blocks at any height + _, err = clusterHeaders.ByBlockID(block.ID()) + require.ErrorIs(t, err, storage.ErrWrongChain) + _, err = clusterHeaders.ProposalByBlockID(block.ID()) + require.ErrorIs(t, err, storage.ErrWrongChain) + + // 4. Store a block on a different cluster chain + differentClusterBlock := unittest.ClusterBlockFixture() + differentClusterBlock.ChainID = clusterChain2 + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(differentClusterBlock)) + }) + }) + require.NoError(t, err) + + // 5. clusterHeaders should not be able to retrieve it, as it is for a different cluster chain + _, err = clusterHeaders.ByBlockID(differentClusterBlock.ID()) + require.ErrorIs(t, err, storage.ErrWrongChain) + _, err = clusterHeaders.ProposalByBlockID(differentClusterBlock.ID()) + require.ErrorIs(t, err, storage.ErrWrongChain) + + // 6. 
main consensus chain Headers should also not be able to retrieve the cluster header + _, err = headers.ByBlockID(differentClusterBlock.ID()) + require.ErrorIs(t, err, storage.ErrWrongChain) + _, err = headers.ProposalByBlockID(differentClusterBlock.ID()) + require.ErrorIs(t, err, storage.ErrWrongChain) + }) +} diff --git a/storage/store/init.go b/storage/store/init.go index a4ec067d16c..7ddff6f9c1d 100644 --- a/storage/store/init.go +++ b/storage/store/init.go @@ -1,6 +1,9 @@ package store import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/storage" ) @@ -27,8 +30,15 @@ type All struct { Collections *Collections } -func InitAll(metrics module.CacheMetrics, db storage.DB) *All { - headers := NewHeaders(metrics, db) +// InitAll initializes the common storage abstractions used by all node roles (with default cache sizes +// suitable for mainnet). The chain ID indicates which Flow network the node is operating on and references +// the ID of the main consensus (not the chains built by collector clusters) +// No errors are expected during normal operations. 
+func InitAll(metrics module.CacheMetrics, db storage.DB, chainID flow.ChainID) (*All, error) { + headers, err := NewHeaders(metrics, db, chainID) + if err != nil { + return nil, fmt.Errorf("instantiating header storage abstraction failed: %w", err) + } guarantees := NewGuarantees(metrics, db, DefaultCacheSize, DefaultCacheSize) seals := NewSeals(metrics, db) index := NewIndex(metrics, db) @@ -68,5 +78,5 @@ func InitAll(metrics module.CacheMetrics, db storage.DB) *All { Transactions: transactions, Collections: collections, - } + }, nil } diff --git a/storage/store/payloads_test.go b/storage/store/payloads_test.go index 604ff48b9cc..942e23b5a25 100644 --- a/storage/store/payloads_test.go +++ b/storage/store/payloads_test.go @@ -6,6 +6,7 @@ import ( "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/operation/dbtest" @@ -18,7 +19,8 @@ func TestPayloadStoreRetrieve(t *testing.T) { lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() - all := store.InitAll(metrics, db) + all, err := store.InitAll(metrics, db, flow.Emulator) + require.NoError(t, err) payloads := all.Payloads blocks := all.Blocks @@ -28,7 +30,7 @@ func TestPayloadStoreRetrieve(t *testing.T) { require.Equal(t, expected, block.Payload) blockID := block.ID() - err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { return blocks.BatchStore(lctx, rw, proposal) }) diff --git a/utils/unittest/cluster_block.go b/utils/unittest/cluster_block.go index 55e2206a64b..e766f46d554 100644 --- a/utils/unittest/cluster_block.go +++ b/utils/unittest/cluster_block.go @@ -6,6 +6,7 @@ import ( 
"github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" + clusterstate "github.com/onflow/flow-go/state/cluster" ) var ClusterBlock clusterBlockFactory @@ -17,6 +18,7 @@ func ClusterBlockFixture(opts ...func(*cluster.Block)) *cluster.Block { HeaderBody: HeaderBodyFixture(), Payload: *ClusterPayloadFixture(3), } + block.ChainID = clusterstate.CanonicalClusterID(0, IdentifierListFixture(1)) for _, opt := range opts { opt(block) } @@ -61,7 +63,7 @@ func (f *clusterBlockFactory) WithPayload(payload cluster.Payload) func(*cluster func (f *clusterBlockFactory) Genesis() (*cluster.Block, error) { headerBody, err := flow.NewRootHeaderBody(flow.UntrustedHeaderBody{ View: 0, - ChainID: "cluster", + ChainID: clusterstate.CanonicalClusterID(0, IdentifierListFixture(1)), Timestamp: uint64(flow.GenesisTime.UnixMilli()), ParentID: flow.ZeroID, }) diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index c18ca4556bd..155322a9da1 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -46,6 +46,7 @@ import ( "github.com/onflow/flow-go/network/message" p2pconfig "github.com/onflow/flow-go/network/p2p/config" "github.com/onflow/flow-go/network/p2p/keyutils" + clusterstate "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/protocol_state" @@ -720,7 +721,7 @@ func CollectionGuaranteeFixture(options ...func(*flow.CollectionGuarantee)) *flo guarantee := &flow.CollectionGuarantee{ CollectionID: IdentifierFixture(), ReferenceBlockID: IdentifierFixture(), - ClusterChainID: flow.ChainID("cluster-1-00000000"), + ClusterChainID: clusterstate.CanonicalClusterID(1, IdentifierListFixture(1)), SignerIndices: RandomBytes(16), Signature: SignatureFixture(), }