Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 14 additions & 2 deletions src/new_index/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,10 @@ pub enum DBFlush {

impl DB {
/// Open the RocksDB database at `path` with a private block cache.
///
/// Thin convenience wrapper around [`DB::open_with_cache`] that passes
/// `None` for the shared cache, so `open_with_cache` allocates its own
/// LRU block cache sized from `config.db_block_cache_mb`.
///
/// * `path` - filesystem location of the database directory.
/// * `config` - runtime configuration (cache sizing, other DB options).
/// * `verify_compat` - forwarded to `open_with_cache`; presumably gates a
///   schema/compatibility check on open — confirm in the full body, which
///   is not visible in this diff hunk.
pub fn open(path: &Path, config: &Config, verify_compat: bool) -> DB {
    Self::open_with_cache(path, config, verify_compat, None)
}

pub fn open_with_cache(path: &Path, config: &Config, verify_compat: bool, shared_cache: Option<&rocksdb::Cache>) -> DB {
debug!("opening DB at {:?}", path);
let mut db_opts = rocksdb::Options::default();
db_opts.create_if_missing(true);
Expand Down Expand Up @@ -148,8 +152,16 @@ impl DB {

// Configure block cache and table options
let mut block_opts = rocksdb::BlockBasedOptions::default();
let cache_size_bytes = config.db_block_cache_mb * 1024 * 1024;
block_opts.set_block_cache(&rocksdb::Cache::new_lru_cache(cache_size_bytes));
let owned_cache;
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Consider adding a type annotation for readability:
let owned_cache: rocksdb::Cache;

let cache = match shared_cache {
Some(c) => c,
None => {
let cache_size_bytes = config.db_block_cache_mb * 1024 * 1024;
owned_cache = rocksdb::Cache::new_lru_cache(cache_size_bytes);
&owned_cache
}
};
block_opts.set_block_cache(cache);
// Store index and filter blocks inside the block cache so their memory is
// bounded by --db-block-cache-mb. Without this, RocksDB allocates table-reader
// memory (index + filter blocks) on the heap separately for every open SST file.
Expand Down
15 changes: 12 additions & 3 deletions src/new_index/schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -61,15 +61,24 @@ impl Store {
pub fn open(config: &Config, metrics: &Metrics, verify_compat: bool) -> Self {
let path = config.db_path.join("newindex");

let txstore_db = DB::open(&path.join("txstore"), config, verify_compat);
// Create a single shared LRU cache for all three DBs. The total size is
// --db-block-cache-mb (not multiplied by 3). RocksDB's LRU cache is
// thread-safe, so all DBs share one eviction pool. This lets the
// txstore (which holds the bulk of the data) claim as much cache as it
// needs without being artificially capped at 1/3 of the total.
let cache_size_bytes = config.db_block_cache_mb * 1024 * 1024;
let shared_cache = rocksdb::Cache::new_lru_cache(cache_size_bytes);
info!("shared LRU block cache: db_block_cache_mb='{}'", config.db_block_cache_mb);

let txstore_db = DB::open_with_cache(&path.join("txstore"), config, verify_compat, Some(&shared_cache));
let added_blockhashes = load_blockhashes(&txstore_db, &BlockRow::done_filter());
debug!("{} blocks were added", added_blockhashes.len());

let history_db = DB::open(&path.join("history"), config, verify_compat);
let history_db = DB::open_with_cache(&path.join("history"), config, verify_compat, Some(&shared_cache));
let indexed_blockhashes = load_blockhashes(&history_db, &BlockRow::done_filter());
debug!("{} blocks were indexed", indexed_blockhashes.len());

let cache_db = DB::open(&path.join("cache"), config, verify_compat);
let cache_db = DB::open_with_cache(&path.join("cache"), config, verify_compat, Some(&shared_cache));

let db_metrics = Arc::new(RocksDbMetrics::new(&metrics));
txstore_db.start_stats_exporter(Arc::clone(&db_metrics), "txstore_db");
Expand Down
Loading