Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
119 changes: 113 additions & 6 deletions crates/adapters/mock-zkvm/src/host.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,14 @@
use std::fmt::Debug;
use std::sync::{Arc, Mutex};

use crate::notifier::NotificationManager;
use crate::{MockCodeCommitment, MockProof, MockZkGuest};
use serde::de::DeserializeOwned;
use serde::Serialize;
use sov_rollup_interface::common::SlotNumber;
use sov_rollup_interface::da::DaSpec;
use sov_rollup_interface::zk::aggregated_proof::{
BlockProof, OuterZkvmHost, SerializedAggregatedProof,
AggregatedProofPublicData, BlockProof, OuterZkvmHost, SerializedAggregatedProof,
};
use sov_rollup_interface::zk::SerializedZkProof;

Expand All @@ -12,6 +17,48 @@ use sov_rollup_interface::zk::SerializedZkProof;
pub struct MockZkvmHost {
// Fans proof notifications out to subscribers; exact semantics live in
// crate::notifier::NotificationManager — not visible here.
notification_manager: NotificationManager,
// Presumably makes proof production block until a proof is requested when
// true (set by `new` vs. `new_non_blocking`) — TODO confirm against the
// ZkvmHost impl, which is not visible in this chunk.
wait_for_proof: bool,
/// Anchor extracted from the most recently produced aggregated proof.
/// Shared across clones so that continuity assertions in
/// [`OuterZkvmHost::run_proof_aggregation`] hold across the whole prover
/// service.
previous_anchor: Arc<Mutex<Option<PreviousAggregatedProofAnchor>>>,
}

/// Continuity anchor extracted from a previously produced aggregated proof.
///
/// The state roots are stored bincode-serialized so the anchor does not need
/// a concrete `Root` type parameter; they are decoded on demand when the next
/// aggregation is checked for continuity.
#[derive(Clone, Debug)]
struct PreviousAggregatedProofAnchor {
/// Final slot number covered by the previous aggregated proof; the next
/// aggregation is expected to start at `final_slot_number.next()`.
final_slot_number: SlotNumber,
/// Bincode-serialized genesis state root of the previous aggregation.
genesis_state_root: Vec<u8>,
/// Bincode-serialized final state root of the previous aggregation.
final_state_root: Vec<u8>,
}

impl PreviousAggregatedProofAnchor {
    /// Builds an anchor from the public data of a freshly produced aggregated
    /// proof, bincode-serializing both state roots so the anchor stays
    /// independent of the concrete `Root` type.
    ///
    /// Panics if either state root fails to serialize, which would indicate a
    /// broken `Serialize` impl rather than a recoverable condition.
    fn from_public_data<Address, Da, Root>(
        public_data: &AggregatedProofPublicData<Address, Da, Root>,
    ) -> Self
    where
        Address: Serialize,
        Da: DaSpec,
        Root: Serialize,
    {
        let genesis_state_root = bincode::serialize(&public_data.genesis_state_root)
            .expect("genesis_state_root must be bincode-serializable");
        let final_state_root = bincode::serialize(&public_data.final_state_root)
            .expect("final_state_root must be bincode-serializable");

        Self {
            final_slot_number: public_data.final_slot_number,
            genesis_state_root,
            final_state_root,
        }
    }

    /// Decodes the stored genesis state root back into a concrete `Root`.
    fn deserialize_genesis_state_root<Root: DeserializeOwned>(&self) -> Root {
        bincode::deserialize(&self.genesis_state_root)
            .expect("genesis_state_root must be bincode-deserializable")
    }

    /// Decodes the stored final state root back into a concrete `Root`.
    fn deserialize_final_state_root<Root: DeserializeOwned>(&self) -> Root {
        bincode::deserialize(&self.final_state_root)
            .expect("final_state_root must be bincode-deserializable")
    }
}

impl MockZkvmHost {
Expand All @@ -20,6 +67,7 @@ impl MockZkvmHost {
Self {
wait_for_proof: true,
notification_manager: Default::default(),
previous_anchor: Arc::new(Mutex::new(None)),
}
}

Expand All @@ -28,6 +76,28 @@ impl MockZkvmHost {
Self {
wait_for_proof: false,
notification_manager: Default::default(),
previous_anchor: Arc::new(Mutex::new(None)),
}
}

/// Like [`Self::new_non_blocking`], but seeded with the public data of the
/// latest verified aggregated proof previously persisted in the ledger DB
/// so that continuity assertions in
/// [`OuterZkvmHost::run_proof_aggregation`] survive a node restart.
pub fn new_non_blocking_with_previous_anchor<Address, Da, Root>(
previous_public_data: Option<&AggregatedProofPublicData<Address, Da, Root>>,
) -> Self
where
Address: Serialize,
Da: DaSpec,
Root: Serialize,
{
let previous_anchor =
previous_public_data.map(PreviousAggregatedProofAnchor::from_public_data);
Self {
wait_for_proof: false,
notification_manager: Default::default(),
previous_anchor: Arc::new(Mutex::new(previous_anchor)),
}
}

Expand Down Expand Up @@ -87,13 +157,15 @@ impl sov_rollup_interface::zk::ZkvmHost for MockZkvmHost {
}

impl OuterZkvmHost for MockZkvmHost {
fn run_proof_aggregation<Address: Serialize + Clone, Da: DaSpec, Root: Serialize + Clone>(
fn run_proof_aggregation<
Address: Serialize + Clone,
Da: DaSpec,
Root: Serialize + DeserializeOwned + Clone + PartialEq + Debug,
>(
&self,
genesis_state_root: Root,
headers_with_block_proofs: Vec<(Da::BlockHeader, BlockProof<Address, Da, Root>)>,
) -> anyhow::Result<SerializedAggregatedProof> {
use sov_rollup_interface::zk::aggregated_proof::AggregatedProofPublicData;

let block_proofs_data = headers_with_block_proofs
.iter()
.map(|(_, bp)| bp)
Expand All @@ -104,9 +176,44 @@ impl OuterZkvmHost for MockZkvmHost {
genesis_state_root,
);

self.add_hint_and_run_inner(&public_data)
let mut previous = self
.previous_anchor
.lock()
.expect("previous_anchor mutex was poisoned");

if let Some(prev) = previous.as_ref() {
assert_eq!(
public_data.initial_slot_number,
prev.final_slot_number.next(),
"Aggregated proof continuity violated: new aggregation starts at slot {} but previous aggregation ended at slot {}",
public_data.initial_slot_number,
prev.final_slot_number,
);

let prev_genesis_state_root: Root = prev.deserialize_genesis_state_root();
let prev_final_state_root: Root = prev.deserialize_final_state_root();

assert_eq!(
public_data.genesis_state_root, prev_genesis_state_root,
"Aggregated proof continuity violated: genesis_state_root differs from previous aggregation",
);
assert_eq!(
public_data.initial_state_root, prev_final_state_root,
"Aggregated proof continuity violated: new initial_state_root does not match previous final_state_root",
);
} else {
assert_eq!(public_data.initial_slot_number, SlotNumber::ONE);
}

let serialized = self
.add_hint_and_run_inner(&public_data)
.map(|raw_aggregated_proof| SerializedAggregatedProof {
raw_aggregated_proof,
})
})?;

*previous = Some(PreviousAggregatedProofAnchor::from_public_data(
&public_data,
));
Ok(serialized)
}
}
6 changes: 5 additions & 1 deletion crates/adapters/risc0/src/host.rs
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,11 @@ impl ZkvmHost for Risc0Host<'static> {
}

impl OuterZkvmHost for Risc0Host<'static> {
fn run_proof_aggregation<Address: Serialize + Clone, Da: DaSpec, Root: Serialize + Clone>(
fn run_proof_aggregation<
Address: Serialize + Clone,
Da: DaSpec,
Root: Serialize + serde::de::DeserializeOwned + Clone + PartialEq + core::fmt::Debug,
>(
&self,
_genesis_state_root: Root,
_headers_with_block_proofs: Vec<(Da::BlockHeader, BlockProof<Address, Da, Root>)>,
Expand Down
23 changes: 21 additions & 2 deletions crates/adapters/sp1/src/host.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,15 +43,30 @@ impl SP1AggregationHost {
/// Creates a new aggregation host from the aggregation guest `elf` binary
/// and the verifying key (`inner_method_id`) of the inner proof program.
///
/// Equivalent to [`Self::new_with_previous_proof`] with no previously
/// persisted aggregated proof.
pub fn new(elf: &'static [u8], inner_vk: sp1_sdk::SP1VerifyingKey) -> anyhow::Result<Self> {
Self::new_with_previous_proof(elf, inner_vk, None)
}

/// Like [`Self::new`], but seeded with the latest aggregated proof
/// previously persisted in the ledger DB so that recursive verification of
/// the previous outer proof survives a node restart.
///
/// Returns an error if the guest `elf` cannot be loaded into an
/// [`SP1Prover`] or if the persisted proof fails to decode.
pub fn new_with_previous_proof(
    elf: &'static [u8],
    inner_vk: sp1_sdk::SP1VerifyingKey,
    previous_aggregated_proof: Option<SerializedAggregatedProof>,
) -> anyhow::Result<Self> {
    // Decode the persisted outer proof up front so a corrupt record fails
    // construction instead of the first aggregation run.
    let decoded_previous = match previous_aggregated_proof {
        Some(proof) => Some(crate::decode_sp1_proof(&proof.to_serialized_zk_proof())?),
        None => None,
    };

    let prover = SP1Prover::new(elf)?;
    let outer_vk = prover.verifying_key().clone();

    Ok(Self {
        inner: Arc::new(Inner {
            prover,
            outer_vk,
            inner_vk,
            prev_agg_proof: Mutex::new(decoded_previous),
        }),
    })
}
Expand Down Expand Up @@ -295,7 +310,11 @@ impl ZkvmHost for SP1Host {
}

impl OuterZkvmHost for SP1AggregationHost {
fn run_proof_aggregation<Address: Serialize + Clone, Da: DaSpec, Root: Serialize + Clone>(
fn run_proof_aggregation<
Address: Serialize + Clone,
Da: DaSpec,
Root: Serialize + serde::de::DeserializeOwned + Clone + PartialEq + core::fmt::Debug,
>(
&self,
_genesis_state_root: Root,
headers_with_block_proofs: Vec<(Da::BlockHeader, BlockProof<Address, Da, Root>)>,
Expand Down
21 changes: 0 additions & 21 deletions crates/full-node/sov-db/src/ledger_db/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -260,8 +260,6 @@ pub struct LedgerDb {

// Db key for the latest height of the written STF info.
const WRITE_ROLLUP_HEIGHT_ID: StfInfoUniqueId = StfInfoUniqueId(0);
// DB key for the latest height of the retrieved STF info.
const NEXT_SLOT_NUMBER_TO_RECEIVE_ID: StfInfoUniqueId = StfInfoUniqueId(1);
// Db key for the oldest saved STF info.
const LAST_SLOT_NUMBER_ID: StfInfoUniqueId = StfInfoUniqueId(2);

Expand Down Expand Up @@ -656,25 +654,6 @@ impl LedgerDb {
.await
}

/// Materializes the latest height of the retrieved STF info.
pub fn materialize_stf_info_next_slot_number_to_receive(
&self,
read_slot_number: SlotNumber,
) -> anyhow::Result<SchemaBatch> {
let mut schema_batch = SchemaBatch::new();
schema_batch.put::<StfInfoMetadata>(&NEXT_SLOT_NUMBER_TO_RECEIVE_ID, &read_slot_number)?;
Ok(schema_batch)
}

/// Gets the latest height of the submitted STF info.
pub async fn get_stf_info_next_slot_number_to_receive(
&self,
) -> anyhow::Result<Option<SlotNumber>> {
let db = self.db.read().expect(DB_LOCK_POISONED).clone();
db.get_async::<StfInfoMetadata>(&NEXT_SLOT_NUMBER_TO_RECEIVE_ID)
.await
}

/// Materializes the oldest height of the retrieved STF info.
pub fn materialize_stf_info_oldest_slot_number(
&self,
Expand Down
14 changes: 0 additions & 14 deletions crates/full-node/sov-db/tests/integration/ledger_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -147,20 +147,6 @@ async fn test_stf_info() {
assert_eq!(original_stored_inf_info, stored_stf_info);
}

#[tokio::test(flavor = "multi_thread")]
async fn next_slot_number_to_receive_is_none_at_startup() {
let temp_dir = tempfile::tempdir().unwrap();
let mut storage_manager = SimpleLedgerStorageManager::new(temp_dir.path());
let ledger_storage = storage_manager.create_ledger_storage();

let ledger_db = LedgerDb::with_reader(ledger_storage).unwrap();
assert!(ledger_db
.get_stf_info_next_slot_number_to_receive()
.await
.unwrap()
.is_none());
}

#[tokio::test(flavor = "multi_thread")]
async fn test_rollback() {
let temp_dir = tempfile::tempdir().unwrap();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ use crate::processes::{ProofAggregationStatus, ProofProcessingStatus, StateTrans
pub struct ParallelProverService<Address, StateRoot, Witness, Da, InnerVm, OuterVm>
where
Address: Serialize + DeserializeOwned,
StateRoot: Serialize + DeserializeOwned + Clone + AsRef<[u8]>,
StateRoot: Serialize + DeserializeOwned + Clone + AsRef<[u8]> + PartialEq + core::fmt::Debug,
Witness: Serialize + DeserializeOwned,
Da: DaService,
InnerVm: Zkvm,
Expand All @@ -37,7 +37,15 @@ impl<Address, StateRoot, Witness, Da, InnerVm, OuterVm>
where
Address:
BorshSerialize + AsRef<[u8]> + Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
StateRoot: Serialize + DeserializeOwned + Clone + AsRef<[u8]> + Send + Sync + 'static,
StateRoot: Serialize
+ DeserializeOwned
+ Clone
+ AsRef<[u8]>
+ PartialEq
+ core::fmt::Debug
+ Send
+ Sync
+ 'static,
Witness: Serialize + DeserializeOwned + Send + Sync + 'static,
Da: DaService,
InnerVm: Zkvm,
Expand Down Expand Up @@ -87,8 +95,16 @@ impl<Address, StateRoot, Witness, Da, InnerVm, OuterVm> ProverService
where
Address:
BorshSerialize + AsRef<[u8]> + Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
StateRoot:
BorshSerialize + Serialize + DeserializeOwned + Clone + AsRef<[u8]> + Send + Sync + 'static,
StateRoot: BorshSerialize
+ Serialize
+ DeserializeOwned
+ Clone
+ AsRef<[u8]>
+ PartialEq
+ core::fmt::Debug
+ Send
+ Sync
+ 'static,
Witness: Serialize + DeserializeOwned + Send + Sync + 'static,
Da: DaService,
InnerVm: Zkvm + 'static,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,15 @@ where
Da: DaService,
Address:
BorshSerialize + Serialize + DeserializeOwned + AsRef<[u8]> + Clone + Send + Sync + 'static,
StateRoot: Serialize + DeserializeOwned + Clone + AsRef<[u8]> + Send + Sync + 'static,
StateRoot: Serialize
+ DeserializeOwned
+ Clone
+ AsRef<[u8]>
+ PartialEq
+ core::fmt::Debug
+ Send
+ Sync
+ 'static,
Witness: Serialize + DeserializeOwned + Send + Sync + 'static,
{
pub(crate) fn new(prover_address: Address, num_threads: usize) -> Self {
Expand Down
Loading
Loading