diff --git a/contracts b/contracts index decf61c55c24..9830f00b8f5b 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit decf61c55c24f953e6050150cc5378ea39c3bb9d +Subproject commit 9830f00b8f5bfc6b79869bf59d638676e3a098b6 diff --git a/core/Cargo.lock b/core/Cargo.lock index 2b629c96d4cf..095fd2d58cab 100644 --- a/core/Cargo.lock +++ b/core/Cargo.lock @@ -262,6 +262,39 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash 0.8.11", + "ark-ff 0.5.0", + "ark-poly", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.2", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "rayon", + "zeroize", +] + [[package]] name = "ark-ff" version = "0.3.0" @@ -300,6 +333,27 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec 0.7.6", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rayon", + "zeroize", +] + [[package]] name = "ark-ff-asm" version = "0.3.0" @@ -320,6 +374,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote 1.0.37", + "syn 2.0.90", +] + [[package]] name = "ark-ff-macros" version = "0.3.0" @@ -345,6 +409,35 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash 0.8.11", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.2", + "rayon", +] + [[package]] name = "ark-serialize" version = "0.3.0" @@ -366,6 +459,31 @@ dependencies = [ "num-bigint 0.4.6", ] +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive", + "ark-std 0.5.0", + "arrayvec 0.7.6", + "digest 0.10.7", + "num-bigint 0.4.6", + "rayon", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", +] + [[package]] name = "ark-std" version = "0.3.0" @@ -386,6 +504,17 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", + "rayon", +] + [[package]] name = "arr_macro" version = "0.1.3" @@ -2445,6 +2574,15 @@ dependencies = [ "subtle", ] +[[package]] +name = "directories" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs" version = "5.0.1" @@ -2586,6 +2724,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", +] + [[package]] name = "either" version = "1.13.0" @@ -2668,6 +2818,26 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", +] + [[package]] name = "enum_dispatch" version = "0.3.13" @@ -3215,6 +3385,16 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" +[[package]] +name = "fslock" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04412b8935272e3a9bae6f48c7bfff74c2911f60525404edfdd28e49884c3bfb" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "fuchsia-cprng" version = "0.1.1" @@ -3695,6 +3875,7 @@ version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ + "allocator-api2", "foldhash", "serde", ] @@ -7115,6 +7296,32 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" +[[package]] +name = "rust-kzg-bn254" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdae4058a9f604acf7023d99d931d6f30261fff93787bcfd1f1ccfc725b701c" +dependencies = [ + "ark-bn254", + "ark-ec", + "ark-ff 0.5.0", + "ark-poly", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "byteorder", + "crossbeam-channel", + "directories", + "hex-literal", + "num-bigint 0.4.6", + "num-traits", + "num_cpus", + "rand 0.8.5", + "rayon", + "sha2 0.10.8", + "sys-info", + "ureq", +] + [[package]] name = "rust_decimal" version = "1.36.0" @@ -7440,6 +7647,15 @@ dependencies = [ "smallvec", ] +[[package]] +name = "scc" +version = "2.3.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea091f6cac2595aa38993f04f4ee692ed43757035c36e67c180b6828356385b1" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.27" @@ -7473,6 +7689,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sdd" +version = "3.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07779b9b918cc05650cb30f404d4d7835d26df37c235eded8a6832e2fb82cca" + [[package]] name = "seahash" version = "4.1.0" @@ -7873,6 +8095,32 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "fslock", + "futures 0.3.31", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", +] + [[package]] name = "sha1" version = "0.10.6" @@ -8710,6 +8958,16 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "sys-info" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b3a0d0aba8bf96a0e1ddfdc352fc53b3df7f39318c71854910c3c4b024ae52c" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -9647,10 +9905,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" dependencies = [ "base64 0.22.1", + "flate2", "log", "native-tls", "once_cell", + "rustls 0.23.19", + "rustls-pki-types", "url", + "webpki-roots", ] [[package]] @@ -11074,6 +11336,7 @@ name = "zksync_da_clients" version = "26.3.0-non-semver-compat" dependencies = [ "anyhow", + "ark-bn254", "async-trait", "backon", "base58", @@ -11084,23 +11347,32 @@ dependencies = [ "blake2b_simd", "bytes", "celestia-types", + "ethabi", "flate2", "futures 0.3.31", "hex", "http 1.2.0", "jsonrpsee", + "num-bigint 0.4.6", "parity-scale-codec", "pbjson-types", "prost 0.12.6", + "rand 0.8.5", "reqwest 0.12.9", "ripemd", + "rust-kzg-bn254", "scale-encode", "secp256k1", "serde", "serde_json", + "serial_test", "sha2 0.10.8", + "sha3 0.10.8", "subxt-metadata", "subxt-signer", + "tempfile", + "thiserror 1.0.69", + "tiny-keccak 2.0.2", "tokio", "tokio-stream", "tonic 0.11.0", @@ -11110,8 +11382,10 @@ dependencies = [ "zksync_config", "zksync_da_client", "zksync_env_config", + "zksync_eth_client", "zksync_object_store", "zksync_types", + "zksync_web3_decl", ] [[package]] diff --git a/core/Cargo.toml b/core/Cargo.toml index 8f721be2c279..ae4824fdec89 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -225,6 +225,10 @@ pbjson-types = "0.6.0" # Eigen tokio-stream = "0.1.16" +rust-kzg-bn254 = "0.2.1" +ark-bn254 = "0.5.0" +num-bigint = "0.4.6" +serial_test = { version = "3.1.1", features = ["file_locks"] } # Here and below: # We *always* pin the latest version of protocol to disallow accidental changes in the execution logic. 
diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 9f49f822e8b5..7838fc3c63a9 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -569,6 +569,7 @@ impl MainNodeBuilder { } let secrets = try_load_config!(self.secrets.data_availability); + let l1_secrets = try_load_config!(self.secrets.l1); match (da_client_config, secrets) { (DAClientConfig::Avail(config), DataAvailabilitySecrets::Avail(secret)) => { self.node.add_layer(AvailWiringLayer::new(config, secret)); @@ -579,7 +580,10 @@ impl MainNodeBuilder { .add_layer(CelestiaWiringLayer::new(config, secret)); } - (DAClientConfig::Eigen(config), DataAvailabilitySecrets::Eigen(secret)) => { + (DAClientConfig::Eigen(mut config), DataAvailabilitySecrets::Eigen(secret)) => { + if config.eigenda_eth_rpc.is_none() { + config.eigenda_eth_rpc = Some(l1_secrets.l1_rpc_url); + } self.node.add_layer(EigenWiringLayer::new(config, secret)); } diff --git a/core/lib/config/src/configs/da_client/eigen.rs b/core/lib/config/src/configs/da_client/eigen.rs index f2c05a0f61ef..75817e5bf96c 100644 --- a/core/lib/config/src/configs/da_client/eigen.rs +++ b/core/lib/config/src/configs/da_client/eigen.rs @@ -1,10 +1,31 @@ use serde::Deserialize; -use zksync_basic_types::secrets::PrivateKey; +use zksync_basic_types::{secrets::PrivateKey, url::SensitiveUrl, Address}; -#[derive(Clone, Debug, Default, PartialEq, Deserialize)] +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub enum PointsSource { + Path(String), + /// g1_url, g2_url + Url((String, String)), +} + +/// Configuration for the EigenDA remote disperser client. +#[derive(Clone, Debug, PartialEq, Deserialize)] pub struct EigenConfig { - pub rpc_node_url: String, - pub inclusion_polling_interval_ms: u64, + /// URL of the Disperser RPC server + pub disperser_rpc: String, + /// Block depth that needs to be reached before the blob is considered finalized; + /// a value of 0 means the client will not wait for finalization + pub settlement_layer_confirmation_depth: u32, + /// URL of the Ethereum RPC server + pub eigenda_eth_rpc: Option<SensitiveUrl>, + /// Address of the service manager contract + pub eigenda_svc_manager_address: Address, + /// Wait for the blob to be finalized before returning the response + pub wait_for_finalization: bool, + /// Authenticated dispersal + pub authenticated: bool, + /// Points source + pub points_source: PointsSource, } #[derive(Clone, Debug, PartialEq)] diff --git a/core/lib/dal/.sqlx/query-01aa65f0899ae807326f6e2382f485b830b304291ecd0538041ab72f71ee7dea.json b/core/lib/dal/.sqlx/query-01aa65f0899ae807326f6e2382f485b830b304291ecd0538041ab72f71ee7dea.json new file mode 100644 index 000000000000..28a1915eef99 --- /dev/null +++ b/core/lib/dal/.sqlx/query-01aa65f0899ae807326f6e2382f485b830b304291ecd0538041ab72f71ee7dea.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n pubdata_input,\n sealed_at\n FROM\n l1_batches\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number != 0\n AND data_availability.blob_id = $1\n ORDER BY\n number\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "pubdata_input", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "sealed_at", + "type_info": "Timestamp" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + true, + true + ] + }, 
"hash": "01aa65f0899ae807326f6e2382f485b830b304291ecd0538041ab72f71ee7dea" +} diff --git a/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql b/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql new file mode 100644 index 000000000000..938d2e09de44 --- /dev/null +++ b/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql @@ -0,0 +1 @@ +CREATE INDEX idx_blob_id_l1_batch_number ON data_availability (blob_id, l1_batch_number); diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 8503cc21f283..cea5c8249983 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -219,4 +219,44 @@ impl DataAvailabilityDal<'_, '_> { }) .collect()) } + + /// Fetches the pubdata for the L1 batch with a given blob id. + pub async fn get_blob_data_by_blob_id( + &mut self, + blob_id: &str, + ) -> DalResult> { + let row = sqlx::query!( + r#" + SELECT + number, + pubdata_input, + sealed_at + FROM + l1_batches + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number + WHERE + number != 0 + AND data_availability.blob_id = $1 + ORDER BY + number + LIMIT + 1 + "#, + blob_id, + ) + .instrument("get_blob_data_by_blob_id") + .with_arg("blob_id", &blob_id) + .fetch_optional(self.storage) + .await? + .map(|row| L1BatchDA { + // `unwrap` is safe here because we have a `WHERE` clause that filters out `NULL` values + pubdata: row.pubdata_input.unwrap(), + l1_batch_number: L1BatchNumber(row.number as u32), + sealed_at: row.sealed_at.unwrap().and_utc(), + }); + + Ok(row) + } } diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index 3f768eebd1a3..74583d041199 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -1,17 +1,22 @@ -use std::env; +use std::{env, str::FromStr}; -use zksync_config::configs::{ - da_client::{ - avail::{ - AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, +use zksync_basic_types::{url::SensitiveUrl, H160}; +use zksync_config::{ + configs::{ + da_client::{ + avail::{ + AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, + AVAIL_GAS_RELAY_CLIENT_NAME, + }, + celestia::CelestiaSecrets, + eigen::EigenSecrets, + DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME, + EIGEN_CLIENT_CONFIG_NAME, NO_DA_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, }, - celestia::CelestiaSecrets, - eigen::EigenSecrets, - DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME, - EIGEN_CLIENT_CONFIG_NAME, NO_DA_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, + secrets::DataAvailabilitySecrets, + AvailConfig, }, - secrets::DataAvailabilitySecrets, - AvailConfig, + EigenConfig, }; use crate::{envy_load, FromEnv}; @@ -34,7 +39,35 @@ impl FromEnv for DAClientConfig { }, }), CELESTIA_CLIENT_CONFIG_NAME => Self::Celestia(envy_load("da_celestia_config", "DA_")?), - EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(envy_load("da_eigen_config", "DA_")?), + EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(EigenConfig { + disperser_rpc: env::var("DA_DISPERSER_RPC")?, + settlement_layer_confirmation_depth: env::var( + "DA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH", + )? + .parse()?, + eigenda_eth_rpc: match env::var("DA_EIGENDA_ETH_RPC") { + // Use a specific L1 RPC URL for the EigenDA client. + Ok(url) => Some(SensitiveUrl::from_str(&url)?), + // Err means that the environment variable is not set. 
+ // Use zkSync default L1 RPC for the EigenDA client. + Err(_) => None, + }, + eigenda_svc_manager_address: H160::from_str(&env::var( + "DA_EIGENDA_SVC_MANAGER_ADDRESS", + )?)?, + wait_for_finalization: env::var("DA_WAIT_FOR_FINALIZATION")?.parse()?, + authenticated: env::var("DA_AUTHENTICATED")?.parse()?, + points_source: match env::var("DA_POINTS_SOURCE")?.as_str() { + "Path" => zksync_config::configs::da_client::eigen::PointsSource::Path( + env::var("DA_POINTS_PATH")?, + ), + "Url" => zksync_config::configs::da_client::eigen::PointsSource::Url(( + env::var("DA_POINTS_LINK_G1")?, + env::var("DA_POINTS_LINK_G2")?, + )), + _ => anyhow::bail!("Unknown Eigen points type"), + }, + }), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) } @@ -87,10 +120,14 @@ impl FromEnv for DataAvailabilitySecrets { #[cfg(test)] mod tests { + use std::str::FromStr; + + use zksync_basic_types::url::SensitiveUrl; use zksync_config::{ configs::{ da_client::{ avail::{AvailClientConfig, AvailDefaultConfig}, + eigen::PointsSource, DAClientConfig::{self, ObjectStore}, }, object_store::ObjectStoreMode::GCS, @@ -244,8 +281,14 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" DA_CLIENT="Eigen" - DA_RPC_NODE_URL="localhost:12345" - DA_INCLUSION_POLLING_INTERVAL_MS="1000" + DA_DISPERSER_RPC="http://localhost:8080" + DA_SETTLEMENT_LAYER_CONFIRMATION_DEPTH=0 + DA_EIGENDA_ETH_RPC="http://localhost:8545" + DA_EIGENDA_SVC_MANAGER_ADDRESS="0x0000000000000000000000000000000000000123" + DA_WAIT_FOR_FINALIZATION=true + DA_AUTHENTICATED=false + DA_POINTS_SOURCE="Path" + DA_POINTS_PATH="resources" "#; lock.set_env(config); let actual = DAClientConfig::from_env().unwrap(); assert_eq!( actual, DAClientConfig::Eigen(EigenConfig { - rpc_node_url: "localhost:12345".to_string(), - inclusion_polling_interval_ms: 1000, + disperser_rpc: "http://localhost:8080".to_string(), + settlement_layer_confirmation_depth: 0, + eigenda_eth_rpc: Some(SensitiveUrl::from_str("http://localhost:8545").unwrap()), + eigenda_svc_manager_address: "0x0000000000000000000000000000000000000123" + .parse() + .unwrap(), + wait_for_finalization: true, + authenticated: false, + points_source: PointsSource::Path("resources".to_string()), }) ); } diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index 325288056b35..f7c45e98500e 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -1,4 +1,3 @@ -use anyhow::Context as _; use serde::de::DeserializeOwned; mod api; @@ -44,5 +43,5 @@ pub trait FromEnv: Sized { pub fn envy_load<T: DeserializeOwned>(name: &str, prefix: &str) -> anyhow::Result<T> { envy::prefixed(prefix) .from_env() - .with_context(|| format!("Cannot load config <{name}>")) + .map_err(|e| anyhow::anyhow!("Failed to load {} from env: {}", name, e)) } diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs index 77d2965149fe..59de62d99fb2 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -1,3 +1,5 @@ +use std::str::FromStr; + use anyhow::Context; use zksync_config::configs::{ self, @@ -9,8 +11,15 @@ use zksync_config::configs::{ }, }; use zksync_protobuf::{required, ProtoRepr}; +use zksync_types::url::SensitiveUrl; -use crate::proto::{da_client as proto, object_store as object_store_proto}; +use crate::{ + parse_h160, + proto::{ + da_client::{self as proto}, + object_store as object_store_proto, + }, +}; impl ProtoRepr for proto::DataAvailabilityClient { type Type = configs::DAClientConfig; @@ 
-53,11 +62,36 @@ impl ProtoRepr for proto::DataAvailabilityClient { timeout_ms: *required(&conf.timeout_ms).context("timeout_ms")?, }), proto::data_availability_client::Config::Eigen(conf) => Eigen(EigenConfig { - rpc_node_url: required(&conf.rpc_node_url) - .context("rpc_node_url")? + disperser_rpc: required(&conf.disperser_rpc) + .context("disperser_rpc")? .clone(), - inclusion_polling_interval_ms: *required(&conf.inclusion_polling_interval_ms) - .context("inclusion_polling_interval_ms")?, + settlement_layer_confirmation_depth: *required( + &conf.settlement_layer_confirmation_depth, + ) + .context("settlement_layer_confirmation_depth")?, + eigenda_eth_rpc: Some(SensitiveUrl::from_str( + required(&conf.eigenda_eth_rpc).context("eigenda_eth_rpc")?, + )?), + eigenda_svc_manager_address: required(&conf.eigenda_svc_manager_address) + .and_then(|x| parse_h160(x)) + .context("eigenda_svc_manager_address")?, + wait_for_finalization: *required(&conf.wait_for_finalization) + .context("wait_for_finalization")?, + authenticated: *required(&conf.authenticated).context("authenticated")?, + points_source: match conf.points_source.clone() { + Some(proto::eigen_config::PointsSource::Path(path)) => { + zksync_config::configs::da_client::eigen::PointsSource::Path(path) + } + Some(proto::eigen_config::PointsSource::Url(url)) => { + let g1_url = required(&url.g1_url).context("g1_url")?; + let g2_url = required(&url.g2_url).context("g2_url")?; + zksync_config::configs::da_client::eigen::PointsSource::Url(( + g1_url.to_owned(), + g2_url.to_owned(), + )) + } + None => return Err(anyhow::anyhow!("Invalid Eigen DA configuration")), + }, }), proto::data_availability_client::Config::ObjectStore(conf) => { ObjectStore(object_store_proto::ObjectStore::read(conf)?) @@ -98,8 +132,32 @@ impl ProtoRepr for proto::DataAvailabilityClient { }) } Eigen(config) => proto::data_availability_client::Config::Eigen(proto::EigenConfig { - rpc_node_url: Some(config.rpc_node_url.clone()), - inclusion_polling_interval_ms: Some(config.inclusion_polling_interval_ms), + disperser_rpc: Some(config.disperser_rpc.clone()), + settlement_layer_confirmation_depth: Some( + config.settlement_layer_confirmation_depth, + ), + eigenda_eth_rpc: config + .eigenda_eth_rpc + .as_ref() + .map(|a| a.expose_str().to_string()), + eigenda_svc_manager_address: Some(format!( + "{:?}", + config.eigenda_svc_manager_address + )), + wait_for_finalization: Some(config.wait_for_finalization), + authenticated: Some(config.authenticated), + points_source: Some(match &config.points_source { + zksync_config::configs::da_client::eigen::PointsSource::Path(path) => { + proto::eigen_config::PointsSource::Path(path.clone()) + } + zksync_config::configs::da_client::eigen::PointsSource::Url(( + g1_url, + g2_url, + )) => proto::eigen_config::PointsSource::Url(proto::Url { + g1_url: Some(g1_url.clone()), + g2_url: Some(g2_url.clone()), + }), + }), }), ObjectStore(config) => proto::data_availability_client::Config::ObjectStore( object_store_proto::ObjectStore::build(config), diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index 9ad7527979e6..70880d93b55a 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -37,9 +37,24 @@ message CelestiaConfig { optional uint64 timeout_ms = 4; } +message Url { + optional string g1_url = 1; + optional string g2_url = 2; +} + message EigenConfig { - optional string rpc_node_url = 
1; - optional uint64 inclusion_polling_interval_ms = 2; + optional string disperser_rpc = 3; + optional uint32 settlement_layer_confirmation_depth = 4; + optional string eigenda_eth_rpc = 5; + optional string eigenda_svc_manager_address = 6; + optional bool wait_for_finalization = 7; + optional bool authenticated = 8; + oneof points_source { + string path = 9; + Url url = 10; + } + reserved 1,2; + reserved "rpc_node_url","inclusion_polling_interval_ms"; } message NoDAConfig {} diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index fcfa74630633..fd48c8dd9e4c 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -57,3 +57,17 @@ pbjson-types.workspace = true # Eigen dependencies tokio-stream.workspace = true +rand.workspace = true +sha3.workspace = true +tiny-keccak.workspace = true +ethabi.workspace = true +rust-kzg-bn254.workspace = true +ark-bn254.workspace = true +num-bigint.workspace = true +zksync_web3_decl.workspace = true +zksync_eth_client.workspace = true +thiserror.workspace = true +tempfile.workspace = true + +[dev-dependencies] +serial_test.workspace = true diff --git a/core/node/da_clients/src/eigen/README.md b/core/node/da_clients/src/eigen/README.md index 634b4eb58780..bf6b5fb8038b 100644 --- a/core/node/da_clients/src/eigen/README.md +++ b/core/node/da_clients/src/eigen/README.md @@ -1,11 +1,13 @@ -# EigenDA client +# EigenDA Client ---- +EigenDA is a high-throughput data availability layer for rollups. It is an EigenLayer AVS (Actively Validated +Service), so it leverages Ethereum's economic security instead of bootstrapping a new network with its own validators. +For more information, see the [docs](https://docs.eigenda.xyz/). -This is an implementation of the EigenDA client capable of sending the blobs to DA layer. It uses authenticated -requests, though the auth headers are kind of mocked in the current API implementation. +## Temporary -The generated files are received by compiling the `.proto` files from EigenDA repo using the following function: +To implement the integration, we generated Rust bindings from the `.proto` files in the EigenDA repo, compiled with the +following function: ```rust pub fn compile_protos() { @@ -28,8 +30,8 @@ } +The generated folder is considered a temporary solution until EigenDA provides a library with either a protogen or, +preferably, a full Rust client implementation. + The proto files are not included here to avoid confusion in case they fall out of date, so the EigenDA [repo](https://github.com/Layr-Labs/eigenda/tree/master/api/proto) has to be the source of truth for the proto files. - -The generated folder here is considered a temporary solution until the EigenDA has a library with either a protogen, or -preferably a full Rust client implementation. 
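For reference, the generation step the README describes could be wired up roughly as follows; this is a sketch assuming `tonic-build` 0.11 (matching the `tonic 0.11.0` pinned in this diff), with placeholder proto paths rather than the repository's actual layout:

```rust
// Hypothetical helper for regenerating the EigenDA gRPC bindings,
// assuming the protos were copied from the EigenDA repo into `proto/`.
fn compile_protos() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        .build_server(false) // only the disperser client is needed
        .out_dir("src/eigen/generated") // matches the `generated` module includes
        .compile(
            &["proto/disperser/disperser.proto"], // placeholder path
            &["proto"],
        )?;
    Ok(())
}
```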
diff --git a/core/node/da_clients/src/eigen/blob_info.rs b/core/node/da_clients/src/eigen/blob_info.rs new file mode 100644 index 000000000000..63fece177c59 --- /dev/null +++ b/core/node/da_clients/src/eigen/blob_info.rs @@ -0,0 +1,168 @@ +use std::fmt; + +use super::{ + common::G1Commitment as DisperserG1Commitment, + disperser::{ + BatchHeader as DisperserBatchHeader, BatchMetadata as DisperserBatchMetadata, + BlobHeader as DisperserBlobHeader, BlobInfo as DisperserBlobInfo, + BlobQuorumParam as DisperserBlobQuorumParam, + BlobVerificationProof as DisperserBlobVerificationProof, + }, +}; + +#[derive(Debug)] +pub enum ConversionError { + NotPresent, +} + +impl fmt::Display for ConversionError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConversionError::NotPresent => write!(f, "Failed to convert BlobInfo"), + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct G1Commitment { + pub x: Vec<u8>, + pub y: Vec<u8>, +} + +impl From<DisperserG1Commitment> for G1Commitment { + fn from(value: DisperserG1Commitment) -> Self { + Self { + x: value.x, + y: value.y, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobQuorumParam { + pub quorum_number: u32, + pub adversary_threshold_percentage: u32, + pub confirmation_threshold_percentage: u32, + pub chunk_length: u32, +} + +impl From<DisperserBlobQuorumParam> for BlobQuorumParam { + fn from(value: DisperserBlobQuorumParam) -> Self { + Self { + quorum_number: value.quorum_number, + adversary_threshold_percentage: value.adversary_threshold_percentage, + confirmation_threshold_percentage: value.confirmation_threshold_percentage, + chunk_length: value.chunk_length, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobHeader { + pub commitment: G1Commitment, + pub data_length: u32, + pub blob_quorum_params: Vec<BlobQuorumParam>, +} + +impl TryFrom<DisperserBlobHeader> for BlobHeader { + type Error = ConversionError; + fn try_from(value: DisperserBlobHeader) -> Result<Self, Self::Error> { + let blob_quorum_params: Vec<BlobQuorumParam> = value + .blob_quorum_params + .iter() + .map(|param| BlobQuorumParam::from(param.clone())) + .collect(); + Ok(Self { + commitment: G1Commitment::from(value.commitment.ok_or(ConversionError::NotPresent)?), + data_length: value.data_length, + blob_quorum_params, + }) + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BatchHeader { + pub batch_root: Vec<u8>, + pub quorum_numbers: Vec<u8>, + pub quorum_signed_percentages: Vec<u8>, + pub reference_block_number: u32, +} + +impl From<DisperserBatchHeader> for BatchHeader { + fn from(value: DisperserBatchHeader) -> Self { + Self { + batch_root: value.batch_root, + quorum_numbers: value.quorum_numbers, + quorum_signed_percentages: value.quorum_signed_percentages, + reference_block_number: value.reference_block_number, + } + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BatchMetadata { + pub batch_header: BatchHeader, + pub signatory_record_hash: Vec<u8>, + pub fee: Vec<u8>, + pub confirmation_block_number: u32, + pub batch_header_hash: Vec<u8>, +} + +impl TryFrom<DisperserBatchMetadata> for BatchMetadata { + type Error = ConversionError; + fn try_from(value: DisperserBatchMetadata) -> Result<Self, Self::Error> { + Ok(Self { + batch_header: BatchHeader::from(value.batch_header.ok_or(ConversionError::NotPresent)?), + signatory_record_hash: value.signatory_record_hash, + fee: value.fee, + confirmation_block_number: value.confirmation_block_number, + batch_header_hash: value.batch_header_hash, + }) + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobVerificationProof { + pub batch_id: u32, + pub blob_index: u32, + pub batch_medatada: BatchMetadata, + pub inclusion_proof: Vec<u8>, + pub quorum_indexes: Vec<u8>, +} + +impl 
TryFrom<DisperserBlobVerificationProof> for BlobVerificationProof { + type Error = ConversionError; + fn try_from(value: DisperserBlobVerificationProof) -> Result<Self, Self::Error> { + Ok(Self { + batch_id: value.batch_id, + blob_index: value.blob_index, + batch_medatada: BatchMetadata::try_from( + value.batch_metadata.ok_or(ConversionError::NotPresent)?, + )?, + inclusion_proof: value.inclusion_proof, + quorum_indexes: value.quorum_indexes, + }) + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BlobInfo { + pub blob_header: BlobHeader, + pub blob_verification_proof: BlobVerificationProof, +} + +impl TryFrom<DisperserBlobInfo> for BlobInfo { + type Error = ConversionError; + fn try_from(value: DisperserBlobInfo) -> Result<Self, Self::Error> { + Ok(Self { + blob_header: BlobHeader::try_from( + value.blob_header.ok_or(ConversionError::NotPresent)?, + )?, + blob_verification_proof: BlobVerificationProof::try_from( + value + .blob_verification_proof + .ok_or(ConversionError::NotPresent)?, + )?, + }) + } +} diff --git a/core/node/da_clients/src/eigen/client.rs b/core/node/da_clients/src/eigen/client.rs index 98a52321d95b..6a4aa174db7f 100644 --- a/core/node/da_clients/src/eigen/client.rs +++ b/core/node/da_clients/src/eigen/client.rs @@ -10,27 +10,31 @@ use zksync_da_client::{ }; use super::sdk::RawEigenClient; -use crate::utils::to_non_retriable_da_error; +use crate::utils::to_retriable_da_error; +#[async_trait] +pub trait GetBlobData: std::fmt::Debug + Send + Sync { + async fn get_blob_data(&self, input: &str) -> anyhow::Result<Option<Vec<u8>>>; +} + +/// EigenClient is a client for the Eigen DA service. #[derive(Debug, Clone)] pub struct EigenClient { - client: Arc<RawEigenClient>, + pub(crate) client: Arc<RawEigenClient>, } impl EigenClient { - pub async fn new(config: EigenConfig, secrets: EigenSecrets) -> anyhow::Result<Self> { + pub async fn new( + config: EigenConfig, + secrets: EigenSecrets, + get_blob_data: Arc<dyn GetBlobData>, + ) -> anyhow::Result<Self> { let private_key = SecretKey::from_str(secrets.private_key.0.expose_secret()) .map_err(|e| anyhow::anyhow!("Failed to parse private key: {}", e))?; - Ok(EigenClient { - client: Arc::new( - RawEigenClient::new( - config.rpc_node_url, - config.inclusion_polling_interval_ms, - private_key, - ) - .await?, - ), + let client = RawEigenClient::new(private_key, config, get_blob_data).await?; + Ok(Self { + client: Arc::new(client), }) } } @@ -46,13 +50,24 @@ impl DataAvailabilityClient for EigenClient { .client .dispatch_blob(data) .await - .map_err(to_non_retriable_da_error)?; + .map_err(to_retriable_da_error)?; Ok(DispatchResponse::from(blob_id)) } - async fn get_inclusion_data(&self, _: &str) -> Result<Option<InclusionData>, DAError> { - Ok(Some(InclusionData { data: vec![] })) + async fn get_inclusion_data(&self, blob_id: &str) -> Result<Option<InclusionData>, DAError> { + let inclusion_data = self + .client + .get_inclusion_data(blob_id) + .await + .map_err(to_retriable_da_error)?; + if let Some(inclusion_data) = inclusion_data { + Ok(Some(InclusionData { + data: inclusion_data, + })) + } else { + Ok(None) + } } fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> { @@ -60,10 +75,220 @@ } fn blob_size_limit(&self) -> Option<usize> { - Some(1920 * 1024) // 2mb - 128kb as a buffer + Some(RawEigenClient::blob_size_limit()) } async fn balance(&self) -> Result<u64, DAError> { Ok(0) // TODO fetch from API when payments are enabled in Eigen (PE-305) } } + +/// EigenDA Client tests are ignored by default, because they require a remote dependency, +/// which may not always be available, causing tests to be flaky. 
+/// To run these tests, use the following command: +/// `cargo test -p zksync_da_clients -- --ignored` +#[cfg(test)] +mod tests { + use std::{sync::Arc, time::Duration}; + + use backon::{ConstantBuilder, Retryable}; + use serial_test::file_serial; + use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig}; + use zksync_da_client::{types::DispatchResponse, DataAvailabilityClient}; + use zksync_types::secrets::PrivateKey; + + use crate::eigen::{blob_info::BlobInfo, test_eigenda_config, EigenClient, GetBlobData}; + + impl EigenClient { + async fn get_blob_data(&self, blob_id: BlobInfo) -> anyhow::Result<Vec<u8>> { + self.client.get_blob_data(blob_id).await + } + + async fn get_commitment(&self, blob_id: &str) -> anyhow::Result<Option<BlobInfo>> { + self.client.get_commitment(blob_id).await + } + } + + const STATUS_QUERY_INTERVAL: Duration = Duration::from_millis(5); + const MAX_RETRY_ATTEMPTS: usize = 1800000; // At the 5 ms query interval this retries for up to 2.5 hours + + async fn get_blob_info( + client: &EigenClient, + result: &DispatchResponse, + ) -> anyhow::Result<BlobInfo> { + let blob_info = (|| async { + let blob_info = client.get_commitment(&result.blob_id).await?; + if blob_info.is_none() { + return Err(anyhow::anyhow!("Blob not found")); + } + Ok(blob_info.unwrap()) + }) + .retry( + &ConstantBuilder::default() + .with_delay(STATUS_QUERY_INTERVAL) + .with_max_times(MAX_RETRY_ATTEMPTS), + ) + .when(|e| e.to_string().contains("Blob not found")) + .await?; + + Ok(blob_info) + } + + #[derive(Debug, Clone)] + struct MockGetBlobData; + + #[async_trait::async_trait] + impl GetBlobData for MockGetBlobData { + async fn get_blob_data(&self, _input: &'_ str) -> anyhow::Result<Option<Vec<u8>>> { + Ok(None) + } + } + + fn test_secrets() -> EigenSecrets { + EigenSecrets { + private_key: PrivateKey::from( + "d08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6", + ), + } + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[file_serial] + async fn test_non_auth_dispersal() { + let config = test_eigenda_config(); + let secrets = test_secrets(); + let client = EigenClient::new(config.clone(), secrets, Arc::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + + let blob_info = get_blob_info(&client, &result).await.unwrap(); + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + assert_eq!(retrieved_data, data); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[file_serial] + async fn test_auth_dispersal() { + let config = EigenConfig { + authenticated: true, + ..test_eigenda_config() + }; + let secrets = test_secrets(); + let client = EigenClient::new(config.clone(), secrets, Arc::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + let blob_info = get_blob_info(&client, &result).await.unwrap(); + + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + 
assert_eq!(retrieved_data, data); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[file_serial] + async fn test_wait_for_finalization() { + let config = EigenConfig { + wait_for_finalization: true, + authenticated: true, + ..test_eigenda_config() + }; + let secrets = test_secrets(); + + let client = EigenClient::new(config.clone(), secrets, Arc::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + let blob_info = get_blob_info(&client, &result).await.unwrap(); + + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + assert_eq!(retrieved_data, data); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[file_serial] + async fn test_settlement_layer_confirmation_depth() { + let config = EigenConfig { + settlement_layer_confirmation_depth: 5, + ..test_eigenda_config() + }; + let secrets = test_secrets(); + let client = EigenClient::new(config.clone(), secrets, Arc::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + let blob_info = get_blob_info(&client, &result).await.unwrap(); + + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + assert_eq!(retrieved_data, data); + } + + #[ignore = "depends on external RPC"] + #[tokio::test] + #[file_serial] + async fn test_auth_dispersal_settlement_layer_confirmation_depth() { + let config = EigenConfig { + settlement_layer_confirmation_depth: 5, + authenticated: true, + ..test_eigenda_config() + }; + let secrets = test_secrets(); + let client = EigenClient::new(config.clone(), secrets, Arc::new(MockGetBlobData)) + .await + .unwrap(); + let data = vec![1; 20]; + let result = client.dispatch_blob(0, data.clone()).await.unwrap(); + let blob_info = get_blob_info(&client, &result).await.unwrap(); + + let expected_inclusion_data = blob_info.clone().blob_verification_proof.inclusion_proof; + let actual_inclusion_data = client + .get_inclusion_data(&result.blob_id) + .await + .unwrap() + .unwrap() + .data; + assert_eq!(expected_inclusion_data, actual_inclusion_data); + let retrieved_data = client.get_blob_data(blob_info).await.unwrap(); + assert_eq!(retrieved_data, data); + } +} diff --git a/core/node/da_clients/src/eigen/errors.rs b/core/node/da_clients/src/eigen/errors.rs new file mode 100644 index 000000000000..b2eba8ee843e --- /dev/null +++ b/core/node/da_clients/src/eigen/errors.rs @@ -0,0 +1,111 @@ +use ark_bn254::G1Affine; +use tonic::{transport::Error as TonicError, Status}; +use zksync_eth_client::EnrichedClientError; + +use super::blob_info::BlobQuorumParam; + +/// Errors returned by this crate +#[derive(Debug, thiserror::Error)] +pub enum EigenClientError { + #[error(transparent)] + EthClient(#[from] EthClientError), + #[error(transparent)] + Verification(#[from] VerificationError), + #[error(transparent)] + Communication(#[from] CommunicationError), + #[error(transparent)] + 
BlobStatus(#[from] BlobStatusError), + #[error(transparent)] + Conversion(#[from] ConversionError), + #[error(transparent)] + Config(#[from] ConfigError), +} + +#[derive(Debug, thiserror::Error)] +pub enum ConfigError { + #[error(transparent)] + Secp(#[from] secp256k1::Error), + #[error(transparent)] + Tonic(#[from] TonicError), +} + +#[derive(Debug, thiserror::Error)] +pub enum CommunicationError { + #[error(transparent)] + Secp(#[from] secp256k1::Error), + #[error(transparent)] + Hex(#[from] hex::FromHexError), + #[error(transparent)] + GetBlobData(#[from] Box<dyn std::error::Error + Send + Sync>), +} + +#[derive(Debug, thiserror::Error)] +pub enum BlobStatusError { + #[error(transparent)] + Prost(#[from] prost::DecodeError), + #[error(transparent)] + Status(#[from] Status), +} + +/// Errors specific to conversion +#[derive(Debug, thiserror::Error)] +pub enum ConversionError {} + +/// Errors for the EthClient +#[derive(Debug, thiserror::Error)] +pub enum EthClientError { + #[error(transparent)] + HTTPClient(#[from] reqwest::Error), + #[error(transparent)] + SerdeJSON(#[from] serde_json::Error), + #[error("RPC: {0}")] + Rpc(String), +} + +#[derive(Debug, thiserror::Error)] +pub enum KzgError { + #[error("Kzg setup error: {0}")] + Setup(String), + #[error(transparent)] + Internal(#[from] rust_kzg_bn254::errors::KzgError), +} + +#[derive(Debug, thiserror::Error)] +pub enum ServiceManagerError { + #[error(transparent)] + EnrichedClient(#[from] EnrichedClientError), + #[error("Decoding error: {0}")] + Decoding(String), +} + +/// Errors for the Verifier +#[derive(Debug, thiserror::Error)] +pub enum VerificationError { + #[error(transparent)] + ServiceManager(#[from] ServiceManagerError), + #[error(transparent)] + Kzg(#[from] KzgError), + #[error("Wrong proof")] + WrongProof, + #[error("Different commitments: expected {expected:?}, got {actual:?}")] + DifferentCommitments { + expected: Box<G1Affine>, + actual: Box<G1Affine>, + }, + #[error("Different roots: expected {expected:?}, got {actual:?}")] + DifferentRoots { expected: String, actual: String }, + #[error("Empty hashes")] + EmptyHash, + #[error("Different hashes: expected {expected:?}, got {actual:?}")] + DifferentHashes { expected: String, actual: String }, + #[error("Wrong quorum params: {blob_quorum_params:?}")] + WrongQuorumParams { blob_quorum_params: BlobQuorumParam }, + #[error("Quorum not confirmed")] + QuorumNotConfirmed, + #[error("Commitment not on curve: {0}")] + CommitmentNotOnCurve(G1Affine), + #[error("Commitment not on correct subgroup: {0}")] + CommitmentNotOnCorrectSubgroup(G1Affine), + #[error("Point download error: {0}")] + PointDownloadError(String), +} diff --git a/core/node/da_clients/src/eigen/mod.rs b/core/node/da_clients/src/eigen/mod.rs index 699eae894246..b2fccc7d606d 100644 --- a/core/node/da_clients/src/eigen/mod.rs +++ b/core/node/da_clients/src/eigen/mod.rs @@ -1,8 +1,10 @@ +mod blob_info; mod client; +mod errors; mod sdk; +mod verifier; -pub use self::client::EigenClient; - +pub use self::client::{EigenClient, GetBlobData}; #[allow(clippy::all)] pub(crate) mod disperser { include!("generated/disperser.rs"); @@ -12,3 +14,24 @@ pub(crate) mod common { include!("generated/common.rs"); } + +#[cfg(test)] +pub fn test_eigenda_config() -> zksync_config::EigenConfig { + use std::str::FromStr; + + zksync_config::EigenConfig { + disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(), + settlement_layer_confirmation_depth: 0, + eigenda_eth_rpc: 
Some(zksync_basic_types::url::SensitiveUrl::from_str("https://ethereum-holesky-rpc.publicnode.com").unwrap()), // Safe to unwrap, never fails + eigenda_svc_manager_address: zksync_basic_types::H160([ + 0xd4, 0xa7, 0xe1, 0xbd, 0x80, 0x15, 0x05, 0x72, 0x93, 0xf0, 0xd0, 0xa5, 0x57, 0x08, 0x8c, + 0x28, 0x69, 0x42, 0xe8, 0x4b, + ]), // DEFAULT_EIGENDA_SVC_MANAGER_ADDRESS + wait_for_finalization: false, + authenticated: false, + points_source: zksync_config::configs::da_client::eigen::PointsSource::Url(( + "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(), + "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(), + )) + } +} diff --git a/core/node/da_clients/src/eigen/sdk.rs b/core/node/da_clients/src/eigen/sdk.rs index 7ab7ea3ce33b..3c0b56a4cc2a 100644 --- a/core/node/da_clients/src/eigen/sdk.rs +++ b/core/node/da_clients/src/eigen/sdk.rs @@ -1,76 +1,130 @@ -use std::{str::FromStr, time::Duration}; +use std::{str::FromStr, sync::Arc}; +use anyhow::Context; use secp256k1::{ecdsa::RecoverableSignature, SecretKey}; use tokio::sync::mpsc; -use tokio_stream::{wrappers::ReceiverStream, StreamExt}; +use tokio_stream::{wrappers::UnboundedReceiverStream, StreamExt}; use tonic::{ transport::{Channel, ClientTlsConfig, Endpoint}, Streaming, }; - +use zksync_config::EigenConfig; +use zksync_web3_decl::client::{Client, DynClient, L1}; + +use super::{ + blob_info::BlobInfo, + disperser::BlobInfo as DisperserBlobInfo, + errors::{ConfigError, EigenClientError, EthClientError, VerificationError}, + verifier::Verifier, + GetBlobData, +}; use crate::eigen::{ - disperser, + blob_info, disperser::{ + self, authenticated_request::Payload::{AuthenticationData, DisperseRequest}, disperser_client::DisperserClient, - AuthenticatedReply, BlobAuthHeader, BlobVerificationProof, DisperseBlobReply, + AuthenticatedReply, BlobAuthHeader, }, }; -#[derive(Debug, Clone)] -pub struct RawEigenClient { +#[derive(Debug)] +pub(crate) struct RawEigenClient { client: DisperserClient<Channel>, - polling_interval: Duration, private_key: SecretKey, - account_id: String, + pub config: EigenConfig, + verifier: Verifier, + blob_data_provider: Arc<dyn GetBlobData>, } pub(crate) const DATA_CHUNK_SIZE: usize = 32; impl RawEigenClient { - pub(crate) const BUFFER_SIZE: usize = 1000; + const BLOB_SIZE_LIMIT: usize = 1024 * 1024 * 2; // 2 MB pub async fn new( - rpc_node_url: String, - inclusion_polling_interval_ms: u64, private_key: SecretKey, - ) -> anyhow::Result<Self> { - let endpoint = - Endpoint::from_str(rpc_node_url.as_str())?.tls_config(ClientTlsConfig::new())?; + cfg: EigenConfig, + get_blob_data: Arc<dyn GetBlobData>, + ) -> Result<Self, EigenClientError> { + let endpoint = Endpoint::from_str(cfg.disperser_rpc.as_str()) + .map_err(ConfigError::Tonic)? + .tls_config(ClientTlsConfig::new()) + .map_err(ConfigError::Tonic)?; let client = DisperserClient::connect(endpoint) .await - .map_err(|e| anyhow::anyhow!("Failed to connect to Disperser server: {}", e))?; - let polling_interval = Duration::from_millis(inclusion_polling_interval_ms); - - let account_id = get_account_id(&private_key); - + .map_err(ConfigError::Tonic)?; + + let rpc_url = cfg + .eigenda_eth_rpc + .clone() + .ok_or(EthClientError::Rpc("EigenDA ETH RPC not set".to_string()))?; + let query_client: Client<L1> = Client::http(rpc_url) + .map_err(|e| EthClientError::Rpc(e.to_string()))? 
+ .build(); + let query_client = Box::new(query_client) as Box<DynClient<L1>>; + + let verifier = Verifier::new(cfg.clone(), Arc::new(query_client)).await?; Ok(RawEigenClient { client, - polling_interval, private_key, - account_id, + config: cfg, + verifier, + blob_data_provider: get_blob_data, }) } - pub async fn dispatch_blob(&self, data: Vec<u8>) -> anyhow::Result<String> { - let mut client_clone = self.client.clone(); - let (tx, rx) = mpsc::channel(Self::BUFFER_SIZE); + pub fn blob_size_limit() -> usize { + Self::BLOB_SIZE_LIMIT + } - let response_stream = client_clone.disperse_blob_authenticated(ReceiverStream::new(rx)); + async fn dispatch_blob_non_authenticated(&self, data: Vec<u8>) -> anyhow::Result<String> { let padded_data = convert_by_padding_empty_byte(&data); + let request = disperser::DisperseBlobRequest { + data: padded_data, + custom_quorum_numbers: vec![], + account_id: String::default(), // Account Id is not used in non-authenticated mode + }; + + let disperse_reply = self + .client + .clone() + .disperse_blob(request) + .await? + .into_inner(); + + match disperser::BlobStatus::try_from(disperse_reply.result)? { + disperser::BlobStatus::Failed + | disperser::BlobStatus::InsufficientSignatures + | disperser::BlobStatus::Unknown => Err(anyhow::anyhow!("Blob dispatch failed")), + + disperser::BlobStatus::Dispersing + | disperser::BlobStatus::Processing + | disperser::BlobStatus::Finalized + | disperser::BlobStatus::Confirmed => Ok(hex::encode(disperse_reply.request_id)), + } + } + + async fn dispatch_blob_authenticated(&self, data: Vec<u8>) -> anyhow::Result<String> { + let (tx, rx) = mpsc::unbounded_channel(); // 1. send DisperseBlobRequest - self.disperse_data(padded_data, &tx).await?; + let padded_data = convert_by_padding_empty_byte(&data); + self.disperse_data(padded_data, &tx)?; // this await is blocked until the first response on the stream, so we only await after sending the `DisperseBlobRequest` - let mut response_stream = response_stream.await?.into_inner(); + let mut response_stream = self + .client + .clone() + .disperse_blob_authenticated(UnboundedReceiverStream::new(rx)) + .await?; + let response_stream = response_stream.get_mut(); // 2. receive BlobAuthHeader - let blob_auth_header = self.receive_blob_auth_header(&mut response_stream).await?; + let blob_auth_header = self.receive_blob_auth_header(response_stream).await?; // 3. sign and send BlobAuthHeader - self.submit_authentication_data(blob_auth_header.clone(), &tx) - .await?; + self.submit_authentication_data(blob_auth_header.clone(), &tx)?; // 4. receive DisperseBlobReply let reply = response_stream @@ -85,41 +139,94 @@ return Err(anyhow::anyhow!("Unexpected response from server")); }; - // 5. poll for blob status until it reaches the Confirmed state - let verification_proof = self - .await_for_inclusion(client_clone, disperse_reply) - .await?; - let blob_id = format!( - "{}:{}", - verification_proof.batch_id, verification_proof.blob_index - ); - tracing::info!("Blob dispatch confirmed, blob id: {}", blob_id); + match disperser::BlobStatus::try_from(disperse_reply.result)? 
{ + disperser::BlobStatus::Failed + | disperser::BlobStatus::InsufficientSignatures + | disperser::BlobStatus::Unknown => Err(anyhow::anyhow!("Blob dispatch failed")), + + disperser::BlobStatus::Dispersing + | disperser::BlobStatus::Processing + | disperser::BlobStatus::Finalized + | disperser::BlobStatus::Confirmed => Ok(hex::encode(disperse_reply.request_id)), + } + } + + pub async fn get_commitment(&self, request_id: &str) -> anyhow::Result<Option<BlobInfo>> { + let blob_info = self.try_get_inclusion_data(request_id.to_string()).await?; + + let Some(blob_info) = blob_info else { + return Ok(None); + }; + let blob_info = blob_info::BlobInfo::try_from(blob_info) + .map_err(|e| anyhow::anyhow!("Failed to convert blob info: {}", e))?; + + let data = self.get_blob_data(blob_info.clone()).await?; + let data_db = self.blob_data_provider.get_blob_data(request_id).await?; + if let Some(data_db) = data_db { + if data_db != data { + return Err(anyhow::anyhow!( + "Data from db and from disperser are different" + )); + } + } + self.verifier + .verify_commitment(blob_info.blob_header.commitment.clone(), &data) + .context("Failed to verify commitment")?; + + let result = self + .verifier + .verify_inclusion_data_against_settlement_layer(&blob_info) + .await; + if let Err(e) = result { + match e { + // in case of an error, the dispatcher will retry, so we need to return None + VerificationError::EmptyHash => return Ok(None), + _ => return Err(anyhow::anyhow!("Failed to verify inclusion data: {:?}", e)), + } + } + + tracing::info!("Blob dispatch confirmed, request id: {}", request_id); + Ok(Some(blob_info)) + } + + pub async fn get_inclusion_data(&self, request_id: &str) -> anyhow::Result<Option<Vec<u8>>> { + let blob_info = self.get_commitment(request_id).await?; + if let Some(blob_info) = blob_info { + Ok(Some(blob_info.blob_verification_proof.inclusion_proof)) + } else { + Ok(None) + } + } - Ok(blob_id) + pub async fn dispatch_blob(&self, data: Vec<u8>) -> anyhow::Result<String> { + if self.config.authenticated { + self.dispatch_blob_authenticated(data).await + } else { + self.dispatch_blob_non_authenticated(data).await + } } - async fn disperse_data( + fn disperse_data( &self, data: Vec<u8>, - tx: &mpsc::Sender<disperser::AuthenticatedRequest>, + tx: &mpsc::UnboundedSender<disperser::AuthenticatedRequest>, ) -> anyhow::Result<()> { let req = disperser::AuthenticatedRequest { payload: Some(DisperseRequest(disperser::DisperseBlobRequest { data, custom_quorum_numbers: vec![], - account_id: self.account_id.clone(), + account_id: get_account_id(&self.private_key), })), }; tx.send(req) - .await .map_err(|e| anyhow::anyhow!("Failed to send DisperseBlobRequest: {}", e)) } - async fn submit_authentication_data( + fn submit_authentication_data( &self, blob_auth_header: BlobAuthHeader, - tx: &mpsc::Sender<disperser::AuthenticatedRequest>, + tx: &mpsc::UnboundedSender<disperser::AuthenticatedRequest>, ) -> anyhow::Result<()> { // TODO: replace challenge_parameter with actual auth header when it is available let digest = zksync_basic_types::web3::keccak256( @@ -143,7 +250,6 @@ impl RawEigenClient { }; tx.send(req) - .await .map_err(|e| anyhow::anyhow!("Failed to send AuthenticationData: {}", e)) } @@ -171,44 +277,67 @@ } } - async fn await_for_inclusion( + async fn try_get_inclusion_data( &self, - mut client: DisperserClient<Channel>, - disperse_blob_reply: DisperseBlobReply, - ) -> anyhow::Result<BlobVerificationProof> { + request_id: String, + ) -> anyhow::Result<Option<DisperserBlobInfo>> { let polling_request = disperser::BlobStatusRequest { - request_id: disperse_blob_reply.request_id, + request_id: hex::decode(request_id)?, }; - loop { - tokio::time::sleep(self.polling_interval).await; - let resp = client - 
.get_blob_status(polling_request.clone()) - .await? - .into_inner(); - - match disperser::BlobStatus::try_from(resp.status)? { - disperser::BlobStatus::Processing | disperser::BlobStatus::Dispersing => {} - disperser::BlobStatus::Failed => { - return Err(anyhow::anyhow!("Blob dispatch failed")) - } - disperser::BlobStatus::InsufficientSignatures => { - return Err(anyhow::anyhow!("Insufficient signatures")) - } - disperser::BlobStatus::Confirmed | disperser::BlobStatus::Finalized => { - let verification_proof = resp - .info - .ok_or_else(|| anyhow::anyhow!("No blob header in response"))? - .blob_verification_proof - .ok_or_else(|| anyhow::anyhow!("No blob verification proof in response"))?; - - return Ok(verification_proof); + let resp = self + .client + .clone() + .get_blob_status(polling_request) + .await? + .into_inner(); + + match disperser::BlobStatus::try_from(resp.status)? { + disperser::BlobStatus::Processing | disperser::BlobStatus::Dispersing => Ok(None), + disperser::BlobStatus::Failed => anyhow::bail!("Blob dispatch failed"), + disperser::BlobStatus::InsufficientSignatures => { + anyhow::bail!("Insufficient signatures") + } + disperser::BlobStatus::Confirmed => { + if !self.config.wait_for_finalization { + let blob_info = resp.info.context("No blob header in response")?; + return Ok(Some(blob_info)); } - - _ => return Err(anyhow::anyhow!("Received unknown blob status")), + Ok(None) } + disperser::BlobStatus::Finalized => { + let blob_info = resp.info.context("No blob header in response")?; + Ok(Some(blob_info)) + } + _ => anyhow::bail!("Received unknown blob status"), } } + + pub async fn get_blob_data(&self, blob_info: BlobInfo) -> anyhow::Result<Vec<u8>> { + use anyhow::anyhow; + + let blob_index = blob_info.blob_verification_proof.blob_index; + let batch_header_hash = blob_info + .blob_verification_proof + .batch_medatada + .batch_header_hash; + let get_response = self + .client + .clone() + .retrieve_blob(disperser::RetrieveBlobRequest { + batch_header_hash, + blob_index, + }) + .await? 
+ + pub async fn get_blob_data(&self, blob_info: BlobInfo) -> anyhow::Result<Vec<u8>> { + use anyhow::anyhow; + + let blob_index = blob_info.blob_verification_proof.blob_index; + let batch_header_hash = blob_info + .blob_verification_proof + .batch_medatada + .batch_header_hash; + let get_response = self + .client + .clone() + .retrieve_blob(disperser::RetrieveBlobRequest { + batch_header_hash, + blob_index, + }) + .await? + .into_inner(); + + if get_response.data.is_empty() { + return Err(anyhow!("Failed to get blob data")); + } + + let data = remove_empty_byte_from_padded_bytes(&get_response.data); + Ok(data) + } } fn get_account_id(secret_key: &SecretKey) -> String { @@ -222,25 +351,54 @@ fn get_account_id(secret_key: &SecretKey) -> String { fn convert_by_padding_empty_byte(data: &[u8]) -> Vec<u8> { let parse_size = DATA_CHUNK_SIZE - 1; - // Calculate the number of chunks - let data_len = (data.len() + parse_size - 1) / parse_size; + let chunk_count = data.len().div_ceil(parse_size); + let mut valid_data = Vec::with_capacity(data.len() + chunk_count); - // Pre-allocate `valid_data` with enough space for all chunks - let mut valid_data = vec![0u8; data_len * DATA_CHUNK_SIZE]; - let mut valid_end = data_len * DATA_CHUNK_SIZE; + for chunk in data.chunks(parse_size) { + valid_data.push(0x00); // Add the padding byte (0x00) + valid_data.extend_from_slice(chunk); + } - for (i, chunk) in data.chunks(parse_size).enumerate() { - let offset = i * DATA_CHUNK_SIZE; - valid_data[offset] = 0x00; // Set first byte of each chunk to 0x00 for big-endian compliance + valid_data +} - let copy_end = offset + 1 + chunk.len(); - valid_data[offset + 1..copy_end].copy_from_slice(chunk); +fn remove_empty_byte_from_padded_bytes(data: &[u8]) -> Vec<u8> { + let parse_size = DATA_CHUNK_SIZE; - if i == data_len - 1 && chunk.len() < parse_size { - valid_end = offset + 1 + chunk.len(); - } + let chunk_count = data.len().div_ceil(parse_size); + // Safe subtraction: chunk_count never exceeds the length of the data + let mut valid_data = Vec::with_capacity(data.len() - chunk_count); + + for chunk in data.chunks(parse_size) { + valid_data.extend_from_slice(&chunk[1..]); } - valid_data.truncate(valid_end); valid_data } + +#[cfg(test)] +mod test { + #[test] + fn test_pad_and_unpad() { + let data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9]; + let padded_data = super::convert_by_padding_empty_byte(&data); + let unpadded_data = super::remove_empty_byte_from_padded_bytes(&padded_data); + assert_eq!(data, unpadded_data); + } + + #[test] + fn test_pad_and_unpad_large() { + let data = vec![1; 1000]; + let padded_data = super::convert_by_padding_empty_byte(&data); + let unpadded_data = super::remove_empty_byte_from_padded_bytes(&padded_data); + assert_eq!(data, unpadded_data); + } + + #[test] + fn test_pad_and_unpad_empty() { + let data = Vec::new(); + let padded_data = super::convert_by_padding_empty_byte(&data); + let unpadded_data = super::remove_empty_byte_from_padded_bytes(&padded_data); + assert_eq!(data, unpadded_data); + } +}
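Assuming `DATA_CHUNK_SIZE` is 32 (one BN254 field element), each 31-byte chunk above gains a leading `0x00`, so a payload of length n grows to n + ceil(n / 31) bytes and every 32-byte word stays below the field modulus. A sketch of an extra round-trip test pinning down that size formula:

```rust
#[test]
fn padded_length_matches_formula() {
    for n in [0usize, 1, 31, 32, 62, 1000] {
        let data = vec![0xAB; n];
        let padded = super::convert_by_padding_empty_byte(&data);
        // One 0x00 prefix per 31-byte chunk.
        assert_eq!(padded.len(), n + n.div_ceil(31));
        assert_eq!(super::remove_empty_byte_from_padded_bytes(&padded), data);
    }
}
```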
diff --git a/core/node/da_clients/src/eigen/verifier/mod.rs b/core/node/da_clients/src/eigen/verifier/mod.rs new file mode 100644 index 000000000000..a53d7b016db5 --- /dev/null +++ b/core/node/da_clients/src/eigen/verifier/mod.rs @@ -0,0 +1,545 @@ +use std::{ + collections::HashMap, + io::Write, + path::{Path, PathBuf}, + sync::Arc, +}; + +use ark_bn254::{Fq, G1Affine}; +use ethabi::{encode, ParamType, Token}; +use rust_kzg_bn254::{blob::Blob, kzg::Kzg, polynomial::PolynomialFormat}; +use tempfile::NamedTempFile; +use tokio::task::JoinHandle; +use zksync_basic_types::web3::CallRequest; +use zksync_config::{configs::da_client::eigen::PointsSource, EigenConfig}; +use zksync_eth_client::EthInterface; +use zksync_types::{web3, Address, U256, U64}; +use zksync_web3_decl::client::{DynClient, L1}; + +use super::{ + blob_info::{BatchHeader, BlobHeader, BlobInfo, G1Commitment}, + errors::{KzgError, ServiceManagerError, VerificationError}, + sdk::RawEigenClient, +}; + +#[cfg(test)] +mod tests; + +fn decode_bytes(encoded: Vec<u8>) -> Result<Vec<u8>, VerificationError> { + let output_type = [ParamType::Bytes]; + let tokens = ethabi::decode(&output_type, &encoded) + .map_err(|e| ServiceManagerError::Decoding(e.to_string()))?; + + // Safe unwrap because decode guarantees type correctness and non-empty output + let token = tokens.into_iter().next().unwrap(); + + // Safe unwrap, as type is guaranteed + Ok(token.into_bytes().unwrap()) +} + +#[async_trait::async_trait] +pub trait VerifierClient: Sync + Send + std::fmt::Debug { + /// Request to the EigenDA service manager contract + /// the batch metadata hash for a given batch id + async fn batch_id_to_batch_metadata_hash( + &self, + batch_id: u32, + svc_manager_addr: Address, + settlement_layer_confirmation_depth: Option<U64>, + ) -> Result<Vec<u8>, VerificationError>; + + /// Request to the EigenDA service manager contract + /// the quorum adversary threshold percentages for a given quorum number + async fn quorum_adversary_threshold_percentages( + &self, + quorum_number: u32, + svc_manager_addr: Address, + ) -> Result<u8, VerificationError>; + + /// Request to the EigenDA service manager contract + /// the set of quorum numbers that are required + async fn required_quorum_numbers( + &self, + svc_manager_addr: Address, + ) -> Result<Vec<u8>, VerificationError>; +} + +#[async_trait::async_trait] +impl VerifierClient for Box<DynClient<L1>> { + async fn batch_id_to_batch_metadata_hash( + &self, + batch_id: u32, + svc_manager_addr: Address, + settlement_layer_confirmation_depth: Option<U64>, + ) -> Result<Vec<u8>, VerificationError> { + let mut data = vec![]; + let func_selector = + ethabi::short_signature("batchIdToBatchMetadataHash", &[ParamType::Uint(32)]); + data.extend_from_slice(&func_selector); + let batch_id_data = encode(&[Token::Uint(U256::from(batch_id))]); + data.extend_from_slice(&batch_id_data); + + let call_request = CallRequest { + to: Some(svc_manager_addr), + data: Some(zksync_basic_types::web3::Bytes(data)), + ..Default::default() + }; + + let block_id = match settlement_layer_confirmation_depth { + Some(depth) => { + let depth = depth.saturating_sub(U64::one()); + let mut current_block = self + .block_number() + .await + .map_err(ServiceManagerError::EnrichedClient)?; + current_block = current_block.saturating_sub(depth); + Some(current_block.into()) + } + None => None, + }; + + let res = self + .as_ref() + .call_contract_function(call_request, block_id) + .await + .map_err(ServiceManagerError::EnrichedClient)?; + + Ok(res.0.to_vec()) + } + + async fn quorum_adversary_threshold_percentages( + &self, + quorum_number: u32, + svc_manager_addr: Address, + ) -> Result<u8, VerificationError> { + let func_selector = ethabi::short_signature("quorumAdversaryThresholdPercentages", &[]); + let data = func_selector.to_vec(); + + let call_request = CallRequest { + to: Some(svc_manager_addr), + data: Some(zksync_basic_types::web3::Bytes(data)), + ..Default::default() + }; + + let res = self + .as_ref() + .call_contract_function(call_request, None) + .await + .map_err(ServiceManagerError::EnrichedClient)?; + + let percentages = decode_bytes(res.0)?; + + if percentages.len() > quorum_number as usize { + return Ok(percentages[quorum_number as usize]); + } + Ok(0) + } + + async fn required_quorum_numbers( + &self, + svc_manager_addr: Address, + ) -> Result<Vec<u8>, VerificationError> { + let func_selector = ethabi::short_signature("quorumNumbersRequired", &[]); + let data = func_selector.to_vec(); + let call_request = CallRequest { + to: Some(svc_manager_addr), + data: Some(zksync_basic_types::web3::Bytes(data)), + ..Default::default() + }; + let res = self + .as_ref() + .call_contract_function(call_request, None) + .await + .map_err(ServiceManagerError::EnrichedClient)?; + + decode_bytes(res.0.to_vec()) + } +}
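The eth_calls above are made without a contract ABI binding: calldata is a 4-byte selector followed by the ABI-encoded arguments. A standalone check of that layout; the expected selector bytes `eccbbfc9` come from the mocked test later in this diff:

```rust
use ethabi::{encode, short_signature, ParamType, Token};

fn main() {
    let selector = short_signature("batchIdToBatchMetadataHash", &[ParamType::Uint(32)]);
    assert_eq!(selector.to_vec(), hex::decode("eccbbfc9").unwrap());

    // Selector + one 32-byte word holding the uint32 batch id (66507 = 0x103cb).
    let mut calldata = selector.to_vec();
    calldata.extend_from_slice(&encode(&[Token::Uint(ethabi::Uint::from(66507u32))]));
    assert_eq!(calldata.len(), 4 + 32);
}
```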
+#[derive(Debug)] +enum PointFile { + Temp(NamedTempFile), + Path(PathBuf), +} + +impl PointFile { + fn path(&self) -> &Path { + match self { + PointFile::Temp(file) => file.path(), + PointFile::Path(path) => path.as_path(), + } + } +} + +/// Verifies the integrity of the blob info. +/// Kzg is used for commitment verification; +/// the EigenDA service manager contract is used for the settlement-layer checks. +#[derive(Debug)] +pub struct Verifier { + kzg: Kzg, + cfg: EigenConfig, + client: Arc<dyn VerifierClient>, +} + +impl Verifier { + pub const DEFAULT_PRIORITY_FEE_PER_GAS: u64 = 100; + pub const SRSORDER: u32 = 1 << 28; // 2 ^ 28 + pub const G1POINT: &'static str = "g1.point"; + pub const G2POINT: &'static str = "g2.point.powerOf2"; + pub const POINT_SIZE: u32 = 32; + + async fn download_temp_point(url: &String) -> Result<NamedTempFile, VerificationError> { + let response = reqwest::get(url) + .await + .map_err(|e| VerificationError::PointDownloadError(e.to_string()))?; + + if !response.status().is_success() { + return Err(VerificationError::PointDownloadError(format!( + "Failed to download point from source {}", + url + ))); + } + + let content = response + .bytes() + .await + .map_err(|e| VerificationError::PointDownloadError(e.to_string()))?; + + // Temp file writing uses `std::fs`, so we need to spawn a blocking task + let temp_file = tokio::task::spawn_blocking(move || { + let mut file = NamedTempFile::new() + .map_err(|e| VerificationError::PointDownloadError(e.to_string()))?; + + file.write_all(&content) + .map_err(|e| VerificationError::PointDownloadError(e.to_string()))?; + + file.flush() + .map_err(|e| VerificationError::PointDownloadError(e.to_string()))?; + + Ok::<NamedTempFile, VerificationError>(file) + }) + .await + .map_err(|e| VerificationError::PointDownloadError(e.to_string()))??; + + Ok::<NamedTempFile, VerificationError>(temp_file) + } + + async fn get_points(cfg: &EigenConfig) -> Result<(PointFile, PointFile), VerificationError> { + match &cfg.points_source { + PointsSource::Path(path) => Ok(( + PointFile::Path(PathBuf::from(format!("{}/{}", path, Self::G1POINT))), + PointFile::Path(PathBuf::from(format!("{}/{}", path, Self::G2POINT))), + )), + PointsSource::Url((g1_url, g2_url)) => { + tracing::info!("Downloading points for KZG setup to a temp file"); + Ok(( + PointFile::Temp(Self::download_temp_point(g1_url).await?), + PointFile::Temp(Self::download_temp_point(g2_url).await?), + )) + } + } + } + + pub(crate) async fn new( + cfg: EigenConfig, + client: Arc<dyn VerifierClient>, + ) -> Result<Self, VerificationError> { + let srs_points_to_load = RawEigenClient::blob_size_limit() as u32 / Self::POINT_SIZE; + let (g1_point_file, g2_point_file) = Self::get_points(&cfg).await?; + let kzg_handle: JoinHandle<Result<Kzg, KzgError>> = + tokio::task::spawn_blocking(move || { + let g1_point_file_path = g1_point_file.path().to_str().ok_or(KzgError::Setup( + "Could not format point path into a valid string".to_string(), + ))?; + let g2_point_file_path = g2_point_file.path().to_str().ok_or(KzgError::Setup( + "Could not format point path into a valid string".to_string(), + ))?; + Kzg::setup( + g1_point_file_path, + "", + g2_point_file_path, + Self::SRSORDER, + srs_points_to_load, + "".to_string(), + ) + .map_err(KzgError::Internal) + }); + let kzg = kzg_handle + .await + .map_err(|e| VerificationError::Kzg(KzgError::Setup(e.to_string())))??; + + Ok(Self { kzg, cfg, client }) + }
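`new` loads only as many G1 points as the largest possible blob needs: one point per 32-byte field element. Back-of-the-envelope arithmetic, under the assumption of a 2 MiB blob size limit (illustrative only; the real value comes from `RawEigenClient::blob_size_limit()`):

```rust
fn main() {
    let blob_size_limit: u32 = 2 * 1024 * 1024; // assumed limit, not the crate constant
    let srs_points_to_load = blob_size_limit / 32; // Verifier::POINT_SIZE
    assert_eq!(srs_points_to_load, 65_536);
    assert!(srs_points_to_load <= 1 << 28); // well within Verifier::SRSORDER
}
```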
+ + /// Return the commitment from a blob + fn commit(&self, blob: &[u8]) -> Result<G1Affine, VerificationError> { + let blob = Blob::from_bytes_and_pad(blob); + self.kzg + .blob_to_kzg_commitment(&blob, PolynomialFormat::InEvaluationForm) + .map_err(|e| VerificationError::Kzg(KzgError::Internal(e))) + } + + /// Compare the given commitment with the commitment generated with the blob + pub fn verify_commitment( + &self, + expected_commitment: G1Commitment, + blob: &[u8], + ) -> Result<(), VerificationError> { + let actual_commitment = self.commit(blob)?; + let expected_commitment = G1Affine::new_unchecked( + Fq::from(num_bigint::BigUint::from_bytes_be(&expected_commitment.x)), + Fq::from(num_bigint::BigUint::from_bytes_be(&expected_commitment.y)), + ); + if !expected_commitment.is_on_curve() { + return Err(VerificationError::CommitmentNotOnCurve(expected_commitment)); + } + if !expected_commitment.is_in_correct_subgroup_assuming_on_curve() { + return Err(VerificationError::CommitmentNotOnCorrectSubgroup( + expected_commitment, + )); + } + if actual_commitment != expected_commitment { + return Err(VerificationError::DifferentCommitments { + expected: Box::new(expected_commitment), + actual: Box::new(actual_commitment), + }); + } + Ok(()) + } + + pub(crate) fn hash_encode_blob_header(&self, blob_header: &BlobHeader) -> Vec<u8> { + let mut blob_quorums = vec![]; + for quorum in &blob_header.blob_quorum_params { + let quorum = Token::Tuple(vec![ + Token::Uint(ethabi::Uint::from(quorum.quorum_number)), + Token::Uint(ethabi::Uint::from(quorum.adversary_threshold_percentage)), + Token::Uint(ethabi::Uint::from(quorum.confirmation_threshold_percentage)), + Token::Uint(ethabi::Uint::from(quorum.chunk_length)), + ]); + blob_quorums.push(quorum); + } + let blob_header = Token::Tuple(vec![ + Token::Tuple(vec![ + Token::Uint(ethabi::Uint::from_big_endian(&blob_header.commitment.x)), + Token::Uint(ethabi::Uint::from_big_endian(&blob_header.commitment.y)), + ]), + Token::Uint(ethabi::Uint::from(blob_header.data_length)), + Token::Array(blob_quorums), + ]); + + let encoded = encode(&[blob_header]); + web3::keccak256(&encoded).to_vec() + } + + pub(crate) fn process_inclusion_proof( + &self, + proof: &[u8], + leaf: [u8; 32], + index: u32, + ) -> Result<Vec<u8>, VerificationError> { + let mut index = index; + if proof.is_empty() || proof.len() % 32 != 0 { + return Err(VerificationError::WrongProof); + } + let mut computed_hash = leaf.to_vec(); + for chunk in proof.chunks(32) { + let mut buffer = [0u8; 64]; + if index % 2 == 0 { + buffer[..32].copy_from_slice(&computed_hash); + buffer[32..].copy_from_slice(chunk); + } else { + buffer[..32].copy_from_slice(chunk); + buffer[32..].copy_from_slice(&computed_hash); + } + computed_hash = web3::keccak256(&buffer).to_vec(); + index /= 2; + } + + Ok(computed_hash) + } + + /// Verifies the certificate's batch root + pub(crate) fn verify_merkle_proof(&self, cert: &BlobInfo) -> Result<(), VerificationError> { + let inclusion_proof = &cert.blob_verification_proof.inclusion_proof; + let root = &cert + .blob_verification_proof + .batch_medatada + .batch_header + .batch_root; + let blob_index = cert.blob_verification_proof.blob_index; + let blob_header = &cert.blob_header; + + let blob_header_hash = self.hash_encode_blob_header(blob_header); + let leaf_hash = web3::keccak256(&blob_header_hash); + + let generated_root = + self.process_inclusion_proof(inclusion_proof, leaf_hash, blob_index)?; + + if generated_root != *root { + return Err(VerificationError::DifferentRoots { + expected: hex::encode(root), + actual: hex::encode(&generated_root), + }); + } + Ok(()) + }
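`process_inclusion_proof` is the standard Merkle-path fold: the bit parity of the index decides whether the running hash is the left or the right half of the next keccak preimage. A self-contained two-leaf check of the same logic (the `fold` helper restates the method body as a free function):

```rust
use zksync_basic_types::web3::keccak256;

// Pure restatement of the fold, for illustration only.
fn fold(proof: &[u8], leaf: [u8; 32], mut index: u32) -> Vec<u8> {
    let mut computed = leaf.to_vec();
    for chunk in proof.chunks(32) {
        let mut buf = [0u8; 64];
        if index % 2 == 0 {
            buf[..32].copy_from_slice(&computed);
            buf[32..].copy_from_slice(chunk);
        } else {
            buf[..32].copy_from_slice(chunk);
            buf[32..].copy_from_slice(&computed);
        }
        computed = keccak256(&buf).to_vec();
        index /= 2;
    }
    computed
}

fn main() {
    let (leaf0, leaf1) = ([0x11u8; 32], [0x22u8; 32]);
    let mut pair = [0u8; 64];
    pair[..32].copy_from_slice(&leaf0);
    pair[32..].copy_from_slice(&leaf1);
    let root = keccak256(&pair);
    // The proof for leaf 0 is just its sibling; the even index keeps the
    // running hash on the left, reproducing the root.
    assert_eq!(fold(&leaf1, leaf0, 0), root.to_vec());
}
```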
+ + fn hash_batch_metadata( + &self, + batch_header: &BatchHeader, + signatory_record_hash: &[u8], + confirmation_block_number: u32, + ) -> Vec<u8> { + let batch_header_token = Token::Tuple(vec![ + Token::FixedBytes(batch_header.batch_root.clone()), // Clone only where necessary + Token::Bytes(batch_header.quorum_numbers.clone()), + Token::Bytes(batch_header.quorum_signed_percentages.clone()), + Token::Uint(ethabi::Uint::from(batch_header.reference_block_number)), + ]); + + let encoded = encode(&[batch_header_token]); + let header_hash = web3::keccak256(&encoded).to_vec(); + + let hash_token = Token::Tuple(vec![ + Token::FixedBytes(header_hash.to_vec()), + Token::FixedBytes(signatory_record_hash.to_owned()), // Clone only if required + ]); + + let mut hash_encoded = encode(&[hash_token]); + + hash_encoded.append(&mut confirmation_block_number.to_be_bytes().to_vec()); + web3::keccak256(&hash_encoded).to_vec() + } + + async fn call_batch_id_to_metadata_hash( + &self, + blob_info: &BlobInfo, + ) -> Result<Vec<u8>, VerificationError> { + self.client + .as_ref() + .batch_id_to_batch_metadata_hash( + blob_info.blob_verification_proof.batch_id, + self.cfg.eigenda_svc_manager_address, + Some(U64::from(self.cfg.settlement_layer_confirmation_depth)), + ) + .await + } + + /// Verifies the certificate batch hash + pub(crate) async fn verify_batch(&self, blob_info: &BlobInfo) -> Result<(), VerificationError> { + let expected_hash = self.call_batch_id_to_metadata_hash(blob_info).await?; + + if expected_hash == vec![0u8; 32] { + return Err(VerificationError::EmptyHash); + } + + let actual_hash = self.hash_batch_metadata( + &blob_info + .blob_verification_proof + .batch_medatada + .batch_header, + &blob_info + .blob_verification_proof + .batch_medatada + .signatory_record_hash, + blob_info + .blob_verification_proof + .batch_medatada + .confirmation_block_number, + ); + + if expected_hash != actual_hash { + return Err(VerificationError::DifferentHashes { + expected: hex::encode(&expected_hash), + actual: hex::encode(&actual_hash), + }); + } + Ok(()) + } + + async fn get_quorum_adversary_threshold( + &self, + quorum_number: u32, + ) -> Result<u8, VerificationError> { + self.client + .as_ref() + .quorum_adversary_threshold_percentages( + quorum_number, + self.cfg.eigenda_svc_manager_address, + ) + .await + } + + async fn call_quorum_numbers_required(&self) -> Result<Vec<u8>, VerificationError> { + self.client + .as_ref() + .required_quorum_numbers(self.cfg.eigenda_svc_manager_address) + .await + }
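`verify_security_params` below enforces, per quorum: the adversary threshold must not exceed the confirmation threshold, the signed percentage must reach the confirmation threshold, and every on-chain required quorum must be confirmed. The same checks on the sample values used by the tests later in this diff:

```rust
use std::collections::HashMap;

fn main() {
    // One sample quorum (values taken from the tests below).
    let (quorum_number, adversary, confirmation, signed) = (0u32, 33u32, 55u32, 100u32);
    assert!(adversary <= confirmation);
    assert!(signed >= confirmation);

    // Every required quorum must have been confirmed.
    let mut confirmed_quorums: HashMap<u32, bool> = HashMap::new();
    confirmed_quorums.insert(quorum_number, true);
    confirmed_quorums.insert(1, true);
    let required_quorums: Vec<u8> = vec![0, 1]; // as decoded from quorumNumbersRequired()
    assert!(required_quorums
        .iter()
        .all(|q| confirmed_quorums.contains_key(&(*q as u32))));
}
```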
+ + /// Verifies that the certificate's blob quorum params are correct + pub async fn verify_security_params(&self, cert: &BlobInfo) -> Result<(), VerificationError> { + let blob_header = &cert.blob_header; + let batch_header = &cert.blob_verification_proof.batch_medatada.batch_header; + + let mut confirmed_quorums: HashMap<u32, bool> = HashMap::new(); + for i in 0..blob_header.blob_quorum_params.len() { + if batch_header.quorum_numbers[i] as u32 + != blob_header.blob_quorum_params[i].quorum_number + { + return Err(VerificationError::WrongQuorumParams { + blob_quorum_params: blob_header.blob_quorum_params[i].clone(), + }); + } + if blob_header.blob_quorum_params[i].adversary_threshold_percentage + > blob_header.blob_quorum_params[i].confirmation_threshold_percentage + { + return Err(VerificationError::WrongQuorumParams { + blob_quorum_params: blob_header.blob_quorum_params[i].clone(), + }); + } + let quorum_adversary_threshold = self + .get_quorum_adversary_threshold(blob_header.blob_quorum_params[i].quorum_number) + .await?; + + if quorum_adversary_threshold > 0 + && blob_header.blob_quorum_params[i].adversary_threshold_percentage + < quorum_adversary_threshold as u32 + { + return Err(VerificationError::WrongQuorumParams { + blob_quorum_params: blob_header.blob_quorum_params[i].clone(), + }); + } + + if (batch_header.quorum_signed_percentages[i] as u32) + < blob_header.blob_quorum_params[i].confirmation_threshold_percentage + { + return Err(VerificationError::WrongQuorumParams { + blob_quorum_params: blob_header.blob_quorum_params[i].clone(), + }); + } + + confirmed_quorums.insert(blob_header.blob_quorum_params[i].quorum_number, true); + } + + let required_quorums = self.call_quorum_numbers_required().await?; + + for quorum in required_quorums { + if !confirmed_quorums.contains_key(&(quorum as u32)) { + return Err(VerificationError::QuorumNotConfirmed); + } + } + Ok(()) + } + + /// Verifies that the certificate is valid + pub async fn verify_inclusion_data_against_settlement_layer( + &self, + cert: &BlobInfo, + ) -> Result<(), VerificationError> { + self.verify_batch(cert).await?; + self.verify_merkle_proof(cert)?; + self.verify_security_params(cert).await?; + Ok(()) + } +}
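Taken together, the client-side verification performed by `get_commitment` reduces to: check the KZG commitment against the retrieved data, then check the certificate against the settlement layer. A condensed sketch of that sequence (the `blob_info` and `data` arguments are assumed to come from the disperser as shown earlier):

```rust
async fn verify_blob(
    verifier: &Verifier,
    blob_info: &BlobInfo,
    data: &[u8],
) -> Result<(), VerificationError> {
    verifier.verify_commitment(blob_info.blob_header.commitment.clone(), data)?;
    // Runs verify_batch, verify_merkle_proof and verify_security_params in order.
    verifier
        .verify_inclusion_data_against_settlement_layer(blob_info)
        .await
}
```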
diff --git a/core/node/da_clients/src/eigen/verifier/tests.rs b/core/node/da_clients/src/eigen/verifier/tests.rs new file mode 100644 index 000000000000..b171caf61fff --- /dev/null +++ b/core/node/da_clients/src/eigen/verifier/tests.rs @@ -0,0 +1,795 @@ +use std::{collections::HashMap, str::FromStr, sync::Arc}; + +use ethabi::{ParamType, Token}; +use zksync_types::{ + url::SensitiveUrl, + web3::{Bytes, CallRequest}, + Address, H160, U256, U64, +}; +use zksync_web3_decl::client::{Client, DynClient, L1}; + +use super::VerificationError; +use crate::eigen::{ + blob_info::{ + BatchHeader, BatchMetadata, BlobHeader, BlobInfo, BlobQuorumParam, BlobVerificationProof, + G1Commitment, + }, + test_eigenda_config, + verifier::{decode_bytes, Verifier, VerifierClient}, +}; + +/// Mock struct for the Verifier +/// Used to avoid making actual calls to a remote disperser +/// and possibly making the CI fail due to network issues. +/// To run tests with the actual verifier run: +/// `cargo test -p zksync_da_clients -- --ignored` +#[derive(Debug)] +pub struct MockVerifierClient { + replies: HashMap<String, Bytes>, +} + +impl MockVerifierClient { + pub fn new(replies: HashMap<String, Bytes>) -> Self { + Self { replies } + } +} + +#[async_trait::async_trait] +impl VerifierClient for MockVerifierClient { + async fn batch_id_to_batch_metadata_hash( + &self, + batch_id: u32, + svc_manager_addr: Address, + _settlement_layer_confirmation_depth: Option<U64>, + ) -> Result<Vec<u8>, VerificationError> { + let mut data = vec![]; + let func_selector = + ethabi::short_signature("batchIdToBatchMetadataHash", &[ParamType::Uint(32)]); + data.extend_from_slice(&func_selector); + let batch_id_data = ethabi::encode(&[Token::Uint(U256::from(batch_id))]); + data.extend_from_slice(&batch_id_data); + + let call_request = CallRequest { + to: Some(svc_manager_addr), + data: Some(zksync_basic_types::web3::Bytes(data)), + ..Default::default() + }; + + let req = serde_json::to_string(&call_request).unwrap(); + Ok(self.replies.get(&req).unwrap().clone().0) + } + + async fn quorum_adversary_threshold_percentages( + &self, + quorum_number: u32, + svc_manager_addr: Address, + ) -> Result<u8, VerificationError> { + let func_selector = ethabi::short_signature("quorumAdversaryThresholdPercentages", &[]); + let data = func_selector.to_vec(); + + let call_request = CallRequest { + to: Some(svc_manager_addr), + data: Some(zksync_basic_types::web3::Bytes(data)), + ..Default::default() + }; + + let req = serde_json::to_string(&call_request).unwrap(); + let res = self.replies.get(&req).unwrap().clone(); + let percentages = decode_bytes(res.0)?; + + if percentages.len() > quorum_number as usize { + return Ok(percentages[quorum_number as usize]); + } + Ok(0) + } + + async fn required_quorum_numbers( + &self, + svc_manager_addr: Address, + ) -> Result<Vec<u8>, VerificationError> { + let func_selector = ethabi::short_signature("quorumNumbersRequired", &[]); + let data = func_selector.to_vec(); + let call_request = CallRequest { + to: Some(svc_manager_addr), + data: Some(zksync_basic_types::web3::Bytes(data)), + ..Default::default() + }; + + let req = serde_json::to_string(&call_request).unwrap(); + let res = self.replies.get(&req).unwrap().clone(); + decode_bytes(res.0.to_vec()) + } +} + +fn create_remote_query_client() -> Box<DynClient<L1>> { + let url = SensitiveUrl::from_str("https://ethereum-holesky-rpc.publicnode.com").unwrap(); + let query_client: Client<L1> = Client::http(url).unwrap().build(); + Box::new(query_client) as Box<DynClient<L1>> +} + +#[ignore = "depends on external RPC"] +#[tokio::test] +async fn test_verify_commitment() { + let cfg = test_eigenda_config(); + let query_client = create_remote_query_client(); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let commitment = G1Commitment { + x: vec![ + 22, 11, 176, 29, 82, 48, 62, 49, 51, 119, 94, 17, 156, 142, 248, 96, 240, 183, 134, 85, + 152, 5, 74, 27, 175, 83, 162, 148, 17, 110, 201, 74, + ], + y: vec![ + 12, 132, 236, 56, 147, 6, 176, 135, 244, 166, 21, 18, 87, 76, 122, 3, 23, 22, 254, 236, + 148, 129, 110, 207, 131, 116, 58, 170, 4, 130, 191, 157, + ], + }; + let blob = vec![1u8; 100]; // The blob actually sent was KZG-padded, but Blob::from_bytes_and_pad pads internally, so no manual padding is needed here. + let result = verifier.verify_commitment(commitment, &blob); + assert!(result.is_ok()); +} + +/// Test the verification of the commitment with a mocked verifier.
+/// To test actual behaviour of the verifier, run the test above +#[tokio::test] +async fn test_verify_commitment_mocked() { + let cfg = test_eigenda_config(); + let query_client = MockVerifierClient::new(HashMap::new()); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let commitment = G1Commitment { + x: vec![ + 22, 11, 176, 29, 82, 48, 62, 49, 51, 119, 94, 17, 156, 142, 248, 96, 240, 183, 134, 85, + 152, 5, 74, 27, 175, 83, 162, 148, 17, 110, 201, 74, + ], + y: vec![ + 12, 132, 236, 56, 147, 6, 176, 135, 244, 166, 21, 18, 87, 76, 122, 3, 23, 22, 254, 236, + 148, 129, 110, 207, 131, 116, 58, 170, 4, 130, 191, 157, + ], + }; + let blob = vec![1u8; 100]; // The blob actually sent was KZG-padded, but Blob::from_bytes_and_pad pads internally, so no manual padding is needed here. + let result = verifier.verify_commitment(commitment, &blob); + assert!(result.is_ok()); +} + +#[ignore = "depends on external RPC"] +#[tokio::test] +async fn test_verify_merkle_proof() { + let cfg = test_eigenda_config(); + let query_client = create_remote_query_client(); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, 8, + 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, 146, + 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, 140, + 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, 125, 123, + 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, 157, 203, 22, + 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, 128, 112, 84, 34, + 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, 5, 120, 18, 187, 51, + 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, 166, 66, 157, 255, 237, 69, + 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, 156, 220, 159, 4, 99, 48, 191, + 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, 182, 109, 207, 197, 239, 161, 132, + 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, 253, 23, 169, 75, 236, 211, 126, 121, + 132, 191, 68, 167, 200, 16, 154, 149, 202, 197, 7, 191, 26, 8, 67, 3,
37, 137, 16, + 153, 30, 209, 238, 53, 233, 148, 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, + 255, 219, 170, 98, 17, 160, 179, 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, + 73, 78, 79, 72, 253, 105, 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_merkle_proof(&cert); + assert!(result.is_ok()); +} + +/// Test the verification of a merkle proof with a mocked verifier. +/// To test actual behaviour of the verifier, run the test above +#[tokio::test] +async fn test_verify_merkle_proof_mocked() { + let cfg = test_eigenda_config(); + let query_client = MockVerifierClient::new(HashMap::new()); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, 8, + 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, 146, + 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, 140, + 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, 125, 123, + 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, 157, 203, 22, + 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, 128, 112, 84, 34, + 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, 5, 120, 18, 187, 51, + 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, 166, 66, 157, 255, 237, 69, + 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, 156, 220, 159, 4, 99, 48, 191, + 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, 182, 109, 207, 197, 239, 161, 132, + 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, 253, 23, 169, 75, 236, 211, 126, 121, + 132, 191, 68, 167, 200, 16, 154, 149, 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, + 153, 30, 209, 238, 53, 233, 148, 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, + 255, 219, 170, 98, 17, 160, 179, 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, + 73, 78, 79, 72, 253, 105, 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_merkle_proof(&cert); + assert!(result.is_ok()); +} + +#[ignore = "depends on external RPC"] +#[tokio::test] +async fn
test_hash_blob_header() { + let cfg = test_eigenda_config(); + let query_client = create_remote_query_client(); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let blob_header = BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ], + }, + data_length: 2, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 2, + adversary_threshold_percentage: 4, + confirmation_threshold_percentage: 5, + chunk_length: 6, + }, + BlobQuorumParam { + quorum_number: 2, + adversary_threshold_percentage: 4, + confirmation_threshold_percentage: 5, + chunk_length: 6, + }, + ], + }; + let result = verifier.hash_encode_blob_header(&blob_header); + let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393"; + assert_eq!(result, hex::decode(expected).unwrap()); +} + +/// Test hashing of a blob header with a mocked verifier. +/// To test actual behaviour of the verifier, run the test above +#[tokio::test] +async fn test_hash_blob_header_mocked() { + let cfg = test_eigenda_config(); + let query_client = MockVerifierClient::new(HashMap::new()); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let blob_header = BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ], + }, + data_length: 2, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 2, + adversary_threshold_percentage: 4, + confirmation_threshold_percentage: 5, + chunk_length: 6, + }, + BlobQuorumParam { + quorum_number: 2, + adversary_threshold_percentage: 4, + confirmation_threshold_percentage: 5, + chunk_length: 6, + }, + ], + }; + let result = verifier.hash_encode_blob_header(&blob_header); + let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393"; + assert_eq!(result, hex::decode(expected).unwrap()); +} + +#[ignore = "depends on external RPC"] +#[tokio::test] +async fn test_inclusion_proof() { + let cfg = test_eigenda_config(); + let query_client = create_remote_query_client(); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let proof = hex::decode("c455c1ea0e725d7ea3e5f29e9f48be8fc2787bb0a914d5a86710ba302c166ac4f626d76f67f1055bb960a514fb8923af2078fd84085d712655b58a19612e8cd15c3e4ac1cef57acde3438dbcf63f47c9fefe1221344c4d5c1a4943dd0d1803091ca81a270909dc0e146841441c9bd0e08e69ce6168181a3e4060ffacf3627480bec6abdd8d7bb92b49d33f180c42f49e041752aaded9c403db3a17b85e48a11e9ea9a08763f7f383dab6d25236f1b77c12b4c49c5cdbcbea32554a604e3f1d2f466851cb43fe73617b3d01e665e4c019bf930f92dea7394c25ed6a1e200d051fb0c30a2193c459f1cfef00bf1ba6656510d16725a4d1dc031cb759dbc90bab427b0f60ddc6764681924dda848824605a4f08b7f526fe6bd4572458c94e83fbf2150f2eeb28d3011ec921996dc3e69efa52d5fcf3182b20b56b5857a926aa66605808079b4d52c0c0cfe06923fa92e65eeca2c3e6126108e8c1babf5ac522f4d7").unwrap(); + let leaf: [u8; 32] = + hex::decode("f6106e6ae4631e68abe0fa898cedbe97dbae6c7efb1b088c5aa2e8b91190ff96") + .unwrap() + .try_into() + .unwrap(); + let expected_root = + hex::decode("7390b8023db8248123dcaeca57fa6c9340bef639e204f2278fc7ec3d46ad071b").unwrap(); + + let actual_root = verifier.process_inclusion_proof(&proof, 
leaf, 580).unwrap(); + + assert_eq!(actual_root, expected_root); +} + +/// Test proof inclusion with a mocked verifier. +/// To test actual behaviour of the verifier, run the test above +#[tokio::test] +async fn test_inclusion_proof_mocked() { + let cfg = test_eigenda_config(); + let query_client = MockVerifierClient::new(HashMap::new()); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let proof = hex::decode("c455c1ea0e725d7ea3e5f29e9f48be8fc2787bb0a914d5a86710ba302c166ac4f626d76f67f1055bb960a514fb8923af2078fd84085d712655b58a19612e8cd15c3e4ac1cef57acde3438dbcf63f47c9fefe1221344c4d5c1a4943dd0d1803091ca81a270909dc0e146841441c9bd0e08e69ce6168181a3e4060ffacf3627480bec6abdd8d7bb92b49d33f180c42f49e041752aaded9c403db3a17b85e48a11e9ea9a08763f7f383dab6d25236f1b77c12b4c49c5cdbcbea32554a604e3f1d2f466851cb43fe73617b3d01e665e4c019bf930f92dea7394c25ed6a1e200d051fb0c30a2193c459f1cfef00bf1ba6656510d16725a4d1dc031cb759dbc90bab427b0f60ddc6764681924dda848824605a4f08b7f526fe6bd4572458c94e83fbf2150f2eeb28d3011ec921996dc3e69efa52d5fcf3182b20b56b5857a926aa66605808079b4d52c0c0cfe06923fa92e65eeca2c3e6126108e8c1babf5ac522f4d7").unwrap(); + let leaf: [u8; 32] = + hex::decode("f6106e6ae4631e68abe0fa898cedbe97dbae6c7efb1b088c5aa2e8b91190ff96") + .unwrap() + .try_into() + .unwrap(); + let expected_root = + hex::decode("7390b8023db8248123dcaeca57fa6c9340bef639e204f2278fc7ec3d46ad071b").unwrap(); + + let actual_root = verifier.process_inclusion_proof(&proof, leaf, 580).unwrap(); + + assert_eq!(actual_root, expected_root); +} + +#[ignore = "depends on external RPC"] +#[tokio::test] +async fn test_verify_batch() { + let cfg = test_eigenda_config(); + let query_client = create_remote_query_client(); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, 8, + 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, 146, + 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, 140, + 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, 125, 123, + 195, 244, 207, 239, 145, 109, 
0, 21, 11, 162, 109, 79, 192, 100, 138, 157, 203, 22, + 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, 128, 112, 84, 34, + 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, 5, 120, 18, 187, 51, + 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, 166, 66, 157, 255, 237, 69, + 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, 156, 220, 159, 4, 99, 48, 191, + 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, 182, 109, 207, 197, 239, 161, 132, + 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, 253, 23, 169, 75, 236, 211, 126, 121, + 132, 191, 68, 167, 200, 16, 154, 149, 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, + 153, 30, 209, 238, 53, 233, 148, 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, + 255, 219, 170, 98, 17, 160, 179, 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, + 73, 78, 79, 72, 253, 105, 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_batch(&cert).await; + assert!(result.is_ok()); +} + +/// Test batch verification with a mocked verifier. +/// To test actual behaviour of the verifier, run the test above +#[tokio::test] +async fn test_verify_batch_mocked() { + let mut mock_replies = HashMap::new(); + let mock_req = CallRequest { + to: Some(H160::from_str("0xd4a7e1bd8015057293f0d0a557088c286942e84b").unwrap()), + data: Some(Bytes::from( + hex::decode("eccbbfc900000000000000000000000000000000000000000000000000000000000103cb") + .unwrap(), + )), + ..Default::default() + }; + let mock_req = serde_json::to_string(&mock_req).unwrap(); + let mock_res = Bytes::from( + hex::decode("60933e76989e57d6fd210ae2fc3086958d708660ee6927f91963047ab1a91ba8").unwrap(), + ); + mock_replies.insert(mock_req, mock_res); + + let cfg = test_eigenda_config(); + let query_client = MockVerifierClient::new(mock_replies); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, 8, + 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, 146, + 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, 140, + 50, 100, 144, 237, 125, 125, 75, 
54, 210, 247, 147, 23, 48, 189, 120, 4, 125, 123, + 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, 157, 203, 22, + 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, 128, 112, 84, 34, + 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, 5, 120, 18, 187, 51, + 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, 166, 66, 157, 255, 237, 69, + 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, 156, 220, 159, 4, 99, 48, 191, + 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, 182, 109, 207, 197, 239, 161, 132, + 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, 253, 23, 169, 75, 236, 211, 126, 121, + 132, 191, 68, 167, 200, 16, 154, 149, 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, + 153, 30, 209, 238, 53, 233, 148, 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, + 255, 219, 170, 98, 17, 160, 179, 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, + 73, 78, 79, 72, 253, 105, 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_batch(&cert).await; + assert!(result.is_ok()); +} + +#[ignore = "depends on external RPC"] +#[tokio::test] +async fn test_verify_security_params() { + let cfg = test_eigenda_config(); + let query_client = create_remote_query_client(); + let verifier = Verifier::new(cfg, Arc::new(query_client)).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, 8, + 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, 146, + 96, 218, 159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, 140, + 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, 125, 123, + 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, 157, 203, 22, + 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, 128, 112, 84, 34, + 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, 5, 120, 18, 187, 51, + 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, 166, 66, 157, 255, 237, 69, + 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, 156, 220, 159, 4, 99, 48, 191, + 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, 182, 109, 207, 197, 239, 161, 132, 
+ 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, 253, 23, 169, 75, 236, 211, 126, 121, + 132, 191, 68, 167, 200, 16, 154, 149, 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, + 153, 30, 209, 238, 53, 233, 148, 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, + 255, 219, 170, 98, 17, 160, 179, 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, + 73, 78, 79, 72, 253, 105, 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_security_params(&cert).await; + assert!(result.is_ok()); +} + +/// Test security params verification with a mocked verifier. +/// To test actual behaviour of the verifier, run the test above +#[tokio::test] +async fn test_verify_security_params_mocked() { + let mut mock_replies = HashMap::new(); + + // First request + let mock_req = CallRequest { + to: Some(H160::from_str("0xd4a7e1bd8015057293f0d0a557088c286942e84b").unwrap()), + data: Some(Bytes::from(hex::decode("8687feae").unwrap())), + ..Default::default() + }; + let mock_req = serde_json::to_string(&mock_req).unwrap(); + let mock_res = Bytes::from( + hex::decode("000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000032121210000000000000000000000000000000000000000000000000000000000") + .unwrap(), + ); + mock_replies.insert(mock_req, mock_res); + + // Second request + let mock_req = CallRequest { + to: Some(H160::from_str("0xd4a7e1bd8015057293f0d0a557088c286942e84b").unwrap()), + data: Some(Bytes::from(hex::decode("e15234ff").unwrap())), + ..Default::default() + }; + let mock_req = serde_json::to_string(&mock_req).unwrap(); + let mock_res = Bytes::from( + hex::decode("000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020001000000000000000000000000000000000000000000000000000000000000") + .unwrap(), + ); + mock_replies.insert(mock_req, mock_res); + + let cfg = test_eigenda_config(); + let client = MockVerifierClient::new(mock_replies); + let verifier = Verifier::new(cfg, Arc::new(client)).await.unwrap(); + let cert = BlobInfo { + blob_header: BlobHeader { + commitment: G1Commitment { + x: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + y: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + data_length: 4, + blob_quorum_params: vec![ + BlobQuorumParam { + quorum_number: 0, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + BlobQuorumParam { + quorum_number: 1, + adversary_threshold_percentage: 33, + confirmation_threshold_percentage: 55, + chunk_length: 1, + }, + ], + }, + blob_verification_proof: BlobVerificationProof { + batch_id: 66507, + blob_index: 92, + batch_medatada: BatchMetadata { + batch_header: BatchHeader { + batch_root: vec![ + 179, 187, 53, 98, 192, 80, 151, 28, 125, 192, 115, 29, 129, 238, 216, 8, + 213, 210, 203, 143, 181, 19, 146, 113, 98, 131, 39, 238, 149, 248, 211, 43, + ], + quorum_numbers: vec![0, 1], + quorum_signed_percentages: vec![100, 100], + reference_block_number: 2624794, + }, + signatory_record_hash: vec![ + 172, 32, 172, 142, 197, 52, 84, 143, 120, 26, 190, 9, 143, 217, 62, 19, 17, + 107, 105, 67, 203, 5, 172, 249, 6, 60, 105, 240, 134, 34, 66, 133, + ], + fee: vec![0], + confirmation_block_number: 2624876, + batch_header_hash: vec![ + 122, 115, 2, 85, 233, 75, 121, 85, 51, 81, 248, 170, 198, 252, 42, 16, 1, 146, + 96, 218,
159, 44, 41, 40, 94, 247, 147, 11, 255, 68, 40, 177, + ], + }, + inclusion_proof: vec![ + 203, 160, 237, 48, 117, 255, 75, 254, 117, 144, 164, 77, 29, 146, 36, 48, 190, 140, + 50, 100, 144, 237, 125, 125, 75, 54, 210, 247, 147, 23, 48, 189, 120, 4, 125, 123, + 195, 244, 207, 239, 145, 109, 0, 21, 11, 162, 109, 79, 192, 100, 138, 157, 203, 22, + 17, 114, 234, 72, 174, 231, 209, 133, 99, 118, 201, 160, 137, 128, 112, 84, 34, + 136, 174, 139, 96, 26, 246, 148, 134, 52, 200, 229, 160, 145, 5, 120, 18, 187, 51, + 11, 109, 91, 237, 171, 215, 207, 90, 95, 146, 54, 135, 166, 66, 157, 255, 237, 69, + 183, 141, 45, 162, 145, 71, 16, 87, 184, 120, 84, 156, 220, 159, 4, 99, 48, 191, + 203, 136, 112, 127, 226, 192, 184, 110, 6, 177, 182, 109, 207, 197, 239, 161, 132, + 17, 89, 56, 137, 205, 202, 101, 97, 60, 162, 253, 23, 169, 75, 236, 211, 126, 121, + 132, 191, 68, 167, 200, 16, 154, 149, 202, 197, 7, 191, 26, 8, 67, 3, 37, 137, 16, + 153, 30, 209, 238, 53, 233, 148, 198, 253, 94, 216, 73, 25, 190, 205, 132, 208, + 255, 219, 170, 98, 17, 160, 179, 183, 200, 17, 99, 36, 130, 216, 223, 72, 222, 250, + 73, 78, 79, 72, 253, 105, 245, 84, 244, 196, + ], + quorum_indexes: vec![0, 1], + }, + }; + let result = verifier.verify_security_params(&cert).await; + assert!(result.is_ok()); +} diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs index d5391ee433f9..358d517f4363 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs @@ -1,9 +1,16 @@ +use std::sync::Arc; + use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig}; use zksync_da_client::DataAvailabilityClient; -use zksync_da_clients::eigen::EigenClient; +use zksync_da_clients::eigen::{EigenClient, GetBlobData}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_node_framework_derive::FromContext; use crate::{ - implementations::resources::da_client::DAClientResource, + implementations::resources::{ + da_client::DAClientResource, + pools::{MasterPool, PoolResource}, + }, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; @@ -20,6 +27,12 @@ impl EigenWiringLayer { } } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource<MasterPool>, +} + #[derive(Debug, IntoContext)] #[context(crate = crate)] pub struct Output { @@ -28,19 +41,39 @@ pub struct Output { #[async_trait::async_trait] impl WiringLayer for EigenWiringLayer { - type Input = (); + type Input = Input; type Output = Output; fn layer_name(&self) -> &'static str { "eigen_client_layer" } - async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> { - let client: Box<dyn DataAvailabilityClient> = - Box::new(EigenClient::new(self.config, self.secrets).await?); + async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> { + let master_pool = input.master_pool.get().await?; + let get_blob_from_db = GetBlobFromDB { pool: master_pool }; + let client: Box<dyn DataAvailabilityClient> = Box::new( + EigenClient::new(self.config, self.secrets, Arc::new(get_blob_from_db)).await?, + ); Ok(Self::Output { client: DAClientResource(client), }) } } + +#[derive(Debug, Clone)] +pub struct GetBlobFromDB { + pool: ConnectionPool<Core>, +} + +#[async_trait::async_trait] +impl GetBlobData for GetBlobFromDB { + async fn get_blob_data(&self, input: &str) -> anyhow::Result<Option<Vec<u8>>> { + let mut conn = self.pool.connection_tagged("eigen_client").await?; + let batch = conn + .data_availability_dal() + .get_blob_data_by_blob_id(input) + .await?; + Ok(batch.map(|b| b.pubdata)) + } +}
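`GetBlobData` decouples the client from Postgres: the wiring layer supplies the DB-backed provider above, but any implementor works. A minimal in-memory alternative, useful for instance in tests where no connection pool is available (assuming the trait requires only `get_blob_data`, as used here; the map keys play the role of request ids):

```rust
use std::collections::HashMap;

#[derive(Debug, Clone)]
struct InMemoryBlobData(HashMap<String, Vec<u8>>);

#[async_trait::async_trait]
impl GetBlobData for InMemoryBlobData {
    async fn get_blob_data(&self, input: &str) -> anyhow::Result<Option<Vec<u8>>> {
        Ok(self.0.get(input).cloned())
    }
}
```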
diff --git a/zkstack_cli/crates/config/src/chain.rs b/zkstack_cli/crates/config/src/chain.rs index b411b1f882a3..2a49b6445f00 100644 --- a/zkstack_cli/crates/config/src/chain.rs +++ b/zkstack_cli/crates/config/src/chain.rs @@ -73,6 +73,7 @@ pub enum DAValidatorType { Rollup = 0, NoDA = 1, Avail = 2, + EigenDA = 3, } impl Serialize for ChainConfig { diff --git a/zkstack_cli/crates/config/src/contracts.rs b/zkstack_cli/crates/config/src/contracts.rs index eb88cf3af854..f8b7b212ea4a 100644 --- a/zkstack_cli/crates/config/src/contracts.rs +++ b/zkstack_cli/crates/config/src/contracts.rs @@ -114,6 +114,11 @@ impl ContractsConfig { .deployed_addresses .avail_l1_da_validator_addr, ); + self.l1.eigenda_l1_validator_addr = Some( + deploy_l1_output + .deployed_addresses + .eigenda_l1_validator_addr, + ); self.l1.chain_admin_addr = deploy_l1_output.deployed_addresses.chain_admin; } @@ -252,6 +257,9 @@ pub struct L1Contracts { // `Option` to be able to parse configs from pre-gateway protocol version. #[serde(skip_serializing_if = "Option::is_none")] pub no_da_validium_l1_validator_addr: Option<Address>, + // `Option` to be able to parse configs from previous protocol version + #[serde(skip_serializing_if = "Option::is_none")] + pub eigenda_l1_validator_addr: Option<Address>, // `Option` to be able to parse configs from pre-gateway protocol version. #[serde(skip_serializing_if = "Option::is_none")] pub transaction_filterer_addr: Option<Address>, diff --git a/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs index c90c3a08de3b..c3dcc03cd47d 100644 --- a/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -196,6 +196,7 @@ impl DeployL1Config { validator_timelock_execution_delay: initial_deployment_config .validator_timelock_execution_delay, avail_l1_da_validator_addr: l1_network.avail_l1_da_validator_addr(), + eigenda_l1_validator_addr: l1_network.eigenda_l1_validator_addr(), }, tokens: TokensDeployL1Config { token_weth_address: initial_deployment_config.token_weth_address, @@ -232,6 +233,8 @@ pub struct ContractsDeployL1Config { pub evm_emulator_hash: Option<H256>, #[serde(skip_serializing_if = "Option::is_none")] pub avail_l1_da_validator_addr: Option<Address>, + #[serde(skip_serializing_if = "Option::is_none")] + pub eigenda_l1_validator_addr: Option<Address>, } #[derive(Debug, Deserialize, Serialize, Clone)] diff --git a/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs index a0bca69cafd5..9294cba6ef8c 100644 --- a/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -36,6 +36,7 @@ pub struct DeployL1DeployedAddressesOutput { pub rollup_l1_da_validator_addr: Address, pub no_da_validium_l1_validator_addr: Address, pub avail_l1_da_validator_addr: Address, + pub eigenda_l1_validator_addr: Address, pub l1_rollup_da_manager: Address, pub native_token_vault_addr: Address, } diff --git a/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs index 41aa65bf3bfb..97fcf8663794 100644 --- a/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs @@ -55,6 +55,7 @@ async fn get_da_validator_type(config: &ChainConfig) -> anyhow::Result<DAValidatorType> (L1BatchCommitmentMode::Rollup, _) => Ok(DAValidatorType::Rollup), (L1BatchCommitmentMode::Validium, None | Some("no_da")) => Ok(DAValidatorType::NoDA), (L1BatchCommitmentMode::Validium, Some("avail")) => Ok(DAValidatorType::Avail), + (L1BatchCommitmentMode::Validium, Some("eigen")) => Ok(DAValidatorType::EigenDA), _ => anyhow::bail!("DAValidatorType is not supported"), } } diff --git a/zkstack_cli/crates/types/src/l1_network.rs b/zkstack_cli/crates/types/src/l1_network.rs index 609af7ef3e7c..0578f685b6ec 100644 --- a/zkstack_cli/crates/types/src/l1_network.rs +++ b/zkstack_cli/crates/types/src/l1_network.rs @@ -48,4 +48,15 @@ impl L1Network { L1Network::Mainnet => None, // TODO: add mainnet address after it is known } } + + pub fn eigenda_l1_validator_addr(&self) -> Option<Address> { + match self { + L1Network::Localhost => None, + L1Network::Sepolia | L1Network::Holesky => { + None + //TODO: add real address + } + L1Network::Mainnet => None, // TODO: add mainnet address after it is known + } + } } diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh index 93496c220e15..3cbaa5497cbf 100644 --- a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -134,7 +134,7 @@ _arguments "${_arguments_options[@]}" : \ '-o+[Enable Grafana]' \ '--observability=[Enable Grafana]' \ '--update-submodules=[]:UPDATE_SUBMODULES:(true false)' \ -'--validium-type=[Type of the Validium network]:VALIDIUM_TYPE:(no-da avail)' \ +'--validium-type=[Type of the Validium network]:VALIDIUM_TYPE:(no-da avail eigen-da)' \ '--support-l2-legacy-shared-bridge-test=[]' \ '--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ @@ -295,7 +295,7 @@ _arguments "${_arguments_options[@]}" : \ '--deploy-paymaster=[]' \ '--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ '--update-submodules=[]:UPDATE_SUBMODULES:(true false)' \ -'--validium-type=[Type of the Validium network]:VALIDIUM_TYPE:(no-da avail)' \ +'--validium-type=[Type of the Validium network]:VALIDIUM_TYPE:(no-da avail eigen-da)' \ '--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.fish b/zkstack_cli/crates/zkstack/completion/zkstack.fish index b05971fe5f3f..cc09d3c45879 100644 --- a/zkstack_cli/crates/zkstack/completion/zkstack.fish +++ b/zkstack_cli/crates/zkstack/completion/zkstack.fish @@ -141,7 +141,8 @@ false\t''" complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l update-submodules -r -f -a "true\t'' false\t''" complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l validium-type -d 'Type of the Validium network' -r -f -a "no-da\t'' -avail\t''" +avail\t'' +eigen-da\t''" complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l support-l2-legacy-shared-bridge-test -r -f -a "true\t'' false\t''" complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r @@ -247,7 +248,8 @@ complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_s complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l update-submodules -r -f -a "true\t'' false\t''" complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l validium-type -d 'Type of the Validium network' -r -f -a "no-da\t'' -avail\t''" +avail\t'' +eigen-da\t''" complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l resume complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l zksync diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.sh b/zkstack_cli/crates/zkstack/completion/zkstack.sh index a6b4498d53fb..4fa85e45f3d8 100644 --- a/zkstack_cli/crates/zkstack/completion/zkstack.sh +++ b/zkstack_cli/crates/zkstack/completion/zkstack.sh @@ -2063,7 +2063,7 @@ _zkstack() { return 0 ;; --validium-type) - COMPREPLY=($(compgen -W "no-da avail" --
"${cur}")) + COMPREPLY=($(compgen -W "no-da avail eigen-da" -- "${cur}")) return 0 ;; --chain) @@ -5195,7 +5195,7 @@ _zkstack() { return 0 ;; --validium-type) - COMPREPLY=($(compgen -W "no-da avail" -- "${cur}")) + COMPREPLY=($(compgen -W "no-da avail eigen-da" -- "${cur}")) return 0 ;; --support-l2-legacy-shared-bridge-test) diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/da_configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/da_configs.rs index 7a8e6d5f838e..0f6eb150d3b7 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/da_configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/da_configs.rs @@ -28,6 +28,7 @@ pub struct ValidiumTypeArgs { pub enum ValidiumTypeInternal { NoDA, Avail, + EigenDA, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, EnumIter, Display, ValueEnum)] @@ -40,6 +41,7 @@ pub enum AvailClientTypeInternal { pub enum ValidiumType { NoDA, Avail((AvailConfig, AvailSecrets)), + EigenDA, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, EnumIter, Display, ValueEnum)] @@ -51,6 +53,7 @@ pub enum AvailFinalityState { impl ValidiumType { pub fn read() -> Self { match PromptSelect::new(MSG_VALIDIUM_TYPE_PROMPT, ValidiumTypeInternal::iter()).ask() { + ValidiumTypeInternal::EigenDA => ValidiumType::EigenDA, // EigenDA doesn't support configuration through CLI ValidiumTypeInternal::NoDA => ValidiumType::NoDA, ValidiumTypeInternal::Avail => { let avail_client_type = PromptSelect::new( diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs index 2d8539620ef0..770ac50996a2 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs @@ -95,6 +95,7 @@ impl InitArgs { Some(da_configs::ValidiumTypeInternal::Avail) => panic!( "Avail is not supported via CLI args, use interactive mode" // TODO: Add support for configuration via CLI args ), + Some(da_configs::ValidiumTypeInternal::EigenDA) => Some(ValidiumType::EigenDA), }, _ => None, }; diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs index dafc786333da..40ed279911b8 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs @@ -72,7 +72,7 @@ pub async fn init_configs( set_genesis_specs(&mut general_config, chain_config, &consensus_keys)?; match &init_args.validium_config { - None | Some(ValidiumType::NoDA) => { + None | Some(ValidiumType::NoDA) | Some(ValidiumType::EigenDA) => { general_config.remove("da_client"); } Some(ValidiumType::Avail((avail_config, _))) => { @@ -103,7 +103,7 @@ pub async fn init_configs( set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; set_consensus_secrets(&mut secrets, &consensus_keys)?; match &init_args.validium_config { - None | Some(ValidiumType::NoDA) => { /* Do nothing */ } + None | Some(ValidiumType::NoDA) | Some(ValidiumType::EigenDA) => { /* Do nothing */ } Some(ValidiumType::Avail((_, avail_secrets))) => { secrets.insert_yaml("da.avail", avail_secrets)?; } diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs index b2638e989d3f..548b7156f6fc 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs @@ 
-258,6 +258,7 @@ pub(crate) async fn get_l1_da_validator(chain_config: &ChainConfig) -> anyhow::R match get_da_client_type(&general_config) { Some("avail") => contracts_config.l1.avail_l1_da_validator_addr, Some("no_da") | None => contracts_config.l1.no_da_validium_l1_validator_addr, + Some("eigen") => contracts_config.l1.eigenda_l1_validator_addr, Some(unsupported) => { anyhow::bail!("DA client config is not supported: {unsupported:?}"); }
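
Taken together, the changes above thread one new DA option through the CLI under three spellings: the user-facing flag value `eigen-da` (completions, `--validium-type`), the Rust variant `EigenDA` (`ValidiumTypeInternal`, `ValidiumType`, `DAValidatorType`), and the serialized da_client discriminator `"eigen"` (config matching and validator-address lookup). As a reading aid only, here is a minimal, self-contained Rust sketch of the final dispatch step; `Address` and `L1Contracts` are hypothetical stand-ins for the zkstack_cli config types, and only the match logic mirrors the diff (assumes the `anyhow` crate):

    // Hypothetical stand-ins; the real types live in zkstack_cli's config crates.
    type Address = [u8; 20];

    struct L1Contracts {
        avail_l1_da_validator_addr: Address,
        no_da_validium_l1_validator_addr: Address,
        eigenda_l1_validator_addr: Address,
    }

    // Mirrors get_l1_da_validator's match over the da_client type:
    // "eigen" now resolves to the EigenDA validator address added to
    // DeployL1DeployedAddressesOutput above.
    fn l1_da_validator(c: &L1Contracts, da_client: Option<&str>) -> anyhow::Result<Address> {
        Ok(match da_client {
            Some("avail") => c.avail_l1_da_validator_addr,
            Some("no_da") | None => c.no_da_validium_l1_validator_addr,
            Some("eigen") => c.eigenda_l1_validator_addr,
            Some(unsupported) => anyhow::bail!("DA client config is not supported: {unsupported:?}"),
        })
    }

Note that `L1Network::eigenda_l1_validator_addr` still returns `None` for every network (all arms are TODOs), so until those addresses land, the ecosystem deployment output is the only source of a usable EigenDA validator address.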