From f0eec07f93759331e6520ccc67f3d3291f0122c4 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Mon, 13 Jan 2025 18:38:52 +0200 Subject: [PATCH 1/8] Increase the number of pvf execute workers (#7116) Reference hardware requirements have been bumped to at least 8 cores, so we can now allocate 50% of that capacity to PVF execution. --------- Signed-off-by: Alexandru Gheorghe --- polkadot/node/service/src/lib.rs | 11 +++-------- prdoc/pr_7116.prdoc | 8 ++++++++ 2 files changed, 11 insertions(+), 8 deletions(-) create mode 100644 prdoc/pr_7116.prdoc diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 227bc52539946..820cce8d083a6 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -944,14 +944,9 @@ pub fn new_full< secure_validator_mode, prep_worker_path, exec_worker_path, - pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or_else( - || match config.chain_spec.identify_chain() { - // The intention is to use this logic for gradual increasing from 2 to 4 - // of this configuration chain by chain until it reaches production chain. - Chain::Polkadot | Chain::Kusama => 2, - Chain::Rococo | Chain::Westend | Chain::Unknown => 4, - }, - ), + // Default execution workers is 4 because we have 8 cores on the reference hardware, + // and this accounts for 50% of that cpu capacity. + pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or(4), pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1), pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2), }) diff --git a/prdoc/pr_7116.prdoc b/prdoc/pr_7116.prdoc new file mode 100644 index 0000000000000..95a5254778a4d --- /dev/null +++ b/prdoc/pr_7116.prdoc @@ -0,0 +1,8 @@ +title: Increase the number of pvf execution workers from 2 to 4 +doc: +- audience: Node Dev + description: |- + Increase the number of pvf execution workers from 2 to 4. +crates: +- name: polkadot-service + bump: patch From 0e0fa4782e2872ea74d8038ebedb9f6e6be53457 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 13 Jan 2025 18:42:22 +0100 Subject: [PATCH 2/8] `fatxpool`: rotator cache size now depends on pool's limits (#7102) # Description This PR modifies the hard-coded size of the extrinsics cache within [`PoolRotator`](https://github.com/paritytech/polkadot-sdk/blob/cdf107de700388a52a17b2fb852c98420c78278e/substrate/client/transaction-pool/src/graph/rotator.rs#L36-L45) to be in line with pool limits. The problem was that, due to the small hard-coded size (small compared to the number of txs in a single block): https://github.com/paritytech/polkadot-sdk/blob/cdf107de700388a52a17b2fb852c98420c78278e/substrate/client/transaction-pool/src/graph/rotator.rs#L34 an excessive number of unnecessary verifications was performed in `prune_tags`: https://github.com/paritytech/polkadot-sdk/blob/cdf107de700388a52a17b2fb852c98420c78278e/substrate/client/transaction-pool/src/graph/pool.rs#L369-L370 This was resulting in quite long `prune_tags` execution times (which were ok for 6s blocks, but become noticeable for 2s blocks): ``` Pruning at HashAndNumber { number: 83, ... }. Resubmitting transactions: 6142, reverification took: 237.818955ms Pruning at HashAndNumber { number: 84, ... }. Resubmitting transactions: 5985, reverification took: 222.118218ms Pruning at HashAndNumber { number: 85, ... }. 
Resubmitting transactions: 5981, reverification took: 215.546847ms ``` The fix reduces the overhead: ``` Pruning at HashAndNumber { number: 92, ... }. Resubmitting transactions: 6325, reverification took: 14.728354ms Pruning at HashAndNumber { number: 93, ... }. Resubmitting transactions: 7030, reverification took: 23.973607ms Pruning at HashAndNumber { number: 94, ... }. Resubmitting transactions: 4465, reverification took: 9.532472ms ``` ## Review Notes I decided to leave the hardcoded `EXPECTED_SIZE` for the legacy transaction pool. Removing verification of transactions during re-submission may negatively impact the behavior of the legacy (single-state) pool. As in the long term we probably want to deprecate the old pool, I did not invest time to assess the impact of the rotator change on the behavior of the legacy pool. --------- Co-authored-by: command-bot <> Co-authored-by: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com> --- prdoc/pr_7102.prdoc | 8 +++ .../client/transaction-pool/benches/basics.rs | 12 ++++- .../transaction-pool/src/common/tests.rs | 2 +- .../src/fork_aware_txpool/dropped_watcher.rs | 4 +- .../fork_aware_txpool/fork_aware_txpool.rs | 2 +- .../client/transaction-pool/src/graph/pool.rs | 49 ++++++++++++++----- .../transaction-pool/src/graph/rotator.rs | 42 ++++++++++++---- .../src/graph/validated_pool.rs | 31 ++++++++++-- .../src/single_state_txpool/revalidation.rs | 12 ++++- .../single_state_txpool.rs | 12 ++++- .../client/transaction-pool/tests/fatp.rs | 4 +- .../client/transaction-pool/tests/pool.rs | 4 +- 12 files changed, 144 insertions(+), 38 deletions(-) create mode 100644 prdoc/pr_7102.prdoc diff --git a/prdoc/pr_7102.prdoc b/prdoc/pr_7102.prdoc new file mode 100644 index 0000000000000..b1923aafc3db4 --- /dev/null +++ b/prdoc/pr_7102.prdoc @@ -0,0 +1,8 @@ +title: '`fatxpool`: rotator cache size now depends on pool''s limits' +doc: +- audience: Node Dev + description: |- + This PR modifies the hard-coded size of the extrinsics cache within `PoolRotator` to be in line with pool limits. It only applies to the fork-aware transaction pool. For the legacy (single-state) transaction pool the logic remains untouched. 
+crates: +- name: sc-transaction-pool + bump: minor diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 5e40b0fb72d6b..5ba9dd40c1568 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -197,14 +197,22 @@ fn benchmark_main(c: &mut Criterion) { c.bench_function("sequential 50 tx", |b| { b.iter(|| { let api = Arc::from(TestApi::new_dependant()); - bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 50, api); + bench_configured( + Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), + 50, + api, + ); }); }); c.bench_function("random 100 tx", |b| { b.iter(|| { let api = Arc::from(TestApi::default()); - bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 100, api); + bench_configured( + Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), + 100, + api, + ); }); }); } diff --git a/substrate/client/transaction-pool/src/common/tests.rs b/substrate/client/transaction-pool/src/common/tests.rs index b00cf5fbfede9..7f2cbe24d8ef6 100644 --- a/substrate/client/transaction-pool/src/common/tests.rs +++ b/substrate/client/transaction-pool/src/common/tests.rs @@ -222,5 +222,5 @@ pub(crate) fn uxt(transfer: Transfer) -> Extrinsic { pub(crate) fn pool() -> (Pool, Arc) { let api = Arc::new(TestApi::default()); - (Pool::new(Default::default(), true.into(), api.clone()), api) + (Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api) } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 7679e3b169d2e..d69aa37c94a1a 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -329,14 +329,14 @@ where let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { loop { if let Some(dropped) = ctx.get_pending_dropped_transaction() { - debug!("dropped_watcher: sending out (pending): {dropped:?}"); + trace!("dropped_watcher: sending out (pending): {dropped:?}"); return Some((dropped, ctx)); } tokio::select! 
{ biased; Some(event) = next_event(&mut ctx.stream_map) => { if let Some(dropped) = ctx.handle_event(event.0, event.1) { - debug!("dropped_watcher: sending out: {dropped:?}"); + trace!("dropped_watcher: sending out: {dropped:?}"); return Some((dropped, ctx)); } }, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 4ec87f1fefa40..e57256943ccfe 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -318,7 +318,7 @@ where pool_api.clone(), listener.clone(), metrics.clone(), - TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * (options.ready.count + options.future.count), + TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * options.total_count(), options.ready.total_bytes + options.future.total_bytes, )); diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index ff9cc1541af4c..4c0ace0b1c73a 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -158,6 +158,13 @@ impl Default for Options { } } +impl Options { + /// Total (ready+future) maximal number of transactions in the pool. + pub fn total_count(&self) -> usize { + self.ready.count + self.future.count + } +} + /// Should we check that the transaction is banned /// in the pool, before we verify it? #[derive(Copy, Clone)] @@ -172,6 +179,21 @@ pub struct Pool { } impl Pool { + /// Create a new transaction pool with statically sized rotator. + pub fn new_with_staticly_sized_rotator( + options: Options, + is_validator: IsValidator, + api: Arc, + ) -> Self { + Self { + validated_pool: Arc::new(ValidatedPool::new_with_staticly_sized_rotator( + options, + is_validator, + api, + )), + } + } + /// Create a new transaction pool. pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { Self { validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)) } @@ -284,6 +306,7 @@ impl Pool { let mut validated_counter: usize = 0; let mut future_tags = Vec::new(); + let now = Instant::now(); for (extrinsic, in_pool_tags) in all { match in_pool_tags { // reuse the tags for extrinsics that were found in the pool @@ -319,7 +342,7 @@ impl Pool { } } - log::trace!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}"); + log::debug!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}, took:{:?}", now.elapsed()); self.prune_tags(at, future_tags, in_pool_hashes).await } @@ -351,6 +374,7 @@ impl Pool { tags: impl IntoIterator, known_imported_hashes: impl IntoIterator> + Clone, ) { + let now = Instant::now(); log::trace!(target: LOG_TARGET, "Pruning at {:?}", at); // Prune all transactions that provide given tags let prune_status = self.validated_pool.prune_tags(tags); @@ -369,9 +393,8 @@ impl Pool { let reverified_transactions = self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; - let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect(); - - log::trace!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions: {}", &at, reverified_transactions.len()); + let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect::>(); + log::debug!(target: LOG_TARGET, "Pruning at {:?}. 
Resubmitting transactions: {}, reverification took: {:?}", &at, reverified_transactions.len(), now.elapsed()); log_xt_trace!(data: tuple, target: LOG_TARGET, &reverified_transactions, "[{:?}] Resubmitting transaction: {:?}"); // And finally - submit reverified transactions back to the pool @@ -580,7 +603,7 @@ mod tests { fn should_reject_unactionable_transactions() { // given let api = Arc::new(TestApi::default()); - let pool = Pool::new( + let pool = Pool::new_with_staticly_sized_rotator( Default::default(), // the node does not author blocks false.into(), @@ -767,7 +790,7 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); let hash1 = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap(); @@ -803,7 +826,7 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); // when block_on( @@ -1036,7 +1059,7 @@ mod tests { Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); let xt = uxt(Transfer { from: Alice.into(), @@ -1074,7 +1097,7 @@ mod tests { Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) @@ -1106,7 +1129,7 @@ mod tests { Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let api = Arc::new(TestApi::default()); - let pool = Pool::new(options, true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); let han_of_block0 = api.expect_hash_and_number(0); @@ -1151,7 +1174,11 @@ mod tests { let mut api = TestApi::default(); api.delay = Arc::new(Mutex::new(rx.into())); let api = Arc::new(api); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let pool = Arc::new(Pool::new_with_staticly_sized_rotator( + Default::default(), + true.into(), + api.clone(), + )); let han_of_block0 = api.expect_hash_and_number(0); diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 9a2e269b5eede..80d8f24144c8a 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -31,7 +31,10 @@ use std::{ use super::base_pool::Transaction; /// Expected size of the banned extrinsics cache. -const EXPECTED_SIZE: usize = 2048; +const DEFAULT_EXPECTED_SIZE: usize = 2048; + +/// The default duration, in seconds, for which an extrinsic is banned. +const DEFAULT_BAN_TIME_SECS: u64 = 30 * 60; /// Pool rotator is responsible to only keep fresh extrinsics in the pool. 
/// @@ -42,18 +45,39 @@ pub struct PoolRotator { ban_time: Duration, /// Currently banned extrinsics. banned_until: RwLock>, + /// Expected size of the banned extrinsics cache. + expected_size: usize, +} + +impl Clone for PoolRotator { + fn clone(&self) -> Self { + Self { + ban_time: self.ban_time, + banned_until: RwLock::new(self.banned_until.read().clone()), + expected_size: self.expected_size, + } + } } impl Default for PoolRotator { fn default() -> Self { - Self { ban_time: Duration::from_secs(60 * 30), banned_until: Default::default() } + Self { + ban_time: Duration::from_secs(DEFAULT_BAN_TIME_SECS), + banned_until: Default::default(), + expected_size: DEFAULT_EXPECTED_SIZE, + } } } impl PoolRotator { /// New rotator instance with specified ban time. pub fn new(ban_time: Duration) -> Self { - Self { ban_time, banned_until: Default::default() } + Self { ban_time, ..Self::default() } + } + + /// New rotator instance with specified ban time and expected cache size. + pub fn new_with_expected_size(ban_time: Duration, expected_size: usize) -> Self { + Self { expected_size, ..Self::new(ban_time) } } /// Returns `true` if extrinsic hash is currently banned. @@ -69,8 +93,8 @@ impl PoolRotator { banned.insert(hash, *now + self.ban_time); } - if banned.len() > 2 * EXPECTED_SIZE { - while banned.len() > EXPECTED_SIZE { + if banned.len() > 2 * self.expected_size { + while banned.len() > self.expected_size { if let Some(key) = banned.keys().next().cloned() { banned.remove(&key); } @@ -201,16 +225,16 @@ mod tests { let past_block = 0; // when - for i in 0..2 * EXPECTED_SIZE { + for i in 0..2 * DEFAULT_EXPECTED_SIZE { let tx = tx_with(i as u64, past_block); assert!(rotator.ban_if_stale(&now, past_block, &tx)); } - assert_eq!(rotator.banned_until.read().len(), 2 * EXPECTED_SIZE); + assert_eq!(rotator.banned_until.read().len(), 2 * DEFAULT_EXPECTED_SIZE); // then - let tx = tx_with(2 * EXPECTED_SIZE as u64, past_block); + let tx = tx_with(2 * DEFAULT_EXPECTED_SIZE as u64, past_block); // trigger a garbage collection assert!(rotator.ban_if_stale(&now, past_block, &tx)); - assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE); + assert_eq!(rotator.banned_until.read().len(), DEFAULT_EXPECTED_SIZE); } } diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 14df63d9673e3..3f7bf4773de7b 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -121,16 +121,41 @@ impl Clone for ValidatedPool { listener: Default::default(), pool: RwLock::from(self.pool.read().clone()), import_notification_sinks: Default::default(), - rotator: PoolRotator::default(), + rotator: self.rotator.clone(), } } } impl ValidatedPool { + /// Create a new transaction pool with statically sized rotator. + pub fn new_with_staticly_sized_rotator( + options: Options, + is_validator: IsValidator, + api: Arc, + ) -> Self { + let ban_time = options.ban_time; + Self::new_with_rotator(options, is_validator, api, PoolRotator::new(ban_time)) + } + /// Create a new transaction pool. 
pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { - let base_pool = base::BasePool::new(options.reject_future_transactions); let ban_time = options.ban_time; + let total_count = options.total_count(); + Self::new_with_rotator( + options, + is_validator, + api, + PoolRotator::new_with_expected_size(ban_time, total_count), + ) + } + + fn new_with_rotator( + options: Options, + is_validator: IsValidator, + api: Arc, + rotator: PoolRotator>, + ) -> Self { + let base_pool = base::BasePool::new(options.reject_future_transactions); Self { is_validator, options, @@ -138,7 +163,7 @@ impl ValidatedPool { api, pool: RwLock::new(base_pool), import_notification_sinks: Default::default(), - rotator: PoolRotator::new(ban_time), + rotator, } } diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index f22fa2ddabdee..caa09585b28bf 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -384,7 +384,11 @@ mod tests { #[test] fn revalidation_queue_works() { let api = Arc::new(TestApi::default()); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let pool = Arc::new(Pool::new_with_staticly_sized_rotator( + Default::default(), + true.into(), + api.clone(), + )); let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); let uxt = uxt(Transfer { @@ -414,7 +418,11 @@ mod tests { #[test] fn revalidation_queue_skips_revalidation_for_unknown_block_hash() { let api = Arc::new(TestApi::default()); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let pool = Arc::new(Pool::new_with_staticly_sized_rotator( + Default::default(), + true.into(), + api.clone(), + )); let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); let uxt0 = uxt(Transfer { diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index e7504012ca67b..2b32704945c75 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -141,7 +141,11 @@ where finalized_hash: Block::Hash, options: graph::Options, ) -> (Self, Pin + Send>>) { - let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone())); + let pool = Arc::new(graph::Pool::new_with_staticly_sized_rotator( + options, + true.into(), + pool_api.clone(), + )); let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background( pool_api.clone(), pool.clone(), @@ -177,7 +181,11 @@ where best_block_hash: Block::Hash, finalized_hash: Block::Hash, ) -> Self { - let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); + let pool = Arc::new(graph::Pool::new_with_staticly_sized_rotator( + options, + is_validator, + pool_api.clone(), + )); let (revalidation_queue, background_task) = match revalidation_type { RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index 8bf08122995c1..dd82c52a6047b 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ 
-2199,7 +2199,7 @@ fn import_sink_works3() { pool.submit_one(genesis, SOURCE, xt1.clone()), ]; - let x = block_on(futures::future::join_all(submissions)); + block_on(futures::future::join_all(submissions)); let header01a = api.push_block(1, vec![], true); let header01b = api.push_block(1, vec![], true); @@ -2213,8 +2213,6 @@ fn import_sink_works3() { assert_pool_status!(header01a.hash(), &pool, 1, 1); assert_pool_status!(header01b.hash(), &pool, 1, 1); - log::debug!("xxx {x:#?}"); - let import_events = futures::executor::block_on_stream(import_stream).take(1).collect::<Vec<_>>(); diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 20997606c607c..de35726435f0f 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -49,7 +49,7 @@ const LOG_TARGET: &str = "txpool"; fn pool() -> (Pool<TestApi>, Arc<TestApi>) { let api = Arc::new(TestApi::with_alice_nonce(209)); - (Pool::new(Default::default(), true.into(), api.clone()), api) + (Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api) } fn maintained_pool() -> (BasicPool<TestApi, Block>, Arc<TestApi>, futures::executor::ThreadPool) { @@ -224,7 +224,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| { v.provides.push(vec![155]); })); - let pool = Pool::new(Default::default(), true.into(), api.clone()); + let pool = Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()); let xt0 = Arc::from(uxt(Alice, 209)); block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone())) .expect("1. Imported"); From cccefdd965c39498825f34e105979c447b315359 Mon Sep 17 00:00:00 2001 From: "polka.dom" Date: Mon, 13 Jan 2025 16:22:32 -0500 Subject: [PATCH 3/8] Remove usage of the pallet::getter macro from pallet-grandpa (#4529) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As per #3326, removes pallet::getter macro usage from pallet-grandpa. The syntax `StorageItem::<T>::get()` should be used instead. cc @muraca --------- Co-authored-by: Bastian Köcher --- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/test-runtime/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- prdoc/pr_4529.prdoc | 22 ++++ substrate/bin/node/runtime/src/lib.rs | 2 +- substrate/frame/grandpa/src/benchmarking.rs | 4 +- substrate/frame/grandpa/src/equivocation.rs | 2 +- substrate/frame/grandpa/src/lib.rs | 106 +++++++++++++------- substrate/frame/grandpa/src/tests.rs | 89 ++++++++-------- 9 files changed, 144 insertions(+), 87 deletions(-) create mode 100644 prdoc/pr_4529.prdoc diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index cab4394eb5a8d..e5d703700fee5 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2276,7 +2276,7 @@ sp_api::impl_runtime_apis! { } fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::<Runtime>::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 82564d5c278ca..4f9ba8d8508cd 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -1186,7 +1186,7 @@ sp_api::impl_runtime_apis! 
{ } fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::<Runtime>::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 166f3fc42eefe..9d77a5e5eea1f 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2300,7 +2300,7 @@ sp_api::impl_runtime_apis! { } fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::<Runtime>::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/prdoc/pr_4529.prdoc b/prdoc/pr_4529.prdoc new file mode 100644 index 0000000000000..32beea17ad6b6 --- /dev/null +++ b/prdoc/pr_4529.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-grandpa + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-grandpa`. + The syntax `StorageItem::<T>::get()` should be used instead + +crates: + - name: pallet-grandpa + bump: minor + - name: kitchensink-runtime + bump: none + - name: westend-runtime + bump: none + - name: polkadot-test-runtime + bump: none + - name: rococo-runtime + bump: none diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 7de04b27ff830..e11a009c1c3f5 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2979,7 +2979,7 @@ impl_runtime_apis! { } fn current_set_id() -> sp_consensus_grandpa::SetId { - Grandpa::current_set_id() + pallet_grandpa::CurrentSetId::<Runtime>::get() } fn submit_report_equivocation_unsigned_extrinsic( diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs index 0a10e58827761..56048efa22cae 100644 --- a/substrate/frame/grandpa/src/benchmarking.rs +++ b/substrate/frame/grandpa/src/benchmarking.rs @@ -17,7 +17,7 @@ //! Benchmarks for the GRANDPA pallet. 
-use super::{Pallet as Grandpa, *}; +use super::*; use frame_benchmarking::v2::*; use frame_system::RawOrigin; use sp_core::H256; @@ -69,7 +69,7 @@ mod benchmarks { #[extrinsic_call] _(RawOrigin::Root, delay, best_finalized_block_number); - assert!(Grandpa::::stalled().is_some()); + assert!(Stalled::::get().is_some()); } impl_benchmark_test_suite!( diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs index 2366c957e9ab1..4ebdbc1eecd30 100644 --- a/substrate/frame/grandpa/src/equivocation.rs +++ b/substrate/frame/grandpa/src/equivocation.rs @@ -177,7 +177,7 @@ where evidence: (EquivocationProof>, T::KeyOwnerProof), ) -> Result<(), DispatchError> { let (equivocation_proof, key_owner_proof) = evidence; - let reporter = reporter.or_else(|| >::author()); + let reporter = reporter.or_else(|| pallet_authorship::Pallet::::author()); let offender = equivocation_proof.offender().clone(); // We check the equivocation within the context of its set id (and diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 4f69aeaef5236..9017eec2ca8f8 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -127,7 +127,7 @@ pub mod pallet { impl Hooks> for Pallet { fn on_finalize(block_number: BlockNumberFor) { // check for scheduled pending authority set changes - if let Some(pending_change) = >::get() { + if let Some(pending_change) = PendingChange::::get() { // emit signal if we're at the block that scheduled the change if block_number == pending_change.scheduled_at { let next_authorities = pending_change.next_authorities.to_vec(); @@ -150,12 +150,12 @@ pub mod pallet { Self::deposit_event(Event::NewAuthorities { authority_set: pending_change.next_authorities.into_inner(), }); - >::kill(); + PendingChange::::kill(); } } // check for scheduled pending state changes - match >::get() { + match State::::get() { StoredState::PendingPause { scheduled_at, delay } => { // signal change to pause if block_number == scheduled_at { @@ -164,7 +164,7 @@ pub mod pallet { // enact change to paused state if block_number == scheduled_at + delay { - >::put(StoredState::Paused); + State::::put(StoredState::Paused); Self::deposit_event(Event::Paused); } }, @@ -176,7 +176,7 @@ pub mod pallet { // enact change to live state if block_number == scheduled_at + delay { - >::put(StoredState::Live); + State::::put(StoredState::Live); Self::deposit_event(Event::Resumed); } }, @@ -297,37 +297,32 @@ pub mod pallet { } #[pallet::type_value] - pub(super) fn DefaultForState() -> StoredState> { + pub fn DefaultForState() -> StoredState> { StoredState::Live } /// State of the current authority set. #[pallet::storage] - #[pallet::getter(fn state)] - pub(super) type State = + pub type State = StorageValue<_, StoredState>, ValueQuery, DefaultForState>; /// Pending change: (signaled at, scheduled change). #[pallet::storage] - #[pallet::getter(fn pending_change)] - pub(super) type PendingChange = + pub type PendingChange = StorageValue<_, StoredPendingChange, T::MaxAuthorities>>; /// next block number where we can force a change. #[pallet::storage] - #[pallet::getter(fn next_forced)] - pub(super) type NextForced = StorageValue<_, BlockNumberFor>; + pub type NextForced = StorageValue<_, BlockNumberFor>; /// `true` if we are currently stalled. 
#[pallet::storage] - #[pallet::getter(fn stalled)] - pub(super) type Stalled = StorageValue<_, (BlockNumberFor, BlockNumberFor)>; + pub type Stalled = StorageValue<_, (BlockNumberFor, BlockNumberFor)>; /// The number of changes (both in terms of keys and underlying economic responsibilities) /// in the "set" of Grandpa validators from genesis. #[pallet::storage] - #[pallet::getter(fn current_set_id)] - pub(super) type CurrentSetId = StorageValue<_, SetId, ValueQuery>; + pub type CurrentSetId = StorageValue<_, SetId, ValueQuery>; /// A mapping from grandpa set ID to the index of the *most recent* session for which its /// members were responsible. @@ -340,12 +335,11 @@ pub mod pallet { /// /// TWOX-NOTE: `SetId` is not under user control. #[pallet::storage] - #[pallet::getter(fn session_for_set)] - pub(super) type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; + pub type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; /// The current list of authorities. #[pallet::storage] - pub(crate) type Authorities = + pub type Authorities = StorageValue<_, BoundedAuthorityList, ValueQuery>; #[derive(frame_support::DefaultNoBound)] @@ -432,6 +426,44 @@ pub enum StoredState { } impl Pallet { + /// State of the current authority set. + pub fn state() -> StoredState> { + State::::get() + } + + /// Pending change: (signaled at, scheduled change). + pub fn pending_change() -> Option, T::MaxAuthorities>> { + PendingChange::::get() + } + + /// next block number where we can force a change. + pub fn next_forced() -> Option> { + NextForced::::get() + } + + /// `true` if we are currently stalled. + pub fn stalled() -> Option<(BlockNumberFor, BlockNumberFor)> { + Stalled::::get() + } + + /// The number of changes (both in terms of keys and underlying economic responsibilities) + /// in the "set" of Grandpa validators from genesis. + pub fn current_set_id() -> SetId { + CurrentSetId::::get() + } + + /// A mapping from grandpa set ID to the index of the *most recent* session for which its + /// members were responsible. + /// + /// This is only used for validating equivocation proofs. An equivocation proof must + /// contains a key-ownership proof for a given session, therefore we need a way to tie + /// together sessions and GRANDPA set ids, i.e. we need to validate that a validator + /// was the owner of a given key on a given session, and what the active set ID was + /// during that session. + pub fn session_for_set(set_id: SetId) -> Option { + SetIdSession::::get(set_id) + } + /// Get the current set of authorities, along with their respective weights. pub fn grandpa_authorities() -> AuthorityList { Authorities::::get().into_inner() @@ -440,9 +472,9 @@ impl Pallet { /// Schedule GRANDPA to pause starting in the given number of blocks. /// Cannot be done when already paused. pub fn schedule_pause(in_blocks: BlockNumberFor) -> DispatchResult { - if let StoredState::Live = >::get() { - let scheduled_at = >::block_number(); - >::put(StoredState::PendingPause { delay: in_blocks, scheduled_at }); + if let StoredState::Live = State::::get() { + let scheduled_at = frame_system::Pallet::::block_number(); + State::::put(StoredState::PendingPause { delay: in_blocks, scheduled_at }); Ok(()) } else { @@ -452,9 +484,9 @@ impl Pallet { /// Schedule a resume of GRANDPA after pausing. 
pub fn schedule_resume(in_blocks: BlockNumberFor) -> DispatchResult { - if let StoredState::Paused = >::get() { - let scheduled_at = >::block_number(); - >::put(StoredState::PendingResume { delay: in_blocks, scheduled_at }); + if let StoredState::Paused = State::::get() { + let scheduled_at = frame_system::Pallet::::block_number(); + State::::put(StoredState::PendingResume { delay: in_blocks, scheduled_at }); Ok(()) } else { @@ -481,17 +513,17 @@ impl Pallet { in_blocks: BlockNumberFor, forced: Option>, ) -> DispatchResult { - if !>::exists() { - let scheduled_at = >::block_number(); + if !PendingChange::::exists() { + let scheduled_at = frame_system::Pallet::::block_number(); if forced.is_some() { - if Self::next_forced().map_or(false, |next| next > scheduled_at) { + if NextForced::::get().map_or(false, |next| next > scheduled_at) { return Err(Error::::TooSoon.into()) } // only allow the next forced change when twice the window has passed since // this one. - >::put(scheduled_at + in_blocks * 2u32.into()); + NextForced::::put(scheduled_at + in_blocks * 2u32.into()); } let next_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( @@ -502,7 +534,7 @@ impl Pallet { ), ); - >::put(StoredPendingChange { + PendingChange::::put(StoredPendingChange { delay: in_blocks, scheduled_at, next_authorities, @@ -518,7 +550,7 @@ impl Pallet { /// Deposit one of this module's logs. fn deposit_log(log: ConsensusLog>) { let log = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()); - >::deposit_log(log); + frame_system::Pallet::::deposit_log(log); } // Perform module initialization, abstracted so that it can be called either through genesis @@ -554,7 +586,7 @@ impl Pallet { // when we record old authority sets we could try to figure out _who_ // failed. until then, we can't meaningfully guard against // `next == last` the way that normal session changes do. - >::put((further_wait, median)); + Stalled::::put((further_wait, median)); } } @@ -583,10 +615,10 @@ where // Always issue a change if `session` says that the validators have changed. // Even if their session keys are the same as before, the underlying economic // identities have changed. - let current_set_id = if changed || >::exists() { + let current_set_id = if changed || Stalled::::exists() { let next_authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - let res = if let Some((further_wait, median)) = >::take() { + let res = if let Some((further_wait, median)) = Stalled::::take() { Self::schedule_change(next_authorities, further_wait, Some(median)) } else { Self::schedule_change(next_authorities, Zero::zero(), None) @@ -608,17 +640,17 @@ where // either the session module signalled that the validators have changed // or the set was stalled. but since we didn't successfully schedule // an authority set change we do not increment the set id. - Self::current_set_id() + CurrentSetId::::get() } } else { // nothing's changed, neither economic conditions nor session keys. update the pointer // of the current set. - Self::current_set_id() + CurrentSetId::::get() }; // update the mapping to note that the current set corresponds to the // latest equivalent session (i.e. now). 
- let session_index = >::current_index(); + let session_index = pallet_session::Pallet::::current_index(); SetIdSession::::insert(current_set_id, &session_index); } diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 383f77f00de71..f4720966b1797 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -110,7 +110,7 @@ fn cannot_schedule_change_when_one_pending() { new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { initialize_block(1, Default::default()); Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); - assert!(>::exists()); + assert!(PendingChange::::exists()); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), Error::::ChangePending @@ -120,7 +120,7 @@ fn cannot_schedule_change_when_one_pending() { let header = System::finalize(); initialize_block(2, header.hash()); - assert!(>::exists()); + assert!(PendingChange::::exists()); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), Error::::ChangePending @@ -130,7 +130,7 @@ fn cannot_schedule_change_when_one_pending() { let header = System::finalize(); initialize_block(3, header.hash()); - assert!(!>::exists()); + assert!(!PendingChange::::exists()); assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None)); Grandpa::on_finalize(3); @@ -144,7 +144,7 @@ fn dispatch_forced_change() { initialize_block(1, Default::default()); Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 5, Some(0)).unwrap(); - assert!(>::exists()); + assert!(PendingChange::::exists()); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)), Error::::ChangePending @@ -155,8 +155,8 @@ fn dispatch_forced_change() { for i in 2..7 { initialize_block(i, header.hash()); - assert!(>::get().unwrap().forced.is_some()); - assert_eq!(Grandpa::next_forced(), Some(11)); + assert!(PendingChange::::get().unwrap().forced.is_some()); + assert_eq!(NextForced::::get(), Some(11)); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), Error::::ChangePending @@ -174,7 +174,7 @@ fn dispatch_forced_change() { // add a normal change. { initialize_block(7, header.hash()); - assert!(!>::exists()); + assert!(!PendingChange::::exists()); assert_eq!( Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)]) @@ -187,7 +187,7 @@ fn dispatch_forced_change() { // run the normal change. { initialize_block(8, header.hash()); - assert!(>::exists()); + assert!(PendingChange::::exists()); assert_eq!( Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)]) @@ -204,9 +204,9 @@ fn dispatch_forced_change() { // time. 
for i in 9..11 { initialize_block(i, header.hash()); - assert!(!>::exists()); + assert!(!PendingChange::::exists()); assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(5, 1)])); - assert_eq!(Grandpa::next_forced(), Some(11)); + assert_eq!(NextForced::::get(), Some(11)); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)), Error::::TooSoon @@ -217,13 +217,13 @@ fn dispatch_forced_change() { { initialize_block(11, header.hash()); - assert!(!>::exists()); + assert!(!PendingChange::::exists()); assert_ok!(Grandpa::schedule_change( to_authorities(vec![(5, 1), (6, 1), (7, 1)]), 5, Some(0) )); - assert_eq!(Grandpa::next_forced(), Some(21)); + assert_eq!(NextForced::::get(), Some(21)); Grandpa::on_finalize(11); header = System::finalize(); } @@ -239,7 +239,10 @@ fn schedule_pause_only_when_live() { Grandpa::schedule_pause(1).unwrap(); // we've switched to the pending pause state - assert_eq!(Grandpa::state(), StoredState::PendingPause { scheduled_at: 1u64, delay: 1 }); + assert_eq!( + State::::get(), + StoredState::PendingPause { scheduled_at: 1u64, delay: 1 } + ); Grandpa::on_finalize(1); let _ = System::finalize(); @@ -253,7 +256,7 @@ fn schedule_pause_only_when_live() { let _ = System::finalize(); // after finalizing block 2 the set should have switched to paused state - assert_eq!(Grandpa::state(), StoredState::Paused); + assert_eq!(State::::get(), StoredState::Paused); }); } @@ -265,14 +268,14 @@ fn schedule_resume_only_when_paused() { // the set is currently live, resuming it is an error assert_noop!(Grandpa::schedule_resume(1), Error::::ResumeFailed); - assert_eq!(Grandpa::state(), StoredState::Live); + assert_eq!(State::::get(), StoredState::Live); // we schedule a pause to be applied instantly Grandpa::schedule_pause(0).unwrap(); Grandpa::on_finalize(1); let _ = System::finalize(); - assert_eq!(Grandpa::state(), StoredState::Paused); + assert_eq!(State::::get(), StoredState::Paused); // we schedule the set to go back live in 2 blocks initialize_block(2, Default::default()); @@ -289,7 +292,7 @@ fn schedule_resume_only_when_paused() { let _ = System::finalize(); // it should be live at block 4 - assert_eq!(Grandpa::state(), StoredState::Live); + assert_eq!(State::::get(), StoredState::Live); }); } @@ -342,7 +345,7 @@ fn report_equivocation_current_set_works() { let equivocation_key = &authorities[equivocation_authority_index].0; let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof, with two votes in the same round for // different block hashes signed by the same key @@ -424,7 +427,7 @@ fn report_equivocation_old_set_works() { let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof for the old set, let equivocation_proof = generate_equivocation_proof( @@ -487,7 +490,7 @@ fn report_equivocation_invalid_set_id() { let key_owner_proof = Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap(); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation for a future set let equivocation_proof = generate_equivocation_proof( @@ -527,7 +530,7 @@ fn report_equivocation_invalid_session() { start_era(2); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof at set id = 2 let 
equivocation_proof = generate_equivocation_proof( @@ -568,7 +571,7 @@ fn report_equivocation_invalid_key_owner_proof() { let equivocation_key = &authorities[equivocation_authority_index].0; let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof for the authority at index 0 let equivocation_proof = generate_equivocation_proof( @@ -611,7 +614,7 @@ fn report_equivocation_invalid_equivocation_proof() { let key_owner_proof = Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap(); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); let assert_invalid_equivocation_proof = |equivocation_proof| { assert_err!( @@ -675,7 +678,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { let equivocation_authority_index = 0; let equivocation_key = &authorities[equivocation_authority_index].0; let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); let equivocation_proof = generate_equivocation_proof( set_id, @@ -748,12 +751,12 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { #[test] fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() { new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { - assert_eq!(Grandpa::current_set_id(), 0); + assert_eq!(CurrentSetId::::get(), 0); // starting a new era should lead to a change in the session // validators and trigger a new set start_era(1); - assert_eq!(Grandpa::current_set_id(), 1); + assert_eq!(CurrentSetId::::get(), 1); // we schedule a change delayed by 2 blocks, this should make it so that // when we try to rotate the session at the beginning of the era we will @@ -761,22 +764,22 @@ fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() { // not increment the set id. Grandpa::schedule_change(to_authorities(vec![(1, 1)]), 2, None).unwrap(); start_era(2); - assert_eq!(Grandpa::current_set_id(), 1); + assert_eq!(CurrentSetId::::get(), 1); // everything should go back to normal after. start_era(3); - assert_eq!(Grandpa::current_set_id(), 2); + assert_eq!(CurrentSetId::::get(), 2); // session rotation might also fail to schedule a change if it's for a // forced change (i.e. grandpa is stalled) and it is too soon. 
- >::put(1000); - >::put((30, 1)); + NextForced::::put(1000); + Stalled::::put((30, 1)); // NOTE: we cannot go through normal era rotation since having `Stalled` // defined will also trigger a new set (regardless of whether the // session validators changed) Grandpa::on_new_session(true, std::iter::empty(), std::iter::empty()); - assert_eq!(Grandpa::current_set_id(), 2); + assert_eq!(CurrentSetId::::get(), 2); }); } @@ -790,19 +793,19 @@ fn cleans_up_old_set_id_session_mappings() { // we should have a session id mapping for all the set ids from // `max_set_id_session_entries` eras we have observed for i in 1..=max_set_id_session_entries { - assert!(Grandpa::session_for_set(i as u64).is_some()); + assert!(SetIdSession::::get(i as u64).is_some()); } start_era(max_set_id_session_entries * 2); // we should keep tracking the new mappings for new eras for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 { - assert!(Grandpa::session_for_set(i as u64).is_some()); + assert!(SetIdSession::::get(i as u64).is_some()); } // but the old ones should have been pruned by now for i in 1..=max_set_id_session_entries { - assert!(Grandpa::session_for_set(i as u64).is_none()); + assert!(SetIdSession::::get(i as u64).is_none()); } }); } @@ -812,24 +815,24 @@ fn always_schedules_a_change_on_new_session_when_stalled() { new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { start_era(1); - assert!(Grandpa::pending_change().is_none()); - assert_eq!(Grandpa::current_set_id(), 1); + assert!(PendingChange::::get().is_none()); + assert_eq!(CurrentSetId::::get(), 1); // if the session handler reports no change then we should not schedule // any pending change Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty()); - assert!(Grandpa::pending_change().is_none()); - assert_eq!(Grandpa::current_set_id(), 1); + assert!(PendingChange::::get().is_none()); + assert_eq!(CurrentSetId::::get(), 1); // if grandpa is stalled then we should **always** schedule a forced // change on a new session - >::put((10, 1)); + Stalled::::put((10, 1)); Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty()); - assert!(Grandpa::pending_change().is_some()); - assert!(Grandpa::pending_change().unwrap().forced.is_some()); - assert_eq!(Grandpa::current_set_id(), 2); + assert!(PendingChange::::get().is_some()); + assert!(PendingChange::::get().unwrap().forced.is_some()); + assert_eq!(CurrentSetId::::get(), 2); }); } @@ -861,7 +864,7 @@ fn valid_equivocation_reports_dont_pay_fees() { let equivocation_key = &Grandpa::grandpa_authorities()[0].0; let equivocation_keyring = extract_keyring(equivocation_key); - let set_id = Grandpa::current_set_id(); + let set_id = CurrentSetId::::get(); // generate an equivocation proof. 
let equivocation_proof = generate_equivocation_proof( From ddffa027d7b78af330a2d3d18b7dfdbd00e431f0 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Tue, 14 Jan 2025 10:40:50 +0200 Subject: [PATCH 4/8] forbid v1 descriptors with UMP signals (#7127) --- .../node/core/candidate-validation/src/lib.rs | 15 ++-- .../core/candidate-validation/src/tests.rs | 71 +++++++++++++++++-- polkadot/primitives/src/vstaging/mod.rs | 30 ++++++-- prdoc/pr_7127.prdoc | 9 +++ 4 files changed, 104 insertions(+), 21 deletions(-) create mode 100644 prdoc/pr_7127.prdoc diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 25614349486ea..2a4643031bf87 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -912,15 +912,10 @@ async fn validate_candidate_exhaustive( // invalid. Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) } else { - let core_index = candidate_receipt.descriptor.core_index(); - - match (core_index, exec_kind) { + match exec_kind { // Core selectors are optional for V2 descriptors, but we still check the // descriptor core index. - ( - Some(_core_index), - PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_), - ) => { + PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => { let Some(claim_queue) = maybe_claim_queue else { let error = "cannot fetch the claim queue from the runtime"; gum::warn!( @@ -937,9 +932,9 @@ async fn validate_candidate_exhaustive( { gum::warn!( target: LOG_TARGET, - ?err, candidate_hash = ?candidate_receipt.hash(), - "Candidate core index is invalid", + "Candidate core index is invalid: {}", + err ); return Ok(ValidationResult::Invalid( InvalidCandidate::InvalidCoreIndex, @@ -947,7 +942,7 @@ async fn validate_candidate_exhaustive( } }, // No checks for approvals and disputes - (_, _) => {}, + _ => {}, } Ok(ValidationResult::Valid( diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 98e34a1cb4c13..795d7c93f8a70 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -30,8 +30,8 @@ use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; use polkadot_primitives::{ vstaging::{ - CandidateDescriptorV2, ClaimQueueOffset, CoreSelector, MutateDescriptorV2, UMPSignal, - UMP_SEPARATOR, + CandidateDescriptorV2, CandidateDescriptorVersion, ClaimQueueOffset, CoreSelector, + MutateDescriptorV2, UMPSignal, UMP_SEPARATOR, }, CandidateDescriptor, CoreIndex, GroupIndex, HeadData, Id as ParaId, OccupiedCoreAssumption, SessionInfo, UpwardMessage, ValidatorId, @@ -851,7 +851,7 @@ fn invalid_session_or_core_index() { )) .unwrap(); - // Validation doesn't fail for approvals, core/session index is not checked. + // Validation doesn't fail for disputes, core/session index is not checked. assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); assert_eq!(outputs.upward_messages, commitments.upward_messages); @@ -911,6 +911,69 @@ fn invalid_session_or_core_index() { assert_eq!(outputs.hrmp_watermark, 0); assert_eq!(used_validation_data, validation_data); }); + + // Test that a v1 candidate that outputs the core selector UMP signal is invalid. 
+ let descriptor_v1 = make_valid_candidate_descriptor( + ParaId::from(1_u32), + dummy_hash(), + dummy_hash(), + pov.hash(), + validation_code.hash(), + validation_result.head_data.hash(), + dummy_hash(), + sp_keyring::Sr25519Keyring::Ferdie, + ); + let descriptor: CandidateDescriptorV2 = descriptor_v1.into(); + + perform_basic_checks(&descriptor, validation_data.max_pov_size, &pov, &validation_code.hash()) + .unwrap(); + assert_eq!(descriptor.version(), CandidateDescriptorVersion::V1); + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + + for exec_kind in + [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] + { + let result = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + exec_kind, + &Default::default(), + Some(Default::default()), + )) + .unwrap(); + assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidCoreIndex)); + } + + // Validation doesn't fail for approvals and disputes, core/session index is not checked. + for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { + let v = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + exec_kind, + &Default::default(), + Default::default(), + )) + .unwrap(); + + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, commitments.upward_messages); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); + } } #[test] @@ -1407,7 +1470,7 @@ fn compressed_code_works() { ExecutorParams::default(), PvfExecKind::Backing(dummy_hash()), &Default::default(), - Default::default(), + Some(Default::default()), )); assert_matches!(v, Ok(ValidationResult::Valid(_, _))); diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 271f78efe0901..c52f3539c3e53 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -505,6 +505,10 @@ pub enum CommittedCandidateReceiptError { /// Currenly only one such message is allowed. #[cfg_attr(feature = "std", error("Too many UMP signals"))] TooManyUMPSignals, + /// If the parachain runtime started sending core selectors, v1 descriptors are no longer + /// allowed. + #[cfg_attr(feature = "std", error("Version 1 receipt does not support core selectors"))] + CoreSelectorWithV1Decriptor, } macro_rules! impl_getter { @@ -603,15 +607,25 @@ impl CommittedCandidateReceiptV2 { &self, cores_per_para: &TransposedClaimQueue, ) -> Result<(), CommittedCandidateReceiptError> { + let maybe_core_selector = self.commitments.core_selector()?; + match self.descriptor.version() { - // Don't check v1 descriptors. - CandidateDescriptorVersion::V1 => return Ok(()), + CandidateDescriptorVersion::V1 => { + // If the parachain runtime started sending core selectors, v1 descriptors are no + // longer allowed. 
+ if maybe_core_selector.is_some() { + return Err(CommittedCandidateReceiptError::CoreSelectorWithV1Decriptor) + } else { + // Nothing else to check for v1 descriptors. + return Ok(()) + } + }, CandidateDescriptorVersion::V2 => {}, CandidateDescriptorVersion::Unknown => return Err(CommittedCandidateReceiptError::UnknownVersion(self.descriptor.version)), } - let (maybe_core_index_selector, cq_offset) = self.commitments.core_selector()?.map_or_else( + let (maybe_core_index_selector, cq_offset) = maybe_core_selector.map_or_else( || (None, ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)), |(sel, off)| (Some(sel), off), ); @@ -1207,8 +1221,7 @@ mod tests { assert_eq!(new_ccr.hash(), v2_ccr.hash()); } - // Only check descriptor `core_index` field of v2 descriptors. If it is v1, that field - // will be garbage. + // V1 descriptors are forbidden once the parachain runtime started sending UMP signals. #[test] fn test_v1_descriptors_with_ump_signal() { let mut ccr = dummy_old_committed_candidate_receipt(); @@ -1234,9 +1247,12 @@ mod tests { cq.insert(CoreIndex(0), vec![v1_ccr.descriptor.para_id()].into()); cq.insert(CoreIndex(1), vec![v1_ccr.descriptor.para_id()].into()); - assert!(v1_ccr.check_core_index(&transpose_claim_queue(cq)).is_ok()); - assert_eq!(v1_ccr.descriptor.core_index(), None); + + assert_eq!( + v1_ccr.check_core_index(&transpose_claim_queue(cq)), + Err(CommittedCandidateReceiptError::CoreSelectorWithV1Decriptor) + ); } #[test] diff --git a/prdoc/pr_7127.prdoc b/prdoc/pr_7127.prdoc new file mode 100644 index 0000000000000..761ddd04dbe15 --- /dev/null +++ b/prdoc/pr_7127.prdoc @@ -0,0 +1,9 @@ +title: 'Forbid v1 descriptors with UMP signals' +doc: +- audience: [Runtime Dev, Node Dev] + description: Adds a check that parachain candidates do not send out UMP signals with v1 descriptors. +crates: +- name: polkadot-node-core-candidate-validation + bump: minor +- name: polkadot-primitives + bump: major From f4743b009280e47398790bd85943819540a9ce0a Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 14 Jan 2025 14:09:01 +0100 Subject: [PATCH 5/8] `fatxpool`: proper handling of priorities when mempool is full (#6647) Higher-priority transactions can now replace lower-priority transactions even when the internal _tx_mem_pool_ is full. **Notes for reviewers:** - The _tx_mem_pool_ now maintains information about transaction priority. Although _tx_mem_pool_ itself is stateless, transaction priority is updated after submission to the view. An alternative approach could involve validating transactions at the `at` block, but this is computationally expensive. To avoid additional validation overhead, I opted to use the priority obtained from the runtime during submission to the view. This is the rationale behind introducing the `SubmitOutcome` struct, which synchronously communicates transaction priority from the view to the pool. This results in a very brief window during which the transaction priority remains unknown - those transactions are not taken into consideration while dropping takes place. In the future, if needed, we could update transaction priority using view revalidation results to keep this information fully up-to-date (as the priority of a transaction may change with chain-state evolution). - When _tx_mem_pool_ becomes full (an event anticipated to be rare), transaction priority must be known to perform priority-based removal. In such cases, the most recent known block is utilized for validation. 
I think that speculative submission to the view and re-using the priority from this
submission would be an unnecessary complication.
- Once the priority is determined, lower-priority transactions whose cumulative size meets
or exceeds the size of the new transaction are collected to ensure the pool size limit is
not exceeded.
- A transaction removed from _tx_mem_pool_ also needs to be removed from all the views,
with an appropriate event (which is done by `remove_transaction_subtree`). To ensure
complete removal, the `PendingTxReplacement` struct was refactored into the more generic
`PendingPreInsertTask` (introduced in #6405), which covers both removal and submission of
a transaction in views that may be created in the background. This ensures that a removed
transaction will not re-enter a newly created view.
- The `submit_local` implementation was also improved to properly handle priorities when
the mempool is full. Some missing tests for this method were also added.

Closes: #5809

---------

Co-authored-by: command-bot <>
Co-authored-by: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
---
 prdoc/pr_6647.prdoc                           |   8 +
 .../src/fork_aware_txpool/dropped_watcher.rs  |  18 +-
 .../fork_aware_txpool/fork_aware_txpool.rs    | 238 +++++++++--
 .../src/fork_aware_txpool/tx_mem_pool.rs      | 402 ++++++++++++++++--
 .../src/fork_aware_txpool/view.rs             |  22 +-
 .../src/fork_aware_txpool/view_store.rs       | 261 +++++++++---
 .../transaction-pool/src/graph/base_pool.rs   |  44 +-
 .../transaction-pool/src/graph/listener.rs    |   4 +-
 .../client/transaction-pool/src/graph/mod.rs  |   8 +-
 .../client/transaction-pool/src/graph/pool.rs |  84 ++--
 .../transaction-pool/src/graph/ready.rs       |  10 +-
 .../transaction-pool/src/graph/tracked_map.rs |   5 +
 .../src/graph/validated_pool.rs               | 119 +++++-
 .../src/single_state_txpool/revalidation.rs   |   5 +-
 .../single_state_txpool.rs                    |  30 +-
 .../transaction-pool/tests/fatp_common/mod.rs |  19 +-
 .../transaction-pool/tests/fatp_prios.rs      | 317 +++++++++++++-
 .../client/transaction-pool/tests/pool.rs     |  14 +-
 .../runtime/transaction-pool/src/lib.rs       |  36 +-
 19 files changed, 1393 insertions(+), 251 deletions(-)
 create mode 100644 prdoc/pr_6647.prdoc

diff --git a/prdoc/pr_6647.prdoc b/prdoc/pr_6647.prdoc
new file mode 100644
index 0000000000000..47af9924ef1c0
--- /dev/null
+++ b/prdoc/pr_6647.prdoc
@@ -0,0 +1,8 @@
+title: '`fatxpool`: proper handling of priorities when mempool is full'
+doc:
+- audience: Node Dev
+  description: |-
+    Higher-priority transactions can now replace lower-priority transactions even when the internal _tx_mem_pool_ is full.
+crates:
+- name: sc-transaction-pool
+  bump: minor

diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
index d69aa37c94a1a..bf61558b00b0d 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
@@ -53,11 +53,13 @@ pub struct DroppedTransaction {
 }

 impl DroppedTransaction {
-	fn new_usurped(tx_hash: Hash, by: Hash) -> Self {
+	/// Creates a new instance with reason set to `DroppedReason::Usurped(by)`.
+	pub fn new_usurped(tx_hash: Hash, by: Hash) -> Self {
 		Self { reason: DroppedReason::Usurped(by), tx_hash }
 	}

-	fn new_enforced_by_limts(tx_hash: Hash) -> Self {
+	/// Creates a new instance with reason set to `DroppedReason::LimitsEnforced`.
+ pub fn new_enforced_by_limts(tx_hash: Hash) -> Self { Self { reason: DroppedReason::LimitsEnforced, tx_hash } } } @@ -256,11 +258,13 @@ where self.future_transaction_views.entry(tx_hash).or_default().insert(block_hash); }, TransactionStatus::Ready | TransactionStatus::InBlock(..) => { - // note: if future transaction was once seens as the ready we may want to treat it - // as ready transactions. Unreferenced future transactions are more likely to be - // removed when the last referencing view is removed then ready transactions. - // Transcaction seen as ready is likely quite close to be included in some - // future fork. + // note: if future transaction was once seen as the ready we may want to treat it + // as ready transaction. The rationale behind this is as follows: we want to remove + // unreferenced future transactions when the last referencing view is removed (to + // avoid clogging mempool). For ready transactions we prefer to keep them in mempool + // even if no view is currently referencing them. Future transcaction once seen as + // ready is likely quite close to be included in some future fork (it is close to be + // ready, so we make exception and treat such transaction as ready). if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) { views.insert(block_hash); self.ready_transaction_views.insert(tx_hash, views); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index e57256943ccfe..7660457182520 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -31,7 +31,10 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + fork_aware_txpool::{ + dropped_watcher::{DroppedReason, DroppedTransaction}, + revalidation_worker, + }, graph::{ self, base_pool::{TimedTransactionSource, Transaction}, @@ -49,14 +52,16 @@ use futures::{ use parking_lot::Mutex; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_transaction_pool_api::{ - ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolStatus, TransactionFor, - TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + error::Error as TxPoolApiError, ChainEvent, ImportNotificationStream, + MaintainedTransactionPool, PoolStatus, TransactionFor, TransactionPool, TransactionPriority, + TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor}, + transaction_validity::{TransactionValidityError, ValidTransaction}, }; use std::{ collections::{HashMap, HashSet}, @@ -287,7 +292,7 @@ where DroppedReason::LimitsEnforced => {}, }; - mempool.remove_dropped_transaction(&dropped_tx_hash).await; + mempool.remove_transaction(&dropped_tx_hash); view_store.listener.transaction_dropped(dropped); import_notification_sink.clean_notified_items(&[dropped_tx_hash]); } @@ -598,7 +603,7 @@ where /// out: /// [ Ok(xth0), Ok(xth1), Err ] /// ``` -fn reduce_multiview_result(input: HashMap>>) -> Vec> { +fn reduce_multiview_result(input: HashMap>>) -> Vec> { let mut values = input.values(); let Some(first) = values.next() else { return Default::default(); @@ -650,9 +655,28 @@ 
where let mempool_results = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { - return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::>()) + return Ok(mempool_results + .into_iter() + .map(|r| r.map(|r| r.hash).map_err(Into::into)) + .collect::>()) } + // Submit all the transactions to the mempool + let retries = mempool_results + .into_iter() + .zip(xts.clone()) + .map(|(result, xt)| async move { + match result { + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(source, false, xt).await, + _ => result, + } + }) + .collect::>(); + + let mempool_results = futures::future::join_all(retries).await; + + // Collect transactions that were successfully submitted to the mempool... let to_be_submitted = mempool_results .iter() .zip(xts) @@ -664,22 +688,47 @@ where self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); + // ... and submit them to the view_store. Please note that transactions rejected by mempool + // are not sent here. let mempool = self.mempool.clone(); let results_map = view_store.submit(to_be_submitted.into_iter()).await; let mut submission_results = reduce_multiview_result(results_map).into_iter(); + // Note for composing final result: + // + // For each failed insertion into the mempool, the mempool result should be placed into + // the returned vector. + // + // For each successful insertion into the mempool, the corresponding + // view_store submission result needs to be examined: + // - If there is an error during view_store submission, the transaction is removed from + // the mempool, and the final result recorded in the vector for this transaction is the + // view_store submission error. + // + // - If the view_store submission is successful, the transaction priority is updated in the + // mempool. + // + // Finally, it collects the hashes of updated transactions or submission errors (either + // from the mempool or view_store) into a returned vector. Ok(mempool_results .into_iter() .map(|result| { - result.and_then(|insertion| { - submission_results - .next() - .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") - .inspect_err(|_| - mempool.remove(insertion.hash) - ) + result + .map_err(Into::into) + .and_then(|insertion| { + submission_results + .next() + .expect("The number of Ok results in mempool is exactly the same as the size of view_store submission result. qed.") + .inspect_err(|_|{ + mempool.remove_transaction(&insertion.hash); + }) }) + }) + .map(|r| r.map(|r| { + mempool.update_transaction_priority(&r); + r.hash() + })) .collect::>()) } @@ -712,10 +761,13 @@ where ) -> Result>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); - let InsertionInfo { hash: xt_hash, source: timed_source } = + + let InsertionInfo { hash: xt_hash, source: timed_source, .. 
} = match self.mempool.push_watched(source, xt.clone()) { Ok(result) => result, - Err(e) => return Err(e), + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(source, true, xt.clone()).await?, + Err(e) => return Err(e.into()), }; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); @@ -723,7 +775,13 @@ where self.view_store .submit_and_watch(at, timed_source, xt) .await - .inspect_err(|_| self.mempool.remove(xt_hash)) + .inspect_err(|_| { + self.mempool.remove_transaction(&xt_hash); + }) + .map(|mut outcome| { + self.mempool.update_transaction_priority(&outcome); + outcome.expect_watcher() + }) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -828,22 +886,16 @@ where } } -impl sc_transaction_pool_api::LocalTransactionPool - for ForkAwareTxPool, Block> +impl sc_transaction_pool_api::LocalTransactionPool + for ForkAwareTxPool where Block: BlockT, + ChainApi: 'static + graph::ChainApi, ::Hash: Unpin, - Client: sp_api::ProvideRuntimeApi - + sc_client_api::BlockBackend - + sc_client_api::blockchain::HeaderBackend - + sp_runtime::traits::BlockIdTo - + sp_blockchain::HeaderMetadata, - Client: Send + Sync + 'static, - Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, { type Block = Block; - type Hash = ExtrinsicHash>; - type Error = as graph::ChainApi>::Error; + type Hash = ExtrinsicHash; + type Error = ChainApi::Error; fn submit_local( &self, @@ -852,12 +904,29 @@ where ) -> Result { log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); let xt = Arc::from(xt); - let InsertionInfo { hash: xt_hash, .. } = self - .mempool - .extend_unwatched(TransactionSource::Local, &[xt.clone()]) - .remove(0)?; - self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) + let result = + self.mempool.extend_unwatched(TransactionSource::Local, &[xt.clone()]).remove(0); + + let insertion = match result { + Err(TxPoolApiError::ImmediatelyDropped) => self.attempt_transaction_replacement_sync( + TransactionSource::Local, + false, + xt.clone(), + ), + _ => result, + }?; + + self.view_store + .submit_local(xt) + .inspect_err(|_| { + self.mempool.remove_transaction(&insertion.hash); + }) + .map(|outcome| { + self.mempool.update_transaction_priority(&outcome); + outcome.hash() + }) + .or_else(|_| Ok(insertion.hash)) } } @@ -1109,7 +1178,11 @@ where .await .into_iter() .zip(hashes) - .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .map(|(result, tx_hash)| { + result + .map(|outcome| self.mempool.update_transaction_priority(&outcome.into())) + .or_else(|_| Err(tx_hash)) + }) .collect::>(); let submitted_count = watched_results.len(); @@ -1131,7 +1204,7 @@ where for result in watched_results { if let Err(tx_hash) = result { self.view_store.listener.invalidate_transactions(&[tx_hash]); - self.mempool.remove(tx_hash); + self.mempool.remove_transaction(&tx_hash); } } } @@ -1263,6 +1336,101 @@ where fn tx_hash(&self, xt: &TransactionFor) -> TxHash { self.api.hash_and_length(xt).0 } + + /// Attempts to find and replace a lower-priority transaction in the transaction pool with a new + /// one. + /// + /// This asynchronous function verifies the new transaction against the most recent view. If a + /// transaction with a lower priority exists in the transaction pool, it is replaced with the + /// new transaction. + /// + /// If no lower-priority transaction is found, the function returns an error indicating the + /// transaction was dropped immediately. 
+ async fn attempt_transaction_replacement( + &self, + source: TransactionSource, + watched: bool, + xt: ExtrinsicFor, + ) -> Result>, TxPoolApiError> { + let at = self + .view_store + .most_recent_view + .read() + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + let (best_view, _) = self + .view_store + .get_view_at(at, false) + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + let (xt_hash, validated_tx) = best_view + .pool + .verify_one( + best_view.at.hash, + best_view.at.number, + TimedTransactionSource::from_transaction_source(source, false), + xt.clone(), + crate::graph::CheckBannedBeforeVerify::Yes, + ) + .await; + + let Some(priority) = validated_tx.priority() else { + return Err(TxPoolApiError::ImmediatelyDropped) + }; + + self.attempt_transaction_replacement_inner(xt, xt_hash, priority, source, watched) + } + + /// Sync version of [`Self::attempt_transaction_replacement`]. + fn attempt_transaction_replacement_sync( + &self, + source: TransactionSource, + watched: bool, + xt: ExtrinsicFor, + ) -> Result>, TxPoolApiError> { + let at = self + .view_store + .most_recent_view + .read() + .ok_or(TxPoolApiError::ImmediatelyDropped)?; + + let ValidTransaction { priority, .. } = self + .api + .validate_transaction_blocking(at, TransactionSource::Local, Arc::from(xt.clone())) + .map_err(|_| TxPoolApiError::ImmediatelyDropped)? + .map_err(|e| match e { + TransactionValidityError::Invalid(i) => TxPoolApiError::InvalidTransaction(i), + TransactionValidityError::Unknown(u) => TxPoolApiError::UnknownTransaction(u), + })?; + let xt_hash = self.hash_of(&xt); + self.attempt_transaction_replacement_inner(xt, xt_hash, priority, source, watched) + } + + fn attempt_transaction_replacement_inner( + &self, + xt: ExtrinsicFor, + tx_hash: ExtrinsicHash, + priority: TransactionPriority, + source: TransactionSource, + watched: bool, + ) -> Result>, TxPoolApiError> { + let insertion_info = + self.mempool.try_insert_with_replacement(xt, priority, source, watched)?; + + for worst_hash in &insertion_info.removed { + log::trace!(target: LOG_TARGET, "removed: {worst_hash:?} replaced by {tx_hash:?}"); + self.view_store + .listener + .transaction_dropped(DroppedTransaction::new_enforced_by_limts(*worst_hash)); + + self.view_store + .remove_transaction_subtree(*worst_hash, |listener, removed_tx_hash| { + listener.limits_enforced(&removed_tx_hash); + }); + } + + return Ok(insertion_info) + } } #[async_trait] @@ -1410,7 +1578,7 @@ mod reduce_multiview_result_tests { fn empty() { sp_tracing::try_init_simple(); let input = HashMap::default(); - let r = reduce_multiview_result::(input); + let r = reduce_multiview_result::(input); assert!(r.is_empty()); } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 989ae4425dc48..c8a4d0c72dd36 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -26,7 +26,10 @@ //! it), while on other forks tx can be valid. Depending on which view is chosen to be cloned, //! such transaction could not be present in the newly created view. 
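// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] The "priority learned later" scheme
// introduced by the changes below, reduced to a minimal self-contained model:
// the priority starts as `None` at submission time and is filled in once a
// view reports it. The `parking_lot` dependency matches the imports added in
// this file; the u64 priority and the `demo` function are illustrative only.
use parking_lot::RwLock;

struct TxPriority {
	priority: RwLock<Option<u64>>,
}

fn demo() {
	let tx = TxPriority { priority: RwLock::new(None) }; // unknown at submission
	*tx.priority.write() = Some(9001); // learned after submission to a view
	assert_eq!(*tx.priority.read(), Some(9001));
}
// ---------------------------------------------------------------------------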
-use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener}; +use super::{ + metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, + view_store::ViewStoreSubmitOutcome, +}; use crate::{ common::log_xt::log_xt_trace, graph, @@ -35,15 +38,20 @@ use crate::{ }; use futures::FutureExt; use itertools::Itertools; -use sc_transaction_pool_api::TransactionSource; +use parking_lot::RwLock; +use sc_transaction_pool_api::{TransactionPriority, TransactionSource}; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::Block as BlockT, transaction_validity::{InvalidTransaction, TransactionValidityError}, }; use std::{ + cmp::Ordering, collections::HashMap, - sync::{atomic, atomic::AtomicU64, Arc}, + sync::{ + atomic::{self, AtomicU64}, + Arc, + }, time::Instant, }; @@ -77,6 +85,9 @@ where source: TimedTransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. validated_at: AtomicU64, + /// Priority of transaction at some block. It is assumed it will not be changed often. None if + /// not known. + priority: RwLock>, //todo: we need to add future / ready status at finalized block. //If future transactions are stuck in tx_mem_pool (due to limits being hit), we need a means // to replace them somehow with newly coming transactions. @@ -101,23 +112,50 @@ where /// Creates a new instance of wrapper for unwatched transaction. fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { - watched: false, - tx, - source: TimedTransactionSource::from_transaction_source(source, true), - validated_at: AtomicU64::new(0), - bytes, - } + Self::new(false, source, tx, bytes) } /// Creates a new instance of wrapper for watched transaction. fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { + Self::new(true, source, tx, bytes) + } + + /// Creates a new instance of wrapper for a transaction with no priority. + fn new( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + ) -> Self { + Self::new_with_optional_priority(watched, source, tx, bytes, None) + } + + /// Creates a new instance of wrapper for a transaction with given priority. + fn new_with_priority( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + priority: TransactionPriority, + ) -> Self { + Self::new_with_optional_priority(watched, source, tx, bytes, Some(priority)) + } + + /// Creates a new instance of wrapper for a transaction with optional priority. + fn new_with_optional_priority( + watched: bool, + source: TransactionSource, + tx: ExtrinsicFor, + bytes: usize, + priority: Option, + ) -> Self { Self { - watched: true, + watched, tx, source: TimedTransactionSource::from_transaction_source(source, true), validated_at: AtomicU64::new(0), bytes, + priority: priority.into(), } } @@ -132,6 +170,11 @@ where pub(crate) fn source(&self) -> TimedTransactionSource { self.source.clone() } + + /// Returns the priority of the transaction. 
+ pub(crate) fn priority(&self) -> Option { + *self.priority.read() + } } impl Size for Arc> @@ -191,11 +234,15 @@ where pub(super) struct InsertionInfo { pub(super) hash: Hash, pub(super) source: TimedTransactionSource, + pub(super) removed: Vec, } impl InsertionInfo { fn new(hash: Hash, source: TimedTransactionSource) -> Self { - Self { hash, source } + Self::new_with_removed(hash, source, Default::default()) + } + fn new_with_removed(hash: Hash, source: TimedTransactionSource, removed: Vec) -> Self { + Self { hash, source, removed } } } @@ -279,27 +326,109 @@ where &self, hash: ExtrinsicHash, tx: TxInMemPool, - ) -> Result>, ChainApi::Error> { - let bytes = self.transactions.bytes(); + ) -> Result>, sc_transaction_pool_api::error::Error> { let mut transactions = self.transactions.write(); + + let bytes = self.transactions.bytes(); + let result = match ( - !self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), + self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), transactions.contains_key(&hash), ) { - (true, false) => { + (false, false) => { let source = tx.source(); transactions.insert(hash, Arc::from(tx)); Ok(InsertionInfo::new(hash, source)) }, (_, true) => - Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), - (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), + Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))), + (true, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped), }; log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); result } + /// Attempts to insert a new transaction in the memory pool and drop some worse existing + /// transactions. + /// + /// A "worse" transaction means transaction with lower priority, or older transaction with the + /// same prio. + /// + /// This operation will not overflow the limit of the mempool. It means that cumulative + /// size of removed transactions will be equal (or greated) then size of newly inserted + /// transaction. + /// + /// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully + /// inserted; otherwise, returns an appropriate error indicating the failure. + pub(super) fn try_insert_with_replacement( + &self, + new_tx: ExtrinsicFor, + priority: TransactionPriority, + source: TransactionSource, + watched: bool, + ) -> Result>, sc_transaction_pool_api::error::Error> { + let (hash, length) = self.api.hash_and_length(&new_tx); + let new_tx = TxInMemPool::new_with_priority(watched, source, new_tx, length, priority); + if new_tx.bytes > self.max_transactions_total_bytes { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + } + + let mut transactions = self.transactions.write(); + + if transactions.contains_key(&hash) { + return Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))); + } + + let mut sorted = transactions + .iter() + .filter_map(|(h, v)| v.priority().map(|_| (*h, v.clone()))) + .collect::>(); + + // When pushing higher prio transaction, we need to find a number of lower prio txs, such + // that the sum of their bytes is ge then size of new tx. Otherwise we could overflow size + // limits. Naive way to do it - rev-sort by priority and eat the tail. 
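	// -----------------------------------------------------------------------
	// [Editor's sketch, not part of the patch] The selection step described in
	// the comment above, modelled on plain (priority, timestamp, bytes) tuples
	// instead of TxInMemPool: sort descending by priority (newest first within
	// equal priority), then pop victims from the tail until enough bytes are
	// freed. Returning None stands in for the ImmediatelyDropped error.
	fn pick_victims(
		mut candidates: Vec<(u64, u64, usize)>, // (priority, timestamp, bytes)
		free_bytes: usize,
		new_tx_bytes: usize,
		new_tx_priority: u64,
	) -> Option<Vec<(u64, u64, usize)>> {
		candidates.sort_by(|a, b| b.0.cmp(&a.0).then(b.1.cmp(&a.1)));
		let mut freed = 0usize;
		let mut picked = Vec::new();
		while free_bytes + freed < new_tx_bytes {
			let victim = candidates.pop()?; // ran out of candidates: give up
			if victim.0 >= new_tx_priority {
				return None; // never evict an equal- or higher-priority tx
			}
			freed += victim.2;
			picked.push(victim);
		}
		Some(picked)
	}
	// -----------------------------------------------------------------------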
+ + // reverse (oldest, lowest prio last) + sorted.sort_by(|(_, a), (_, b)| match b.priority().cmp(&a.priority()) { + Ordering::Equal => match (a.source.timestamp, b.source.timestamp) { + (Some(a), Some(b)) => b.cmp(&a), + _ => Ordering::Equal, + }, + ordering => ordering, + }); + + let mut total_size_removed = 0usize; + let mut to_be_removed = vec![]; + let free_bytes = self.max_transactions_total_bytes - self.transactions.bytes(); + + loop { + let Some((worst_hash, worst_tx)) = sorted.pop() else { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + }; + + if worst_tx.priority() >= new_tx.priority() { + return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); + } + + total_size_removed += worst_tx.bytes; + to_be_removed.push(worst_hash); + + if free_bytes + total_size_removed >= new_tx.bytes { + break; + } + } + + let source = new_tx.source(); + transactions.insert(hash, Arc::from(new_tx)); + for worst_hash in &to_be_removed { + transactions.remove(worst_hash); + } + debug_assert!(!self.is_limit_exceeded(transactions.len(), self.transactions.bytes())); + + Ok(InsertionInfo::new_with_removed(hash, source, to_be_removed)) + } + /// Adds a new unwatched transactions to the internal buffer not exceeding the limit. /// /// Returns the vector of results for each transaction, the order corresponds to the input @@ -308,7 +437,8 @@ where &self, source: TransactionSource, xts: &[ExtrinsicFor], - ) -> Vec>, ChainApi::Error>> { + ) -> Vec>, sc_transaction_pool_api::error::Error>> + { let result = xts .iter() .map(|xt| { @@ -325,20 +455,11 @@ where &self, source: TransactionSource, xt: ExtrinsicFor, - ) -> Result>, ChainApi::Error> { + ) -> Result>, sc_transaction_pool_api::error::Error> { let (hash, length) = self.api.hash_and_length(&xt); self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) } - /// Removes transaction from the memory pool which are specified by the given list of hashes. - pub(super) async fn remove_dropped_transaction( - &self, - dropped: &ExtrinsicHash, - ) -> Option>> { - log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); - self.transactions.write().remove(dropped) - } - /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory /// pool. pub(super) fn clone_unwatched( @@ -362,9 +483,13 @@ where .collect::>() } - /// Removes a transaction from the memory pool based on a given hash. - pub(super) fn remove(&self, hash: ExtrinsicHash) { - let _ = self.transactions.write().remove(&hash); + /// Removes a transaction with given hash from the memory pool. + pub(super) fn remove_transaction( + &self, + hash: &ExtrinsicHash, + ) -> Option>> { + log::debug!(target: LOG_TARGET, "[{hash:?}] mempool::remove_transaction"); + self.transactions.write().remove(hash) } /// Revalidates a batch of transactions against the provided finalized block. @@ -462,6 +587,17 @@ where }); self.listener.invalidate_transactions(&invalid_hashes); } + + /// Updates the priority of transaction stored in mempool using provided view_store submission + /// outcome. 
+ pub(super) fn update_transaction_priority(&self, outcome: &ViewStoreSubmitOutcome) { + outcome.priority().map(|priority| { + self.transactions + .write() + .get_mut(&outcome.hash()) + .map(|p| *p.priority.write() = Some(priority)) + }); + } } #[cfg(test)] @@ -583,6 +719,9 @@ mod tx_mem_pool_tests { assert_eq!(mempool.unwatched_and_watched_count(), (10, 5)); } + /// size of large extrinsic + const LARGE_XT_SIZE: usize = 1129; + fn large_uxt(x: usize) -> Extrinsic { ExtrinsicBuilder::new_include_data(vec![x as u8; 1024]).build() } @@ -592,8 +731,7 @@ mod tx_mem_pool_tests { sp_tracing::try_init_simple(); let max = 10; let api = Arc::from(TestApi::default()); - //size of large extrinsic is: 1129 - let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * 1129); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE); let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); @@ -617,4 +755,200 @@ mod tx_mem_pool_tests { sc_transaction_pool_api::error::Error::ImmediatelyDropped )); } + + #[test] + fn replacing_txs_works_for_same_tx_size() { + sp_tracing::try_init_simple(); + let max = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE); + + let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let low_prio = 0u64; + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + .iter() + .map(|t| { + let h = api.hash_and_length(t).0; + (ViewStoreSubmitOutcome::new(h, Some(low_prio)), h) + }) + .unzip(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); + + let xt = Arc::from(large_uxt(98)); + let hash = api.hash_and_length(&xt).0; + let result = mempool + .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .unwrap(); + + assert_eq!(result.hash, hash); + assert_eq!(result.removed, hashes[0..1]); + } + + #[test] + fn replacing_txs_removes_proper_size_of_txs() { + sp_tracing::try_init_simple(); + let max = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE); + + let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let low_prio = 0u64; + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + .iter() + .map(|t| { + let h = api.hash_and_length(t).0; + (ViewStoreSubmitOutcome::new(h, Some(low_prio)), h) + }) + .unzip(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + assert_eq!(total_xts_bytes, max * LARGE_XT_SIZE); + + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); + + //this one should drop 2 xts (size: 1130): + let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 1025]).build()); + let (hash, length) = api.hash_and_length(&xt); + assert_eq!(length, 1130); + let result = mempool + .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .unwrap(); + + assert_eq!(result.hash, hash); + assert_eq!(result.removed, 
hashes[0..2]); + } + + #[test] + fn replacing_txs_removes_proper_size_and_prios() { + sp_tracing::try_init_simple(); + const COUNT: usize = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE); + + let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + .iter() + .enumerate() + .map(|(prio, t)| { + let h = api.hash_and_length(t).0; + (ViewStoreSubmitOutcome::new(h, Some((COUNT - prio).try_into().unwrap())), h) + }) + .unzip(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); + + //this one should drop 3 xts (each of size 1129) + let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build()); + let (hash, length) = api.hash_and_length(&xt); + // overhead is 105, thus length: 105 + 2154 + assert_eq!(length, 2 * LARGE_XT_SIZE + 1); + let result = mempool + .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .unwrap(); + + assert_eq!(result.hash, hash); + assert!(result.removed.iter().eq(hashes[COUNT - 3..COUNT].iter().rev())); + } + + #[test] + fn replacing_txs_skips_lower_prio_tx() { + sp_tracing::try_init_simple(); + const COUNT: usize = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE); + + let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let hi_prio = 100u64; + let low_prio = 10u64; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + let submit_outcomes = xts + .iter() + .map(|t| { + let h = api.hash_and_length(t).0; + ViewStoreSubmitOutcome::new(h, Some(hi_prio)) + }) + .collect::>(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + submit_outcomes + .into_iter() + .for_each(|o| mempool.update_transaction_priority(&o)); + + let xt = Arc::from(large_uxt(98)); + let result = + mempool.try_insert_with_replacement(xt, low_prio, TransactionSource::External, false); + + // lower prio tx is rejected immediately + assert!(matches!( + result.unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + } + + #[test] + fn replacing_txs_is_skipped_if_prios_are_not_set() { + sp_tracing::try_init_simple(); + const COUNT: usize = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE); + + let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let hi_prio = u64::MAX; + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + //this one could drop 3 xts (each of size 1129) + let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build()); + let length = api.hash_and_length(&xt).1; + // overhead is 105, thus length: 105 + 2154 + assert_eq!(length, 2 * LARGE_XT_SIZE + 1); + + let result = + 
mempool.try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false); + + // we did not update priorities (update_transaction_priority was not called): + assert!(matches!( + result.unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 3cbb8fa4871d0..a35d68120a3ab 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -28,7 +28,7 @@ use crate::{ common::log_xt::log_xt_trace, graph::{ self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, - IsValidator, ValidatedTransaction, ValidatedTransactionFor, + IsValidator, ValidatedPoolSubmitOutcome, ValidatedTransaction, ValidatedTransactionFor, }, LOG_TARGET, }; @@ -158,7 +158,7 @@ where pub(super) async fn submit_many( &self, xts: impl IntoIterator)>, - ) -> Vec, ChainApi::Error>> { + ) -> Vec, ChainApi::Error>> { if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { let xts = xts.into_iter().collect::>(); log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); @@ -173,7 +173,7 @@ where &self, source: TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ExtrinsicHash>, ChainApi::Error> { + ) -> Result, ChainApi::Error> { log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); self.pool.submit_and_watch(&self.at, source, xt).await } @@ -182,7 +182,7 @@ where pub(super) fn submit_local( &self, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let (hash, length) = self.pool.validated_pool().api().hash_and_length(&xt); log::trace!(target: LOG_TARGET, "[{:?}] view::submit_local at:{}", hash, self.at.hash); @@ -460,4 +460,18 @@ where const IGNORE_BANNED: bool = false; self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() } + + /// Removes the whole transaction subtree from the inner pool. + /// + /// Refer to [`crate::graph::ValidatedPool::remove_subtree`] for more details. 
+ pub fn remove_subtree( + &self, + tx_hash: ExtrinsicHash, + listener_action: F, + ) -> Vec> + where + F: Fn(&mut crate::graph::Listener, ExtrinsicHash), + { + self.pool.validated_pool().remove_subtree(tx_hash, listener_action) + } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index a06c051f0a7eb..43ed5bbf8869f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -27,7 +27,7 @@ use crate::{ graph::{ self, base_pool::{TimedTransactionSource, Transaction}, - ExtrinsicFor, ExtrinsicHash, TransactionFor, + BaseSubmitOutcome, ExtrinsicFor, ExtrinsicHash, TransactionFor, ValidatedPoolSubmitOutcome, }, ReadyIteratorFor, LOG_TARGET, }; @@ -38,20 +38,18 @@ use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; use sp_blockchain::TreeRoute; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use std::{ - collections::{hash_map::Entry, HashMap}, + collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, time::Instant, }; -/// Helper struct to keep the context for transaction replacements. +/// Helper struct to maintain the context for pending transaction submission, executed for +/// newly inserted views. #[derive(Clone)] -struct PendingTxReplacement +struct PendingTxSubmission where ChainApi: graph::ChainApi, { - /// Indicates if the new transaction was already submitted to all the views in the view_store. - /// If true, it can be removed after inserting any new view. - processed: bool, /// New transaction replacing the old one. xt: ExtrinsicFor, /// Source of the transaction. @@ -60,13 +58,84 @@ where watched: bool, } -impl PendingTxReplacement +/// Helper type representing the callback allowing to trigger per-transaction events on +/// `ValidatedPool`'s listener. +type RemovalListener = + Arc, ExtrinsicHash) + Send + Sync>; + +/// Helper struct to maintain the context for pending transaction removal, executed for +/// newly inserted views. +struct PendingTxRemoval +where + ChainApi: graph::ChainApi, +{ + /// Hash of the transaction that will be removed, + xt_hash: ExtrinsicHash, + /// Action that shall be executed on underlying `ValidatedPool`'s listener. + listener_action: RemovalListener, +} + +/// This enum represents an action that should be executed on the newly built +/// view before this view is inserted into the view store. +enum PreInsertAction +where + ChainApi: graph::ChainApi, +{ + /// Represents the action of submitting a new transaction. Intended to use to handle usurped + /// transactions. + SubmitTx(PendingTxSubmission), + + /// Represents the action of removing a subtree of transactions. + RemoveSubtree(PendingTxRemoval), +} + +/// Represents a task awaiting execution, to be performed immediately prior to the view insertion +/// into the view store. +struct PendingPreInsertTask +where + ChainApi: graph::ChainApi, +{ + /// The action to be applied when inserting a new view. + action: PreInsertAction, + /// Indicates if the action was already applied to all the views in the view_store. + /// If true, it can be removed after inserting any new view. + processed: bool, +} + +impl PendingPreInsertTask where ChainApi: graph::ChainApi, { - /// Creates new unprocessed instance of pending transaction replacement. 
- fn new(xt: ExtrinsicFor, source: TimedTransactionSource, watched: bool) -> Self { - Self { processed: false, xt, source, watched } + /// Creates new unprocessed instance of pending transaction submission. + fn new_submission_action( + xt: ExtrinsicFor, + source: TimedTransactionSource, + watched: bool, + ) -> Self { + Self { + processed: false, + action: PreInsertAction::SubmitTx(PendingTxSubmission { xt, source, watched }), + } + } + + /// Creates new unprocessed instance of pending transaction removal. + fn new_removal_action( + xt_hash: ExtrinsicHash, + listener: RemovalListener, + ) -> Self { + Self { + processed: false, + action: PreInsertAction::RemoveSubtree(PendingTxRemoval { + xt_hash, + listener_action: listener, + }), + } + } + + /// Marks a task as done for every view present in view store. Basically means that can be + /// removed on new view insertion. + fn mark_processed(&mut self) { + self.processed = true; } } @@ -100,9 +169,20 @@ where /// notifcication threads. It is meant to assure that replaced transaction is also removed from /// newly built views in maintain process. /// - /// The map's key is hash of replaced extrinsic. - pending_txs_replacements: - RwLock, PendingTxReplacement>>, + /// The map's key is hash of actionable extrinsic (to avoid duplicated entries). + pending_txs_tasks: RwLock, PendingPreInsertTask>>, +} + +/// Type alias to outcome of submission to `ViewStore`. +pub(super) type ViewStoreSubmitOutcome = + BaseSubmitOutcome>; + +impl From> + for ViewStoreSubmitOutcome +{ + fn from(value: ValidatedPoolSubmitOutcome) -> Self { + Self::new(value.hash(), value.priority()) + } } impl ViewStore @@ -124,7 +204,7 @@ where listener, most_recent_view: RwLock::from(None), dropped_stream_controller, - pending_txs_replacements: Default::default(), + pending_txs_tasks: Default::default(), } } @@ -132,7 +212,7 @@ where pub(super) async fn submit( &self, xts: impl IntoIterator)> + Clone, - ) -> HashMap, ChainApi::Error>>> { + ) -> HashMap, ChainApi::Error>>> { let submit_futures = { let active_views = self.active_views.read(); active_views @@ -140,7 +220,16 @@ where .map(|(_, view)| { let view = view.clone(); let xts = xts.clone(); - async move { (view.at.hash, view.submit_many(xts).await) } + async move { + ( + view.at.hash, + view.submit_many(xts) + .await + .into_iter() + .map(|r| r.map(Into::into)) + .collect::>(), + ) + } }) .collect::>() }; @@ -153,7 +242,7 @@ where pub(super) fn submit_local( &self, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let active_views = self .active_views .read() @@ -168,12 +257,14 @@ where .map(|view| view.submit_local(xt.clone())) .find_or_first(Result::is_ok); - if let Some(Err(err)) = result { - log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); - return Err(err) - }; - - Ok(tx_hash) + match result { + Some(Err(err)) => { + log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); + Err(err) + }, + None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None)), + Some(Ok(r)) => Ok(r.into()), + } } /// Import a single extrinsic and starts to watch its progress in the pool. 
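	///
	/// [Editor's aside, not part of the patch] Inside this function the per-view
	/// results are collapsed with itertools' `find_or_first`: the first `Ok` wins
	/// if any view succeeded, otherwise the first result (an `Err`) is kept. A
	/// minimal, self-contained illustration:
	///
	/// ```ignore
	/// use itertools::Itertools;
	/// let results: Vec<Result<u32, &str>> = vec![Err("view-a"), Ok(42), Ok(7)];
	/// assert_eq!(results.into_iter().find_or_first(|r| r.is_ok()), Some(Ok(42)));
	/// ```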
@@ -188,7 +279,7 @@ where _at: Block::Hash, source: TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result, ChainApi::Error> { let tx_hash = self.api.hash_and_length(&xt).0; let Some(external_watcher) = self.listener.create_external_watcher_for_tx(tx_hash) else { return Err(PoolError::AlreadyImported(Box::new(tx_hash)).into()) @@ -203,13 +294,13 @@ where let source = source.clone(); async move { match view.submit_and_watch(source, xt).await { - Ok(watcher) => { + Ok(mut result) => { self.listener.add_view_watcher_for_tx( tx_hash, view.at.hash, - watcher.into_stream().boxed(), + result.expect_watcher().into_stream().boxed(), ); - Ok(()) + Ok(result) }, Err(e) => Err(e), } @@ -217,17 +308,20 @@ where }) .collect::>() }; - let maybe_error = futures::future::join_all(submit_and_watch_futures) + let result = futures::future::join_all(submit_and_watch_futures) .await .into_iter() .find_or_first(Result::is_ok); - if let Some(Err(err)) = maybe_error { - log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); - return Err(err); - }; - - Ok(external_watcher) + match result { + Some(Err(err)) => { + log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); + return Err(err); + }, + Some(Ok(result)) => + Ok(ViewStoreSubmitOutcome::from(result).with_watcher(external_watcher)), + None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None).with_watcher(external_watcher)), + } } /// Returns the pool status for every active view. @@ -575,8 +669,12 @@ where replaced: ExtrinsicHash, watched: bool, ) { - if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) { - entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched)); + if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(replaced) { + entry.insert(PendingPreInsertTask::new_submission_action( + xt.clone(), + source.clone(), + watched, + )); } else { return }; @@ -586,8 +684,8 @@ where self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; - if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) { - replacement.processed = true; + if let Some(replacement) = self.pending_txs_tasks.write().get_mut(&replaced) { + replacement.mark_processed(); } } @@ -596,18 +694,25 @@ where /// After application, all already processed replacements are removed. async fn apply_pending_tx_replacements(&self, view: Arc>) { let mut futures = vec![]; - for replacement in self.pending_txs_replacements.read().values() { - let xt_hash = self.api.hash_and_length(&replacement.xt).0; - futures.push(self.replace_transaction_in_view( - view.clone(), - replacement.source.clone(), - replacement.xt.clone(), - xt_hash, - replacement.watched, - )); + for replacement in self.pending_txs_tasks.read().values() { + match replacement.action { + PreInsertAction::SubmitTx(ref submission) => { + let xt_hash = self.api.hash_and_length(&submission.xt).0; + futures.push(self.replace_transaction_in_view( + view.clone(), + submission.source.clone(), + submission.xt.clone(), + xt_hash, + submission.watched, + )); + }, + PreInsertAction::RemoveSubtree(ref removal) => { + view.remove_subtree(removal.xt_hash, &*removal.listener_action); + }, + } } let _results = futures::future::join_all(futures).await; - self.pending_txs_replacements.write().retain(|_, r| r.processed); + self.pending_txs_tasks.write().retain(|_, r| r.processed); } /// Submits `xt` to the given view. 
@@ -623,11 +728,11 @@ where ) { if watched { match view.submit_and_watch(source, xt).await { - Ok(watcher) => { + Ok(mut result) => { self.listener.add_view_watcher_for_tx( xt_hash, view.at.hash, - watcher.into_stream().boxed(), + result.expect_watcher().into_stream().boxed(), ); }, Err(e) => { @@ -690,4 +795,58 @@ where }; let _results = futures::future::join_all(submit_futures).await; } + + /// Removes a transaction subtree from every view in the view_store, starting from the given + /// transaction hash. + /// + /// This function traverses the dependency graph of transactions and removes the specified + /// transaction along with all its descendant transactions from every view. + /// + /// A `listener_action` callback function is invoked for every transaction that is removed, + /// providing a reference to the pool's listener and the hash of the removed transaction. This + /// allows to trigger the required events. Note that listener may be called multiple times for + /// the same hash. + /// + /// Function will also schedule view pre-insertion actions to ensure that transactions will be + /// removed from newly created view. + /// + /// Returns a vector containing the hashes of all removed transactions, including the root + /// transaction specified by `tx_hash`. Vector contains only unique hashes. + pub(super) fn remove_transaction_subtree( + &self, + xt_hash: ExtrinsicHash, + listener_action: F, + ) -> Vec> + where + F: Fn(&mut crate::graph::Listener, ExtrinsicHash) + + Clone + + Send + + Sync + + 'static, + { + if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(xt_hash) { + entry.insert(PendingPreInsertTask::new_removal_action( + xt_hash, + Arc::from(listener_action.clone()), + )); + }; + + let mut seen = HashSet::new(); + + let removed = self + .active_views + .read() + .iter() + .chain(self.inactive_views.read().iter()) + .filter(|(_, view)| view.is_imported(&xt_hash)) + .flat_map(|(_, view)| view.remove_subtree(xt_hash, &listener_action)) + .filter(|xt_hash| seen.insert(*xt_hash)) + .collect(); + + if let Some(removal_action) = self.pending_txs_tasks.write().get_mut(&xt_hash) { + removal_action.mark_processed(); + } + + removed + } } diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index 04eaa998f42e6..3b4afc88b7897 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -453,27 +453,29 @@ impl BasePool, _>(|worst, current| { - let transaction = ¤t.transaction; - worst - .map(|worst| { - // Here we don't use `TransactionRef`'s ordering implementation because - // while it prefers priority like need here, it also prefers older - // transactions for inclusion purposes and limit enforcement needs to prefer - // newer transactions instead and drop the older ones. 
- match worst.transaction.priority.cmp(&transaction.transaction.priority) { - Ordering::Less => worst, - Ordering::Equal => - if worst.insertion_id > transaction.insertion_id { - transaction.clone() - } else { - worst - }, - Ordering::Greater => transaction.clone(), - } - }) - .or_else(|| Some(transaction.clone())) - }); + let worst = + self.ready.fold::>, _>(None, |worst, current| { + let transaction = ¤t.transaction; + worst + .map(|worst| { + // Here we don't use `TransactionRef`'s ordering implementation because + // while it prefers priority like need here, it also prefers older + // transactions for inclusion purposes and limit enforcement needs to + // prefer newer transactions instead and drop the older ones. + match worst.transaction.priority.cmp(&transaction.transaction.priority) + { + Ordering::Less => worst, + Ordering::Equal => + if worst.insertion_id > transaction.insertion_id { + transaction.clone() + } else { + worst + }, + Ordering::Greater => transaction.clone(), + } + }) + .or_else(|| Some(transaction.clone())) + }); if let Some(worst) = worst { removed.append(&mut self.remove_subtree(&[worst.transaction.hash.clone()])) diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index 41daf5491f709..7b09ee4c64095 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -126,8 +126,8 @@ impl Listener Pool { &self, at: &HashAndNumber, xts: impl IntoIterator)>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -216,7 +216,7 @@ impl Pool { &self, at: &HashAndNumber, xts: impl IntoIterator)>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -227,7 +227,7 @@ impl Pool { at: &HashAndNumber, source: base::TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, B::Error> { + ) -> Result, B::Error> { let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } @@ -238,7 +238,7 @@ impl Pool { at: &HashAndNumber, source: base::TimedTransactionSource, xt: ExtrinsicFor, - ) -> Result, ExtrinsicHash>, B::Error> { + ) -> Result, B::Error> { let (_, tx) = self .verify_one(at.hash, at.number, source, xt, CheckBannedBeforeVerify::Yes) .await; @@ -432,7 +432,7 @@ impl Pool { } /// Returns future that validates single transaction at given block. 
- async fn verify_one( + pub(crate) async fn verify_one( &self, block_hash: ::Hash, block_number: NumberFor, @@ -539,6 +539,7 @@ mod tests { .into(), ), ) + .map(|outcome| outcome.hash()) .unwrap(); // then @@ -567,7 +568,10 @@ mod tests { // when let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)) + .into_iter() + .map(|r| r.map(|o| o.hash())) + .collect::>(); log::debug!("--> {hashes:#?}"); // then @@ -591,7 +595,8 @@ mod tests { // when pool.validated_pool.ban(&Instant::now(), vec![pool.hash_of(&uxt)]); - let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())) + .map(|o| o.hash()); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -614,7 +619,8 @@ mod tests { let uxt = ExtrinsicBuilder::new_include_data(vec![42]).build(); // when - let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())) + .map(|o| o.hash()); // then assert_matches!(res.unwrap_err(), error::Error::Unactionable); @@ -642,7 +648,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash1 = block_on( pool.submit_one( &han_of_block0, @@ -656,7 +663,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // future doesn't count let _hash = block_on( pool.submit_one( @@ -671,7 +679,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); assert_eq!(pool.validated_pool().status().ready, 2); assert_eq!(pool.validated_pool().status().future, 1); @@ -704,7 +713,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash2 = block_on( pool.submit_one( &han_of_block0, @@ -718,7 +728,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); let hash3 = block_on( pool.submit_one( &han_of_block0, @@ -732,7 +743,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // when pool.validated_pool.clear_stale(&api.expect_hash_and_number(5)); @@ -764,7 +776,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // when block_on(pool.prune_tags(&api.expect_hash_and_number(1), vec![vec![0]], vec![hash1])); @@ -792,8 +805,9 @@ mod tests { let api = Arc::new(TestApi::default()); let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone()); - let hash1 = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap(); + let hash1 = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())) + .unwrap() + .hash(); assert_eq!(pool.validated_pool().status().future, 1); // when @@ -810,7 +824,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .hash(); // then assert_eq!(pool.validated_pool().status().future, 1); @@ -842,6 +857,7 @@ mod tests { .into(), ), ) + .map(|o| o.hash()) .unwrap_err(); // then @@ -868,6 +884,7 @@ mod tests { .into(), ), ) + .map(|o| o.hash()) .unwrap_err(); // then @@ -896,7 +913,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); @@ -933,7 +951,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 
1); assert_eq!(pool.validated_pool().status().future, 0); @@ -972,7 +991,8 @@ mod tests { .into(), ), ) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 1); @@ -1011,7 +1031,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1036,7 +1057,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1069,7 +1091,8 @@ mod tests { }); let watcher = block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, xt.into())) - .unwrap(); + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -1136,7 +1159,9 @@ mod tests { // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); + block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())) + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 1); // after validation `Transfer` will have priority set to 4 (validate_transaction @@ -1147,8 +1172,9 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = - block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); + let watcher = block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())) + .unwrap() + .expect_watcher(); assert_eq!(pool.validated_pool().status().ready, 2); // when diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 9061d0e255811..b8aef99e638dc 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -232,12 +232,10 @@ impl ReadyTransactions { Ok(replaced) } - /// Fold a list of ready transactions to compute a single value. - pub fn fold, &ReadyTx) -> Option>( - &mut self, - f: F, - ) -> Option { - self.ready.read().values().fold(None, f) + /// Fold a list of ready transactions to compute a single value using initial value of + /// accumulator. + pub fn fold) -> R>(&self, init: R, f: F) -> R { + self.ready.read().values().fold(init, f) } /// Returns true if given transaction is part of the queue. diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index 6c3bbbf34b553..fe15c6eca3080 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -173,6 +173,11 @@ where pub fn len(&mut self) -> usize { self.inner_guard.len() } + + /// Returns an iterator over all key-value pairs. 
+ pub fn iter(&self) -> Iter<'_, K, V> { + self.inner_guard.iter() + } } #[cfg(test)] diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 3f7bf4773de7b..bc2b07896dba0 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -18,25 +18,22 @@ use std::{ collections::{HashMap, HashSet}, - hash, sync::Arc, }; use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; -use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; -use serde::Serialize; +use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions, TransactionPriority}; use sp_blockchain::HashAndNumber; use sp_runtime::{ - traits::{self, SaturatedConversion}, + traits::SaturatedConversion, transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; use super::{ base_pool::{self as base, PruneStatus}, - listener::Listener, pool::{ BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, Options, TransactionFor, }, @@ -79,12 +76,23 @@ impl ValidatedTransaction { valid_till: at.saturated_into::().saturating_add(validity.longevity), }) } + + /// Returns priority for valid transaction, None if transaction is not valid. + pub fn priority(&self) -> Option { + match self { + ValidatedTransaction::Valid(base::Transaction { priority, .. }) => Some(*priority), + _ => None, + } + } } -/// A type of validated transaction stored in the pool. +/// A type of validated transaction stored in the validated pool. pub type ValidatedTransactionFor = ValidatedTransaction, ExtrinsicFor, ::Error>; +/// A type alias representing ValidatedPool listener for given ChainApi type. +pub type Listener = super::listener::Listener, B>; + /// A closure that returns true if the local node is a validator that can author blocks. #[derive(Clone)] pub struct IsValidator(Arc bool + Send + Sync>>); @@ -101,12 +109,56 @@ impl From bool + Send + Sync>> for IsValidator { } } +/// Represents the result of `submit` or `submit_and_watch` operations. +pub struct BaseSubmitOutcome { + /// The hash of the submitted transaction. + hash: ExtrinsicHash, + /// A transaction watcher. This is `Some` for `submit_and_watch` and `None` for `submit`. + watcher: Option, + + /// The priority of the transaction. Defaults to None if unknown. + priority: Option, +} + +/// Type alias to outcome of submission to `ValidatedPool`. +pub type ValidatedPoolSubmitOutcome = + BaseSubmitOutcome, ExtrinsicHash>>; + +impl BaseSubmitOutcome { + /// Creates a new instance with given hash and priority. + pub fn new(hash: ExtrinsicHash, priority: Option) -> Self { + Self { hash, priority, watcher: None } + } + + /// Sets the transaction watcher. + pub fn with_watcher(mut self, watcher: W) -> Self { + self.watcher = Some(watcher); + self + } + + /// Provides priority of submitted transaction. + pub fn priority(&self) -> Option { + self.priority + } + + /// Provides hash of submitted transaction. + pub fn hash(&self) -> ExtrinsicHash { + self.hash + } + + /// Provides a watcher. Should only be called on outcomes of `submit_and_watch`. Otherwise will + /// panic (that would mean logical error in program). + pub fn expect_watcher(&mut self) -> W { + self.watcher.take().expect("watcher was set in submit_and_watch. qed") + } +} + /// Pool that deals with validated transactions. 
pub struct ValidatedPool { api: Arc, is_validator: IsValidator, options: Options, - listener: RwLock, B>>, + listener: RwLock>, pub(crate) pool: RwLock, ExtrinsicFor>>, import_notification_sinks: Mutex>>>, rotator: PoolRotator>, @@ -200,7 +252,7 @@ impl ValidatedPool { pub fn submit( &self, txs: impl IntoIterator>, - ) -> Vec, B::Error>> { + ) -> Vec, B::Error>> { let results = txs .into_iter() .map(|validated_tx| self.submit_one(validated_tx)) @@ -216,7 +268,7 @@ impl ValidatedPool { results .into_iter() .map(|res| match res { - Ok(ref hash) if removed.contains(hash) => + Ok(outcome) if removed.contains(&outcome.hash) => Err(error::Error::ImmediatelyDropped.into()), other => other, }) @@ -224,9 +276,13 @@ impl ValidatedPool { } /// Submit single pre-validated transaction to the pool. - fn submit_one(&self, tx: ValidatedTransactionFor) -> Result, B::Error> { + fn submit_one( + &self, + tx: ValidatedTransactionFor, + ) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { + let priority = tx.priority; log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one", tx.hash); if !tx.propagate && !(self.is_validator.0)() { return Err(error::Error::Unactionable.into()) @@ -254,7 +310,7 @@ impl ValidatedPool { let mut listener = self.listener.write(); fire_events(&mut *listener, &imported); - Ok(*imported.hash()) + Ok(ValidatedPoolSubmitOutcome::new(*imported.hash(), Some(priority))) }, ValidatedTransaction::Invalid(hash, err) => { log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one invalid: {:?}", hash, err); @@ -305,7 +361,7 @@ impl ValidatedPool { // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.limit_enforced(h); + listener.limits_enforced(h); } removed @@ -318,7 +374,7 @@ impl ValidatedPool { pub fn submit_and_watch( &self, tx: ValidatedTransactionFor, - ) -> Result, ExtrinsicHash>, B::Error> { + ) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { let hash = self.api.hash_and_length(&tx.data).0; @@ -326,7 +382,7 @@ impl ValidatedPool { self.submit(std::iter::once(ValidatedTransaction::Valid(tx))) .pop() .expect("One extrinsic passed; one result returned; qed") - .map(|_| watcher) + .map(|outcome| outcome.with_watcher(watcher)) }, ValidatedTransaction::Invalid(hash, err) => { self.rotator.ban(&Instant::now(), std::iter::once(hash)); @@ -711,11 +767,42 @@ impl ValidatedPool { listener.future(&f.hash); }); } + + /// Removes a transaction subtree from the pool, starting from the given transaction hash. + /// + /// This function traverses the dependency graph of transactions and removes the specified + /// transaction along with all its descendant transactions from the pool. + /// + /// A `listener_action` callback function is invoked for every transaction that is removed, + /// providing a reference to the pool's listener and the hash of the removed transaction. This + /// allows to trigger the required events. + /// + /// Returns a vector containing the hashes of all removed transactions, including the root + /// transaction specified by `tx_hash`. 
+ pub fn remove_subtree( + &self, + tx_hash: ExtrinsicHash, + listener_action: F, + ) -> Vec> + where + F: Fn(&mut Listener, ExtrinsicHash), + { + self.pool + .write() + .remove_subtree(&[tx_hash]) + .into_iter() + .map(|tx| { + let removed_tx_hash = tx.hash; + let mut listener = self.listener.write(); + listener_action(&mut *listener, removed_tx_hash); + removed_tx_hash + }) + .collect::>() + } } -fn fire_events(listener: &mut Listener, imported: &base::Imported) +fn fire_events(listener: &mut Listener, imported: &base::Imported, Ex>) where - H: hash::Hash + Eq + traits::Member + Serialize, B: ChainApi, { match *imported { diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index caa09585b28bf..2a691ae35eaf7 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -405,7 +405,8 @@ mod tests { TimedTransactionSource::new_external(false), uxt.clone().into(), )) - .expect("Should be valid"); + .expect("Should be valid") + .hash(); block_on(queue.revalidate_later(han_of_block0.hash, vec![uxt_hash])); @@ -448,7 +449,7 @@ mod tests { vec![(source.clone(), uxt0.into()), (source, uxt1.into())], )) .into_iter() - .map(|r| r.expect("Should be valid")) + .map(|r| r.expect("Should be valid").hash()) .collect::>(); assert_eq!(api.validation_requests().len(), 2); diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index 2b32704945c75..3598f9dbc2af1 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -274,7 +274,12 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, xts).await) + Ok(pool + .submit_at(&at, xts) + .await + .into_iter() + .map(|result| result.map(|outcome| outcome.hash())) + .collect()) } async fn submit_one( @@ -292,6 +297,7 @@ where let at = HashAndNumber { hash: at, number: number? }; pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) .await + .map(|outcome| outcome.hash()) } async fn submit_and_watch( @@ -308,15 +314,13 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - let watcher = pool - .submit_and_watch( - &at, - TimedTransactionSource::from_transaction_source(source, false), - xt, - ) - .await?; - - Ok(watcher.into_stream().boxed()) + pool.submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await + .map(|mut outcome| outcome.expect_watcher().into_stream().boxed()) } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { @@ -484,7 +488,11 @@ where validity, ); - self.pool.validated_pool().submit(vec![validated]).remove(0) + self.pool + .validated_pool() + .submit(vec![validated]) + .remove(0) + .map(|outcome| outcome.hash()) } } diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index aaffebc0db0ac..530c25caf88e7 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -192,12 +192,9 @@ macro_rules! 
assert_ready_iterator { let output: Vec<_> = ready_iterator.collect(); log::debug!(target:LOG_TARGET, "expected: {:#?}", expected); log::debug!(target:LOG_TARGET, "output: {:#?}", output); + let output = output.into_iter().map(|t|t.hash).collect::>(); assert_eq!(expected.len(), output.len()); - assert!( - output.iter().zip(expected.iter()).all(|(o,e)| { - o.hash == *e - }) - ); + assert_eq!(output,expected); }}; } @@ -215,6 +212,18 @@ macro_rules! assert_future_iterator { }}; } +#[macro_export] +macro_rules! assert_watcher_stream { + ($stream:ident, [$( $event:expr ),*]) => {{ + let expected = vec![ $($event),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?} {}, block now:", expected, expected.len()); + let output = futures::executor::block_on_stream($stream).take(expected.len()).collect::>(); + log::debug!(target:LOG_TARGET, "output: {:#?}", output); + assert_eq!(expected.len(), output.len()); + assert_eq!(output, expected); + }}; +} + pub const SOURCE: TransactionSource = TransactionSource::External; #[cfg(test)] diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index 4ed9b45038614..af5e7e8c5a6a8 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -20,13 +20,15 @@ pub mod fatp_common; -use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; +use fatp_common::{invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; use futures::{executor::block_on, FutureExt}; use sc_transaction_pool::ChainApi; -use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; +use sc_transaction_pool_api::{ + error::Error as TxPoolError, LocalTransactionPool, MaintainedTransactionPool, TransactionPool, + TransactionStatus, +}; use substrate_test_runtime_client::Sr25519Keyring::*; use substrate_test_runtime_transaction_pool::uxt; - #[test] fn fatp_prio_ready_higher_evicts_lower() { sp_tracing::try_init_simple(); @@ -247,3 +249,312 @@ fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); } + +#[test] +fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 4); + + api.set_priority(&xt4, 5); + api.set_priority(&xt5, 6); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + 
assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let _xt2_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let _xt3_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let _xt4_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let _xt5_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + + assert_ready_iterator!(header01.hash(), pool, []); + assert_ready_iterator!(header02.hash(), pool, [xt3, xt2]); + assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]); +} + +#[test] +fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(4).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + let xt3 = uxt(Bob, 300); + let xt4 = uxt(Charlie, 400); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 3); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 2); + api.set_priority(&xt4, 2); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]); + assert_pool_status!(header01.hash(), &pool, 4, 0); + assert_eq!(pool.mempool_len().1, 4); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_ready_iterator!(header01.hash(), pool, [xt3, xt4]); + + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); + assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); +} + +#[test] +fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree2() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = 
builder.with_mempool_count_limit(4).with_ready_count(4).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + let xt3 = uxt(Bob, 300); + let xt4 = uxt(Charlie, 400); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 3); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 2); + api.set_priority(&xt4, 2); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]); + assert_pool_status!(header01.hash(), &pool, 4, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + assert_ready_iterator!(header01.hash(), pool, [xt3]); + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_ready_iterator!(header02.hash(), pool, [xt3, xt4]); + + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); + assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]); +} + +#[test] +fn fatp_prios_watcher_full_mempool_lower_prio_gets_rejected() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(2).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + let xt3 = uxt(Dave, 500); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 2); + api.set_priority(&xt3, 1); + + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt0, xt1]); + + let 
result2 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).map(|_| ()); + assert!(matches!(result2.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); + let result3 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).map(|_| ()); + assert!(matches!(result3.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); +} + +#[test] +fn fatp_prios_watcher_full_mempool_does_not_keep_dropped_transaction() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + let xt3 = uxt(Dave, 500); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 2); + api.set_priority(&xt3, 2); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt3]); + + assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); + assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready]); + assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]); +} + +#[test] +fn fatp_prios_submit_local_full_mempool_higher_prio_is_accepted() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + api.set_priority(&xt0, 1); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 3); + api.set_priority(&xt3, 4); + + api.set_priority(&xt4, 5); + api.set_priority(&xt5, 6); + pool.submit_local(invalid_hash(), xt0.clone()).unwrap(); + pool.submit_local(invalid_hash(), xt1.clone()).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().0, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + pool.submit_local(invalid_hash(), xt2.clone()).unwrap(); + pool.submit_local(invalid_hash(), xt3.clone()).unwrap(); + + 
assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().0, 4); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + pool.submit_local(invalid_hash(), xt4.clone()).unwrap(); + pool.submit_local(invalid_hash(), xt5.clone()).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().0, 4); + + assert_ready_iterator!(header01.hash(), pool, []); + assert_ready_iterator!(header02.hash(), pool, [xt3, xt2]); + assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]); +} diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index de35726435f0f..c70f454833145 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -158,6 +158,7 @@ fn prune_tags_should_work() { let (pool, api) = pool(); let hash209 = block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + .map(|o| o.hash()) .unwrap(); block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); @@ -184,10 +185,13 @@ fn prune_tags_should_work() { fn should_ban_invalid_transactions() { let (pool, api) = pool(); let uxt = Arc::from(uxt(Alice, 209)); - let hash = - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap(); + let hash = block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())) + .unwrap() + .hash(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())) + .map(|_| ()) + .unwrap_err(); // when let pending: Vec<_> = pool @@ -198,7 +202,9 @@ fn should_ban_invalid_transactions() { assert_eq!(pending, Vec::::new()); // then - block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())) + .map(|_| ()) + .unwrap_err(); } #[test] diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index 93e5855eefc6c..f88694fb1071e 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -352,9 +352,18 @@ impl ChainApi for TestApi { fn validate_transaction( &self, at: ::Hash, - _source: TransactionSource, + source: TransactionSource, uxt: Arc<::Extrinsic>, ) -> Self::ValidationFuture { + ready(self.validate_transaction_blocking(at, source, uxt)) + } + + fn validate_transaction_blocking( + &self, + at: ::Hash, + _source: TransactionSource, + uxt: Arc<::Extrinsic>, + ) -> Result { let uxt = (*uxt).clone(); self.validation_requests.write().push(uxt.clone()); let block_number; @@ -374,16 +383,12 @@ impl ChainApi for TestApi { // the transaction. (This is not required for this test function, but in real // environment it would fail because of this). 
 			if !found_best {
-				return ready(Ok(Err(TransactionValidityError::Invalid(
-					InvalidTransaction::Custom(1),
-				))))
+				return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(1))))
 			}
 		},
 		Ok(None) =>
-			return ready(Ok(Err(TransactionValidityError::Invalid(
-				InvalidTransaction::Custom(2),
-			)))),
-		Err(e) => return ready(Err(e)),
+			return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(2)))),
+		Err(e) => return Err(e),
 	}

 	let (requires, provides) = if let Ok(transfer) = TransferData::try_from(&uxt) {
@@ -423,7 +428,7 @@ impl ChainApi for TestApi {

 		if self.enable_stale_check && transfer.nonce < chain_nonce {
 			log::info!("test_api::validate_transaction: invalid_transaction(stale)....");
-			return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Stale))))
+			return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)))
 		}

 		(requires, provides)
@@ -433,7 +438,7 @@ impl ChainApi for TestApi {

 		if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) {
 			log::info!("test_api::validate_transaction: invalid_transaction....");
-			return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0)))))
+			return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0))))
 		}

 		let priority = self.chain.read().priorities.get(&self.hash_and_length(&uxt).0).cloned();
@@ -447,16 +452,7 @@ impl ChainApi for TestApi {

 		(self.valid_modifier.read())(&mut validity);

-		ready(Ok(Ok(validity)))
-	}
-
-	fn validate_transaction_blocking(
-		_at: ::Hash,
-		_source: TransactionSource,
-		_uxt: Arc<::Extrinsic>,
-	) -> Result {
-		unimplemented!();
+		Ok(Ok(validity))
 	}

 	fn block_id_to_number(

From 105c5b94f5d3bf394a3ddf1d10ab0932ce93181b Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Tue, 14 Jan 2025 15:30:05 +0200
Subject: [PATCH 6/8] litep2p: Suffix litep2p to the identify agent version for
 visibility (#7133)

This PR adds the `(litep2p)` suffix to the agent version (user agent) of
the identify protocol.

The change is needed to gain visibility into network backends and
determine exactly the number of validators that are running litep2p.
Using tools like subp2p-explorer, we can determine if the validators are
running litep2p nodes.

This is reflected in the identify protocol:

```
info=Identify {
  protocol_version: Some("/substrate/1.0"),
  agent_version: Some("polkadot-parachain/v1.17.0-967989c5d94 (kusama-node-name-01) (litep2p)")
  ...
}
```

cc @paritytech/networking

---------

Signed-off-by: Alexandru Vasile
---
 prdoc/pr_7133.prdoc                               | 15 +++++++++++++++
 substrate/client/network/src/litep2p/discovery.rs |  2 +-
 2 files changed, 16 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_7133.prdoc

diff --git a/prdoc/pr_7133.prdoc b/prdoc/pr_7133.prdoc
new file mode 100644
index 0000000000000..ca0d2bb0bd483
--- /dev/null
+++ b/prdoc/pr_7133.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Suffix litep2p to the identify agent version for visibility
+
+doc:
+  - audience: [Node Dev, Node Operator]
+    description: |
+      This PR adds the `(litep2p)` suffix to the agent version (user agent) of the identify protocol.
+      The change is needed to gain visibility into network backends and determine exactly the number of validators that are running litep2p.
+      Using tools like subp2p-explorer, we can determine if the validators are running litep2p nodes.
+
+crates:
+- name: sc-network
+  bump: patch
diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs
index 2bea2e5a80dce..b55df374f60ec 100644
--- a/substrate/client/network/src/litep2p/discovery.rs
+++ b/substrate/client/network/src/litep2p/discovery.rs
@@ -254,7 +254,7 @@ impl Discovery {
 		_peerstore_handle: Arc,
 	) -> (Self, PingConfig, IdentifyConfig, KademliaConfig, Option) {
 		let (ping_config, ping_event_stream) = PingConfig::default();
-		let user_agent = format!("{} ({})", config.client_version, config.node_name);
+		let user_agent = format!("{} ({}) (litep2p)", config.client_version, config.node_name);

 		let (identify_config, identify_event_stream) =
 			IdentifyConfig::new("/substrate/1.0".to_string(), Some(user_agent));

From 023763da2043333c3524bd7f12ac6c7b2d084b39 Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Tue, 14 Jan 2025 14:41:24 +0100
Subject: [PATCH 7/8] [pallet-revive-eth-rpc] persist eth transaction hash
 (#6836)

Add an option to persist EVM transaction hashes to a SQL db. This should
make it possible to run a full archive ETH RPC node (assuming the
substrate node is also a full archive node).

Some queries such as eth_getTransactionByHash,
eth_getBlockTransactionCountByHash, and others need to work with a
transaction hash index, which is not stored in Substrate and needs to be
stored by the eth-rpc proxy.

The refactoring breaks down the Client into a `BlockInfoProvider` and a
`ReceiptProvider`:
- BlockInfoProvider does not need any persisted data, as we can fetch all
  block info from the source substrate chain
- ReceiptProvider comes in two flavors:
  - An in-memory cache implementation - this is the one we had so far.
  - A DB implementation - this one persists rows with the block_hash, the
    transaction_index and the transaction_hash, so that we can later fetch
    the block and extrinsic for that receipt and reconstruct the
    ReceiptInfo object.

This PR also adds a new binary, eth-indexer, that iterates over past and
new blocks and writes the receipt hashes to the DB using the new
ReceiptProvider.
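For illustration, here is a minimal sketch of the DB flavor described
above. The two SQL statements are copied from the `.sqlx` query metadata
added by this PR; the helper names (`store_mapping`,
`lookup_receipt_location`) and the use of a bare `SqlitePool` are
assumptions made for the example, not the PR's actual `ReceiptProvider`
API:

```
// Sketch only: assumes sqlx with the "sqlite" and "runtime-tokio" features.
use sqlx::SqlitePool;

// Hypothetical helper: upsert the transaction_hash -> (block_hash, index)
// mapping, mirroring the `ON CONFLICT(transaction_hash) DO UPDATE` query
// in the PR's .sqlx metadata.
async fn store_mapping(
	pool: &SqlitePool,
	transaction_hash: &str,
	block_hash: &str,
	transaction_index: i64,
) -> Result<(), sqlx::Error> {
	sqlx::query(
		"INSERT INTO transaction_hashes (transaction_hash, block_hash, transaction_index)
		 VALUES ($1, $2, $3)
		 ON CONFLICT(transaction_hash) DO UPDATE SET
		 block_hash = EXCLUDED.block_hash,
		 transaction_index = EXCLUDED.transaction_index",
	)
	.bind(transaction_hash)
	.bind(block_hash)
	.bind(transaction_index)
	.execute(pool)
	.await?;
	Ok(())
}

// Hypothetical helper: eth_getTransactionByHash only needs this pair; the
// full ReceiptInfo is then rebuilt from the substrate block and extrinsic.
async fn lookup_receipt_location(
	pool: &SqlitePool,
	transaction_hash: &str,
) -> Result<Option<(String, i64)>, sqlx::Error> {
	sqlx::query_as(
		"SELECT block_hash, transaction_index
		 FROM transaction_hashes
		 WHERE transaction_hash = $1",
	)
	.bind(transaction_hash)
	.fetch_optional(pool)
	.await
}
```

Persisting only the (block_hash, transaction_index) pair keeps the index
compact; everything else in the ReceiptInfo can be re-derived from the
archive node.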
--------- Co-authored-by: GitHub Action Co-authored-by: command-bot <> --- .cargo/config.toml | 1 + .github/workflows/build-publish-eth-rpc.yml | 37 +- Cargo.lock | 476 +++++++++++++-- prdoc/pr_6836.prdoc | 17 + ...c1135227c1150f2c5083d1c7c6086b717ada0.json | 12 + ...68c427245f94b80d37ec3aef04cd96fb36298.json | 20 + ...332be50096d4e37be04ed8b6f46ac5c242043.json | 26 + substrate/frame/revive/rpc/Cargo.toml | 11 + .../rpc/dockerfiles/eth-indexer/Dockerfile | 28 + .../rpc/{ => dockerfiles/eth-rpc}/Dockerfile | 0 .../frame/revive/rpc/examples/js/bun.lockb | Bin 40649 -> 46862 bytes .../frame/revive/rpc/examples/js/package.json | 14 +- .../rpc/examples/js/src/build-contracts.ts | 7 +- .../rpc/examples/js/src/geth-diff.test.ts | 66 +- .../frame/revive/rpc/examples/js/src/lib.ts | 1 - .../revive/rpc/examples/js/src/piggy-bank.ts | 4 +- .../revive/rpc/examples/js/src/spammer.ts | 104 ++++ .../js/src/{geth-diff-setup.ts => util.ts} | 74 +-- .../rpc/examples/westend_local_network.toml | 8 +- ...241205165418_create_transaction_hashes.sql | 15 + .../revive/rpc/src/block_info_provider.rs | 250 ++++++++ substrate/frame/revive/rpc/src/cli.rs | 61 +- substrate/frame/revive/rpc/src/client.rs | 571 ++++++++---------- substrate/frame/revive/rpc/src/eth-indexer.rs | 88 +++ substrate/frame/revive/rpc/src/lib.rs | 27 +- .../frame/revive/rpc/src/receipt_provider.rs | 240 ++++++++ .../revive/rpc/src/receipt_provider/cache.rs | 148 +++++ .../revive/rpc/src/receipt_provider/db.rs | 216 +++++++ substrate/frame/revive/rpc/src/rpc_health.rs | 9 + .../frame/revive/rpc/src/rpc_methods_gen.rs | 4 + .../frame/revive/src/evm/api/rpc_types.rs | 12 +- .../frame/revive/src/evm/api/rpc_types_gen.rs | 10 +- substrate/frame/revive/src/wasm/mod.rs | 5 +- 33 files changed, 2090 insertions(+), 472 deletions(-) create mode 100644 prdoc/pr_6836.prdoc create mode 100644 substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json create mode 100644 substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json create mode 100644 substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json create mode 100644 substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile rename substrate/frame/revive/rpc/{ => dockerfiles/eth-rpc}/Dockerfile (100%) create mode 100644 substrate/frame/revive/rpc/examples/js/src/spammer.ts rename substrate/frame/revive/rpc/examples/js/src/{geth-diff-setup.ts => util.ts} (62%) create mode 100644 substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql create mode 100644 substrate/frame/revive/rpc/src/block_info_provider.rs create mode 100644 substrate/frame/revive/rpc/src/eth-indexer.rs create mode 100644 substrate/frame/revive/rpc/src/receipt_provider.rs create mode 100644 substrate/frame/revive/rpc/src/receipt_provider/cache.rs create mode 100644 substrate/frame/revive/rpc/src/receipt_provider/db.rs diff --git a/.cargo/config.toml b/.cargo/config.toml index 68a0d7b552dc0..8573f582e258b 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -9,6 +9,7 @@ rustdocflags = [ CC_x86_64_unknown_linux_musl = { value = ".cargo/musl-gcc", force = true, relative = true } CXX_x86_64_unknown_linux_musl = { value = ".cargo/musl-g++", force = true, relative = true } CARGO_WORKSPACE_ROOT_DIR = { value = "", relative = true } +SQLX_OFFLINE = "true" [net] retry = 5 diff --git a/.github/workflows/build-publish-eth-rpc.yml 
b/.github/workflows/build-publish-eth-rpc.yml index 3aa1624096dfb..a98b3881a1453 100644 --- a/.github/workflows/build-publish-eth-rpc.yml +++ b/.github/workflows/build-publish-eth-rpc.yml @@ -12,7 +12,8 @@ concurrency: cancel-in-progress: true env: - IMAGE_NAME: "docker.io/paritypr/eth-rpc" + ETH_RPC_IMAGE_NAME: "docker.io/paritypr/eth-rpc" + ETH_INDEXER_IMAGE_NAME: "docker.io/paritypr/eth-indexer" jobs: set-variables: @@ -34,7 +35,7 @@ jobs: echo "set VERSION=${VERSION}" build_docker: - name: Build docker image + name: Build docker images runs-on: parity-large needs: [set-variables] env: @@ -43,17 +44,26 @@ jobs: - name: Check out the repo uses: actions/checkout@v4 - - name: Build Docker image + - name: Build eth-rpc Docker image uses: docker/build-push-action@v6 with: context: . - file: ./substrate/frame/revive/rpc/Dockerfile + file: ./substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile push: false tags: | - ${{ env.IMAGE_NAME }}:${{ env.VERSION }} + ${{ env.ETH_RPC_IMAGE_NAME }}:${{ env.VERSION }} + + - name: Build eth-indexer Docker image + uses: docker/build-push-action@v6 + with: + context: . + file: ./substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile + push: false + tags: | + ${{ env.ETH_INDEXER_IMAGE_NAME }}:${{ env.VERSION }} build_push_docker: - name: Build and push docker image + name: Build and push docker images runs-on: parity-large if: github.ref == 'refs/heads/master' needs: [set-variables] @@ -69,11 +79,20 @@ jobs: username: ${{ secrets.PARITYPR_DOCKERHUB_USERNAME }} password: ${{ secrets.PARITYPR_DOCKERHUB_PASSWORD }} - - name: Build Docker image + - name: Build eth-rpc Docker image + uses: docker/build-push-action@v6 + with: + context: . + file: ./substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile + push: true + tags: | + ${{ env.ETH_RPC_IMAGE_NAME }}:${{ env.VERSION }} + + - name: Build eth-indexer Docker image uses: docker/build-push-action@v6 with: context: . 
- file: ./substrate/frame/revive/rpc/Dockerfile + file: ./substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile push: true tags: | - ${{ env.IMAGE_NAME }}:${{ env.VERSION }} + ${{ env.ETH_INDEXER_IMAGE_NAME }}:${{ env.VERSION }} diff --git a/Cargo.lock b/Cargo.lock index cfb805fbe847c..3eab84d5ed162 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1396,7 +1396,7 @@ dependencies = [ "futures-lite 2.3.0", "parking", "polling 3.4.0", - "rustix 0.38.21", + "rustix 0.38.42", "slab", "tracing", "windows-sys 0.52.0", @@ -1478,7 +1478,7 @@ dependencies = [ "cfg-if", "event-listener 5.3.1", "futures-lite 2.3.0", - "rustix 0.38.21", + "rustix 0.38.42", "tracing", ] @@ -1494,7 +1494,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.21", + "rustix 0.38.42", "signal-hook-registry", "slab", "windows-sys 0.52.0", @@ -1592,6 +1592,15 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + [[package]] name = "atomic-take" version = "1.1.0" @@ -1880,6 +1889,9 @@ name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +dependencies = [ + "serde", +] [[package]] name = "bitvec" @@ -4391,6 +4403,21 @@ dependencies = [ "wasmtime-types", ] +[[package]] +name = "crc" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + [[package]] name = "crc32fast" version = "1.3.2" @@ -5945,6 +5972,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", + "pem-rfc7468", "zeroize", ] @@ -6226,6 +6254,12 @@ dependencies = [ "litrs", ] +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "downcast" version = "0.11.0" @@ -6351,6 +6385,9 @@ name = "either" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +dependencies = [ + "serde", +] [[package]] name = "elliptic-curve" @@ -6559,23 +6596,23 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.2" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ - "errno-dragonfly", "libc", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] -name = "errno-dragonfly" -version = "0.1.2" +name = "etcetera" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" dependencies = [ - "cc", - 
"libc", + "cfg-if", + "home", + "windows-sys 0.48.0", ] [[package]] @@ -6772,9 +6809,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fastrlp" @@ -6989,6 +7026,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "flume" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" +dependencies = [ + "futures-core", + "futures-sink", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -7837,7 +7885,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.42", "windows-sys 0.48.0", ] @@ -7906,6 +7954,17 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot 0.12.3", +] + [[package]] name = "futures-io" version = "0.3.31" @@ -7933,7 +7992,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.1.0", + "fastrand 2.3.0", "futures-core", "futures-io", "parking", @@ -8369,6 +8428,15 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "heck" version = "0.3.3" @@ -9100,7 +9168,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.9", - "rustix 0.38.21", + "rustix 0.38.42", "windows-sys 0.48.0", ] @@ -9701,6 +9769,9 @@ name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin 0.9.8", +] [[package]] name = "lazycell" @@ -9716,9 +9787,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libflate" @@ -10264,6 +10335,17 @@ dependencies = [ "libsecp256k1-core", ] +[[package]] +name = "libsqlite3-sys" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" version = "1.1.12" @@ -10323,9 +10405,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version 
= "0.4.10" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lioness" @@ -10607,6 +10689,16 @@ dependencies = [ "rawpointer", ] +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest 0.10.7", +] + [[package]] name = "memchr" version = "2.7.4" @@ -10782,7 +10874,7 @@ dependencies = [ "c2-chacha", "curve25519-dalek 4.1.3", "either", - "hashlink", + "hashlink 0.8.4", "lioness", "log", "parking_lot 0.12.3", @@ -11453,6 +11545,23 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + [[package]] name = "num-complex" version = "0.4.4" @@ -14809,9 +14918,11 @@ dependencies = [ "sc-rpc", "sc-rpc-api", "sc-service", + "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-weights 27.0.0", + "sqlx", "static_init", "substrate-cli-test-utils", "substrate-prometheus-endpoint", @@ -16516,6 +16627,15 @@ dependencies = [ "serde", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "penpal-emulated-chain" version = "0.0.0" @@ -16890,6 +17010,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -20030,7 +20161,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.21", + "rustix 0.38.42", "tracing", "windows-sys 0.52.0", ] @@ -20338,7 +20469,7 @@ dependencies = [ "hex", "lazy_static", "procfs-core", - "rustix 0.38.21", + "rustix 0.38.42", ] [[package]] @@ -20871,11 +21002,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", ] [[package]] @@ -21533,6 +21664,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "rsa" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af6c4b23d99685a1408194da11270ef8e9809aff951cc70ec9b17350b087e474" +dependencies = [ + "const-oid", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core 0.6.4", + "signature", + "spki", + "subtle 2.5.0", + "zeroize", +] + [[package]] name = "rstest" version = "0.18.2" 
@@ -21707,15 +21858,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.10", - "windows-sys 0.48.0", + "linux-raw-sys 0.4.14", + "windows-sys 0.59.0", ] [[package]] @@ -24439,6 +24590,9 @@ name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +dependencies = [ + "serde", +] [[package]] name = "smol" @@ -27690,6 +27844,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spinners" @@ -27712,6 +27869,210 @@ dependencies = [ "der", ] +[[package]] +name = "sqlformat" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" +dependencies = [ + "nom", + "unicode_categories", +] + +[[package]] +name = "sqlx" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e" +dependencies = [ + "atoi", + "byteorder", + "bytes", + "crc", + "crossbeam-queue", + "either", + "event-listener 5.3.1", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashbrown 0.14.5", + "hashlink 0.9.1", + "hex", + "indexmap 2.7.0", + "log", + "memchr", + "once_cell", + "paste", + "percent-encoding", + "serde", + "serde_json", + "sha2 0.10.8", + "smallvec", + "sqlformat", + "thiserror", + "tokio", + "tokio-stream", + "tracing", + "url", +] + +[[package]] +name = "sqlx-macros" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", + "sqlx-core", + "sqlx-macros-core", + "syn 2.0.87", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5" +dependencies = [ + "dotenvy", + "either", + "heck 0.5.0", + "hex", + "once_cell", + "proc-macro2 1.0.86", + "quote 1.0.37", + "serde", + "serde_json", + "sha2 0.10.8", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 2.0.87", + "tempfile", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a" +dependencies = [ + "atoi", + "base64 0.22.1", + "bitflags 2.6.0", + "byteorder", + "bytes", + "crc", + "digest 0.10.7", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + 
"futures-util", + "generic-array 0.14.7", + "hex", + "hkdf", + "hmac 0.12.1", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand", + "rsa", + "serde", + "sha1", + "sha2 0.10.8", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8" +dependencies = [ + "atoi", + "base64 0.22.1", + "bitflags 2.6.0", + "byteorder", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac 0.12.1", + "home", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand", + "serde", + "serde_json", + "sha2 0.10.8", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680" +dependencies = [ + "atoi", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "serde_urlencoded", + "sqlx-core", + "tracing", + "url", +] + [[package]] name = "ss58-registry" version = "1.43.0" @@ -28039,6 +28400,17 @@ dependencies = [ "serde", ] +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + [[package]] name = "strsim" version = "0.8.0" @@ -29004,15 +29376,15 @@ checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a" [[package]] name = "tempfile" -version = "3.8.1" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.1.0", - "redox_syscall 0.4.1", - "rustix 0.38.21", - "windows-sys 0.48.0", + "fastrand 2.3.0", + "once_cell", + "rustix 0.38.42", + "windows-sys 0.59.0", ] [[package]] @@ -29041,7 +29413,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.42", "windows-sys 0.48.0", ] @@ -29992,6 +30364,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-properties" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" + [[package]] name = "unicode-segmentation" version = "1.11.0" @@ -30016,6 +30394,12 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + [[package]] name = "universal-hash" version = "0.5.1" @@ -30259,6 +30643,12 @@ version = 
"0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" version = "0.2.95" @@ -30998,6 +31388,16 @@ dependencies = [ "westend-emulated-chain", ] +[[package]] +name = "whoami" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" +dependencies = [ + "redox_syscall 0.5.8", + "wasite", +] + [[package]] name = "wide" version = "0.7.11" diff --git a/prdoc/pr_6836.prdoc b/prdoc/pr_6836.prdoc new file mode 100644 index 0000000000000..1de081bbaa400 --- /dev/null +++ b/prdoc/pr_6836.prdoc @@ -0,0 +1,17 @@ +title: '[pallet-revive-eth-rpc] persist eth transaction hash' +doc: +- audience: Runtime Dev + description: |- + Add an option to persist EVM transaction hash to a SQL db. + This make it possible to run a full archive ETH RPC node (assuming the substrate node is also a full archive node) + + Some queries such as eth_getTransactionByHash, eth_getBlockTransactionCountByHash, and other need to work with a transaction hash index, which is not available in Substrate and need to be stored by the eth-rpc proxy. + + The refactoring break down the Client into a `BlockInfoProvider` and `ReceiptProvider` + - BlockInfoProvider does not need any persistence data, as we can fetch all block info from the source substrate chain + - ReceiptProvider comes in two flavor, + - An in memory cache implementation - This is the one we had so far. + - A DB implementation - This one persist rows with the block_hash, the transaction_index and the transaction_hash, so that we can later fetch the block and extrinsic for that receipt and reconstruct the ReceiptInfo object. 
+crates: +- name: pallet-revive-eth-rpc + bump: minor diff --git a/substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json b/substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json new file mode 100644 index 0000000000000..016276144901a --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\t\tINSERT INTO transaction_hashes (transaction_hash, block_hash, transaction_index)\n\t\t\t\tVALUES ($1, $2, $3)\n\n\t\t\t\tON CONFLICT(transaction_hash) DO UPDATE SET\n\t\t\t\tblock_hash = EXCLUDED.block_hash,\n\t\t\t\ttransaction_index = EXCLUDED.transaction_index\n\t\t\t\t", + "describe": { + "columns": [], + "parameters": { + "Right": 3 + }, + "nullable": [] + }, + "hash": "027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json b/substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json new file mode 100644 index 0000000000000..507564cd05c57 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT COUNT(*) as count\n FROM transaction_hashes\n WHERE block_hash = $1\n ", + "describe": { + "columns": [ + { + "name": "count", + "ordinal": 0, + "type_info": "Integer" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false + ] + }, + "hash": "2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json b/substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json new file mode 100644 index 0000000000000..2443035c433d7 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json @@ -0,0 +1,26 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\tSELECT block_hash, transaction_index\n\t\t\tFROM transaction_hashes\n\t\t\tWHERE transaction_hash = $1\n\t\t\t", + "describe": { + "columns": [ + { + "name": "block_hash", + "ordinal": 0, + "type_info": "Text" + }, + { + "name": "transaction_index", + "ordinal": 1, + "type_info": "Integer" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false, + false + ] + }, + "hash": "29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043" +} diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml index cfaaa102fc3de..9d822f5ff8e27 100644 --- a/substrate/frame/revive/rpc/Cargo.toml +++ b/substrate/frame/revive/rpc/Cargo.toml @@ -7,11 +7,16 @@ license = "Apache-2.0" homepage.workspace = true repository.workspace = true description = "An Ethereum JSON-RPC server for pallet-revive." 
+default-run = "eth-rpc" [[bin]] name = "eth-rpc" path = "src/main.rs" +[[bin]] +name = "eth-indexer" +path = "src/eth-indexer.rs" + [[example]] name = "deploy" path = "examples/rust/deploy.rs" @@ -53,9 +58,15 @@ sc-cli = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } sc-rpc-api = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true } sp-weights = { workspace = true, default-features = true } +sqlx = { version = "0.8.2", features = [ + "macros", + "runtime-tokio", + "sqlite", +] } subxt = { workspace = true, default-features = true, features = ["reconnecting-rpc-client"] } subxt-signer = { workspace = true, optional = true, features = [ "unstable-eth", diff --git a/substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile b/substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile new file mode 100644 index 0000000000000..77fa846a145ce --- /dev/null +++ b/substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile @@ -0,0 +1,28 @@ +FROM rust AS builder + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + protobuf-compiler \ + clang libclang-dev + +WORKDIR /polkadot +COPY . /polkadot +RUN rustup component add rust-src +RUN cargo build --locked --profile production -p pallet-revive-eth-rpc --bin eth-indexer + +FROM docker.io/parity/base-bin:latest +COPY --from=builder /polkadot/target/production/eth-indexer /usr/local/bin + +USER root +RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/eth-indexer --help + +USER polkadot + +ENTRYPOINT ["/usr/local/bin/eth-indexer"] + +# We call the help by default +CMD ["--help"] diff --git a/substrate/frame/revive/rpc/Dockerfile b/substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile similarity index 100% rename from substrate/frame/revive/rpc/Dockerfile rename to substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb index 67df5841e43fba141c7a146a1e4a8958b4c7a84c..39a1d0906b70e1c5cecc7df65976f4db77f41d17 100755 GIT binary patch literal 46862 zcmeHwhgTF$(=XtP$fJOYV!#JX7|Gdy1Qo=HVpfo_$Rf#Iq5+8#1O+5YP;!zSBnjtK z{bTpnvpcZstBCU6d%pV}Tbt>b?yl;tuCA)?sh&Ns&pBPAc8*phJ1dhj?4#3@%@G|F zQ(~URsS=fraVhGgs5Hk67jqLMBO_i+pb_fj4w^CH7MkV`9js<3nF5_asl# zB=tw~;3F=*Mio5C6QMV#9hDIhwHG6^@2-@qdvN-xE@2d_OiB^y43#oTT&W0uHshL` zB_~ALUp@&&wbXDx ztjCjW0(X)sS(T*Hq~Rj^V#MXhk!U2kBo#S^TP@%Yq$D^zf|R+`op(rWms0u2!VTqS z;O|b6c$zK2{9y+&RB1|)8>dlbD3jAPsVSPY7>&k=x`0a>QEEa;LXsSnoT7>~GE!=y z(^ZMFMo+dF8QBN~VMs`0a!O7dk`tD4f@eW@sfFB)6v#xmTrH<)VJX$Wl&V`w<;$sL zIdNZ31S28Q$3sr+lM@zlg6A!bjJ8S``0I`Zr}t8CmU;L+8c3w-;|l6|%z6Ks+@mQjQ*KaPd_~jr z?liwe`(~&OvHh`MJoQj14=Np^js(+tb8hzDoRsCvL)3qa+fr;@`P1kQi*YRcxu z7z_GB1^h0WCbnPXuG^cb`XF~4a_82q^yv!`&`innTs85KG*a5gMnlx&I&v z6dQ@RwcnZ9Av^KTq~;BhO}GY%3|>hKkUV5^jAM z#qR*~%VjglJI=M#q2{WmH1<$Kr6J(X3F}@Hl4 zU(9_j;VH_sdr6(QphQ2M;x7jhr~+eE61{`xH(J_R^VG4eN8F9+E!1usH&H>{1y`y= zZ}T|ch=+n5xX;9YC(rV%>kTgRQgX?L>r{V%dy_cdY>VGD^`%%wM^O>e6_O+Rmb>b1IrS zO}!qbN!$&3x6&fD(106#^Sz$PO(B-l6@ZE*YHB(RJ}9}k*^=A2%Jl@#^IM+Z`}~?Q zcfACabI)!Fi&KOCrLo}>lr}2n3Ci0I{CT$bax!@Gi1%R{^l{@kuDrnA0f)?ZGUyx^ 
z(Iy{Ywz(&a_fKm)zf4r6)(Y?q0;!sNY^Vl5Des^z+)RMNy`T{Xi@39R$Fd!W^KyVZ z?+Ru$beM<`Ofi$rjG~2x`snGjrUWrV!lP*Pu;6wnQKUTV@DSMSl2am?A=+APQhe!Q zkk}@P3(cg%BQh=@hO{@^w@ofCRGVC|&ktiUecRLur;%&n2VWDlhih#Qss7xwU$vX9 zZKx5|T02%KBcweZw|bx-5+1LuZ4+??+os|wwoT<$D2T-hnN5P_riL-uJ`w*(4Q7V2 z^>i3P9T-4sGq^Zd0t}*U0?95qkH)K2D;V$oudsOSZpGr=cM+?NW&x|seHXFX^c`Te zx$hzt**E}-eA`7NlIZ~u>8^`7B(nw_(p?vExMoBQ8Neak{S^)|99waSyT8IAMpi2h zao0s0Y&kHs01oM{i#XV7xHzP{zr=yIKz%R{+;tI$KeGZ1|E7y5{3#ny_%~fd;aUrt zKtw-ziUtw@9g^SU3J89h% zKwDa`N%ioBtv{UShNyGj8h5a0a5Df5_X2IS@%b)4Gw1qwPTaYr7_@axE;F{8zCdUzu#|jAoO2m zA&to2Rv$03ziU7!E0Ph0_H#ju-!_t%uz$hjkDqpR=Z}fs)(edG7l?c#Mvd;kPTCdN zR*r8zZPQ8C^f%C;!e1dn@?QbBRT3Y$ZTeq9^E-wQ%TN0&NUkPZErE~Qj?oS9cBa_A zh<5C56hs;m?F%8X{;Gg>Ly`~Qj`3IETvem#nLj1)OZXMHo5i%wO}{FomHAhNw4WdO zCA8=K6@05Q(9{l72_L>ar`zzqm=?H8O7|uP{<6fcJJ*Q#oWH6&Nx#H)mA8hrN&YKf z@+!=)GdZ=Bu;i5`A4PTWI8smO;@jvh1{{ { + if (!process.env.USE_LIVE_SERVERS) { + procs.push( + // Run geth on port 8546 + await (async () => { + killProcessOnPort(8546) + const proc = spawn( + 'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split( + ' ' + ), + { stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') } + ) + + await waitForHealth('http://localhost:8546').catch() + return proc + })(), + //Run the substate node + (() => { + killProcessOnPort(9944) + return spawn( + [ + './target/debug/substrate-node', + '--dev', + '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', + ], + { + stdout: Bun.file('/tmp/kitchensink.out.log'), + stderr: Bun.file('/tmp/kitchensink.err.log'), + cwd: polkadotSdkPath, + } + ) + })(), + // Run eth-rpc on 8545 + await (async () => { + killProcessOnPort(8545) + const proc = spawn( + [ + './target/debug/eth-rpc', + '--dev', + '--node-rpc-url=ws://localhost:9944', + '-l=rpc-metrics=debug,eth-rpc=debug', + ], + { + stdout: Bun.file('/tmp/eth-rpc.out.log'), + stderr: Bun.file('/tmp/eth-rpc.err.log'), + cwd: polkadotSdkPath, + } + ) + await waitForHealth('http://localhost:8545').catch() + return proc + })() + ) + } +}) afterEach(() => { jsonRpcErrors.length = 0 diff --git a/substrate/frame/revive/rpc/examples/js/src/lib.ts b/substrate/frame/revive/rpc/examples/js/src/lib.ts index e1f0e780d95b4..1470f492e34d6 100644 --- a/substrate/frame/revive/rpc/examples/js/src/lib.ts +++ b/substrate/frame/revive/rpc/examples/js/src/lib.ts @@ -50,7 +50,6 @@ if (geth) { child.unref() await new Promise((resolve) => setTimeout(resolve, 500)) } - const rpcUrl = proxy ? 
'http://localhost:8080' : westend
diff --git a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts index 8289ac8b76e33..4983a6f3b301e 100644 --- a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts +++ b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts @@ -4,7 +4,7 @@ import { parseEther } from 'viem' const hash = await walletClient.deployContract({ abi: PiggyBankAbi, - bytecode: getByteCode('piggyBank'), + bytecode: getByteCode('PiggyBank'), }) const deployReceipt = await walletClient.waitForTransactionReceipt({ hash }) const contractAddress = deployReceipt.contractAddress @@ -31,9 +31,7 @@ assert(contractAddress, 'Contract address should be set') value: parseEther('10'), }) - request.nonce = 0 const hash = await walletClient.writeContract(request) - const receipt = await walletClient.waitForTransactionReceipt({ hash }) console.log(`Deposit receipt: ${receipt.status}`) }
diff --git a/substrate/frame/revive/rpc/examples/js/src/spammer.ts b/substrate/frame/revive/rpc/examples/js/src/spammer.ts new file mode 100644 index 0000000000000..c038afa71f0aa --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/spammer.ts @@ -0,0 +1,104 @@ +import { spawn } from 'bun' +import { + createEnv, + getByteCode, + killProcessOnPort, + polkadotSdkPath, + timeout, + wait, + waitForHealth, +} from './util' +import { FlipperAbi } from '../abi/Flipper' + +// Run the substrate node +console.log('🚀 Start kitchensink...') +killProcessOnPort(9944) +spawn( + [ + './target/debug/substrate-node', + '--dev', + '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', + ], + { + stdout: Bun.file('/tmp/kitchensink.out.log'), + stderr: Bun.file('/tmp/kitchensink.err.log'), + cwd: polkadotSdkPath, + } +) + +// Run eth-indexer +console.log('🔍 Start indexer...') +spawn( + [ + './target/debug/eth-indexer', + '--node-rpc-url=ws://localhost:9944', + '-l=eth-rpc=debug', + `--database-url=${polkadotSdkPath}/substrate/frame/revive/rpc/tx_hashes.db`, + ], + { + stdout: Bun.file('/tmp/eth-indexer.out.log'), + stderr: Bun.file('/tmp/eth-indexer.err.log'), + cwd: polkadotSdkPath, + } +) + +// Run eth-rpc on 8545 +console.log('💻 Start eth-rpc...') +killProcessOnPort(8545) +spawn( + [ + './target/debug/eth-rpc', + '--dev', + '--node-rpc-url=ws://localhost:9944', + '-l=rpc-metrics=debug,eth-rpc=debug', + ], + { + stdout: Bun.file('/tmp/eth-rpc.out.log'), + stderr: Bun.file('/tmp/eth-rpc.err.log'), + cwd: polkadotSdkPath, + } +) +await waitForHealth('http://localhost:8545').catch() + +const env = await createEnv('kitchensink') +const wallet = env.accountWallet + +console.log('🚀 Deploy flipper...') +const hash = await wallet.deployContract({ + abi: FlipperAbi, + bytecode: getByteCode('Flipper'), +}) + +const deployReceipt = await wallet.waitForTransactionReceipt({ hash }) +if (!deployReceipt.contractAddress) throw new Error('Contract address should be set') +const flipperAddr = deployReceipt.contractAddress + +let nonce = await wallet.getTransactionCount(wallet.account) +let callCount = 0 + +console.log('🔄 Starting nonce:', nonce) +console.log('🔄 Starting loop...') +try { + while (true) { + callCount++ + console.log(`🔄 Call flip (${callCount})...`) + const { request } = await wallet.simulateContract({ + account: wallet.account, + address: flipperAddr, + abi: FlipperAbi, + functionName: 'flip', + }) + + console.log(`🔄 Submit flip (call ${callCount})...`) + + await Promise.race([ + (async () => { + const hash = await wallet.writeContract(request) + await
wallet.waitForTransactionReceipt({ hash }) + })(), + timeout(15_000), + ]) + } +} catch (err) { + console.error('Failed with error:', err) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts b/substrate/frame/revive/rpc/examples/js/src/util.ts similarity index 62% rename from substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts rename to substrate/frame/revive/rpc/examples/js/src/util.ts index 3db2453f24750..bdc64eea1ef58 100644 --- a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts +++ b/substrate/frame/revive/rpc/examples/js/src/util.ts @@ -1,10 +1,10 @@ -import { spawn, spawnSync, Subprocess } from 'bun' +import { spawnSync } from 'bun' import { resolve } from 'path' import { readFileSync } from 'fs' import { createWalletClient, defineChain, Hex, http, publicActions } from 'viem' -import { privateKeyToAccount } from 'viem/accounts' +import { privateKeyToAccount, nonceManager } from 'viem/accounts' -export function getByteCode(name: string, evm: boolean): Hex { +export function getByteCode(name: string, evm: boolean = false): Hex { const bytecode = evm ? readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) return `0x${Buffer.from(bytecode).toString('hex')}` } @@ -15,6 +15,8 @@ export type JsonRpcError = { data: Hex } +export const polkadotSdkPath = resolve(__dirname, '../../../../../../..') + export function killProcessOnPort(port: number) { // Check which process is using the specified port const result = spawnSync(['lsof', '-ti', `:${port}`]) @@ -76,7 +78,8 @@ export async function createEnv(name: 'geth' | 'kitchensink') { const accountWallet = createWalletClient({ account: privateKeyToAccount( - '0xa872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f' + '0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133', + { nonceManager } ), transport, chain, @@ -85,6 +88,14 @@ export async function createEnv(name: 'geth' | 'kitchensink') { return { serverWallet, accountWallet, evm: name == 'geth' } } +export function wait(ms: number) { + return new Promise((resolve) => setTimeout(resolve, ms)) +} + +export function timeout(ms: number) { + return new Promise((_resolve, reject) => setTimeout(() => reject(new Error('timeout hit')), ms)) +} + // wait for http request to return 200 export function waitForHealth(url: string) { return new Promise((resolve, reject) => { @@ -120,58 +131,3 @@ export function waitForHealth(url: string) { }, 1000) }) } - -export const procs: Subprocess[] = [] -const polkadotSdkPath = resolve(__dirname, '../../../../../../..') -if (!process.env.USE_LIVE_SERVERS) { - procs.push( - // Run geth on port 8546 - await (async () => { - killProcessOnPort(8546) - const proc = spawn( - 'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split( - ' ' - ), - { stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') } - ) - - await waitForHealth('http://localhost:8546').catch() - return proc - })(), - //Run the substate node - (() => { - killProcessOnPort(9944) - return spawn( - [ - './target/debug/substrate-node', - '--dev', - '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', - ], - { - stdout: Bun.file('/tmp/kitchensink.out.log'), - stderr: Bun.file('/tmp/kitchensink.err.log'), - cwd: polkadotSdkPath, - } - ) - })(), - // Run eth-rpc on 8545 - await (async () => { - killProcessOnPort(8545) - const proc = spawn( - [ - './target/debug/eth-rpc', - '--dev', - '--node-rpc-url=ws://localhost:9944', - 
'-l=rpc-metrics=debug,eth-rpc=debug', - ], - { - stdout: Bun.file('/tmp/eth-rpc.out.log'), - stderr: Bun.file('/tmp/eth-rpc.err.log'), - cwd: polkadotSdkPath, - } - ) - await waitForHealth('http://localhost:8545').catch() - return proc - })() - ) -}
diff --git a/substrate/frame/revive/rpc/examples/westend_local_network.toml b/substrate/frame/revive/rpc/examples/westend_local_network.toml index 28295db76133c..76561be814ece 100644 --- a/substrate/frame/revive/rpc/examples/westend_local_network.toml +++ b/substrate/frame/revive/rpc/examples/westend_local_network.toml @@ -29,13 +29,9 @@ name = "asset-hub-westend-collator1" rpc_port = 9011 ws_port = 9944 command = "{{POLKADOT_PARACHAIN_BINARY}}" -args = [ - "-lparachain=debug,runtime::revive=debug", -] +args = ["-lparachain=debug,runtime::revive=debug"] [[parachains.collators]] name = "asset-hub-westend-collator2" command = "{{POLKADOT_PARACHAIN_BINARY}}" -args = [ - "-lparachain=debug,runtime::revive=debug", -] +args = ["-lparachain=debug,runtime::revive=debug"]
diff --git a/substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql b/substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql new file mode 100644 index 0000000000000..43405bea9d046 --- /dev/null +++ b/substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql @@ -0,0 +1,15 @@ +-- Create DB: +-- DATABASE_URL="..." cargo sqlx database create +-- +-- Run migration: +-- DATABASE_URL="..." cargo sqlx migrate run +-- +-- Update compile time artifacts: +-- DATABASE_URL="..." cargo sqlx prepare +CREATE TABLE transaction_hashes ( + transaction_hash CHAR(64) NOT NULL PRIMARY KEY, + transaction_index INTEGER NOT NULL, + block_hash CHAR(64) NOT NULL +); + +CREATE INDEX idx_block_hash ON transaction_hashes (block_hash);
diff --git a/substrate/frame/revive/rpc/src/block_info_provider.rs b/substrate/frame/revive/rpc/src/block_info_provider.rs new file mode 100644 index 0000000000000..0e91869cddaa2 --- /dev/null +++ b/substrate/frame/revive/rpc/src/block_info_provider.rs @@ -0,0 +1,250 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + client::{SubstrateBlock, SubstrateBlockNumber}, + subxt_client::SrcChainConfig, + ClientError, +}; +use jsonrpsee::core::async_trait; +use sp_core::H256; +use std::{ + collections::{HashMap, VecDeque}, + sync::Arc, +}; +use subxt::{backend::legacy::LegacyRpcMethods, OnlineClient}; +use tokio::sync::RwLock; + +/// BlockInfoProvider caches and retrieves information about blocks. +#[async_trait] +pub trait BlockInfoProvider: Send + Sync { + /// Cache a new block and return the hash of the pruned block, if any. + async fn cache_block(&self, block: SubstrateBlock) -> Option<H256>; + + /// Return the latest ingested block. + async fn latest_block(&self) -> Option<Arc<SubstrateBlock>>; + + /// Get block by block_number.
+ async fn block_by_number( + &self, + block_number: SubstrateBlockNumber, + ) -> Result<Option<Arc<SubstrateBlock>>, ClientError>; + + /// Get block by block hash. + async fn block_by_hash(&self, hash: &H256) -> Result<Option<Arc<SubstrateBlock>>, ClientError>; +} + +/// Provides information about blocks. +#[derive(Clone)] +pub struct BlockInfoProviderImpl { + /// The shared in-memory cache. + cache: Arc<RwLock<BlockCache<SubstrateBlock>>>, + + /// The rpc client, used to fetch blocks not in the cache. + rpc: LegacyRpcMethods<SrcChainConfig>, + + /// The api client, used to fetch blocks not in the cache. + api: OnlineClient<SrcChainConfig>, +} + +impl BlockInfoProviderImpl { + pub fn new( + cache_size: usize, + api: OnlineClient<SrcChainConfig>, + rpc: LegacyRpcMethods<SrcChainConfig>, + ) -> Self { + Self { api, rpc, cache: Arc::new(RwLock::new(BlockCache::new(cache_size))) } + } + + async fn cache(&self) -> tokio::sync::RwLockReadGuard<'_, BlockCache<SubstrateBlock>> { + self.cache.read().await + } +} + +#[async_trait] +impl BlockInfoProvider for BlockInfoProviderImpl { + async fn cache_block(&self, block: SubstrateBlock) -> Option<H256> { + let mut cache = self.cache.write().await; + cache.insert(block) + } + + async fn latest_block(&self) -> Option<Arc<SubstrateBlock>> { + let cache = self.cache().await; + cache.buffer.back().cloned() + } + + async fn block_by_number( + &self, + block_number: SubstrateBlockNumber, + ) -> Result<Option<Arc<SubstrateBlock>>, ClientError> { + let cache = self.cache().await; + if let Some(block) = cache.blocks_by_number.get(&block_number).cloned() { + return Ok(Some(block)); + } + + let Some(hash) = self.rpc.chain_get_block_hash(Some(block_number.into())).await? else { + return Ok(None); + }; + + self.block_by_hash(&hash).await + } + + async fn block_by_hash(&self, hash: &H256) -> Result<Option<Arc<SubstrateBlock>>, ClientError> { + let cache = self.cache().await; + if let Some(block) = cache.blocks_by_hash.get(hash).cloned() { + return Ok(Some(block)); + } + + match self.api.blocks().at(*hash).await { + Ok(block) => Ok(Some(Arc::new(block))), + Err(subxt::Error::Block(subxt::error::BlockError::NotFound(_))) => Ok(None), + Err(err) => Err(err.into()), + } + } +} + +/// The cache maintains a buffer of the last N blocks. +struct BlockCache<B: BlockInfo> { + /// The maximum buffer size. + max_cache_size: usize, + + /// A double-ended queue of the last N blocks. + /// The most recent block is at the back of the queue, and the oldest block is at the front. + buffer: VecDeque<Arc<B>>, + + /// A map of blocks by block number. + blocks_by_number: HashMap<SubstrateBlockNumber, Arc<B>>, + + /// A map of blocks by block hash. + blocks_by_hash: HashMap<H256, Arc<B>>, +} + +/// Provides information about a block. +/// This is an abstraction on top of [`SubstrateBlock`] used to test the [`BlockCache`]. +/// Can be removed once https://github.com/paritytech/subxt/issues/1883 is fixed. +trait BlockInfo { + /// Returns the block hash. + fn hash(&self) -> H256; + /// Returns the block number. + fn number(&self) -> SubstrateBlockNumber; +} + +impl BlockInfo for SubstrateBlock { + fn hash(&self) -> H256 { + SubstrateBlock::hash(self) + } + fn number(&self) -> u32 { + SubstrateBlock::number(self) + } +} + +impl<B: BlockInfo> BlockCache<B> { + /// Create a new cache with the given maximum buffer size. + pub fn new(max_cache_size: usize) -> Self { + Self { + max_cache_size, + buffer: Default::default(), + blocks_by_number: Default::default(), + blocks_by_hash: Default::default(), + } + } + + /// Insert an entry into the cache, and prune the oldest entry if the cache is full.
+ pub fn insert(&mut self, block: B) -> Option<H256> { + let mut pruned_block_hash = None; + if self.buffer.len() >= self.max_cache_size { + if let Some(block) = self.buffer.pop_front() { + let hash = block.hash(); + self.blocks_by_hash.remove(&hash); + self.blocks_by_number.remove(&block.number()); + pruned_block_hash = Some(hash); + } + } + + let block = Arc::new(block); + self.buffer.push_back(block.clone()); + self.blocks_by_number.insert(block.number(), block.clone()); + self.blocks_by_hash.insert(block.hash(), block); + pruned_block_hash + } +} + +#[cfg(test)] +pub mod test { + use super::*; + + struct MockBlock { + block_number: SubstrateBlockNumber, + block_hash: H256, + } + + impl BlockInfo for MockBlock { + fn hash(&self) -> H256 { + self.block_hash + } + + fn number(&self) -> u32 { + self.block_number + } + } + + #[test] + fn cache_insert_works() { + let mut cache = BlockCache::<MockBlock>::new(2); + + let pruned = cache.insert(MockBlock { block_number: 1, block_hash: H256::from([1; 32]) }); + assert_eq!(pruned, None); + + let pruned = cache.insert(MockBlock { block_number: 2, block_hash: H256::from([2; 32]) }); + assert_eq!(pruned, None); + + let pruned = cache.insert(MockBlock { block_number: 3, block_hash: H256::from([3; 32]) }); + assert_eq!(pruned, Some(H256::from([1; 32]))); + + assert_eq!(cache.buffer.len(), 2); + assert_eq!(cache.blocks_by_number.len(), 2); + assert_eq!(cache.blocks_by_hash.len(), 2); + } + + /// A no-op BlockInfoProvider used to test [`db::DBReceiptProvider`]. + pub struct MockBlockInfoProvider; + + #[async_trait] + impl BlockInfoProvider for MockBlockInfoProvider { + async fn cache_block(&self, _block: SubstrateBlock) -> Option<H256> { + None + } + + async fn latest_block(&self) -> Option<Arc<SubstrateBlock>> { + None + } + + async fn block_by_number( + &self, + _block_number: SubstrateBlockNumber, + ) -> Result<Option<Arc<SubstrateBlock>>, ClientError> { + Ok(None) + } + + async fn block_by_hash( + &self, + _hash: &H256, + ) -> Result<Option<Arc<SubstrateBlock>>, ClientError> { + Ok(None) + } + } +}
diff --git a/substrate/frame/revive/rpc/src/cli.rs b/substrate/frame/revive/rpc/src/cli.rs index c0f81fcafd771..d63d596ab7a8b 100644 --- a/substrate/frame/revive/rpc/src/cli.rs +++ b/substrate/frame/revive/rpc/src/cli.rs @@ -16,7 +16,9 @@ // limitations under the License. //! The Ethereum JSON-RPC server. use crate::{ - client::Client, EthRpcServer, EthRpcServerImpl, SystemHealthRpcServer, + client::{connect, Client}, + BlockInfoProvider, BlockInfoProviderImpl, CacheReceiptProvider, DBReceiptProvider, + EthRpcServer, EthRpcServerImpl, ReceiptProvider, SystemHealthRpcServer, SystemHealthRpcServerImpl, }; use clap::Parser; @@ -27,6 +29,7 @@ use sc_service::{ config::{PrometheusConfig, RpcConfiguration}, start_rpc_servers, TaskManager, }; +use std::sync::Arc; // Default port if --prometheus-port is not specified const DEFAULT_PROMETHEUS_PORT: u16 = 9616; @@ -42,6 +45,21 @@ pub struct CliCommand { #[clap(long, default_value = "ws://127.0.0.1:9944")] pub node_rpc_url: String, + /// The maximum number of blocks to cache in memory. + #[clap(long, default_value = "256")] + pub cache_size: usize, + + /// The database used to store Ethereum transaction hashes. + /// This is only useful if the node needs to act as an archive node and respond to Ethereum RPC + /// queries for transactions that are not in the in-memory cache. + #[clap(long)] + pub database_url: Option<String>, + + /// If true, we will only read from the database and not write to it. + /// Only useful if `--database-url` is specified.
+ #[clap(long, default_value = "true")] + pub database_read_only: bool, + #[allow(missing_docs)] #[clap(flatten)] pub shared_params: SharedParams, @@ -78,7 +96,16 @@ fn init_logger(params: &SharedParams) -> anyhow::Result<()> { /// Start the JSON-RPC server using the given command line arguments. pub fn run(cmd: CliCommand) -> anyhow::Result<()> { - let CliCommand { rpc_params, prometheus_params, node_rpc_url, shared_params, .. } = cmd; + let CliCommand { + rpc_params, + prometheus_params, + node_rpc_url, + cache_size, + database_url, + database_read_only, + shared_params, + .. + } = cmd; #[cfg(not(test))] init_logger(&shared_params)?; @@ -110,19 +137,42 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> { let tokio_runtime = sc_cli::build_runtime()?; let tokio_handle = tokio_runtime.handle(); - let signals = tokio_runtime.block_on(async { Signals::capture() })?; let mut task_manager = TaskManager::new(tokio_handle.clone(), prometheus_registry)?; let essential_spawn_handle = task_manager.spawn_essential_handle(); let gen_rpc_module = || { let signals = tokio_runtime.block_on(async { Signals::capture() })?; - let fut = Client::from_url(&node_rpc_url, &essential_spawn_handle).fuse(); + let fut = async { + let (api, rpc_client, rpc) = connect(&node_rpc_url).await?; + let block_provider: Arc = + Arc::new(BlockInfoProviderImpl::new(cache_size, api.clone(), rpc.clone())); + let receipt_provider: Arc = + if let Some(database_url) = database_url.as_ref() { + Arc::new(( + CacheReceiptProvider::default(), + DBReceiptProvider::new( + database_url, + database_read_only, + block_provider.clone(), + ) + .await?, + )) + } else { + Arc::new(CacheReceiptProvider::default()) + }; + + let client = + Client::new(api, rpc_client, rpc, block_provider, receipt_provider).await?; + client.subscribe_and_cache_blocks(&essential_spawn_handle); + Ok::<_, crate::ClientError>(client) + } + .fuse(); pin_mut!(fut); match tokio_handle.block_on(signals.try_until_signal(fut)) { Ok(Ok(client)) => rpc_module(is_dev, client), Ok(Err(err)) => { - log::error!("Error connecting to the node at {node_rpc_url}: {err}"); + log::error!("Error initializing: {err:?}"); Err(sc_service::Error::Application(err.into())) }, Err(_) => Err(sc_service::Error::Application("Client connection interrupted".into())), @@ -142,6 +192,7 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> { start_rpc_servers(&rpc_config, prometheus_registry, tokio_handle, gen_rpc_module, None)?; task_manager.keep_alive(rpc_server_handle); + let signals = tokio_runtime.block_on(async { Signals::capture() })?; tokio_runtime.block_on(signals.run_until_signal(task_manager.future().fuse()))?; Ok(()) } diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs index de97844eccbbf..cd0effe7faf2f 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -17,30 +17,23 @@ //! The client connects to the source substrate chain //! and is used by the rpc server to query and send transactions to the substrate chain. 
use crate::{ + extract_receipts_from_block, runtime::gas_from_fee, subxt_client::{ - revive::{calls::types::EthTransact, events::ContractEmitted}, - runtime_types::pallet_revive::storage::ContractInfo, + revive::calls::types::EthTransact, runtime_types::pallet_revive::storage::ContractInfo, }, - LOG_TARGET, + BlockInfoProvider, ReceiptProvider, TransactionInfo, LOG_TARGET, }; -use futures::{stream, StreamExt}; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned}; use pallet_revive::{ - create1, evm::{ - Block, BlockNumberOrTag, BlockNumberOrTagOrHash, Bytes256, GenericTransaction, Log, - ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256, + Block, BlockNumberOrTag, BlockNumberOrTagOrHash, GenericTransaction, ReceiptInfo, + SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256, }, EthTransactError, EthTransactInfo, }; -use sp_core::keccak_256; use sp_weights::Weight; -use std::{ - collections::{HashMap, VecDeque}, - sync::Arc, - time::Duration, -}; +use std::{ops::ControlFlow, sync::Arc, time::Duration}; use subxt::{ backend::{ legacy::{rpc_methods::SystemHealth, LegacyRpcMethods}, @@ -54,11 +47,10 @@ use subxt::{ storage::Storage, Config, OnlineClient, }; -use subxt_client::transaction_payment::events::TransactionFeePaid; use thiserror::Error; -use tokio::sync::{watch::Sender, RwLock}; +use tokio::{sync::RwLock, try_join}; -use crate::subxt_client::{self, system::events::ExtrinsicSuccess, SrcChainConfig}; +use crate::subxt_client::{self, SrcChainConfig}; /// The substrate block type. pub type SubstrateBlock = subxt::blocks::Block<SrcChainConfig, OnlineClient<SrcChainConfig>>; /// The substrate block number type. pub type SubstrateBlockNumber = u32; /// The substrate block hash type. pub type SubstrateBlockHash = H256; /// Type alias for shared data. pub type Shared<T> = Arc<RwLock<T>>; /// The runtime balance type. pub type Balance = u128; -/// The cache maintains a buffer of the last N blocks, -#[derive(Default)] -struct BlockCache<const N: usize> { - /// A double-ended queue of the last N blocks. - /// The most recent block is at the back of the queue, and the oldest block is at the front. - buffer: VecDeque<Arc<SubstrateBlock>>, - - /// A map of blocks by block number. - blocks_by_number: HashMap<SubstrateBlockNumber, Arc<SubstrateBlock>>, - - /// A map of blocks by block hash. - blocks_by_hash: HashMap<H256, Arc<SubstrateBlock>>, - - /// A map of receipts by hash. - receipts_by_hash: HashMap<H256, ReceiptInfo>, - - /// A map of Signed transaction by hash. - signed_tx_by_hash: HashMap<H256, TransactionSigned>, - - /// A map of receipt hashes by block hash. - tx_hashes_by_block_and_index: HashMap<H256, HashMap<usize, H256>>, -} - /// Unwrap the original `jsonrpsee::core::client::Error::Call` error. fn unwrap_call_err(err: &subxt::error::RpcError) -> Option<ErrorObjectOwned> { use subxt::backend::rpc::reconnecting_rpc_client; @@ -167,6 +136,9 @@ pub enum ClientError { /// A [`RpcError`] wrapper error. #[error(transparent)] RpcError(#[from] RpcError), + /// A [`sqlx::Error`] wrapper error. + #[error(transparent)] + SqlxError(#[from] sqlx::Error), /// A [`codec::Error`] wrapper error. #[error(transparent)] CodecError(#[from] codec::Error), @@ -179,9 +151,18 @@ /// The block hash was not found. #[error("hash not found")] BlockNotFound, + + #[error("No Ethereum extrinsic found")] + EthExtrinsicNotFound, /// The transaction fee could not be found #[error("transactionFeePaid event not found")] TxFeeNotFound, + /// Failed to decode a raw payload into a signed transaction. + #[error("Failed to decode a raw payload into a signed transaction")] + TxDecodingFailed, + /// Failed to recover eth address. + #[error("failed to recover eth address")] + RecoverEthAddressFailed, /// The cache is empty.
#[error("cache is empty")] CacheEmpty, @@ -214,163 +195,18 @@ impl From for ErrorObjectOwned { } } -/// The number of recent blocks maintained by the cache. -/// For each block in the cache, we also store the EVM transaction receipts. -pub const CACHE_SIZE: usize = 256; - -impl BlockCache { - fn latest_block(&self) -> Option<&Arc> { - self.buffer.back() - } - - /// Insert an entry into the cache, and prune the oldest entry if the cache is full. - fn insert(&mut self, block: SubstrateBlock) { - if self.buffer.len() >= N { - if let Some(block) = self.buffer.pop_front() { - log::trace!(target: LOG_TARGET, "Pruning block: {}", block.number()); - let hash = block.hash(); - self.blocks_by_hash.remove(&hash); - self.blocks_by_number.remove(&block.number()); - if let Some(entries) = self.tx_hashes_by_block_and_index.remove(&hash) { - for hash in entries.values() { - self.receipts_by_hash.remove(hash); - } - } - } - } - - let block = Arc::new(block); - self.buffer.push_back(block.clone()); - self.blocks_by_number.insert(block.number(), block.clone()); - self.blocks_by_hash.insert(block.hash(), block); - } -} - /// A client connect to a node and maintains a cache of the last `CACHE_SIZE` blocks. #[derive(Clone)] pub struct Client { - /// The inner state of the client. - inner: Arc, - /// A watch channel to signal cache updates. - pub updates: tokio::sync::watch::Receiver<()>, -} - -/// The inner state of the client. -struct ClientInner { api: OnlineClient, rpc_client: ReconnectingRpcClient, rpc: LegacyRpcMethods, - cache: Shared>, + receipt_provider: Arc, + block_provider: Arc, chain_id: u64, max_block_weight: Weight, } -impl ClientInner { - /// Create a new client instance connecting to the substrate node at the given URL. - async fn from_url(url: &str) -> Result { - let rpc_client = ReconnectingRpcClient::builder() - .retry_policy(ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10))) - .build(url.to_string()) - .await?; - - let api = OnlineClient::::from_rpc_client(rpc_client.clone()).await?; - let cache = Arc::new(RwLock::new(BlockCache::::default())); - - let rpc = LegacyRpcMethods::::new(RpcClient::new(rpc_client.clone())); - - let (chain_id, max_block_weight) = - tokio::try_join!(chain_id(&api), max_block_weight(&api))?; - - Ok(Self { api, rpc_client, rpc, cache, chain_id, max_block_weight }) - } - - /// Get the receipt infos from the extrinsics in a block. 
- async fn receipt_infos( - &self, - block: &SubstrateBlock, - ) -> Result, ClientError> { - // Get extrinsics from the block - let extrinsics = block.extrinsics().await?; - - // Filter extrinsics from pallet_revive - let extrinsics = extrinsics.iter().flat_map(|ext| { - let call = ext.as_extrinsic::().ok()??; - let transaction_hash = H256(keccak_256(&call.payload)); - let signed_tx = TransactionSigned::decode(&call.payload).ok()?; - let from = signed_tx.recover_eth_address().ok()?; - let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from)); - let contract_address = if tx_info.to.is_none() { - Some(create1(&from, tx_info.nonce.unwrap_or_default().try_into().ok()?)) - } else { - None - }; - - Some((from, signed_tx, tx_info, transaction_hash, contract_address, ext)) - }); - - // Map each extrinsic to a receipt - stream::iter(extrinsics) - .map(|(from, signed_tx, tx_info, transaction_hash, contract_address, ext)| async move { - let events = ext.events().await?; - let tx_fees = - events.find_first::()?.ok_or(ClientError::TxFeeNotFound)?; - - let gas_price = tx_info.gas_price.unwrap_or_default(); - let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee)) - .checked_div(gas_price.as_u128()) - .unwrap_or_default(); - - let success = events.has::()?; - let transaction_index = ext.index(); - let block_hash = block.hash(); - let block_number = block.number().into(); - - // get logs from ContractEmitted event - let logs = events.iter() - .filter_map(|event_details| { - let event_details = event_details.ok()?; - let event = event_details.as_event::().ok()??; - - Some(Log { - address: event.contract, - topics: event.topics, - data: Some(event.data.into()), - block_number: Some(block_number), - transaction_hash, - transaction_index: Some(transaction_index.into()), - block_hash: Some(block_hash), - log_index: Some(event_details.index().into()), - ..Default::default() - }) - }).collect(); - - - log::debug!(target: LOG_TARGET, "Adding receipt for tx hash: {transaction_hash:?} - block: {block_number:?}"); - let receipt = ReceiptInfo::new( - block_hash, - block_number, - contract_address, - from, - logs, - tx_info.to, - gas_price, - gas_used.into(), - success, - transaction_hash, - transaction_index.into(), - tx_info.r#type.unwrap_or_default() - ); - - Ok::<_, ClientError>((receipt.transaction_hash, (signed_tx, receipt))) - }) - .buffer_unordered(10) - .collect::>>() - .await - .into_iter() - .collect::, _>>() - } -} - /// Fetch the chain ID from the substrate chain. async fn chain_id(api: &OnlineClient) -> Result { let query = subxt_client::constants().revive().chain_id(); @@ -395,23 +231,181 @@ async fn extract_block_timestamp(block: &SubstrateBlock) -> Option { Some(ext.value.now / 1000) } +/// Connect to a node at the given URL, and return the underlying API, RPC client, and legacy RPC +/// clients. 
+pub async fn connect( + node_rpc_url: &str, +) -> Result< + (OnlineClient, ReconnectingRpcClient, LegacyRpcMethods), + ClientError, +> { + log::info!(target: LOG_TARGET, "Connecting to node at: {node_rpc_url} ..."); + let rpc_client = ReconnectingRpcClient::builder() + .retry_policy(ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10))) + .build(node_rpc_url.to_string()) + .await?; + log::info!(target: LOG_TARGET, "Connected to node at: {node_rpc_url}"); + + let api = OnlineClient::::from_rpc_client(rpc_client.clone()).await?; + let rpc = LegacyRpcMethods::::new(RpcClient::new(rpc_client.clone())); + Ok((api, rpc_client, rpc)) +} + impl Client { /// Create a new client instance. - /// The client will subscribe to new blocks and maintain a cache of [`CACHE_SIZE`] blocks. - pub async fn from_url( - url: &str, - spawn_handle: &sc_service::SpawnEssentialTaskHandle, + pub async fn new( + api: OnlineClient, + rpc_client: ReconnectingRpcClient, + rpc: LegacyRpcMethods, + block_provider: Arc, + receipt_provider: Arc, ) -> Result { - log::info!(target: LOG_TARGET, "Connecting to node at: {url} ..."); - let inner: Arc = Arc::new(ClientInner::from_url(url).await?); - log::info!(target: LOG_TARGET, "Connected to node at: {url}"); + let (chain_id, max_block_weight) = + tokio::try_join!(chain_id(&api), max_block_weight(&api))?; - let (tx, mut updates) = tokio::sync::watch::channel(()); + Ok(Self { + api, + rpc_client, + rpc, + receipt_provider, + block_provider, + chain_id, + max_block_weight, + }) + } - spawn_handle.spawn("subscribe-blocks", None, Self::subscribe_blocks(inner.clone(), tx)); + /// Subscribe to past blocks executing the callback for each block. + /// The subscription continues iterating past blocks until the closure returns + /// `ControlFlow::Break`. Blocks are iterated starting from the latest block and moving + /// backward. 
+	#[allow(dead_code)]
+	async fn subscribe_past_blocks<F, Fut>(&self, callback: F) -> Result<(), ClientError>
+	where
+		F: Fn(SubstrateBlock) -> Fut + Send + Sync,
+		Fut: std::future::Future<Output = Result<ControlFlow<()>, ClientError>> + Send,
+	{
+		log::info!(target: LOG_TARGET, "Subscribing to past blocks");
+		let mut block = self.api.blocks().at_latest().await.inspect_err(|err| {
+			log::error!(target: LOG_TARGET, "Failed to fetch latest block: {err:?}");
+		})?;
+
+		loop {
+			let block_number = block.number();
+			log::debug!(target: LOG_TARGET, "Processing block {block_number}");
+
+			let parent_hash = block.header().parent_hash;
+			let control_flow = callback(block).await.inspect_err(|err| {
+				log::error!(target: LOG_TARGET, "Failed to process block {block_number}: {err:?}");
+			})?;
+
+			match control_flow {
+				ControlFlow::Continue(_) => {
+					if block_number == 0 {
+						log::info!(target: LOG_TARGET, "All past blocks processed");
+						return Ok(());
+					}
+					block = self.api.blocks().at(parent_hash).await.inspect_err(|err| {
+						log::error!(target: LOG_TARGET, "Failed to fetch block at {parent_hash:?}: {err:?}");
+					})?;
+				},
+				ControlFlow::Break(_) => {
+					log::info!(target: LOG_TARGET, "Stopping past block subscription at {block_number}");
+					return Ok(());
+				},
+			}
+		}
+	}
+
+	/// Subscribe to new best blocks, and execute the async closure with
+	/// the extracted block.
+	async fn subscribe_new_blocks<F, Fut>(&self, callback: F) -> Result<(), ClientError>
+	where
+		F: Fn(SubstrateBlock) -> Fut + Send + Sync,
+		Fut: std::future::Future<Output = Result<(), ClientError>> + Send,
+	{
+		log::info!(target: LOG_TARGET, "Subscribing to new blocks");
+		let mut block_stream = match self.api.blocks().subscribe_best().await {
+			Ok(s) => s,
+			Err(err) => {
+				log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}");
+				return Err(err.into());
+			},
+		};
+
+		while let Some(block) = block_stream.next().await {
+			let block = match block {
+				Ok(block) => block,
+				Err(err) => {
+					if err.is_disconnected_will_reconnect() {
+						log::warn!(
+							target: LOG_TARGET,
+							"The RPC connection was lost and we may have missed a few blocks"
+						);
+						continue;
+					}
+
+					log::error!(target: LOG_TARGET, "Failed to fetch block: {err:?}");
+					return Err(err.into());
+				},
+			};
+
+			log::debug!(target: LOG_TARGET, "Pushing block: {}", block.number());
+			callback(block).await?;
+		}
-		updates.changed().await.expect("tx is not dropped");
-		Ok(Self { inner, updates })
+		log::info!(target: LOG_TARGET, "Block subscription ended");
+		Ok(())
+	}
+
+	/// Start the block subscription, and populate the block cache.
+	pub fn subscribe_and_cache_blocks(&self, spawn_handle: &sc_service::SpawnEssentialTaskHandle) {
+		let client = self.clone();
+		spawn_handle.spawn("subscribe-blocks", None, async move {
+			let res = client
+				.subscribe_new_blocks(|block| async {
+					let receipts = extract_receipts_from_block(&block).await?;
+
+					client.receipt_provider.insert(&block.hash(), &receipts).await;
+					if let Some(pruned) = client.block_provider.cache_block(block).await {
+						client.receipt_provider.remove(&pruned).await;
+					}
+
+					Ok(())
+				})
+				.await;
+
+			if let Err(err) = res {
+				log::error!(target: LOG_TARGET, "Block subscription error: {err:?}");
+			}
+		});
+	}
+
+	/// Subscribe to new blocks, caching their receipts and back-filling receipts for past
+	/// blocks down to `oldest_block` when one is provided.
+ pub async fn subscribe_and_cache_receipts( + &self, + oldest_block: Option, + ) -> Result<(), ClientError> { + let new_blocks_fut = self.subscribe_new_blocks(|block| async move { + let receipts = extract_receipts_from_block(&block).await.inspect_err(|err| { + log::error!(target: LOG_TARGET, "Failed to extract receipts from block: {err:?}"); + })?; + self.receipt_provider.insert(&block.hash(), &receipts).await; + Ok(()) + }); + + let Some(oldest_block) = oldest_block else { return new_blocks_fut.await }; + + let old_blocks_fut = self.subscribe_past_blocks(|block| async move { + let receipts = extract_receipts_from_block(&block).await?; + self.receipt_provider.insert(&block.hash(), &receipts).await; + if block.number() == oldest_block { + Ok(ControlFlow::Break(())) + } else { + Ok(ControlFlow::Continue(())) + } + }); + + try_join!(new_blocks_fut, old_blocks_fut).map(|_| ()) } /// Expose the storage API. @@ -425,14 +419,14 @@ impl Client { (*block_number).try_into().map_err(|_| ClientError::ConversionFailed)?; let hash = self.get_block_hash(n).await?.ok_or(ClientError::BlockNotFound)?; - Ok(self.inner.api.storage().at(hash)) + Ok(self.api.storage().at(hash)) }, - BlockNumberOrTagOrHash::H256(hash) => Ok(self.inner.api.storage().at(*hash)), + BlockNumberOrTagOrHash::H256(hash) => Ok(self.api.storage().at(*hash)), BlockNumberOrTagOrHash::BlockTag(_) => { if let Some(block) = self.latest_block().await { - return Ok(self.inner.api.storage().at(block.hash())); + return Ok(self.api.storage().at(block.hash())); } - let storage = self.inner.api.storage().at_latest().await?; + let storage = self.api.storage().at_latest().await?; Ok(storage) }, } @@ -452,90 +446,24 @@ impl Client { (*block_number).try_into().map_err(|_| ClientError::ConversionFailed)?; let hash = self.get_block_hash(n).await?.ok_or(ClientError::BlockNotFound)?; - Ok(self.inner.api.runtime_api().at(hash)) + Ok(self.api.runtime_api().at(hash)) }, - BlockNumberOrTagOrHash::H256(hash) => Ok(self.inner.api.runtime_api().at(*hash)), + BlockNumberOrTagOrHash::H256(hash) => Ok(self.api.runtime_api().at(*hash)), BlockNumberOrTagOrHash::BlockTag(_) => { if let Some(block) = self.latest_block().await { - return Ok(self.inner.api.runtime_api().at(block.hash())); + return Ok(self.api.runtime_api().at(block.hash())); } - let api = self.inner.api.runtime_api().at_latest().await?; + let api = self.api.runtime_api().at_latest().await?; Ok(api) }, } } - /// Subscribe to new blocks and update the cache. 
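`subscribe_and_cache_receipts` above drives two futures side by side: the live best-block subscription and, when `oldest_block` is given, a backward walk over past blocks that ends via `ControlFlow::Break`. A standalone model of the backward walk (plain std, toy block numbers instead of `SubstrateBlock`):

```rust
use std::ops::ControlFlow;

// Mirrors the loop in `subscribe_past_blocks`: visit block numbers from the
// tip towards genesis until the callback breaks or block 0 is processed.
fn walk_back(tip: u64, mut callback: impl FnMut(u64) -> ControlFlow<()>) {
    let mut number = tip;
    loop {
        if let ControlFlow::Break(()) = callback(number) {
            return; // the caller has seen enough history
        }
        if number == 0 {
            return; // all past blocks processed
        }
        number -= 1; // step to the parent block
    }
}

fn main() {
    walk_back(5, |n| {
        println!("processing block {n}");
        if n == 2 { ControlFlow::Break(()) } else { ControlFlow::Continue(()) }
    });
}
```

The two subscriptions are then joined with `try_join!`, so a failure in either the live stream or the backfill aborts the whole call. A minimal illustration of that error behavior (assuming the `tokio` crate with its `macros` feature):

```rust
#[tokio::main]
async fn main() {
    let live = async { Ok::<(), String>(()) };
    let backfill = async { Err::<(), String>("backfill failed".into()) };
    // Both futures are polled concurrently; the first Err wins.
    let result = tokio::try_join!(live, backfill).map(|_| ());
    assert_eq!(result, Err("backfill failed".to_string()));
}
```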
- async fn subscribe_blocks(inner: Arc, tx: Sender<()>) { - log::info!(target: LOG_TARGET, "Subscribing to new blocks"); - let mut block_stream = match inner.as_ref().api.blocks().subscribe_best().await { - Ok(s) => s, - Err(err) => { - log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}"); - return; - }, - }; - - while let Some(block) = block_stream.next().await { - let block = match block { - Ok(block) => block, - Err(err) => { - if err.is_disconnected_will_reconnect() { - log::warn!( - target: LOG_TARGET, - "The RPC connection was lost and we may have missed a few blocks" - ); - continue; - } - - log::error!(target: LOG_TARGET, "Failed to fetch block: {err:?}"); - return; - }, - }; - - log::trace!(target: LOG_TARGET, "Pushing block: {}", block.number()); - let mut cache = inner.cache.write().await; - - let receipts = inner - .receipt_infos(&block) - .await - .inspect_err(|err| { - log::error!(target: LOG_TARGET, "Failed to get receipts: {err:?}"); - }) - .unwrap_or_default(); - - if !receipts.is_empty() { - let values = receipts - .iter() - .map(|(hash, (_, receipt))| (receipt.transaction_index, *hash)) - .collect::>(); - - cache.tx_hashes_by_block_and_index.insert(block.hash(), values); - - cache - .receipts_by_hash - .extend(receipts.iter().map(|(hash, (_, receipt))| (*hash, receipt.clone()))); - - cache.signed_tx_by_hash.extend( - receipts.iter().map(|(hash, (signed_tx, _))| (*hash, signed_tx.clone())), - ) - } - - cache.insert(block); - tx.send_replace(()); - } - - log::info!(target: LOG_TARGET, "Block subscription ended"); - } -} - -impl Client { /// Get the most recent block stored in the cache. pub async fn latest_block(&self) -> Option> { - let cache = self.inner.cache.read().await; - let block = cache.latest_block()?; - Some(block.clone()) + let block = self.block_provider.latest_block().await?; + Some(block) } /// Expose the transaction API. @@ -543,23 +471,22 @@ impl Client { &self, call: subxt::tx::DefaultPayload, ) -> Result { - let ext = self.inner.api.tx().create_unsigned(&call).map_err(ClientError::from)?; + let ext = self.api.tx().create_unsigned(&call).map_err(ClientError::from)?; let hash = ext.submit().await?; Ok(hash) } /// Get an EVM transaction receipt by hash. pub async fn receipt(&self, tx_hash: &H256) -> Option { - let cache = self.inner.cache.read().await; - cache.receipts_by_hash.get(tx_hash).cloned() + self.receipt_provider.receipt_by_hash(tx_hash).await } /// Get the syncing status of the chain. 
pub async fn syncing(&self) -> Result { - let health = self.inner.rpc.system_health().await?; + let health = self.rpc.system_health().await?; let status = if health.is_syncing { - let client = RpcClient::new(self.inner.rpc_client.clone()); + let client = RpcClient::new(self.rpc_client.clone()); let sync_state: sc_rpc::system::SyncState = client.request("system_syncState", Default::default()).await?; @@ -582,27 +509,23 @@ impl Client { block_hash: &H256, transaction_index: &U256, ) -> Option { - let cache = self.inner.cache.read().await; - let receipt_hash = - cache.tx_hashes_by_block_and_index.get(block_hash)?.get(transaction_index)?; - let receipt = cache.receipts_by_hash.get(receipt_hash)?; - Some(receipt.clone()) + self.receipt_provider + .receipt_by_block_hash_and_index(block_hash, transaction_index) + .await } pub async fn signed_tx_by_hash(&self, tx_hash: &H256) -> Option { - let cache = self.inner.cache.read().await; - cache.signed_tx_by_hash.get(tx_hash).cloned() + self.receipt_provider.signed_tx_by_hash(tx_hash).await } /// Get receipts count per block. pub async fn receipts_count_per_block(&self, block_hash: &SubstrateBlockHash) -> Option { - let cache = self.inner.cache.read().await; - cache.tx_hashes_by_block_and_index.get(block_hash).map(|v| v.len()) + self.receipt_provider.receipts_count_per_block(block_hash).await } /// Get the system health. pub async fn system_health(&self) -> Result { - let health = self.inner.rpc.system_health().await?; + let health = self.rpc.system_health().await?; Ok(health) } @@ -697,8 +620,8 @@ impl Client { /// Get the block number of the latest block. pub async fn block_number(&self) -> Result { - let cache = self.inner.cache.read().await; - let latest_block = cache.buffer.back().ok_or(ClientError::CacheEmpty)?; + let latest_block = + self.block_provider.latest_block().await.ok_or(ClientError::CacheEmpty)?; Ok(latest_block.number()) } @@ -707,13 +630,8 @@ impl Client { &self, block_number: SubstrateBlockNumber, ) -> Result, ClientError> { - let cache = self.inner.cache.read().await; - if let Some(block) = cache.blocks_by_number.get(&block_number) { - return Ok(Some(block.hash())); - } - - let hash = self.inner.rpc.chain_get_block_hash(Some(block_number.into())).await?; - Ok(hash) + let maybe_block = self.block_provider.block_by_number(block_number).await?; + Ok(maybe_block.map(|block| block.hash())) } /// Get a block for the specified hash or number. @@ -727,8 +645,8 @@ impl Client { self.block_by_number(n).await }, BlockNumberOrTag::BlockTag(_) => { - let cache = self.inner.cache.read().await; - Ok(cache.buffer.back().cloned()) + let block = self.block_provider.latest_block().await; + Ok(block) }, } } @@ -738,16 +656,7 @@ impl Client { &self, hash: &SubstrateBlockHash, ) -> Result>, ClientError> { - let cache = self.inner.cache.read().await; - if let Some(block) = cache.blocks_by_hash.get(hash) { - return Ok(Some(block.clone())); - } - - match self.inner.api.blocks().at(*hash).await { - Ok(block) => Ok(Some(Arc::new(block))), - Err(subxt::Error::Block(subxt::error::BlockError::NotFound(_))) => Ok(None), - Err(err) => Err(err.into()), - } + self.block_provider.block_by_hash(hash).await } /// Get a block by number @@ -755,21 +664,16 @@ impl Client { &self, block_number: SubstrateBlockNumber, ) -> Result>, ClientError> { - let cache = self.inner.cache.read().await; - if let Some(block) = cache.blocks_by_number.get(&block_number) { - return Ok(Some(block.clone())); - } - - let Some(hash) = self.get_block_hash(block_number).await? 
else { - return Ok(None); - }; - - self.block_by_hash(&hash).await + self.block_provider.block_by_number(block_number).await } /// Get the EVM block for the given hash. - pub async fn evm_block(&self, block: Arc) -> Result { - let runtime_api = self.inner.api.runtime_api().at(block.hash()); + pub async fn evm_block( + &self, + block: Arc, + hydrated_transactions: bool, + ) -> Result { + let runtime_api = self.api.runtime_api().at(block.hash()); let max_fee = Self::weight_to_fee(&runtime_api, self.max_block_weight()).await?; let gas_limit = gas_from_fee(max_fee); @@ -781,6 +685,23 @@ impl Client { let state_root = header.state_root.0.into(); let extrinsics_root = header.extrinsics_root.0.into(); + let receipts = extract_receipts_from_block(&block).await?; + let gas_used = + receipts.iter().fold(U256::zero(), |acc, (_, receipt)| acc + receipt.gas_used); + let transactions = if hydrated_transactions { + receipts + .into_iter() + .map(|(signed_tx, receipt)| TransactionInfo::new(receipt, signed_tx)) + .collect::>() + .into() + } else { + receipts + .into_iter() + .map(|(_, receipt)| receipt.transaction_hash) + .collect::>() + .into() + }; + Ok(Block { hash: block.hash(), parent_hash, @@ -789,9 +710,11 @@ impl Client { number: header.number.into(), timestamp: timestamp.into(), difficulty: Some(0u32.into()), + base_fee_per_gas: Some(crate::GAS_PRICE.into()), gas_limit, - logs_bloom: Bytes256([0u8; 256]), + gas_used, receipts_root: extrinsics_root, + transactions, ..Default::default() }) } @@ -811,11 +734,11 @@ impl Client { /// Get the chain ID. pub fn chain_id(&self) -> u64 { - self.inner.chain_id + self.chain_id } /// Get the Max Block Weight. pub fn max_block_weight(&self) -> Weight { - self.inner.max_block_weight + self.max_block_weight } } diff --git a/substrate/frame/revive/rpc/src/eth-indexer.rs b/substrate/frame/revive/rpc/src/eth-indexer.rs new file mode 100644 index 0000000000000..3e7f6b6fa91b8 --- /dev/null +++ b/substrate/frame/revive/rpc/src/eth-indexer.rs @@ -0,0 +1,88 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! The Ethereum JSON-RPC server. +use clap::Parser; +use pallet_revive_eth_rpc::{ + client::{connect, Client, SubstrateBlockNumber}, + BlockInfoProvider, BlockInfoProviderImpl, DBReceiptProvider, ReceiptProvider, +}; +use sc_cli::SharedParams; +use std::sync::Arc; + +// Parsed command instructions from the command line +#[derive(Parser, Debug)] +#[clap(author, about, version)] +pub struct CliCommand { + /// The node url to connect to + #[clap(long, default_value = "ws://127.0.0.1:9944")] + pub node_rpc_url: String, + + /// Specifies the block number to start indexing from, going backwards from the current block. + /// If not provided, only new blocks will be indexed + #[clap(long)] + pub oldest_block: Option, + + /// The database used to store Ethereum transaction hashes. 
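`evm_block` above now honors the standard `eth_getBlockBy*` hydration flag: `false` yields only transaction hashes, `true` yields full transaction objects, and `gas_used` is summed over the block's receipts. A standalone sketch of the two shapes (illustrative types, not the crate's):

```rust
// Toy stand-ins: (hash, sender) tuples instead of ReceiptInfo/TransactionInfo.
enum Transactions {
    Hashes(Vec<[u8; 32]>),
    Full(Vec<([u8; 32], [u8; 20])>),
}

fn to_transactions(receipts: &[([u8; 32], [u8; 20])], hydrated: bool) -> Transactions {
    if hydrated {
        Transactions::Full(receipts.to_vec()) // full objects for hydrated queries
    } else {
        Transactions::Hashes(receipts.iter().map(|(hash, _)| *hash).collect())
    }
}

fn main() {
    let receipts = [([1u8; 32], [2u8; 20])];
    match to_transactions(&receipts, false) {
        Transactions::Hashes(hashes) => assert_eq!(hashes.len(), 1),
        Transactions::Full(_) => unreachable!("not hydrated"),
    }
}
```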
+ #[clap(long)] + pub database_url: String, + + #[allow(missing_docs)] + #[clap(flatten)] + pub shared_params: SharedParams, +} + +/// Initialize the logger +#[cfg(not(test))] +fn init_logger(params: &SharedParams) -> anyhow::Result<()> { + let mut logger = sc_cli::LoggerBuilder::new(params.log_filters().join(",")); + logger + .with_log_reloading(params.enable_log_reloading) + .with_detailed_output(params.detailed_log_output); + + if let Some(tracing_targets) = ¶ms.tracing_targets { + let tracing_receiver = params.tracing_receiver.into(); + logger.with_profiling(tracing_receiver, tracing_targets); + } + + if params.disable_log_color { + logger.with_colors(false); + } + + logger.init()?; + Ok(()) +} + +#[tokio::main] +pub async fn main() -> anyhow::Result<()> { + let CliCommand { + node_rpc_url, database_url, shared_params: _shared_params, oldest_block, .. + } = CliCommand::parse(); + + #[cfg(not(test))] + init_logger(&_shared_params)?; + + let (api, rpc_client, rpc) = connect(&node_rpc_url).await?; + let block_provider: Arc = + Arc::new(BlockInfoProviderImpl::new(0, api.clone(), rpc.clone())); + let receipt_provider: Arc = + Arc::new(DBReceiptProvider::new(&database_url, false, block_provider.clone()).await?); + + let client = Client::new(api, rpc_client, rpc, block_provider, receipt_provider).await?; + client.subscribe_and_cache_receipts(oldest_block).await?; + + Ok(()) +} diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs index 230f2f8b7ef96..5e1341e2a29ab 100644 --- a/substrate/frame/revive/rpc/src/lib.rs +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -24,6 +24,7 @@ use jsonrpsee::{ types::{ErrorCode, ErrorObjectOwned}, }; use pallet_revive::evm::*; +use sp_arithmetic::Permill; use sp_core::{keccak_256, H160, H256, U256}; use thiserror::Error; @@ -35,6 +36,12 @@ pub mod subxt_client; #[cfg(test)] mod tests; +mod block_info_provider; +pub use block_info_provider::*; + +mod receipt_provider; +pub use receipt_provider::*; + mod rpc_health; pub use rpc_health::*; @@ -121,7 +128,12 @@ impl EthRpcServer for EthRpcServerImpl { transaction_hash: H256, ) -> RpcResult> { let receipt = self.client.receipt(&transaction_hash).await; - log::debug!(target: LOG_TARGET, "transaction_receipt for {transaction_hash:?}: {}", receipt.is_some()); + log::debug!( + target: LOG_TARGET, + "transaction_receipt for {transaction_hash:?}: received: {received} - success: {success:?}", + received = receipt.is_some(), + success = receipt.as_ref().map(|r| r.status == Some(U256::one())) + ); Ok(receipt) } @@ -197,12 +209,12 @@ impl EthRpcServer for EthRpcServerImpl { async fn get_block_by_hash( &self, block_hash: H256, - _hydrated_transactions: bool, + hydrated_transactions: bool, ) -> RpcResult> { let Some(block) = self.client.block_by_hash(&block_hash).await? 
else { return Ok(None); }; - let block = self.client.evm_block(block).await?; + let block = self.client.evm_block(block, hydrated_transactions).await?; Ok(Some(block)) } @@ -220,6 +232,11 @@ impl EthRpcServer for EthRpcServerImpl { Ok(U256::from(GAS_PRICE)) } + async fn max_priority_fee_per_gas(&self) -> RpcResult { + // TODO: Provide better estimation + Ok(U256::from(Permill::from_percent(20).mul_ceil(GAS_PRICE))) + } + async fn get_code(&self, address: H160, block: BlockNumberOrTagOrHash) -> RpcResult { let code = self.client.get_contract_code(&address, block).await?; Ok(code.into()) @@ -232,12 +249,12 @@ impl EthRpcServer for EthRpcServerImpl { async fn get_block_by_number( &self, block: BlockNumberOrTag, - _hydrated_transactions: bool, + hydrated_transactions: bool, ) -> RpcResult> { let Some(block) = self.client.block_by_number_or_tag(&block).await? else { return Ok(None); }; - let block = self.client.evm_block(block).await?; + let block = self.client.evm_block(block, hydrated_transactions).await?; Ok(Some(block)) } diff --git a/substrate/frame/revive/rpc/src/receipt_provider.rs b/substrate/frame/revive/rpc/src/receipt_provider.rs new file mode 100644 index 0000000000000..5c102b3d3d41a --- /dev/null +++ b/substrate/frame/revive/rpc/src/receipt_provider.rs @@ -0,0 +1,240 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + client::SubstrateBlock, + subxt_client::{ + revive::{calls::types::EthTransact, events::ContractEmitted}, + system::events::ExtrinsicSuccess, + transaction_payment::events::TransactionFeePaid, + SrcChainConfig, + }, + ClientError, LOG_TARGET, +}; +use futures::{stream, StreamExt}; +use jsonrpsee::core::async_trait; +use pallet_revive::{ + create1, + evm::{GenericTransaction, Log, ReceiptInfo, TransactionSigned, H256, U256}, +}; +use sp_core::keccak_256; +use tokio::join; + +mod cache; +pub use cache::CacheReceiptProvider; + +mod db; +pub use db::DBReceiptProvider; + +/// Provide means to store and retrieve receipts. +#[async_trait] +pub trait ReceiptProvider: Send + Sync { + /// Insert receipts into the provider. + async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]); + + /// Remove receipts with the given block hash. + async fn remove(&self, block_hash: &H256); + + /// Get the receipt for the given block hash and transaction index. + async fn receipt_by_block_hash_and_index( + &self, + block_hash: &H256, + transaction_index: &U256, + ) -> Option; + + /// Get the number of receipts per block. + async fn receipts_count_per_block(&self, block_hash: &H256) -> Option; + + /// Get the receipt for the given transaction hash. + async fn receipt_by_hash(&self, transaction_hash: &H256) -> Option; + + /// Get the signed transaction for the given transaction hash. 
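The `eth_maxPriorityFeePerGas` stub above returns 20% of the fixed `GAS_PRICE`, rounded up. In plain integer arithmetic, `Permill::from_percent(20).mul_ceil(x)` works out as follows (a model of the sp_arithmetic call, not its implementation):

```rust
// Permill is parts-per-million; from_percent(20) is 200_000 parts.
// mul_ceil multiplies and divides with ceiling rounding.
fn priority_fee(gas_price: u128) -> u128 {
    let parts: u128 = 200_000;
    (gas_price * parts + 999_999) / 1_000_000
}

fn main() {
    assert_eq!(priority_fee(1_000), 200); // exactly 20%
    assert_eq!(priority_fee(1), 1); // mul_ceil rounds up, never truncates to zero
}
```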
+	async fn signed_tx_by_hash(&self, transaction_hash: &H256) -> Option<TransactionSigned>;
+}
+
+#[async_trait]
+impl<Main: ReceiptProvider, Fallback: ReceiptProvider> ReceiptProvider for (Main, Fallback) {
+	async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) {
+		join!(self.0.insert(block_hash, receipts), self.1.insert(block_hash, receipts));
+	}
+
+	async fn remove(&self, block_hash: &H256) {
+		join!(self.0.remove(block_hash), self.1.remove(block_hash));
+	}
+
+	async fn receipt_by_block_hash_and_index(
+		&self,
+		block_hash: &H256,
+		transaction_index: &U256,
+	) -> Option<ReceiptInfo> {
+		if let Some(receipt) =
+			self.0.receipt_by_block_hash_and_index(block_hash, transaction_index).await
+		{
+			return Some(receipt);
+		}
+
+		self.1.receipt_by_block_hash_and_index(block_hash, transaction_index).await
+	}
+
+	async fn receipts_count_per_block(&self, block_hash: &H256) -> Option<usize> {
+		if let Some(count) = self.0.receipts_count_per_block(block_hash).await {
+			return Some(count);
+		}
+		self.1.receipts_count_per_block(block_hash).await
+	}
+
+	async fn receipt_by_hash(&self, hash: &H256) -> Option<ReceiptInfo> {
+		if let Some(receipt) = self.0.receipt_by_hash(hash).await {
+			return Some(receipt);
+		}
+		self.1.receipt_by_hash(hash).await
+	}
+
+	async fn signed_tx_by_hash(&self, hash: &H256) -> Option<TransactionSigned> {
+		if let Some(tx) = self.0.signed_tx_by_hash(hash).await {
+			return Some(tx);
+		}
+		self.1.signed_tx_by_hash(hash).await
+	}
+}
+
+/// Extract a [`TransactionSigned`] and a [`ReceiptInfo`] from an extrinsic.
+pub async fn extract_receipt_from_extrinsic(
+	block: &SubstrateBlock,
+	ext: subxt::blocks::ExtrinsicDetails<SrcChainConfig, OnlineClient<SrcChainConfig>>,
+	call: EthTransact,
+) -> Result<(TransactionSigned, ReceiptInfo), ClientError> {
+	let transaction_index = ext.index();
+	let block_number = U256::from(block.number());
+	let block_hash = block.hash();
+	let events = ext.events().await?;
+
+	let success = events.has::<ExtrinsicSuccess>().inspect_err(|err| {
+		log::debug!(target: LOG_TARGET, "Failed to look up the ExtrinsicSuccess event in block {block_number}: {err:?}")
+	})?;
+	let tx_fees = events
+		.find_first::<TransactionFeePaid>()?
+ .ok_or(ClientError::TxFeeNotFound) + .inspect_err( + |err| log::debug!(target: LOG_TARGET, "TransactionFeePaid not found in events for block {block_number}\n{err:?}") + )?; + let transaction_hash = H256(keccak_256(&call.payload)); + + let signed_tx = + TransactionSigned::decode(&call.payload).map_err(|_| ClientError::TxDecodingFailed)?; + let from = signed_tx.recover_eth_address().map_err(|_| { + log::error!(target: LOG_TARGET, "Failed to recover eth address from signed tx"); + ClientError::RecoverEthAddressFailed + })?; + + let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from)); + let gas_price = tx_info.gas_price.unwrap_or_default(); + let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee)) + .checked_div(gas_price.as_u128()) + .unwrap_or_default(); + + // get logs from ContractEmitted event + let logs = events + .iter() + .filter_map(|event_details| { + let event_details = event_details.ok()?; + let event = event_details.as_event::().ok()??; + + Some(Log { + address: event.contract, + topics: event.topics, + data: Some(event.data.into()), + block_number: Some(block_number), + transaction_hash, + transaction_index: Some(transaction_index.into()), + block_hash: Some(block_hash), + log_index: Some(event_details.index().into()), + ..Default::default() + }) + }) + .collect(); + + let contract_address = if tx_info.to.is_none() { + Some(create1( + &from, + tx_info + .nonce + .unwrap_or_default() + .try_into() + .map_err(|_| ClientError::ConversionFailed)?, + )) + } else { + None + }; + + log::debug!(target: LOG_TARGET, "Adding receipt for tx hash: {transaction_hash:?} - block: {block_number:?}"); + let receipt = ReceiptInfo::new( + block_hash, + block_number, + contract_address, + from, + logs, + tx_info.to, + gas_price, + gas_used.into(), + success, + transaction_hash, + transaction_index.into(), + tx_info.r#type.unwrap_or_default(), + ); + Ok((signed_tx, receipt)) +} + +/// Extract receipts from block. +pub async fn extract_receipts_from_block( + block: &SubstrateBlock, +) -> Result, ClientError> { + // Filter extrinsics from pallet_revive + let extrinsics = block.extrinsics().await.inspect_err(|err| { + log::debug!(target: LOG_TARGET, "Error fetching for #{:?} extrinsics: {err:?}", block.number()); + })?; + + let extrinsics = extrinsics.iter().flat_map(|ext| { + let call = ext.as_extrinsic::().ok()??; + Some((ext, call)) + }); + + stream::iter(extrinsics) + .map(|(ext, call)| async move { extract_receipt_from_extrinsic(block, ext, call).await }) + .buffer_unordered(10) + .collect::>>() + .await + .into_iter() + .collect::, _>>() +} + +/// Extract receipt from transaction +pub async fn extract_receipts_from_transaction( + block: &SubstrateBlock, + transaction_index: usize, +) -> Result<(TransactionSigned, ReceiptInfo), ClientError> { + let extrinsics = block.extrinsics().await?; + let ext = extrinsics + .iter() + .nth(transaction_index) + .ok_or(ClientError::EthExtrinsicNotFound)?; + + let call = ext + .as_extrinsic::()? + .ok_or_else(|| ClientError::EthExtrinsicNotFound)?; + extract_receipt_from_extrinsic(block, ext, call).await +} diff --git a/substrate/frame/revive/rpc/src/receipt_provider/cache.rs b/substrate/frame/revive/rpc/src/receipt_provider/cache.rs new file mode 100644 index 0000000000000..39124929ec07d --- /dev/null +++ b/substrate/frame/revive/rpc/src/receipt_provider/cache.rs @@ -0,0 +1,148 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use super::ReceiptProvider; +use jsonrpsee::core::async_trait; +use pallet_revive::evm::{ReceiptInfo, TransactionSigned, H256, U256}; +use std::{collections::HashMap, sync::Arc}; +use tokio::sync::RwLock; + +/// A `[ReceiptProvider]` that caches receipts in memory. +#[derive(Clone, Default)] +pub struct CacheReceiptProvider { + cache: Arc>, +} + +impl CacheReceiptProvider { + /// Get a read access on the shared cache. + async fn cache(&self) -> tokio::sync::RwLockReadGuard<'_, ReceiptCache> { + self.cache.read().await + } +} + +#[async_trait] +impl ReceiptProvider for CacheReceiptProvider { + async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) { + let mut cache = self.cache.write().await; + cache.insert(block_hash, receipts); + } + + async fn remove(&self, block_hash: &H256) { + let mut cache = self.cache.write().await; + cache.remove(block_hash); + } + + async fn receipt_by_block_hash_and_index( + &self, + block_hash: &H256, + transaction_index: &U256, + ) -> Option { + let cache = self.cache().await; + let receipt_hash = cache + .transaction_hashes_by_block_and_index + .get(block_hash)? + .get(transaction_index)?; + let receipt = cache.receipts_by_hash.get(receipt_hash)?; + Some(receipt.clone()) + } + + async fn receipts_count_per_block(&self, block_hash: &H256) -> Option { + let cache = self.cache().await; + cache.transaction_hashes_by_block_and_index.get(block_hash).map(|v| v.len()) + } + + async fn receipt_by_hash(&self, hash: &H256) -> Option { + let cache = self.cache().await; + cache.receipts_by_hash.get(hash).cloned() + } + + async fn signed_tx_by_hash(&self, hash: &H256) -> Option { + let cache = self.cache().await; + cache.signed_tx_by_hash.get(hash).cloned() + } +} + +#[derive(Default)] +struct ReceiptCache { + /// A map of receipts by transaction hash. + receipts_by_hash: HashMap, + + /// A map of Signed transaction by transaction hash. + signed_tx_by_hash: HashMap, + + /// A map of receipt hashes by block hash. + transaction_hashes_by_block_and_index: HashMap>, +} + +impl ReceiptCache { + /// Insert new receipts into the cache. + pub fn insert(&mut self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) { + if !receipts.is_empty() { + let values = receipts + .iter() + .map(|(_, receipt)| (receipt.transaction_index, receipt.transaction_hash)) + .collect::>(); + + self.transaction_hashes_by_block_and_index.insert(*block_hash, values); + + self.receipts_by_hash.extend( + receipts.iter().map(|(_, receipt)| (receipt.transaction_hash, receipt.clone())), + ); + + self.signed_tx_by_hash.extend( + receipts + .iter() + .map(|(signed_tx, receipt)| (receipt.transaction_hash, signed_tx.clone())), + ) + } + } + + /// Remove entry from the cache. 
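`ReceiptCache` keeps three maps in step: two keyed by transaction hash and one per-block index. The per-block index is what makes eviction cheap; the `remove` shown next walks it and drops every per-transaction entry for the pruned block. A toy model of that invariant:

```rust
use std::collections::HashMap;

// Primary map keyed by tx id plus a per-block index, so evicting a block
// removes each of its per-tx entries without a full scan.
#[derive(Default)]
struct TwoIndex {
    by_tx: HashMap<u64, &'static str>,
    by_block: HashMap<u64, Vec<u64>>,
}

impl TwoIndex {
    fn insert(&mut self, block: u64, tx: u64, receipt: &'static str) {
        self.by_tx.insert(tx, receipt);
        self.by_block.entry(block).or_default().push(tx);
    }

    fn remove_block(&mut self, block: u64) {
        if let Some(txs) = self.by_block.remove(&block) {
            for tx in txs {
                self.by_tx.remove(&tx);
            }
        }
    }
}

fn main() {
    let mut cache = TwoIndex::default();
    cache.insert(1, 100, "receipt-100");
    cache.remove_block(1);
    assert!(cache.by_tx.is_empty());
}
```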
+	pub fn remove(&mut self, hash: &H256) {
+		if let Some(entries) = self.transaction_hashes_by_block_and_index.remove(hash) {
+			for hash in entries.values() {
+				self.receipts_by_hash.remove(hash);
+				self.signed_tx_by_hash.remove(hash);
+			}
+		}
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[test]
+	fn cache_insert_and_remove_works() {
+		let mut cache = ReceiptCache::default();
+
+		for i in 1u8..=3 {
+			let hash = H256::from([i; 32]);
+			cache.insert(
+				&hash,
+				&[(
+					TransactionSigned::default(),
+					ReceiptInfo { transaction_hash: hash, ..Default::default() },
+				)],
+			);
+		}
+
+		cache.remove(&H256::from([1u8; 32]));
+		assert_eq!(cache.transaction_hashes_by_block_and_index.len(), 2);
+		assert_eq!(cache.receipts_by_hash.len(), 2);
+		assert_eq!(cache.signed_tx_by_hash.len(), 2);
+	}
+}
diff --git a/substrate/frame/revive/rpc/src/receipt_provider/db.rs b/substrate/frame/revive/rpc/src/receipt_provider/db.rs
new file mode 100644
index 0000000000000..63917d6193ea7
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/receipt_provider/db.rs
@@ -0,0 +1,216 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::*;
+use crate::BlockInfoProvider;
+use jsonrpsee::core::async_trait;
+use pallet_revive::evm::{ReceiptInfo, TransactionSigned};
+use sp_core::{H256, U256};
+use sqlx::{query, SqlitePool};
+use std::sync::Arc;
+
+/// A `[ReceiptProvider]` that stores receipts in a SQLite database.
+#[derive(Clone)]
+pub struct DBReceiptProvider {
+	/// The database pool.
+	pool: SqlitePool,
+	/// The block provider used to fetch blocks, and reconstruct receipts.
+	block_provider: Arc<dyn BlockInfoProvider>,
+	/// Whether or not we should write to the DB.
+	read_only: bool,
+}
+
+impl DBReceiptProvider {
+	/// Create a new `DBReceiptProvider` with the given database URL and block provider.
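`DBReceiptProvider` persists only the `transaction_hash -> (block_hash, transaction_index)` mapping and reconstructs full receipts by re-reading the block, which keeps the database small. The `sqlx` queries below would be consistent with a migration along these lines (hypothetical sketch; the actual migration file is not part of this excerpt):

```rust
// Assumed shape of the table the queries below rely on; column types are
// a guess consistent with the hex-encoded strings bound in those queries.
const MIGRATION: &str = r#"
CREATE TABLE IF NOT EXISTS transaction_hashes (
    transaction_hash TEXT NOT NULL PRIMARY KEY,
    block_hash TEXT NOT NULL,
    transaction_index INTEGER NOT NULL
);
"#;

fn main() {
    println!("{MIGRATION}");
}
```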
+ pub async fn new( + database_url: &str, + read_only: bool, + block_provider: Arc, + ) -> Result { + let pool = SqlitePool::connect(database_url).await?; + Ok(Self { pool, block_provider, read_only }) + } + + async fn fetch_row(&self, transaction_hash: &H256) -> Option<(H256, usize)> { + let transaction_hash = hex::encode(transaction_hash); + let result = query!( + r#" + SELECT block_hash, transaction_index + FROM transaction_hashes + WHERE transaction_hash = $1 + "#, + transaction_hash + ) + .fetch_optional(&self.pool) + .await + .ok()??; + + let block_hash = result.block_hash.parse::().ok()?; + let transaction_index = result.transaction_index.try_into().ok()?; + Some((block_hash, transaction_index)) + } +} + +#[async_trait] +impl ReceiptProvider for DBReceiptProvider { + async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) { + if self.read_only { + return + } + + let block_hash_str = hex::encode(block_hash); + for (_, receipt) in receipts { + let transaction_hash = hex::encode(receipt.transaction_hash); + let transaction_index = receipt.transaction_index.as_u32() as i32; + + let result = query!( + r#" + INSERT INTO transaction_hashes (transaction_hash, block_hash, transaction_index) + VALUES ($1, $2, $3) + + ON CONFLICT(transaction_hash) DO UPDATE SET + block_hash = EXCLUDED.block_hash, + transaction_index = EXCLUDED.transaction_index + "#, + transaction_hash, + block_hash_str, + transaction_index + ) + .execute(&self.pool) + .await; + + if let Err(err) = result { + log::error!( + "Error inserting transaction for block hash {block_hash:?}: {:?}", + err + ); + } + } + } + + async fn remove(&self, _block_hash: &H256) {} + + async fn receipts_count_per_block(&self, block_hash: &H256) -> Option { + let block_hash = hex::encode(block_hash); + let row = query!( + r#" + SELECT COUNT(*) as count + FROM transaction_hashes + WHERE block_hash = $1 + "#, + block_hash + ) + .fetch_one(&self.pool) + .await + .ok()?; + + let count = row.count as usize; + Some(count) + } + + async fn receipt_by_block_hash_and_index( + &self, + block_hash: &H256, + transaction_index: &U256, + ) -> Option { + let block = self.block_provider.block_by_hash(block_hash).await.ok()??; + let transaction_index: usize = transaction_index.as_usize(); // TODO: check for overflow + let (_, receipt) = + extract_receipts_from_transaction(&block, transaction_index).await.ok()?; + Some(receipt) + } + + async fn receipt_by_hash(&self, transaction_hash: &H256) -> Option { + let (block_hash, transaction_index) = self.fetch_row(transaction_hash).await?; + + let block = self.block_provider.block_by_hash(&block_hash).await.ok()??; + let (_, receipt) = + extract_receipts_from_transaction(&block, transaction_index).await.ok()?; + Some(receipt) + } + + async fn signed_tx_by_hash(&self, transaction_hash: &H256) -> Option { + let transaction_hash = hex::encode(transaction_hash); + let result = query!( + r#" + SELECT block_hash, transaction_index + FROM transaction_hashes + WHERE transaction_hash = $1 + "#, + transaction_hash + ) + .fetch_optional(&self.pool) + .await + .ok()??; + + let block_hash = result.block_hash.parse::().ok()?; + let transaction_index = result.transaction_index.try_into().ok()?; + + let block = self.block_provider.block_by_hash(&block_hash).await.ok()??; + let (signed_tx, _) = + extract_receipts_from_transaction(&block, transaction_index).await.ok()?; + Some(signed_tx) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test::MockBlockInfoProvider; + use 
pallet_revive::evm::{ReceiptInfo, TransactionSigned}; + use sp_core::H256; + use sqlx::SqlitePool; + + async fn setup_sqlite_provider(pool: SqlitePool) -> DBReceiptProvider { + DBReceiptProvider { + pool, + block_provider: Arc::new(MockBlockInfoProvider {}), + read_only: false, + } + } + + #[sqlx::test] + async fn test_insert(pool: SqlitePool) { + let provider = setup_sqlite_provider(pool).await; + let block_hash = H256::default(); + let receipts = vec![(TransactionSigned::default(), ReceiptInfo::default())]; + + provider.insert(&block_hash, &receipts).await; + let row = provider.fetch_row(&receipts[0].1.transaction_hash).await; + assert_eq!(row, Some((block_hash, 0))); + } + + #[sqlx::test] + async fn test_receipts_count_per_block(pool: SqlitePool) { + let provider = setup_sqlite_provider(pool).await; + let block_hash = H256::default(); + let receipts = vec![ + ( + TransactionSigned::default(), + ReceiptInfo { transaction_hash: H256::from([0u8; 32]), ..Default::default() }, + ), + ( + TransactionSigned::default(), + ReceiptInfo { transaction_hash: H256::from([1u8; 32]), ..Default::default() }, + ), + ]; + + provider.insert(&block_hash, &receipts).await; + let count = provider.receipts_count_per_block(&block_hash).await; + assert_eq!(count, Some(2)); + } +} diff --git a/substrate/frame/revive/rpc/src/rpc_health.rs b/substrate/frame/revive/rpc/src/rpc_health.rs index f94d4b82a80fb..35c5a588f284d 100644 --- a/substrate/frame/revive/rpc/src/rpc_health.rs +++ b/substrate/frame/revive/rpc/src/rpc_health.rs @@ -25,6 +25,10 @@ pub trait SystemHealthRpc { /// Proxy the substrate chain system_health RPC call. #[method(name = "system_health")] async fn system_health(&self) -> RpcResult; + + ///Returns the number of peers currently connected to the client. + #[method(name = "net_peerCount")] + async fn net_peer_count(&self) -> RpcResult; } pub struct SystemHealthRpcServerImpl { @@ -47,4 +51,9 @@ impl SystemHealthRpcServer for SystemHealthRpcServerImpl { should_have_peers: health.should_have_peers, }) } + + async fn net_peer_count(&self) -> RpcResult { + let health = self.client.system_health().await?; + Ok((health.peers as u64).into()) + } } diff --git a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs index ad34dbfdfb491..da60360d9e61b 100644 --- a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs +++ b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs @@ -142,6 +142,10 @@ pub trait EthRpc { transaction_hash: H256, ) -> RpcResult>; + /// Returns the current maxPriorityFeePerGas per gas in wei. + #[method(name = "eth_maxPriorityFeePerGas")] + async fn max_priority_fee_per_gas(&self) -> RpcResult; + /// Submits a raw transaction. For EIP-4844 transactions, the raw form must be the network form. /// This means it includes the blobs, KZG commitments, and KZG proofs. 
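The `HashesOrTransactionInfos` rename in the `rpc_types_gen.rs` hunk below keeps `#[serde(untagged)]`, so the wire format stays a bare JSON array and deserialization simply picks the first variant that fits. A minimal sketch of that pattern (assuming the `serde` crate with `derive`, plus `serde_json`; types are illustrative, not the crate's):

```rust
use serde::{Deserialize, Serialize};

// A bare array of hashes or a bare array of objects, distinguished by shape.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(untagged)]
enum HashesOrInfos {
    Hashes(Vec<String>),
    Infos(Vec<Info>),
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Info {
    hash: String,
    from: String,
}

fn main() {
    let hashes: HashesOrInfos = serde_json::from_str(r#"["0xaa", "0xbb"]"#).unwrap();
    assert_eq!(hashes, HashesOrInfos::Hashes(vec!["0xaa".into(), "0xbb".into()]));

    let infos: HashesOrInfos =
        serde_json::from_str(r#"[{"hash": "0xaa", "from": "0x01"}]"#).unwrap();
    assert!(matches!(infos, HashesOrInfos::Infos(_)));
}
```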
#[method(name = "eth_sendRawTransaction")] diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs index ed046cb4da445..b4b2c6ffcf17e 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types.rs @@ -192,7 +192,11 @@ impl GenericTransaction { value: Some(tx.value), to: Some(tx.to), gas: Some(tx.gas), - gas_price: Some(tx.max_fee_per_blob_gas), + gas_price: Some( + U256::from(crate::GAS_PRICE) + .saturating_add(tx.max_priority_fee_per_gas) + .max(tx.max_fee_per_blob_gas), + ), access_list: Some(tx.access_list), blob_versioned_hashes: tx.blob_versioned_hashes, max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas), @@ -209,7 +213,11 @@ impl GenericTransaction { value: Some(tx.value), to: tx.to, gas: Some(tx.gas), - gas_price: Some(tx.gas_price), + gas_price: Some( + U256::from(crate::GAS_PRICE) + .saturating_add(tx.max_priority_fee_per_gas) + .max(tx.max_fee_per_gas), + ), access_list: Some(tx.access_list), max_fee_per_gas: Some(tx.max_fee_per_gas), max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs index 1d65fdefdde68..5d31613ca314b 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -87,7 +87,7 @@ pub struct Block { /// Total difficulty #[serde(rename = "totalDifficulty", skip_serializing_if = "Option::is_none")] pub total_difficulty: Option, - pub transactions: H256OrTransactionInfo, + pub transactions: HashesOrTransactionInfos, /// Transactions root #[serde(rename = "transactionsRoot")] pub transactions_root: H256, @@ -357,15 +357,15 @@ pub enum BlockTag { Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, )] #[serde(untagged)] -pub enum H256OrTransactionInfo { +pub enum HashesOrTransactionInfos { /// Transaction hashes - H256s(Vec), + Hashes(Vec), /// Full transactions TransactionInfos(Vec), } -impl Default for H256OrTransactionInfo { +impl Default for HashesOrTransactionInfos { fn default() -> Self { - H256OrTransactionInfo::H256s(Default::default()) + HashesOrTransactionInfos::Hashes(Default::default()) } } diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index b24de61314f98..3bd4bde5679f2 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -193,8 +193,9 @@ where &HoldReason::CodeUploadDepositReserve.into(), &self.code_info.owner, deposit, - ) .map_err(|err| { log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner); - >::StorageDepositNotEnoughFunds + ) .map_err(|err| { + log::debug!(target: LOG_TARGET, "failed to hold store code deposit {deposit:?} for owner: {:?}: {err:?}", self.code_info.owner); + >::StorageDepositNotEnoughFunds })?; } From 6878ba1f399b628cf456ad3abfe72f2553422e1f Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Tue, 14 Jan 2025 16:52:49 +0200 Subject: [PATCH 8/8] Retry approval on availability failure if the check is still needed (#6807) Recovering the POV can fail in situation where the node just restart and the DHT topology wasn't fully discovered yet, so the current node can't connect to most of its Peers. 
This is bad because gossiping the assignment requires being connected to only a few peers, so the assignment still goes out; but since we can't recover the PoV we can't approve the candidate, and other nodes will see it as a no-show.

This gets worse when a lot of nodes restart at the same time: the network ends up with a lot of no-shows that are never covered. In that case it makes sense for nodes to retry approving the candidate at a later point in time, several times if needed, as long as the block containing the candidate hasn't been approved.

## TODO

- [x] Add a subsystem test.

---------

Signed-off-by: Alexandru Gheorghe
---
 polkadot/node/core/approval-voting/src/lib.rs   | 137 +++++++++++++++-
 .../node/core/approval-voting/src/tests.rs      | 146 ++++++++++++++++++
 .../subsystem-bench/src/lib/approval/mod.rs     |   2 +
 prdoc/pr_6807.prdoc                             |  19 +++
 4 files changed, 297 insertions(+), 7 deletions(-)
 create mode 100644 prdoc/pr_6807.prdoc

diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index 7cea22d1a6a7a..27361df373104 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -21,6 +21,7 @@
 //! of others. It uses this information to determine when candidates and blocks have
 //! been sufficiently approved to finalize.

+use futures_timer::Delay;
 use polkadot_node_primitives::{
 	approval::{
 		v1::{BlockApprovalMeta, DelayTranche},
@@ -122,6 +123,9 @@ const APPROVAL_CHECKING_TIMEOUT: Duration = Duration::from_secs(120);
 const WAIT_FOR_SIGS_TIMEOUT: Duration = Duration::from_millis(500);
 const APPROVAL_CACHE_SIZE: u32 = 1024;

+/// The maximum number of times we retry to approve a block if it is still needed.
+const MAX_APPROVAL_RETRIES: u32 = 16;
+
 const APPROVAL_DELAY: Tick = 2;
 pub(crate) const LOG_TARGET: &str = "parachain::approval-voting";

@@ -165,6 +169,10 @@ pub struct ApprovalVotingSubsystem {
 	metrics: Metrics,
 	clock: Arc,
 	spawner: Arc,
+	/// The maximum number of times we retry to approve a block if it is still needed and the
+	/// PoV fetch failed.
+	max_approval_retries: u32,
+	/// The backoff before we retry the approval.
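The two fields above define a bounded retry with a fixed backoff. With the defaults wired in by this patch (`MAX_APPROVAL_RETRIES = 16` and a backoff of `APPROVAL_CHECKING_TIMEOUT / 2`, i.e. 60s), a candidate whose PoV stays unavailable keeps being retried for roughly 16 minutes before the node gives up. A plain-std sketch of the control flow (the subsystem itself re-queues the work through its event loop rather than sleeping a thread):

```rust
use std::time::Duration;

// Try an operation, sleeping `backoff` between attempts, allowing up to
// `attempts_remaining` retries after the initial attempt.
fn retry<T, E>(
    mut attempts_remaining: u32,
    backoff: Duration,
    mut op: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    loop {
        match op() {
            Ok(v) => return Ok(v),
            Err(e) if attempts_remaining == 0 => return Err(e),
            Err(_) => {
                attempts_remaining -= 1;
                std::thread::sleep(backoff);
            },
        }
    }
}

fn main() {
    let mut calls = 0;
    let result: Result<u32, &str> = retry(3, Duration::from_millis(1), || {
        calls += 1;
        if calls < 3 { Err("unavailable") } else { Ok(calls) }
    });
    assert_eq!(result, Ok(3)); // succeeded on the second retry
}
```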
+ retry_backoff: Duration, } #[derive(Clone)] @@ -493,6 +501,8 @@ impl ApprovalVotingSubsystem { metrics, Arc::new(SystemClock {}), spawner, + MAX_APPROVAL_RETRIES, + APPROVAL_CHECKING_TIMEOUT / 2, ) } @@ -505,6 +515,8 @@ impl ApprovalVotingSubsystem { metrics: Metrics, clock: Arc, spawner: Arc, + max_approval_retries: u32, + retry_backoff: Duration, ) -> Self { ApprovalVotingSubsystem { keystore, @@ -515,6 +527,8 @@ impl ApprovalVotingSubsystem { metrics, clock, spawner, + max_approval_retries, + retry_backoff, } } @@ -706,18 +720,53 @@ enum ApprovalOutcome { TimedOut, } +#[derive(Clone)] +struct RetryApprovalInfo { + candidate: CandidateReceipt, + backing_group: GroupIndex, + executor_params: ExecutorParams, + core_index: Option, + session_index: SessionIndex, + attempts_remaining: u32, + backoff: Duration, +} + struct ApprovalState { validator_index: ValidatorIndex, candidate_hash: CandidateHash, approval_outcome: ApprovalOutcome, + retry_info: Option, } impl ApprovalState { fn approved(validator_index: ValidatorIndex, candidate_hash: CandidateHash) -> Self { - Self { validator_index, candidate_hash, approval_outcome: ApprovalOutcome::Approved } + Self { + validator_index, + candidate_hash, + approval_outcome: ApprovalOutcome::Approved, + retry_info: None, + } } fn failed(validator_index: ValidatorIndex, candidate_hash: CandidateHash) -> Self { - Self { validator_index, candidate_hash, approval_outcome: ApprovalOutcome::Failed } + Self { + validator_index, + candidate_hash, + approval_outcome: ApprovalOutcome::Failed, + retry_info: None, + } + } + + fn failed_with_retry( + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + retry_info: Option, + ) -> Self { + Self { + validator_index, + candidate_hash, + approval_outcome: ApprovalOutcome::Failed, + retry_info, + } } } @@ -757,6 +806,7 @@ impl CurrentlyCheckingSet { candidate_hash, validator_index, approval_outcome: ApprovalOutcome::TimedOut, + retry_info: None, }, Some(approval_state) => approval_state, } @@ -1271,18 +1321,19 @@ where validator_index, candidate_hash, approval_outcome, + retry_info, } ) = approval_state; if matches!(approval_outcome, ApprovalOutcome::Approved) { let mut approvals: Vec = relay_block_hashes - .into_iter() + .iter() .map(|block_hash| Action::IssueApproval( candidate_hash, ApprovalVoteRequest { validator_index, - block_hash, + block_hash: *block_hash, }, ) ) @@ -1290,6 +1341,43 @@ where actions.append(&mut approvals); } + if let Some(retry_info) = retry_info { + for block_hash in relay_block_hashes { + if overlayed_db.load_block_entry(&block_hash).map(|block_info| block_info.is_some()).unwrap_or(false) { + let sender = to_other_subsystems.clone(); + let spawn_handle = subsystem.spawner.clone(); + let metrics = subsystem.metrics.clone(); + let retry_info = retry_info.clone(); + let executor_params = retry_info.executor_params.clone(); + let candidate = retry_info.candidate.clone(); + + currently_checking_set + .insert_relay_block_hash( + candidate_hash, + validator_index, + block_hash, + async move { + launch_approval( + sender, + spawn_handle, + metrics, + retry_info.session_index, + candidate, + validator_index, + block_hash, + retry_info.backing_group, + executor_params, + retry_info.core_index, + retry_info, + ) + .await + }, + ) + .await?; + } + } + } + actions }, (block_hash, validator_index) = delayed_approvals_timers.select_next_some() => { @@ -1340,6 +1428,8 @@ where &mut approvals_cache, &mut subsystem.mode, actions, + subsystem.max_approval_retries, + subsystem.retry_backoff, ) 
.await? { @@ -1389,6 +1479,8 @@ pub async fn start_approval_worker< metrics, clock, spawner, + MAX_APPROVAL_RETRIES, + APPROVAL_CHECKING_TIMEOUT / 2, ); let backend = DbBackend::new(db.clone(), approval_voting.db_config); let spawner = approval_voting.spawner.clone(); @@ -1456,6 +1548,8 @@ async fn handle_actions< approvals_cache: &mut LruMap, mode: &mut Mode, actions: Vec, + max_approval_retries: u32, + retry_backoff: Duration, ) -> SubsystemResult { let mut conclude = false; let mut actions_iter = actions.into_iter(); @@ -1542,6 +1636,16 @@ async fn handle_actions< let sender = sender.clone(); let spawn_handle = spawn_handle.clone(); + let retry = RetryApprovalInfo { + candidate: candidate.clone(), + backing_group, + executor_params: executor_params.clone(), + core_index, + session_index: session, + attempts_remaining: max_approval_retries, + backoff: retry_backoff, + }; + currently_checking_set .insert_relay_block_hash( candidate_hash, @@ -1559,6 +1663,7 @@ async fn handle_actions< backing_group, executor_params, core_index, + retry, ) .await }, @@ -3329,6 +3434,7 @@ async fn launch_approval< backing_group: GroupIndex, executor_params: ExecutorParams, core_index: Option, + retry: RetryApprovalInfo, ) -> SubsystemResult> { let (a_tx, a_rx) = oneshot::channel(); let (code_tx, code_rx) = oneshot::channel(); @@ -3360,6 +3466,7 @@ async fn launch_approval< let candidate_hash = candidate.hash(); let para_id = candidate.descriptor.para_id(); + let mut next_retry = None; gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Recovering data."); let timer = metrics.time_recover_and_approve(); @@ -3388,7 +3495,6 @@ async fn launch_approval< let background = async move { // Force the move of the timer into the background task. let _timer = timer; - let available_data = match a_rx.await { Err(_) => return ApprovalState::failed(validator_index, candidate_hash), Ok(Ok(a)) => a, @@ -3399,10 +3505,27 @@ async fn launch_approval< target: LOG_TARGET, ?para_id, ?candidate_hash, + attempts_remaining = retry.attempts_remaining, "Data unavailable for candidate {:?}", (candidate_hash, candidate.descriptor.para_id()), ); - // do nothing. we'll just be a no-show and that'll cause others to rise up. + // Availability could fail if we did not discover much of the network, so + // let's back off and order the subsystem to retry at a later point if the + // approval is still needed, because no-show wasn't covered yet. 
+					if retry.attempts_remaining > 0 {
+						Delay::new(retry.backoff).await;
+						next_retry = Some(RetryApprovalInfo {
+							candidate,
+							backing_group,
+							executor_params,
+							core_index,
+							session_index,
+							attempts_remaining: retry.attempts_remaining - 1,
+							backoff: retry.backoff,
+						});
+					} else {
+						next_retry = None;
+					}
 					metrics_guard.take().on_approval_unavailable();
 				},
 				&RecoveryError::ChannelClosed => {
@@ -3433,7 +3556,7 @@
 					metrics_guard.take().on_approval_invalid();
 				},
 			}
-			return ApprovalState::failed(validator_index, candidate_hash)
+			return ApprovalState::failed_with_retry(validator_index, candidate_hash, next_retry)
 		},
 	};
diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs
index be569a1de3ecb..b72993fe1a94a 100644
--- a/polkadot/node/core/approval-voting/src/tests.rs
+++ b/polkadot/node/core/approval-voting/src/tests.rs
@@ -78,6 +78,9 @@ const SLOT_DURATION_MILLIS: u64 = 5000;
 
 const TIMEOUT: Duration = Duration::from_millis(2000);
 
+const NUM_APPROVAL_RETRIES: u32 = 3;
+const RETRY_BACKOFF: Duration = Duration::from_millis(300);
+
 #[derive(Clone)]
 struct TestSyncOracle {
 	flag: Arc<AtomicBool>,
@@ -573,6 +576,8 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
 			Metrics::default(),
 			clock.clone(),
 			Arc::new(SpawnGlue(pool)),
+			NUM_APPROVAL_RETRIES,
+			RETRY_BACKOFF,
 		),
 		assignment_criteria,
 		backend,
@@ -3202,6 +3207,20 @@ async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) {
 	);
 }
 
+async fn recover_available_data_failure(virtual_overseer: &mut VirtualOverseer) {
+	let available_data = RecoveryError::Unavailable;
+
+	assert_matches!(
+		virtual_overseer.recv().await,
+		AllMessages::AvailabilityRecovery(
+			AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx)
+		) => {
+			tx.send(Err(available_data)).unwrap();
+		},
+		"overseer did not receive recover available data message",
+	);
+}
+
 struct TriggersAssignmentConfig {
 	our_assigned_tranche: DelayTranche,
 	assign_validator_tranche: F1,
@@ -4791,6 +4810,133 @@ fn subsystem_relaunches_approval_work_on_restart() {
 	});
 }
 
+/// Test that we retry the approval of a candidate on availability failure, up to max retries.
+#[test]
+fn subsystem_relaunches_approval_work_on_availability_failure() {
+	let assignment_criteria = Box::new(MockAssignmentCriteria(
+		|| {
+			let mut assignments = HashMap::new();
+
+			let _ = assignments.insert(
+				CoreIndex(0),
+				approval_db::v2::OurAssignment {
+					cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact {
+						core_bitfield: vec![CoreIndex(0), CoreIndex(2)].try_into().unwrap(),
+					}),
+					tranche: 0,
+					validator_index: ValidatorIndex(0),
+					triggered: false,
+				}
+				.into(),
+			);
+
+			let _ = assignments.insert(
+				CoreIndex(1),
+				approval_db::v2::OurAssignment {
+					cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFDelay {
+						core_index: CoreIndex(1),
+					}),
+					tranche: 0,
+					validator_index: ValidatorIndex(0),
+					triggered: false,
+				}
+				.into(),
+			);
+			assignments
+		},
+		|_| Ok(0),
+	));
+	let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build();
+	let store = config.backend();
+
+	test_harness(config, |test_harness| async move {
+		let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness;
+
+		setup_overseer_with_blocks_with_two_assignments_triggered(
+			&mut virtual_overseer,
+			store,
+			&clock,
+			sync_oracle_handle,
+		)
+		.await;
+
+		// We have two candidates: for the first one we fail availability recovery up to
+		// max_retries, and for the second one we succeed on the last retry, so we should
+		// see its approval being distributed.
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment(
+				_,
+				_,
+			)) => {
+			}
+		);
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment(
+				_,
+				_
+			)) => {
+			}
+		);
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive {
+				exec_kind,
+				response_sender,
+				..
+			}) if exec_kind == PvfExecKind::Approval => {
+				response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default())))
+					.unwrap();
+			}
+		);
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => {
+				let _ = sender.send(Ok(ApprovalVotingParams {
+					max_approval_coalesce_count: 1,
+				}));
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_))
+		);
+
+		// Assert that there are no more messages being sent by the subsystem
+		assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none());
+
+		virtual_overseer
+	});
+}
+
 // Test that cached approvals, which are candidates that we approved but we didn't issue
 // the signature yet because we want to coalesce it with more candidate are sent after restart.
 #[test]
diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs
index 1b20960a3f8a6..5f1689cb226b3 100644
--- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs
+++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs
@@ -891,6 +891,8 @@ fn build_overseer(
 		state.approval_voting_parallel_metrics.approval_voting_metrics(),
 		Arc::new(system_clock.clone()),
 		Arc::new(SpawnGlue(spawn_task_handle.clone())),
+		1,
+		Duration::from_secs(1),
 	);
 
 	let approval_distribution = ApprovalDistribution::new_with_clock(
diff --git a/prdoc/pr_6807.prdoc b/prdoc/pr_6807.prdoc
new file mode 100644
index 0000000000000..b9564dfb2fe26
--- /dev/null
+++ b/prdoc/pr_6807.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Retry approval on availability failure if the check is still needed
+
+doc:
+  - audience: Node Dev
+    description: |
+      Recovering the PoV can fail when the node has just restarted and the DHT topology
+      has not been fully discovered yet, so the node cannot connect to most of its peers.
+      This is bad because gossiping an assignment only requires a connection to a few
+      peers, so other nodes will see our assignment but count us as a no-show once we
+      fail to approve the candidate. Fix this by retrying the approval for a fixed
+      number of attempts while the block still needs it.
+
+
+crates:
+  - name: polkadot-node-core-approval-voting
+    bump: minor
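
For context, the retry flow introduced by this patch boils down to a small bounded retry-with-backoff loop. The sketch below is illustrative only and assumes nothing beyond the Rust standard library; `RetryInfo`, `recover_pov`, and `recover_with_retry` are hypothetical stand-ins for `RetryApprovalInfo` and the availability-recovery call, not the subsystem's real API:

```rust
// Minimal sketch of the bounded retry-with-backoff pattern from this patch.
// `RetryInfo` mirrors the spirit of `RetryApprovalInfo`; `recover_pov` is a
// hypothetical stand-in for availability recovery, not the real subsystem API.
use std::{thread, time::Duration};

#[derive(Clone, Debug)]
struct RetryInfo {
    attempts_remaining: u32,
    backoff: Duration,
}

// Pretend recovery only succeeds once enough of the topology is discovered.
fn recover_pov(known_peers: usize) -> Result<&'static str, &'static str> {
    if known_peers >= 3 {
        Ok("available-data")
    } else {
        Err("unavailable")
    }
}

fn recover_with_retry(mut retry: RetryInfo, known_peers: &mut usize) -> Option<&'static str> {
    loop {
        match recover_pov(*known_peers) {
            Ok(data) => return Some(data),
            // Back off and try again only while attempts remain; otherwise give
            // up, which in the real subsystem means staying a no-show.
            Err(_) if retry.attempts_remaining > 0 => {
                thread::sleep(retry.backoff);
                retry.attempts_remaining -= 1;
                *known_peers += 1; // discovery progresses in the background
            },
            Err(_) => return None,
        }
    }
}

fn main() {
    // Small attempt count and short backoff, in the spirit of the test constants above.
    let retry = RetryInfo { attempts_remaining: 6, backoff: Duration::from_millis(10) };
    let mut known_peers = 0;
    assert_eq!(recover_with_retry(retry, &mut known_peers), Some("available-data"));
    println!("recovered once topology discovery caught up");
}
```

Unlike this in-place loop, the actual patch retries through the subsystem's main loop: the failed `launch_approval` task hands its `RetryApprovalInfo` back inside `ApprovalState`, and the check is re-spawned only if `load_block_entry` shows the relay block is still live, so no retry is issued for approvals that are no longer needed.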