From e5138efc118c124f6551fa58a4d5293b83ba25dd Mon Sep 17 00:00:00 2001
From: Fedor Sakharov
Date: Tue, 3 Dec 2019 17:49:07 +0300
Subject: [PATCH] Erasure encoding availability (#345)

* Erasure encoding availability initial commit
* Modifications to availability store to keep chunks as well as
  reconstructed blocks and extrinsics.
* Gossip messages containing signed erasure chunks.
* Requesting erasure chunks with polkadot-specific messages.
* Validation of erasure chunk messages.
* Apply suggestions from code review
Co-Authored-By: Luke Schoen
* Fix build after a merge
* Gossip erasure chunk messages under their own topic
* erasure_chunks should use the appropriate topic
* Updates Cargo.lock
* Fixes after merge
* Removes a couple of leftover pieces of code
* Fixes simple stuff from review
* Updates erasure and storage for more flexible logic
* Changes validation and candidate receipt production.
* Adds add_erasure_chunks method
* Fixes most of the nits
* Better validate_collation and validate_receipt functions
* Fixes the tests
* Apply suggestions from code review
Co-Authored-By: Robert Habermeier
* Removes unwrap() calls
* Removes ErasureChunks primitive
* Removes redundant fields from ErasureChunk struct
* AvailabilityStore should store CandidateReceipt
* Changes the way chunk messages are imported and validated.
* Availability store now stores a validator_index and n_validators for each relay_parent.
* Availability store now also stores candidate receipts.
* Removes importing chunks in the table and moves it into network gossip validation.
* Validation of erasure messages is done against receipts that are stored in the availability store.
* Correctly compute topics for erasure messages
* Removes an unused parameter
* Refactors availability db querying into a helper
* Adds the APIs described in the writeup
* Adds a runtime API to extract erasure roots from raw extrinsics.
* Adds a bare-bones BlockImport impl for availability store
* Adds the implementation of the availability worker
* Fix build after the merge with master.
* Make availability store API async
* Bring back the default wasmtime feature
* Lines width
* Bump runtime version
* Formatting and dead code elimination
* some style nits (#1)
* More nits and api cleanup
* Disable wasm CI for availability-store
* Another nit
* Formatting
---
 .gitlab-ci.yml                        |   1 -
 Cargo.lock                            |  33 +
 availability-store/Cargo.toml         |  15 +
 availability-store/src/lib.rs         | 484 ++++++++-----
 availability-store/src/store.rs       | 689 ++++++++++++++++++
 availability-store/src/worker.rs      | 998 ++++++++++++++++++++++++++
 cli/Cargo.toml                        |   3 +-
 collator/src/lib.rs                   |  15 +-
 erasure-coding/Cargo.toml             |   1 +
 erasure-coding/src/lib.rs             |  46 +-
 network/Cargo.toml                    |   1 +
 network/src/collator_pool.rs          |  12 +-
 network/src/gossip.rs                 |  94 ++-
 network/src/lib.rs                    | 135 ++--
 network/src/router.rs                 |  44 +-
 network/src/tests/mod.rs              |  53 +-
 network/src/tests/validation.rs       |  37 +-
 network/src/validation.rs             |   4 +-
 primitives/Cargo.toml                 |   2 +
 primitives/src/parachain.rs           | 112 ++-
 runtime/src/lib.rs                    |  15 +-
 runtime/src/parachains.rs             |  10 +
 runtime/src/registrar.rs              |   1 +
 service/src/lib.rs                    |  21 +-
 validation/Cargo.toml                 |   1 +
 validation/src/attestation_service.rs |  73 +-
 validation/src/collation.rs           | 271 +++++--
 validation/src/lib.rs                 | 130 ++--
 validation/src/shared_table/mod.rs    | 230 ++++--
 29 files changed, 2958 insertions(+), 573 deletions(-)
 create mode 100644 availability-store/src/store.rs
 create mode 100644 availability-store/src/worker.rs

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index eac6bbd19527..444fd1edb0d0 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -144,7 +144,6 @@ check-web-wasm: &test
   script:
     # WASM support is in progress. As more and more crates support WASM, we
    # should add entries here. See https://github.com/paritytech/polkadot/issues/625
-    - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path availability-store/Cargo.toml
     - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path executor/Cargo.toml
     - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path erasure-coding/Cargo.toml
     - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path parachain/Cargo.toml

diff --git a/Cargo.lock b/Cargo.lock
index 3b94aaa5a2c7..0a4e053f493a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -777,6 +777,19 @@ dependencies = [
  "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "derive_more"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "derive_more"
 version = "0.99.2"
@@ -3455,14 +3468,29 @@ dependencies = [
 name = "polkadot-availability-store"
 version = "0.7.5"
 dependencies = [
+ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures01 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb-memorydb 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "kvdb-rocksdb 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "polkadot-erasure-coding 0.7.5",
  "polkadot-primitives 0.7.5",
+ "polkadot-runtime 0.7.5",
+ "sc-client 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
+ "sc-client-api 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
+ "sc-keystore 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
+ "sc-network 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
+ "sp-api 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
+ "sp-blockchain 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
+ "sp-consensus 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
  "sp-core 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
+ "sp-runtime 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
+ "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -3505,6 +3533,7 @@ dependencies = [
 name = "polkadot-erasure-coding"
 version = "0.7.5"
 dependencies = [
+ "derive_more 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "polkadot-primitives 0.7.5",
  "reed-solomon-erasure 4.0.0 (git+https://github.com/paritytech/reed-solomon-erasure)",
@@ -3532,6 +3561,7 @@ dependencies = [
  "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "polkadot-availability-store 0.7.5",
+ "polkadot-erasure-coding 0.7.5",
  "polkadot-primitives 0.7.5",
  "polkadot-validation 0.7.5",
  "sc-client 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
@@ -3578,6 +3608,7 @@ dependencies = [
  "sp-runtime 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
  "sp-serializer 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
  "sp-std 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
+ "sp-trie 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
  "sp-version 2.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-master)",
 ]

@@ -3731,6 +3762,7 @@ dependencies = [
  "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "polkadot-availability-store 0.7.5",
+ "polkadot-erasure-coding 0.7.5",
  "polkadot-parachain 0.7.5",
  "polkadot-primitives 0.7.5",
  "polkadot-runtime 0.7.5",
@@ -6923,6 +6955,7 @@ dependencies = [
 "checksum curve25519-dalek 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8b7dcd30ba50cdf88b55b033456138b7c0ac4afdc436d82e1b79f370f24cc66d"
 "checksum data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f47ca1860a761136924ddd2422ba77b2ea54fe8cc75b9040804a0d9d32ad97"
 "checksum derive_more 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6d944ac6003ed268757ef1ee686753b57efc5fcf0ebe7b64c9fc81e7e32ff839"
+"checksum derive_more 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a141330240c921ec6d074a3e188a7c7ef95668bb95e7d44fa0e5778ec2a7afe"
 "checksum derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2159be042979966de68315bce7034bb000c775f22e3e834e1c52ff78f041cae8"
 "checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
 "checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5"

diff --git a/availability-store/Cargo.toml b/availability-store/Cargo.toml
index f1e50be1a777..6217d28f6f0f 100644
--- a/availability-store/Cargo.toml
+++ b/availability-store/Cargo.toml
@@ -7,9 +7,24 @@ edition = "2018"

 [dependencies]
 polkadot-primitives = { path = "../primitives" }
+polkadot-erasure-coding = { path = "../erasure-coding" }
+polkadot-runtime = { path = "../runtime" }
 parking_lot = "0.9.0"
+derive_more = "0.99"
 log = "0.4.8"
+futures01 = "0.1.17"
+futures = { package = "futures", version = "0.3.1", features = ["compat"] }
+tokio = "0.1.7"
+exit-future = "0.1"
 codec = { package = "parity-scale-codec", version = "1.1.0", default-features = false, features = ["derive"] }
+sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
+consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
+client = { package = "sc-client-api", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
+sc-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
+sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-master" }
+sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
+keystore = { package = "sc-keystore", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
 kvdb = "0.1.1"
 kvdb-memorydb = "0.1.2"

diff --git a/availability-store/src/lib.rs b/availability-store/src/lib.rs
index 9053bc67dc9c..7ccd87001333 100644
--- a/availability-store/src/lib.rs
+++ b/availability-store/src/lib.rs
@@ -14,28 +14,56 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

-//! Persistent database for parachain data: PoV block data and outgoing messages.
+//! Persistent database for parachain data: PoV block data, erasure-coding chunks and outgoing messages.
 //!
 //! This will be written into during the block validation pipeline, and queried
 //! by networking code in order to circulate required data and maintain availability
 //! of it.
-use codec::{Encode, Decode};
-use kvdb::{KeyValueDB, DBTransaction};
-use polkadot_primitives::Hash;
-use polkadot_primitives::parachain::{Id as ParaId, BlockData, Message};
+#![warn(missing_docs)]
+
+use futures::prelude::*;
+use futures::channel::{mpsc, oneshot};
+use keystore::KeyStorePtr;
+use polkadot_primitives::{
+    Hash, Block,
+    parachain::{
+        Id as ParaId, BlockData, CandidateReceipt, Message, AvailableMessages, ErasureChunk,
+        ParachainHost,
+    },
+};
+use sp_runtime::traits::{BlakeTwo256, Hash as HashT, ProvideRuntimeApi};
+use sp_blockchain::{Result as ClientResult};
+use client::{
+    BlockchainEvents, BlockBody,
+};
+use sp_api::ApiExt;
+
 use log::warn;
+use std::sync::Arc;
 use std::collections::HashSet;
 use std::path::PathBuf;
-use std::sync::Arc;
 use std::io;

-mod columns {
-    pub const DATA: Option<u32> = Some(0);
-    pub const META: Option<u32> = Some(1);
-    pub const NUM_COLUMNS: u32 = 2;
-}
+mod worker;
+mod store;
+
+pub use worker::AvailabilityBlockImport;
+
+use worker::{
+    Worker, WorkerHandle, Chunks, ParachainBlocks, WorkerMsg, MakeAvailable,
+};
+
+use store::{Store as InnerStore};
+
+/// Abstraction over an executor that lets you spawn tasks in the background.
+pub(crate) type TaskExecutor = Arc<
+    dyn futures01::future::Executor<Box<dyn futures01::Future<Item = (), Error = ()> + Send>>
+    + Send + Sync
+>;
+
+const LOG_TARGET: &str = "availability";

 /// Configuration for the availability store.
 pub struct Config {
@@ -45,67 +73,153 @@ pub struct Config {
     pub path: PathBuf,
 }

+/// Compute gossip topic for the erasure chunk messages given the relay parent,
+/// root and the chunk index.
+///
+/// Since at this point we are not able to use the [`network`] crate directly, yet
+/// both crates need to compute these topics, this lives here and not there.
+///
+/// [`network`]: ../polkadot_network/index.html
+pub fn erasure_coding_topic(relay_parent: Hash, erasure_root: Hash, index: u32) -> Hash {
+    let mut v = relay_parent.as_ref().to_vec();
+    v.extend(erasure_root.as_ref());
+    v.extend(&index.to_le_bytes()[..]);
+    v.extend(b"erasure_chunks");
+
+    BlakeTwo256::hash(&v[..])
+}
+
+/// A trait that provides a shim for the [`NetworkService`] trait.
+///
+/// Currently it is not possible to use the networking code in the availability store
+/// core directly, due to the circular dependencies it would require:
+///
+/// `availability-store` -> `network` -> `availability-store`
+///
+/// `availability-store` -> `network` -> `validation` -> `availability-store`
+///
+/// So we provide this shim trait that gets implemented for a wrapper newtype in
+/// the [`network`] module.
+///
+/// [`NetworkService`]: ../polkadot_network/trait.NetworkService.html
+/// [`network`]: ../polkadot_network/index.html
+pub trait ProvideGossipMessages {
+    /// Get a stream of gossip erasure chunk messages for a given topic.
+    ///
+    /// Each item is a tuple `(relay_parent, candidate_hash, erasure_chunk)`.
+    fn gossip_messages_for(
+        &self,
+        topic: Hash,
+    ) -> Box<dyn Stream<Item = (Hash, Hash, ErasureChunk)> + Send + Unpin>;
+
+    /// Gossip an erasure chunk message.
+    fn gossip_erasure_chunk(
+        &self,
+        relay_parent: Hash,
+        candidate_hash: Hash,
+        erasure_root: Hash,
+        chunk: ErasureChunk,
+    );
+}
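Editor's note: a minimal sketch of implementing the `ProvideGossipMessages` shim described above, such as a test harness might use. `NoopGossip` is hypothetical and not part of this patch; `futures::stream::empty` stands in for a real gossip network.

use futures::stream;

#[derive(Clone)]
struct NoopGossip;

impl ProvideGossipMessages for NoopGossip {
    fn gossip_messages_for(
        &self,
        _topic: Hash,
    ) -> Box<dyn Stream<Item = (Hash, Hash, ErasureChunk)> + Send + Unpin> {
        // No chunk messages ever arrive from this stub.
        Box::new(stream::empty())
    }

    fn gossip_erasure_chunk(&self, _: Hash, _: Hash, _: Hash, _chunk: ErasureChunk) {
        // A real implementation would hand the chunk to the gossip engine here.
    }
}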
 /// Some data to keep available about a parachain block candidate.
+#[derive(Debug)]
 pub struct Data {
     /// The relay chain parent hash this should be localized to.
     pub relay_parent: Hash,
     /// The parachain index for this candidate.
     pub parachain_id: ParaId,
-    /// Unique candidate receipt hash.
-    pub candidate_hash: Hash,
     /// Block data.
     pub block_data: BlockData,
     /// Outgoing message queues from execution of the block, if any.
-    ///
-    /// The tuple pairs the message queue root and the queue data.
-    pub outgoing_queues: Option<Vec<(Hash, Vec<Message>)>>,
-}
-
-fn block_data_key(relay_parent: &Hash, candidate_hash: &Hash) -> Vec<u8> {
-    (relay_parent, candidate_hash, 0i8).encode()
+    pub outgoing_queues: Option<AvailableMessages>,
 }

 /// Handle to the availability store.
+///
+/// This provides a proxying API that
+/// * in case of write operations provides async methods that send data to
+/// the background worker and resolve when that data is processed by the worker
+/// * in case of read operations queries the underlying storage synchronously.
 #[derive(Clone)]
 pub struct Store {
-    inner: Arc<dyn KeyValueDB>,
+    inner: InnerStore,
+    worker: Arc<WorkerHandle>,
+    to_worker: mpsc::UnboundedSender<WorkerMsg>,
 }

 impl Store {
-    /// Create a new `Store` with given config on disk.
-    #[cfg(not(target_os = "unknown"))]
-    pub fn new(config: Config) -> io::Result<Self> {
-        use kvdb_rocksdb::{Database, DatabaseConfig};
-        let mut db_config = DatabaseConfig::with_columns(Some(columns::NUM_COLUMNS));
-
-        if let Some(cache_size) = config.cache_size {
-            let mut memory_budget = std::collections::HashMap::new();
-            for i in 0..columns::NUM_COLUMNS {
-                memory_budget.insert(Some(i), cache_size / columns::NUM_COLUMNS as usize);
-            }
-
-            db_config.memory_budget = memory_budget;
-        }
-
-        let path = config.path.to_str().ok_or_else(|| io::Error::new(
-            io::ErrorKind::Other,
-            format!("Bad database path: {:?}", config.path),
-        ))?;
-
-        let db = Database::open(&db_config, &path)?;
-
-        Ok(Store {
-            inner: Arc::new(db),
+    /// Create a new `Store` with the given config on disk.
+    ///
+    /// Creating a store among other things starts a background worker thread which
+    /// handles most of the write operations to the storage.
+    pub fn new<PGM>(config: Config, gossip: PGM) -> io::Result<Self>
+        where PGM: ProvideGossipMessages + Send + Sync + Clone + 'static
+    {
+        let inner = InnerStore::new(config)?;
+        let worker = Arc::new(Worker::start(inner.clone(), gossip));
+        let to_worker = worker.to_worker().clone();
+
+        Ok(Self {
+            inner,
+            worker,
+            to_worker,
         })
     }

     /// Create a new `Store` in-memory. Useful for tests.
-    pub fn new_in_memory() -> Self {
-        Store {
-            inner: Arc::new(::kvdb_memorydb::create(columns::NUM_COLUMNS)),
+    ///
+    /// Creating a store among other things starts a background worker thread
+    /// which handles most of the write operations to the storage.
+    pub fn new_in_memory<PGM>(gossip: PGM) -> Self
+        where PGM: ProvideGossipMessages + Send + Sync + Clone + 'static
+    {
+        let inner = InnerStore::new_in_memory();
+        let worker = Arc::new(Worker::start(inner.clone(), gossip));
+        let to_worker = worker.to_worker().clone();
+
+        Self {
+            inner,
+            worker,
+            to_worker,
         }
     }

+    /// Obtain a [`BlockImport`] implementation to import blocks into this store.
+    ///
+    /// This block import will act upon all newly imported blocks, sending information
+    /// about parachain heads included in them to this `Store`'s background worker.
+    /// The user may create multiple instances of [`BlockImport`]s with this call.
+    ///
+    /// [`BlockImport`]: https://substrate.dev/rustdocs/v1.0/substrate_consensus_common/trait.BlockImport.html
+    pub fn block_import<I, P>(
+        &self,
+        wrapped_block_import: I,
+        client: Arc<P>,
+        thread_pool: TaskExecutor,
+        keystore: KeyStorePtr,
+    ) -> ClientResult<AvailabilityBlockImport<I, P>>
+    where
+        P: ProvideRuntimeApi + BlockchainEvents<Block> + BlockBody<Block> + Send + Sync + 'static,
+        P::Api: ParachainHost<Block>,
+        P::Api: ApiExt<Block>,
+    {
+        let to_worker = self.to_worker.clone();
+
+        let import = AvailabilityBlockImport::new(
+            self.inner.clone(),
+            client,
+            wrapped_block_import,
+            thread_pool,
+            keystore,
+            to_worker,
+        );
+
+        Ok(import)
+    }
+
     /// Make some data available provisionally.
     ///
     /// Validators with the responsibility of maintaining availability
@@ -117,174 +231,164 @@ impl Store {
     /// to be present with the exception of the case where there is no message data
     /// due to the block's invalidity. Determination of invalidity is beyond the
     /// scope of this function.
-    pub fn make_available(&self, data: Data) -> io::Result<()> {
-        let mut tx = DBTransaction::new();
-
-        // note the meta key.
-        let mut v = match self.inner.get(columns::META, data.relay_parent.as_ref()) {
-            Ok(Some(raw)) => Vec::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed"),
-            Ok(None) => Vec::new(),
-            Err(e) => {
-                warn!(target: "availability", "Error reading from availability store: {:?}", e);
-                Vec::new()
-            }
-        };
-
-        v.push(data.candidate_hash);
-        tx.put_vec(columns::META, &data.relay_parent[..], v.encode());
-
-        tx.put_vec(
-            columns::DATA,
-            block_data_key(&data.relay_parent, &data.candidate_hash).as_slice(),
-            data.block_data.encode()
-        );
-
-        if let Some(outgoing_queues) = data.outgoing_queues {
-            // This is kept forever and not pruned.
-            for (root, messages) in outgoing_queues {
-                tx.put_vec(
-                    columns::DATA,
-                    root.as_ref(),
-                    messages.encode(),
-                );
-            }
-
+    ///
+    /// This method will send the `Data` to the background worker, allowing the caller to
+    /// asynchronously wait for the result.
+    pub async fn make_available(&self, data: Data) -> io::Result<()> {
+        let (s, r) = oneshot::channel();
+        let msg = WorkerMsg::MakeAvailable(MakeAvailable {
+            data,
+            result: s,
+        });
+
+        let _ = self.to_worker.unbounded_send(msg);
+
+        if let Ok(Ok(())) = r.await {
+            Ok(())
+        } else {
+            Err(io::Error::new(io::ErrorKind::Other, format!("making data available failed")))
         }
-        self.inner.write(tx)
     }

-    /// Note that a set of candidates have been included in a finalized block with given hash and parent hash.
-    pub fn candidates_finalized(&self, parent: Hash, finalized_candidates: HashSet<Hash>) -> io::Result<()> {
-        let mut tx = DBTransaction::new();
-
-        let v = match self.inner.get(columns::META, &parent[..]) {
-            Ok(Some(raw)) => Vec::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed"),
-            Ok(None) => Vec::new(),
-            Err(e) => {
-                warn!(target: "availability", "Error reading from availability store: {:?}", e);
-                Vec::new()
-            }
-        };
-        tx.delete(columns::META, &parent[..]);
-
-        for candidate_hash in v {
-            if !finalized_candidates.contains(&candidate_hash) {
-                tx.delete(columns::DATA, block_data_key(&parent, &candidate_hash).as_slice());
-            }
-        }
+    /// Get a set of all chunks we are waiting for, grouped by
+    /// `(relay_parent, erasure_root, candidate_hash, our_id)`.
+    pub fn awaited_chunks(&self) -> Option<HashSet<(Hash, Hash, Hash, u32)>> {
+        self.inner.awaited_chunks()
+    }

-        self.inner.write(tx)
+    /// Query which candidates were included in the relay chain block, by the block's parent.
+    pub fn get_candidates_in_relay_block(&self, relay_block: &Hash) -> Option<Vec<Hash>> {
+        self.inner.get_candidates_in_relay_block(relay_block)
     }
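Editor's note: a hedged usage sketch of the async front-end above; `store` and the argument values are assumed to be supplied by the caller, and error handling is elided.

// Make a parachain block's data available and wait for the background
// worker to acknowledge it.
async fn make_block_available(
    store: &Store,
    relay_parent: Hash,
    parachain_id: ParaId,
    block_data: BlockData,
) -> std::io::Result<()> {
    store.make_available(Data {
        relay_parent,
        parachain_id,
        block_data,
        outgoing_queues: None, // no outgoing messages in this example
    }).await
}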
-    /// Query block data.
-    pub fn block_data(&self, relay_parent: Hash, candidate_hash: Hash) -> Option<BlockData> {
-        let encoded_key = block_data_key(&relay_parent, &candidate_hash);
-        match self.inner.get(columns::DATA, &encoded_key[..]) {
-            Ok(Some(raw)) => Some(
-                BlockData::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed")
-            ),
-            Ok(None) => None,
-            Err(e) => {
-                warn!(target: "availability", "Error reading from availability store: {:?}", e);
-                None
-            }
-        }
+    /// Make a validator's index and the number of validators at a relay parent available.
+    ///
+    /// This information is needed before `add_candidates_in_relay_block` is called,
+    /// since that call forms the awaited frontier of chunks.
+    /// In the current implementation this function is called in `get_or_instantiate` at
+    /// the start of the parachain agreement process on top of some parent hash.
+    pub fn add_validator_index_and_n_validators(
+        &self,
+        relay_parent: &Hash,
+        validator_index: u32,
+        n_validators: u32,
+    ) -> io::Result<()> {
+        self.inner.add_validator_index_and_n_validators(
+            relay_parent,
+            validator_index,
+            n_validators,
+        )
     }

-    /// Query message queue data by message queue root hash.
-    pub fn queue_by_root(&self, queue_root: &Hash) -> Option<Vec<Message>> {
-        match self.inner.get(columns::DATA, queue_root.as_ref()) {
-            Ok(Some(raw)) => Some(
-                <_>::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed")
-            ),
-            Ok(None) => None,
-            Err(e) => {
-                warn!(target: "availability", "Error reading from availability store: {:?}", e);
-                None
-            }
-        }
+    /// Query a validator's index and n_validators by relay parent.
+    pub fn get_validator_index_and_n_validators(&self, relay_parent: &Hash) -> Option<(u32, u32)> {
+        self.inner.get_validator_index_and_n_validators(relay_parent)
     }
-}

-#[cfg(test)]
-mod tests {
-    use super::*;
+    /// Adds an erasure chunk to storage.
+    ///
+    /// The chunk should be checked for validity against the root of encoding
+    /// and its proof prior to calling this.
+    ///
+    /// This method will send the chunk to the background worker, allowing the caller to
+    /// asynchronously wait for the result.
+    pub async fn add_erasure_chunk(
+        &self,
+        relay_parent: Hash,
+        receipt: CandidateReceipt,
+        chunk: ErasureChunk,
+    ) -> io::Result<()> {
+        self.add_erasure_chunks(relay_parent, receipt, vec![chunk]).await
+    }

-    #[test]
-    fn finalization_removes_unneeded() {
-        let relay_parent = [1; 32].into();
+    /// Adds a set of erasure chunks to storage.
+    ///
+    /// The chunks should be checked for validity against the root of encoding
+    /// and their proofs prior to calling this.
+    ///
+    /// This method will send the chunks to the background worker, allowing the caller to
+    /// asynchronously wait for the result.
+    pub async fn add_erasure_chunks<I>(
+        &self,
+        relay_parent: Hash,
+        receipt: CandidateReceipt,
+        chunks: I,
+    ) -> io::Result<()>
+        where I: IntoIterator<Item = ErasureChunk>
+    {
+        self.add_candidate(relay_parent, receipt.clone()).await?;
+        let (s, r) = oneshot::channel();
+        let chunks = chunks.into_iter().collect();
+        let candidate_hash = receipt.hash();
+        let msg = WorkerMsg::Chunks(Chunks {
+            relay_parent,
+            candidate_hash,
+            chunks,
+            result: s,
+        });

-        let para_id_1 = 5.into();
-        let para_id_2 = 6.into();
+        let _ = self.to_worker.unbounded_send(msg);

-        let candidate_1 = [2; 32].into();
-        let candidate_2 = [3; 32].into();
+        if let Ok(Ok(())) = r.await {
+            Ok(())
+        } else {
+            Err(io::Error::new(io::ErrorKind::Other, format!("adding erasure chunks failed")))
+        }
+    }

-        let block_data_1 = BlockData(vec![1, 2, 3]);
-        let block_data_2 = BlockData(vec![4, 5, 6]);
+    /// Queries an erasure chunk by the relay parent, the block data hash and the chunk index.
+    pub fn get_erasure_chunk(
+        &self,
+        relay_parent: &Hash,
+        block_data_hash: Hash,
+        index: usize,
+    ) -> Option<ErasureChunk> {
+        self.inner.get_erasure_chunk(relay_parent, block_data_hash, index)
+    }

-        let store = Store::new_in_memory();
-        store.make_available(Data {
-            relay_parent,
-            parachain_id: para_id_1,
-            candidate_hash: candidate_1,
-            block_data: block_data_1.clone(),
-            outgoing_queues: None,
-        }).unwrap();
+    /// Stores a candidate receipt.
+    pub async fn add_candidate(
+        &self,
+        relay_parent: Hash,
+        receipt: CandidateReceipt,
+    ) -> io::Result<()> {
+        let (s, r) = oneshot::channel();

-        store.make_available(Data {
+        let msg = WorkerMsg::ParachainBlocks(ParachainBlocks {
             relay_parent,
-            parachain_id: para_id_2,
-            candidate_hash: candidate_2,
-            block_data: block_data_2.clone(),
-            outgoing_queues: None,
-        }).unwrap();
-
-        assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1);
-        assert_eq!(store.block_data(relay_parent, candidate_2).unwrap(), block_data_2);
+            blocks: vec![(receipt, None)],
+            result: s,
+        });

-        store.candidates_finalized(relay_parent, [candidate_1].iter().cloned().collect()).unwrap();
+        let _ = self.to_worker.unbounded_send(msg);

-        assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1);
-        assert!(store.block_data(relay_parent, candidate_2).is_none());
+        if let Ok(Ok(())) = r.await {
+            Ok(())
+        } else {
+            Err(io::Error::new(io::ErrorKind::Other, format!("adding candidate failed")))
+        }
     }

-    #[test]
-    fn queues_available_by_queue_root() {
-        let relay_parent = [1; 32].into();
-        let para_id = 5.into();
-        let candidate = [2; 32].into();
-        let block_data = BlockData(vec![1, 2, 3]);
-
-        let message_queue_root_1 = [0x42; 32].into();
-        let message_queue_root_2 = [0x43; 32].into();
-
-        let message_a = Message(vec![1, 2, 3, 4]);
-        let message_b = Message(vec![4, 5, 6, 7]);
+    /// Queries a candidate receipt by its hash.
+    pub fn get_candidate(&self, candidate_hash: &Hash) -> Option<CandidateReceipt> {
+        self.inner.get_candidate(candidate_hash)
+    }

-        let outgoing_queues = vec![
-            (message_queue_root_1, vec![message_a.clone()]),
-            (message_queue_root_2, vec![message_b.clone()]),
-        ];
+    /// Query block data.
+    pub fn block_data(&self, relay_parent: Hash, block_data_hash: Hash) -> Option<BlockData> {
+        self.inner.block_data(relay_parent, block_data_hash)
+    }

-        let store = Store::new_in_memory();
-        store.make_available(Data {
-            relay_parent,
-            parachain_id: para_id,
-            candidate_hash: candidate,
-            block_data: block_data.clone(),
-            outgoing_queues: Some(outgoing_queues),
-        }).unwrap();
-
-        assert_eq!(
-            store.queue_by_root(&message_queue_root_1),
-            Some(vec![message_a]),
-        );
+    /// Query block data by the corresponding candidate receipt's hash.
+    pub fn block_data_by_candidate(&self, relay_parent: Hash, candidate_hash: Hash)
+        -> Option<BlockData>
+    {
+        self.inner.block_data_by_candidate(relay_parent, candidate_hash)
+    }

-        assert_eq!(
-            store.queue_by_root(&message_queue_root_2),
-            Some(vec![message_b]),
-        );
+    /// Query message queue data by message queue root hash.
+    pub fn queue_by_root(&self, queue_root: &Hash) -> Option<Vec<Message>> {
+        self.inner.queue_by_root(queue_root)
     }
 }

diff --git a/availability-store/src/store.rs b/availability-store/src/store.rs
new file mode 100644
index 000000000000..5458a64b1712
--- /dev/null
+++ b/availability-store/src/store.rs
@@ -0,0 +1,689 @@
+// Copyright 2018 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use kvdb_rocksdb::{Database, DatabaseConfig};
+use kvdb::{KeyValueDB, DBTransaction};
+use codec::{Encode, Decode};
+use polkadot_erasure_coding::{self as erasure};
+use polkadot_primitives::{
+    Hash,
+    parachain::{
+        BlockData, CandidateReceipt, Message, ErasureChunk
+    },
+};
+
+use log::{trace, warn};
+use std::collections::HashSet;
+use std::sync::Arc;
+use std::iter::FromIterator;
+use std::io;
+
+use crate::{LOG_TARGET, Data, Config};
+
+mod columns {
+    pub const DATA: Option<u32> = Some(0);
+    pub const META: Option<u32> = Some(1);
+    pub const NUM_COLUMNS: u32 = 2;
+}
+
+#[derive(Clone)]
+pub struct Store {
+    inner: Arc<dyn KeyValueDB>,
+}
+
+fn block_data_key(relay_parent: &Hash, block_data_hash: &Hash) -> Vec<u8> {
+    (relay_parent, block_data_hash, 0i8).encode()
+}
+
+fn erasure_chunks_key(relay_parent: &Hash, block_data_hash: &Hash) -> Vec<u8> {
+    (relay_parent, block_data_hash, 1i8).encode()
+}
+
+fn awaited_chunks_key() -> Vec<u8> {
+    "awaited_chunks_key".encode()
+}
+
+fn available_chunks_key(relay_parent: &Hash, erasure_root: &Hash) -> Vec<u8> {
+    (relay_parent, erasure_root, 2i8).encode()
+}
+
+fn block_to_candidate_key(block_data_hash: &Hash) -> Vec<u8> {
+    (block_data_hash, 1i8).encode()
+}
+
+fn candidate_key(candidate_hash: &Hash) -> Vec<u8> {
+    (candidate_hash, 2i8).encode()
+}
+
+fn validator_index_and_n_validators_key(relay_parent: &Hash) -> Vec<u8> {
+    (relay_parent, 3i8).encode()
+}
+
+fn candidates_in_relay_chain_block_key(relay_block: &Hash) -> Vec<u8> {
+    (relay_block, 4i8).encode()
+}
+
+fn erasure_roots_in_relay_chain_block_key(relay_block: &Hash) -> Vec<u8> {
+    (relay_block, 5i8).encode()
+}
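Editor's note: the key helpers above namespace entries by SCALE-encoding tuples that end in a small integer discriminant, so entries of different kinds for the same hashes never collide. A minimal illustration (the function is hypothetical):

fn demo_distinct_keys(relay_parent: Hash, block_data_hash: Hash) {
    let data_key = (relay_parent, block_data_hash, 0i8).encode();
    let chunks_key = (relay_parent, block_data_hash, 1i8).encode();
    // Same hashes, different discriminant byte, therefore different database keys.
    assert_ne!(data_key, chunks_key);
}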
+impl Store {
+    /// Create a new `Store` with the given config on disk.
+    pub(super) fn new(config: Config) -> io::Result<Self> {
+        let mut db_config = DatabaseConfig::with_columns(Some(columns::NUM_COLUMNS));
+
+        if let Some(cache_size) = config.cache_size {
+            let mut memory_budget = std::collections::HashMap::new();
+            for i in 0..columns::NUM_COLUMNS {
+                memory_budget.insert(Some(i), cache_size / columns::NUM_COLUMNS as usize);
+            }
+
+            db_config.memory_budget = memory_budget;
+        }
+
+        let path = config.path.to_str().ok_or_else(|| io::Error::new(
+            io::ErrorKind::Other,
+            format!("Bad database path: {:?}", config.path),
+        ))?;
+
+        let db = Database::open(&db_config, &path)?;
+
+        Ok(Store {
+            inner: Arc::new(db),
+        })
+    }
+
+    /// Create a new `Store` in-memory. Useful for tests.
+    pub(super) fn new_in_memory() -> Self {
+        Store {
+            inner: Arc::new(::kvdb_memorydb::create(columns::NUM_COLUMNS)),
+        }
+    }
+
+    /// Make some data available provisionally.
+    pub(crate) fn make_available(&self, data: Data) -> io::Result<()> {
+        let mut tx = DBTransaction::new();
+
+        // note the meta key.
+        let mut v = self.query_inner(columns::META, data.relay_parent.as_ref()).unwrap_or(Vec::new());
+        v.push(data.block_data.hash());
+        tx.put_vec(columns::META, &data.relay_parent[..], v.encode());
+
+        tx.put_vec(
+            columns::DATA,
+            block_data_key(&data.relay_parent, &data.block_data.hash()).as_slice(),
+            data.block_data.encode()
+        );
+
+        if let Some(outgoing_queues) = data.outgoing_queues {
+            // This is kept forever and not pruned.
+            for (root, messages) in outgoing_queues.0 {
+                tx.put_vec(
+                    columns::DATA,
+                    root.as_ref(),
+                    messages.encode(),
+                );
+            }
+
+        }
+
+        self.inner.write(tx)
+    }
+
+    /// Get a set of all chunks we are waiting for, grouped by
+    /// `(relay_parent, erasure_root, candidate_hash, our_id)`.
+    pub fn awaited_chunks(&self) -> Option<HashSet<(Hash, Hash, Hash, u32)>> {
+        self.query_inner(columns::META, &awaited_chunks_key()).map(|vec: Vec<(Hash, Hash, Hash, u32)>| {
+            HashSet::from_iter(vec.into_iter())
+        })
+    }
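Editor's note: a sketch of the call-order protocol documented on the method below; the validator's position at the relay parent must be recorded first, otherwise the awaited frontier of chunks cannot be formed. The helper name is hypothetical.

fn track_relay_block(
    store: &Store,
    relay_parent: Hash,
    validator_index: u32,
    n_validators: u32,
    candidates: Vec<Hash>,
) -> io::Result<()> {
    // Must come first: positions the validator at this relay parent.
    store.add_validator_index_and_n_validators(&relay_parent, validator_index, n_validators)?;
    // Now the awaited frontier can be extended for these candidates.
    store.add_candidates_in_relay_block(&relay_parent, candidates)
}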
+    /// Adds a set of candidate hashes that were included in a relay block by the block's parent.
+    ///
+    /// If we already possess the receipts for these candidates _and_ our position at the specified
+    /// relay parent, the awaited frontier of the erasure chunks will also be extended.
+    ///
+    /// This method modifies the erasure chunks awaited frontier by adding this validator's
+    /// chunks from `candidates` to it. In order to do so, the information about this validator's
+    /// position at parent `relay_parent` should be known to the store prior to calling this
+    /// method; in other words, `add_validator_index_and_n_validators` should be called for
+    /// the given `relay_parent` before calling this function.
+    pub(crate) fn add_candidates_in_relay_block(
+        &self,
+        relay_parent: &Hash,
+        candidates: Vec<Hash>,
+    ) -> io::Result<()> {
+        let mut tx = DBTransaction::new();
+        let dbkey = candidates_in_relay_chain_block_key(relay_parent);
+
+        if let Some((validator_index, _)) = self.get_validator_index_and_n_validators(relay_parent) {
+            let candidates = candidates.clone();
+            let awaited_frontier: Vec<(Hash, Hash, Hash, u32)> = self
+                .query_inner(columns::META, &awaited_chunks_key())
+                .unwrap_or_else(|| Vec::new());
+
+            let mut awaited_frontier: HashSet<(Hash, Hash, Hash, u32)> =
+                HashSet::from_iter(awaited_frontier.into_iter());
+
+            awaited_frontier.extend(candidates.into_iter().filter_map(|candidate| {
+                self.get_candidate(&candidate)
+                    .map(|receipt| (relay_parent.clone(), receipt.erasure_root, candidate, validator_index))
+            }));
+            let awaited_frontier = Vec::from_iter(awaited_frontier.into_iter());
+            tx.put_vec(columns::META, &awaited_chunks_key(), awaited_frontier.encode());
+        }
+        tx.put_vec(columns::DATA, &dbkey, candidates.encode());
+
+        self.inner.write(tx)
+    }
+
+    /// Query which candidates were included in the relay chain block, by the block's parent.
+    pub fn get_candidates_in_relay_block(&self, relay_block: &Hash) -> Option<Vec<Hash>> {
+        let dbkey = candidates_in_relay_chain_block_key(relay_block);
+
+        self.query_inner(columns::DATA, &dbkey)
+    }
+
+    /// Adds a set of erasure chunk roots that were included in a relay block by the block's parent.
+    pub(crate) fn add_erasure_roots_in_relay_block(
+        &self,
+        relay_parent: &Hash,
+        erasure_roots: Vec<Hash>,
+    ) -> io::Result<()> {
+        let mut tx = DBTransaction::new();
+        let dbkey = erasure_roots_in_relay_chain_block_key(relay_parent);
+
+        tx.put_vec(columns::DATA, &dbkey, erasure_roots.encode());
+
+        self.inner.write(tx)
+    }
+
+    /// Make a validator's index and the number of validators at a relay parent available.
+    pub(crate) fn add_validator_index_and_n_validators(
+        &self,
+        relay_parent: &Hash,
+        validator_index: u32,
+        n_validators: u32,
+    ) -> io::Result<()> {
+        let mut tx = DBTransaction::new();
+        let dbkey = validator_index_and_n_validators_key(relay_parent);
+
+        tx.put_vec(columns::META, &dbkey, (validator_index, n_validators).encode());
+
+        self.inner.write(tx)
+    }
+
+    /// Query a validator's index and n_validators by relay parent.
+    pub fn get_validator_index_and_n_validators(&self, relay_parent: &Hash) -> Option<(u32, u32)> {
+        let dbkey = validator_index_and_n_validators_key(relay_parent);
+
+        self.query_inner(columns::META, &dbkey)
+    }
+
+    /// Add a set of chunks.
+    ///
+    /// The same as `add_erasure_chunk` but adds a set of chunks in one atomic transaction.
+    /// Checks that all chunks have the same `relay_parent`, `block_data_hash` and `parachain_id` fields.
+    pub fn add_erasure_chunks<I>(
+        &self,
+        n_validators: u32,
+        relay_parent: &Hash,
+        candidate_hash: &Hash,
+        chunks: I,
+    ) -> io::Result<()>
+        where I: IntoIterator<Item = ErasureChunk>
+    {
+        if let Some(receipt) = self.get_candidate(candidate_hash) {
+            let mut tx = DBTransaction::new();
+            let dbkey = erasure_chunks_key(relay_parent, &receipt.block_data_hash);
+
+            let mut v = self.query_inner(columns::DATA, &dbkey).unwrap_or(Vec::new());
+
+            let av_chunks_key = available_chunks_key(relay_parent, &receipt.erasure_root);
+            let mut have_chunks = self.query_inner(columns::META, &av_chunks_key).unwrap_or(Vec::new());
+
+            let awaited_frontier: Option<Vec<(Hash, Hash, Hash, u32)>> = self.query_inner(
+                columns::META,
+                &awaited_chunks_key()
+            );
+
+            for chunk in chunks.into_iter() {
+                if !have_chunks.contains(&chunk.index) {
+                    have_chunks.push(chunk.index);
+                }
+                v.push(chunk);
+            }
+
+            if let Some(mut awaited_frontier) = awaited_frontier {
+                awaited_frontier.retain(|&(p, r, c, index)| {
+                    !(
+                        *relay_parent == p &&
+                        r == receipt.erasure_root &&
+                        c == receipt.hash() &&
+                        have_chunks.contains(&index)
+                    )
+                });
+                tx.put_vec(columns::META, &awaited_chunks_key(), awaited_frontier.encode());
+            }
+
+            // If there are no block data and messages in the store at this point,
+            // check that they can be reconstructed now and add them to the store if they can.
+            if let Ok(None) = self.inner.get(
+                columns::DATA,
+                &block_data_key(&relay_parent, &receipt.block_data_hash)
+            ) {
+                if let Ok((block_data, outgoing_queues)) = erasure::reconstruct(
+                    n_validators as usize,
+                    v.iter().map(|chunk| (chunk.chunk.as_ref(), chunk.index as usize))) {
+                    self.make_available(Data {
+                        relay_parent: *relay_parent,
+                        parachain_id: receipt.parachain_index,
+                        block_data,
+                        outgoing_queues,
+                    })?;
+                }
+            }
+
+            tx.put_vec(columns::DATA, &dbkey, v.encode());
+            tx.put_vec(columns::META, &av_chunks_key, have_chunks.encode());
+
+            self.inner.write(tx)
+        } else {
+            trace!(target: LOG_TARGET, "Candidate with hash {} not found", candidate_hash);
+            Ok(())
+        }
+    }
+
+    /// Queries an erasure chunk by the relay parent, the block data hash and the chunk index.
+    pub fn get_erasure_chunk(
+        &self,
+        relay_parent: &Hash,
+        block_data_hash: Hash,
+        index: usize,
+    ) -> Option<ErasureChunk> {
+        self.query_inner(columns::DATA, &erasure_chunks_key(&relay_parent, &block_data_hash))
+            .and_then(|chunks: Vec<ErasureChunk>| {
+                chunks.iter()
+                    .find(|chunk: &&ErasureChunk| chunk.index == index as u32)
+                    .map(|chunk| chunk.clone())
+            })
+    }
+
+    /// Stores a candidate receipt.
+    pub fn add_candidate(&self, receipt: &CandidateReceipt) -> io::Result<()> {
+        let dbkey = candidate_key(&receipt.hash());
+        let mut tx = DBTransaction::new();
+
+        tx.put_vec(columns::DATA, &dbkey, receipt.encode());
+        tx.put_vec(columns::META, &block_to_candidate_key(&receipt.block_data_hash), receipt.hash().encode());
+
+        self.inner.write(tx)
+    }
+
+    /// Queries a candidate receipt by its hash.
+    pub fn get_candidate(&self, candidate_hash: &Hash) -> Option<CandidateReceipt> {
+        self.query_inner(columns::DATA, &candidate_key(candidate_hash))
+    }
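Editor's note: a hedged round-trip sketch of the erasure coding that `add_erasure_chunks` relies on, assuming the `obtain_chunks`/`reconstruct` API exactly as used elsewhere in this patch.

fn erasure_roundtrip(n_validators: usize, block_data: &BlockData) {
    // Encode the block data (no outgoing messages here) into one chunk per validator.
    let chunks = erasure::obtain_chunks(n_validators, block_data, None).unwrap();
    // Feeding indexed chunks back rebuilds the original data; a sufficient
    // subset of the chunks would also do.
    let (reconstructed, _messages) = erasure::reconstruct(
        n_validators,
        chunks.iter().enumerate().map(|(index, chunk)| (&chunk[..], index)),
    ).unwrap();
    assert_eq!(&reconstructed, block_data);
}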
+    /// Note that a set of candidates has been included in a finalized block with given hash and parent hash.
+    pub fn candidates_finalized(
+        &self,
+        parent: Hash,
+        finalized_candidates: HashSet<Hash>,
+    ) -> io::Result<()> {
+        let mut tx = DBTransaction::new();
+
+        let v = self.query_inner(columns::META, &parent[..]).unwrap_or(Vec::new());
+        tx.delete(columns::META, &parent[..]);
+
+        let awaited_frontier: Option<Vec<(Hash, Hash, Hash, u32)>> = self
+            .query_inner(columns::META, &awaited_chunks_key());
+
+        if let Some(mut awaited_frontier) = awaited_frontier {
+            awaited_frontier.retain(|&(p, c, _, _)| (p != parent && !finalized_candidates.contains(&c)));
+            tx.put_vec(columns::META, &awaited_chunks_key(), awaited_frontier.encode());
+        }
+
+        for block_data_hash in v {
+            if let Some(candidate_hash) = self.block_hash_to_candidate_hash(block_data_hash) {
+                if !finalized_candidates.contains(&candidate_hash) {
+                    tx.delete(columns::DATA, block_data_key(&parent, &block_data_hash).as_slice());
+                    tx.delete(columns::DATA, &erasure_chunks_key(&parent, &block_data_hash));
+                    tx.delete(columns::DATA, &candidate_key(&candidate_hash));
+                    tx.delete(columns::META, &block_to_candidate_key(&block_data_hash));
+                }
+            }
+        }
+
+        self.inner.write(tx)
+    }
+
+    /// Query block data.
+    pub fn block_data(&self, relay_parent: Hash, block_data_hash: Hash) -> Option<BlockData> {
+        self.query_inner(columns::DATA, &block_data_key(&relay_parent, &block_data_hash))
+    }
+
+    /// Query block data by the corresponding candidate receipt's hash.
+    pub fn block_data_by_candidate(&self, relay_parent: Hash, candidate_hash: Hash) -> Option<BlockData> {
+        let receipt_key = candidate_key(&candidate_hash);
+
+        self.query_inner(columns::DATA, &receipt_key[..]).and_then(|receipt: CandidateReceipt| {
+            self.block_data(relay_parent, receipt.block_data_hash)
+        })
+    }
+
+    /// Query message queue data by message queue root hash.
+    pub fn queue_by_root(&self, queue_root: &Hash) -> Option<Vec<Message>> {
+        self.query_inner(columns::DATA, queue_root.as_ref())
+    }
+
+    fn block_hash_to_candidate_hash(&self, block_hash: Hash) -> Option<Hash> {
+        self.query_inner(columns::META, &block_to_candidate_key(&block_hash))
+    }
+
+    fn query_inner<T: Decode>(&self, column: Option<u32>, key: &[u8]) -> Option<T> {
+        match self.inner.get(column, key) {
+            Ok(Some(raw)) => {
+                let res = T::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed");
+                Some(res)
+            }
+            Ok(None) => None,
+            Err(e) => {
+                warn!(target: LOG_TARGET, "Error reading from the availability store: {:?}", e);
+                None
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use polkadot_erasure_coding::{self as erasure};
+    use polkadot_primitives::parachain::{Id as ParaId, AvailableMessages};
+
+    #[test]
+    fn finalization_removes_unneeded() {
+        let relay_parent = [1; 32].into();
+
+        let para_id_1 = 5.into();
+        let para_id_2 = 6.into();
+
+        let block_data_1 = BlockData(vec![1, 2, 3]);
+        let block_data_2 = BlockData(vec![4, 5, 6]);
+
+        let erasure_chunk_1 = ErasureChunk {
+            chunk: vec![10, 20, 30],
+            index: 1,
+            proof: vec![],
+        };
+
+        let erasure_chunk_2 = ErasureChunk {
+            chunk: vec![40, 50, 60],
+            index: 1,
+            proof: vec![],
+        };
+
+        let store = Store::new_in_memory();
+        store.make_available(Data {
+            relay_parent,
+            parachain_id: para_id_1,
+            block_data: block_data_1.clone(),
+            outgoing_queues: None,
+        }).unwrap();
+
+        store.make_available(Data {
+            relay_parent,
+            parachain_id: para_id_2,
+            block_data: block_data_2.clone(),
+            outgoing_queues: None,
+        }).unwrap();
+
+        let candidate_1 = CandidateReceipt {
+            parachain_index: para_id_1,
+            collator: Default::default(),
+            signature: Default::default(),
+            head_data: Default::default(),
+            egress_queue_roots: Vec::new(),
+            fees: 0,
+            block_data_hash: block_data_1.hash(),
+            upward_messages: Vec::new(),
+            erasure_root: [6; 32].into(),
+        };
+
+        let candidate_2 = CandidateReceipt {
+            parachain_index: para_id_2,
+            collator: Default::default(),
+            signature: Default::default(),
+            head_data: Default::default(),
+            egress_queue_roots: Vec::new(),
+            fees: 0,
+            block_data_hash: block_data_2.hash(),
+            upward_messages: Vec::new(),
+            erasure_root: [6; 32].into(),
+        };
+
+        store.add_candidate(&candidate_1).unwrap();
+        store.add_candidate(&candidate_2).unwrap();
+
+        assert!(store.add_erasure_chunks(3, &relay_parent, &candidate_1.hash(), vec![erasure_chunk_1.clone()]).is_ok());
+        assert!(store.add_erasure_chunks(3, &relay_parent, &candidate_2.hash(), vec![erasure_chunk_2.clone()]).is_ok());
+
+        assert_eq!(store.block_data(relay_parent, block_data_1.hash()).unwrap(), block_data_1);
+        assert_eq!(store.block_data(relay_parent, block_data_2.hash()).unwrap(), block_data_2);
+
+        assert_eq!(store.get_erasure_chunk(&relay_parent, block_data_1.hash(), 1).as_ref(), Some(&erasure_chunk_1));
+        assert_eq!(store.get_erasure_chunk(&relay_parent, block_data_2.hash(), 1), Some(erasure_chunk_2));
+
+        assert_eq!(store.get_candidate(&candidate_1.hash()), Some(candidate_1.clone()));
+        assert_eq!(store.get_candidate(&candidate_2.hash()), Some(candidate_2.clone()));
+
+        assert_eq!(store.block_data_by_candidate(relay_parent, candidate_1.hash()).unwrap(), block_data_1);
+        assert_eq!(store.block_data_by_candidate(relay_parent, candidate_2.hash()).unwrap(), block_data_2);
+
+        store.candidates_finalized(relay_parent, [candidate_1.hash()].iter().cloned().collect()).unwrap();
+
+        assert_eq!(store.get_erasure_chunk(&relay_parent, block_data_1.hash(), 1).as_ref(), Some(&erasure_chunk_1));
+        assert!(store.get_erasure_chunk(&relay_parent, block_data_2.hash(), 1).is_none());
+
+        assert_eq!(store.get_candidate(&candidate_1.hash()), Some(candidate_1));
+        assert_eq!(store.get_candidate(&candidate_2.hash()), None);
+
+        assert_eq!(store.block_data(relay_parent, block_data_1.hash()).unwrap(), block_data_1);
+        assert!(store.block_data(relay_parent, block_data_2.hash()).is_none());
+    }
+
+    #[test]
+    fn queues_available_by_queue_root() {
+        let relay_parent = [1; 32].into();
+        let para_id = 5.into();
+        let block_data = BlockData(vec![1, 2, 3]);
+
+        let message_queue_root_1 = [0x42; 32].into();
+        let message_queue_root_2 = [0x43; 32].into();
+
+        let message_a = Message(vec![1, 2, 3, 4]);
+        let message_b = Message(vec![4, 5, 6, 7]);
+
+        let outgoing_queues = AvailableMessages(vec![
+            (message_queue_root_1, vec![message_a.clone()]),
+            (message_queue_root_2, vec![message_b.clone()]),
+        ]);
+
+        let store = Store::new_in_memory();
+        store.make_available(Data {
+            relay_parent,
+            parachain_id: para_id,
+            block_data: block_data.clone(),
+            outgoing_queues: Some(outgoing_queues),
+        }).unwrap();
+
+        assert_eq!(
+            store.queue_by_root(&message_queue_root_1),
+            Some(vec![message_a]),
+        );
+
+        assert_eq!(
+            store.queue_by_root(&message_queue_root_2),
+            Some(vec![message_b]),
+        );
+    }
+
+    #[test]
+    fn erasure_coding() {
+        let relay_parent: Hash = [1; 32].into();
+        let para_id: ParaId = 5.into();
+        let block_data = BlockData(vec![42; 8]);
+        let block_data_hash = block_data.hash();
+        let n_validators = 5;
+
+        let message_queue_root_1 = [0x42; 32].into();
+        let message_queue_root_2 = [0x43; 32].into();
+
+        let message_a = Message(vec![1, 2, 3, 4]);
+        let message_b = Message(vec![5, 6, 7, 8]);
+
+        let outgoing_queues = Some(AvailableMessages(vec![
+            (message_queue_root_1, vec![message_a.clone()]),
+            (message_queue_root_2, vec![message_b.clone()]),
+        ]));
+
+        let erasure_chunks = erasure::obtain_chunks(
+            n_validators,
+            &block_data,
+            outgoing_queues.as_ref()).unwrap();
+
+        let branches = erasure::branches(erasure_chunks.as_ref());
+
+        let candidate = CandidateReceipt {
+            parachain_index: para_id,
+            collator: Default::default(),
+            signature: Default::default(),
+            head_data: Default::default(),
+            egress_queue_roots: Vec::new(),
+            fees: 0,
+            block_data_hash: block_data.hash(),
+            upward_messages: Vec::new(),
+            erasure_root: [6; 32].into(),
+        };
+
+        let chunks: Vec<_> = erasure_chunks
+            .iter()
+            .zip(branches.map(|(proof, _)| proof))
+            .enumerate()
+            .map(|(index, (chunk, proof))| ErasureChunk {
+                chunk: chunk.clone(),
+                proof,
+                index: index as u32,
+            })
+            .collect();
+
+        let store = Store::new_in_memory();
+
+        store.add_candidate(&candidate).unwrap();
+        store.add_erasure_chunks(n_validators as u32, &relay_parent, &candidate.hash(), vec![chunks[0].clone()]).unwrap();
+        assert_eq!(store.get_erasure_chunk(&relay_parent, block_data_hash, 0), Some(chunks[0].clone()));
+
+        assert!(store.block_data(relay_parent, block_data_hash).is_none());
+
+        store.add_erasure_chunks(n_validators as u32, &relay_parent, &candidate.hash(), chunks).unwrap();
+        assert_eq!(store.block_data(relay_parent, block_data_hash), Some(block_data));
+    }
+
+    #[test]
+    fn add_validator_index_works() {
+        let relay_parent = [42; 32].into();
+        let store = Store::new_in_memory();
+
+        store.add_validator_index_and_n_validators(&relay_parent, 42, 24).unwrap();
+        assert_eq!(store.get_validator_index_and_n_validators(&relay_parent).unwrap(), (42, 24));
+    }
+
+    #[test]
+    fn add_candidates_in_relay_block_works() {
+        let relay_parent = [42; 32].into();
+        let store = Store::new_in_memory();
+
+        let candidates = vec![[1; 32].into(), [2; 32].into(), [3; 32].into()];
+
+        store.add_candidates_in_relay_block(&relay_parent, candidates.clone()).unwrap();
+        assert_eq!(store.get_candidates_in_relay_block(&relay_parent).unwrap(), candidates);
+    }
+
+    #[test]
+    fn awaited_chunks_works() {
+        use std::iter::FromIterator;
+        let validator_index = 3;
+        let n_validators = 10;
+        let relay_parent = [42; 32].into();
+        let erasure_root_1 = [11; 32].into();
+        let erasure_root_2 = [12; 32].into();
+        let mut receipt_1 = CandidateReceipt::default();
+        let mut receipt_2 = CandidateReceipt::default();
+
+        receipt_1.parachain_index = 1.into();
+        receipt_1.erasure_root = erasure_root_1;
+        receipt_2.parachain_index = 2.into();
+        receipt_2.erasure_root = erasure_root_2;
+
+        let chunk = ErasureChunk {
+            chunk: vec![1, 2, 3],
+            index: validator_index,
+            proof: Vec::new(),
+        };
+        let candidates = vec![receipt_1.hash(), receipt_2.hash()];
+        let erasure_roots = vec![erasure_root_1, erasure_root_2];
+
+        let store = Store::new_in_memory();
+
+        store.add_validator_index_and_n_validators(
+            &relay_parent,
+            validator_index,
+            n_validators
+        ).unwrap();
+        store.add_candidate(&receipt_1).unwrap();
+        store.add_candidate(&receipt_2).unwrap();
+
+        // We are waiting for chunks from two candidates.
+        store.add_candidates_in_relay_block(&relay_parent, candidates.clone()).unwrap();
+
+        let awaited_frontier = store.awaited_chunks().unwrap();
+        warn!(target: "availability", "awaited {:?}", awaited_frontier);
+        let expected: HashSet<_> = candidates
+            .clone()
+            .into_iter()
+            .zip(erasure_roots.iter())
+            .map(|(c, e)| (relay_parent, *e, c, validator_index))
+            .collect();
+        assert_eq!(awaited_frontier, expected);
+
+        // We add a chunk from one of the candidates.
+        store.add_erasure_chunks(n_validators, &relay_parent, &receipt_1.hash(), vec![chunk]).unwrap();
+
+        let awaited_frontier = store.awaited_chunks().unwrap();
+        // Now we wait for the other chunk that we haven't received yet.
+        let expected: HashSet<_> = vec![
+            (relay_parent, erasure_roots[1], candidates[1], validator_index)
+        ].into_iter().collect();
+
+        assert_eq!(awaited_frontier, expected);
+
+        // Finalizing removes awaited candidates from the frontier.
+        store.candidates_finalized(relay_parent, HashSet::from_iter(candidates.into_iter())).unwrap();
+
+        assert_eq!(store.awaited_chunks().unwrap().len(), 0);
+    }
+}

diff --git a/availability-store/src/worker.rs b/availability-store/src/worker.rs
new file mode 100644
index 000000000000..539b08548c59
--- /dev/null
+++ b/availability-store/src/worker.rs
@@ -0,0 +1,998 @@
+// Copyright 2018 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::HashMap;
+use std::io;
+use std::sync::Arc;
+use std::thread;
+
+use log::{error, info, trace, warn};
+use sp_blockchain::{Result as ClientResult};
+use sp_runtime::traits::{Header as HeaderT, ProvideRuntimeApi};
+use sp_api::ApiExt;
+use client::{
+    BlockchainEvents, BlockBody,
+    blockchain::ProvideCache,
+};
+use consensus_common::{
+    self, BlockImport, BlockCheckParams, BlockImportParams, Error as ConsensusError,
+    ImportResult,
+    import_queue::CacheKeyId,
+};
+use polkadot_primitives::{Block, BlockId, Hash};
+use polkadot_primitives::parachain::{
+    CandidateReceipt, ParachainHost, ValidatorId,
+    ValidatorPair, AvailableMessages, BlockData, ErasureChunk,
+};
+use futures01::Future;
+use futures::channel::{mpsc, oneshot};
+use futures::{FutureExt, Sink, SinkExt, TryFutureExt, StreamExt};
+use keystore::KeyStorePtr;
+
+use tokio::runtime::current_thread::{Handle, Runtime as LocalRuntime};
+
+use crate::{LOG_TARGET, Data, TaskExecutor, ProvideGossipMessages, erasure_coding_topic};
+use crate::store::Store;
+
+/// Errors that may occur.
+#[derive(Debug, derive_more::Display, derive_more::From)]
+pub(crate) enum Error {
+    #[from]
+    StoreError(io::Error),
+    #[display(fmt = "Validator's id and number of validators at block with parent {} not found", relay_parent)]
+    IdAndNValidatorsNotFound { relay_parent: Hash },
+    #[display(fmt = "Candidate receipt with hash {} not found", candidate_hash)]
+    CandidateNotFound { candidate_hash: Hash },
+}
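Editor's note: a small sketch of how the derived conversions above are used inside the worker; `?` on an `io::Result` lifts the error into `Error::StoreError` via the derived `From`, and `derive_more::Display` renders the hand-written messages. The helper is hypothetical.

fn persist(store: &Store, data: Data) -> Result<(), Error> {
    store.make_available(data)?; // io::Error -> Error::StoreError via #[from]
    Ok(())
}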
+/// Messages sent to the `Worker`.
+///
+/// Messages are sent in a number of different scenarios,
+/// for instance, when:
+/// * importing blocks in the `BlockImport` implementation,
+/// * receiving finality notifications,
+/// * when the `Store` API is used by outside code.
+#[derive(Debug)]
+pub(crate) enum WorkerMsg {
+    ErasureRoots(ErasureRoots),
+    ParachainBlocks(ParachainBlocks),
+    ListenForChunks(ListenForChunks),
+    Chunks(Chunks),
+    CandidatesFinalized(CandidatesFinalized),
+    MakeAvailable(MakeAvailable),
+}
+
+/// The erasure roots of the heads included in the block with a given parent.
+#[derive(Debug)]
+pub(crate) struct ErasureRoots {
+    /// The relay parent of the block these roots belong to.
+    pub relay_parent: Hash,
+    /// The roots themselves.
+    pub erasure_roots: Vec<Hash>,
+    /// A sender to signal the result asynchronously.
+    pub result: oneshot::Sender<Result<(), Error>>,
+}
+
+/// The receipts of the heads included in the block with a given parent.
+#[derive(Debug)]
+pub(crate) struct ParachainBlocks {
+    /// The relay parent of the block these parachain blocks belong to.
+    pub relay_parent: Hash,
+    /// The blocks themselves.
+    pub blocks: Vec<(CandidateReceipt, Option<(BlockData, AvailableMessages)>)>,
+    /// A sender to signal the result asynchronously.
+    pub result: oneshot::Sender<Result<(), Error>>,
+}
+
+/// Listen on gossip for these chunks.
+#[derive(Debug)]
+pub(crate) struct ListenForChunks {
+    /// The relay parent of the block whose chunks we want to listen for.
+    pub relay_parent: Hash,
+    /// The hash of the candidate the chunk belongs to.
+    pub candidate_hash: Hash,
+    /// The index of the chunk we need.
+    pub index: u32,
+    /// A sender to signal the result asynchronously.
+    pub result: Option<oneshot::Sender<Result<(), Error>>>,
+}
+
+/// We have received some chunks.
+#[derive(Debug)]
+pub(crate) struct Chunks {
+    /// The relay parent of the block these chunks belong to.
+    pub relay_parent: Hash,
+    /// The hash of the parachain candidate these chunks belong to.
+    pub candidate_hash: Hash,
+    /// The chunks.
+    pub chunks: Vec<ErasureChunk>,
+    /// A sender to signal the result asynchronously.
+    pub result: oneshot::Sender<Result<(), Error>>,
+}
+
+/// These candidates have been finalized, so unneeded availability data may now be pruned.
+#[derive(Debug)]
+pub(crate) struct CandidatesFinalized {
+    /// The relay parent of the block that was finalized.
+    relay_parent: Hash,
+    /// The parachain heads that were finalized in this block.
+    candidate_hashes: Vec<Hash>,
+}
+
+/// The message that corresponds to the `make_available` call of the crate API.
+#[derive(Debug)]
+pub(crate) struct MakeAvailable {
+    /// The data being made available.
+    pub data: Data,
+    /// A sender to signal the result asynchronously.
+    pub result: oneshot::Sender<Result<(), Error>>,
+}
+
+/// An availability worker with its inner state.
+pub(super) struct Worker<PGM> {
+    availability_store: Store,
+    provide_gossip_messages: PGM,
+    registered_gossip_streams: HashMap<Hash, exit_future::Signal>,
+
+    sender: mpsc::UnboundedSender<WorkerMsg>,
+}
+/// The handle to the `Worker`.
+pub(super) struct WorkerHandle {
+    exit_signal: Option<exit_future::Signal>,
+    thread: Option<thread::JoinHandle<io::Result<()>>>,
+    sender: mpsc::UnboundedSender<WorkerMsg>,
+}
+
+impl WorkerHandle {
+    pub(crate) fn to_worker(&self) -> &mpsc::UnboundedSender<WorkerMsg> {
+        &self.sender
+    }
+}
+
+impl Drop for WorkerHandle {
+    fn drop(&mut self) {
+        if let Some(signal) = self.exit_signal.take() {
+            signal.fire();
+        }
+
+        if let Some(thread) = self.thread.take() {
+            if let Err(_) = thread.join() {
+                error!(target: LOG_TARGET, "Errored stopping the thread");
+            }
+        }
+    }
+}
+
+async fn listen_for_chunks<PGM, S>(
+    p: PGM,
+    topic: Hash,
+    mut sender: S
+)
+where
+    PGM: ProvideGossipMessages,
+    S: Sink<WorkerMsg> + Unpin,
+{
+    trace!(target: LOG_TARGET, "Registering gossip listener for topic {}", topic);
+    let mut chunks_stream = p.gossip_messages_for(topic);
+
+    while let Some(item) = chunks_stream.next().await {
+        let (s, _) = oneshot::channel();
+        trace!(target: LOG_TARGET, "Received for {:?}", item);
+        let chunks = Chunks {
+            relay_parent: item.0,
+            candidate_hash: item.1,
+            chunks: vec![item.2],
+            result: s,
+        };
+
+        if let Err(_) = sender.send(WorkerMsg::Chunks(chunks)).await {
+            break;
+        }
+    }
+}
+
+
+fn fetch_candidates<P>(client: &P, block: &BlockId, parent: &BlockId)
+    -> ClientResult<Option<impl Iterator<Item = CandidateReceipt>>>
+where
+    P: BlockBody<Block> + ProvideRuntimeApi,
+    P::Api: ParachainHost<Block> + ApiExt<Block>,
+{
+    let extrinsics = client.block_body(block)?;
+    Ok(match extrinsics {
+        Some(extrinsics) => client.runtime_api()
+            .get_heads(&parent, extrinsics).map_err(|_| ConsensusError::ChainLookup("".into()))?
+            .and_then(|v| Some(v.into_iter())),
+        None => None,
+    })
+}
+
+/// Creates a task to prune entries in the availability store upon block finalization.
+async fn prune_unneeded_availability<P, S>(client: Arc<P>, mut sender: S)
, mut sender: S) +where + P: ProvideRuntimeApi + BlockchainEvents + BlockBody + Send + Sync + 'static, + P::Api: ParachainHost + ApiExt, + S: Sink + Clone + Send + Sync + Unpin, +{ + let mut finality_notification_stream = client.finality_notification_stream(); + + while let Some(notification) = finality_notification_stream.next().await { + let hash = notification.hash; + let parent_hash = notification.header.parent_hash; + let candidate_hashes = match fetch_candidates( + &*client, + &BlockId::hash(hash), + &BlockId::hash(parent_hash) + ) { + Ok(Some(candidates)) => candidates.map(|c| c.hash()).collect(), + Ok(None) => { + warn!( + target: LOG_TARGET, + "Failed to extract candidates from block body of imported block {:?}", hash + ); + continue; + } + Err(e) => { + warn!( + target: LOG_TARGET, + "Failed to fetch block body for imported block {:?}: {:?}", hash, e + ); + continue; + } + }; + + let msg = WorkerMsg::CandidatesFinalized(CandidatesFinalized { + relay_parent: parent_hash, + candidate_hashes + }); + + if let Err(_) = sender.send(msg).await { + break; + } + } +} + +impl Drop for Worker { + fn drop(&mut self) { + for (_, signal) in self.registered_gossip_streams.drain() { + signal.fire(); + } + } +} + +impl Worker +where + PGM: ProvideGossipMessages + Clone + Send + 'static, +{ + + // Called on startup of the worker to register listeners for all awaited chunks. + fn register_listeners( + &mut self, + runtime_handle: &mut Handle, + sender: &mut mpsc::UnboundedSender, + ) { + if let Some(awaited_chunks) = self.availability_store.awaited_chunks() { + for chunk in awaited_chunks { + if let Err(e) = self.register_chunks_listener( + runtime_handle, + sender, + chunk.0, + chunk.1, + ) { + warn!(target: LOG_TARGET, "Failed to register gossip listener: {}", e); + } + } + } + } + + fn register_chunks_listener( + &mut self, + runtime_handle: &mut Handle, + sender: &mut mpsc::UnboundedSender, + relay_parent: Hash, + erasure_root: Hash, + ) -> Result<(), Error> { + let (local_id, _) = self.availability_store + .get_validator_index_and_n_validators(&relay_parent) + .ok_or(Error::IdAndNValidatorsNotFound { relay_parent })?; + let topic = erasure_coding_topic(relay_parent, erasure_root, local_id); + trace!( + target: LOG_TARGET, + "Registering listener for erasure chunks topic {} for ({}, {})", + topic, + relay_parent, + erasure_root, + ); + + let (signal, exit) = exit_future::signal(); + + let fut = listen_for_chunks( + self.provide_gossip_messages.clone(), + topic, + sender.clone(), + ); + + self.registered_gossip_streams.insert(topic, signal); + + let _ = runtime_handle.spawn( + fut + .unit_error() + .boxed() + .compat() + .select(exit) + .then(|_| Ok(())) + ); + + Ok(()) + } + + fn on_parachain_blocks_received( + &mut self, + runtime_handle: &mut Handle, + sender: &mut mpsc::UnboundedSender, + relay_parent: Hash, + blocks: Vec<(CandidateReceipt, Option<(BlockData, AvailableMessages)>)>, + ) -> Result<(), Error> { + let hashes: Vec<_> = blocks.iter().map(|(c, _)| c.hash()).collect(); + + // First we have to add the receipts themselves. + for (candidate, block) in blocks.into_iter() { + let _ = self.availability_store.add_candidate(&candidate); + + if let Some((_block, _msgs)) = block { + // Should we be breaking block into chunks here and gossiping it and so on? 
+            }
+
+            if let Err(e) = self.register_chunks_listener(
+                runtime_handle,
+                sender,
+                relay_parent,
+                candidate.erasure_root
+            ) {
+                warn!(target: LOG_TARGET, "Failed to register chunk listener: {}", e);
+            }
+        }
+
+        let _ = self.availability_store.add_candidates_in_relay_block(
+            &relay_parent,
+            hashes
+        );
+
+        Ok(())
+    }
+
+    // Processes chunks messages that contain awaited items.
+    //
+    // When an awaited item is received, it is placed into the availability store,
+    // removed from the frontier, and its gossip listener is de-registered.
+    fn on_chunks_received(
+        &mut self,
+        relay_parent: Hash,
+        candidate_hash: Hash,
+        chunks: Vec<ErasureChunk>,
+    ) -> Result<(), Error> {
+        let (_, n_validators) = self.availability_store
+            .get_validator_index_and_n_validators(&relay_parent)
+            .ok_or(Error::IdAndNValidatorsNotFound { relay_parent })?;
+
+        let receipt = self.availability_store.get_candidate(&candidate_hash)
+            .ok_or(Error::CandidateNotFound { candidate_hash })?;
+
+        for chunk in &chunks {
+            let topic = erasure_coding_topic(relay_parent, receipt.erasure_root, chunk.index);
+            // Need to remove the gossip listener and stop it.
+            if let Some(signal) = self.registered_gossip_streams.remove(&topic) {
+                signal.fire();
+            }
+        }
+
+        self.availability_store.add_erasure_chunks(
+            n_validators,
+            &relay_parent,
+            &candidate_hash,
+            chunks,
+        )?;
+
+        Ok(())
+    }
+
+    // Adds the erasure roots into the store.
+    fn on_erasure_roots_received(
+        &mut self,
+        relay_parent: Hash,
+        erasure_roots: Vec<Hash>
+    ) -> Result<(), Error> {
+        self.availability_store.add_erasure_roots_in_relay_block(&relay_parent, erasure_roots)?;
+
+        Ok(())
+    }
+
+    // Processes the `ListenForChunks` message.
+    //
+    // When the worker receives a `ListenForChunks` message, it double-checks that
+    // we don't have that piece, and then it registers a listener.
+    fn on_listen_for_chunks_received(
+        &mut self,
+        runtime_handle: &mut Handle,
+        sender: &mut mpsc::UnboundedSender<WorkerMsg>,
+        relay_parent: Hash,
+        candidate_hash: Hash,
+        id: usize
+    ) -> Result<(), Error> {
+        let candidate = self.availability_store.get_candidate(&candidate_hash)
+            .ok_or(Error::CandidateNotFound { candidate_hash })?;
+
+        if self.availability_store
+            .get_erasure_chunk(&relay_parent, candidate.block_data_hash, id)
+            .is_none() {
+            if let Err(e) = self.register_chunks_listener(
+                runtime_handle,
+                sender,
+                relay_parent,
+                candidate.erasure_root
+            ) {
+                warn!(target: LOG_TARGET, "Failed to register a gossip listener: {}", e);
+            }
+        }
+
+        Ok(())
+    }
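All listener futures here are `futures` 0.3 values running on a `futures` 0.1 tokio runtime, hence the recurring `unit_error().boxed().compat()` chain. A condensed sketch of that bridging pattern, with `fut`, `exit` and `runtime_handle` standing in for the locals of `register_chunks_listener`:

```rust
use futures::{FutureExt, TryFutureExt};

// `fut` is a std-future with output (). Give it an error type, box it,
// and convert it into a futures 0.1 future that the old runtime can poll.
let fut01 = fut.unit_error().boxed().compat();

// Race it against the exit signal so that dropping the Worker tears the
// listener down; `then(|_| Ok(()))` erases whichever side finished first.
let _ = runtime_handle.spawn(fut01.select(exit).then(|_| Ok(())));
```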
+    /// Starts a worker with a given availability store and a gossip messages provider.
+    pub fn start(
+        availability_store: Store,
+        provide_gossip_messages: PGM,
+    ) -> WorkerHandle {
+        let (sender, mut receiver) = mpsc::unbounded();
+
+        let mut worker = Self {
+            availability_store,
+            provide_gossip_messages,
+            registered_gossip_streams: HashMap::new(),
+            sender: sender.clone(),
+        };
+
+        let sender = sender.clone();
+        let (signal, exit) = exit_future::signal();
+
+        let handle = thread::spawn(move || -> io::Result<()> {
+            let mut runtime = LocalRuntime::new()?;
+            let mut sender = worker.sender.clone();
+
+            let mut runtime_handle = runtime.handle();
+
+            // On startup, registers listeners (gossip streams) for all
+            // (relay_parent, erasure-root, i) in the awaited frontier.
+            worker.register_listeners(&mut runtime_handle, &mut sender);
+
+            let process_notification = async move {
+                while let Some(msg) = receiver.next().await {
+                    trace!(target: LOG_TARGET, "Received message {:?}", msg);
+
+                    let res = match msg {
+                        WorkerMsg::ErasureRoots(msg) => {
+                            let ErasureRoots { relay_parent, erasure_roots, result } = msg;
+                            let res = worker.on_erasure_roots_received(
+                                relay_parent,
+                                erasure_roots,
+                            );
+                            let _ = result.send(res);
+                            Ok(())
+                        }
+                        WorkerMsg::ListenForChunks(msg) => {
+                            let ListenForChunks {
+                                relay_parent,
+                                candidate_hash,
+                                index,
+                                result,
+                            } = msg;
+
+                            let res = worker.on_listen_for_chunks_received(
+                                &mut runtime_handle,
+                                &mut sender,
+                                relay_parent,
+                                candidate_hash,
+                                index as usize,
+                            );
+
+                            if let Some(result) = result {
+                                let _ = result.send(res);
+                            }
+                            Ok(())
+                        }
+                        WorkerMsg::ParachainBlocks(msg) => {
+                            let ParachainBlocks {
+                                relay_parent,
+                                blocks,
+                                result,
+                            } = msg;
+
+                            let res = worker.on_parachain_blocks_received(
+                                &mut runtime_handle,
+                                &mut sender,
+                                relay_parent,
+                                blocks,
+                            );
+
+                            let _ = result.send(res);
+                            Ok(())
+                        }
+                        WorkerMsg::Chunks(msg) => {
+                            let Chunks { relay_parent, candidate_hash, chunks, result } = msg;
+                            let res = worker.on_chunks_received(
+                                relay_parent,
+                                candidate_hash,
+                                chunks,
+                            );
+
+                            let _ = result.send(res);
+                            Ok(())
+                        }
+                        WorkerMsg::CandidatesFinalized(msg) => {
+                            let CandidatesFinalized { relay_parent, candidate_hashes } = msg;
+
+                            worker.availability_store.candidates_finalized(
+                                relay_parent,
+                                candidate_hashes.into_iter().collect(),
+                            )
+                        }
+                        WorkerMsg::MakeAvailable(msg) => {
+                            let MakeAvailable { data, result } = msg;
+                            let res = worker.availability_store.make_available(data)
+                                .map_err(|e| e.into());
+                            let _ = result.send(res);
+                            Ok(())
+                        }
+                    };
+
+                    if let Err(_) = res {
+                        warn!(target: LOG_TARGET, "An error occurred while processing a message");
+                    }
+                }
+
+            };
+
+            runtime.spawn(
+                process_notification
+                    .unit_error()
+                    .boxed()
+                    .compat()
+                    .select(exit.clone())
+                    .then(|_| Ok(()))
+            );
+
+            if let Err(e) = runtime.block_on(exit) {
+                warn!(target: LOG_TARGET, "Availability worker error {:?}", e);
+            }
+
+            info!(target: LOG_TARGET, "Availability worker exiting");
+
+            Ok(())
+        });
+
+        WorkerHandle {
+            thread: Some(handle),
+            sender,
+            exit_signal: Some(signal),
+        }
+    }
+}
+
+/// Implementor of the [`BlockImport`] trait.
+///
+/// Used to embed `availability-store` logic into the block importing pipeline.
+///
+/// [`BlockImport`]: https://substrate.dev/rustdocs/v1.0/substrate_consensus_common/trait.BlockImport.html
+pub struct AvailabilityBlockImport<I, P> {
+    availability_store: Store,
+    inner: I,
+    client: Arc<P>,
+    keystore: KeyStorePtr,
+    to_worker: mpsc::UnboundedSender<WorkerMsg>,
+    exit_signal: Option<exit_future::Signal>,
+}
+
+impl<I, P> Drop for AvailabilityBlockImport<I, P> {
+    fn drop(&mut self) {
+        if let Some(signal) = self.exit_signal.take() {
+            signal.fire();
+        }
+    }
+}
+
+impl<I, P> BlockImport<Block> for AvailabilityBlockImport<I, P> where
+    I: BlockImport<Block> + Send + Sync,
+    I::Error: Into<ConsensusError>,
+    P: ProvideRuntimeApi + ProvideCache<Block>,
+    P::Api: ParachainHost<Block>,
+    P::Api: ApiExt<Block>,
+{
+    type Error = ConsensusError;
+
+    fn import_block(
+        &mut self,
+        block: BlockImportParams<Block>,
+        new_cache: HashMap<CacheKeyId, Vec<u8>>,
+    ) -> Result<ImportResult, Self::Error> {
+        trace!(
+            target: LOG_TARGET,
+            "Importing block #{}, ({})",
+            block.header.number(),
+            block.post_header().hash()
+        );
+
+        if let Some(ref extrinsics) = block.body {
+            let relay_parent = *block.header.parent_hash();
+            let parent_id = BlockId::hash(*block.header.parent_hash());
+            // Extract our local position i from the validator set of the parent.
+            let validators = self.client.runtime_api().validators(&parent_id)
+                .map_err(|e| ConsensusError::ChainLookup(e.to_string()))?;
+
+            let our_id = self.our_id(&validators);
+
+            // Use a runtime API to extract all included erasure-roots from the imported block.
+            let candidates = self.client.runtime_api().get_heads(&parent_id, extrinsics.clone())
+                .map_err(|e| ConsensusError::ChainLookup(e.to_string()))?;
+
+            match candidates {
+                Some(candidates) => {
+                    match our_id {
+                        Some(our_id) => {
+                            trace!(
+                                target: LOG_TARGET,
+                                "Our validator id is {}, the candidates included are {:?}",
+                                our_id, candidates
+                            );
+
+                            for candidate in &candidates {
+                                // If we don't yet have our chunk of this candidate,
+                                // tell the worker to listen for one.
+                                if self.availability_store.get_erasure_chunk(
+                                    &relay_parent,
+                                    candidate.block_data_hash,
+                                    our_id as usize,
+                                ).is_none() {
+                                    let msg = WorkerMsg::ListenForChunks(ListenForChunks {
+                                        relay_parent,
+                                        candidate_hash: candidate.hash(),
+                                        index: our_id as u32,
+                                        result: None,
+                                    });
+
+                                    let _ = self.to_worker.unbounded_send(msg);
+                                }
+                            }
+
+                            let erasure_roots: Vec<_> = candidates
+                                .iter()
+                                .map(|c| c.erasure_root)
+                                .collect();
+
+                            // Inform the worker about the new (relay_parent, erasure_roots) pairs.
+                            let (s, _) = oneshot::channel();
+                            let msg = WorkerMsg::ErasureRoots(ErasureRoots {
+                                relay_parent,
+                                erasure_roots,
+                                result: s,
+                            });
+
+                            let _ = self.to_worker.unbounded_send(msg);
+
+                            let (s, _) = oneshot::channel();
+
+                            // Inform the worker about the included parachain blocks.
+                            let msg = WorkerMsg::ParachainBlocks(ParachainBlocks {
+                                relay_parent,
+                                blocks: candidates.into_iter().map(|c| (c, None)).collect(),
+                                result: s,
+                            });
+
+                            let _ = self.to_worker.unbounded_send(msg);
+                        }
+                        None => (),
+                    }
+                }
+                None => {
+                    trace!(
+                        target: LOG_TARGET,
+                        "No parachain heads were included in block {}", block.header.hash()
+                    );
+                },
+            }
+        }
+
+        self.inner.import_block(block, new_cache).map_err(Into::into)
+    }
+
+    fn check_block(
+        &mut self,
+        block: BlockCheckParams<Block>,
+    ) -> Result<ImportResult, Self::Error> {
+        self.inner.check_block(block).map_err(Into::into)
+    }
+}
+impl<I, P> AvailabilityBlockImport<I, P> {
+    pub(crate) fn new(
+        availability_store: Store,
+        client: Arc<P>,
+        block_import: I,
+        thread_pool: TaskExecutor,
+        keystore: KeyStorePtr,
+        to_worker: mpsc::UnboundedSender<WorkerMsg>,
+    ) -> Self
+    where
+        P: ProvideRuntimeApi + BlockBody<Block> + BlockchainEvents<Block> + Send + Sync + 'static,
+        P::Api: ParachainHost<Block>,
+        P::Api: ApiExt<Block>,
+    {
+        let (signal, exit) = exit_future::signal();
+
+        // This is not the right place to spawn the finality future;
+        // it would be more appropriate to spawn it in the `start` method of the `Worker`.
+        // However, that would make the types of the `Worker` and the `Store` itself
+        // dependent on the types of client and executor, which would prove
+        // not so handy in the testing code.
+        let mut exit_signal = Some(signal);
+        let prune_available = prune_unneeded_availability(client.clone(), to_worker.clone())
+            .unit_error()
+            .boxed()
+            .compat()
+            .select(exit.clone())
+            .then(|_| Ok(()));
+
+        if let Err(_) = thread_pool.execute(Box::new(prune_available)) {
+            error!(target: LOG_TARGET, "Failed to spawn availability pruning task");
+            exit_signal = None;
+        }
+
+        AvailabilityBlockImport {
+            availability_store,
+            client,
+            inner: block_import,
+            to_worker,
+            keystore,
+            exit_signal,
+        }
+    }
+
+    fn our_id(&self, validators: &[ValidatorId]) -> Option<u32> {
+        let keystore = self.keystore.read();
+        validators
+            .iter()
+            .enumerate()
+            .find_map(|(i, v)| {
+                keystore.key_pair::<ValidatorPair>(&v).map(|_| i as u32).ok()
+            })
+    }
+}
+
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::time::Duration;
+    use futures::{stream, channel::mpsc, Stream};
+    use std::sync::{Arc, Mutex};
+    use tokio::runtime::Runtime;
+
+    // Just contains a topic -> channel mapping to give to outer code on `gossip_messages_for` calls.
+    struct TestGossipMessages {
+        messages: Arc<Mutex<HashMap<Hash, mpsc::UnboundedReceiver<(Hash, Hash, ErasureChunk)>>>>,
+    }
+
+    impl ProvideGossipMessages for TestGossipMessages {
+        fn gossip_messages_for(&self, topic: Hash)
+            -> Box<dyn Stream<Item = (Hash, Hash, ErasureChunk)> + Send + Unpin>
+        {
+            match self.messages.lock().unwrap().remove(&topic) {
+                Some(receiver) => Box::new(receiver),
+                None => Box::new(stream::iter(vec![])),
+            }
+        }
+
+        fn gossip_erasure_chunk(
+            &self,
+            _relay_parent: Hash,
+            _candidate_hash: Hash,
+            _erasure_root: Hash,
+            _chunk: ErasureChunk
+        ) {}
+    }
+
+    impl Clone for TestGossipMessages {
+        fn clone(&self) -> Self {
+            TestGossipMessages {
+                messages: self.messages.clone(),
+            }
+        }
+    }
+
+    // This test checks that, as soon as the worker receives info about new parachain blocks
+    // having been included, it registers gossip listeners for its own chunks. Upon receiving
+    // the awaited chunk messages, the corresponding listeners are deregistered and these
+    // chunks are removed from the awaited chunks set.
+    #[test]
+    fn receiving_gossip_chunk_removes_from_frontier() {
+        let mut runtime = Runtime::new().unwrap();
+        let relay_parent = [1; 32].into();
+        let erasure_root = [2; 32].into();
+        let local_id = 2;
+        let n_validators = 4;
+
+        let store = Store::new_in_memory();
+
+        // Tell the store our validator's position and the number of validators at given point.
+        store.add_validator_index_and_n_validators(&relay_parent, local_id, n_validators).unwrap();
+
+        let (gossip_sender, gossip_receiver) = mpsc::unbounded();
+
+        let topic = erasure_coding_topic(relay_parent, erasure_root, local_id);
+
+        let messages = TestGossipMessages {
+            messages: Arc::new(Mutex::new(vec![
+                (topic, gossip_receiver)
+            ].into_iter().collect()))
+        };
+
+        let mut candidate = CandidateReceipt::default();
+
+        candidate.erasure_root = erasure_root;
+        let candidate_hash = candidate.hash();
+
+        // At this point we shouldn't be waiting for any chunks.
+        assert!(store.awaited_chunks().is_none());
+
+        let (s, r) = oneshot::channel();
+
+        let msg = WorkerMsg::ParachainBlocks(ParachainBlocks {
+            relay_parent,
+            blocks: vec![(candidate, None)],
+            result: s,
+        });
+
+        let handle = Worker::start(store.clone(), messages);
+
+        // Tell the worker that the new blocks have been included into the relay chain.
+        // This should trigger the registration of gossip message listeners for the
+        // chunk topics.
+        handle.sender.unbounded_send(msg).unwrap();
+
+        runtime.block_on(r.unit_error().boxed().compat()).unwrap().unwrap().unwrap();
+
+        // Make sure that at this point we are waiting for the appropriate chunk.
+        assert_eq!(
+            store.awaited_chunks().unwrap(),
+            vec![(relay_parent, erasure_root, candidate_hash, local_id)].into_iter().collect()
+        );
+
+        let msg = (
+            relay_parent,
+            candidate_hash,
+            ErasureChunk {
+                chunk: vec![1, 2, 3],
+                index: local_id as u32,
+                proof: vec![],
+            }
+        );
+
+        // Send a gossip message with an awaited chunk.
+        gossip_sender.unbounded_send(msg).unwrap();
+
+        // At the point when the needed piece is received, the gossip listener for
+        // this topic is deregistered and its receiver side is dropped.
+        // Wait for the sender side to become closed.
+        while !gossip_sender.is_closed() {
+            // Probably we can just .wait this somehow?
+            thread::sleep(Duration::from_millis(100));
+        }
+
+        // The awaited chunk has been received, so at this point we no longer wait for any chunks.
+        assert_eq!(store.awaited_chunks().unwrap().len(), 0);
+    }
+
+    #[test]
+    fn listen_for_chunk_registers_listener() {
+        let mut runtime = Runtime::new().unwrap();
+        let relay_parent = [1; 32].into();
+        let erasure_root_1 = [2; 32].into();
+        let erasure_root_2 = [3; 32].into();
+        let block_data_hash_1 = [4; 32].into();
+        let block_data_hash_2 = [5; 32].into();
+        let local_id = 2;
+        let n_validators = 4;
+
+        let mut candidate_1 = CandidateReceipt::default();
+        candidate_1.erasure_root = erasure_root_1;
+        candidate_1.block_data_hash = block_data_hash_1;
+        let candidate_1_hash = candidate_1.hash();
+
+        let mut candidate_2 = CandidateReceipt::default();
+        candidate_2.erasure_root = erasure_root_2;
+        candidate_2.block_data_hash = block_data_hash_2;
+        let candidate_2_hash = candidate_2.hash();
+
+        let store = Store::new_in_memory();
+
+        // Tell the store our validator's position and the number of validators at given point.
+        store.add_validator_index_and_n_validators(&relay_parent, local_id, n_validators).unwrap();
+
+        // Let the store know about the candidates.
+        store.add_candidate(&candidate_1).unwrap();
+        store.add_candidate(&candidate_2).unwrap();
+
+        // And let the store know about the chunk from the second candidate.
+        store.add_erasure_chunks(
+            n_validators,
+            &relay_parent,
+            &candidate_2_hash,
+            vec![ErasureChunk {
+                chunk: vec![1, 2, 3],
+                index: local_id,
+                proof: Vec::default(),
+            }],
+        ).unwrap();
+
+        let (_, gossip_receiver_1) = mpsc::unbounded();
+        let (_, gossip_receiver_2) = mpsc::unbounded();
+
+        let topic_1 = erasure_coding_topic(relay_parent, erasure_root_1, local_id);
+        let topic_2 = erasure_coding_topic(relay_parent, erasure_root_2, local_id);
+
+        let messages = TestGossipMessages {
+            messages: Arc::new(Mutex::new(
+                vec![
+                    (topic_1, gossip_receiver_1),
+                    (topic_2, gossip_receiver_2),
+                ].into_iter().collect()))
+        };
+
+        let handle = Worker::start(store.clone(), messages.clone());
+
+        let (s2, r2) = oneshot::channel();
+        // Tell the worker to listen for chunks from candidate 2 (we already have a chunk from it).
+ let listen_msg_2 = WorkerMsg::ListenForChunks(ListenForChunks { + relay_parent, + candidate_hash: candidate_2_hash, + index: local_id as u32, + result: Some(s2), + }); + + handle.sender.unbounded_send(listen_msg_2).unwrap(); + + runtime.block_on(r2.unit_error().boxed().compat()).unwrap().unwrap().unwrap(); + // The gossip sender for this topic left intact => listener not registered. + assert!(messages.messages.lock().unwrap().contains_key(&topic_2)); + + let (s1, r1) = oneshot::channel(); + + // Tell the worker to listen for chunks from candidate 1. + // (we don't have a chunk from it yet). + let listen_msg_1 = WorkerMsg::ListenForChunks(ListenForChunks { + relay_parent, + candidate_hash: candidate_1_hash, + index: local_id as u32, + result: Some(s1), + }); + + handle.sender.unbounded_send(listen_msg_1).unwrap(); + runtime.block_on(r1.unit_error().boxed().compat()).unwrap().unwrap().unwrap(); + + // The gossip sender taken => listener registered. + assert!(!messages.messages.lock().unwrap().contains_key(&topic_1)); + } +} diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 6ea29f1173bb..d3a5eea77235 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -15,5 +15,6 @@ cli = { package = "sc-cli", git = "https://github.com/paritytech/substrate", bra service = { package = "polkadot-service", path = "../service" } [features] -default = [ "wasmtime" ] +# default = [ "wasmtime" ] +default = [] wasmtime = [ "cli/wasmtime" ] diff --git a/collator/src/lib.rs b/collator/src/lib.rs index fd9274594b98..6695acc0cb59 100644 --- a/collator/src/lib.rs +++ b/collator/src/lib.rs @@ -53,6 +53,7 @@ use futures::{ future, Future, Stream, FutureExt, TryFutureExt, StreamExt, compat::{Compat01As03, Future01CompatExt, Stream01CompatExt} }; +use futures01::{Future as _}; use log::{warn, error}; use client::BlockchainEvents; use primitives::{Pair, Blake2Hasher}; @@ -216,19 +217,18 @@ pub async fn collate( let egress_queue_roots = polkadot_validation::egress_roots(&mut outgoing.outgoing_messages); - let receipt = parachain::CandidateReceipt { + let info = parachain::CollationInfo { parachain_index: local_id, collator: key.public(), signature, - head_data, egress_queue_roots, - fees: 0, + head_data, block_data_hash, upward_messages: Vec::new(), }; let collation = parachain::Collation { - receipt, + info, pov: PoVBlock { block_data, ingress, @@ -393,6 +393,7 @@ impl Worker for CollationNode where let key = key.clone(); let parachain_context = parachain_context.clone(); let validation_network = validation_network.clone(); + let inner_exit_2 = inner_exit.clone(); let work = future::lazy(move |_| { let api = client.runtime_api(); @@ -432,9 +433,7 @@ impl Worker for CollationNode where outgoing, ); - if let Err(e) = res { - warn!("Unable to broadcast local collation: {:?}", e); - } + tokio::spawn(res.select(inner_exit_2.clone()).then(|_| Ok(()))); }) }); @@ -602,7 +601,7 @@ mod tests { let collation = futures::executor::block_on(future).unwrap().0; // ascending order by root. 
-        assert_eq!(collation.receipt.egress_queue_roots, vec![(a, root_a), (b, root_b)]);
+        assert_eq!(collation.info.egress_queue_roots, vec![(a, root_a), (b, root_b)]);
     }
 }
diff --git a/erasure-coding/Cargo.toml b/erasure-coding/Cargo.toml
index d5edefdf1e23..cee5a3290ee9 100644
--- a/erasure-coding/Cargo.toml
+++ b/erasure-coding/Cargo.toml
@@ -10,3 +10,4 @@ reed_solomon = { package = "reed-solomon-erasure", git = "https://github.com/par
 codec = { package = "parity-scale-codec", version = "1.1.0", default-features = false, features = ["derive"] }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
 trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
+derive_more = "0.15.0"
diff --git a/erasure-coding/src/lib.rs b/erasure-coding/src/lib.rs
index 09e5d4af0067..43154d486edf 100644
--- a/erasure-coding/src/lib.rs
+++ b/erasure-coding/src/lib.rs
@@ -27,7 +27,7 @@ use codec::{Encode, Decode};
 use reed_solomon::galois_16::{self, ReedSolomon};
 use primitives::{Hash as H256, BlakeTwo256, HashT};
-use primitives::parachain::{BlockData, OutgoingMessages};
+use primitives::parachain::{BlockData, AvailableMessages};
 use sp_core::Blake2Hasher;
 use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}};
@@ -39,7 +39,7 @@ mod wrapped_shard;
 const MAX_VALIDATORS: usize = <galois_16::Field as reed_solomon::Field>::ORDER;
 
 /// Errors in erasure coding.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, derive_more::Display)]
 pub enum Error {
 	/// Returned when there are too many validators.
 	TooManyValidators,
@@ -56,6 +56,7 @@ pub enum Error {
 	/// An uneven byte-length of a shard is not valid for GF(2^16) encoding.
 	UnevenLength,
 	/// Chunk index out of bounds.
+	#[display(fmt = "Chunk is out of bounds: {} {}", _0, _1)]
 	ChunkIndexOutOfBounds(usize, usize),
 	/// Bad payload in reconstructed bytes.
 	BadPayload,
@@ -124,10 +125,10 @@ fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
 /// Obtain erasure-coded chunks, one for each validator.
 ///
 /// Works only up to 65536 validators, and `n_validators` must be non-zero.
-pub fn obtain_chunks(n_validators: usize, block_data: &BlockData, outgoing: &OutgoingMessages)
+pub fn obtain_chunks(n_validators: usize, block_data: &BlockData, outgoing: Option<&AvailableMessages>)
 	-> Result<Vec<Vec<u8>>, Error>
 {
-	let params = code_params(n_validators)?;
+	let params = code_params(n_validators)?;
 	let encoded = (block_data, outgoing).encode();
 
 	if encoded.is_empty() {
@@ -150,7 +151,7 @@
 ///
 /// Works only up to 65536 validators, and `n_validators` must be non-zero.
 pub fn reconstruct<'a, I: 'a>(n_validators: usize, chunks: I)
-	-> Result<(BlockData, OutgoingMessages), Error>
+	-> Result<(BlockData, Option<AvailableMessages>), Error>
 	where I: IntoIterator<Item = (&'a [u8], usize)>
 {
 	let params = code_params(n_validators)?;
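As a sketch of how the reworked API chains together (assuming, from the Reed-Solomon parameters above, that roughly a third of the chunks suffice for recovery, so keeping seven of ten is comfortably enough):

```rust
use polkadot_erasure_coding::{obtain_chunks, reconstruct, Error};
use polkadot_primitives::parachain::{AvailableMessages, BlockData};

fn round_trip() -> Result<(), Error> {
    let n_validators = 10;
    let block_data = BlockData((0..255).collect());
    let messages = Some(AvailableMessages(Vec::new()));

    // One erasure-coded chunk per validator.
    let chunks = obtain_chunks(n_validators, &block_data, messages.as_ref())?;
    assert_eq!(chunks.len(), n_validators);

    // Reconstruction needs only a subset; each chunk travels with its index.
    let subset = chunks.iter().enumerate().take(7).map(|(i, c)| (&c[..], i));
    let (data, msgs) = reconstruct(n_validators, subset)?;

    assert_eq!(data, block_data);
    assert_eq!(msgs, messages);
    Ok(())
}
```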
@@ -199,19 +200,19 @@ pub fn reconstruct<'a, I: 'a>(n_validators: usize, chunks: I)
 
 /// An iterator that yields merkle branches and chunk data for all chunks to
 /// be sent to other validators.
-pub struct Branches<'a> {
+pub struct Branches<'a, I> {
 	trie_storage: MemoryDB<Blake2Hasher>,
 	root: H256,
-	chunks: Vec<&'a [u8]>,
+	chunks: &'a [I],
 	current_pos: usize,
 }
 
-impl<'a> Branches<'a> {
+impl<'a, I: AsRef<[u8]>> Branches<'a, I> {
 	/// Get the trie root.
 	pub fn root(&self) -> H256 { self.root.clone() }
 }
 
-impl<'a> Iterator for Branches<'a> {
+impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
 	type Item = (Vec<Vec<u8>>, &'a [u8]);
 
 	fn next(&mut self) -> Option<Self::Item> {
@@ -228,11 +229,11 @@ impl<'a> Iterator for Branches<'a> {
 		match res.expect("all nodes in trie present; qed") {
 			Some(_) => {
 				let nodes = recorder.drain().into_iter().map(|r| r.data).collect();
-				let chunk = &self.chunks.get(self.current_pos)
+				let chunk = self.chunks.get(self.current_pos)
 					.expect("there is a one-to-one mapping of chunks to valid merkle branches; qed");
 				self.current_pos += 1;
-				Some((nodes, chunk))
+				Some((nodes, chunk.as_ref()))
 			}
 			None => None,
 		}
@@ -241,16 +242,18 @@
 
 /// Construct a trie from chunks of an erasure-coded value. This returns the root hash and an
 /// iterator of merkle proofs, one for each validator.
-pub fn branches<'a>(chunks: Vec<&'a [u8]>) -> Branches<'a> {
+pub fn branches<'a, I: 'a>(chunks: &'a [I]) -> Branches<'a, I>
+	where I: AsRef<[u8]>,
+{
 	let mut trie_storage: MemoryDB<Blake2Hasher> = MemoryDB::default();
 	let mut root = H256::default();
 
 	// construct trie mapping each chunk's index to its hash.
 	{
 		let mut trie = TrieDBMut::new(&mut trie_storage, &mut root);
-		for (i, &chunk) in chunks.iter().enumerate() {
+		for (i, chunk) in chunks.as_ref().iter().enumerate() {
 			(i as u32).using_encoded(|encoded_index| {
-				let chunk_hash = BlakeTwo256::hash(chunk);
+				let chunk_hash = BlakeTwo256::hash(chunk.as_ref());
 				trie.insert(encoded_index, chunk_hash.as_ref())
 					.expect("a fresh trie stored in memory cannot have errors loading nodes; qed");
 			})
@@ -260,7 +263,7 @@ pub fn branches<'a>(chunks: Vec<&'a [u8]>) -> Branches<'a> {
 	Branches {
 		trie_storage,
 		root,
-		chunks,
+		chunks: chunks,
 		current_pos: 0,
 	}
 }
@@ -399,11 +402,11 @@ mod tests {
 	#[test]
 	fn round_trip_block_data() {
 		let block_data = BlockData((0..255).collect());
-		let ex = OutgoingMessages { outgoing_messages: Vec::new() };
+		let ex = Some(AvailableMessages(Vec::new()));
 		let chunks = obtain_chunks(
 			10,
 			&block_data,
-			&ex,
+			ex.as_ref(),
 		).unwrap();
 
 		assert_eq!(chunks.len(), 10);
@@ -425,16 +428,17 @@ mod tests {
 	#[test]
 	fn construct_valid_branches() {
 		let block_data = BlockData(vec![2; 256]);
+		let ex = Some(AvailableMessages(Vec::new()));
+
 		let chunks = obtain_chunks(
 			10,
 			&block_data,
-			&OutgoingMessages { outgoing_messages: Vec::new() },
+			ex.as_ref(),
 		).unwrap();
-		let chunks: Vec<_> = chunks.iter().map(|c| &c[..]).collect();
 
 		assert_eq!(chunks.len(), 10);
 
-		let branches = branches(chunks.clone());
+		let branches = branches(chunks.as_ref());
 		let root = branches.root();
 		let proofs: Vec<_> = branches.map(|(proof, _)| proof).collect();
 
@@ -442,7 +446,7 @@ mod tests {
 		assert_eq!(proofs.len(), 10);
 
 		for (i, proof) in proofs.into_iter().enumerate() {
-			assert_eq!(branch_hash(&root, &proof, i).unwrap(), BlakeTwo256::hash(chunks[i]));
+			assert_eq!(branch_hash(&root, &proof, i).unwrap(), BlakeTwo256::hash(&chunks[i]));
 		}
 	}
 }
diff --git a/network/Cargo.toml b/network/Cargo.toml
index 0eb07e136309..cf2aa9db10c5 100644
--- a/network/Cargo.toml
+++ b/network/Cargo.toml
@@ -11,6 +11,7 @@ parking_lot = "0.9.0"
 av_store = { package = "polkadot-availability-store", path = "../availability-store" }
 polkadot-validation = { path = "../validation" }
 polkadot-primitives = { path = "../primitives" }
+polkadot-erasure-coding = { path = "../erasure-coding" }
 codec = { package = "parity-scale-codec", version = "1.1.0", default-features = false, features = ["derive"] }
 sc-network = { git =
"https://github.com/paritytech/substrate", branch = "polkadot-master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" } diff --git a/network/src/collator_pool.rs b/network/src/collator_pool.rs index 5673c6701188..01f13378e559 100644 --- a/network/src/collator_pool.rs +++ b/network/src/collator_pool.rs @@ -184,7 +184,7 @@ impl CollatorPool { /// The collation should have been checked for integrity of signature before passing to this function. pub fn on_collation(&mut self, collator_id: CollatorId, relay_parent: Hash, collation: Collation) { if let Some((para_id, _)) = self.collators.get(&collator_id) { - debug_assert_eq!(para_id, &collation.receipt.parachain_index); + debug_assert_eq!(para_id, &collation.info.parachain_index); // TODO: punish if not primary? (https://github.com/paritytech/polkadot/issues/213) @@ -279,7 +279,7 @@ mod tests { pool.await_collation(relay_parent, para_id, tx1); pool.await_collation(relay_parent, para_id, tx2); pool.on_collation(primary.clone(), relay_parent, Collation { - receipt: CandidateReceipt { + info: CandidateReceipt { parachain_index: para_id, collator: primary.clone().into(), signature: Default::default(), @@ -288,7 +288,8 @@ mod tests { fees: 0, block_data_hash: [3; 32].into(), upward_messages: Vec::new(), - }, + erasure_root: [1u8; 32].into(), + }.into(), pov: make_pov(vec![4, 5, 6]), }); @@ -307,7 +308,7 @@ mod tests { assert_eq!(pool.on_new_collator(primary.clone(), para_id.clone(), PeerId::random()), Role::Primary); pool.on_collation(primary.clone(), relay_parent, Collation { - receipt: CandidateReceipt { + info: CandidateReceipt { parachain_index: para_id, collator: primary, signature: Default::default(), @@ -316,7 +317,8 @@ mod tests { fees: 0, block_data_hash: [3; 32].into(), upward_messages: Vec::new(), - }, + erasure_root: [1u8; 32].into(), + }.into(), pov: make_pov(vec![4, 5, 6]), }); diff --git a/network/src/gossip.rs b/network/src/gossip.rs index 4d537cd3ab7d..77d964e2a91a 100644 --- a/network/src/gossip.rs +++ b/network/src/gossip.rs @@ -49,16 +49,19 @@ //! Peers who send information which was not allowed under a recent neighbor packet //! will be noted as non-beneficial to Substrate's peer-set management utility. -use sp_runtime::{generic::BlockId, traits::ProvideRuntimeApi}; +use sp_runtime::{generic::BlockId, traits::{ProvideRuntimeApi, BlakeTwo256, Hash as HashT}}; use sp_blockchain::Error as ClientError; use sc_network::{config::Roles, PeerId, ReputationChange}; use sc_network::consensus_gossip::{ self as network_gossip, ValidationResult as GossipValidationResult, ValidatorContext, MessageIntent, ConsensusMessage, }; -use polkadot_validation::SignedStatement; +use polkadot_validation::{SignedStatement}; use polkadot_primitives::{Block, Hash}; -use polkadot_primitives::parachain::{ParachainHost, ValidatorId, Message as ParachainMessage}; +use polkadot_primitives::parachain::{ + ParachainHost, ValidatorId, Message as ParachainMessage, ErasureChunk as PrimitiveChunk +}; +use polkadot_erasure_coding::{self as erasure}; use codec::{Decode, Encode}; use std::collections::HashMap; @@ -92,6 +95,8 @@ mod benefit { pub const NEW_CANDIDATE: Rep = Rep::new(100, "Polkadot: New candidate"); /// When a peer sends us a previously-unknown attestation. pub const NEW_ATTESTATION: Rep = Rep::new(50, "Polkadot: New attestation"); + /// When a peer sends us a previously-unknown erasure chunk. 
+	pub const NEW_ERASURE_CHUNK: Rep = Rep::new(10, "Polkadot: New erasure chunk");
 	/// When a peer sends us a previously-unknown message packet.
 	pub const NEW_ICMP_MESSAGES: Rep = Rep::new(50, "Polkadot: New ICMP messages");
 }
@@ -114,6 +119,10 @@ mod cost {
 	pub const BAD_NEIGHBOR_PACKET: Rep = Rep::new(-300, "Polkadot: Bad neighbor");
 	/// A peer sent us an ICMP queue we haven't advertised a need for.
 	pub const UNNEEDED_ICMP_MESSAGES: Rep = Rep::new(-100, "Polkadot: Unexpected ICMP message");
+	/// A peer sent us an erasure chunk referring to a candidate that we are not aware of.
+	pub const ORPHANED_ERASURE_CHUNK: Rep = Rep::new(-10, "An erasure chunk from unknown candidate");
+	/// A peer sent us an erasure chunk that does not match candidate's erasure root.
+	pub const ERASURE_CHUNK_WRONG_ROOT: Rep = Rep::new(-100, "Chunk doesn't match encoding root");
 
 	/// A peer sent us an ICMP queue with a bad root.
 	pub fn icmp_messages_root_mismatch(n_messages: usize) -> Rep {
@@ -137,7 +146,9 @@ pub enum GossipMessage {
 	#[codec(index = "3")]
 	ParachainMessages(GossipParachainMessages),
 	// TODO: https://github.com/paritytech/polkadot/issues/253
-	// erasure-coded chunks.
+	/// A packet containing one of the erasure-coding chunks of one candidate.
+	#[codec(index = "4")]
+	ErasureChunk(ErasureChunkMessage),
 }
 
 impl GossipMessage {
@@ -187,6 +198,24 @@ impl GossipStatement {
 	}
 }
 
+/// A gossip message containing one erasure chunk of a candidate block.
+/// For each chunk of the block's erasure encoding, one of these messages is constructed.
+#[derive(Encode, Decode, Clone, Debug)]
+pub struct ErasureChunkMessage {
+	/// The chunk itself.
+	pub chunk: PrimitiveChunk,
+	/// The relay parent of the block this chunk belongs to.
+	pub relay_parent: Hash,
+	/// The hash of the candidate receipt of the block this chunk belongs to.
+	pub candidate_hash: Hash,
+}
+
+impl From<ErasureChunkMessage> for GossipMessage {
+	fn from(chk: ErasureChunkMessage) -> Self {
+		GossipMessage::ErasureChunk(chk)
+	}
+}
+
 /// A packet of messages from one parachain to another.
 ///
 /// These are all the messages posted from one parachain to another during the
@@ -303,6 +332,7 @@ pub fn register_validator(
 			peers: HashMap::new(),
 			attestation_view: Default::default(),
 			message_routing_view: Default::default(),
+			availability_store: None,
 			chain,
 		})
 	});
@@ -368,6 +398,10 @@ impl RegisteredMessageValidator {
 		RegisteredMessageValidator { inner: validator as _ }
 	}
 
+	pub fn register_availability_store(&mut self, availability_store: av_store::Store) {
+		self.inner.inner.write().availability_store = Some(availability_store);
+	}
+
 	/// Note that we perceive a new leaf of the block-DAG. We will notify our neighbors that
 	/// we now accept parachain candidate attestations and incoming message queues
 	/// relevant to this leaf.
@@ -475,6 +509,7 @@ struct Inner<C: ?Sized> {
 	peers: HashMap<PeerId, PeerData>,
 	attestation_view: AttestationView,
 	message_routing_view: MessageRoutingView,
+	availability_store: Option<av_store::Store>,
 	chain: C,
 }
 
@@ -504,6 +539,52 @@ impl<C: ?Sized + ChainContext> Inner<C> {
 		}
 	}
 
+	fn validate_erasure_chunk_packet(&mut self, msg: ErasureChunkMessage)
+		-> (GossipValidationResult<Hash>, ReputationChange)
+	{
+		if let Some(store) = &self.availability_store {
+			if let Some(receipt) = store.get_candidate(&msg.candidate_hash) {
+				let chunk_hash = erasure::branch_hash(
+					&receipt.erasure_root,
+					&msg.chunk.proof,
+					msg.chunk.index as usize
+				);
+
+				if chunk_hash != Ok(BlakeTwo256::hash(&msg.chunk.chunk)) {
+					(
+						GossipValidationResult::Discard,
+						cost::ERASURE_CHUNK_WRONG_ROOT
+					)
+				} else {
+					if let Some(awaited_chunks) = store.awaited_chunks() {
+						if awaited_chunks.contains(&(
+							msg.relay_parent,
+							receipt.erasure_root,
+							receipt.hash(),
+							msg.chunk.index,
+						)) {
+							let topic = av_store::erasure_coding_topic(
+								msg.relay_parent,
+								receipt.erasure_root,
+								msg.chunk.index,
+							);
+
+							return (
+								GossipValidationResult::ProcessAndKeep(topic),
+								benefit::NEW_ERASURE_CHUNK,
+							);
+						}
+					}
+					(GossipValidationResult::Discard, cost::NONE)
+				}
+			} else {
+				(GossipValidationResult::Discard, cost::ORPHANED_ERASURE_CHUNK)
+			}
+		} else {
+			(GossipValidationResult::Discard, cost::NONE)
+		}
+	}
+
 	fn multicast_neighbor_packet<F>(
 		&self,
 		mut send_neighbor_packet: F,
@@ -536,6 +617,7 @@ impl MessageValidator {
 			peers: HashMap::new(),
 			attestation_view: Default::default(),
 			message_routing_view: Default::default(),
+			availability_store: None,
 			chain,
 		}),
 	}
}
@@ -594,6 +676,9 @@ impl network_gossip::Validator for MessageValid
 			}
 			(res, cb)
 		}
+		Ok(GossipMessage::ErasureChunk(chunk)) => {
+			self.inner.write().validate_erasure_chunk_packet(chunk)
+		}
 	};
 
 	self.report(sender, cost_benefit);
@@ -775,6 +860,7 @@ mod tests {
 			fees: 1_000_000,
 			block_data_hash: [20u8; 32].into(),
 			upward_messages: Vec::new(),
+			erasure_root: [1u8; 32].into(),
 		};
 
 		let statement = GossipMessage::Statement(GossipStatement {
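The core of the new gossip validation is the Merkle branch check against the candidate's `erasure_root`. Isolated as a predicate, under the same imports the validator uses, it would look roughly like this sketch:

```rust
use polkadot_erasure_coding as erasure;
use polkadot_primitives::{Hash, BlakeTwo256, HashT};
use polkadot_primitives::parachain::ErasureChunk;

/// A chunk is acceptable iff its branch proof hashes back to the
/// candidate's erasure root at the chunk's own index.
fn chunk_matches_root(erasure_root: &Hash, chunk: &ErasureChunk) -> bool {
    erasure::branch_hash(erasure_root, &chunk.proof, chunk.index as usize)
        .map(|leaf| leaf == BlakeTwo256::hash(&chunk.chunk))
        .unwrap_or(false)
}
```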
diff --git a/network/src/lib.rs b/network/src/lib.rs
index dfeb37e919c2..f73dae2e7c5d 100644
--- a/network/src/lib.rs
+++ b/network/src/lib.rs
@@ -27,12 +27,13 @@ pub mod gossip;
 
 use codec::{Decode, Encode};
 use futures::sync::oneshot;
+use futures::future::Either;
 use futures::prelude::*;
-use futures03::{channel::mpsc, compat::Compat, StreamExt};
+use futures03::{channel::mpsc, compat::{Compat, Stream01CompatExt}, FutureExt, StreamExt, TryFutureExt};
 use polkadot_primitives::{Block, Hash, Header};
 use polkadot_primitives::parachain::{
-	Id as ParaId, BlockData, CollatorId, CandidateReceipt, Collation, PoVBlock,
-	StructuredUnroutedIngress, ValidatorId, OutgoingMessages,
+	Id as ParaId, CollatorId, CandidateReceipt, Collation, PoVBlock,
+	StructuredUnroutedIngress, ValidatorId, OutgoingMessages, ErasureChunk,
 };
 use sc_network::{
 	PeerId, RequestId, Context, StatusMessage as GenericFullStatus,
@@ -48,7 +49,7 @@ use log::{trace, debug, warn};
 
 use std::collections::{HashMap, HashSet};
 
-use crate::gossip::{POLKADOT_ENGINE_ID, GossipMessage};
+use crate::gossip::{POLKADOT_ENGINE_ID, GossipMessage, ErasureChunkMessage};
 
 #[cfg(test)]
 mod tests;
@@ -98,6 +99,63 @@ pub trait NetworkService: Send + Sync + 'static {
 		where F: FnOnce(&mut PolkadotProtocol, &mut dyn Context);
 }
 
+/// This is a newtype that implements a [`ProvideGossipMessages`] shim trait.
+///
+/// For any wrapped [`NetworkService`] type it implements a [`ProvideGossipMessages`].
+/// For more details see documentation of [`ProvideGossipMessages`].
+///
+/// [`NetworkService`]: ./trait.NetworkService.html
+/// [`ProvideGossipMessages`]: ../polkadot_availability_store/trait.ProvideGossipMessages.html
+pub struct AvailabilityNetworkShim<T>(pub std::sync::Arc<T>);
+
+impl<T> av_store::ProvideGossipMessages for AvailabilityNetworkShim<T>
+	where T: NetworkService
+{
+	fn gossip_messages_for(&self, topic: Hash)
+		-> Box<dyn futures03::Stream<Item = (Hash, Hash, ErasureChunk)> + Unpin + Send>
+	{
+		Box::new(self.0.gossip_messages_for(topic)
+			.compat()
+			.filter_map(|msg| async move {
+				match msg {
+					Ok(msg) => match msg.0 {
+						GossipMessage::ErasureChunk(chunk) => {
+							Some((chunk.relay_parent, chunk.candidate_hash, chunk.chunk))
+						},
+						_ => None,
+					},
+					_ => None,
+				}
+			})
+			.boxed()
+		)
+	}
+
+	fn gossip_erasure_chunk(
+		&self,
+		relay_parent: Hash,
+		candidate_hash: Hash,
+		erasure_root: Hash,
+		chunk: ErasureChunk
+	) {
+		let topic = av_store::erasure_coding_topic(relay_parent, erasure_root, chunk.index);
+		self.0.gossip_message(
+			topic,
+			GossipMessage::ErasureChunk(ErasureChunkMessage {
+				chunk,
+				relay_parent,
+				candidate_hash,
+			})
+		)
+	}
+}
+
+impl<T> Clone for AvailabilityNetworkShim<T> {
+	fn clone(&self) -> Self {
+		AvailabilityNetworkShim(self.0.clone())
+	}
+}
+
 impl NetworkService for PolkadotNetworkService {
 	fn gossip_messages_for(&self, topic: Hash) -> GossipMessageStream {
 		let (tx, rx) = std::sync::mpsc::channel();
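Using the shim is a one-liner wherever a shared network service is available; this mirrors the `service/src/lib.rs` change further down (sketch; `config` stands for an `av_store::Config`):

```rust
// The network service is already behind an Arc, so the newtype is cheap to clone.
let provider = AvailabilityNetworkShim(service.network());

// The availability store can now subscribe to and publish chunk gossip itself.
let store = av_store::Store::new(config, provider)?;
```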
@@ -280,10 +338,6 @@ pub enum Message {
 	RequestPovBlock(RequestId, Hash, Hash),
 	/// Provide requested proof-of-validation block data by candidate hash or nothing if unknown.
 	PovBlock(RequestId, Option<PoVBlock>),
-	/// Request block data (relay_parent, candidate_hash)
-	RequestBlockData(RequestId, Hash, Hash),
-	/// Provide requested block data by candidate hash or nothing.
-	BlockData(RequestId, Option<BlockData>),
 	/// Tell a collator their role.
 	CollatorRole(Role),
 	/// A collation provided by a peer. Relay parent and collation.
@@ -444,24 +498,7 @@ impl PolkadotProtocol {
 				send_polkadot_message(ctx, who, Message::PovBlock(req_id, pov_block));
 			}
-			Message::RequestBlockData(req_id, relay_parent, candidate_hash) => {
-				let block_data = self.live_validation_leaves
-					.with_pov_block(
-						&relay_parent,
-						&candidate_hash,
-						|res| res.ok().map(|b| b.block_data.clone()),
-					)
-					.or_else(|| self.availability_store.as_ref()
-						.and_then(|s| s.block_data(relay_parent, candidate_hash))
-					);
-
-				send_polkadot_message(ctx, who, Message::BlockData(req_id, block_data));
-			}
 			Message::PovBlock(req_id, data) => self.on_pov_block(ctx, who, req_id, data),
-			Message::BlockData(_req_id, _data) => {
-				// current block data is never requested bare by the node.
-				ctx.report_peer(who, cost::UNEXPECTED_MESSAGE);
-			}
 			Message::Collation(relay_parent, collation) => self.on_collation(ctx, who, relay_parent, collation),
 			Message::CollatorRole(role) => self.on_new_role(ctx, who, role),
 		}
@@ -731,8 +768,8 @@ impl PolkadotProtocol {
 		relay_parent: Hash,
 		collation: Collation
 	) {
-		let collation_para = collation.receipt.parachain_index;
-		let collated_acc = collation.receipt.collator.clone();
+		let collation_para = collation.info.parachain_index;
+		let collated_acc = collation.info.collator.clone();
 
 		match self.peers.get(&from) {
 			None => ctx.report_peer(from, cost::UNKNOWN_PEER),
@@ -743,7 +780,7 @@ impl PolkadotProtocol {
 				Some((ref acc_id, ref para_id)) => {
 					ctx.report_peer(from.clone(), benefit::EXPECTED_MESSAGE);
 					let structurally_valid = para_id == &collation_para && acc_id == &collated_acc;
-					if structurally_valid && collation.receipt.check_signature().is_ok() {
+					if structurally_valid && collation.info.check_signature().is_ok() {
 						debug!(target: "p_net", "Received collation for parachain {:?} from peer {}", para_id, from);
 						ctx.report_peer(from, benefit::GOOD_COLLATION);
 						self.collators.on_collation(acc_id.clone(), relay_parent, collation)
@@ -798,23 +835,31 @@ impl PolkadotProtocol {
 		targets: HashSet<ValidatorId>,
 		collation: Collation,
 		outgoing_targeted: OutgoingMessages,
-	) -> std::io::Result<()> {
+	) -> impl futures::future::Future<Item = (), Error = ()> {
 		debug!(target: "p_net", "Importing local collation on relay parent {:?} and parachain {:?}",
-			relay_parent, collation.receipt.parachain_index);
-
-		let outgoing_queues = polkadot_validation::outgoing_queues(&outgoing_targeted)
-			.map(|(_target, root, data)| (root, data))
-			.collect();
-
-		if let Some(ref availability_store) = self.availability_store {
-			availability_store.make_available(av_store::Data {
-				relay_parent,
-				parachain_id: collation.receipt.parachain_index,
-				candidate_hash: collation.receipt.hash(),
-				block_data: collation.pov.block_data.clone(),
-				outgoing_queues: Some(outgoing_queues),
-			})?;
-		}
+			relay_parent, collation.info.parachain_index);
+
+		let res = match self.availability_store {
+			Some(ref availability_store) => {
+				let availability_store_cloned = availability_store.clone();
+				let collation_cloned = collation.clone();
+				Either::A((async move {
+					let _ = availability_store_cloned.make_available(av_store::Data {
+						relay_parent,
+						parachain_id: collation_cloned.info.parachain_index,
+						block_data: collation_cloned.pov.block_data.clone(),
+						outgoing_queues: Some(outgoing_targeted.clone().into()),
+					}).await;
+				}
+				)
+				.unit_error()
+				.boxed()
+				.compat()
+				.then(|_| Ok(()))
+				)
+			}
+			None => Either::B(futures::future::ok::<(), ()>(())),
+		};
 
 		for (primary, cloned_collation) in self.local_collations.add_collation(relay_parent, targets, collation.clone()) {
 			match self.validators.get(&primary) {
@@ -831,7 +876,7 @@ impl PolkadotProtocol {
 			}
 		}
 
-		Ok(())
+		res
 	}
 
 	/// Give the network protocol a handle to an availability store, used for
diff --git a/network/src/router.rs b/network/src/router.rs
index 5e044856b8bf..fceb11423b71 100644
--- a/network/src/router.rs
+++ b/network/src/router.rs
@@ -29,11 +29,12 @@ use polkadot_validation::{
 };
 use polkadot_primitives::{Block, Hash};
 use polkadot_primitives::parachain::{
-	OutgoingMessages, CandidateReceipt, ParachainHost, ValidatorIndex, Collation, PoVBlock,
+	OutgoingMessages, CandidateReceipt, ParachainHost, ValidatorIndex, Collation, PoVBlock, ErasureChunk,
 };
-use crate::gossip::{RegisteredMessageValidator, GossipMessage, GossipStatement};
+use
crate::gossip::{RegisteredMessageValidator, GossipMessage, GossipStatement, ErasureChunkMessage}; use futures::prelude::*; +use futures03::{future::FutureExt, TryFutureExt}; use parking_lot::Mutex; use log::{debug, trace}; @@ -52,7 +53,7 @@ pub(crate) fn attestation_topic(parent_hash: Hash) -> Hash { BlakeTwo256::hash(&v[..]) } -/// Create a `Stream` of checked statements. +/// Create a `Stream` of checked messages. /// /// The returned stream will not terminate, so it is required to make sure that the stream is /// dropped when it is not required anymore. Otherwise, it will stick around in memory @@ -192,19 +193,22 @@ impl Router w let parent_hash = self.parent_hash(); producer.prime(self.fetcher.api().clone()) + .validate() + .boxed() + .compat() .map(move |validated| { // store the data before broadcasting statements, so other peers can fetch. knowledge.lock().note_candidate( candidate_hash, - Some(validated.pov_block().clone()), - validated.outgoing_messages().cloned(), + Some(validated.0.pov_block().clone()), + validated.0.outgoing_messages().cloned(), ); // propagate the statement. // consider something more targeted than gossip in the future. let statement = GossipStatement::new( parent_hash, - match table.import_validated(validated) { + match table.import_validated(validated.0) { None => return, Some(s) => s, } @@ -225,11 +229,19 @@ impl TableRouter for Router wh type Error = io::Error; type FetchValidationProof = validation::PoVReceiver; - fn local_collation(&self, collation: Collation, outgoing: OutgoingMessages) { + // We have fetched from a collator and here the receipt should have been already formed. + fn local_collation( + &self, + collation: Collation, + receipt: CandidateReceipt, + outgoing: OutgoingMessages, + chunks: (ValidatorIndex, &[ErasureChunk]) + ) { // produce a signed statement - let hash = collation.receipt.hash(); + let hash = receipt.hash(); + let erasure_root = receipt.erasure_root; let validated = Validated::collated_local( - collation.receipt, + receipt, collation.pov.clone(), outgoing.clone(), ); @@ -245,6 +257,20 @@ impl TableRouter for Router wh // give to network to make available. 
self.fetcher.knowledge().lock().note_candidate(hash, Some(collation.pov), Some(outgoing)); self.network().gossip_message(self.attestation_topic, statement.into()); + + for chunk in chunks.1 { + let relay_parent = self.parent_hash(); + let message = ErasureChunkMessage { + chunk: chunk.clone(), + relay_parent, + candidate_hash: hash, + }; + + self.network().gossip_message( + av_store::erasure_coding_topic(relay_parent, erasure_root, chunk.index), + message.into() + ); + } } fn fetch_pov_block(&self, candidate: &CandidateReceipt) -> Self::FetchValidationProof { diff --git a/network/src/tests/mod.rs b/network/src/tests/mod.rs index ae0a38855060..e78bbfadf857 100644 --- a/network/src/tests/mod.rs +++ b/network/src/tests/mod.rs @@ -24,7 +24,7 @@ use polkadot_validation::GenericStatement; use polkadot_primitives::{Block, Hash}; use polkadot_primitives::parachain::{ CandidateReceipt, HeadData, PoVBlock, BlockData, CollatorId, ValidatorId, - StructuredUnroutedIngress + StructuredUnroutedIngress, }; use sp_core::crypto::UncheckedInto; use codec::Encode; @@ -183,6 +183,7 @@ fn fetches_from_those_with_knowledge() { fees: 1_000_000, block_data_hash, upward_messages: Vec::new(), + erasure_root: [1u8; 32].into(), }; let candidate_hash = candidate_receipt.hash(); @@ -248,56 +249,6 @@ fn fetches_from_those_with_knowledge() { } } -#[test] -fn fetches_available_block_data() { - let mut protocol = PolkadotProtocol::new(None); - - let peer_a = PeerId::random(); - let parent_hash = [0; 32].into(); - - let block_data = BlockData(vec![1, 2, 3, 4]); - let block_data_hash = block_data.hash(); - let para_id = 5.into(); - let candidate_receipt = CandidateReceipt { - parachain_index: para_id, - collator: [255; 32].unchecked_into(), - head_data: HeadData(vec![9, 9, 9]), - signature: Default::default(), - egress_queue_roots: Vec::new(), - fees: 1_000_000, - block_data_hash, - upward_messages: Vec::new(), - }; - - let candidate_hash = candidate_receipt.hash(); - let av_store = ::av_store::Store::new_in_memory(); - - let status = Status { collating_for: None }; - - protocol.register_availability_store(av_store.clone()); - - av_store.make_available(::av_store::Data { - relay_parent: parent_hash, - parachain_id: para_id, - candidate_hash, - block_data: block_data.clone(), - outgoing_queues: None, - }).unwrap(); - - // connect peer A - { - let mut ctx = TestContext::default(); - protocol.on_connect(&mut ctx, peer_a.clone(), make_status(&status, Roles::FULL)); - } - - // peer A asks for historic block data and gets response - { - let mut ctx = TestContext::default(); - on_message(&mut protocol, &mut ctx, peer_a.clone(), Message::RequestBlockData(1, parent_hash, candidate_hash)); - assert!(ctx.has_message(peer_a, Message::BlockData(1, Some(block_data)))); - } -} - #[test] fn remove_bad_collator() { let mut protocol = PolkadotProtocol::new(None); diff --git a/network/src/tests/validation.rs b/network/src/tests/validation.rs index 999e4dbd6335..9e5bc7e9370c 100644 --- a/network/src/tests/validation.rs +++ b/network/src/tests/validation.rs @@ -30,12 +30,12 @@ use polkadot_primitives::{Block, BlockNumber, Hash, Header, BlockId}; use polkadot_primitives::parachain::{ Id as ParaId, Chain, DutyRoster, ParachainHost, TargetedMessage, ValidatorId, StructuredUnroutedIngress, BlockIngressRoots, Status, - FeeSchedule, HeadData, Retriable, CollatorId + FeeSchedule, HeadData, Retriable, CollatorId, ErasureChunk, CandidateReceipt, }; use parking_lot::Mutex; use sp_blockchain::Result as ClientResult; use sp_api::{Core, RuntimeVersion, 
 StorageProof, ApiExt};
-use sp_runtime::traits::{ApiRef, ProvideRuntimeApi};
+use sp_runtime::traits::{ApiRef, {Block as BlockT}, ProvideRuntimeApi};
 
 use std::collections::HashMap;
 use std::sync::Arc;
@@ -322,6 +322,16 @@ impl ParachainHost<Block> for RuntimeApi {
 		let (id, _) = id.unwrap();
 		Ok(NativeOrEncoded::Native(self.data.lock().ingress.get(&id).cloned()))
 	}
+
+	fn ParachainHost_get_heads_runtime_api_impl(
+		&self,
+		_at: &BlockId,
+		_: ExecutionContext,
+		_extrinsics: Option<Vec<<Block as BlockT>::Extrinsic>>,
+		_: Vec<u8>,
+	) -> ClientResult<NativeOrEncoded<Option<Vec<CandidateReceipt>>>> {
+		Ok(NativeOrEncoded::Native(Some(Vec::new())))
+	}
 }
 
 type TestValidationNetwork = crate::validation::ValidationNetwork<
@@ -399,13 +409,34 @@ impl IngressBuilder {
 	}
 }
 
+#[derive(Clone)]
+struct DummyGossipMessages;
+
+use futures::stream;
+impl av_store::ProvideGossipMessages for DummyGossipMessages {
+	fn gossip_messages_for(
+		&self,
+		_topic: Hash
+	) -> Box<dyn futures03::Stream<Item = (Hash, Hash, ErasureChunk)> + Send + Unpin> {
+		Box::new(futures03::stream::empty())
+	}
+
+	fn gossip_erasure_chunk(
+		&self,
+		_relay_parent: Hash,
+		_candidate_hash: Hash,
+		_erasure_root: Hash,
+		_chunk: ErasureChunk,
+	) {}
+}
+
 fn make_table(data: &ApiData, local_key: &Sr25519Keyring, parent_hash: Hash) -> Arc<SharedTable> {
 	use av_store::Store;
 	use sp_core::crypto::Pair;
 
 	let sr_pair = local_key.pair();
 	let local_key = polkadot_primitives::parachain::ValidatorPair::from(local_key.pair());
-	let store = Store::new_in_memory();
+	let store = Store::new_in_memory(DummyGossipMessages);
 	let (group_info, _) = ::polkadot_validation::make_group_info(
 		DutyRoster { validator_duty: data.duties.clone() },
 		&data.validators, // only possible as long as parachain crypto === aura crypto
diff --git a/network/src/validation.rs b/network/src/validation.rs
index e4c93239a365..8d8ed4d6f75c 100644
--- a/network/src/validation.rs
+++ b/network/src/validation.rs
@@ -27,7 +27,7 @@ use polkadot_validation::{
 use polkadot_primitives::{Block, BlockId, Hash};
 use polkadot_primitives::parachain::{
 	Id as ParaId, Collation, OutgoingMessages, ParachainHost, CandidateReceipt, CollatorId,
-	ValidatorId, PoVBlock
+	ValidatorId, PoVBlock,
 };
 
 use futures::prelude::*;
@@ -243,7 +243,7 @@ impl ParachainNetwork for ValidationNetwork where
 		let table_router_clone = table_router.clone();
 		let work = table_router.checked_statements()
 			.for_each(move |msg| { table_router_clone.import_statement(msg); Ok(()) });
-		executor.spawn(work.select(exit).map(|_| ()).map_err(|_| ()));
+		executor.spawn(work.select(exit.clone()).map(|_| ()).map_err(|_| ()));
 
 		table_router
 	});
diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml
index 4674871cc030..6769c8e075ba 100644
--- a/primitives/Cargo.toml
+++ b/primitives/Cargo.toml
@@ -14,6 +14,7 @@ sp-version = { git = "https://github.com/paritytech/substrate", default-features
 rstd = { package = "sp-std", git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-master" }
 runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-master" }
 polkadot-parachain = { path = "../parachain", default-features = false }
+trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-master" }
 bitvec = { version = "0.15.2", default-features = false, features = ["alloc"] }
 babe = { package = "pallet-babe", git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-master" }
@@ -26,6 +27,7 @@ default = ["std"]
 std = [
 	"parity-scale-codec/std",
 	"primitives/std",
+	"trie/std",
"trie/std", "sp-api/std", "rstd/std", "sp-version/std", diff --git a/primitives/src/parachain.rs b/primitives/src/parachain.rs index 70b7e2514b99..66be2352c7fc 100644 --- a/primitives/src/parachain.rs +++ b/primitives/src/parachain.rs @@ -30,6 +30,9 @@ use primitives::bytes; use primitives::RuntimeDebug; use application_crypto::KeyTypeId; +#[cfg(feature = "std")] +use trie::TrieConfiguration; + pub use polkadot_parachain::{ Id, ParachainDispatchOrigin, LOWEST_USER_ID, UpwardMessage, }; @@ -227,9 +230,84 @@ impl OutgoingMessages { } } +/// Messages by queue root that are stored in the availability store. +#[derive(PartialEq, Clone, Decode)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Encode, Debug))] +pub struct AvailableMessages(pub Vec<(Hash, Vec)>); + + +/// Compute a trie root for a set of messages, given the raw message data. +#[cfg(feature = "std")] +pub fn message_queue_root>(messages: I) -> Hash + where A: AsRef<[u8]> +{ + trie::trie_types::Layout::::ordered_trie_root(messages) +} + +#[cfg(feature = "std")] +impl From for AvailableMessages { + fn from(outgoing: OutgoingMessages) -> Self { + let queues = outgoing.message_queues().filter_map(|queue| { + let queue_root = message_queue_root(queue); + let queue_data = queue.iter().map(|msg| msg.clone().into()).collect(); + Some((queue_root, queue_data)) + }).collect(); + + AvailableMessages(queues) + } +} + /// Candidate receipt type. #[derive(PartialEq, Eq, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug))] +pub struct CollationInfo { + /// The ID of the parachain this is a candidate for. + pub parachain_index: Id, + /// The collator's relay-chain account ID + pub collator: CollatorId, + /// Signature on blake2-256 of the block data by collator. + pub signature: CollatorSignature, + /// Egress queue roots. Must be sorted lexicographically (ascending) + /// by parachain ID. + pub egress_queue_roots: Vec<(Id, Hash)>, + /// The head-data + pub head_data: HeadData, + /// blake2-256 Hash of block data. + pub block_data_hash: Hash, + /// Messages destined to be interpreted by the Relay chain itself. + pub upward_messages: Vec, +} + +impl From for CollationInfo { + fn from(receipt: CandidateReceipt) -> Self { + CollationInfo { + parachain_index: receipt.parachain_index, + collator: receipt.collator, + signature: receipt.signature, + egress_queue_roots: receipt.egress_queue_roots, + head_data: receipt.head_data, + block_data_hash: receipt.block_data_hash, + upward_messages: receipt.upward_messages, + } + } +} + +impl CollationInfo { + /// Check integrity vs. provided block data. + pub fn check_signature(&self) -> Result<(), ()> { + use runtime_primitives::traits::AppVerify; + + if self.signature.verify(self.block_data_hash.as_ref(), &self.collator) { + Ok(()) + } else { + Err(()) + } + } +} + +/// Candidate receipt type. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default))] pub struct CandidateReceipt { /// The ID of the parachain this is a candidate for. pub parachain_index: Id, @@ -248,6 +326,8 @@ pub struct CandidateReceipt { pub block_data_hash: Hash, /// Messages destined to be interpreted by the Relay chain itself. pub upward_messages: Vec, + /// The root of a block's erasure encoding Merkle tree. 
+ pub erasure_root: Hash, } impl CandidateReceipt { @@ -275,6 +355,18 @@ impl PartialOrd for CandidateReceipt { } } +impl PartialEq for CandidateReceipt { + fn eq(&self, info: &CollationInfo) -> bool { + self.parachain_index == info.parachain_index && + self.collator == info.collator && + self.signature == info.signature && + self.egress_queue_roots == info.egress_queue_roots && + self.head_data == info.head_data && + self.block_data_hash == info.block_data_hash && + self.upward_messages == info.upward_messages + } +} + impl Ord for CandidateReceipt { fn cmp(&self, other: &Self) -> Ordering { // TODO: compare signatures or something more sane @@ -289,7 +381,7 @@ impl Ord for CandidateReceipt { #[cfg_attr(feature = "std", derive(Debug, Encode, Decode))] pub struct Collation { /// Candidate receipt itself. - pub receipt: CandidateReceipt, + pub info: CollationInfo, /// A proof-of-validation for the receipt. pub pov: PoVBlock, } @@ -369,6 +461,18 @@ pub struct ConsolidatedIngress(pub Vec<(Id, Vec)>); #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] pub struct BlockData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +/// A chunk of erasure-encoded block data. +#[derive(PartialEq, Eq, Clone, Encode, Decode, Default)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] +pub struct ErasureChunk { + /// The erasure-encoded chunk of data belonging to the candidate block. + pub chunk: Vec, + /// The index of this erasure-encoded chunk of data. + pub index: u32, + /// Proof for this chunk's branch in the Merkle tree. + pub proof: Vec>, +} + impl BlockData { /// Compute hash of block data. #[cfg(feature = "std")] @@ -384,7 +488,7 @@ pub struct Header(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec) /// Parachain head data included in the chain. #[derive(PartialEq, Eq, Clone, PartialOrd, Ord, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Default))] pub struct HeadData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); /// Parachain validation code. @@ -483,6 +587,8 @@ pub struct Status { pub fee_schedule: FeeSchedule, } +use runtime_primitives::traits::{Block as BlockT}; + sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. pub trait ParachainHost { @@ -502,6 +608,8 @@ sp_api::decl_runtime_apis! { /// If `since` is provided, only messages since (including those in) that block /// will be included. fn ingress(to: Id, since: Option) -> Option; + /// Extract the heads that were set by this set of extrinsics. + fn get_heads(extrinsics: Vec<::Extrinsic>) -> Option>; } } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index df92eb51e978..3cc9c6b5b065 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -33,7 +33,7 @@ use sp_core::u32_trait::{_1, _2, _3, _4, _5}; use codec::{Encode, Decode}; use primitives::{ AccountId, AccountIndex, Balance, BlockNumber, Hash, Nonce, Signature, Moment, - parachain::{self, ActiveParas}, ValidityError, + parachain::{self, ActiveParas, CandidateReceipt}, ValidityError, }; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, @@ -711,6 +711,19 @@ sp_api::impl_runtime_apis! 
{ { Parachains::ingress(to, since).map(parachain::StructuredUnroutedIngress) } + fn get_heads(extrinsics: Vec<::Extrinsic>) -> Option> { + extrinsics + .into_iter() + .find_map(|ex| match UncheckedExtrinsic::decode(&mut ex.encode().as_slice()) { + Ok(ex) => match ex.function { + Call::Parachains(ParachainsCall::set_heads(heads)) => { + Some(heads.into_iter().map(|c| c.candidate).collect()) + } + _ => None, + } + Err(_) => None, + }) + } } impl fg_primitives::GrandpaApi for Runtime { diff --git a/runtime/src/parachains.rs b/runtime/src/parachains.rs index 9acb46817ced..733f77dbebee 100644 --- a/runtime/src/parachains.rs +++ b/runtime/src/parachains.rs @@ -1247,6 +1247,7 @@ mod tests { fees: 0, block_data_hash: Default::default(), upward_messages: vec![], + erasure_root: [1u8; 32].into(), } } } @@ -1269,6 +1270,7 @@ mod tests { upward_messages: upward_messages.into_iter() .map(|x| UpwardMessage { origin: x.0, data: x.1 }) .collect(), + erasure_root: [1u8; 32].into(), } } } @@ -1676,6 +1678,7 @@ mod tests { fees: 0, block_data_hash: Default::default(), upward_messages: vec![], + erasure_root: [1u8; 32].into(), }, }; @@ -1707,6 +1710,7 @@ mod tests { fees: 0, block_data_hash: Default::default(), upward_messages: vec![], + erasure_root: [1u8; 32].into(), } }; @@ -1722,6 +1726,7 @@ mod tests { fees: 0, block_data_hash: Default::default(), upward_messages: vec![], + erasure_root: [1u8; 32].into(), } }; @@ -1761,6 +1766,7 @@ mod tests { fees: 0, block_data_hash: Default::default(), upward_messages: vec![], + erasure_root: [1u8; 32].into(), } }; @@ -1798,6 +1804,7 @@ mod tests { fees: 0, block_data_hash: Default::default(), upward_messages: vec![], + erasure_root: [1u8; 32].into(), } }; @@ -1843,6 +1850,7 @@ mod tests { fees: 0, block_data_hash: Default::default(), upward_messages: vec![], + erasure_root: [1u8; 32].into(), } }; @@ -1859,6 +1867,7 @@ mod tests { fees: 0, block_data_hash: Default::default(), upward_messages: vec![], + erasure_root: [1u8; 32].into(), } }; @@ -1920,6 +1929,7 @@ mod tests { fees: 0, block_data_hash: Default::default(), upward_messages: vec![], + erasure_root: [1u8; 32].into(), } }; make_attestations(&mut candidate_c); diff --git a/runtime/src/registrar.rs b/runtime/src/registrar.rs index efe74860e092..a7b25e16e027 100644 --- a/runtime/src/registrar.rs +++ b/runtime/src/registrar.rs @@ -836,6 +836,7 @@ mod tests { fees: 0, block_data_hash, upward_messages: vec![], + erasure_root: [1; 32].into(), }; let payload = (Statement::Valid(candidate.hash()), System::parent_hash()).encode(); let roster = Parachains::calculate_duty_roster().0.validator_duty; diff --git a/service/src/lib.rs b/service/src/lib.rs index 4d1d8ed6addd..6deca8b1fcc6 100644 --- a/service/src/lib.rs +++ b/service/src/lib.rs @@ -224,7 +224,7 @@ pub fn new_full(config: Configuration) } }; - let gossip_validator = network_gossip::register_validator( + let mut gossip_validator = network_gossip::register_validator( service.network(), (is_known, client.clone()), ); @@ -239,7 +239,9 @@ pub fn new_full(config: Configuration) av_store::Store::new(::av_store::Config { cache_size: None, path, - })? + }, + polkadot_network::AvailabilityNetworkShim(service.network()), + )? 
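// Editorial sketch, not part of the patch: the second argument to
// `Store::new` above must implement `av_store::ProvideGossipMessages`,
// which the store uses to listen for and to announce erasure chunks.
// A no-op provider (like the `DummyGossipMessages` used in this patch's
// tests) looks roughly like this; the stream's item type is garbled in
// this rendering of the patch and is an assumption here:
#[derive(Clone)]
struct NoopGossipMessages;

impl av_store::ProvideGossipMessages for NoopGossipMessages {
    fn gossip_messages_for(
        &self,
        _topic: Hash,
    ) -> Box<dyn futures03::Stream<Item = (Hash, Hash, ErasureChunk)> + Send + Unpin> {
        // Never yields anything: no chunk gossip is consumed.
        Box::new(futures03::stream::empty())
    }

    fn gossip_erasure_chunk(
        &self,
        _relay_parent: Hash,
        _candidate_hash: Hash,
        _erasure_root: Hash,
        _chunk: ErasureChunk,
    ) {
        // Nothing is announced either.
    }
}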
}; { @@ -249,6 +251,11 @@ pub fn new_full(config: Configuration) ); } + { + let availability_store = availability_store.clone(); + gossip_validator.register_availability_store(availability_store); + } + // collator connections and validation network both fulfilled by this let validation_network = ValidationNetwork::new( service.network(), @@ -265,7 +272,7 @@ pub fn new_full(config: Configuration) service.transaction_pool(), Arc::new(service.spawn_task_handle()), service.keystore(), - availability_store, + availability_store.clone(), polkadot_runtime::constants::time::SLOT_DURATION, max_block_data_size, ); @@ -275,6 +282,14 @@ pub fn new_full(config: Configuration) let can_author_with = consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let block_import = availability_store.block_import( + block_import, + client.clone(), + Arc::new(service.spawn_task_handle()), + service.keystore(), + )?; + let babe_config = babe::BabeParams { keystore: service.keystore(), client, diff --git a/validation/Cargo.toml b/validation/Cargo.toml index a1ce329464a7..490dea99a8f7 100644 --- a/validation/Cargo.toml +++ b/validation/Cargo.toml @@ -19,6 +19,7 @@ codec = { package = "parity-scale-codec", version = "1.1.0", default-features = availability_store = { package = "polkadot-availability-store", path = "../availability-store" } parachain = { package = "polkadot-parachain", path = "../parachain" } polkadot-primitives = { path = "../primitives" } +polkadot-erasure-coding = { path = "../erasure-coding" } polkadot-runtime = { path = "../runtime" } table = { package = "polkadot-statement-table", path = "../statement-table" } grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" } diff --git a/validation/src/attestation_service.rs b/validation/src/attestation_service.rs index 1521568a4393..f09307b7e029 100644 --- a/validation/src/attestation_service.rs +++ b/validation/src/attestation_service.rs @@ -26,15 +26,14 @@ use std::{thread, time::{Duration, Instant}, sync::Arc}; use client::{BlockchainEvents, BlockBody}; -use sp_blockchain::{HeaderBackend, Result as ClientResult}; +use sp_blockchain::HeaderBackend; use block_builder::BlockBuilderApi; use consensus::SelectChain; -use availability_store::Store as AvailabilityStore; use futures::prelude::*; use futures03::{TryStreamExt as _, StreamExt as _}; use log::error; -use polkadot_primitives::{Block, BlockId}; -use polkadot_primitives::parachain::{CandidateReceipt, ParachainHost}; +use polkadot_primitives::Block; +use polkadot_primitives::parachain::ParachainHost; use runtime_primitives::traits::{ProvideRuntimeApi}; use babe_primitives::BabeApi; use keystore::KeyStorePtr; @@ -47,62 +46,6 @@ use super::{Network, Collators}; type TaskExecutor = Arc + Send>> + Send + Sync>; -/// Gets a list of the candidates in a block. 
-pub(crate) fn fetch_candidates>(client: &P, block: &BlockId) - -> ClientResult>> -{ - use codec::{Encode, Decode}; - use polkadot_runtime::{Call, ParachainsCall, UncheckedExtrinsic as RuntimeExtrinsic}; - - let extrinsics = client.block_body(block)?; - Ok(match extrinsics { - Some(extrinsics) => extrinsics - .into_iter() - .filter_map(|ex| RuntimeExtrinsic::decode(&mut ex.encode().as_slice()).ok()) - .filter_map(|ex| match ex.function { - Call::Parachains(ParachainsCall::set_heads(heads)) => { - Some(heads.into_iter().map(|c| c.candidate)) - } - _ => None, - }) - .next(), - None => None, - }) -} - -// creates a task to prune redundant entries in availability store upon block finalization -// -// NOTE: this will need to be changed to finality notification rather than -// block import notifications when the consensus switches to non-instant finality. -fn prune_unneeded_availability
<P>(client: Arc<P>
, availability_store: AvailabilityStore) - -> impl Future + Send - where P: Send + Sync + BlockchainEvents + BlockBody + 'static -{ - client.finality_notification_stream() - .map(|v| Ok::<_, ()>(v)).compat() - .for_each(move |notification| { - let hash = notification.hash; - let parent_hash = notification.header.parent_hash; - let candidate_hashes = match fetch_candidates(&*client, &BlockId::hash(hash)) { - Ok(Some(candidates)) => candidates.map(|c| c.hash()).collect(), - Ok(None) => { - warn!("Could not extract candidates from block body of imported block {:?}", hash); - return Ok(()) - } - Err(e) => { - warn!("Failed to fetch block body for imported block {:?}: {:?}", hash, e); - return Ok(()) - } - }; - - if let Err(e) = availability_store.candidates_finalized(parent_hash, candidate_hashes) { - warn!(target: "validation", "Failed to prune unneeded available data: {:?}", e); - } - - Ok(()) - }) -} - /// Parachain candidate attestation service handle. pub(crate) struct ServiceHandle { thread: Option>, @@ -116,7 +59,6 @@ pub(crate) fn start( parachain_validation: Arc>, thread_pool: TaskExecutor, keystore: KeyStorePtr, - availability_store: AvailabilityStore, max_block_data_size: Option, ) -> ServiceHandle where @@ -197,15 +139,6 @@ pub(crate) fn start( error!("Failed to spawn old sessions pruning task"); } - let prune_available = prune_unneeded_availability(client, availability_store) - .select(exit.clone()) - .then(|_| Ok(())); - - // spawn this on the tokio executor since it's fine on a thread pool. - if let Err(_) = thread_pool.execute(Box::new(prune_available)) { - error!("Failed to spawn available pruning task"); - } - if let Err(e) = runtime.block_on(exit) { debug!("BFT event loop error {:?}", e); } diff --git a/validation/src/collation.rs b/validation/src/collation.rs index 4eebd0b3f833..e44e140c5c56 100644 --- a/validation/src/collation.rs +++ b/validation/src/collation.rs @@ -21,10 +21,12 @@ use std::sync::Arc; -use polkadot_primitives::{Block, Hash, BlockId, Balance, parachain::{ - CollatorId, ConsolidatedIngress, StructuredUnroutedIngress, CandidateReceipt, ParachainHost, - Id as ParaId, Collation, TargetedMessage, OutgoingMessages, UpwardMessage, FeeSchedule, +use polkadot_primitives::{BlakeTwo256, Block, Hash, HashT, BlockId, Balance, parachain::{ + CollatorId, ConsolidatedIngress, StructuredUnroutedIngress, CandidateReceipt, CollationInfo, ParachainHost, + Id as ParaId, Collation, TargetedMessage, OutgoingMessages, UpwardMessage, FeeSchedule, ErasureChunk, + HeadData, PoVBlock, }}; +use polkadot_erasure_coding::{self as erasure}; use runtime_primitives::traits::ProvideRuntimeApi; use parachain::{wasm_executor::{self, ExternalitiesError, ExecutionMode}, MessageRef, UpwardMessageRef}; use trie::TrieConfiguration; @@ -100,10 +102,10 @@ impl CollationFetch { impl Future for CollationFetch where P::Api: ParachainHost, { - type Item = (Collation, OutgoingMessages); + type Item = (Collation, OutgoingMessages, Balance); type Error = C::Error; - fn poll(&mut self) -> Poll<(Collation, OutgoingMessages), C::Error> { + fn poll(&mut self) -> Poll<(Collation, OutgoingMessages, Balance), C::Error> { loop { let collation = { let parachain = self.parachain.clone(); @@ -123,15 +125,15 @@ impl Future for CollationFetch ); match res { - Ok(e) => { - return Ok(Async::Ready((collation, e))) + Ok((messages, fees)) => { + return Ok(Async::Ready((collation, messages, fees))) } Err(e) => { debug!("Failed to validate parachain due to API error: {}", e); // just continue if we got a bad collation or 
failed to validate self.live_fetch = None; - self.collators.note_bad_collator(collation.receipt.collator) + self.collators.note_bad_collator(collation.info.collator) } } } @@ -145,6 +147,8 @@ pub enum Error { Client(sp_blockchain::Error), /// Wasm validation error WasmValidation(wasm_executor::Error), + /// Erasure-encoding error. + Erasure(erasure::Error), /// Collated for inactive parachain #[display(fmt = "Collated for inactive parachain: {:?}", _0)] InactiveParachain(ParaId), @@ -175,6 +179,13 @@ pub enum Error { /// Parachain validation produced wrong fees to charge to parachain. #[display(fmt = "Parachain validation produced wrong relay-chain fees (expected: {:?}, got {:?})", expected, got)] FeesChargedInvalid { expected: Balance, got: Balance }, + /// Candidate block has an erasure-encoded root that mismatches the actual + /// erasure-encoded root of block data and extrinsics. + #[display(fmt = "Got unexpected erasure root (expected: {:?}, got {:?})", expected, got)] + ErasureRootMismatch { expected: Hash, got: Hash }, + /// Candidate block collation info doesn't match candidate receipt. + #[display(fmt = "Got receipt mismatch for candidate {:?}", candidate)] + CandidateReceiptMismatch { candidate: Hash }, } impl std::error::Error for Error { @@ -325,27 +336,51 @@ impl Externalities { // Performs final checks of validity, producing the outgoing message data. fn final_checks( self, - candidate: &CandidateReceipt, - ) -> Result { - if &self.upward != &candidate.upward_messages { + upward_messages: &[UpwardMessage], + egress_queue_roots: &[(ParaId, Hash)], + fees_charged: Option, + ) -> Result<(OutgoingMessages, Balance), Error> { + if self.upward != upward_messages { return Err(Error::UpwardMessagesInvalid { - expected: candidate.upward_messages.clone(), + expected: upward_messages.to_vec(), got: self.upward.clone(), }); } - if self.fees_charged != candidate.fees { - return Err(Error::FeesChargedInvalid { - expected: candidate.fees.clone(), - got: self.fees_charged.clone(), - }); + if let Some(fees_charged) = fees_charged { + if self.fees_charged != fees_charged { + return Err(Error::FeesChargedInvalid { + expected: fees_charged.clone(), + got: self.fees_charged.clone(), + }); + } } - check_egress( + let messages = check_egress( self.outgoing, - &candidate.egress_queue_roots[..], - ) + &egress_queue_roots[..], + )?; + + Ok((messages, self.fees_charged)) + } +} + +/// Validate an erasure chunk against an expected root. +pub fn validate_chunk( + root: &Hash, + chunk: &ErasureChunk, +) -> Result<(), Error> { + let expected = erasure::branch_hash(root, &chunk.proof, chunk.index as usize)?; + let got = BlakeTwo256::hash(&chunk.chunk); + + if expected != got { + return Err(Error::ErasureRootMismatch { + expected, + got, + }) } + + Ok(()) } /// Validate incoming messages against expected roots. @@ -382,30 +417,34 @@ pub fn validate_incoming( Ok(()) } -/// Check whether a given collation is valid. Returns `Ok` on success, error otherwise. -/// -/// This assumes that basic validity checks have been done: -/// - Block data hash is the same as linked in candidate receipt. -pub fn validate_collation
<P>
( +// A utility function that implements most of the collation validation logic. +// +// Reused by `validate_collation` and `validate_receipt`. +// Returns outgoing messages and fees charged for later reuse. +fn do_validation
<P>
( client: &P, relay_parent: &BlockId, - collation: &Collation, + pov_block: &PoVBlock, + para_id: ParaId, max_block_data_size: Option, -) -> Result where + fees_charged: Option, + head_data: &HeadData, + queue_roots: &Vec<(ParaId, Hash)>, + upward_messages: &Vec, +) -> Result<(OutgoingMessages, Balance), Error> where P: ProvideRuntimeApi, P::Api: ParachainHost, { use parachain::{IncomingMessage, ValidationParams}; if let Some(max_size) = max_block_data_size { - let block_data_size = collation.pov.block_data.0.len() as u64; + let block_data_size = pov_block.block_data.0.len() as u64; if block_data_size > max_size { return Err(Error::BlockDataTooBig { size: block_data_size, max_size }); } } let api = client.runtime_api(); - let para_id = collation.receipt.parachain_index; let validation_code = api.parachain_code(relay_parent, para_id)? .ok_or_else(|| Error::InactiveParachain(para_id))?; @@ -415,12 +454,12 @@ pub fn validate_collation
<P>
( let roots = api.ingress(relay_parent, para_id, None)? .ok_or_else(|| Error::InactiveParachain(para_id))?; - validate_incoming(&roots, &collation.pov.ingress)?; + validate_incoming(&roots, &pov_block.ingress)?; let params = ValidationParams { parent_head: chain_status.head_data.0, - block_data: collation.pov.block_data.0.clone(), - ingress: collation.pov.ingress.0.iter() + block_data: pov_block.block_data.0.clone(), + ingress: pov_block.ingress.0.iter() .flat_map(|&(source, ref messages)| { messages.iter().map(move |msg| IncomingMessage { source, @@ -431,7 +470,7 @@ pub fn validate_collation
<P>
( }; let mut ext = Externalities { - parachain_index: collation.receipt.parachain_index.clone(), + parachain_index: para_id.clone(), outgoing: Vec::new(), upward: Vec::new(), free_balance: chain_status.balance, @@ -441,11 +480,17 @@ pub fn validate_collation
<P>
( match wasm_executor::validate_candidate(&validation_code, params, &mut ext, ExecutionMode::Remote) { Ok(result) => { - if result.head_data == collation.receipt.head_data.0 { - ext.final_checks(&collation.receipt) + if result.head_data == head_data.0 { + let (messages, fees) = ext.final_checks( + upward_messages, + queue_roots, + fees_charged + )?; + + Ok((messages, fees)) } else { Err(Error::WrongHeadData { - expected: collation.receipt.head_data.0.clone(), + expected: head_data.0.clone(), got: result.head_data }) } @@ -454,6 +499,132 @@ pub fn validate_collation
<P>
( } } +/// Produce a `CandidateReceipt` and erasure encoding chunks with a given collation. +/// +/// To produce a `CandidateReceipt` among other things the root of erasure encoding of +/// the block data and messages needs to be known. To avoid redundant re-computations +/// of erasure encoding this method creates an encoding and produces a candidate with +/// encoding's root returning both for re-use. +pub fn produce_receipt_and_chunks( + n_validators: usize, + pov: &PoVBlock, + messages: &OutgoingMessages, + fees: Balance, + info: &CollationInfo, +) -> Result<(CandidateReceipt, Vec), Error> +{ + let erasure_chunks = erasure::obtain_chunks( + n_validators, + &pov.block_data, + Some(&messages.clone().into()) + )?; + + let branches = erasure::branches(erasure_chunks.as_ref()); + let erasure_root = branches.root(); + + let chunks: Vec<_> = erasure_chunks + .iter() + .zip(branches.map(|(proof, _)| proof)) + .enumerate() + .map(|(index, (chunk, proof))| ErasureChunk { + // branches borrows the original chunks, but this clone could probably be dodged. + chunk: chunk.clone(), + index: index as u32, + proof, + }) + .collect(); + + let receipt = CandidateReceipt { + parachain_index: info.parachain_index, + collator: info.collator.clone(), + signature: info.signature.clone(), + head_data: info.head_data.clone(), + egress_queue_roots: info.egress_queue_roots.clone(), + fees, + block_data_hash: info.block_data_hash.clone(), + upward_messages: info.upward_messages.clone(), + erasure_root, + }; + + Ok((receipt, chunks)) +} + +/// Check if a given candidate receipt is valid with a given collation. +/// +/// This assumes that basic validity checks have been done: +/// - Block data hash is the same as linked in collation info and a receipt. +pub fn validate_receipt
<P>
( + client: &P, + relay_parent: &BlockId, + pov_block: &PoVBlock, + receipt: &CandidateReceipt, + max_block_data_size: Option, +) -> Result<(OutgoingMessages, Vec), Error> where + P: ProvideRuntimeApi, + P::Api: ParachainHost, +{ + let (messages, _fees) = do_validation( + client, + relay_parent, + pov_block, + receipt.parachain_index, + max_block_data_size, + Some(receipt.fees), + &receipt.head_data, + &receipt.egress_queue_roots, + &receipt.upward_messages, + )?; + + let api = client.runtime_api(); + let validators = api.validators(&relay_parent)?; + let n_validators = validators.len(); + + let (validated_receipt, chunks) = produce_receipt_and_chunks( + n_validators, + pov_block, + &messages, + receipt.fees, + &receipt.clone().into(), + )?; + + if validated_receipt.erasure_root != receipt.erasure_root { + return Err(Error::ErasureRootMismatch { + expected: validated_receipt.erasure_root, + got: receipt.erasure_root, + }); + } + + Ok((messages, chunks)) +} + +/// Check whether a given collation is valid. Returns `Ok` on success, error otherwise. +/// +/// This assumes that basic validity checks have been done: +/// - Block data hash is the same as linked in collation info. +pub fn validate_collation
<P>
( + client: &P, + relay_parent: &BlockId, + collation: &Collation, + max_block_data_size: Option, +) -> Result<(OutgoingMessages, Balance), Error> where + P: ProvideRuntimeApi, + P::Api: ParachainHost, +{ + let para_id = collation.info.parachain_index; + + do_validation( + client, + relay_parent, + &collation.pov, + para_id, + max_block_data_size, + None, + &collation.info.head_data, + &collation.info.egress_queue_roots, + &collation.info.upward_messages, + ) +} + #[cfg(test)] mod tests { use super::*; @@ -550,8 +721,13 @@ mod tests { UpwardMessage{ data: vec![42], origin: ParachainDispatchOrigin::Signed }, UpwardMessage{ data: vec![69], origin: ParachainDispatchOrigin::Parachain }, ], + erasure_root: [1u8; 32].into(), }; - assert!(ext().final_checks(&receipt).is_err()); + assert!(ext().final_checks( + &receipt.upward_messages, + &receipt.egress_queue_roots, + Some(receipt.fees), + ).is_err()); let receipt = CandidateReceipt { parachain_index: 5.into(), collator: Default::default(), @@ -563,8 +739,13 @@ mod tests { upward_messages: vec![ UpwardMessage{ data: vec![42], origin: ParachainDispatchOrigin::Signed }, ], + erasure_root: [1u8; 32].into(), }; - assert!(ext().final_checks(&receipt).is_err()); + assert!(ext().final_checks( + &receipt.upward_messages, + &receipt.egress_queue_roots, + Some(receipt.fees), + ).is_err()); let receipt = CandidateReceipt { parachain_index: 5.into(), collator: Default::default(), @@ -576,8 +757,13 @@ mod tests { upward_messages: vec![ UpwardMessage{ data: vec![69], origin: ParachainDispatchOrigin::Parachain }, ], + erasure_root: [1u8; 32].into(), }; - assert!(ext().final_checks(&receipt).is_err()); + assert!(ext().final_checks( + &receipt.upward_messages, + &receipt.egress_queue_roots, + Some(receipt.fees), + ).is_err()); let receipt = CandidateReceipt { parachain_index: 5.into(), collator: Default::default(), @@ -589,8 +775,13 @@ mod tests { upward_messages: vec![ UpwardMessage{ data: vec![42], origin: ParachainDispatchOrigin::Parachain }, ], + erasure_root: [1u8; 32].into(), }; - assert!(ext().final_checks(&receipt).is_ok()); + assert!(ext().final_checks( + &receipt.upward_messages, + &receipt.egress_queue_roots, + Some(receipt.fees), + ).is_ok()); } #[test] diff --git a/validation/src/lib.rs b/validation/src/lib.rs index d44e6ac767dc..61291c2adabe 100644 --- a/validation/src/lib.rs +++ b/validation/src/lib.rs @@ -48,9 +48,10 @@ use availability_store::Store as AvailabilityStore; use parking_lot::Mutex; use polkadot_primitives::{Hash, Block, BlockId, BlockNumber, Header}; use polkadot_primitives::parachain::{ - Id as ParaId, Chain, DutyRoster, OutgoingMessages, CandidateReceipt, - ParachainHost, AttestedCandidate, Statement as PrimitiveStatement, Message, - Collation, PoVBlock, ValidatorSignature, ValidatorPair, ValidatorId + Id as ParaId, Chain, DutyRoster, CandidateReceipt, + ParachainHost, AttestedCandidate, Statement as PrimitiveStatement, Message, OutgoingMessages, + Collation, PoVBlock, ErasureChunk, ValidatorSignature, ValidatorIndex, + ValidatorPair, ValidatorId, }; use primitives::Pair; use runtime_primitives::traits::{ProvideRuntimeApi, DigestFor}; @@ -60,7 +61,7 @@ use txpool_api::{TransactionPool, InPoolTransaction}; use attestation_service::ServiceHandle; use futures::prelude::*; -use futures03::{future::{self, Either}, FutureExt, StreamExt}; +use futures03::{future::{self, Either}, FutureExt, StreamExt, TryFutureExt}; use collation::CollationFetch; use dynamic_inclusion::DynamicInclusion; use inherents::InherentData; @@ -69,10 +70,14 @@ use 
log::{info, debug, warn, trace, error}; use keystore::KeyStorePtr; use sp_api::ApiExt; -type TaskExecutor = Arc + Send>> + Send + Sync>; +type TaskExecutor = + Arc< + dyn futures::future::Executor + Send>> + + Send + Sync>; pub use self::collation::{ validate_collation, validate_incoming, message_queue_root, egress_roots, Collators, + produce_receipt_and_chunks, }; pub use self::error::Error; pub use self::shared_table::{ @@ -106,7 +111,13 @@ pub trait TableRouter: Clone { /// Call with local candidate data. This will make the data available on the network, /// and sign, import, and broadcast a statement about the candidate. - fn local_collation(&self, collation: Collation, outgoing: OutgoingMessages); + fn local_collation( + &self, + collation: Collation, + receipt: CandidateReceipt, + outgoing: OutgoingMessages, + chunks: (ValidatorIndex, &[ErasureChunk]) + ); /// Fetch validation proof for a specific candidate. fn fetch_pov_block(&self, candidate: &CandidateReceipt) -> Self::FetchValidationProof; @@ -158,7 +169,12 @@ pub fn sign_table_statement(statement: &Statement, key: &ValidatorPair, parent_h } /// Check signature on table statement. -pub fn check_statement(statement: &Statement, signature: &ValidatorSignature, signer: ValidatorId, parent_hash: &Hash) -> bool { +pub fn check_statement( + statement: &Statement, + signature: &ValidatorSignature, + signer: ValidatorId, + parent_hash: &Hash +) -> bool { use runtime_primitives::traits::AppVerify; let mut encoded = PrimitiveStatement::from(statement.clone()).encode(); @@ -181,12 +197,14 @@ pub fn make_group_info( } let mut local_validation = None; + let mut local_index = 0; let mut map = HashMap::new(); let duty_iter = authorities.iter().zip(&roster.validator_duty); - for (authority, v_duty) in duty_iter { + for (i, (authority, v_duty)) in duty_iter.enumerate() { if Some(authority) == local_id.as_ref() { local_validation = Some(v_duty.clone()); + local_index = i; } match *v_duty { @@ -206,7 +224,8 @@ pub fn make_group_info( let local_duty = local_validation.map(|v| LocalDuty { - validation: v + validation: v, + index: local_index as u32, }); Ok((map, local_duty)) @@ -305,6 +324,21 @@ impl ParachainValidation where debug!(target: "validation", "Active parachains: {:?}", active_parachains); + // If we are a validator, we need to store our index in this round in availability store. + // This will tell which erasure chunk we should store. 
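+ // For example, with ten validators and a local index of 3, this node
+ // will be expected to keep chunk 3 of each candidate's erasure encoding.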
+ if let Some(ref local_duty) = local_duty { + if let Err(e) = self.availability_store.add_validator_index_and_n_validators( + &parent_hash, + local_duty.index, + validators.len() as u32, + ) { + warn!( + target: "validation", + "Failed to add validator index and n_validators to the availability-store: {:?}", e + ) + } + } + let table = Arc::new(SharedTable::new( validators.clone(), group_info, @@ -322,8 +356,8 @@ impl ParachainValidation where exit.clone(), ); - if let Some(Chain::Parachain(id)) = local_duty.as_ref().map(|d| d.validation) { - self.launch_work(parent_hash, id, router, max_block_data_size, exit); + if let Some((Chain::Parachain(id), index)) = local_duty.as_ref().map(|d| (d.validation, d.index)) { + self.launch_work(parent_hash, id, router, max_block_data_size, validators.len(), index, exit); } let tracker = Arc::new(AttestationTracker { @@ -349,10 +383,10 @@ impl ParachainValidation where validation_para: ParaId, build_router: N::BuildTableRouter, max_block_data_size: Option, + authorities_num: usize, + local_id: ValidatorIndex, exit: exit_future::Exit, ) { - use availability_store::Data; - let (collators, client) = (self.collators.clone(), self.client.clone()); let availability_store = self.availability_store.clone(); @@ -362,42 +396,55 @@ impl ParachainValidation where validation_para, relay_parent, collators, - client, + client.clone(), max_block_data_size, ); collation_work.then(move |result| match result { - Ok((collation, outgoing_targeted)) => { - let outgoing_queues = crate::outgoing_queues(&outgoing_targeted) - .map(|(_target, root, data)| (root, data)) - .collect(); - - let res = availability_store.make_available(Data { - relay_parent, - parachain_id: collation.receipt.parachain_index, - candidate_hash: collation.receipt.hash(), - block_data: collation.pov.block_data.clone(), - outgoing_queues: Some(outgoing_queues), - }); - - match res { - Ok(()) => { - // TODO: https://github.com/paritytech/polkadot/issues/51 - // Erasure-code and provide merkle branches. - router.local_collation(collation, outgoing_targeted); + Ok((collation, outgoing_targeted, fees_charged)) => { + match produce_receipt_and_chunks( + authorities_num, + &collation.pov, + &outgoing_targeted, + fees_charged, + &collation.info, + ) { + Ok((receipt, chunks)) => { + // Apparently the `async move` block is the only way to convince + // the compiler that we are not moving values out of borrowed context. 
+ let av_clone = availability_store.clone(); + let chunks_clone = chunks.clone(); + let receipt_clone = receipt.clone(); + + let res = async move { + if let Err(e) = av_clone.clone().add_erasure_chunks( + relay_parent.clone(), + receipt_clone, + chunks_clone, + ).await { + warn!(target: "validation", "Failed to add erasure chunks: {}", e); + } + } + .unit_error() + .boxed() + .compat() + .then(move |_| { + router.local_collation(collation, receipt, outgoing_targeted, (local_id, &chunks)); + Ok(()) + }); + + + Some(res) + } + Err(e) => { + warn!(target: "validation", "Failed to produce a receipt: {:?}", e); + None } - Err(e) => warn!( - target: "validation", - "Failed to make collation data available: {:?}", - e, - ), } - - Ok(()) } Err(e) => { warn!(target: "validation", "Failed to collate candidate: {:?}", e); - Ok(()) + None } }) }; @@ -408,6 +455,7 @@ impl ParachainValidation where warn!(target: "validation" , "Failed to build table router: {:?}", e); }) .and_then(with_router) + .then(|_| Ok(())) .select(exit) .then(|_| Ok(())); @@ -479,7 +527,6 @@ impl ProposerFactory where parachain_validation.clone(), thread_pool, keystore.clone(), - availability_store, max_block_data_size, ); @@ -541,6 +588,7 @@ impl consensus::Environment for ProposerFactory { work: Work, relay_parent: Hash, + local_index: usize, availability_store: AvailabilityStore, max_block_data_size: Option, } @@ -272,23 +273,28 @@ impl ParachainWork { pub fn prime(self, api: Arc
<P>
) -> PrimedParachainWork< Fetch, - impl Send + FnMut(&BlockId, &Collation) -> Result, + impl Send + FnMut(&BlockId, &PoVBlock, &CandidateReceipt) -> Result<(OutgoingMessages, ErasureChunk), ()>, > where P: Send + Sync + 'static, P::Api: ParachainHost, { let max_block_data_size = self.max_block_data_size; - let validate = move |id: &_, collation: &_| { - let res = crate::collation::validate_collation( + let local_index = self.local_index; + + let validate = move |id: &_, pov_block: &_, receipt: &_| { + let res = crate::collation::validate_receipt( &*api, id, - collation, + pov_block, + receipt, max_block_data_size, ); match res { - Ok(e) => Ok(e), + Ok((messages, mut chunks)) => { + Ok((messages, chunks.swap_remove(local_index))) + } Err(e) => { debug!(target: "validation", "Encountered bad collation: {}", e); Err(()) @@ -301,7 +307,7 @@ impl ParachainWork { /// Prime the parachain work with a custom validation function. pub fn prime_with(self, validate: F) -> PrimedParachainWork - where F: FnMut(&BlockId, &Collation) -> Result + where F: FnMut(&BlockId, &PoVBlock, &CandidateReceipt) -> Result<(OutgoingMessages, ErasureChunk), ()> { PrimedParachainWork { inner: self, validate } } @@ -318,23 +324,21 @@ pub struct PrimedParachainWork { validate: F, } -impl Future for PrimedParachainWork +impl PrimedParachainWork where Fetch: Future, - F: FnMut(&BlockId, &Collation) -> Result, + F: FnMut(&BlockId, &PoVBlock, &CandidateReceipt) -> Result<(OutgoingMessages, ErasureChunk), ()>, Err: From<::std::io::Error>, { - type Item = Validated; - type Error = Err; - - fn poll(&mut self) -> Poll { - let work = &mut self.inner.work; - let candidate = &work.candidate_receipt; + pub async fn validate(mut self) -> Result<(Validated, Option), Err> { + use futures03::compat::Future01CompatExt; + let candidate = &self.inner.work.candidate_receipt; + let pov_block = self.inner.work.fetch.compat().await?; - let pov_block = futures::try_ready!(work.fetch.poll()); let validation_res = (self.validate)( &BlockId::hash(self.inner.relay_parent), - &Collation { pov: pov_block.clone(), receipt: candidate.clone() }, + &pov_block, + &candidate, ); let candidate_hash = candidate.hash(); @@ -342,35 +346,30 @@ impl Future for PrimedParachainWork debug!(target: "validation", "Making validity statement about candidate {}: is_good? 
{:?}", candidate_hash, validation_res.is_ok()); - let (validity_statement, result) = match validation_res { - Err(()) => ( - GenericStatement::Invalid(candidate_hash), - Validation::Invalid(pov_block), - ), - Ok(outgoing_targeted) => { - let outgoing_queues = crate::outgoing_queues(&outgoing_targeted) - .map(|(_target, root, data)| (root, data)) - .collect(); - - self.inner.availability_store.make_available(Data { - relay_parent: self.inner.relay_parent, - parachain_id: work.candidate_receipt.parachain_index, - candidate_hash, - block_data: pov_block.block_data.clone(), - outgoing_queues: Some(outgoing_queues), - })?; - - ( - GenericStatement::Valid(candidate_hash), - Validation::Valid(pov_block, outgoing_targeted) - ) + match validation_res { + Err(()) => Ok(( + Validated { + statement: GenericStatement::Invalid(candidate_hash), + result: Validation::Invalid(pov_block), + }, + None, + )), + Ok((outgoing_targeted, our_chunk)) => { + self.inner.availability_store.add_erasure_chunk( + self.inner.relay_parent, + candidate.clone(), + our_chunk.clone(), + ).await?; + + Ok(( + Validated { + statement: GenericStatement::Valid(candidate_hash), + result: Validation::Valid(pov_block, outgoing_targeted), + }, + Some(our_chunk), + )) } - }; - - Ok(Async::Ready(Validated { - statement: validity_statement, - result, - })) + } } } @@ -573,8 +572,11 @@ mod tests { use super::*; use sp_keyring::Sr25519Keyring; use primitives::crypto::UncheckedInto; - use polkadot_primitives::parachain::{BlockData, ConsolidatedIngress}; - use futures::future; + use polkadot_primitives::parachain::{AvailableMessages, BlockData, ConsolidatedIngress, Collation}; + use polkadot_erasure_coding::{self as erasure}; + use availability_store::ProvideGossipMessages; + + use futures::{future}; fn pov_block_with_data(data: Vec) -> PoVBlock { PoVBlock { @@ -583,14 +585,39 @@ mod tests { } } + #[derive(Clone)] + struct DummyGossipMessages; + + impl ProvideGossipMessages for DummyGossipMessages { + fn gossip_messages_for( + &self, + _topic: Hash + ) -> Box + Unpin + Send> { + Box::new(futures03::stream::empty()) + } + + fn gossip_erasure_chunk( + &self, + _relay_parent: Hash, + _candidate_hash: Hash, + _erasure_root: Hash, + _chunk: ErasureChunk, + ) {} + } + #[derive(Clone)] struct DummyRouter; impl TableRouter for DummyRouter { type Error = ::std::io::Error; type FetchValidationProof = future::FutureResult; - fn local_collation(&self, _collation: Collation, _outgoing: OutgoingMessages) { - } + fn local_collation( + &self, + _collation: Collation, + _candidate: CandidateReceipt, + _outgoing: OutgoingMessages, + _chunks: (ValidatorIndex, &[ErasureChunk]) + ) {} fn fetch_pov_block(&self, _candidate: &CandidateReceipt) -> Self::FetchValidationProof { future::ok(pov_block_with_data(vec![1, 2, 3, 4, 5])) @@ -622,7 +649,7 @@ mod tests { groups, Some(local_key.clone()), parent_hash, - AvailabilityStore::new_in_memory(), + AvailabilityStore::new_in_memory(DummyGossipMessages), None, ); @@ -635,6 +662,7 @@ mod tests { fees: 1_000_000, block_data_hash: [2; 32].into(), upward_messages: Vec::new(), + erasure_root: [1u8; 32].into(), }; let candidate_statement = GenericStatement::Candidate(candidate); @@ -677,7 +705,7 @@ mod tests { groups, Some(local_key.clone()), parent_hash, - AvailabilityStore::new_in_memory(), + AvailabilityStore::new_in_memory(DummyGossipMessages), None, ); @@ -690,6 +718,7 @@ mod tests { fees: 1_000_000, block_data_hash: [2; 32].into(), upward_messages: Vec::new(), + erasure_root: [1u8; 32].into(), }; let candidate_statement = 
GenericStatement::Candidate(candidate); @@ -709,10 +738,13 @@ mod tests { #[test] fn evaluate_makes_block_data_available() { - let store = AvailabilityStore::new_in_memory(); + let store = AvailabilityStore::new_in_memory(DummyGossipMessages); let relay_parent = [0; 32].into(); let para_id = 5.into(); let pov_block = pov_block_with_data(vec![1, 2, 3]); + let block_data_hash = [2; 32].into(); + let local_index = 0; + let n_validators = 2; let candidate = CandidateReceipt { parachain_index: para_id, @@ -721,39 +753,62 @@ mod tests { head_data: ::polkadot_primitives::parachain::HeadData(vec![1, 2, 3, 4]), egress_queue_roots: Vec::new(), fees: 1_000_000, - block_data_hash: [2; 32].into(), + block_data_hash, upward_messages: Vec::new(), + erasure_root: [1u8; 32].into(), }; let hash = candidate.hash(); + store.add_validator_index_and_n_validators( + &relay_parent, + local_index as u32, + n_validators as u32, + ).unwrap(); + let producer: ParachainWork> = ParachainWork { work: Work { candidate_receipt: candidate, fetch: future::ok(pov_block.clone()), }, + local_index, relay_parent, availability_store: store.clone(), max_block_data_size: None, }; - let validated = producer.prime_with(|_, _| Ok(OutgoingMessages { outgoing_messages: Vec::new() })) - .wait() - .unwrap(); - - assert_eq!(validated.pov_block(), &pov_block); - assert_eq!(validated.statement, GenericStatement::Valid(hash)); - - assert_eq!(store.block_data(relay_parent, hash).unwrap(), pov_block.block_data); - // TODO: check that a message queue is included by root. + let validated = futures03::executor::block_on(producer.prime_with(|_, _, _| Ok(( + OutgoingMessages { outgoing_messages: Vec::new() }, + ErasureChunk { + chunk: vec![1, 2, 3], + index: local_index as u32, + proof: vec![], + }, + ))).validate()).unwrap(); + + assert_eq!(validated.0.pov_block(), &pov_block); + assert_eq!(validated.0.statement, GenericStatement::Valid(hash)); + + if let Some(messages) = validated.0.outgoing_messages() { + let available_messages: AvailableMessages = messages.clone().into(); + for (root, queue) in available_messages.0 { + assert_eq!(store.queue_by_root(&root), Some(queue)); + } + } + assert!(store.get_erasure_chunk(&relay_parent, block_data_hash, local_index).is_some()); + assert!(store.get_erasure_chunk(&relay_parent, block_data_hash, local_index + 1).is_none()); } #[test] fn full_availability() { - let store = AvailabilityStore::new_in_memory(); + let store = AvailabilityStore::new_in_memory(DummyGossipMessages); let relay_parent = [0; 32].into(); let para_id = 5.into(); let pov_block = pov_block_with_data(vec![1, 2, 3]); + let block_data_hash = pov_block.block_data.hash(); + let local_index = 0; + let n_validators = 2; + let ex = Some(AvailableMessages(Vec::new())); let candidate = CandidateReceipt { parachain_index: para_id, @@ -764,27 +819,48 @@ mod tests { fees: 1_000_000, block_data_hash: [2; 32].into(), upward_messages: Vec::new(), + erasure_root: [1u8; 32].into(), }; - let hash = candidate.hash(); + let chunks = erasure::obtain_chunks(n_validators, &pov_block.block_data, ex.as_ref()).unwrap(); + + store.add_validator_index_and_n_validators( + &relay_parent, + local_index as u32, + n_validators as u32, + ).unwrap(); let producer = ParachainWork { work: Work { candidate_receipt: candidate, fetch: future::ok::<_, ::std::io::Error>(pov_block.clone()), }, + local_index, relay_parent, availability_store: store.clone(), max_block_data_size: None, }; - let validated = producer.prime_with(|_, _| Ok(OutgoingMessages { outgoing_messages: Vec::new() 
})) - .wait() - .unwrap(); - - assert_eq!(validated.pov_block(), &pov_block); - - assert_eq!(store.block_data(relay_parent, hash).unwrap(), pov_block.block_data); + let validated = futures03::executor::block_on(producer.prime_with(|_, _, _| Ok(( + OutgoingMessages { outgoing_messages: Vec::new() }, + ErasureChunk { + chunk: chunks[local_index].clone(), + index: local_index as u32, + proof: vec![], + }, + ))).validate()).unwrap(); + + assert_eq!(validated.0.pov_block(), &pov_block); + + if let Some(messages) = validated.0.outgoing_messages() { + let available_messages: AvailableMessages = messages.clone().into(); + for (root, queue) in available_messages.0 { + assert_eq!(store.queue_by_root(&root), Some(queue)); + } + } + // This works since there are only two validators and one erasure chunk should be + // enough to reconstruct the block data. + assert_eq!(store.block_data(relay_parent, block_data_hash).unwrap(), pov_block.block_data); // TODO: check that a message queue is included by root. } @@ -813,7 +889,7 @@ mod tests { groups, Some(local_key.clone()), parent_hash, - AvailabilityStore::new_in_memory(), + AvailabilityStore::new_in_memory(DummyGossipMessages), None, ); @@ -826,6 +902,7 @@ mod tests { fees: 1_000_000, block_data_hash: [2; 32].into(), upward_messages: Vec::new(), + erasure_root: [1u8; 32].into(), }; let hash = candidate.hash(); @@ -879,7 +956,7 @@ mod tests { groups, Some(local_key.clone()), parent_hash, - AvailabilityStore::new_in_memory(), + AvailabilityStore::new_in_memory(DummyGossipMessages), None, ); @@ -892,6 +969,7 @@ mod tests { fees: 1_000_000, block_data_hash: [2; 32].into(), upward_messages: Vec::new(), + erasure_root: [1u8; 32].into(), }; let hash = candidate.hash();
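// Editorial sketch, not part of the patch: the erasure round-trip that the
// tests above rely on, using the erasure-coding API as exercised in this
// patch (`obtain_chunks`, `branches`, `branch_hash`). Exact signatures are
// assumptions based on the call sites here.
use polkadot_erasure_coding as erasure;
use polkadot_primitives::{BlakeTwo256, HashT};
use polkadot_primitives::parachain::{AvailableMessages, BlockData};

fn erasure_roundtrip(
    n_validators: usize,
    block_data: &BlockData,
    messages: Option<&AvailableMessages>,
) -> Result<(), erasure::Error> {
    // Encode the available data into one chunk per validator.
    let chunks = erasure::obtain_chunks(n_validators, block_data, messages)?;

    // Build the Merkle tree over the chunks; its root is the candidate's
    // `erasure_root`, and each branch becomes the `proof` of an `ErasureChunk`.
    let branches = erasure::branches(chunks.as_ref());
    let root = branches.root();
    let proofs: Vec<_> = branches.map(|(proof, _)| proof).collect();

    // Verify every chunk the way `validate_chunk` does: the leaf hash
    // recovered from the branch proof must equal blake2-256 of the chunk.
    for (index, (chunk, proof)) in chunks.iter().zip(proofs).enumerate() {
        let recovered = erasure::branch_hash(&root, &proof, index)?;
        assert_eq!(recovered, BlakeTwo256::hash(chunk));
    }
    Ok(())
}
// The `full_availability` test above follows this flow with n_validators = 2,
// which is why a single stored chunk suffices to reconstruct the block data.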