(gossip: PGM) -> Self
+ where PGM: ProvideGossipMessages + Send + Sync + Clone + 'static
+ {
+ let inner = InnerStore::new_in_memory();
+ let worker = Arc::new(Worker::start(inner.clone(), gossip));
+ let to_worker = worker.to_worker().clone();
+
+ Self {
+ inner,
+ worker,
+ to_worker,
}
}
+ /// Obtain a [`BlockImport`] implementation to import blocks into this store.
+ ///
+ /// This block import will act upon all newly imported blocks, sending information
+ /// about the parachain heads included in them to this `Store`'s background worker.
+ /// The user may create multiple [`BlockImport`] instances with this call.
+ ///
+ /// [`BlockImport`]: https://substrate.dev/rustdocs/v1.0/substrate_consensus_common/trait.BlockImport.html
+ pub fn block_import<I, P>(
+ &self,
+ wrapped_block_import: I,
+ client: Arc<P>,
+ thread_pool: TaskExecutor,
+ keystore: KeyStorePtr,
+ ) -> ClientResult<(AvailabilityBlockImport<I, P>)>
+ where
+ P: ProvideRuntimeApi + BlockchainEvents<Block> + BlockBody<Block> + Send + Sync + 'static,
+ P::Api: ParachainHost<Block>,
+ P::Api: ApiExt<Block>,
+ {
+ let to_worker = self.to_worker.clone();
+
+ let import = AvailabilityBlockImport::new(
+ self.inner.clone(),
+ client,
+ wrapped_block_import,
+ thread_pool,
+ keystore,
+ to_worker,
+ );
+
+ Ok(import)
+ }
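// Illustrative usage sketch (editorial, not part of this patch; `client`, `inner_block_import`,
// `thread_pool` and `keystore` are assumed to come from the surrounding service setup):
//
//     let import = store.block_import(inner_block_import, client.clone(), thread_pool, keystore)?;
//     // `import` wraps the inner block import and notifies this store's background worker
//     // about the parachain heads included in every imported block.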
+
/// Make some data available provisionally.
///
/// Validators with the responsibility of maintaining availability
@@ -117,174 +231,164 @@ impl Store {
/// to be present with the exception of the case where there is no message data
/// due to the block's invalidity. Determination of invalidity is beyond the
/// scope of this function.
- pub fn make_available(&self, data: Data) -> io::Result<()> {
- let mut tx = DBTransaction::new();
-
- // note the meta key.
- let mut v = match self.inner.get(columns::META, data.relay_parent.as_ref()) {
- Ok(Some(raw)) => Vec::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed"),
- Ok(None) => Vec::new(),
- Err(e) => {
- warn!(target: "availability", "Error reading from availability store: {:?}", e);
- Vec::new()
- }
- };
-
- v.push(data.candidate_hash);
- tx.put_vec(columns::META, &data.relay_parent[..], v.encode());
-
- tx.put_vec(
- columns::DATA,
- block_data_key(&data.relay_parent, &data.candidate_hash).as_slice(),
- data.block_data.encode()
- );
-
- if let Some(outgoing_queues) = data.outgoing_queues {
- // This is kept forever and not pruned.
- for (root, messages) in outgoing_queues {
- tx.put_vec(
- columns::DATA,
- root.as_ref(),
- messages.encode(),
- );
- }
-
+ ///
+ /// This method will send the `Data` to the background worker, allowing the caller to
+ /// asynchronously wait for the result.
+ pub async fn make_available(&self, data: Data) -> io::Result<()> {
+ let (s, r) = oneshot::channel();
+ let msg = WorkerMsg::MakeAvailable(MakeAvailable {
+ data,
+ result: s,
+ });
+
+ let _ = self.to_worker.unbounded_send(msg);
+
+ if let Ok(Ok(())) = r.await {
+ Ok(())
+ } else {
+ Err(io::Error::new(io::ErrorKind::Other, "making data available failed"))
}
- self.inner.write(tx)
}
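// Call-site sketch (editorial, not part of this patch): the future resolves once the background
// worker acknowledges the write over the oneshot channel, so callers simply await it:
//
//     store.make_available(data).await?;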
- /// Note that a set of candidates have been included in a finalized block with given hash and parent hash.
- pub fn candidates_finalized(&self, parent: Hash, finalized_candidates: HashSet<Hash>) -> io::Result<()> {
- let mut tx = DBTransaction::new();
-
- let v = match self.inner.get(columns::META, &parent[..]) {
- Ok(Some(raw)) => Vec::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed"),
- Ok(None) => Vec::new(),
- Err(e) => {
- warn!(target: "availability", "Error reading from availability store: {:?}", e);
- Vec::new()
- }
- };
- tx.delete(columns::META, &parent[..]);
-
- for candidate_hash in v {
- if !finalized_candidates.contains(&candidate_hash) {
- tx.delete(columns::DATA, block_data_key(&parent, &candidate_hash).as_slice());
- }
- }
+ /// Get a set of all chunks we are waiting for grouped by
+ /// `(relay_parent, erasure_root, candidate_hash, our_id)`.
+ pub fn awaited_chunks(&self) -> Option<HashSet<(Hash, Hash, Hash, u32)>> {
+ self.inner.awaited_chunks()
+ }
- self.inner.write(tx)
+ /// Query which candidates were included in the relay chain block, by the block's parent.
+ pub fn get_candidates_in_relay_block(&self, relay_block: &Hash) -> Option<Vec<Hash>> {
+ self.inner.get_candidates_in_relay_block(relay_block)
}
- /// Query block data.
- pub fn block_data(&self, relay_parent: Hash, candidate_hash: Hash) -> Option<BlockData> {
- let encoded_key = block_data_key(&relay_parent, &candidate_hash);
- match self.inner.get(columns::DATA, &encoded_key[..]) {
- Ok(Some(raw)) => Some(
- BlockData::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed")
- ),
- Ok(None) => None,
- Err(e) => {
- warn!(target: "availability", "Error reading from availability store: {:?}", e);
- None
- }
- }
+ /// Make a validator's index and the number of validators at a relay parent available.
+ ///
+ /// This information is needed before `add_candidates_in_relay_block` is called
+ /// since that call forms the awaited frontier of chunks.
+ /// In the current implementation this function is called from `get_or_instantiate` at
+ /// the start of the parachain agreement process on top of some parent hash.
+ pub fn add_validator_index_and_n_validators(
+ &self,
+ relay_parent: &Hash,
+ validator_index: u32,
+ n_validators: u32,
+ ) -> io::Result<()> {
+ self.inner.add_validator_index_and_n_validators(
+ relay_parent,
+ validator_index,
+ n_validators,
+ )
}
- /// Query message queue data by message queue root hash.
- pub fn queue_by_root(&self, queue_root: &Hash) -> Option<Vec<Message>> {
- match self.inner.get(columns::DATA, queue_root.as_ref()) {
- Ok(Some(raw)) => Some(
- <_>::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed")
- ),
- Ok(None) => None,
- Err(e) => {
- warn!(target: "availability", "Error reading from availability store: {:?}", e);
- None
- }
- }
+ /// Query a validator's index and n_validators by relay parent.
+ pub fn get_validator_index_and_n_validators(&self, relay_parent: &Hash) -> Option<(u32, u32)> {
+ self.inner.get_validator_index_and_n_validators(relay_parent)
}
-}
-#[cfg(test)]
-mod tests {
- use super::*;
+ /// Adds an erasure chunk to storage.
+ ///
+ /// The chunk should be checked for validity against the root of encoding
+ /// and its proof prior to calling this.
+ ///
+ /// This method will send the chunk to the background worker, allowing the caller to
+ /// asynchronously wait for the result.
+ pub async fn add_erasure_chunk(
+ &self,
+ relay_parent: Hash,
+ receipt: CandidateReceipt,
+ chunk: ErasureChunk,
+ ) -> io::Result<()> {
+ self.add_erasure_chunks(relay_parent, receipt, vec![chunk]).await
+ }
- #[test]
- fn finalization_removes_unneeded() {
- let relay_parent = [1; 32].into();
+ /// Adds a set of erasure chunks to storage.
+ ///
+ /// The chunks should be checked for validity against the root of encoding
+ /// and their proofs prior to calling this.
+ ///
+ /// This method will send the chunks to the background worker, allowing the caller to
+ /// asynchronously wait for the result.
+ pub async fn add_erasure_chunks<I>(
+ &self,
+ relay_parent: Hash,
+ receipt: CandidateReceipt,
+ chunks: I,
+ ) -> io::Result<()>
+ where I: IntoIterator<Item = ErasureChunk>
+ {
+ self.add_candidate(relay_parent, receipt.clone()).await?;
+ let (s, r) = oneshot::channel();
+ let chunks = chunks.into_iter().collect();
+ let candidate_hash = receipt.hash();
+ let msg = WorkerMsg::Chunks(Chunks {
+ relay_parent,
+ candidate_hash,
+ chunks,
+ result: s,
+ });
- let para_id_1 = 5.into();
- let para_id_2 = 6.into();
+ let _ = self.to_worker.unbounded_send(msg);
- let candidate_1 = [2; 32].into();
- let candidate_2 = [3; 32].into();
+ if let Ok(Ok(())) = r.await {
+ Ok(())
+ } else {
+ Err(io::Error::new(io::ErrorKind::Other, "adding erasure chunks failed"))
+ }
+ }
- let block_data_1 = BlockData(vec![1, 2, 3]);
- let block_data_2 = BlockData(vec![4, 5, 6]);
+ /// Queries an erasure chunk by the relay parent of its block, the block data hash, and the chunk index.
+ pub fn get_erasure_chunk(
+ &self,
+ relay_parent: &Hash,
+ block_data_hash: Hash,
+ index: usize,
+ ) -> Option<ErasureChunk> {
+ self.inner.get_erasure_chunk(relay_parent, block_data_hash, index)
+ }
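// Illustrative flow (editorial, not part of this patch): chunks received for a known receipt are
// handed to the store and can then be queried back by the block data hash and chunk index:
//
//     store.add_erasure_chunks(relay_parent, receipt.clone(), vec![chunk]).await?;
//     let stored = store.get_erasure_chunk(&relay_parent, receipt.block_data_hash, index);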
- let store = Store::new_in_memory();
- store.make_available(Data {
- relay_parent,
- parachain_id: para_id_1,
- candidate_hash: candidate_1,
- block_data: block_data_1.clone(),
- outgoing_queues: None,
- }).unwrap();
+ /// Stores a candidate receipt.
+ pub async fn add_candidate(
+ &self,
+ relay_parent: Hash,
+ receipt: CandidateReceipt,
+ ) -> io::Result<()> {
+ let (s, r) = oneshot::channel();
- store.make_available(Data {
+ let msg = WorkerMsg::ParachainBlocks(ParachainBlocks {
relay_parent,
- parachain_id: para_id_2,
- candidate_hash: candidate_2,
- block_data: block_data_2.clone(),
- outgoing_queues: None,
- }).unwrap();
-
- assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1);
- assert_eq!(store.block_data(relay_parent, candidate_2).unwrap(), block_data_2);
+ blocks: vec![(receipt, None)],
+ result: s,
+ });
- store.candidates_finalized(relay_parent, [candidate_1].iter().cloned().collect()).unwrap();
+ let _ = self.to_worker.unbounded_send(msg);
- assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1);
- assert!(store.block_data(relay_parent, candidate_2).is_none());
+ if let Ok(Ok(())) = r.await {
+ Ok(())
+ } else {
+ Err(io::Error::new(io::ErrorKind::Other, "adding candidate receipt failed"))
+ }
}
- #[test]
- fn queues_available_by_queue_root() {
- let relay_parent = [1; 32].into();
- let para_id = 5.into();
- let candidate = [2; 32].into();
- let block_data = BlockData(vec![1, 2, 3]);
-
- let message_queue_root_1 = [0x42; 32].into();
- let message_queue_root_2 = [0x43; 32].into();
-
- let message_a = Message(vec![1, 2, 3, 4]);
- let message_b = Message(vec![4, 5, 6, 7]);
+ /// Queries a candidate receipt by its hash.
+ pub fn get_candidate(&self, candidate_hash: &Hash) -> Option<CandidateReceipt> {
+ self.inner.get_candidate(candidate_hash)
+ }
- let outgoing_queues = vec![
- (message_queue_root_1, vec![message_a.clone()]),
- (message_queue_root_2, vec![message_b.clone()]),
- ];
+ /// Query block data.
+ pub fn block_data(&self, relay_parent: Hash, block_data_hash: Hash) -> Option<BlockData> {
+ self.inner.block_data(relay_parent, block_data_hash)
+ }
- let store = Store::new_in_memory();
- store.make_available(Data {
- relay_parent,
- parachain_id: para_id,
- candidate_hash: candidate,
- block_data: block_data.clone(),
- outgoing_queues: Some(outgoing_queues),
- }).unwrap();
-
- assert_eq!(
- store.queue_by_root(&message_queue_root_1),
- Some(vec![message_a]),
- );
+ /// Query block data by corresponding candidate receipt's hash.
+ pub fn block_data_by_candidate(&self, relay_parent: Hash, candidate_hash: Hash)
+ -> Option<BlockData>
+ {
+ self.inner.block_data_by_candidate(relay_parent, candidate_hash)
+ }
- assert_eq!(
- store.queue_by_root(&message_queue_root_2),
- Some(vec![message_b]),
- );
+ /// Query message queue data by message queue root hash.
+ pub fn queue_by_root(&self, queue_root: &Hash) -> Option<Vec<Message>> {
+ self.inner.queue_by_root(queue_root)
}
}
diff --git a/availability-store/src/store.rs b/availability-store/src/store.rs
new file mode 100644
index 000000000000..5458a64b1712
--- /dev/null
+++ b/availability-store/src/store.rs
@@ -0,0 +1,689 @@
+// Copyright 2018 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use kvdb_rocksdb::{Database, DatabaseConfig};
+use kvdb::{KeyValueDB, DBTransaction};
+use codec::{Encode, Decode};
+use polkadot_erasure_coding::{self as erasure};
+use polkadot_primitives::{
+ Hash,
+ parachain::{
+ BlockData, CandidateReceipt, Message, ErasureChunk
+ },
+};
+
+use log::{trace, warn};
+use std::collections::HashSet;
+use std::sync::Arc;
+use std::iter::FromIterator;
+use std::io;
+
+use crate::{LOG_TARGET, Data, Config};
+
+mod columns {
+ pub const DATA: Option<u32> = Some(0);
+ pub const META: Option<u32> = Some(1);
+ pub const NUM_COLUMNS: u32 = 2;
+}
+
+#[derive(Clone)]
+pub struct Store {
+ inner: Arc<dyn KeyValueDB>,
+}
+
+fn block_data_key(relay_parent: &Hash, block_data_hash: &Hash) -> Vec<u8> {
+ (relay_parent, block_data_hash, 0i8).encode()
+}
+
+fn erasure_chunks_key(relay_parent: &Hash, block_data_hash: &Hash) -> Vec<u8> {
+ (relay_parent, block_data_hash, 1i8).encode()
+}
+
+fn awaited_chunks_key() -> Vec<u8> {
+ "awaited_chunks_key".encode()
+}
+
+fn available_chunks_key(relay_parent: &Hash, erasure_root: &Hash) -> Vec<u8> {
+ (relay_parent, erasure_root, 2i8).encode()
+}
+
+fn block_to_candidate_key(block_data_hash: &Hash) -> Vec<u8> {
+ (block_data_hash, 1i8).encode()
+}
+
+fn candidate_key(candidate_hash: &Hash) -> Vec<u8> {
+ (candidate_hash, 2i8).encode()
+}
+
+fn validator_index_and_n_validators_key(relay_parent: &Hash) -> Vec<u8> {
+ (relay_parent, 3i8).encode()
+}
+
+fn candidates_in_relay_chain_block_key(relay_block: &Hash) -> Vec<u8> {
+ (relay_block, 4i8).encode()
+}
+
+fn erasure_roots_in_relay_chain_block_key(relay_block: &Hash) -> Vec<u8> {
+ (relay_block, 5i8).encode()
+}
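// A note on the keying scheme (editorial, not part of this patch): apart from the fixed
// `awaited_chunks_key`, every key is the SCALE encoding of a tuple ending in a small integer
// discriminant, so different kinds of entries for the same hashes never collide, e.g.:
//
//     assert_ne!(
//         block_data_key(&relay_parent, &block_data_hash),     // ends in 0i8
//         erasure_chunks_key(&relay_parent, &block_data_hash), // ends in 1i8
//     );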
+
+impl Store {
+ /// Create a new `Store` with the given config on disk.
+ pub(super) fn new(config: Config) -> io::Result<Self> {
+ let mut db_config = DatabaseConfig::with_columns(Some(columns::NUM_COLUMNS));
+
+ if let Some(cache_size) = config.cache_size {
+ let mut memory_budget = std::collections::HashMap::new();
+ for i in 0..columns::NUM_COLUMNS {
+ memory_budget.insert(Some(i), cache_size / columns::NUM_COLUMNS as usize);
+ }
+
+ db_config.memory_budget = memory_budget;
+ }
+
+ let path = config.path.to_str().ok_or_else(|| io::Error::new(
+ io::ErrorKind::Other,
+ format!("Bad database path: {:?}", config.path),
+ ))?;
+
+ let db = Database::open(&db_config, &path)?;
+
+ Ok(Store {
+ inner: Arc::new(db),
+ })
+ }
+
+ /// Create a new `Store` in-memory. Useful for tests.
+ pub(super) fn new_in_memory() -> Self {
+ Store {
+ inner: Arc::new(::kvdb_memorydb::create(columns::NUM_COLUMNS)),
+ }
+ }
+
+ /// Make some data available provisionally.
+ pub(crate) fn make_available(&self, data: Data) -> io::Result<()> {
+ let mut tx = DBTransaction::new();
+
+ // note the meta key.
+ let mut v = self.query_inner(columns::META, data.relay_parent.as_ref()).unwrap_or(Vec::new());
+ v.push(data.block_data.hash());
+ tx.put_vec(columns::META, &data.relay_parent[..], v.encode());
+
+ tx.put_vec(
+ columns::DATA,
+ block_data_key(&data.relay_parent, &data.block_data.hash()).as_slice(),
+ data.block_data.encode()
+ );
+
+ if let Some(outgoing_queues) = data.outgoing_queues {
+ // This is kept forever and not pruned.
+ for (root, messages) in outgoing_queues.0 {
+ tx.put_vec(
+ columns::DATA,
+ root.as_ref(),
+ messages.encode(),
+ );
+ }
+
+ }
+
+ self.inner.write(tx)
+ }
+
+ /// Get a set of all chunks we are waiting for grouped by
+ /// `(relay_parent, erasure_root, candidate_hash, our_id)`.
+ pub fn awaited_chunks(&self) -> Option<HashSet<(Hash, Hash, Hash, u32)>> {
+ self.query_inner(columns::META, &awaited_chunks_key()).map(|vec: Vec<(Hash, Hash, Hash, u32)>| {
+ HashSet::from_iter(vec.into_iter())
+ })
+ }
+
+ /// Adds a set of candidate hashes that were included in a relay block, keyed by the block's parent.
+ ///
+ /// If we already possess the receipts for these candidates _and_ our position at the specified
+ /// relay chain block, the awaited frontier of the erasure chunks will also be extended.
+ ///
+ /// This method modifies the erasure chunks awaited frontier by adding this validator's
+ /// chunks from `candidates` to it. In order to do so the information about this validator's
+ /// position at parent `relay_parent` should be known to the store prior to calling this
+ /// method, in other words `add_validator_index_and_n_validators` should be called for
+ /// the given `relay_parent` before calling this function.
+ pub(crate) fn add_candidates_in_relay_block(
+ &self,
+ relay_parent: &Hash,
+ candidates: Vec<Hash>,
+ ) -> io::Result<()> {
+ let mut tx = DBTransaction::new();
+ let dbkey = candidates_in_relay_chain_block_key(relay_parent);
+
+ if let Some((validator_index, _)) = self.get_validator_index_and_n_validators(relay_parent) {
+ let candidates = candidates.clone();
+ let awaited_frontier: Vec<(Hash, Hash, Hash, u32)> = self
+ .query_inner(columns::META, &awaited_chunks_key())
+ .unwrap_or_else(|| Vec::new());
+
+ let mut awaited_frontier: HashSet<(Hash, Hash, Hash, u32)> =
+ HashSet::from_iter(awaited_frontier.into_iter());
+
+ awaited_frontier.extend(candidates.into_iter().filter_map(|candidate| {
+ self.get_candidate(&candidate)
+ .map(|receipt| (relay_parent.clone(), receipt.erasure_root, candidate, validator_index))
+ }));
+ let awaited_frontier = Vec::from_iter(awaited_frontier.into_iter());
+ tx.put_vec(columns::META, &awaited_chunks_key(), awaited_frontier.encode());
+ }
+ tx.put_vec(columns::DATA, &dbkey, candidates.encode());
+
+ self.inner.write(tx)
+ }
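// Call-order sketch (editorial, not part of this patch), mirroring the `awaited_chunks_works`
// test below: the awaited frontier is only extended once this validator's position is known.
//
//     store.add_validator_index_and_n_validators(&relay_parent, our_index, n_validators)?;
//     store.add_candidate(&receipt)?;
//     store.add_candidates_in_relay_block(&relay_parent, vec![receipt.hash()])?;
//     // `awaited_chunks()` now contains (relay_parent, erasure_root, candidate_hash, our_index).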
+
+ /// Query which candidates were included in the relay chain block, by the block's parent.
+ pub fn get_candidates_in_relay_block(&self, relay_block: &Hash) -> Option<Vec<Hash>> {
+ let dbkey = candidates_in_relay_chain_block_key(relay_block);
+
+ self.query_inner(columns::DATA, &dbkey)
+ }
+
+ /// Adds a set of erasure chunk roots that were included in a relay block, keyed by the block's parent.
+ pub(crate) fn add_erasure_roots_in_relay_block(
+ &self,
+ relay_parent: &Hash,
+ erasure_roots: Vec<Hash>,
+ ) -> io::Result<()> {
+ let mut tx = DBTransaction::new();
+ let dbkey = erasure_roots_in_relay_chain_block_key(relay_parent);
+
+ tx.put_vec(columns::DATA, &dbkey, erasure_roots.encode());
+
+ self.inner.write(tx)
+ }
+
+ /// Make a validator's index and the number of validators at a relay parent available.
+ pub(crate) fn add_validator_index_and_n_validators(
+ &self,
+ relay_parent: &Hash,
+ validator_index: u32,
+ n_validators: u32,
+ ) -> io::Result<()> {
+ let mut tx = DBTransaction::new();
+ let dbkey = validator_index_and_n_validators_key(relay_parent);
+
+ tx.put_vec(columns::META, &dbkey, (validator_index, n_validators).encode());
+
+ self.inner.write(tx)
+ }
+
+ /// Query a validator's index and n_validators by relay parent.
+ pub fn get_validator_index_and_n_validators(&self, relay_parent: &Hash) -> Option<(u32, u32)> {
+ let dbkey = validator_index_and_n_validators_key(relay_parent);
+
+ self.query_inner(columns::META, &dbkey)
+ }
+
+ /// Add a set of chunks.
+ ///
+ /// The same as `add_erasure_chunk` but adds a set of chunks in one atomic transaction.
+ /// Checks that all chunks have the same `relay_parent`, `block_data_hash` and `parachain_id` fields.
+ pub fn add_erasure_chunks<I>(
+ &self,
+ n_validators: u32,
+ relay_parent: &Hash,
+ candidate_hash: &Hash,
+ chunks: I,
+ ) -> io::Result<()>
+ where I: IntoIterator<Item = ErasureChunk>
+ {
+ if let Some(receipt) = self.get_candidate(candidate_hash) {
+ let mut tx = DBTransaction::new();
+ let dbkey = erasure_chunks_key(relay_parent, &receipt.block_data_hash);
+
+ let mut v = self.query_inner(columns::DATA, &dbkey).unwrap_or(Vec::new());
+
+ let av_chunks_key = available_chunks_key(relay_parent, &receipt.erasure_root);
+ let mut have_chunks = self.query_inner(columns::META, &av_chunks_key).unwrap_or(Vec::new());
+
+ let awaited_frontier: Option<Vec<(Hash, Hash, Hash, u32)>> = self.query_inner(
+ columns::META,
+ &awaited_chunks_key()
+ );
+
+ for chunk in chunks.into_iter() {
+ if !have_chunks.contains(&chunk.index) {
+ have_chunks.push(chunk.index);
+ }
+ v.push(chunk);
+ }
+
+ if let Some(mut awaited_frontier) = awaited_frontier {
+ awaited_frontier.retain(|&(p, r, c, index)| {
+ !(
+ *relay_parent == p &&
+ r == receipt.erasure_root &&
+ c == receipt.hash() &&
+ have_chunks.contains(&index)
+ )
+ });
+ tx.put_vec(columns::META, &awaited_chunks_key(), awaited_frontier.encode());
+ }
+
+ // If there is no block data and no messages in the store at this point,
+ // check whether they can be reconstructed now, and add them to the store if they can.
+ if let Ok(None) = self.inner.get(
+ columns::DATA,
+ &block_data_key(&relay_parent, &receipt.block_data_hash)
+ ) {
+ if let Ok((block_data, outgoing_queues)) = erasure::reconstruct(
+ n_validators as usize,
+ v.iter().map(|chunk| (chunk.chunk.as_ref(), chunk.index as usize))) {
+ self.make_available(Data {
+ relay_parent: *relay_parent,
+ parachain_id: receipt.parachain_index,
+ block_data,
+ outgoing_queues,
+ })?;
+ }
+ }
+
+ tx.put_vec(columns::DATA, &dbkey, v.encode());
+ tx.put_vec(columns::META, &av_chunks_key, have_chunks.encode());
+
+ self.inner.write(tx)
+ } else {
+ trace!(target: LOG_TARGET, "Candidate with hash {} not found", candidate_hash);
+ Ok(())
+ }
+ }
+
+ /// Queries an erasure chunk by the relay parent of its block, the block data hash, and the chunk index.
+ pub fn get_erasure_chunk(
+ &self,
+ relay_parent: &Hash,
+ block_data_hash: Hash,
+ index: usize,
+ ) -> Option<ErasureChunk> {
+ self.query_inner(columns::DATA, &erasure_chunks_key(&relay_parent, &block_data_hash))
+ .and_then(|chunks: Vec<ErasureChunk>| {
+ chunks.iter()
+ .find(|chunk: &&ErasureChunk| chunk.index == index as u32)
+ .map(|chunk| chunk.clone())
+ })
+ }
+
+ /// Stores a candidate receipt.
+ pub fn add_candidate(&self, receipt: &CandidateReceipt) -> io::Result<()> {
+ let dbkey = candidate_key(&receipt.hash());
+ let mut tx = DBTransaction::new();
+
+ tx.put_vec(columns::DATA, &dbkey, receipt.encode());
+ tx.put_vec(columns::META, &block_to_candidate_key(&receipt.block_data_hash), receipt.hash().encode());
+
+ self.inner.write(tx)
+ }
+
+ /// Queries a candidate receipt by its hash.
+ pub fn get_candidate(&self, candidate_hash: &Hash) -> Option<CandidateReceipt> {
+ self.query_inner(columns::DATA, &candidate_key(candidate_hash))
+ }
+
+ /// Note that a set of candidates have been included in a finalized block with given hash and parent hash.
+ pub fn candidates_finalized(
+ &self,
+ parent: Hash,
+ finalized_candidates: HashSet<Hash>,
+ ) -> io::Result<()> {
+ let mut tx = DBTransaction::new();
+
+ let v = self.query_inner(columns::META, &parent[..]).unwrap_or(Vec::new());
+ tx.delete(columns::META, &parent[..]);
+
+ let awaited_frontier: Option<Vec<(Hash, Hash, Hash, u32)>> = self
+ .query_inner(columns::META, &awaited_chunks_key());
+
+ if let Some(mut awaited_frontier) = awaited_frontier {
+ awaited_frontier.retain(|&(p, c, _, _)| (p != parent && !finalized_candidates.contains(&c)));
+ tx.put_vec(columns::META, &awaited_chunks_key(), awaited_frontier.encode());
+ }
+
+ for block_data_hash in v {
+ if let Some(candidate_hash) = self.block_hash_to_candidate_hash(block_data_hash) {
+ if !finalized_candidates.contains(&candidate_hash) {
+ tx.delete(columns::DATA, block_data_key(&parent, &block_data_hash).as_slice());
+ tx.delete(columns::DATA, &erasure_chunks_key(&parent, &block_data_hash));
+ tx.delete(columns::DATA, &candidate_key(&candidate_hash));
+ tx.delete(columns::META, &block_to_candidate_key(&block_data_hash));
+ }
+ }
+ }
+
+ self.inner.write(tx)
+ }
+
+ /// Query block data.
+ pub fn block_data(&self, relay_parent: Hash, block_data_hash: Hash) -> Option<BlockData> {
+ self.query_inner(columns::DATA, &block_data_key(&relay_parent, &block_data_hash))
+ }
+
+ /// Query block data by corresponding candidate receipt's hash.
+ pub fn block_data_by_candidate(&self, relay_parent: Hash, candidate_hash: Hash) -> Option<BlockData> {
+ let receipt_key = candidate_key(&candidate_hash);
+
+ self.query_inner(columns::DATA, &receipt_key[..]).and_then(|receipt: CandidateReceipt| {
+ self.block_data(relay_parent, receipt.block_data_hash)
+ })
+ }
+
+ /// Query message queue data by message queue root hash.
+ pub fn queue_by_root(&self, queue_root: &Hash) -> Option<Vec<Message>> {
+ self.query_inner(columns::DATA, queue_root.as_ref())
+ }
+
+ fn block_hash_to_candidate_hash(&self, block_hash: Hash) -> Option<Hash> {
+ self.query_inner(columns::META, &block_to_candidate_key(&block_hash))
+ }
+
+ fn query_inner<T: Decode>(&self, column: Option<u32>, key: &[u8]) -> Option<T> {
+ match self.inner.get(column, key) {
+ Ok(Some(raw)) => {
+ let res = T::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed");
+ Some(res)
+ }
+ Ok(None) => None,
+ Err(e) => {
+ warn!(target: LOG_TARGET, "Error reading from the availability store: {:?}", e);
+ None
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use polkadot_erasure_coding::{self as erasure};
+ use polkadot_primitives::parachain::{Id as ParaId, AvailableMessages};
+
+ #[test]
+ fn finalization_removes_unneeded() {
+ let relay_parent = [1; 32].into();
+
+ let para_id_1 = 5.into();
+ let para_id_2 = 6.into();
+
+ let block_data_1 = BlockData(vec![1, 2, 3]);
+ let block_data_2 = BlockData(vec![4, 5, 6]);
+
+ let erasure_chunk_1 = ErasureChunk {
+ chunk: vec![10, 20, 30],
+ index: 1,
+ proof: vec![],
+ };
+
+ let erasure_chunk_2 = ErasureChunk {
+ chunk: vec![40, 50, 60],
+ index: 1,
+ proof: vec![],
+ };
+
+ let store = Store::new_in_memory();
+ store.make_available(Data {
+ relay_parent,
+ parachain_id: para_id_1,
+ block_data: block_data_1.clone(),
+ outgoing_queues: None,
+ }).unwrap();
+
+ store.make_available(Data {
+ relay_parent,
+ parachain_id: para_id_2,
+ block_data: block_data_2.clone(),
+ outgoing_queues: None,
+ }).unwrap();
+
+ let candidate_1 = CandidateReceipt {
+ parachain_index: para_id_1,
+ collator: Default::default(),
+ signature: Default::default(),
+ head_data: Default::default(),
+ egress_queue_roots: Vec::new(),
+ fees: 0,
+ block_data_hash: block_data_1.hash(),
+ upward_messages: Vec::new(),
+ erasure_root: [6; 32].into(),
+ };
+
+ let candidate_2 = CandidateReceipt {
+ parachain_index: para_id_2,
+ collator: Default::default(),
+ signature: Default::default(),
+ head_data: Default::default(),
+ egress_queue_roots: Vec::new(),
+ fees: 0,
+ block_data_hash: block_data_2.hash(),
+ upward_messages: Vec::new(),
+ erasure_root: [6; 32].into(),
+ };
+
+ store.add_candidate(&candidate_1).unwrap();
+ store.add_candidate(&candidate_2).unwrap();
+
+ assert!(store.add_erasure_chunks(3, &relay_parent, &candidate_1.hash(), vec![erasure_chunk_1.clone()]).is_ok());
+ assert!(store.add_erasure_chunks(3, &relay_parent, &candidate_2.hash(), vec![erasure_chunk_2.clone()]).is_ok());
+
+ assert_eq!(store.block_data(relay_parent, block_data_1.hash()).unwrap(), block_data_1);
+ assert_eq!(store.block_data(relay_parent, block_data_2.hash()).unwrap(), block_data_2);
+
+ assert_eq!(store.get_erasure_chunk(&relay_parent, block_data_1.hash(), 1).as_ref(), Some(&erasure_chunk_1));
+ assert_eq!(store.get_erasure_chunk(&relay_parent, block_data_2.hash(), 1), Some(erasure_chunk_2));
+
+ assert_eq!(store.get_candidate(&candidate_1.hash()), Some(candidate_1.clone()));
+ assert_eq!(store.get_candidate(&candidate_2.hash()), Some(candidate_2.clone()));
+
+ assert_eq!(store.block_data_by_candidate(relay_parent, candidate_1.hash()).unwrap(), block_data_1);
+ assert_eq!(store.block_data_by_candidate(relay_parent, candidate_2.hash()).unwrap(), block_data_2);
+
+ store.candidates_finalized(relay_parent, [candidate_1.hash()].iter().cloned().collect()).unwrap();
+
+ assert_eq!(store.get_erasure_chunk(&relay_parent, block_data_1.hash(), 1).as_ref(), Some(&erasure_chunk_1));
+ assert!(store.get_erasure_chunk(&relay_parent, block_data_2.hash(), 1).is_none());
+
+ assert_eq!(store.get_candidate(&candidate_1.hash()), Some(candidate_1));
+ assert_eq!(store.get_candidate(&candidate_2.hash()), None);
+
+ assert_eq!(store.block_data(relay_parent, block_data_1.hash()).unwrap(), block_data_1);
+ assert!(store.block_data(relay_parent, block_data_2.hash()).is_none());
+ }
+
+ #[test]
+ fn queues_available_by_queue_root() {
+ let relay_parent = [1; 32].into();
+ let para_id = 5.into();
+ let block_data = BlockData(vec![1, 2, 3]);
+
+ let message_queue_root_1 = [0x42; 32].into();
+ let message_queue_root_2 = [0x43; 32].into();
+
+ let message_a = Message(vec![1, 2, 3, 4]);
+ let message_b = Message(vec![4, 5, 6, 7]);
+
+ let outgoing_queues = AvailableMessages(vec![
+ (message_queue_root_1, vec![message_a.clone()]),
+ (message_queue_root_2, vec![message_b.clone()]),
+ ]);
+
+ let store = Store::new_in_memory();
+ store.make_available(Data {
+ relay_parent,
+ parachain_id: para_id,
+ block_data: block_data.clone(),
+ outgoing_queues: Some(outgoing_queues),
+ }).unwrap();
+
+ assert_eq!(
+ store.queue_by_root(&message_queue_root_1),
+ Some(vec![message_a]),
+ );
+
+ assert_eq!(
+ store.queue_by_root(&message_queue_root_2),
+ Some(vec![message_b]),
+ );
+ }
+
+ #[test]
+ fn erasure_coding() {
+ let relay_parent: Hash = [1; 32].into();
+ let para_id: ParaId = 5.into();
+ let block_data = BlockData(vec![42; 8]);
+ let block_data_hash = block_data.hash();
+ let n_validators = 5;
+
+ let message_queue_root_1 = [0x42; 32].into();
+ let message_queue_root_2 = [0x43; 32].into();
+
+ let message_a = Message(vec![1, 2, 3, 4]);
+ let message_b = Message(vec![5, 6, 7, 8]);
+
+ let outgoing_queues = Some(AvailableMessages(vec![
+ (message_queue_root_1, vec![message_a.clone()]),
+ (message_queue_root_2, vec![message_b.clone()]),
+ ]));
+
+ let erasure_chunks = erasure::obtain_chunks(
+ n_validators,
+ &block_data,
+ outgoing_queues.as_ref()).unwrap();
+
+ let branches = erasure::branches(erasure_chunks.as_ref());
+
+ let candidate = CandidateReceipt {
+ parachain_index: para_id,
+ collator: Default::default(),
+ signature: Default::default(),
+ head_data: Default::default(),
+ egress_queue_roots: Vec::new(),
+ fees: 0,
+ block_data_hash: block_data.hash(),
+ upward_messages: Vec::new(),
+ erasure_root: [6; 32].into(),
+ };
+
+ let chunks: Vec<_> = erasure_chunks
+ .iter()
+ .zip(branches.map(|(proof, _)| proof))
+ .enumerate()
+ .map(|(index, (chunk, proof))| ErasureChunk {
+ chunk: chunk.clone(),
+ proof,
+ index: index as u32,
+ })
+ .collect();
+
+ let store = Store::new_in_memory();
+
+ store.add_candidate(&candidate).unwrap();
+ store.add_erasure_chunks(n_validators as u32, &relay_parent, &candidate.hash(), vec![chunks[0].clone()]).unwrap();
+ assert_eq!(store.get_erasure_chunk(&relay_parent, block_data_hash, 0), Some(chunks[0].clone()));
+
+ assert!(store.block_data(relay_parent, block_data_hash).is_none());
+
+ store.add_erasure_chunks(n_validators as u32, &relay_parent, &candidate.hash(), chunks).unwrap();
+ assert_eq!(store.block_data(relay_parent, block_data_hash), Some(block_data));
+ }
+
+ #[test]
+ fn add_validator_index_works() {
+ let relay_parent = [42; 32].into();
+ let store = Store::new_in_memory();
+
+ store.add_validator_index_and_n_validators(&relay_parent, 42, 24).unwrap();
+ assert_eq!(store.get_validator_index_and_n_validators(&relay_parent).unwrap(), (42, 24));
+ }
+
+ #[test]
+ fn add_candidates_in_relay_block_works() {
+ let relay_parent = [42; 32].into();
+ let store = Store::new_in_memory();
+
+ let candidates = vec![[1; 32].into(), [2; 32].into(), [3; 32].into()];
+
+ store.add_candidates_in_relay_block(&relay_parent, candidates.clone()).unwrap();
+ assert_eq!(store.get_candidates_in_relay_block(&relay_parent).unwrap(), candidates);
+ }
+
+ #[test]
+ fn awaited_chunks_works() {
+ use std::iter::FromIterator;
+ let validator_index = 3;
+ let n_validators = 10;
+ let relay_parent = [42; 32].into();
+ let erasure_root_1 = [11; 32].into();
+ let erasure_root_2 = [12; 32].into();
+ let mut receipt_1 = CandidateReceipt::default();
+ let mut receipt_2 = CandidateReceipt::default();
+
+ receipt_1.parachain_index = 1.into();
+ receipt_1.erasure_root = erasure_root_1;
+ receipt_2.parachain_index = 2.into();
+ receipt_2.erasure_root = erasure_root_2;
+
+ let chunk = ErasureChunk {
+ chunk: vec![1, 2, 3],
+ index: validator_index,
+ proof: Vec::new(),
+ };
+ let candidates = vec![receipt_1.hash(), receipt_2.hash()];
+ let erasure_roots = vec![erasure_root_1, erasure_root_2];
+
+ let store = Store::new_in_memory();
+
+ store.add_validator_index_and_n_validators(
+ &relay_parent,
+ validator_index,
+ n_validators
+ ).unwrap();
+ store.add_candidate(&receipt_1).unwrap();
+ store.add_candidate(&receipt_2).unwrap();
+
+ // We are waiting for chunks from two candidates.
+ store.add_candidates_in_relay_block(&relay_parent, candidates.clone()).unwrap();
+
+ let awaited_frontier = store.awaited_chunks().unwrap();
+ warn!(target: "availability", "awaited {:?}", awaited_frontier);
+ let expected: HashSet<_> = candidates
+ .clone()
+ .into_iter()
+ .zip(erasure_roots.iter())
+ .map(|(c, e)| (relay_parent, *e, c, validator_index))
+ .collect();
+ assert_eq!(awaited_frontier, expected);
+
+ // We add chunk from one of the candidates.
+ store.add_erasure_chunks(n_validators, &relay_parent, &receipt_1.hash(), vec![chunk]).unwrap();
+
+ let awaited_frontier = store.awaited_chunks().unwrap();
+ // Now we wait for the other chunk that we haven't received yet.
+ let expected: HashSet<_> = vec![
+ (relay_parent, erasure_roots[1], candidates[1], validator_index)
+ ].into_iter().collect();
+
+ assert_eq!(awaited_frontier, expected);
+
+ // Finalizing removes awaited candidates from frontier.
+ store.candidates_finalized(relay_parent, HashSet::from_iter(candidates.into_iter())).unwrap();
+
+ assert_eq!(store.awaited_chunks().unwrap().len(), 0);
+ }
+}
diff --git a/availability-store/src/worker.rs b/availability-store/src/worker.rs
new file mode 100644
index 000000000000..539b08548c59
--- /dev/null
+++ b/availability-store/src/worker.rs
@@ -0,0 +1,998 @@
+// Copyright 2018 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::HashMap;
+use std::io;
+use std::sync::Arc;
+use std::thread;
+
+use log::{error, info, trace, warn};
+use sp_blockchain::{Result as ClientResult};
+use sp_runtime::traits::{Header as HeaderT, ProvideRuntimeApi};
+use sp_api::ApiExt;
+use client::{
+ BlockchainEvents, BlockBody,
+ blockchain::ProvideCache,
+};
+use consensus_common::{
+ self, BlockImport, BlockCheckParams, BlockImportParams, Error as ConsensusError,
+ ImportResult,
+ import_queue::CacheKeyId,
+};
+use polkadot_primitives::{Block, BlockId, Hash};
+use polkadot_primitives::parachain::{
+ CandidateReceipt, ParachainHost, ValidatorId,
+ ValidatorPair, AvailableMessages, BlockData, ErasureChunk,
+};
+use futures01::Future;
+use futures::channel::{mpsc, oneshot};
+use futures::{FutureExt, Sink, SinkExt, TryFutureExt, StreamExt};
+use keystore::KeyStorePtr;
+
+use tokio::runtime::current_thread::{Handle, Runtime as LocalRuntime};
+
+use crate::{LOG_TARGET, Data, TaskExecutor, ProvideGossipMessages, erasure_coding_topic};
+use crate::store::Store;
+
+/// Errors that may occur.
+#[derive(Debug, derive_more::Display, derive_more::From)]
+pub(crate) enum Error {
+ #[from]
+ StoreError(io::Error),
+ #[display(fmt = "Validator's id and number of validators at block with parent {} not found", relay_parent)]
+ IdAndNValidatorsNotFound { relay_parent: Hash },
+ #[display(fmt = "Candidate receipt with hash {} not found", candidate_hash)]
+ CandidateNotFound { candidate_hash: Hash },
+}
+
+/// Messages sent to the `Worker`.
+///
+/// Messages are sent in a number of different scenarios,
+/// for instance, when:
+/// * importing blocks in the `BlockImport` implementation,
+/// * receiving finality notifications,
+/// * the `Store` API is used by outside code.
+#[derive(Debug)]
+pub(crate) enum WorkerMsg {
+ ErasureRoots(ErasureRoots),
+ ParachainBlocks(ParachainBlocks),
+ ListenForChunks(ListenForChunks),
+ Chunks(Chunks),
+ CandidatesFinalized(CandidatesFinalized),
+ MakeAvailable(MakeAvailable),
+}
+
+/// The erasure roots of the heads included in the block with a given parent.
+#[derive(Debug)]
+pub(crate) struct ErasureRoots {
+ /// The relay parent of the block these roots belong to.
+ pub relay_parent: Hash,
+ /// The roots themselves.
+ pub erasure_roots: Vec<Hash>,
+ /// A sender to signal the result asynchronously.
+ pub result: oneshot::Sender<Result<(), Error>>,
+}
+
+/// The receipts of the heads included in the block with a given parent.
+#[derive(Debug)]
+pub(crate) struct ParachainBlocks {
+ /// The relay parent of the block these parachain blocks belong to.
+ pub relay_parent: Hash,
+ /// The blocks themselves.
+ pub blocks: Vec<(CandidateReceipt, Option<(BlockData, AvailableMessages)>)>,
+ /// A sender to signal the result asynchronously.
+ pub result: oneshot::Sender<Result<(), Error>>,
+}
+
+/// Listen on gossip for these chunks.
+#[derive(Debug)]
+pub(crate) struct ListenForChunks {
+ /// The relay parent of the block whose chunks we want to listen for.
+ pub relay_parent: Hash,
+ /// The hash of the candidate the chunk belongs to.
+ pub candidate_hash: Hash,
+ /// The index of the chunk we need.
+ pub index: u32,
+ /// A sender to signal the result asynchronously.
+ pub result: Option<oneshot::Sender<Result<(), Error>>>,
+}
+
+/// We have received some chunks.
+#[derive(Debug)]
+pub(crate) struct Chunks {
+ /// The relay parent of the block these chunks belong to.
+ pub relay_parent: Hash,
+ /// The hash of the parachain candidate these chunks belong to.
+ pub candidate_hash: Hash,
+ /// The chunks.
+ pub chunks: Vec<ErasureChunk>,
+ /// A sender to signal the result asynchronously.
+ pub result: oneshot::Sender<Result<(), Error>>,
+}
+
+/// These candidates have been finalized, so unneeded availability data may now be pruned.
+#[derive(Debug)]
+pub(crate) struct CandidatesFinalized {
+ /// The relay parent of the block that was finalized.
+ relay_parent: Hash,
+ /// The parachain heads that were finalized in this block.
+ candidate_hashes: Vec<Hash>,
+}
+
+/// The message that corresponds to `make_available` call of the crate API.
+#[derive(Debug)]
+pub(crate) struct MakeAvailable {
+ /// The data being made available.
+ pub data: Data,
+ /// A sender to signal the result asynchronously.
+ pub result: oneshot::Sender<Result<(), Error>>,
+}
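// Worker-side handling sketch (editorial, not part of this patch; the actual dispatch loop
// appears further down in worker.rs). Assuming the result sender carries `Result<(), Error>`:
//
//     WorkerMsg::MakeAvailable(MakeAvailable { data, result }) => {
//         let res = availability_store.make_available(data).map_err(Error::from);
//         let _ = result.send(res);
//     }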
+
+/// An availability worker with its inner state.
+pub(super) struct Worker<PGM> {
+ availability_store: Store,
+ provide_gossip_messages: PGM,
+ registered_gossip_streams: HashMap<Hash, exit_future::Signal>,
+
+ sender: mpsc::UnboundedSender<WorkerMsg>,
+}
+
+/// The handle to the `Worker`.
+pub(super) struct WorkerHandle {
+ exit_signal: Option<exit_future::Signal>,
+ thread: Option<thread::JoinHandle<io::Result<()>>>,
+ sender: mpsc::UnboundedSender<WorkerMsg>,
+}
+
+impl WorkerHandle {
+ pub(crate) fn to_worker(&self) -> &mpsc::UnboundedSender<WorkerMsg> {
+ &self.sender
+ }
+}
+
+impl Drop for WorkerHandle {
+ fn drop(&mut self) {
+ if let Some(signal) = self.exit_signal.take() {
+ signal.fire();
+ }
+
+ if let Some(thread) = self.thread.take() {
+ if let Err(_) = thread.join() {
+ error!(target: LOG_TARGET, "Error joining the availability worker thread");
+ }
+ }
+ }
+}
+
+async fn listen_for_chunks<PGM, S>(
+ p: PGM,
+ topic: Hash,
+ mut sender: S
+)
+where
+ PGM: ProvideGossipMessages,
+ S: Sink<WorkerMsg> + Unpin,
+{
+ trace!(target: LOG_TARGET, "Registering gossip listener for topic {}", topic);
+ let mut chunks_stream = p.gossip_messages_for(topic);
+
+ while let Some(item) = chunks_stream.next().await {
+ let (s, _) = oneshot::channel();
+ trace!(target: LOG_TARGET, "Received for {:?}", item);
+ let chunks = Chunks {
+ relay_parent: item.0,
+ candidate_hash: item.1,
+ chunks: vec![item.2],
+ result: s,
+ };
+
+ if let Err(_) = sender.send(WorkerMsg::Chunks(chunks)).await {
+ break;
+ }
+ }
+}
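// Registration sketch (editorial, not part of this patch; the exact signature of
// `erasure_coding_topic` is assumed): the worker derives a gossip topic for an awaited chunk and
// runs `listen_for_chunks` so received chunks flow back in as `WorkerMsg::Chunks`.
//
//     let topic = erasure_coding_topic(relay_parent, erasure_root, index); // assumed signature
//     let fut = listen_for_chunks(provide_gossip_messages.clone(), topic, sender.clone());
//     // `fut` is a std `Future`; it would be adapted via the futures `compat` layer before
//     // being spawned on the worker's futures-0.1 based local runtime.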
+
+
+fn fetch_candidates<P>(client: &P, block: &BlockId, parent: &BlockId)
+ -> ClientResult