diff --git a/.vscode/launch.json b/.vscode/launch.json index b88774d4..3006b777 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -183,4 +183,4 @@ "cwd": "${workspaceFolder}" } ] -} \ No newline at end of file +} diff --git a/Cargo.lock b/Cargo.lock index 9e45acdd..73a64e47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4070,7 +4070,7 @@ dependencies = [ "alloy-sol-types", "awc", "base58", - "clap", + "color-eyre", "core_affinity", "eyre", "futures", @@ -4086,6 +4086,7 @@ dependencies = [ "irys-types", "irys-vdf", "k256", + "modular-bitfield", "nodit", "rand", "reth", @@ -4097,7 +4098,9 @@ dependencies = [ "sha2 0.10.8", "test-fuzz", "tokio", + "toml", "tracing", + "tracing-error", "tracing-subscriber", ] @@ -4123,7 +4126,6 @@ dependencies = [ "actix", "alloy-primitives", "arbitrary", - "assert_matches", "base58", "bytes", "eyre", @@ -4310,7 +4312,6 @@ dependencies = [ "alloy-signer", "alloy-signer-local", "arbitrary", - "assert_matches", "base58", "base64-url", "borsh", @@ -4321,7 +4322,7 @@ dependencies = [ "derive_more 2.0.1", "eyre", "fixed-hash", - "irys-macros", + "hex", "k256", "modular-bitfield", "nodit", @@ -4337,6 +4338,7 @@ dependencies = [ "serde_json", "test-fuzz", "tokio", + "toml", "tracing", "uint", "zerocopy 0.8.13", diff --git a/Cargo.toml b/Cargo.toml index 74dcd686..961af916 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,6 @@ license = "MIT/Apache-2.0" repository = "https://github.com/Irys-xyz/irys" authors = [] - [workspace.dependencies] # Irys irys-macros = { path = "./crates/macros" } @@ -56,45 +55,37 @@ alloy-consensus = { path = "./ext/alloy/crates/consensus", default-features = fa alloy-core = { path = "./ext/alloy-core/crates/core" } alloy-eips = { path = "./ext/alloy/crates/eips", default-features = false } alloy-genesis = { path = "./ext/alloy/crates/genesis", default-features = false } -alloy-primitives = { path = "./ext/alloy-core/crates/primitives", features = [ - "arbitrary", -] } +alloy-primitives = { path = "./ext/alloy-core/crates/primitives", features = ["arbitrary"] } rand = "0.8.5" hex = "0.4" base64-url = "2.0.0" alloy-rlp = "0.3.4" alloy-rpc-types = { path = "./ext/alloy/crates/rpc-types" } -alloy-rpc-types-engine = { path = "./ext/alloy/crates/rpc-types-engine", features = [ - "serde", -] } +alloy-rpc-types-engine = { path = "./ext/alloy/crates/rpc-types-engine", features = ["serde"] } alloy-rpc-types-trace = { path = "./ext/alloy/crates/rpc-types-trace" } reth-e2e-test-utils = { path = "./ext/reth/crates/e2e-test-utils" } alloy-serde = { path = "./ext/alloy/crates/serde", default-features = false } alloy-signer-local = { path = "./ext/alloy/crates/signer-local" } -alloy-sol-macro = { path = "./ext/alloy-core/crates/sol-macro", features = [ - "json", -] } +alloy-sol-macro = { path = "./ext/alloy-core/crates/sol-macro", features = ["json"] } alloy-sol-types = { path = "./ext/alloy-core/crates/sol-types" } alloy-contract = { path = "./ext/alloy/crates/contract" } -alloy-provider = { path = "./ext/alloy/crates/provider", features = [ - "trace-api", -] } +alloy-provider = { path = "./ext/alloy/crates/provider", features = ["trace-api"] } arbitrary = { version = "1.3", features = ["derive"] } -once_cell = "1.19.0" -assert_matches = "1.5.0" +once_cell = "1" +assert_matches = "1" bytes = "1.5" derive_more = { version = "1", features = ["full"] } eyre = "0.6" -color-eyre = "0.6.2" -itertools = "0.13.0" +color-eyre = "0.6" +itertools = "0.13" futures = "0.3" bytemuck = "1" -nodit = { version = "0.9.2", features = ["serde"] } 
-modular-bitfield = "0.11.2" -openssl = { version = "0.10.57", features = ["vendored"] } +nodit = { version = "0.9", features = ["serde"] } +modular-bitfield = "0.11" +openssl = { version = "0.10", features = ["vendored"] } proptest-derive = "0.5" reth = { path = "./ext/reth/bin/reth" } reth-auto-seal-consensus = { path = "./ext/reth/crates/consensus/auto-seal" } @@ -154,27 +145,23 @@ serde = { version = "1", default-features = false } reth-rpc-layer = { path = "./ext/reth/crates/rpc/rpc-layer" } serde_json = "1" test-fuzz = "6" -thiserror = "1.0" -tokio = { version = "1.40.0", features = ["rt", "macros"] } +thiserror = "1" +tokio = { version = "1", features = ["rt", "macros"] } toml = "0.8" derive-syn-parse = "0" proc-macro2 = "1" quote = "1" syn = { version = "2", features = ["full"] } tracing = "0.1.0" -tracing-subscriber = { version = "0.3", default-features = false, features = [ - "env-filter", - "fmt", - "json", - "ansi", -] } +tracing-error = "0.2" +tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt", "json", "ansi"] } alloy-signer = { path = "./ext/alloy/crates/signer" } -tempfile = "3.10" +tempfile = "3" jsonrpsee = "0.24" jsonrpsee-core = "0.24" jsonrpsee-http-client = "0.24" jsonrpsee-types = "0.24" -futures-util = "0.3.30" +futures-util = "0.3" [patch.crates-io] revm = { path = "./ext/revm/crates/revm" } @@ -359,11 +346,10 @@ too_long_first_doc_paragraph = "allow" [workspace.metadata.cargo-machete] ignored = ["modular-bitfield", "test-fuzz"] - # https://github.com/crate-ci/typos/blob/master/docs/reference.md [workspace.metadata.typos] default.extend-ignore-re = [ - "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # disable a single line: spellchecker:disable-line + "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # disable a single line: spellchecker:disable-line "(?s)(#|//)\\s*spellchecker:off.*?\\n\\s*(#|//)\\s*spellchecker:on", # disable blocks of lines: spellchecker: ] files.extend-exclude = ["ext/*", "fixtures/*"] diff --git a/crates/actors/Cargo.toml b/crates/actors/Cargo.toml index 448a0bef..f7acb56c 100644 --- a/crates/actors/Cargo.toml +++ b/crates/actors/Cargo.toml @@ -33,8 +33,13 @@ irys-testing-utils.workspace = true base58.workspace = true futures.workspace = true +[dev-dependencies] +irys-types = { workspace = true, features = ["test-utils"] } +irys-config = { workspace = true, features = ["test-utils"] } + [lints] workspace = true [features] nvidia = ["irys-packing/nvidia"] +test-utils = ["irys-types/test-utils", "irys-config/test-utils"] diff --git a/crates/actors/src/block_validation.rs b/crates/actors/src/block_validation.rs index e96d23c0..8e804eff 100644 --- a/crates/actors/src/block_validation.rs +++ b/crates/actors/src/block_validation.rs @@ -324,6 +324,7 @@ pub fn poa_is_valid( config.entropy_packing_iterations, config.chunk_size as usize, &mut entropy_chunk, + config.chain_id, ); let mut poa_chunk: Vec = poa.chunk.clone().into(); @@ -358,6 +359,7 @@ pub fn poa_is_valid( config.entropy_packing_iterations, config.chunk_size as usize, &mut entropy_chunk, + config.chain_id, ); if entropy_chunk != poa.chunk.0 { @@ -375,9 +377,6 @@ pub fn poa_is_valid( Ok(()) } -//============================================================================== -// Tests -//------------------------------------------------------------------------------ #[cfg(test)] mod tests { use crate::{ @@ -394,7 +393,7 @@ mod tests { use irys_database::{BlockIndex, Initialized}; use irys_testing_utils::utils::temporary_directory; use irys_types::{ - 
irys::IrysSigner, partition::PartitionAssignment, Address, Base64, H256List, + irys::IrysSigner, partition::PartitionAssignment, Address, Base64, Config, H256List, IrysTransaction, IrysTransactionHeader, Signature, TransactionLedger, H256, U256, }; use std::sync::{Arc, RwLock}; @@ -412,6 +411,7 @@ mod tests { pub miner_address: Address, pub partition_hash: H256, pub partition_assignment: PartitionAssignment, + pub testnet_config: Config, } async fn init() -> (TempDir, TestContext) { @@ -424,31 +424,29 @@ mod tests { .try_init(); let mut genesis_block = IrysBlockHeader::new_mock_header(); - let data_dir = temporary_directory(Some("block_validation_tests"), false); genesis_block.height = 0; - let arc_genesis = Arc::new(genesis_block); - - let miner_address = Address::random(); let chunk_size = 32; - - // Create epoch service with random miner address - let storage_config = StorageConfig { + let testnet_config = Config { chunk_size, num_chunks_in_partition: 10, num_chunks_in_recall_range: 2, - num_partitions_in_slot: 1, - miner_address, - min_writes_before_sync: 1, + num_partitions_per_slot: 1, + num_writes_before_sync: 1, entropy_packing_iterations: 1_000, - chunk_migration_depth: 1, // Testnet / single node config + chunk_migration_depth: 1, + ..Config::testnet() }; - let config = EpochServiceConfig { - storage_config: storage_config.clone(), - ..Default::default() - }; + let data_dir = temporary_directory(Some("block_validation_tests"), false); + let arc_genesis = Arc::new(genesis_block); + let signer = testnet_config.irys_signer(); + let miner_address = signer.address(); + + // Create epoch service with random miner address + let storage_config = StorageConfig::new(&testnet_config); + let epoch_config = EpochServiceConfig::new(&testnet_config); - let epoch_service = EpochServiceActor::new(Some(config.clone())); + let epoch_service = EpochServiceActor::new(epoch_config.clone(), &testnet_config); let epoch_service_addr = epoch_service.start(); // Tell the epoch service to initialize the ledgers @@ -473,11 +471,9 @@ mod tests { let sub_slots = ledgers.get_slots(Ledger::Submit); let partition_hash = sub_slots[0].partitions[0]; - - let arc_config = Arc::new(IrysNodeConfig { - base_directory: data_dir.path().to_path_buf(), - ..Default::default() - }); + let mut config = IrysNodeConfig::new(&testnet_config); + config.base_directory = data_dir.path().to_path_buf(); + let arc_config = Arc::new(config); let block_index: Arc>> = Arc::new(RwLock::new( BlockIndex::default() @@ -519,13 +515,13 @@ mod tests { miner_address, partition_hash, partition_assignment, + testnet_config, }, ) } #[actix::test] async fn poa_test_3_complete_txs() { - let chunk_size: usize = 32; let (_tmp, context) = init().await; // Create a bunch of TX chunks let data_chunks = vec![ @@ -536,7 +532,7 @@ mod tests { // Create a bunch of signed TX from the chunks // Loop though all the data_chunks and create wrapper tx for them - let signer = IrysSigner::random_signer_with_chunk_size(chunk_size); + let signer = IrysSigner::random_signer(&context.testnet_config); let mut txs: Vec = Vec::new(); for chunks in &data_chunks { @@ -559,7 +555,7 @@ mod tests { poa_tx_num, poa_chunk_num, 9, - chunk_size, + context.testnet_config.chunk_size as usize, ) .await; } @@ -569,10 +565,9 @@ mod tests { #[actix::test] async fn poa_not_complete_last_chunk_test() { let (_tmp, context) = init().await; - let chunk_size: usize = 32; // Create a signed TX from the chunks - let signer = IrysSigner::random_signer_with_chunk_size(chunk_size); + let signer = 
IrysSigner::random_signer(&context.testnet_config); let mut txs: Vec = Vec::new(); let data = vec![3; 40]; //32 + 8 last incomplete chunk @@ -581,9 +576,9 @@ mod tests { txs.push(tx); let poa_tx_num = 0; - + let chunk_size = context.testnet_config.chunk_size as usize; for poa_chunk_num in 0..2 { - let mut poa_chunk: Vec = data[poa_chunk_num * chunk_size + let mut poa_chunk: Vec = data[poa_chunk_num * (chunk_size) ..std::cmp::min((poa_chunk_num + 1) * chunk_size, data.len())] .to_vec(); poa_test( @@ -622,6 +617,7 @@ mod tests { context.storage_config.entropy_packing_iterations, chunk_size, &mut entropy_chunk, + context.storage_config.chain_id, ); xor_vec_u8_arrays_in_place(poa_chunk, &entropy_chunk); diff --git a/crates/actors/src/epoch_service.rs b/crates/actors/src/epoch_service.rs index 93152195..7e9e7a0b 100644 --- a/crates/actors/src/epoch_service.rs +++ b/crates/actors/src/epoch_service.rs @@ -5,12 +5,12 @@ use eyre::{Error, Result}; use irys_config::StorageSubmodulesConfig; use irys_database::{block_header_by_hash, data_ledger::*, database}; use irys_storage::{ie, StorageModuleInfo}; -use irys_types::H256List; use irys_types::{ partition::{PartitionAssignment, PartitionHash}, - DatabaseProvider, IrysBlockHeader, SimpleRNG, StorageConfig, CONFIG, H256, + DatabaseProvider, IrysBlockHeader, SimpleRNG, StorageConfig, H256, }; use irys_types::{partition_chunk_offset_ie, PartitionChunkOffset}; +use irys_types::{Config, H256List}; use openssl::sha; use reth_db::Database; use std::{ @@ -26,7 +26,7 @@ use crate::block_index_service::{ use crate::broadcast_mining_service::{BroadcastMiningService, BroadcastPartitionsExpiration}; /// Allows for overriding of the consensus parameters for ledgers and partitions -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct EpochServiceConfig { /// Capacity partitions are allocated on a logarithmic curve, this scalar /// shifts the curve on the Y axis. 
Allowing there to be more or less @@ -34,16 +34,18 @@ pub struct EpochServiceConfig { pub capacity_scalar: u64, /// The length of an epoch denominated in block heights pub num_blocks_in_epoch: u64, + pub num_capacity_partitions: Option, /// Reference to global storage config for node pub storage_config: StorageConfig, } -impl Default for EpochServiceConfig { - fn default() -> Self { +impl EpochServiceConfig { + pub fn new(config: &Config) -> Self { Self { - capacity_scalar: CONFIG.capacity_scalar, - num_blocks_in_epoch: CONFIG.num_blocks_in_epoch, - storage_config: StorageConfig::default(), + capacity_scalar: config.capacity_scalar, + num_blocks_in_epoch: config.num_blocks_in_epoch, + num_capacity_partitions: config.num_capacity_partitions, + storage_config: StorageConfig::new(config), } } } @@ -241,19 +243,13 @@ impl Handler for EpochServiceActor { impl EpochServiceActor { /// Create a new instance of the epoch service actor - pub fn new(config: Option) -> Self { - let config = match config { - Some(cfg) => cfg, - // If no config was provided, use the default protocol parameters - None => EpochServiceConfig::default(), - }; - + pub fn new(epoch_config: EpochServiceConfig, config: &Config) -> Self { Self { last_epoch_hash: H256::zero(), - ledgers: Arc::new(RwLock::new(Ledgers::new())), + ledgers: Arc::new(RwLock::new(Ledgers::new(config))), partition_assignments: Arc::new(RwLock::new(PartitionAssignments::new())), all_active_partitions: Vec::new(), - config, + config: epoch_config, } } @@ -309,10 +305,10 @@ impl EpochServiceActor { new_epoch_block: Arc, ) -> Result<(), EpochServiceError> { // Validate this is an epoch block height - if new_epoch_block.height % CONFIG.num_blocks_in_epoch != 0 { + if new_epoch_block.height % self.config.num_blocks_in_epoch != 0 { error!( "Not an epoch block height: {} num_blocks_in_epoch: {}", - new_epoch_block.height, CONFIG.num_blocks_in_epoch + new_epoch_block.height, self.config.num_blocks_in_epoch ); return Err(EpochServiceError::NotAnEpochBlock); } @@ -363,7 +359,9 @@ impl EpochServiceActor { + Self::get_num_capacity_partitions(num_data_partitions, &self.config); self.add_capacity_partitions(std::cmp::max( - CONFIG.num_capacity_partitions.unwrap_or(num_partitions), + self.config + .num_capacity_partitions + .unwrap_or(num_partitions), num_partitions, )); } else { @@ -517,10 +515,6 @@ impl EpochServiceActor { let trunc = truncate_to_3_decimals(log_10); let scaled = truncate_to_3_decimals(trunc * config.capacity_scalar as f64); - // println!( - // "- base_count: {}, log_10: {}, trunc: {}, scaled: {}, rounded: {}", - // base_count, log_10, trunc, scaled, rounded - // ); truncate_to_3_decimals(scaled).ceil() as u64 } @@ -740,7 +734,7 @@ mod tests { use irys_database::{open_or_create_db, tables::IrysTables}; use irys_storage::{ie, StorageModule, StorageModuleVec}; use irys_testing_utils::utils::setup_tracing_and_temp_dir; - use irys_types::{partition_chunk_offset_ie, Address, PartitionChunkRange, CONFIG}; + use irys_types::{partition_chunk_offset_ie, Address, PartitionChunkRange}; use tokio::time::sleep; use crate::{ @@ -756,11 +750,12 @@ mod tests { async fn genesis_test() { // Initialize genesis block at height 0 let mut genesis_block = IrysBlockHeader::new_mock_header(); + let testnet_config = Config::testnet(); genesis_block.height = 0; // Create epoch service with random miner address - let config = EpochServiceConfig::default(); - let mut epoch_service = EpochServiceActor::new(Some(config.clone())); + let config = 
EpochServiceConfig::new(&testnet_config); + let mut epoch_service = EpochServiceActor::new(config.clone(), &testnet_config); let miner_address = config.storage_config.miner_address; // Process genesis message directly instead of through actor system @@ -884,6 +879,7 @@ mod tests { async fn add_slots_test() { // Initialize genesis block at height 0 let mut genesis_block = IrysBlockHeader::new_mock_header(); + let testnet_config = Config::testnet(); genesis_block.height = 0; // Create a storage config for testing @@ -894,8 +890,9 @@ mod tests { num_partitions_in_slot: 1, miner_address: Address::random(), min_writes_before_sync: 1, - entropy_packing_iterations: CONFIG.entropy_packing_iterations, + entropy_packing_iterations: testnet_config.entropy_packing_iterations, chunk_migration_depth: 1, // Testnet / single node config + chain_id: 333, }; let num_chunks_in_partition = storage_config.num_chunks_in_partition; @@ -904,10 +901,11 @@ mod tests { capacity_scalar: 100, num_blocks_in_epoch: 100, storage_config, + num_capacity_partitions: Some(123), }; let num_blocks_in_epoch = config.num_blocks_in_epoch; - let mut epoch_service = EpochServiceActor::new(Some(config)); + let mut epoch_service = EpochServiceActor::new(config, &testnet_config); // Process genesis message directly instead of through actor system // This allows us to inspect the actor's state after processing @@ -951,9 +949,6 @@ mod tests { } } - #[actix::test] - async fn expire_slots_test() {} - #[actix::test] async fn capacity_projection_tests() { let max_data_parts = 1000; @@ -973,36 +968,34 @@ mod tests { #[actix::test] async fn partition_expiration_test() { // Initialize genesis block at height 0 - let mining_address = Address::random(); - let mut genesis_block = IrysBlockHeader::new_mock_header(); + let chunk_size = 32; let chunk_count = 10; - genesis_block.height = 0; - - // Create a storage config for testing - let storage_config = StorageConfig { - chunk_size: 32, + let testnet_config = Config { + chunk_size, num_chunks_in_partition: chunk_count, num_chunks_in_recall_range: 2, - num_partitions_in_slot: 1, // 1 replica per slot - miner_address: mining_address.clone(), - min_writes_before_sync: 1, - entropy_packing_iterations: CONFIG.entropy_packing_iterations, - chunk_migration_depth: 1, // Testnet / single node config + num_partitions_per_slot: 1, + num_writes_before_sync: 1, + chunk_migration_depth: 1, + capacity_scalar: 100, + ..Config::testnet() }; + let mining_address = testnet_config.miner_address(); + + let mut genesis_block = IrysBlockHeader::new_mock_header(); + genesis_block.height = 0; + + // Create a storage config for testing + let storage_config = StorageConfig::new(&testnet_config); let num_chunks_in_partition = storage_config.num_chunks_in_partition; let tmp_dir = setup_tracing_and_temp_dir(Some("partition_expiration_test"), false); let base_path = tmp_dir.path().to_path_buf(); - let num_blocks_in_epoch = CONFIG.num_blocks_in_epoch; + let num_blocks_in_epoch = testnet_config.num_blocks_in_epoch; // Create epoch service - let config = EpochServiceConfig { - capacity_scalar: 100, - num_blocks_in_epoch: CONFIG.num_blocks_in_epoch, - storage_config: storage_config.clone(), - }; - - let epoch_service = EpochServiceActor::new(Some(config)); + let config = EpochServiceConfig::new(&testnet_config); + let epoch_service = EpochServiceActor::new(config, &testnet_config); let epoch_service_actor = epoch_service.start(); // Process genesis message directly instead of through actor system @@ -1014,7 +1007,8 @@ mod tests { // 
Now create a new epoch block & give the Submit ledger enough size to add a slot let mut new_epoch_block = IrysBlockHeader::new_mock_header(); - new_epoch_block.height = (CONFIG.submit_ledger_epoch_length + 1) * num_blocks_in_epoch; // next epoch block, next multiple of num_blocks_in epoch, + new_epoch_block.height = + (testnet_config.submit_ledger_epoch_length + 1) * num_blocks_in_epoch; // next epoch block, next multiple of num_blocks_in epoch, new_epoch_block.ledgers[Ledger::Submit].max_chunk_offset = num_chunks_in_partition / 2; let storage_module_config = StorageSubmodulesConfig::load(base_path.clone()).unwrap(); diff --git a/crates/actors/src/mempool_service.rs b/crates/actors/src/mempool_service.rs index 03473a40..72811abc 100644 --- a/crates/actors/src/mempool_service.rs +++ b/crates/actors/src/mempool_service.rs @@ -10,7 +10,7 @@ use irys_types::{ app_state::DatabaseProvider, chunk::UnpackedChunk, hash_sha256, validate_path, IrysTransactionHeader, H256, }; -use irys_types::{DataRoot, StorageConfig, CONFIG, U256}; +use irys_types::{Config, DataRoot, StorageConfig, U256}; use reth::tasks::TaskExecutor; use reth_db::cursor::DbCursorRO; use reth_db::cursor::DbDupCursorRO; @@ -38,6 +38,8 @@ pub struct MempoolService { signer: Option, invalid_tx: Vec, storage_config: StorageConfig, + anchor_expiry_depth: u64, + max_data_txs_per_block: u64, storage_modules: StorageModuleVec, block_tree_read_guard: Option, } @@ -49,11 +51,7 @@ impl Actor for MempoolService { /// Allows this actor to live in the the local service registry impl Supervised for MempoolService {} -impl SystemService for MempoolService { - fn service_started(&mut self, _ctx: &mut Context) { - println!("mempool_service started"); - } -} +impl SystemService for MempoolService {} impl MempoolService { /// Create a new instance of the mempool actor passing in a reference @@ -66,8 +64,9 @@ impl MempoolService { storage_config: StorageConfig, storage_modules: StorageModuleVec, block_tree_read_guard: BlockTreeReadGuard, + config: &Config, ) -> Self { - println!("service started: mempool"); + info!("service started"); Self { irys_db: Some(irys_db), reth_db: Some(reth_db), @@ -77,6 +76,8 @@ impl MempoolService { task_exec: Some(task_exec), storage_config, storage_modules, + max_data_txs_per_block: config.max_data_txs_per_block, + anchor_expiry_depth: config.anchor_expiry_depth.into(), block_tree_read_guard: Some(block_tree_read_guard), } } @@ -212,11 +213,11 @@ impl Handler for MempoolService { match irys_database::block_header_by_hash(read_tx, &tx.anchor) { // note: we use addition here as it's safer - Ok(Some(hdr)) if hdr.height + (CONFIG.anchor_expiry_depth as u64) >= *latest_height => { + Ok(Some(hdr)) if hdr.height + self.anchor_expiry_depth >= *latest_height => { debug!("valid block hash anchor {} for tx {}", &tx.anchor, &tx.id); // update any associated ingress proofs if let Ok(Some(old_expiry)) = read_tx.get::(tx.data_root) { - let new_expiry = hdr.height + (CONFIG.anchor_expiry_depth as u64); + let new_expiry = hdr.height + self.anchor_expiry_depth; debug!( "Updating ingress proof for data root {} expiry from {} -> {}", &tx.data_root, &old_expiry, &new_expiry @@ -310,7 +311,7 @@ impl Handler for MempoolService { let target_offset = chunk.byte_offset(self.storage_config.chunk_size) as u128; let path_buff = &chunk.data_path; - println!( + info!( "chunk_offset:{} data_size:{} offset:{}", chunk.tx_offset, chunk.data_size, target_offset ); @@ -437,7 +438,7 @@ impl Handler for MempoolService { .last() 
.ok_or(ChunkIngressError::ServiceUninitialized)?; - let target_height = latest_height + CONFIG.anchor_expiry_depth as u64; + let target_height = latest_height + self.anchor_expiry_depth; let db1 = self.irys_db.clone().unwrap(); let signer1 = self.signer.clone().unwrap(); @@ -491,7 +492,7 @@ impl Handler for MempoolService { }; valid }) - .take(CONFIG.max_data_txs_per_block.try_into().unwrap()) + .take(self.max_data_txs_per_block.try_into().unwrap()) .map(|(_, header)| header.clone()) .collect() } @@ -689,206 +690,3 @@ pub fn generate_ingress_proof( Ok(()) } - -//============================================================================== -// Tests -//------------------------------------------------------------------------------ -// #[cfg(test)] -// mod tests { -// use std::{sync::Arc, time::Duration}; - -// use assert_matches::assert_matches; -// use irys_database::{open_or_create_db, tables::IrysTables}; -// use irys_packing::xor_vec_u8_arrays_in_place; -// use irys_storage::{ii, ChunkType, StorageModule, StorageModuleInfo}; -// use irys_testing_utils::utils::setup_tracing_and_temp_dir; -// use irys_types::{ -// irys::IrysSigner, -// partition::{PartitionAssignment, PartitionHash}, -// Address, Base64, MAX_CHUNK_SIZE, -// }; -// use rand::Rng; -// use reth::tasks::TaskManager; -// use tokio::time::{sleep, timeout}; - -// use super::*; - -// use actix::prelude::*; - -// #[actix::test] -// async fn post_transaction_and_chunks() -> eyre::Result<()> { -// let tmp_dir = setup_tracing_and_temp_dir(Some("post_transaction_and_chunks"), false); -// let base_path = tmp_dir.path().to_path_buf(); - -// let db = open_or_create_db(tmp_dir, IrysTables::ALL, None).unwrap(); -// let arc_db1 = DatabaseProvider(Arc::new(db)); -// let arc_db2 = DatabaseProvider(Arc::clone(&arc_db1)); - -// // Create an instance of the mempool actor -// let task_manager = TaskManager::current(); - -// let storage_config = StorageConfig::default(); -// let chunk_size = storage_config.chunk_size; - -// let storage_module_info = StorageModuleInfo { -// id: 0, -// partition_assignment: Some(PartitionAssignment { -// partition_hash: PartitionHash::zero(), -// miner_address: Address::random(), -// ledger_id: Some(0), -// slot_index: Some(0), -// }), -// submodules: vec![ -// (ii(0, 4), "hdd0-4TB".into()), // 0 to 4 inclusive -// ], -// }; - -// // Override the default StorageModule config for testing -// let config = StorageConfig { -// min_writes_before_sync: 1, -// chunk_size, -// num_chunks_in_partition: 5, -// ..Default::default() -// }; - -// let storage_module = Arc::new(StorageModule::new( -// &base_path, -// &storage_module_info, -// config, -// )?); - -// storage_module.pack_with_zeros(); - -// let mempool = MempoolService::new( -// arc_db1, -// task_manager.executor(), -// IrysSigner::random_signer(), -// storage_config, -// vec![storage_module.clone()], -// ); -// let addr: Addr = mempool.start(); - -// // Create 2.5 chunks worth of data * fill the data with random bytes -// let data_size = (MAX_CHUNK_SIZE as f64 * 2.5).round() as usize; -// let mut data_bytes = vec![0u8; data_size]; -// rand::thread_rng().fill(&mut data_bytes[..]); - -// // Create a new Irys API instance & a signed transaction -// let irys = IrysSigner::random_signer(); -// let tx = irys.create_transaction(data_bytes.clone(), None).unwrap(); -// let tx = irys.sign_transaction(tx).unwrap(); - -// println!("{:?}", tx.header); -// println!("{}", serde_json::to_string_pretty(&tx.header).unwrap()); - -// for proof in &tx.proofs { -// println!("offset: {}", 
proof.offset); -// } - -// // Wrap the transaction in a TxIngressMessage -// let data_root = tx.header.data_root; -// let data_size = tx.header.data_size; -// let tx_ingress_msg = TxIngressMessage(tx.header); - -// // Post the TxIngressMessage to the handle method on the mempool actor -// let result = addr.send(tx_ingress_msg).await.unwrap(); - -// // Verify the transaction was added -// assert_matches!(result, Ok(())); - -// let db_tx = arc_db2.tx()?; - -// // Verify the data_root was added to the cache -// let result = irys_database::cached_data_root_by_data_root(&db_tx, data_root).unwrap(); -// assert_matches!(result, Some(_)); -// let last_index = tx.chunks.len() - 1; -// // Loop though each of the transaction chunks -// for (tx_chunk_offset, chunk_node) in tx.chunks.iter().enumerate() { -// let min = chunk_node.min_byte_range; -// let max = chunk_node.max_byte_range; -// let data_path = Base64(tx.proofs[tx_chunk_offset].proof.clone()); -// let key: H256 = hash_sha256(&data_path.0).unwrap().into(); -// let chunk_bytes = Base64(data_bytes[min..max].to_vec()); -// // Create a ChunkIngressMessage for each chunk -// let chunk_ingress_msg = ChunkIngressMessage(UnpackedChunk { -// data_root, -// data_size, -// data_path: data_path.clone(), -// bytes: chunk_bytes.clone(), -// tx_offset: tx_chunk_offset as u32, -// }); - -// let is_last_chunk = tx_chunk_offset == last_index; -// let interval = ii(0, last_index as u64); -// if is_last_chunk { -// // artificially index the chunk with the submodule -// // this will cause the last chunk to show up in cache & on disk -// storage_module.index_transaction_data(vec![0], data_root, interval.into())?; -// } - -// // Post the ChunkIngressMessage to the handle method on the mempool -// let result = addr.send(chunk_ingress_msg).await.unwrap(); - -// // Verify the chunk was added -// assert_matches!(result, Ok(())); - -// // Verify the chunk is added to the ChunksCache -// // use a new read tx so we can see the writes -// let db_tx = arc_db2.tx()?; - -// let (meta, chunk) = irys_database::cached_chunk_by_chunk_offset( -// &db_tx, -// data_root, -// tx_chunk_offset as u32, -// ) -// .unwrap() -// .unwrap(); -// assert_eq!(meta.chunk_path_hash, key); -// assert_eq!(chunk.data_path, data_path); -// assert_eq!(chunk.chunk, Some(chunk_bytes.clone())); - -// let result = irys_database::cached_chunk_by_chunk_path_hash(&db_tx, &key).unwrap(); -// assert_matches!(result, Some(_)); - -// storage_module.sync_pending_chunks()?; - -// if is_last_chunk { -// // read the set of chunks -// // only offset 2 (last chunk) should have data -// let res = storage_module.read_chunks(ii(0, last_index as u32))?; -// let r = res.get(&2).unwrap(); -// let mut packed_bytes = r.0.clone(); -// // unpack the data (packing was all 0's) -// xor_vec_u8_arrays_in_place(&mut packed_bytes, &vec![0u8; chunk_size as usize]); -// let packed_bytes_slice = &packed_bytes[0..chunk_bytes.0.len()]; -// let chunk_bytes = chunk_bytes.0; -// assert_eq!(packed_bytes_slice.len(), chunk_bytes.len()); -// assert_eq!(packed_bytes_slice, chunk_bytes); -// assert_eq!(r.1, ChunkType::Data); -// } -// } - -// // Modify one of the chunks - -// // Attempt to post the chunk - -// // Verify there chunk is not accepted - -// task_manager.graceful_shutdown_with_timeout(Duration::from_secs(5)); -// // check the ingress proof is in the DB -// let timed_get = timeout(Duration::from_secs(5), async { -// loop { -// // don't reuse the tx! 
it has read isolation (won't see anything committed after it's creation) -// let ro_tx = &arc_db2.tx().unwrap(); -// match ro_tx.get::(data_root).unwrap() { -// Some(ip) => break ip, -// None => sleep(Duration::from_millis(100)).await, -// } -// } -// }) -// .await?; -// assert_eq!(&timed_get.data_root, &data_root); - -// Ok(()) -// } -// } diff --git a/crates/actors/src/mining.rs b/crates/actors/src/mining.rs index d6e15bf5..a7634a89 100644 --- a/crates/actors/src/mining.rs +++ b/crates/actors/src/mining.rs @@ -449,6 +449,7 @@ mod tests { min_writes_before_sync: 1, entropy_packing_iterations: 1, chunk_migration_depth: 1, // Testnet / single node config + chain_id: 1, }; let infos = vec![StorageModuleInfo { @@ -633,8 +634,10 @@ mod tests { max_seeds_num: 5, seeds: VecDeque::new(), }; - - let vdf_service = VdfService::from_atomic_state(Arc::new(RwLock::new(vdf_state))).start(); + let vdf_service = VdfService { + vdf_state: Arc::new(RwLock::new(vdf_state)), + } + .start(); let vdf_steps_guard: VdfStepsReadGuard = vdf_service.send(GetVdfStateMessage).await.unwrap(); diff --git a/crates/actors/src/packing.rs b/crates/actors/src/packing.rs index 96788932..f1a040dc 100644 --- a/crates/actors/src/packing.rs +++ b/crates/actors/src/packing.rs @@ -16,7 +16,7 @@ use { }; use irys_storage::{ChunkType, StorageModule}; -use irys_types::{PartitionChunkOffset, PartitionChunkRange, StorageConfig}; +use irys_types::{Config, PartitionChunkOffset, PartitionChunkRange, StorageConfig}; use reth::tasks::TaskExecutor; use tokio::{runtime::Handle, sync::Semaphore, time::sleep}; use tracing::{debug, warn}; @@ -57,13 +57,17 @@ pub struct PackingConfig { /// Max. number of chunks send to GPU packing #[allow(unused)] pub max_chunks: u32, + /// Irys chain id + pub chain_id: u64, } -impl Default for PackingConfig { - fn default() -> Self { + +impl PackingConfig { + pub fn new(config: &Config) -> Self { Self { poll_duration: Duration::from_millis(1000), concurrency: 4, max_chunks: 1024, + chain_id: config.chain_id, } } } @@ -74,9 +78,8 @@ impl PackingActor { actix_runtime_handle: Handle, task_executor: TaskExecutor, storage_module_ids: Vec, - config: Option, + config: PackingConfig, ) -> Self { - let config = config.unwrap_or_default(); let semaphore = storage_module_ids .iter() .map(|s| (*s, Arc::new(Semaphore::new(config.concurrency.into())))) @@ -163,6 +166,7 @@ impl PackingActor { entropy_packing_iterations, chunk_size as usize, &mut out, + self.config.chain_id ); debug!(target: "irys::packing::progress", "CPU Packing chunk offset {} for SM {} partition_hash {} mining_address {} iterations {}", &i, &storage_module_id, &partition_hash, &mining_address, &entropy_packing_iterations); @@ -218,6 +222,8 @@ impl PackingActor { partition_hash, Some(entropy_packing_iterations), &mut out, + entropy_packing_iterations, + self.config.chain_id, ); for i in 0..num_chunks { storage_module.write_chunk( @@ -357,20 +363,30 @@ mod tests { use irys_testing_utils::utils::setup_tracing_and_temp_dir; use irys_types::{ partition::{PartitionAssignment, PartitionHash}, - partition_chunk_offset_ii, Address, PartitionChunkOffset, PartitionChunkRange, + partition_chunk_offset_ii, Address, Config, PartitionChunkOffset, PartitionChunkRange, StorageConfig, }; use reth::tasks::TaskManager; use tokio::runtime::Handle; use crate::packing::{ - cast_vec_u8_to_vec_u8_array, wait_for_packing, PackingActor, PackingRequest, + cast_vec_u8_to_vec_u8_array, wait_for_packing, PackingActor, PackingConfig, PackingRequest, }; #[actix::test] async fn 
test_packing_actor() -> eyre::Result<()> { + // setup let mining_address = Address::random(); let partition_hash = PartitionHash::zero(); + let testnet_config = Config { + num_writes_before_sync: 1, + entropy_packing_iterations: 1000, + num_chunks_in_partition: 5, + chunk_size: 32, + ..Config::testnet() + }; + let config = PackingConfig::new(&testnet_config); + let infos = vec![StorageModuleInfo { id: 0, partition_assignment: Some(PartitionAssignment { @@ -383,18 +399,9 @@ mod tests { (partition_chunk_offset_ii!(0, 4), "hdd0-4TB".into()), // 0 to 4 inclusive ], }]; - - // Override the default StorageModule config for testing - let storage_config = StorageConfig { - min_writes_before_sync: 1, - entropy_packing_iterations: 1_000, - num_chunks_in_partition: 5, - ..Default::default() - }; - + let storage_config = StorageConfig::new(&testnet_config); let tmp_dir = setup_tracing_and_temp_dir(Some("test_packing_actor"), false); let base_path = tmp_dir.path().to_path_buf(); - // Create a StorageModule with the specified submodules and config let storage_module_info = &infos[0]; let storage_module = Arc::new(StorageModule::new( @@ -409,18 +416,21 @@ mod tests { }; // Create an instance of the mempool actor let task_manager = TaskManager::current(); - let sm_ids = vec![storage_module.id]; - - let packing = PackingActor::new(Handle::current(), task_manager.executor(), sm_ids, None); - + let packing = PackingActor::new( + Handle::current(), + task_manager.executor(), + sm_ids, + config.clone(), + ); let packing_addr = packing.start(); + // action packing_addr.send(request).await?; - wait_for_packing(packing_addr, None).await?; - storage_module.sync_pending_chunks()?; + + // assert // check that the chunks are marked as packed let intervals = storage_module.get_intervals(ChunkType::Entropy); assert_eq!( @@ -446,6 +456,7 @@ mod tests { storage_config.entropy_packing_iterations, storage_config.chunk_size.try_into().unwrap(), &mut out, + config.chain_id, ); assert_eq!(chunk.0.first(), out.first()); } diff --git a/crates/actors/src/vdf_service.rs b/crates/actors/src/vdf_service.rs index 4060abe3..ba7bbef9 100644 --- a/crates/actors/src/vdf_service.rs +++ b/crates/actors/src/vdf_service.rs @@ -8,100 +8,101 @@ use std::{ }; use tracing::info; -use irys_types::{block_production::Seed, DatabaseProvider, CONFIG}; +use irys_types::{block_production::Seed, Config, DatabaseProvider}; use crate::block_index_service::BlockIndexReadGuard; -#[derive(Debug)] +#[derive(Debug, Default)] pub struct VdfService { pub vdf_state: AtomicVdfState, } -impl Default for VdfService { - fn default() -> Self { - Self::new(None, None) - } -} - impl VdfService { /// Creates a new `VdfService` setting up how many steps are stored in memory, and loads state from path if available - pub fn new(block_index: Option, db: Option) -> Self { - let vdf_state = Self::create_state(block_index, db); + pub fn new(block_index: BlockIndexReadGuard, db: DatabaseProvider, config: &Config) -> Self { + let vdf_state = create_state(block_index, db, &config); Self { vdf_state: Arc::new(RwLock::new(vdf_state)), } } - /// Creates a new `VdfService` setting up how many steps are stored in memory, and loads state from path if available - pub fn from_atomic_state(vdf_state: AtomicVdfState) -> Self { - Self { vdf_state } + #[cfg(any(feature = "test-utils", test))] + pub fn from_capacity(capacity: usize) -> Self { + VdfService { + vdf_state: Arc::new(RwLock::new(VdfState { + global_step: 0, + max_seeds_num: capacity, + seeds: VecDeque::with_capacity(capacity), + 
})), + } } +} - pub fn create_state( - block_index: Option, - db: Option, - ) -> VdfState { - // set up a minimum cache size of 10_000 steps for testing purposes, chunks number can be very low in testing setups so may need more cached steps than strictly efficient sampling needs. - let capacity = std::cmp::max( - 10_000, - (CONFIG.num_chunks_in_partition / CONFIG.num_chunks_in_recall_range) - .try_into() - .unwrap(), - ); - - let latest_block_hash = if let Some(bi) = block_index { - bi.read().get_latest_item().map(|item| item.block_hash) - } else { - None - }; - - if let Some(block_hash) = latest_block_hash { - if let Some(db) = db { - let mut seeds: VecDeque = VecDeque::with_capacity(capacity); - let tx = db.tx().unwrap(); - - let mut block = block_header_by_hash(&tx, &block_hash).unwrap().unwrap(); - let global_step_number = block.vdf_limiter_info.global_step_number; - let mut steps_remaining = capacity; - - while steps_remaining > 0 && block.height > 0 { - // get all the steps out of the block - for step in block.vdf_limiter_info.steps.0.iter().rev() { - seeds.push_front(Seed(*step)); - steps_remaining -= 1; - if steps_remaining == 0 { - break; - } - } - // get the previous block - block = block_header_by_hash(&tx, &block.previous_block_hash) - .unwrap() - .unwrap(); +fn create_state( + block_index: BlockIndexReadGuard, + db: DatabaseProvider, + config: &Config, +) -> VdfState { + let capacity = calc_capacity(config); + + if let Some(block_hash) = block_index + .read() + .get_latest_item() + .map(|item| item.block_hash) + { + let mut seeds: VecDeque = VecDeque::with_capacity(capacity); + let tx = db.tx().unwrap(); + + let mut block = block_header_by_hash(&tx, &block_hash).unwrap().unwrap(); + let global_step_number = block.vdf_limiter_info.global_step_number; + let mut steps_remaining = capacity; + + while steps_remaining > 0 && block.height > 0 { + // get all the steps out of the block + for step in block.vdf_limiter_info.steps.0.iter().rev() { + seeds.push_front(Seed(*step)); + steps_remaining -= 1; + if steps_remaining == 0 { + break; } - info!( - "Initializing vdf service from block's info in step number {}", - global_step_number - ); - VdfState { - global_step: global_step_number, - seeds, - max_seeds_num: capacity, - } - } else { - panic!("Can't initialize VdfService without a DatabaseProvider"); - } - } else { - info!("No block index found, initializing VdfState from zero"); - VdfState { - global_step: 0, - seeds: VecDeque::with_capacity(capacity), - max_seeds_num: capacity, } + // get the previous block + block = block_header_by_hash(&tx, &block.previous_block_hash) + .unwrap() + .unwrap(); } + info!( + "Initializing vdf service from block's info in step number {}", + global_step_number + ); + return VdfState { + global_step: global_step_number, + seeds, + max_seeds_num: capacity, + }; + }; + + info!("No block index found, initializing VdfState from zero"); + VdfState { + global_step: 0, + seeds: VecDeque::with_capacity(capacity), + max_seeds_num: capacity, } } +pub fn calc_capacity(config: &Config) -> usize { + const DEFAULT_CAPACITY: usize = 10_000; + let capacity = std::cmp::max( + DEFAULT_CAPACITY, + (config.num_chunks_in_partition / config.num_chunks_in_recall_range) + .try_into() + .unwrap(), + ); + + capacity +} + impl Supervised for VdfService {} impl SystemService for VdfService { @@ -154,7 +155,8 @@ mod tests { #[actix_rt::test] async fn test_vdf() { - let service = VdfService::new(None, None); + let testnet_config = Config::testnet(); + let service = 
VdfService::from_capacity(calc_capacity(&testnet_config)); service.vdf_state.write().unwrap().seeds = VecDeque::with_capacity(4); service.vdf_state.write().unwrap().max_seeds_num = 4; let addr = service.start(); diff --git a/crates/api-server/src/lib.rs b/crates/api-server/src/lib.rs index 4953b986..4301a9ec 100644 --- a/crates/api-server/src/lib.rs +++ b/crates/api-server/src/lib.rs @@ -17,7 +17,7 @@ use irys_actors::{ }; use irys_reth_node_bridge::node::RethNodeProvider; use irys_storage::ChunkProvider; -use irys_types::{app_state::DatabaseProvider, CONFIG}; +use irys_types::{app_state::DatabaseProvider, Config}; use routes::{block, get_chunk, index, network_config, post_chunk, price, proxy::proxy, tx}; use tracing::{debug, info}; @@ -26,6 +26,7 @@ pub struct ApiState { pub mempool: Addr, pub chunk_provider: Arc, pub db: DatabaseProvider, + pub config: Config, // TODO: slim this down to what we actually use - beware the types! // TODO: remove the Option<> pub reth_provider: Option, @@ -61,7 +62,8 @@ pub fn routes() -> impl HttpServiceFactory { } pub async fn run_server(app_state: ApiState) { - info!("Starting API server on port {}", CONFIG.port); + let port = app_state.config.port; + info!(?port, "Starting API server"); HttpServer::new(move || { let awc_client = awc::Client::new(); @@ -81,7 +83,7 @@ pub async fn run_server(app_state: ApiState) { .route("/", web::get().to(index::info_route)) .wrap(Cors::permissive()) }) - .bind(("0.0.0.0", CONFIG.port)) + .bind(("0.0.0.0", port)) .unwrap() .run() .await diff --git a/crates/api-server/src/routes/price.rs b/crates/api-server/src/routes/price.rs index bd11dc90..7f8852d5 100644 --- a/crates/api-server/src/routes/price.rs +++ b/crates/api-server/src/routes/price.rs @@ -1,17 +1,24 @@ -use actix_web::{web::Path, HttpResponse}; +use actix_web::{ + web::{self, Path}, + HttpResponse, +}; use irys_config::{PRICE_PER_CHUNK_5_EPOCH, PRICE_PER_CHUNK_PERM}; use irys_database::Ledger; -use irys_types::CONFIG; -pub async fn get_price(path: Path<(String, u64)>) -> actix_web::Result { +use crate::ApiState; + +pub async fn get_price( + path: Path<(String, u64)>, + state: web::Data, +) -> actix_web::Result { let size = path.1; let ledger = Ledger::from_url(&path.0); - let num_of_chunks = if size < CONFIG.chunk_size { + let num_of_chunks = if size < state.config.chunk_size { 1u128 } else { // Safe because u128 > u64 - (size % CONFIG.chunk_size + 1) as u128 + (size % state.config.chunk_size + 1) as u128 }; if let Ok(l) = ledger { diff --git a/crates/c/src/capacity_single.rs b/crates/c/src/capacity_single.rs index 2dfca781..b8c3850f 100644 --- a/crates/c/src/capacity_single.rs +++ b/crates/c/src/capacity_single.rs @@ -1,5 +1,4 @@ use irys_primitives::Address; -use irys_types::CONFIG; use openssl::sha; pub const SHA_HASH_SIZE: usize = 32; @@ -9,12 +8,13 @@ pub fn compute_seed_hash( address: Address, offset: std::ffi::c_ulong, hash: [u8; SHA_HASH_SIZE], + irys_chain_id: u64, ) -> [u8; SHA_HASH_SIZE] { let mut hasher = sha::Sha256::new(); let address_buffer: [u8; 20] = address.0.into(); hasher.update(&address_buffer); hasher.update(&hash); - hasher.update(&CONFIG.irys_chain_id.to_le_bytes()); + hasher.update(&irys_chain_id.to_le_bytes()); hasher.update(&offset.to_le_bytes()); hasher.finish() } @@ -30,8 +30,10 @@ pub fn compute_entropy_chunk( iterations: u32, chunk_size: usize, out_entropy_chunk: &mut Vec, + irys_chain_id: u64, ) { - let mut previous_segment = compute_seed_hash(mining_address, chunk_offset, partition_hash); + let mut previous_segment = + 
compute_seed_hash(mining_address, chunk_offset, partition_hash, irys_chain_id); out_entropy_chunk.clear(); // Phase 1: sequential hashing for _i in 0..(chunk_size / SHA_HASH_SIZE) { @@ -70,23 +72,26 @@ mod tests { capacity_single::{self, SHA_HASH_SIZE}, }; use irys_primitives::Address; - use irys_types::{CHUNK_SIZE, CONFIG}; + use irys_types::{Config, CHUNK_SIZE}; use rand; use rand::Rng; use std::time::Instant; #[test] fn test_seed_hash() { + let testnet_config = Config::testnet(); let mut rng = rand::thread_rng(); let mining_address = Address::random(); let chunk_offset = rng.gen_range(1..=1000); let mut partition_hash = [0u8; SHA_HASH_SIZE]; rng.fill(&mut partition_hash[..]); - let now = Instant::now(); - - let rust_hash = - capacity_single::compute_seed_hash(mining_address, chunk_offset, partition_hash); + let rust_hash = capacity_single::compute_seed_hash( + mining_address, + chunk_offset, + partition_hash, + testnet_config.chain_id, + ); let elapsed = now.elapsed(); println!("Rust seed implementation: {:.2?}", elapsed); @@ -100,7 +105,7 @@ mod tests { let c_hash_ptr = c_hash.as_ptr() as *mut u8; let now = Instant::now(); - let chain_id: u64 = CONFIG.irys_chain_id; + let chain_id = testnet_config.chain_id; unsafe { compute_seed_hash( @@ -124,6 +129,7 @@ mod tests { #[test] fn test_compute_entropy_chunk() { + let testnet_config = Config::testnet(); let mut rng = rand::thread_rng(); let mining_address = Address::random(); let chunk_offset = rng.gen_range(1..=1000); @@ -140,8 +146,9 @@ mod tests { chunk_offset, partition_hash, iterations, - CONFIG.chunk_size as usize, + testnet_config.chunk_size as usize, &mut chunk, + testnet_config.chain_id, ); let elapsed = now.elapsed(); @@ -157,7 +164,7 @@ mod tests { let c_chunk_ptr = c_chunk.as_ptr() as *mut u8; let now = Instant::now(); - let chain_id: u64 = CONFIG.irys_chain_id; + let chain_id = testnet_config.chain_id; unsafe { compute_entropy_chunk( diff --git a/crates/chain/Cargo.toml b/crates/chain/Cargo.toml index fb67b78e..2c62b3cb 100644 --- a/crates/chain/Cargo.toml +++ b/crates/chain/Cargo.toml @@ -7,34 +7,34 @@ version = "0.1.0" name = "irys" path = "src/main.rs" - [features] nvidia = ["irys-actors/nvidia"] - [dependencies] # Irys irys-database.workspace = true irys-storage.workspace = true irys-reth-node-bridge.workspace = true -irys-types.workspace = true +irys-types = { workspace = true, features = ["test-utils"] } irys-api-server.workspace = true irys-config.workspace = true irys-testing-utils.workspace = true irys-actors.workspace = true irys-packing.workspace = true irys-vdf.workspace = true -awc = "3.5.1" -base58 = "0.2.0" -clap = "4.5.20" -eyre = "0.6.8" -rand = "0.8.5" + +base58.workspace = true +color-eyre.workspace = true +tracing-error.workspace = true +eyre.workspace = true +rand.workspace = true nodit.workspace = true reth-db.workspace = true reth-primitives.workspace = true reth.workspace = true -serde_json = "1.0.107" -sha2 = "0.10.8" +toml.workspace = true +serde_json = "1" +sha2 = "0.10" actix-rt.workspace = true actix-web.workspace = true actix-http.workspace = true @@ -48,18 +48,21 @@ alloy-core.workspace = true actix.workspace = true alloy-consensus.workspace = true alloy-eips.workspace = true + +modular-bitfield.workspace = true reth-cli-runner.workspace = true futures.workspace = true reth-tracing.workspace = true hex.workspace = true test-fuzz.workspace = true -k256 = { version = "0.13", default-features = false, features = [ - "ecdsa", - "serde", -] } +k256 = { version = "0.13", default-features = false, 
features = ["ecdsa", "serde"] } alloy-sol-macro = { workspace = true, features = ["json"] } alloy-provider.workspace = true core_affinity = "0.8.1" + +[dev-dependencies] +awc = "3.5" +irys-actors = { workspace = true, features = ["test-utils"] } alloy-sol-types.workspace = true alloy-contract.workspace = true diff --git a/crates/chain/src/chain.rs b/crates/chain/src/chain.rs index ee75852d..42b714f8 100644 --- a/crates/chain/src/chain.rs +++ b/crates/chain/src/chain.rs @@ -2,6 +2,7 @@ use ::irys_database::{tables::IrysTables, BlockIndex, Initialized}; use actix::{Actor, System, SystemRegistry}; use actix::{Arbiter, SystemService}; use alloy_eips::BlockNumberOrTag; +use irys_actors::packing::PackingConfig; use irys_actors::reth_service::{BlockHashType, ForkChoiceUpdateMessage, RethServiceActor}; use irys_actors::{ block_discovery::BlockDiscoveryActor, @@ -22,7 +23,7 @@ use irys_actors::{ ActorAddresses, BlockFinalizedMessage, }; use irys_api_server::{run_server, ApiState}; -use irys_config::{decode_hex, IrysNodeConfig, StorageSubmodulesConfig}; +use irys_config::{IrysNodeConfig, StorageSubmodulesConfig}; use irys_database::database; use irys_packing::{PackingType, PACKING_TYPE}; use irys_reth_node_bridge::adapter::node::RethNodeContext; @@ -33,11 +34,11 @@ use irys_storage::{ reth_provider::{IrysRethProvider, IrysRethProviderInner}, ChunkProvider, ChunkType, StorageModule, StorageModuleVec, }; -use irys_types::PartitionChunkRange; use irys_types::{ - app_state::DatabaseProvider, calculate_initial_difficulty, irys::IrysSigner, - vdf_config::VDFStepsConfig, StorageConfig, CHUNK_SIZE, CONFIG, H256, + app_state::DatabaseProvider, calculate_initial_difficulty, vdf_config::VDFStepsConfig, + StorageConfig, CHUNK_SIZE, H256, }; +use irys_types::{Config, DifficultyAdjustmentConfig, PartitionChunkRange}; use irys_vdf::vdf_state::VdfStepsReadGuard; use reth::rpc::eth::EthApiServer as _; use reth::{ @@ -64,62 +65,12 @@ use tokio::{ use crate::vdf::run_vdf; use irys_database::migration::check_db_version_and_run_migrations_if_needed; use irys_storage::irys_consensus_data_db::open_or_create_irys_consensus_data_db; -use irys_testing_utils::utils::setup_tracing_and_temp_dir; - -pub async fn start() -> eyre::Result { - let config: IrysNodeConfig = IrysNodeConfig { - mining_signer: IrysSigner::mainnet_from_slice(&decode_hex(CONFIG.mining_key).unwrap()), - ..IrysNodeConfig::default() - }; - - let storage_config = StorageConfig { - chunk_size: CONFIG.chunk_size, - num_chunks_in_partition: CONFIG.num_chunks_in_partition, - num_chunks_in_recall_range: CONFIG.num_chunks_in_recall_range, - num_partitions_in_slot: CONFIG.num_partitions_per_slot, - miner_address: config.mining_signer.address(), - min_writes_before_sync: 1, - entropy_packing_iterations: CONFIG.entropy_packing_iterations, - chunk_migration_depth: CONFIG.chunk_migration_depth, // Testnet / single node config - }; - - start_irys_node(config, storage_config).await -} -pub async fn start_for_testing(config: IrysNodeConfig) -> eyre::Result { - let storage_config = StorageConfig { - chunk_size: 32, - num_chunks_in_partition: 10, - num_chunks_in_recall_range: 2, - num_partitions_in_slot: 1, - miner_address: config.mining_signer.address(), - min_writes_before_sync: 1, - entropy_packing_iterations: 1_000, - chunk_migration_depth: 1, // Testnet / single node config - }; - - start_irys_node(config, storage_config).await -} +pub async fn start(config: Config) -> eyre::Result { + let irys_node_config = IrysNodeConfig::new(&config); + let storage_config = 
StorageConfig::new(&config); -pub async fn start_for_testing_default( - name: Option<&str>, - keep: bool, - miner_signer: IrysSigner, - storage_config: StorageConfig, -) -> eyre::Result { - let config = IrysNodeConfig { - base_directory: setup_tracing_and_temp_dir(name, keep).into_path(), - mining_signer: miner_signer.clone(), - ..IrysNodeConfig::default() - }; - - let storage_config = StorageConfig { - miner_address: miner_signer.address(), // just in case to keep the same miner address - chunk_migration_depth: 1, // Testnet / single node config - ..storage_config - }; - - start_irys_node(config, storage_config).await + start_irys_node(irys_node_config, storage_config, config).await } #[derive(Debug, Clone)] @@ -138,12 +89,13 @@ pub struct IrysNodeCtx { pub async fn start_irys_node( node_config: IrysNodeConfig, storage_config: StorageConfig, + config: Config, ) -> eyre::Result { info!("Using directory {:?}", &node_config.base_directory); // Delete the .irys folder if we are not persisting data on restart let base_dir = node_config.instance_directory(); - if fs::exists(&base_dir).unwrap_or(false) && CONFIG.reset_state_on_restart { + if fs::exists(&base_dir).unwrap_or(false) && config.reset_state_on_restart { // remove existing data directory as storage modules are packed with a different miner_signer generated next info!("Removing .irys folder {:?}", &base_dir); fs::remove_dir_all(&base_dir).expect("Unable to remove .irys folder"); @@ -161,7 +113,7 @@ pub async fn start_irys_node( let (irys_node_handle_sender, irys_node_handle_receiver) = oneshot::channel::(); let (reth_chainspec, mut irys_genesis) = node_config.chainspec_builder.build(); let arc_config = Arc::new(node_config); - let mut difficulty_adjustment_config = CONFIG.clone().into(); + let mut difficulty_adjustment_config = DifficultyAdjustmentConfig::new(&config); // TODO: Hard coding 3 for storage module count isn't great here, // eventually we'll want to relate this to the genesis config @@ -179,9 +131,7 @@ pub async fn start_irys_node( let at_genesis; let latest_block_index: Option; - #[allow(unused_assignments)] // this does get read by passing it through to reth - let mut latest_block_height: u64 = 0; - + let latest_block_height; let block_index: Arc>> = Arc::new(RwLock::new({ let idx = BlockIndex::default(); let i = idx.init(arc_config.clone()).await.unwrap(); @@ -199,12 +149,6 @@ pub async fn start_irys_node( &latest_block_height ); - // // trim the last block off the block index - // let trimmed_items = &i.items[0..i.items.len() - 1]; - // irys_database::save_block_index(trimmed_items, &arc_config.clone())?; - // dbg!("written block index! 
{}", &trimmed_items.len()); - // std::process::exit(0); - i })); @@ -230,11 +174,10 @@ pub async fn start_irys_node( let reth_node = RethNodeProvider(Arc::new(reth_handle_receiver.await.unwrap())); let reth_db = DatabaseProvider(reth_node.provider.database.db.clone()); let irys_db = DatabaseProvider(irys_db_env.clone()); + let vdf_config = VDFStepsConfig::new(&config); check_db_version_and_run_migrations_if_needed(&reth_db, &irys_db).unwrap(); - let vdf_config = VDFStepsConfig::default(); - let latest_block = latest_block_index .map(|b| { database::block_header_by_hash(&irys_db.tx().unwrap(), &b.block_hash) @@ -245,10 +188,7 @@ pub async fn start_irys_node( .unwrap_or(arc_genesis.clone()); // Initialize the epoch_service actor to handle partition ledger assignments - let config = EpochServiceConfig { - storage_config: storage_config.clone(), - ..EpochServiceConfig::default() - }; + let epoch_config = EpochServiceConfig::new(&config); let miner_address = node_config.mining_signer.address(); debug!("Miner address {:?}", miner_address); @@ -343,7 +283,7 @@ pub async fn start_irys_node( }); SystemRegistry::set(broadcast_mining_service.clone()); - let mut epoch_service = EpochServiceActor::new(Some(config)); + let mut epoch_service = EpochServiceActor::new(epoch_config, &config); epoch_service.initialize(&irys_db).await; let epoch_service_actor_addr = epoch_service.start(); @@ -417,7 +357,8 @@ pub async fn start_irys_node( node_config.mining_signer.clone(), storage_config.clone(), storage_modules.clone(), - block_tree_guard.clone() + block_tree_guard.clone(), + &config, ); let mempool_arbiter = Arbiter::new(); SystemRegistry::set(MempoolService::start_in_arbiter( @@ -434,14 +375,9 @@ pub async fn start_irys_node( ); SystemRegistry::set(chunk_migration_service.start()); - let vdf_state = Arc::new(RwLock::new(VdfService::create_state( - Some(block_index_guard.clone()), - Some(irys_db.clone()), - ))); - - let vdf_service_actor = VdfService::from_atomic_state(vdf_state); + let vdf_service_actor = VdfService::new(block_index_guard.clone(), irys_db.clone(), &config); let vdf_service = vdf_service_actor.start(); - SystemRegistry::set(vdf_service.clone()); // register it as a service + SystemRegistry::set(vdf_service.clone()); let vdf_steps_guard: VdfStepsReadGuard = vdf_service.send(GetVdfStateMessage).await.unwrap(); @@ -505,7 +441,7 @@ pub async fn start_irys_node( Handle::current(), reth_node.task_executor.clone(), sm_ids, - None, + PackingConfig::new(&config), ) .start(); @@ -544,11 +480,6 @@ pub async fn start_irys_node( }) .collect::>(); } - - // let _ = wait_for_packing(packing_actor_addr.clone(), None).await; - // debug!("Packing complete"); - - let part_actors_clone = part_actors.clone(); // Let the partition actors know about the genesis difficulty @@ -635,6 +566,7 @@ pub async fn start_irys_node( reth_provider: Some(reth_node.clone()), block_tree: Some(block_tree_guard.clone()), block_index: Some(block_index_guard.clone()), + config }) .await; diff --git a/crates/chain/src/main.rs b/crates/chain/src/main.rs index d35d76b6..fca703eb 100644 --- a/crates/chain/src/main.rs +++ b/crates/chain/src/main.rs @@ -1,29 +1,61 @@ -use clap::{command, Parser}; +use std::path::PathBuf; + use irys_chain::chain::start; -use reth_tracing::tracing_subscriber::fmt::SubscriberBuilder; +use irys_types::Config; use reth_tracing::tracing_subscriber::util::SubscriberInitExt; -use tracing_subscriber::EnvFilter; - -#[derive(Parser, Debug)] -#[command(version, about, long_about = None)] -struct Args { - /// Name of 
the person to greet - #[arg(short, long, default_value = "./database")] - database: String, -} +use tracing_error::ErrorLayer; +use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Layer, Registry}; #[tokio::main] async fn main() -> eyre::Result<()> { - // TODO: fix this, we used to await the reth node exit future but can't anymore - // so we need another near-infinite blocking future - let _ = SubscriberBuilder::default() - .with_env_filter(EnvFilter::from_default_env()) - .finish() - .try_init(); - - let handle = start().await?; + // init logging + init_tracing().expect("initializing tracing should work"); + color_eyre::install().expect("color eyre could not be installed"); + + // load the config + let config_file = std::env::var("CONFIG") + .unwrap_or_else(|_| "config.toml".to_owned()) + .parse::() + .expect("invalid file path"); + + let config = std::fs::read_to_string(config_file) + .map(|config_file| toml::from_str::(&config_file).expect("invalid config file")) + .unwrap_or_else(|_err| { + tracing::warn!("config file not provided, defaulting to testnet config"); + Config::testnet() + }); + + // start the node + tracing::info!("starting the node"); + let handle = start(config).await?; handle.actor_addresses.start_mining()?; std::thread::park(); Ok(()) } + +fn init_tracing() -> eyre::Result<()> { + let subscriber = Registry::default(); + let filter = EnvFilter::new("info") + .add_directive("actix_web=info".parse()?) + .add_directive("actix=info".parse()?) + .add_directive(EnvFilter::from_default_env().to_string().parse()?); + + let output_layer = tracing_subscriber::fmt::layer() + .with_line_number(true) + .with_ansi(true) + .with_file(true) + .with_writer(std::io::stdout); + + // use json logging for release builds + let subscriber = subscriber.with(filter).with(ErrorLayer::default()); + let subscriber = if cfg!(debug_assertions) { + subscriber.with(output_layer.boxed()) + } else { + subscriber.with(output_layer.json().with_current_span(true).boxed()) + }; + + subscriber.init(); + + Ok(()) +} diff --git a/crates/chain/src/vdf.rs b/crates/chain/src/vdf.rs index 3ab8114e..1c303e58 100644 --- a/crates/chain/src/vdf.rs +++ b/crates/chain/src/vdf.rs @@ -92,7 +92,7 @@ pub fn run_vdf( mod tests { use super::*; use actix::*; - use irys_actors::vdf_service::GetVdfStateMessage; + use irys_actors::vdf_service::{calc_capacity, GetVdfStateMessage}; use irys_types::*; use irys_vdf::{vdf_sha_verification, vdf_state::VdfStepsReadGuard, vdf_steps_are_valid}; use nodit::interval::ii; @@ -109,10 +109,12 @@ mod tests { .finish() .try_init(); } + #[actix_rt::test] async fn test_vdf_step() { + let config = Config::testnet(); let mut hasher = Sha256::new(); - let mut checkpoints: Vec = vec![H256::default(); CONFIG.num_checkpoints_in_vdf_step]; + let mut checkpoints: Vec = vec![H256::default(); config.num_checkpoints_in_vdf_step]; let mut hash: H256 = H256::random(); let original_hash = hash; let mut salt: U256 = U256::from(10); @@ -120,7 +122,7 @@ mod tests { init_tracing(); - let config = VDFStepsConfig::default(); + let config = VDFStepsConfig::new(&config); debug!("VDF difficulty: {}", config.vdf_difficulty); let now = Instant::now(); @@ -150,19 +152,20 @@ mod tests { #[actix_rt::test] async fn test_vdf_service() { + let mut config = Config::testnet(); + config.vdf_reset_frequency = 2; + config.vdf_sha_1s = 1; let seed = H256::random(); let reset_seed = H256::random(); - let vdf_config = VDFStepsConfig { - vdf_reset_frequency: 2, // so to validation get into reset point - vdf_difficulty: 1, // go 
quicker - ..VDFStepsConfig::default() - }; + let vdf_config = VDFStepsConfig::new(&config); init_tracing(); let broadcast_mining_service = BroadcastMiningService::from_registry(); - let vdf_service = VdfService::from_registry(); + let capacity = calc_capacity(&config); + let vdf_service = VdfService::from_capacity(capacity).start(); + SystemRegistry::set(vdf_service.clone()); let vdf_steps: VdfStepsReadGuard = vdf_service.send(GetVdfStateMessage).await.unwrap(); let vdf_config2 = vdf_config.clone(); diff --git a/crates/chain/tests/api/api.rs b/crates/chain/tests/api/api.rs index 9c955156..11c62a35 100644 --- a/crates/chain/tests/api/api.rs +++ b/crates/chain/tests/api/api.rs @@ -1,5 +1,9 @@ +// TODO: delete this whole module; the tests are ignored anyway and can be restored in the future + +use actix_http::StatusCode; use irys_api_server::{routes, ApiState}; -use irys_chain::chain::start_for_testing_default; +use irys_chain::start_irys_node; +use irys_config::IrysNodeConfig; use irys_packing::{unpack, PackingType, PACKING_TYPE}; use actix_web::{ @@ -8,9 +12,8 @@ use actix_web::{ web::{self, JsonConfig}, App, }; -use awc::http::StatusCode; use base58::ToBase58; -use irys_types::TxChunkOffset; +use irys_types::{Config, TxChunkOffset}; use tracing::info; #[ignore] @@ -37,8 +40,11 @@ async fn api_end_to_end_test(chunk_size: usize) { use std::time::Duration; use tokio::time::sleep; use tracing::{debug, info}; - - let miner_signer = IrysSigner::random_signer_with_chunk_size(chunk_size); + let testnet_config = Config { + chunk_size: chunk_size.try_into().unwrap(), + ..Config::testnet() + }; + let miner_signer = IrysSigner::from_config(&testnet_config); let storage_config = StorageConfig { chunk_size: chunk_size as u64, @@ -49,13 +55,14 @@ async fn api_end_to_end_test(chunk_size: usize) { min_writes_before_sync: 1, entropy_packing_iterations: 1_000, chunk_migration_depth: 1, // Testnet / single node config + chain_id: testnet_config.chain_id, }; + let entropy_packing_iterations = storage_config.entropy_packing_iterations; - let handle = start_for_testing_default( - Some("api_end_to_end_test"), - false, - miner_signer, - storage_config.clone(), + let handle = start_irys_node( + IrysNodeConfig::new(&testnet_config), + storage_config, + testnet_config.clone(), ) .await .unwrap(); @@ -68,6 +75,7 @@ async fn api_end_to_end_test(chunk_size: usize) { db: handle.db, mempool: handle.actor_addresses.mempool, chunk_provider: handle.chunk_provider.clone(), + config: testnet_config.clone(), }; // Initialize the app @@ -86,7 +94,7 @@ async fn api_end_to_end_test(chunk_size: usize) { rand::thread_rng().fill(&mut data_bytes[..]); // Create a new Irys API instance & a signed transaction - let irys = IrysSigner::random_signer_with_chunk_size(chunk_size); + let irys = IrysSigner::random_signer(&testnet_config); let tx = irys.create_transaction(data_bytes.clone(), None).unwrap(); let tx = irys.sign_transaction(tx).unwrap(); @@ -189,8 +197,9 @@ async fn api_end_to_end_test(chunk_size: usize) { let unpacked_chunk = unpack( &packed_chunk, - storage_config.entropy_packing_iterations, + entropy_packing_iterations, chunk_size, + testnet_config.chain_id, ); assert_eq!( unpacked_chunk.bytes.0, diff --git a/crates/chain/tests/block_production/analytics.rs b/crates/chain/tests/block_production/analytics.rs index 5b77868d..7878166a 100644 --- a/crates/chain/tests/block_production/analytics.rs +++ b/crates/chain/tests/block_production/analytics.rs @@ -9,6 +9,7 @@ use alloy_provider::Provider; use alloy_provider::ProviderBuilder;
use alloy_signer_local::LocalSigner; use alloy_signer_local::PrivateKeySigner; +use irys_types::Config; use irys_types::TxChunkOffset; use irys_types::UnpackedChunk; use rand::Rng; @@ -18,9 +19,7 @@ use irys_chain::start_irys_node; use irys_config::IrysNodeConfig; use irys_reth_node_bridge::adapter::{node::RethNodeContext, transaction::TransactionTestContext}; use irys_testing_utils::utils::setup_tracing_and_temp_dir; -use irys_types::{ - irys::IrysSigner, serialization::*, IrysTransaction, SimpleRNG, StorageConfig, CONFIG, -}; +use irys_types::{irys::IrysSigner, serialization::*, IrysTransaction, SimpleRNG, StorageConfig}; use k256::ecdsa::SigningKey; use reth::rpc::types::TransactionRequest; use reth_primitives::GenesisAccount; @@ -36,12 +35,13 @@ async fn test_blockprod_with_evm_txs() -> eyre::Result<()> { std::env::set_var("RUST_LOG", "debug"); let temp_dir = setup_tracing_and_temp_dir(Some("test_blockprod"), false); - let mut config = IrysNodeConfig::default(); + let testnet_config = Config::testnet(); + let mut config = IrysNodeConfig::new(&testnet_config); config.base_directory = temp_dir.path().to_path_buf(); - let account1 = IrysSigner::random_signer_with_chunk_size(32); - let account2 = IrysSigner::random_signer_with_chunk_size(32); - let account3 = IrysSigner::random_signer_with_chunk_size(32); + let account1 = IrysSigner::random_signer(&testnet_config); + let account2 = IrysSigner::random_signer(&testnet_config); + let account3 = IrysSigner::random_signer(&testnet_config); config.extend_genesis_accounts(vec![ ( account1.address(), @@ -77,7 +77,9 @@ async fn test_blockprod_with_evm_txs() -> eyre::Result<()> { min_writes_before_sync: 1, entropy_packing_iterations: 1_000, chunk_migration_depth: 1, // Testnet / single node config + chain_id: testnet_config.chain_id, }, + testnet_config.clone(), ) .await?; let _reth_context = RethNodeContext::new(node.reth_handle.into()).await?; @@ -156,7 +158,7 @@ async fn test_blockprod_with_evm_txs() -> eyre::Result<()> { gas: Some(21000), value: Some(U256::from(simple_rng.next_range(20_000))), nonce: Some(alloy_provider.get_transaction_count(a.address()).await?), - chain_id: Some(CONFIG.irys_chain_id), + chain_id: Some(testnet_config.chain_id), ..Default::default() }; diff --git a/crates/chain/tests/block_production/basic_contract.rs b/crates/chain/tests/block_production/basic_contract.rs index 3e545109..8e7a72cd 100644 --- a/crates/chain/tests/block_production/basic_contract.rs +++ b/crates/chain/tests/block_production/basic_contract.rs @@ -5,10 +5,10 @@ use alloy_network::EthereumWallet; use alloy_provider::ProviderBuilder; use alloy_signer_local::PrivateKeySigner; use alloy_sol_macro::sol; -use irys_chain::chain::start_for_testing; +use irys_chain::start_irys_node; use irys_config::IrysNodeConfig; use irys_testing_utils::utils::setup_tracing_and_temp_dir; -use irys_types::irys::IrysSigner; +use irys_types::{irys::IrysSigner, Config}; use reth_primitives::GenesisAccount; use tracing::info; @@ -24,11 +24,12 @@ sol!( #[tokio::test] async fn serial_test_erc20() -> eyre::Result<()> { let temp_dir = setup_tracing_and_temp_dir(Some("test_erc20"), false); - let mut config = IrysNodeConfig::default(); + let testnet_config = Config::testnet(); + let mut config = IrysNodeConfig::new(&testnet_config); config.base_directory = temp_dir.path().to_path_buf(); let main_address = config.mining_signer.address(); - let account1 = IrysSigner::random_signer(); + let account1 = IrysSigner::random_signer(&testnet_config); config.extend_genesis_accounts(vec![ ( @@ 
-47,14 +48,14 @@ async fn serial_test_erc20() -> eyre::Result<()> { ), ]); - let node = start_for_testing(config.clone()).await?; + let storage_config = irys_types::StorageConfig::new(&testnet_config); + let signer: PrivateKeySigner = config.mining_signer.clone().into(); - let signer: PrivateKeySigner = config.mining_signer.signer.into(); - let wallet: EthereumWallet = EthereumWallet::from(signer); + let node = start_irys_node(config, storage_config, testnet_config.clone()).await?; let alloy_provider = ProviderBuilder::new() .with_recommended_fillers() - .wallet(wallet) + .wallet(EthereumWallet::from(signer)) .on_http("http://localhost:8080/v1/execution-rpc".parse()?); let mut deploy_fut = Box::pin(IrysERC20::deploy(alloy_provider, account1.address())); diff --git a/crates/chain/tests/block_production/block_production.rs b/crates/chain/tests/block_production/block_production.rs index 7d235f1b..43773025 100644 --- a/crates/chain/tests/block_production/block_production.rs +++ b/crates/chain/tests/block_production/block_production.rs @@ -6,11 +6,11 @@ use alloy_eips::eip2718::Encodable2718; use alloy_signer_local::LocalSigner; use eyre::eyre; use irys_actors::{block_producer::SolutionFoundMessage, mempool_service::TxIngressMessage}; -use irys_chain::chain::start_for_testing; +use irys_chain::start_irys_node; use irys_config::IrysNodeConfig; use irys_reth_node_bridge::adapter::{node::RethNodeContext, transaction::TransactionTestContext}; use irys_testing_utils::utils::setup_tracing_and_temp_dir; -use irys_types::{irys::IrysSigner, IrysTransaction, CONFIG}; +use irys_types::{irys::IrysSigner, Config, IrysTransaction}; use k256::ecdsa::SigningKey; use reth::{providers::BlockReader, rpc::types::TransactionRequest}; use reth_db::Database; @@ -28,12 +28,13 @@ use crate::utils::capacity_chunk_solution; async fn serial_test_blockprod() -> eyre::Result<()> { std::env::set_var("RUST_LOG", "debug"); let temp_dir = setup_tracing_and_temp_dir(Some("test_blockprod"), false); - let mut config = IrysNodeConfig::default(); + let testnet_config = Config::testnet(); + let mut config = IrysNodeConfig::new(&testnet_config); config.base_directory = temp_dir.path().to_path_buf(); - let account1 = IrysSigner::random_signer(); - let account2 = IrysSigner::random_signer(); - let account3 = IrysSigner::random_signer(); + let account1 = IrysSigner::random_signer(&testnet_config); + let account2 = IrysSigner::random_signer(&testnet_config); + let account3 = IrysSigner::random_signer(&testnet_config); config.extend_genesis_accounts(vec![ ( @@ -59,9 +60,8 @@ async fn serial_test_blockprod() -> eyre::Result<()> { ), ]); - let node = start_for_testing(config).await?; - - // let node_signer = PrivateKeySigner::from_signing_key(node.config.mining_signer.signer); + let storage_config = irys_types::StorageConfig::new(&testnet_config); + let node = start_irys_node(config, storage_config, testnet_config.clone()).await?; let mut txs: HashMap = HashMap::new(); for a in [&account1, &account2, &account3] { @@ -130,10 +130,11 @@ async fn serial_test_blockprod() -> eyre::Result<()> { #[tokio::test] async fn serial_mine_ten_blocks_with_capacity_poa_solution() -> eyre::Result<()> { let temp_dir = setup_tracing_and_temp_dir(Some("test_blockprod"), false); - let mut config = IrysNodeConfig::default(); + let testnet_config = Config::testnet(); + let mut config = IrysNodeConfig::new(&testnet_config); config.base_directory = temp_dir.path().to_path_buf(); - - let node = start_for_testing(config).await?; + let storage_config = 
irys_types::StorageConfig::new(&testnet_config); + let node = start_irys_node(config, storage_config, testnet_config.clone()).await?; let reth_context = RethNodeContext::new(node.reth_handle.into()).await?; @@ -177,10 +178,11 @@ async fn serial_mine_ten_blocks_with_capacity_poa_solution() -> eyre::Result<()> #[tokio::test] async fn serial_mine_ten_blocks() -> eyre::Result<()> { let temp_dir = setup_tracing_and_temp_dir(Some("test_blockprod"), false); - let mut config = IrysNodeConfig::default(); + let testnet_config = Config::testnet(); + let mut config = IrysNodeConfig::new(&testnet_config); config.base_directory = temp_dir.path().to_path_buf(); - - let node = start_for_testing(config).await?; + let storage_config = irys_types::StorageConfig::new(&testnet_config); + let node = start_irys_node(config, storage_config, testnet_config.clone()).await?; node.actor_addresses.start_mining()?; let reth_context = RethNodeContext::new(node.reth_handle.into()).await?; @@ -222,10 +224,13 @@ async fn serial_mine_ten_blocks() -> eyre::Result<()> { #[tokio::test] async fn serial_test_basic_blockprod() -> eyre::Result<()> { let temp_dir = setup_tracing_and_temp_dir(Some("test_blockprod"), false); - let mut config = IrysNodeConfig::default(); + + let testnet_config = Config::testnet(); + let mut config = IrysNodeConfig::new(&testnet_config); config.base_directory = temp_dir.path().to_path_buf(); - let node = start_for_testing(config).await?; + let storage_config = irys_types::StorageConfig::new(&testnet_config); + let node = start_irys_node(config, storage_config, testnet_config.clone()).await?; let poa_solution = capacity_chunk_solution( node.config.mining_signer.address(), @@ -267,12 +272,24 @@ async fn serial_test_basic_blockprod() -> eyre::Result<()> { #[tokio::test] async fn serial_test_blockprod_with_evm_txs() -> eyre::Result<()> { let temp_dir = setup_tracing_and_temp_dir(Some("test_blockprod"), false); - let mut config = IrysNodeConfig::default(); + let testnet_config = Config { + chunk_size: 32, + num_chunks_in_partition: 10, + num_chunks_in_recall_range: 2, + num_partitions_per_slot: 1, + num_writes_before_sync: 1, + entropy_packing_iterations: 1_000, + chunk_migration_depth: 1, + ..Config::testnet() + }; + let mut config = IrysNodeConfig::new(&testnet_config); config.base_directory = temp_dir.path().to_path_buf(); + let storage_config = irys_types::StorageConfig::new(&testnet_config); - let account1 = IrysSigner::random_signer(); - let account2 = IrysSigner::random_signer(); - let account3 = IrysSigner::random_signer(); + let mining_signer_addr = config.mining_signer.address(); + let account1 = IrysSigner::random_signer(&testnet_config); + let account2 = IrysSigner::random_signer(&testnet_config); + let account3 = IrysSigner::random_signer(&testnet_config); config.extend_genesis_accounts(vec![ ( @@ -298,21 +315,25 @@ async fn serial_test_blockprod_with_evm_txs() -> eyre::Result<()> { ), ]); - let node = start_for_testing(config.clone()).await?; + let node = start_irys_node(config, storage_config, testnet_config.clone()).await?; let reth_context = RethNodeContext::new(node.reth_handle.into()).await?; + let miner_init_balance = reth_context + .rpc + .get_balance(mining_signer_addr, None) + .await?; let mut irys_txs: HashMap = HashMap::new(); let mut evm_txs: HashMap = HashMap::new(); for (i, a) in [(1, &account1), (2, &account2), (3, &account3)] { let es: LocalSigner = a.clone().into(); let evm_tx_req = TransactionRequest { - to: Some(TxKind::Call(config.mining_signer.address())), + to: 
Some(TxKind::Call(mining_signer_addr)), max_fee_per_gas: Some(20e9 as u128), max_priority_fee_per_gas: Some(20e9 as u128), gas: Some(21000), value: Some(U256::from(1)), nonce: Some(0), - chain_id: Some(CONFIG.irys_chain_id), + chain_id: Some(testnet_config.chain_id), ..Default::default() }; let tx_env = TransactionTestContext::sign_tx(es, evm_tx_req).await; @@ -358,7 +379,6 @@ async fn serial_test_blockprod_with_evm_txs() -> eyre::Result<()> { .await .unwrap(); irys_txs.insert(IrysTxId::from_slice(tx.header.id.as_bytes()), tx); - // txs.push(tx); } let poa_solution = capacity_chunk_solution( @@ -393,15 +413,14 @@ async fn serial_test_blockprod_with_evm_txs() -> eyre::Result<()> { .unwrap(); // height is hardcoded at 42 right now - // assert_eq!(reth_block.number, block.height); assert!(evm_txs.contains_key(&reth_block.body.transactions.first().unwrap().hash())); assert_eq!( reth_context .rpc - .get_balance(config.mining_signer.address(), None) + .get_balance(mining_signer_addr, None) .await?, - U256::from(1) + miner_init_balance + U256::from(1) ); // check irys DB for built block let db_irys_block = &node diff --git a/crates/chain/tests/external/block_production.rs b/crates/chain/tests/external/block_production.rs index 23684959..054e36da 100644 --- a/crates/chain/tests/external/block_production.rs +++ b/crates/chain/tests/external/block_production.rs @@ -2,12 +2,12 @@ use std::time::Duration; use alloy_core::primitives::{TxHash, U256}; use irys_actors::block_producer::SolutionFoundMessage; -use irys_chain::chain::start_for_testing; +use irys_chain::start_irys_node; use irys_config::IrysNodeConfig; use irys_reth_node_bridge::adapter::node::RethNodeContext; use irys_testing_utils::utils::setup_tracing_and_temp_dir; use irys_types::{ - block_production::SolutionContext, irys::IrysSigner, Address, CONFIG, MAX_CHUNK_SIZE, + block_production::SolutionContext, irys::IrysSigner, Address, Config, MAX_CHUNK_SIZE, }; use k256::ecdsa::SigningKey; use reth::{providers::BlockReader, transaction_pool::TransactionPool as _}; @@ -29,16 +29,16 @@ const DEV2_ADDRESS: &str = "Bea4f456A5801cf9Af196a582D6Ec425c970c2C6"; async fn continuous_blockprod_evm_tx() -> eyre::Result<()> { let dev_wallet = hex::decode(DEV_PRIVATE_KEY)?; let expected_addr = hex::decode(DEV_ADDRESS)?; + let testnet_config = Config::testnet(); let temp_dir = setup_tracing_and_temp_dir(Some("continuous_blockprod_evm_tx"), false); - let mut config = IrysNodeConfig { - mining_signer: IrysSigner { - signer: SigningKey::from_slice(dev_wallet.as_slice())?, - chain_id: CONFIG.irys_chain_id, - chunk_size: MAX_CHUNK_SIZE, - }, - base_directory: temp_dir.path().to_path_buf(), - ..Default::default() + let mut config = IrysNodeConfig::new(&testnet_config); + config.mining_signer = IrysSigner { + signer: SigningKey::from_slice(dev_wallet.as_slice())?, + chain_id: testnet_config.chain_id, + chunk_size: MAX_CHUNK_SIZE, }; + config.base_directory = temp_dir.path().to_path_buf(); + let storage_config = irys_types::StorageConfig::new(&testnet_config); assert_eq!( config.mining_signer.address(), @@ -48,7 +48,7 @@ async fn continuous_blockprod_evm_tx() -> eyre::Result<()> { let account1_address = hex::decode(DEV2_ADDRESS)?; let account1 = IrysSigner { signer: SigningKey::from_slice(hex::decode(DEV2_PRIVATE_KEY)?.as_slice())?, - chain_id: CONFIG.irys_chain_id, + chain_id: testnet_config.chain_id, chunk_size: MAX_CHUNK_SIZE, }; assert_eq!( @@ -80,7 +80,7 @@ async fn continuous_blockprod_evm_tx() -> eyre::Result<()> { ), ]); - let node = 
start_for_testing(config).await?; + let node = start_irys_node(config, storage_config, testnet_config).await?; let reth_context = RethNodeContext::new(node.reth_handle.into()).await?; diff --git a/crates/chain/tests/external/programmable_data_basic.rs b/crates/chain/tests/external/programmable_data_basic.rs index 6e5f1389..f8a0987e 100644 --- a/crates/chain/tests/external/programmable_data_basic.rs +++ b/crates/chain/tests/external/programmable_data_basic.rs @@ -9,10 +9,11 @@ use irys_actors::mempool_service::GetBestMempoolTxs; use irys_actors::packing::wait_for_packing; use irys_actors::SolutionFoundMessage; use irys_api_server::routes::tx::TxOffset; -use irys_chain::chain::start_for_testing; +use irys_chain::start_irys_node; +use irys_config::IrysNodeConfig; use irys_database::tables::IngressProofs; use irys_testing_utils::utils::setup_tracing_and_temp_dir; -use irys_types::{irys::IrysSigner, Address}; +use irys_types::{irys::IrysSigner, Address, Config}; use k256::ecdsa::SigningKey; use reth_db::transaction::DbTx; use reth_db::Database as _; @@ -47,14 +48,14 @@ async fn test_programmable_data_basic_external() -> eyre::Result<()> { std::env::set_var("RUST_LOG", "info"); let temp_dir = setup_tracing_and_temp_dir(Some("test_programmable_data_basic_external"), false); - let mut config = irys_config::IrysNodeConfig { - base_directory: temp_dir.path().to_path_buf(), - ..Default::default() - }; - let main_address = config.mining_signer.address(); + let testnet_config = Config::testnet(); + let mut config = IrysNodeConfig::new(&testnet_config); + config.base_directory = temp_dir.path().to_path_buf(); - let account1 = IrysSigner::random_signer(); + let storage_config = irys_types::StorageConfig::new(&testnet_config); + let main_address = config.mining_signer.address(); + let account1 = IrysSigner::random_signer(&testnet_config); config.extend_genesis_accounts(vec![ ( main_address, @@ -80,7 +81,7 @@ ), ]); - let node = start_for_testing(config.clone()).await?; + let node = start_irys_node(config, storage_config, testnet_config.clone()).await?; node.actor_addresses.stop_mining()?; wait_for_packing( node.actor_addresses.packing.clone(),
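The test migrations above all converge on one bootstrap pattern: the removed `start_for_testing*` helpers are replaced by an explicit `Config` value from which the node and storage configs are derived, so a test's parameters no longer come from a hidden global. A minimal sketch of that pattern, assuming only the constructors introduced in this diff (`Config::testnet()`, `IrysNodeConfig::new`, `StorageConfig::new`, `start_irys_node`):

    use irys_chain::start_irys_node;
    use irys_config::IrysNodeConfig;
    use irys_types::{Config, StorageConfig};

    async fn boot_test_node() -> eyre::Result<()> {
        // a single Config value is the source of truth for the node under test
        let testnet_config = Config::testnet();
        // node- and storage-level configs are derived from it instead of Default::default()
        let node_config = IrysNodeConfig::new(&testnet_config);
        let storage_config = StorageConfig::new(&testnet_config);
        let node = start_irys_node(node_config, storage_config, testnet_config).await?;
        node.actor_addresses.start_mining()?;
        Ok(())
    }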
diff --git a/crates/chain/tests/programmable_data/basic.rs b/crates/chain/tests/programmable_data/basic.rs index 8775dc23..3cc05478 100644 --- a/crates/chain/tests/programmable_data/basic.rs +++ b/crates/chain/tests/programmable_data/basic.rs @@ -10,11 +10,12 @@ use alloy_sol_macro::sol; use base58::ToBase58; use irys_actors::packing::wait_for_packing; use irys_api_server::routes::tx::TxOffset; -use irys_chain::chain::start_for_testing; +use irys_chain::start_irys_node; +use irys_config::IrysNodeConfig; use irys_reth_node_bridge::adapter::node::RethNodeContext; use irys_testing_utils::utils::setup_tracing_and_temp_dir; use irys_types::{irys::IrysSigner, Address}; -use irys_types::{Base64, IrysTransactionHeader, TxChunkOffset, UnpackedChunk}; +use irys_types::{Base64, Config, IrysTransactionHeader, TxChunkOffset, UnpackedChunk}; use k256::ecdsa::SigningKey; use reth::rpc::eth::EthApiServer; @@ -43,17 +44,18 @@ const DEV_ADDRESS: &str = "64f1a2829e0e698c18e7792d6e74f67d89aa0a32"; #[actix_web::test] async fn serial_test_programmable_data_basic() -> eyre::Result<()> { - std::env::set_var("RUST_LOG", "debug"); + std::env::set_var("RUST_LOG", "info"); let temp_dir = setup_tracing_and_temp_dir(Some("test_programmable_data_basic"), false); - let mut config = irys_config::IrysNodeConfig { + let mut testnet_config = Config::testnet(); + testnet_config.chunk_size = 32; + + let main_address = testnet_config.miner_address(); + let account1 = IrysSigner::random_signer(&testnet_config); + let mut config = IrysNodeConfig { base_directory: temp_dir.path().to_path_buf(), - ..Default::default() + ..IrysNodeConfig::new(&testnet_config) }; - let main_address = config.mining_signer.address(); - - let account1 = IrysSigner::random_signer(); - config.extend_genesis_accounts(vec![ ( main_address, @@ -77,8 +79,9 @@ async fn serial_test_programmable_data_basic() -> eyre::Result<()> { }, ), ]); + let storage_config = irys_types::StorageConfig::new(&testnet_config); - let node = start_for_testing(config.clone()).await?; + let node = start_irys_node(config, storage_config, testnet_config.clone()).await?; wait_for_packing( node.actor_addresses.packing.clone(), Some(Duration::from_secs(10)), diff --git a/crates/chain/tests/promotion/data_promotion_basic.rs b/crates/chain/tests/promotion/data_promotion_basic.rs index 11374576..a96eaee0 100644 --- a/crates/chain/tests/promotion/data_promotion_basic.rs +++ b/crates/chain/tests/promotion/data_promotion_basic.rs @@ -1,3 +1,7 @@ +use irys_chain::start_irys_node; +use irys_config::IrysNodeConfig; +use irys_types::Config; + #[actix_web::test] async fn serial_data_promotion_test() { use actix_web::{ @@ -11,7 +15,6 @@ async fn serial_data_promotion_test() { use base58::ToBase58; use irys_actors::packing::wait_for_packing; use irys_api_server::{routes, ApiState}; - use irys_chain::start_for_testing; use irys_database::Ledger; use irys_testing_utils::utils::setup_tracing_and_temp_dir; use irys_types::{ @@ -24,27 +27,24 @@ async fn serial_data_promotion_test() { use crate::utils::{get_block_parent, get_chunk, post_chunk, verify_published_chunk}; - let chunk_size = 32; // 32Byte chunks - - let miner_signer = IrysSigner::random_signer_with_chunk_size(chunk_size); + let chunk_size = 32_u64; // 32 byte chunks - let storage_config = StorageConfig { - chunk_size: chunk_size as u64, + let testnet_config = Config { + chunk_size, num_chunks_in_partition: 10, num_chunks_in_recall_range: 2, - num_partitions_in_slot: 1, - miner_address: miner_signer.address(), - min_writes_before_sync: 1, + num_partitions_per_slot: 1, + num_writes_before_sync: 1, entropy_packing_iterations: 1_000, chunk_migration_depth: 1, // Testnet / single node config + ..Config::testnet() }; + let storage_config = StorageConfig::new(&testnet_config); let temp_dir = setup_tracing_and_temp_dir(Some("data_promotion_test"), false); - let mut config = irys_config::IrysNodeConfig { - base_directory: temp_dir.path().to_path_buf(), - ..Default::default() - }; - let signer = IrysSigner::random_signer_with_chunk_size(chunk_size as usize); + let mut config = IrysNodeConfig::new(&testnet_config); + config.base_directory = temp_dir.path().to_path_buf(); + let signer = IrysSigner::random_signer(&testnet_config); config.extend_genesis_accounts(vec![( signer.address(), @@ -55,7 +55,13 @@ async fn serial_data_promotion_test() { )]); // This will create 3 storage modules, one for submit, one for publish, and one for capacity - let node_context = start_for_testing(config.clone()).await.unwrap(); + let node_context = start_irys_node( + config.clone(), + storage_config.clone(), + testnet_config.clone(), + ) + .await + .unwrap(); wait_for_packing( node_context.actor_addresses.packing.clone(), @@ -73,6 +79,7 @@ async fn serial_data_promotion_test() { db: node_context.db.clone(), mempool: node_context.actor_addresses.mempool, chunk_provider: node_context.chunk_provider.clone(), + config: testnet_config, }; // Initialize the app
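Both promotion tests configure the node geometry the same way: individual fields are overridden on the shared testnet `Config` with struct-update syntax, and `StorageConfig` is derived from the result rather than built field by field. A sketch of that override pattern, using only field names that appear elsewhere in this diff:

    use irys_types::{Config, StorageConfig};

    fn test_configs() -> (Config, StorageConfig) {
        let testnet_config = Config {
            chunk_size: 32,                    // 32 byte chunks
            num_chunks_in_partition: 10,
            num_chunks_in_recall_range: 2,
            num_partitions_per_slot: 1,
            num_writes_before_sync: 1,
            entropy_packing_iterations: 1_000,
            chunk_migration_depth: 1,          // testnet / single node config
            ..Config::testnet()                // all other fields keep their testnet defaults
        };
        // any field set in the initializer above needs no follow-up assignment
        let storage_config = StorageConfig::new(&testnet_config);
        (testnet_config, storage_config)
    }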
diff --git a/crates/chain/tests/promotion/data_promotion_double.rs b/crates/chain/tests/promotion/data_promotion_double.rs index 6d8c2ca4..13d2e8bb 100644 --- a/crates/chain/tests/promotion/data_promotion_double.rs +++ b/crates/chain/tests/promotion/data_promotion_double.rs @@ -1,7 +1,10 @@ use crate::utils::{mine_blocks, post_chunk}; use awc::http::StatusCode; +use irys_chain::start_irys_node; +use irys_config::IrysNodeConfig; use irys_database::Ledger; +use irys_types::Config; use tracing::debug; #[actix_web::test] @@ -18,7 +21,6 @@ async fn serial_double_root_data_promotion_test() { use base58::ToBase58; use irys_actors::packing::wait_for_packing; use irys_api_server::{routes, ApiState}; - use irys_chain::start_for_testing; use irys_database::{tables::IngressProofs, walk_all}; use irys_testing_utils::utils::setup_tracing_and_temp_dir; use irys_types::{ @@ -31,30 +33,25 @@ async fn serial_double_root_data_promotion_test() { use crate::utils::{get_block_parent, get_chunk, mine_block, verify_published_chunk}; - // std::env::set_var("RUST_LOG", "debug"); - - let chunk_size = 32; // 32Byte chunks - - let miner_signer = IrysSigner::random_signer_with_chunk_size(chunk_size); - - let storage_config = StorageConfig { + let chunk_size = 32; // 32 byte chunks + let testnet_config = Config { chunk_size: chunk_size as u64, num_chunks_in_partition: 10, num_chunks_in_recall_range: 2, - num_partitions_in_slot: 1, - miner_address: miner_signer.address(), - min_writes_before_sync: 1, + num_partitions_per_slot: 1, + num_writes_before_sync: 1, entropy_packing_iterations: 1_000, chunk_migration_depth: 1, // Testnet / single node config + ..Config::testnet() }; + + let storage_config = StorageConfig::new(&testnet_config); let temp_dir = setup_tracing_and_temp_dir(Some("double_root_data_promotion_test"), false); - let mut config = irys_config::IrysNodeConfig { - base_directory: temp_dir.path().to_path_buf(), - ..Default::default() - }; - let signer = IrysSigner::random_signer_with_chunk_size(chunk_size as usize); - let signer2 = IrysSigner::random_signer_with_chunk_size(chunk_size as usize); + let mut config = IrysNodeConfig::new(&testnet_config); + config.base_directory = temp_dir.path().to_path_buf(); + let signer = IrysSigner::random_signer(&testnet_config); + let signer2 = IrysSigner::random_signer(&testnet_config); config.extend_genesis_accounts(vec![ ( @@ -74,7 +71,13 @@ ), ]); // This will create 3 storage modules, one for submit, one for publish, and one for capacity - let node_context = start_for_testing(config.clone()).await.unwrap(); + let node_context = start_irys_node( + config.clone(), + storage_config.clone(), + testnet_config.clone(), + ) + .await + .unwrap(); wait_for_packing( node_context.actor_addresses.packing.clone(), @@ -94,6 +97,7 @@ db: node_context.db.clone(), mempool: node_context.actor_addresses.mempool.clone(), chunk_provider: node_context.chunk_provider.clone(), + config: testnet_config, }; // Initialize the app diff --git a/crates/chain/tests/utils.rs b/crates/chain/tests/utils.rs index 417475aa..3371b82d 100644 --- a/crates/chain/tests/utils.rs +++ b/crates/chain/tests/utils.rs @@ -8,7 +8,7 @@ use irys_storage::ii; use irys_types::{ block_production::Seed, block_production::SolutionContext, Address, H256List, H256, }; -use irys_types::{StorageConfig, TxChunkOffset, VDFStepsConfig}; +use irys_types::{Config, StorageConfig, TxChunkOffset, VDFStepsConfig}; use irys_vdf::vdf_state::VdfStepsReadGuard; use irys_vdf::{step_number_to_salt_number, vdf_sha}; use reth::rpc::types::engine::ExecutionPayloadEnvelopeV1Irys; @@ -42,6 +42,7 @@ pub async fn capacity_chunk_solution( storage_config: &StorageConfig, ) -> SolutionContext { let max_retries = 20; + let testnet_config = Config::testnet(); let mut i = 1; let initial_step_num = vdf_steps_guard.read().global_step; let mut step_num: u64 = 0; @@ -90,6 +91,7 @@ pub async fn capacity_chunk_solution( storage_config.entropy_packing_iterations, storage_config.chunk_size as usize, // take it from storage config &mut entropy_chunk, + testnet_config.chain_id, ); debug!("Chunk mining address: {:?} chunk_offset: {} partition hash: {:?} iterations: {} chunk size: {}", miner_addr, 0, partition_hash, storage_config.entropy_packing_iterations, storage_config.chunk_size); @@ -322,6 +324,7 @@ pub async fn verify_published_chunk( &packed_chunk, storage_config.entropy_packing_iterations, storage_config.chunk_size as usize, + storage_config.chain_id, ); if unpacked_chunk.bytes.0 != expected_bytes { println!(
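The `utils.rs` change above is the same cross-cutting edit applied throughout this diff: the entropy packing helpers now take the chain id as an explicit trailing argument instead of reading the global `CONFIG`. A sketch of the updated call shape, assuming the argument order and types implied by the call sites in this diff (the offset and partition hash below are placeholder values):

    use irys_packing::capacity_single;
    use irys_types::{Address, Config, H256};

    fn sample_entropy_chunk() -> Vec<u8> {
        let config = Config::testnet();
        let mut entropy_chunk = Vec::with_capacity(config.chunk_size as usize);
        capacity_single::compute_entropy_chunk(
            Address::random(),
            0,                                 // chunk offset within the partition
            H256::random().0,                  // partition hash bytes
            config.entropy_packing_iterations,
            config.chunk_size as usize,
            &mut entropy_chunk,
            config.chain_id,                   // formerly CONFIG.irys_chain_id
        );
        entropy_chunk
    }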
diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index 57dba072..0591d8f1 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition.workspace = true rust-version.workspace = true +[features] +test-utils = ["irys-types/test-utils"] + [dependencies] reth-primitives.workspace = true irys-types.workspace = true @@ -11,10 +14,12 @@ eyre.workspace = true reth-chainspec.workspace = true irys-primitives.workspace = true once_cell.workspace = true -# reth-cli.workspace = true tracing.workspace = true serde.workspace = true toml.workspace = true +[dev-dependencies] +irys-types = { workspace = true, features = ["test-utils"] } + [lints] workspace = true diff --git a/crates/config/src/chain/chain.rs b/crates/config/src/chain/chain.rs index 63bed8ce..6e105548 100644 --- a/crates/config/src/chain/chain.rs +++ b/crates/config/src/chain/chain.rs @@ -1,6 +1,6 @@ use irys_primitives::{Genesis, GenesisAccount, U256}; -use irys_types::{Address, IrysBlockHeader, CONFIG}; -use once_cell::sync::{Lazy, OnceCell}; +use irys_types::{Address, IrysBlockHeader}; +use once_cell::sync::OnceCell; use reth_chainspec::EthereumHardfork::{ ArrowGlacier, Berlin, Byzantium, Cancun, Constantinople, Dao, Frontier, GrayGlacier, Homestead, Istanbul, London, MuirGlacier, Paris, Petersburg, Shanghai, SpuriousDragon, Tangerine, @@ -9,14 +9,13 @@ use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, Chain, ChainSpec, ForkCon use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_primitives::revm_primitives::hex; use std::collections::BTreeMap; -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; -pub const SUPPORTED_CHAINS: &[&str] = &["mainnet" /* , "devnet", "testnet" */]; +pub const IRYS_TESTNET_CHAIN_ID: u64 = 1275; -/// note: for testing this is overridden -pub static IRYS_MAINNET: Lazy> = Lazy::new(|| { +pub static IRYS_TESTNET: LazyLock> = LazyLock::new(|| { let mut spec = ChainSpec { - chain: Chain::from_id(CONFIG.irys_chain_id), + chain: Chain::from_id(IRYS_TESTNET_CHAIN_ID), // TODO: A proper genesis block genesis: Genesis { gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, diff --git a/crates/config/src/chain/chainspec.rs b/crates/config/src/chain/chainspec.rs index 77cce989..76f5391c 100644 --- 
a/crates/config/src/chain/chainspec.rs +++ b/crates/config/src/chain/chainspec.rs @@ -3,7 +3,7 @@ use irys_types::{Address, IrysBlockHeader}; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use tracing::debug; -use super::chain::IRYS_MAINNET; +use super::chain::IRYS_TESTNET; /// A helper to build custom chain specs #[derive(Debug, Default, Clone)] @@ -14,14 +14,14 @@ pub struct IrysChainSpecBuilder { impl IrysChainSpecBuilder { /// Construct a new builder from the mainnet chain spec. - pub fn mainnet() -> Self { + pub fn testnet() -> Self { let mut genesis = IrysBlockHeader::new_mock_header(); genesis.height = 0; Self { reth_builder: ChainSpecBuilder { - chain: Some(IRYS_MAINNET.chain), - genesis: Some(IRYS_MAINNET.genesis.clone()), - hardforks: IRYS_MAINNET.hardforks.clone(), + chain: Some(IRYS_TESTNET.chain), + genesis: Some(IRYS_TESTNET.genesis.clone()), + hardforks: IRYS_TESTNET.hardforks.clone(), }, genesis, } @@ -56,274 +56,3 @@ impl IrysChainSpecBuilder { self } } - -// impl into - -impl IrysChainSpecBuilder { - // /// Set the chain ID - // pub const fn chain(mut self, chain: Chain) -> Self { - // self.chain = Some(chain); - // self - // } - - // /// Set the genesis block. - // pub fn genesis(mut self, genesis: Genesis) -> Self { - // self.genesis = Some(genesis); - // self - // } - - // /// Add the given fork with the given activation condition to the spec. - // pub fn with_fork(mut self, fork: EthereumHardfork, condition: ForkCondition) -> Self { - // self.hardforks.insert(fork, condition); - // self - // } - - // /// Remove the given fork from the spec. - // pub fn without_fork(mut self, fork: EthereumHardfork) -> Self { - // self.hardforks.remove(fork); - // self - // } - - // /// Enable the Paris hardfork at the given TTD. - // /// - // /// Does not set the merge netsplit block. - // pub fn paris_at_ttd(self, ttd: U256) -> Self { - // self.with_fork( - // EthereumHardfork::Paris, - // ForkCondition::TTD { - // total_difficulty: ttd, - // fork_block: None, - // }, - // ) - // } - - // /// Enable Frontier at genesis. - // pub fn frontier_activated(mut self) -> Self { - // self.hardforks - // .insert(EthereumHardfork::Frontier, ForkCondition::Block(0)); - // self - // } - - // /// Enable Homestead at genesis. - // pub fn homestead_activated(mut self) -> Self { - // self = self.frontier_activated(); - // self.hardforks - // .insert(EthereumHardfork::Homestead, ForkCondition::Block(0)); - // self - // } - - // /// Enable Tangerine at genesis. - // pub fn tangerine_whistle_activated(mut self) -> Self { - // self = self.homestead_activated(); - // self.hardforks - // .insert(EthereumHardfork::Tangerine, ForkCondition::Block(0)); - // self - // } - - // /// Enable Spurious Dragon at genesis. - // pub fn spurious_dragon_activated(mut self) -> Self { - // self = self.tangerine_whistle_activated(); - // self.hardforks - // .insert(EthereumHardfork::SpuriousDragon, ForkCondition::Block(0)); - // self - // } - - // /// Enable Byzantium at genesis. - // pub fn byzantium_activated(mut self) -> Self { - // self = self.spurious_dragon_activated(); - // self.hardforks - // .insert(EthereumHardfork::Byzantium, ForkCondition::Block(0)); - // self - // } - - // /// Enable Constantinople at genesis. - // pub fn constantinople_activated(mut self) -> Self { - // self = self.byzantium_activated(); - // self.hardforks - // .insert(EthereumHardfork::Constantinople, ForkCondition::Block(0)); - // self - // } - - // /// Enable Petersburg at genesis. 
- // pub fn petersburg_activated(mut self) -> Self { - // self = self.constantinople_activated(); - // self.hardforks - // .insert(EthereumHardfork::Petersburg, ForkCondition::Block(0)); - // self - // } - - // /// Enable Istanbul at genesis. - // pub fn istanbul_activated(mut self) -> Self { - // self = self.petersburg_activated(); - // self.hardforks - // .insert(EthereumHardfork::Istanbul, ForkCondition::Block(0)); - // self - // } - - // /// Enable Berlin at genesis. - // pub fn berlin_activated(mut self) -> Self { - // self = self.istanbul_activated(); - // self.hardforks - // .insert(EthereumHardfork::Berlin, ForkCondition::Block(0)); - // self - // } - - // /// Enable London at genesis. - // pub fn london_activated(mut self) -> Self { - // self = self.berlin_activated(); - // self.hardforks - // .insert(EthereumHardfork::London, ForkCondition::Block(0)); - // self - // } - - // /// Enable Paris at genesis. - // pub fn paris_activated(mut self) -> Self { - // self = self.london_activated(); - // self.hardforks.insert( - // EthereumHardfork::Paris, - // ForkCondition::TTD { - // fork_block: Some(0), - // total_difficulty: U256::ZERO, - // }, - // ); - // self - // } - - // /// Enable Shanghai at genesis. - // pub fn shanghai_activated(mut self) -> Self { - // self = self.paris_activated(); - // self.hardforks - // .insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); - // self - // } - - // /// Enable Cancun at genesis. - // pub fn cancun_activated(mut self) -> Self { - // self = self.shanghai_activated(); - // self.hardforks - // .insert(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)); - // self - // } - - // /// Enable Prague at genesis. - // pub fn prague_activated(mut self) -> Self { - // self = self.cancun_activated(); - // self.hardforks - // .insert(EthereumHardfork::Prague, ForkCondition::Timestamp(0)); - // self - // } - - // /// Enable Bedrock at genesis - // #[cfg(feature = "optimism")] - // pub fn bedrock_activated(mut self) -> Self { - // self = self.paris_activated(); - // self.hardforks.insert( - // reth_optimism_forks::OptimismHardfork::Bedrock, - // ForkCondition::Block(0), - // ); - // self - // } - - // /// Enable Regolith at genesis - // #[cfg(feature = "optimism")] - // pub fn regolith_activated(mut self) -> Self { - // self = self.bedrock_activated(); - // self.hardforks.insert( - // reth_optimism_forks::OptimismHardfork::Regolith, - // ForkCondition::Timestamp(0), - // ); - // self - // } - - // /// Enable Canyon at genesis - // #[cfg(feature = "optimism")] - // pub fn canyon_activated(mut self) -> Self { - // self = self.regolith_activated(); - // // Canyon also activates changes from L1's Shanghai hardfork - // self.hardforks - // .insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); - // self.hardforks.insert( - // reth_optimism_forks::OptimismHardfork::Canyon, - // ForkCondition::Timestamp(0), - // ); - // self - // } - - // /// Enable Ecotone at genesis - // #[cfg(feature = "optimism")] - // pub fn ecotone_activated(mut self) -> Self { - // self = self.canyon_activated(); - // self.hardforks - // .insert(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)); - // self.hardforks.insert( - // reth_optimism_forks::OptimismHardfork::Ecotone, - // ForkCondition::Timestamp(0), - // ); - // self - // } - - // /// Enable Fjord at genesis - // #[cfg(feature = "optimism")] - // pub fn fjord_activated(mut self) -> Self { - // self = self.ecotone_activated(); - // self.hardforks.insert( - // reth_optimism_forks::OptimismHardfork::Fjord, 
- // ForkCondition::Timestamp(0), - // ); - // self - // } - - // /// Enable Granite at genesis - // #[cfg(feature = "optimism")] - // pub fn granite_activated(mut self) -> Self { - // self = self.fjord_activated(); - // self.hardforks.insert( - // reth_optimism_forks::OptimismHardfork::Granite, - // ForkCondition::Timestamp(0), - // ); - // self - // } - - // /// Build the resulting [`ChainSpec`]. - // /// - // /// # Panics - // /// - // /// This function panics if the chain ID and genesis is not set ([`Self::chain`] and - // /// [`Self::genesis`]) - // pub fn build(self) -> ChainSpec { - // let paris_block_and_final_difficulty = { - // self.hardforks - // .get(EthereumHardfork::Paris) - // .and_then(|cond| { - // if let ForkCondition::TTD { - // fork_block, - // total_difficulty, - // } = cond - // { - // fork_block.map(|fork_block| (fork_block, total_difficulty)) - // } else { - // None - // } - // }) - // }; - // ChainSpec { - // chain: self.chain.expect("The chain is required"), - // genesis: self.genesis.expect("The genesis is required"), - // genesis_hash: OnceCell::new(), - // hardforks: self.hardforks, - // paris_block_and_final_difficulty, - // deposit_contract: None, - // ..Default::default() - // } - // } -} - -// impl From<&Arc> for IrysChainSpecBuilder { -// fn from(value: &Arc) -> Self { -// Self { -// chain: Some(value.chain), -// genesis: Some(value.genesis.clone()), -// hardforks: value.hardforks.clone(), -// } -// } -// } diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs index d560cbbb..22cd7ac0 100644 --- a/crates/config/src/lib.rs +++ b/crates/config/src/lib.rs @@ -8,13 +8,11 @@ use std::{ use chain::chainspec::IrysChainSpecBuilder; use irys_primitives::GenesisAccount; -use irys_types::{irys::IrysSigner, Address, CONFIG}; +use irys_types::{config, irys::IrysSigner, Address}; use serde::{Deserialize, Serialize}; pub mod chain; -// TODO: convert this into a set of clap args - #[derive(Debug, Clone)] /// Top level configuration struct for the node pub struct IrysNodeConfig { @@ -30,15 +28,19 @@ pub struct IrysNodeConfig { } /// "sane" default configuration +#[cfg(any(feature = "test-utils", test))] impl Default for IrysNodeConfig { fn default() -> Self { + use irys_types::Config; let base_dir = env::current_dir() .expect("Unable to determine working dir, aborting") .join(".irys"); + let testnet_config = Config::testnet(); + let chainspec_builder = IrysChainSpecBuilder::testnet(); Self { - chainspec_builder: IrysChainSpecBuilder::mainnet(), - mining_signer: IrysSigner::random_signer(), + mining_signer: IrysSigner::random_signer(&testnet_config), + chainspec_builder, instance_number: None, // no instance dir base_directory: base_dir, } @@ -53,14 +55,14 @@ pub fn decode_hex(s: &str) -> Result, ParseIntError> { } impl IrysNodeConfig { - pub fn mainnet() -> Self { + pub fn new(config: &config::Config) -> Self { Self { - mining_signer: IrysSigner::mainnet_from_slice(&decode_hex(CONFIG.mining_key).unwrap()), + mining_signer: IrysSigner::from_config(&config), instance_number: None, base_directory: env::current_dir() .expect("Unable to determine working dir, aborting") .join(".irys"), - chainspec_builder: IrysChainSpecBuilder::mainnet(), + chainspec_builder: IrysChainSpecBuilder::testnet(), } } @@ -109,59 +111,6 @@ impl IrysNodeConfig { } } -// pub struct IrysConfigBuilder { -// /// Signer instance used for mining -// pub mining_signer: IrysSigner, -// /// Node ID/instance number: used for testing -// pub instance_number: u32, -// /// configuration of partitions and
their associated storage providers - -// /// base data directory, i.e `./.tmp` -// /// should not be used directly, instead use the appropriate methods, i.e `instance_directory` -// pub base_directory: PathBuf, -// /// ChainSpec builder - used to generate ChainSpec, which defines most of the chain-related parameters -// pub chainspec_builder: IrysChainSpecBuilder, -// } - -// impl Default for IrysConfigBuilder { -// fn default() -> Self { -// Self { -// instance_number: 0, -// base_directory:absolute(PathBuf::from_str("../../.tmp").unwrap()).unwrap(), -// chainspec_builder: IrysChainSpecBuilder::mainnet(), -// mining_signer: IrysSigner::random_signer(), -// } -// } -// } - -// impl IrysConfigBuilder { -// pub fn new() -> Self { -// return IrysConfigBuilder::default(); -// } -// pub fn instance_number(mut self, number: u32) -> Self { -// self.instance_number = number; -// self -// } -// pub fn base_directory(mut self, path: PathBuf) -> Self { -// self.base_directory = path; -// self -// } -// // pub fn base_directory(mut self, path: PathBuf) -> Self { -// // self.base_directory = path; -// // self -// // } -// // pub fn add_partition_and_sm(mut self, partition: Partition, storage_module: ) -// pub fn mainnet() -> Self { -// return IrysConfigBuilder::new() -// .base_directory(absolute(PathBuf::from_str("../../.irys").unwrap()).unwrap()); -// } - -// pub fn build(mut self) -> IrysNodeConfig { -// -// return self.config; -// } -// } - pub const PRICE_PER_CHUNK_PERM: u128 = 10000; pub const PRICE_PER_CHUNK_5_EPOCH: u128 = 10; @@ -297,6 +246,5 @@ impl StorageSubmodulesConfig { // Load the config to verify it parses StorageSubmodulesConfig::from_toml(config_path_local) } - // let _ = STORAGE_SUBMODULES_CONFIG.submodule_paths.len(); } } diff --git a/crates/database/Cargo.toml b/crates/database/Cargo.toml index 16c208ea..a1c9cae8 100644 --- a/crates/database/Cargo.toml +++ b/crates/database/Cargo.toml @@ -7,7 +7,6 @@ rust-version.workspace = true [dependencies] alloy-primitives.workspace = true actix.workspace = true -assert_matches.workspace = true base58.workspace = true tokio.workspace = true reth-node-metrics.workspace = true @@ -26,5 +25,8 @@ tracing.workspace = true irys-testing-utils.workspace = true test-fuzz.workspace = true +[dev-dependencies] +irys-config = { workspace = true, features = ["test-utils"] } + [lints] workspace = true diff --git a/crates/database/src/block_index_data.rs b/crates/database/src/block_index_data.rs index 120bf454..5bc41a9f 100644 --- a/crates/database/src/block_index_data.rs +++ b/crates/database/src/block_index_data.rs @@ -395,7 +395,6 @@ mod tests { data_ledger::Ledger, BlockBounds, BlockIndexItem, LedgerIndexItem, }; - use assert_matches::assert_matches; use irys_config::IrysNodeConfig; use irys_types::H256; @@ -453,7 +452,7 @@ mod tests { let _ = ensure_path_exists(&arc_config); let save_result = save_block_index(&block_items, &arc_config); - assert_matches!(save_result, Ok(())); + assert!(save_result.is_ok()); // Load the items from disk let block_index = BlockIndex::new(); diff --git a/crates/database/src/data_ledger.rs b/crates/database/src/data_ledger.rs index 04482fc7..7ffb1ed8 100644 --- a/crates/database/src/data_ledger.rs +++ b/crates/database/src/data_ledger.rs @@ -1,4 +1,4 @@ -use irys_types::{Compact, TransactionLedger, CONFIG, H256}; +use irys_types::{Compact, Config, TransactionLedger, H256}; use serde::{Deserialize, Serialize}; use std::ops::{Index, IndexMut}; /// Manages the global ledger state within the epoch service, tracking: @@ -27,6 
+27,7 @@ pub struct PermanentLedger { pub slots: Vec, /// Unique identifier for this ledger, see `Ledger` enum pub ledger_id: u32, + pub num_partitions_per_slot: u64, } #[derive(Debug, Clone)] @@ -38,31 +39,30 @@ pub struct TermLedger { pub ledger_id: u32, /// Number of epochs slots in this ledger exist for pub epoch_length: u64, -} - -impl Default for PermanentLedger { - fn default() -> Self { - Self::new() - } + pub num_blocks_in_epoch: u64, + pub num_partitions_per_slot: u64, } impl PermanentLedger { /// Constructs a permanent ledger, always with `Ledger::Publish` as the id - pub const fn new() -> Self { + pub fn new(config: &Config) -> Self { Self { slots: Vec::new(), ledger_id: Ledger::Publish as u32, + num_partitions_per_slot: config.num_partitions_per_slot, } } } impl TermLedger { /// Creates a term ledger with specified index and duration - pub const fn new(ledger: Ledger, epoch_length: u64) -> Self { + pub fn new(ledger: Ledger, config: &Config) -> Self { Self { slots: Vec::new(), ledger_id: ledger as u32, - epoch_length, + epoch_length: config.submit_ledger_epoch_length, + num_blocks_in_epoch: config.num_blocks_in_epoch, + num_partitions_per_slot: config.num_partitions_per_slot, } } @@ -76,11 +76,11 @@ impl TermLedger { let mut expired_indices = Vec::new(); // Make sure enough blocks have transpired before calculating expiry height - if epoch_height < self.epoch_length * CONFIG.num_blocks_in_epoch { + if epoch_height < self.epoch_length * self.num_blocks_in_epoch { return expired_indices; } - let expiry_height = epoch_height - self.epoch_length * CONFIG.num_blocks_in_epoch; + let expiry_height = epoch_height - self.epoch_length * self.num_blocks_in_epoch; // Collect indices of slots to expire for (idx, slot) in self.slots.iter().enumerate() { @@ -130,7 +130,7 @@ impl LedgerCore for PermanentLedger { is_expired: false, last_height: 0, }); - num_partitions_added += CONFIG.num_partitions_per_slot; + num_partitions_added += self.num_partitions_per_slot; } num_partitions_added } @@ -139,7 +139,7 @@ impl LedgerCore for PermanentLedger { .iter() .enumerate() .filter_map(|(idx, slot)| { - let needed = CONFIG.num_partitions_per_slot as usize - slot.partitions.len(); + let needed = self.num_partitions_per_slot as usize - slot.partitions.len(); if needed > 0 { Some((idx, needed)) } else { @@ -170,7 +170,7 @@ impl LedgerCore for TermLedger { is_expired: false, last_height: 0, }); - num_partitions_added += CONFIG.num_partitions_per_slot; + num_partitions_added += self.num_partitions_per_slot; } num_partitions_added } @@ -180,7 +180,7 @@ impl LedgerCore for TermLedger { .iter() .enumerate() .filter_map(|(idx, slot)| { - let needed = CONFIG.num_partitions_per_slot as usize - slot.partitions.len(); + let needed = self.num_partitions_per_slot as usize - slot.partitions.len(); if needed > 0 && !slot.is_expired { Some((idx, needed)) } else { @@ -275,21 +275,12 @@ pub struct Ledgers { term: Vec, } -impl Default for Ledgers { - fn default() -> Self { - Self::new() - } -} - impl Ledgers { /// Instantiate a Ledgers struct with the correct Ledgers - pub fn new() -> Self { + pub fn new(config: &Config) -> Self { Self { - perm: PermanentLedger::new(), - term: vec![TermLedger::new( - Ledger::Submit, - CONFIG.submit_ledger_epoch_length, - )], + perm: PermanentLedger::new(config), + term: vec![TermLedger::new(Ledger::Submit, config)], } } diff --git a/crates/packing/src/lib.rs b/crates/packing/src/lib.rs index b5120099..a2261ff8 100644 --- a/crates/packing/src/lib.rs +++ b/crates/packing/src/lib.rs @@ -3,7 
+3,7 @@ use std::ops::BitXor; pub use irys_c::{capacity, capacity_single}; use irys_types::{ - partition::PartitionHash, Address, Base64, ChunkBytes, PackedChunk, UnpackedChunk, CONFIG, + partition::PartitionHash, Address, Base64, ChunkBytes, PackedChunk, UnpackedChunk, }; use irys_types::CHUNK_SIZE; // do not change where is used for CONFIG.chunk_size as this is hardcoded in C implementation @@ -17,6 +17,7 @@ pub fn unpack( packed_chunk: &PackedChunk, entropy_packing_iterations: u32, chunk_size: usize, + irys_chain_id: u64, ) -> UnpackedChunk { let mut entropy: Vec = Vec::with_capacity(chunk_size); capacity_single::compute_entropy_chunk( @@ -26,6 +27,7 @@ pub fn unpack( entropy_packing_iterations, chunk_size, &mut entropy, + irys_chain_id, ); let unpacked_data = unpack_with_entropy(packed_chunk, entropy, chunk_size); @@ -77,6 +79,8 @@ pub fn capacity_pack_range_c( partition_hash: PartitionHash, iterations: Option, out_entropy_chunk: &mut Vec, + entropy_packing_iterations: u32, + irys_chain_id: u64, ) { let mining_addr_len = mining_address.len(); // note: might not line up with capacity? that should be fine... let partition_hash_len = partition_hash.0.len(); @@ -84,15 +88,14 @@ pub fn capacity_pack_range_c( let partition_hash = partition_hash.as_ptr() as *const std::os::raw::c_uchar; let entropy_chunk_ptr = out_entropy_chunk.as_ptr() as *mut u8; - let iterations: u32 = iterations.unwrap_or(CONFIG.entropy_packing_iterations); - let chain_id: u64 = CONFIG.irys_chain_id; + let iterations: u32 = iterations.unwrap_or(entropy_packing_iterations); unsafe { capacity::compute_entropy_chunk( mining_addr, mining_addr_len, chunk_offset, - chain_id, + irys_chain_id, partition_hash, partition_hash_len, entropy_chunk_ptr, @@ -112,15 +115,16 @@ pub fn capacity_pack_range_cuda_c( partition_hash: PartitionHash, iterations: Option, entropy: &mut Vec, + entropy_packing_iterations: u32, + irys_chain_id: u64, ) -> u32 { let mining_addr_len = mining_address.len(); let partition_hash_len = partition_hash.0.len(); let mining_addr = mining_address.as_ptr() as *const std::os::raw::c_uchar; let partition_hash = partition_hash.as_ptr() as *const std::os::raw::c_uchar; - let iterations: u32 = iterations.unwrap_or(CONFIG.entropy_packing_iterations); + let iterations = iterations.unwrap_or(entropy_packing_iterations); let entropy_ptr = entropy.as_ptr() as *mut u8; - let chain_id: u64 = CONFIG.irys_chain_id; let result; unsafe { @@ -128,7 +132,7 @@ pub fn capacity_pack_range_cuda_c( mining_addr, mining_addr_len, chunk_offset, - chain_id, + irys_chain_id, num_chunks as i64, partition_hash, partition_hash_len, @@ -149,6 +153,8 @@ pub fn capacity_pack_range_with_data_cuda_c( chunk_offset: std::ffi::c_ulong, partition_hash: PartitionHash, iterations: Option, + entropy_packing_iterations: u32, + irys_chain_id: u64, ) { let num_chunks: u32 = data.len() as u32 / CHUNK_SIZE as u32; // do not change it for CONFIG.chunk_size this is hardcoded in C implementation let mut entropy: Vec = Vec::with_capacity(data.len()); @@ -159,6 +165,8 @@ pub fn capacity_pack_range_with_data_cuda_c( partition_hash, iterations, &mut entropy, + entropy_packing_iterations, + irys_chain_id, ); // TODO: check if it is worth to move this to GPU ? 
implies big data transfer from host to device that now is not needed @@ -188,8 +196,10 @@ pub fn capacity_pack_range_with_data( partition_hash: PartitionHash, iterations: Option, chunk_size: usize, + entropy_packing_iterations: u32, + irys_chain_id: u64, ) { - let iterations: u32 = iterations.unwrap_or(CONFIG.entropy_packing_iterations); + let iterations: u32 = iterations.unwrap_or(entropy_packing_iterations); let mut entropy_chunk = Vec::::with_capacity(chunk_size); data.iter_mut().enumerate().for_each(|(pos, chunk)| { @@ -200,6 +210,7 @@ pub fn capacity_pack_range_with_data( iterations, chunk_size, &mut entropy_chunk, + irys_chain_id, ); xor_vec_u8_arrays_in_place(chunk, &entropy_chunk); }) @@ -212,6 +223,8 @@ pub fn capacity_pack_range_with_data_c( chunk_offset: std::ffi::c_ulong, partition_hash: PartitionHash, iterations: Option, + entropy_packing_iterations: u32, + irys_chain_id: u64, ) { let mut entropy_chunk = Vec::::with_capacity(CHUNK_SIZE as usize); data.iter_mut().enumerate().for_each(|(pos, chunk)| { @@ -221,6 +234,8 @@ pub fn capacity_pack_range_with_data_c( partition_hash, iterations, &mut entropy_chunk, + entropy_packing_iterations, + irys_chain_id, ); xor_vec_u8_arrays_in_place(chunk, &entropy_chunk); }) @@ -249,7 +264,7 @@ pub fn packing_xor_vec_u8(mut entropy: Vec, data: &[u8]) -> Vec { mod tests { use crate::capacity_single::SHA_HASH_SIZE; use crate::*; - use irys_types::{PartitionChunkOffset, TxChunkOffset, H256}; + use irys_types::{Config, PartitionChunkOffset, TxChunkOffset, H256}; use rand::{Rng, RngCore}; use std::time::*; @@ -258,7 +273,8 @@ mod tests { #[test] fn test_compute_entropy_chunk() { let mut rng = rand::thread_rng(); - let mining_address = Address::random(); + let testnet_config = Config::testnet(); + let mining_address = testnet_config.miner_address(); let chunk_offset = rng.gen_range(1..=1000); let mut partition_hash = [0u8; SHA_HASH_SIZE]; rng.fill(&mut partition_hash[..]); @@ -277,6 +293,7 @@ mod tests { iterations, CHUNK_SIZE as usize, &mut chunk, + testnet_config.chain_id, ); capacity_single::compute_entropy_chunk( @@ -286,6 +303,7 @@ mod tests { iterations, CHUNK_SIZE as usize, &mut chunk2, + testnet_config.chain_id, ); let elapsed = now.elapsed(); @@ -301,6 +319,8 @@ mod tests { partition_hash.into(), Some(iterations), &mut c_chunk, + testnet_config.entropy_packing_iterations, + testnet_config.chain_id, ); capacity_pack_range_c( @@ -309,6 +329,8 @@ mod tests { partition_hash.into(), Some(iterations), &mut c_chunk2, + testnet_config.entropy_packing_iterations, + testnet_config.chain_id, ); let elapsed = now.elapsed(); @@ -327,6 +349,8 @@ mod tests { partition_hash.into(), Some(iterations), &mut c_chunk_cuda, + testnet_config.entropy_packing_iterations, + testnet_config.chain_id, ); println!("CUDA result: {}", result); @@ -352,6 +376,7 @@ mod tests { #[test] fn test_bench_chunks_packing() { + let testnet_config = Config::testnet(); let mut rng: rand::prelude::ThreadRng = rand::thread_rng(); let mining_address = Address::random(); let chunk_offset = rng.gen_range(1..=1000); @@ -384,6 +409,8 @@ mod tests { chunk_offset, partition_hash.into(), iterations, + testnet_config.entropy_packing_iterations, + testnet_config.chain_id, ); let elapsed = now.elapsed(); @@ -398,6 +425,8 @@ mod tests { partition_hash.into(), iterations, CHUNK_SIZE as usize, + testnet_config.entropy_packing_iterations, + testnet_config.chain_id, ); let elapsed = now.elapsed(); @@ -413,6 +442,8 @@ mod tests { partition_hash.into(), iterations, &mut entropy_chunk, + 
@@ -249,7 +264,7 @@ pub fn packing_xor_vec_u8(mut entropy: Vec<u8>, data: &[u8]) -> Vec<u8> {
 mod tests {
     use crate::capacity_single::SHA_HASH_SIZE;
     use crate::*;
-    use irys_types::{PartitionChunkOffset, TxChunkOffset, H256};
+    use irys_types::{Config, PartitionChunkOffset, TxChunkOffset, H256};
     use rand::{Rng, RngCore};
     use std::time::*;
@@ -258,7 +273,8 @@
     #[test]
     fn test_compute_entropy_chunk() {
         let mut rng = rand::thread_rng();
-        let mining_address = Address::random();
+        let testnet_config = Config::testnet();
+        let mining_address = testnet_config.miner_address();
         let chunk_offset = rng.gen_range(1..=1000);
         let mut partition_hash = [0u8; SHA_HASH_SIZE];
         rng.fill(&mut partition_hash[..]);
@@ -277,6 +293,7 @@
             iterations,
             CHUNK_SIZE as usize,
             &mut chunk,
+            testnet_config.chain_id,
         );
 
         capacity_single::compute_entropy_chunk(
@@ -286,6 +303,7 @@
             iterations,
             CHUNK_SIZE as usize,
             &mut chunk2,
+            testnet_config.chain_id,
         );
 
         let elapsed = now.elapsed();
@@ -301,6 +319,8 @@
             partition_hash.into(),
             Some(iterations),
             &mut c_chunk,
+            testnet_config.entropy_packing_iterations,
+            testnet_config.chain_id,
         );
 
         capacity_pack_range_c(
@@ -309,6 +329,8 @@
             partition_hash.into(),
             Some(iterations),
             &mut c_chunk2,
+            testnet_config.entropy_packing_iterations,
+            testnet_config.chain_id,
         );
 
         let elapsed = now.elapsed();
@@ -327,6 +349,8 @@
             partition_hash.into(),
             Some(iterations),
             &mut c_chunk_cuda,
+            testnet_config.entropy_packing_iterations,
+            testnet_config.chain_id,
         );
 
         println!("CUDA result: {}", result);
@@ -352,6 +376,7 @@
     #[test]
     fn test_bench_chunks_packing() {
+        let testnet_config = Config::testnet();
         let mut rng: rand::prelude::ThreadRng = rand::thread_rng();
         let mining_address = Address::random();
         let chunk_offset = rng.gen_range(1..=1000);
@@ -384,6 +409,8 @@
             chunk_offset,
             partition_hash.into(),
             iterations,
+            testnet_config.entropy_packing_iterations,
+            testnet_config.chain_id,
         );
 
         let elapsed = now.elapsed();
@@ -398,6 +425,8 @@
             partition_hash.into(),
             iterations,
             CHUNK_SIZE as usize,
+            testnet_config.entropy_packing_iterations,
+            testnet_config.chain_id,
         );
 
         let elapsed = now.elapsed();
@@ -413,6 +442,8 @@
             partition_hash.into(),
             iterations,
             &mut entropy_chunk,
+            testnet_config.entropy_packing_iterations,
+            testnet_config.chain_id,
         );
 
         // sign picked random chunk with entropy
@@ -424,6 +455,7 @@
     #[cfg(feature = "nvidia")]
     #[test]
     fn test_bench_chunks_packing_cuda() {
+        let testnet_config = Config::testnet();
         let mut rng = rand::thread_rng();
         let mining_address = Address::random();
         let chunk_offset = rng.gen_range(1..=1000);
@@ -452,6 +484,8 @@
             chunk_offset,
             partition_hash.into(),
             iterations,
+            testnet_config.entropy_packing_iterations,
+            testnet_config.chain_id,
         );
 
         let elapsed = now.elapsed();
@@ -465,6 +499,8 @@
             partition_hash.into(),
             iterations,
             CHUNK_SIZE as usize,
+            testnet_config.entropy_packing_iterations,
+            testnet_config.chain_id,
         );
 
         let elapsed = now.elapsed();
@@ -482,6 +518,7 @@
     fn test_chunk_packing_unpacking() {
         let mut rng = rand::thread_rng();
 
+        let testnet_config = Config::testnet();
         let mining_address = Address::random();
         let chunk_offset = rng.gen_range(1..=1000);
         let mut partition_hash = [0u8; SHA_HASH_SIZE];
@@ -499,6 +536,7 @@
             iterations,
             chunk_size,
             &mut entropy_chunk,
+            testnet_config.chain_id,
         );
 
         // simulate a smaller end chunk
@@ -520,7 +558,12 @@
             partition_hash: H256::from(partition_hash),
         };
 
-        let unpacked_chunk = unpack(&packed_chunk, iterations, chunk_size);
+        let unpacked_chunk = unpack(
+            &packed_chunk,
+            iterations,
+            chunk_size,
+            testnet_config.chain_id,
+        );
 
         assert_eq!(unpacked_chunk.bytes.0, data_bytes);
     }
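
Unpacking mirrors packing: unpack now takes the chain id, because the chain id feeds the entropy computation. A hedged sketch, assuming packed_chunk is a PackedChunk previously read from a storage module:

    let config = Config::testnet();
    let unpacked_chunk = unpack(
        &packed_chunk,
        config.entropy_packing_iterations,
        config.chunk_size as usize,
        config.chain_id, // new argument; a different chain id yields different entropy
    );
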
diff --git a/crates/reth-node-bridge/src/precompile/read_bytes.rs b/crates/reth-node-bridge/src/precompile/read_bytes.rs
index 118afe3d..f4595d2b 100644
--- a/crates/reth-node-bridge/src/precompile/read_bytes.rs
+++ b/crates/reth-node-bridge/src/precompile/read_bytes.rs
@@ -31,8 +31,8 @@ impl ReadBytesRangeByIndexArgs {
 
 pub fn read_bytes_range_by_index(
     call_data: &Bytes,
-    gas_limit: u64,
-    env: &Env,
+    _gas_limit: u64,
+    _env: &Env,
     state_provider: &IrysRethProviderInner,
     access_lists: ParsedAccessLists,
 ) -> PrecompileResult {
@@ -45,7 +45,7 @@
         .ok_or(PrecompileErrors::Error(PrecompileError::Other(
             "Internal error - unable to parse access list".to_owned(),
         )))?;
-    read_bytes_range(bytes_range, gas_limit, env, state_provider, access_lists)
+    read_bytes_range(bytes_range, state_provider, access_lists)
 }
 
 struct ReadPartialByteRangeArgs {
@@ -72,8 +72,8 @@ impl ReadPartialByteRangeArgs {
 // this method overrides the length and augments the start
 pub fn read_partial_byte_range(
     call_data: &Bytes,
-    gas_limit: u64,
-    env: &Env,
+    _gas_limit: u64,
+    _env: &Env,
     state_provider: &IrysRethProviderInner,
     access_lists: ParsedAccessLists,
 ) -> PrecompileResult {
@@ -112,13 +112,11 @@
         ))
     })?;
 
-    read_bytes_range(bytes_range, gas_limit, env, state_provider, access_lists)
+    read_bytes_range(bytes_range, state_provider, access_lists)
 }
 
 pub fn read_bytes_range(
     bytes_range: ByteRangeSpecifier,
-    _gas_limit: u64,
-    _env: &Env,
     state_provider: &IrysRethProviderInner,
     access_lists: ParsedAccessLists,
 ) -> PrecompileResult {
@@ -187,6 +185,7 @@
             &chunk,
             storage_config.entropy_packing_iterations,
             storage_config.chunk_size as usize,
+            storage_config.chain_id,
         );
         bytes.extend(unpacked_chunk.bytes.0)
     }
diff --git a/crates/storage/src/chunk_provider.rs b/crates/storage/src/chunk_provider.rs
index ede4744e..bbf73783 100644
--- a/crates/storage/src/chunk_provider.rs
+++ b/crates/storage/src/chunk_provider.rs
@@ -152,7 +152,7 @@ mod tests {
     use irys_testing_utils::utils::setup_tracing_and_temp_dir;
     use irys_types::{
         irys::IrysSigner, ledger_chunk_offset_ii, partition::PartitionAssignment,
-        partition_chunk_offset_ie, Base64, LedgerChunkRange, PartitionChunkOffset,
+        partition_chunk_offset_ie, Base64, Config, LedgerChunkRange, PartitionChunkOffset,
         TransactionLedger, UnpackedChunk,
     };
     use nodit::interval::{ie, ii};
@@ -160,6 +160,12 @@
 
     #[test]
     fn get_by_data_tx_offset_test() -> eyre::Result<()> {
+        let testnet_config = Config {
+            num_writes_before_sync: 1,
+            chunk_size: 32,
+            num_chunks_in_partition: 100,
+            ..Config::testnet()
+        };
         let infos = vec![StorageModuleInfo {
             id: 0,
             partition_assignment: Some(PartitionAssignment::default()),
@@ -175,12 +181,7 @@
         let arc_db = DatabaseProvider(Arc::new(db));
 
         // Override the default StorageModule config for testing
-        let config = StorageConfig {
-            min_writes_before_sync: 1,
-            chunk_size: 32,
-            num_chunks_in_partition: 100,
-            ..Default::default()
-        };
+        let config = StorageConfig::new(&testnet_config);
 
         // Create a StorageModule with the specified submodules and config
         let storage_module_info = &infos[0];
@@ -190,7 +191,7 @@
         let mut data_bytes = vec![0u8; data_size];
         rand::thread_rng().fill(&mut data_bytes[..]);
 
-        let irys = IrysSigner::random_signer_with_chunk_size(config.chunk_size);
+        let irys = IrysSigner::random_signer(&testnet_config);
         let tx = irys.create_transaction(data_bytes.clone(), None).unwrap();
         let tx = irys.sign_transaction(tx).unwrap();
diff --git a/crates/storage/src/storage_module.rs b/crates/storage/src/storage_module.rs
index cf7fdee8..13929a00 100644
--- a/crates/storage/src/storage_module.rs
+++ b/crates/storage/src/storage_module.rs
@@ -1117,6 +1117,7 @@ pub fn validate_packing_at_point(sm: &Arc<StorageModule>, point: u32) -> eyre::Result<bool>
         sm.storage_config.entropy_packing_iterations,
         chunk_size.try_into()?,
         &mut out,
+        sm.storage_config.chain_id,
     );
 
     Ok(out == chunk)
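
The storage tests converge on one pattern for bespoke geometries: start from Config::testnet(), override only the fields under test with struct-update syntax, and derive the StorageConfig from the result. A sketch of that pattern on its own:

    // Test-config pattern used throughout this diff.
    let testnet_config = Config {
        chunk_size: 32,               // tiny chunks keep fixtures small
        num_chunks_in_partition: 100,
        num_writes_before_sync: 1,    // flush after every write in tests
        ..Config::testnet()
    };
    let storage_config = StorageConfig::new(&testnet_config);
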
diff --git a/crates/storage/tests/storage_module_index_tests.rs b/crates/storage/tests/storage_module_index_tests.rs
index 99abf580..d25e1494 100644
--- a/crates/storage/tests/storage_module_index_tests.rs
+++ b/crates/storage/tests/storage_module_index_tests.rs
@@ -9,7 +9,7 @@ use irys_storage::*;
 use irys_testing_utils::utils::setup_tracing_and_temp_dir;
 use irys_types::{
     irys::IrysSigner, ledger_chunk_offset_ii, partition::PartitionAssignment,
-    partition_chunk_offset_ie, partition_chunk_offset_ii, Address, Base64, IrysTransaction,
+    partition_chunk_offset_ie, partition_chunk_offset_ii, Base64, Config, IrysTransaction,
     IrysTransactionHeader, LedgerChunkOffset, LedgerChunkRange, PartitionChunkOffset,
     PartitionChunkRange, StorageConfig, TransactionLedger, TxChunkOffset, UnpackedChunk, H256,
 };
@@ -19,17 +19,18 @@ use tracing::info;
 
 #[test]
 fn tx_path_overlap_tests() -> eyre::Result<()> {
-    // Set up the storage geometry for this test
-    let storage_config = StorageConfig {
+    let testnet_config = Config {
         chunk_size: 32,
         num_chunks_in_partition: 20,
         num_chunks_in_recall_range: 5,
-        num_partitions_in_slot: 1,
-        miner_address: Address::random(),
-        min_writes_before_sync: 1,
+        num_partitions_per_slot: 1,
+        num_writes_before_sync: 1,
         entropy_packing_iterations: 1,
-        chunk_migration_depth: 1, // Testnet / single node config
+        chunk_migration_depth: 1,
+        ..Config::testnet()
     };
+    // Set up the storage geometry for this test
+    let storage_config = StorageConfig::new(&testnet_config);
     let chunk_size = storage_config.chunk_size;
 
     // Configure 3 storage modules that are assigned to the submit ledger in
@@ -130,7 +131,7 @@ fn tx_path_overlap_tests() -> eyre::Result<()> {
     // }
 
     // Loop through all the data_chunks and create a wrapper tx for each
-    let signer = IrysSigner::random_signer_with_chunk_size(chunk_size as usize);
+    let signer = IrysSigner::random_signer(&testnet_config);
     let mut txs: Vec<IrysTransaction> = Vec::new();
 
     for chunks in data_chunks {
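
random_signer_with_chunk_size is gone; a test signer now inherits both chunk size and chain id from the config it is built from. A hedged sketch of the replacement flow, assuming data_bytes is an arbitrary Vec<u8> payload:

    let testnet_config = Config::testnet();
    let signer = IrysSigner::random_signer(&testnet_config); // test-utils / cfg(test) only
    let tx = signer.create_transaction(data_bytes.clone(), None)?;
    let tx = signer.sign_transaction(tx)?;
    assert!(tx.header.is_signature_valid());
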
diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml
index 8157a36a..2c34b1d5 100644
--- a/crates/types/Cargo.toml
+++ b/crates/types/Cargo.toml
@@ -4,7 +4,8 @@ name = "irys-types"
 version = "0.1.0"
 
 [features]
-dev = [] # Empty list defined to prevent a warning that the macro `construct_fixed_hash!` generates without it. fixed-hash = "0.8.0"
+dev = [] # Empty list defined to prevent a warning that the macro `construct_fixed_hash!` generates without it. fixed-hash = "0.8.0"
+test-utils = []
 
 [dependencies]
 uint = "0.9.5"
@@ -12,7 +13,7 @@ base58 = "0.2.0"
 base64-url.workspace = true
 derive_more = { version = "2.0.1", features = ["add", "mul", "from", "into"], default-features = true }
 eyre = "0.6.8"
-fixed-hash = "0.8.0" # TODO: if removing the dependency, ensure you also remove the [features] dev = [] section above.
+fixed-hash = "0.8.0" # TODO: if removing the dependency, ensure you also remove the [features] dev = [] section above.
 rand = "0.8.5"
 reth-codecs = { path = "../../ext/reth/crates/storage/codecs" }
 reth-db-api = { path = "../../ext/reth/crates/storage/db-api" }
@@ -22,7 +23,6 @@ borsh = "1.3.0"
 borsh-derive = "1.3.0"
 alloy-rpc-types-engine.workspace = true
 k256 = { version = "0.13", default-features = false, features = ["ecdsa"] }
-assert_matches = "1.5.0"
 tokio.workspace = true
 actix.workspace = true
 openssl.workspace = true
@@ -41,10 +41,12 @@ alloy-rlp.workspace = true
 zerocopy = "0.8.9"
 rust_decimal.workspace = true
 rust_decimal_macros.workspace = true
-irys-macros.workspace = true
 bytemuck.workspace = true
-test-fuzz.workspace = true
+hex.workspace = true
 
+[dev-dependencies]
+toml.workspace = true
+test-fuzz.workspace = true
 
 [build-dependencies]
-build-print = "0"
\ No newline at end of file
+build-print = "0"
diff --git a/crates/types/src/block.rs b/crates/types/src/block.rs
index b30f0097..d81d1d43 100644
--- a/crates/types/src/block.rs
+++ b/crates/types/src/block.rs
@@ -338,12 +338,11 @@ impl IrysBlockHeader {
 
 #[cfg(test)]
 mod tests {
-    use crate::{irys::IrysSigner, validate_path, TxIngressProof, CONFIG, MAX_CHUNK_SIZE};
+    use crate::{validate_path, Config, TxIngressProof};
 
     use super::*;
     use alloy_primitives::Signature;
     use alloy_rlp::Decodable;
-    use k256::ecdsa::SigningKey;
     use rand::{rngs::StdRng, Rng, SeedableRng};
     use serde_json;
     use zerocopy::IntoBytes;
@@ -486,13 +485,8 @@
     fn test_irys_block_header_signing() {
         // setup
         let mut header = mock_header();
-        let mut rng = rand::thread_rng();
-        let signer = SigningKey::random(&mut rng);
-        let signer = IrysSigner {
-            signer,
-            chain_id: CONFIG.irys_chain_id,
-            chunk_size: MAX_CHUNK_SIZE,
-        };
+        let testnet_config = Config::testnet();
+        let signer = testnet_config.irys_signer();
 
         // action
         // sign the block header
diff --git a/crates/types/src/config.rs b/crates/types/src/config.rs
index 0ad9812a..f3a90a6b 100644
--- a/crates/types/src/config.rs
+++ b/crates/types/src/config.rs
@@ -1,8 +1,15 @@
-use irys_macros::load_toml;
+use alloy_primitives::Address;
 use rust_decimal::Decimal;
 use serde::{Deserialize, Serialize};
 
-use crate::{DifficultyAdjustmentConfig, U256};
+use crate::{
+    irys::IrysSigner,
+    storage_pricing::{
+        phantoms::{Percentage, Usd},
+        Amount,
+    },
+    IrysTokenPrice,
+};
 
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct Config {
@@ -20,7 +27,7 @@
     pub num_checkpoints_in_vdf_step: usize,
     pub vdf_sha_1s: u64,
     pub entropy_packing_iterations: u32,
-    pub irys_chain_id: u64,
+    pub chain_id: u64,
     /// Scaling factor for the capacity projection curve
     pub capacity_scalar: u64,
     pub num_blocks_in_epoch: u64,
@@ -38,61 +45,207 @@
     /// - 20 confirmations protects against attackers with <40% hashpower
     /// - No number of confirmations is secure against attackers with >50% hashpower
     pub chunk_migration_depth: u32,
-    pub mining_key: &'static str,
+    #[serde(
+        deserialize_with = "serde_utils::signing_key_from_hex",
+        serialize_with = "serde_utils::serializes_signing_key"
+    )]
+    pub mining_key: k256::ecdsa::SigningKey,
     // TODO: enable this after fixing option in toml
     pub num_capacity_partitions: Option<u64>,
     pub port: u16,
     /// the number of blocks a given anchor (tx or block hash) is valid for.
     /// The anchor must be included within the last X blocks or the transaction it anchors will drop.
     pub anchor_expiry_depth: u8,
+    /// defines how long the protocol should use the genesis token price (expressed as an epoch count)
+    pub genesis_price_valid_for_n_epochs: u8,
+    /// defines the genesis price of the $IRYS token, expressed in $USD
+    #[serde(deserialize_with = "serde_utils::token_amount")]
+    pub genesis_token_price: Amount<(IrysTokenPrice, Usd)>,
+    /// defines how far the token price may fluctuate from the last EMA price and still be accepted
+    #[serde(deserialize_with = "serde_utils::percentage_amount")]
+    pub token_price_safe_range: Amount<Percentage>,
 }
 
-pub const DEFAULT_BLOCK_TIME: u64 = 5;
-
-pub const CONFIG: Config = load_toml!(
-    "CONFIG_TOML_PATH",
-    Config {
-        block_time: DEFAULT_BLOCK_TIME,
-        max_data_txs_per_block: 100,
-        difficulty_adjustment_interval: (24u64 * 60 * 60 * 1000).div_ceil(DEFAULT_BLOCK_TIME) * 14, // 2 weeks worth of blocks
-        max_difficulty_adjustment_factor: rust_decimal_macros::dec!(4), // A difficulty adjustment can be 4x larger or 1/4th the current difficulty
-        min_difficulty_adjustment_factor: rust_decimal_macros::dec!(0.25), // A 10% change must be required before a difficulty adjustment will occur
-        chunk_size: 256 * 1024,
-        num_chunks_in_partition: 10,
-        num_chunks_in_recall_range: 2,
-        vdf_reset_frequency: 10 * 120, // Reset the nonce limiter (vdf) once every 1200 steps/seconds or every ~20 min
-        vdf_parallel_verification_thread_limit: 4,
-        num_checkpoints_in_vdf_step: 25, // 25 checkpoints 40 ms each = 1000 ms
-        vdf_sha_1s: 7_000,
-        entropy_packing_iterations: 22_500_000,
-        irys_chain_id: 1275, // mainnet chainID (testnet is 1270)
-        capacity_scalar: 100,
-        num_blocks_in_epoch: 100,
-        submit_ledger_epoch_length: 5,
-        num_partitions_per_slot: 1,
-        num_writes_before_sync: 5,
-        reset_state_on_restart: false,
-        chunk_migration_depth: 1, // Number of confirmations before moving chunks to storage modules
-        mining_key: "db793353b633df950842415065f769699541160845d73db902eadee6bc5042d0", // Burner PrivateKey (PK)
-        num_capacity_partitions: None,
-        port: 8080,
-        anchor_expiry_depth: 10 // lower for tests
+impl Config {
+    pub fn irys_signer(&self) -> IrysSigner {
+        IrysSigner::from_config(self)
     }
-);
-
-pub const PARTITION_SIZE: u64 = CONFIG.chunk_size * CONFIG.num_chunks_in_partition;
-pub const NUM_RECALL_RANGES_IN_PARTITION: u64 =
-    CONFIG.num_chunks_in_partition / CONFIG.num_chunks_in_recall_range;
-
-impl From<Config> for DifficultyAdjustmentConfig {
-    fn from(config: Config) -> Self {
-        DifficultyAdjustmentConfig {
-            target_block_time: config.block_time,
-            adjustment_interval: config.difficulty_adjustment_interval,
-            max_adjustment_factor: config.max_difficulty_adjustment_factor,
-            min_adjustment_factor: config.min_difficulty_adjustment_factor,
-            min_difficulty: U256::one(), // TODO: make this customizable if desirable
-            max_difficulty: U256::MAX,
+
+    pub fn miner_address(&self) -> Address {
+        Address::from_private_key(&self.mining_key)
+    }
+
+    #[cfg(any(test, feature = "test-utils"))]
+    pub fn testnet() -> Self {
+        use k256::ecdsa::SigningKey;
+
+        const DEFAULT_BLOCK_TIME: u64 = 5;
+
+        Config {
+            block_time: DEFAULT_BLOCK_TIME,
+            max_data_txs_per_block: 100,
+            difficulty_adjustment_interval: (24u64 * 60 * 60 * 1000).div_ceil(DEFAULT_BLOCK_TIME)
+                * 14,
+            max_difficulty_adjustment_factor: rust_decimal_macros::dec!(4),
+            min_difficulty_adjustment_factor: rust_decimal_macros::dec!(0.25),
+            chunk_size: 256 * 1024,
+            num_chunks_in_partition: 10,
+            num_chunks_in_recall_range: 2,
+            vdf_reset_frequency: 10 * 120,
+            vdf_parallel_verification_thread_limit: 4,
+            num_checkpoints_in_vdf_step: 25,
+            vdf_sha_1s: 7_000,
+            entropy_packing_iterations: 1000,
+            chain_id: 1275,
+            capacity_scalar: 100,
+            num_blocks_in_epoch: 100,
+            submit_ledger_epoch_length: 5,
+            num_partitions_per_slot: 1,
+            num_writes_before_sync: 1,
+            reset_state_on_restart: false,
+            chunk_migration_depth: 1,
+            mining_key: SigningKey::from_slice(
+                &hex::decode(b"db793353b633df950842415065f769699541160845d73db902eadee6bc5042d0")
+                    .expect("valid hex"),
+            )
+            .expect("valid key"),
+            num_capacity_partitions: None,
+            port: 8080,
+            anchor_expiry_depth: 10,
+            genesis_price_valid_for_n_epochs: 2,
+            genesis_token_price: Amount::token(rust_decimal_macros::dec!(1))
+                .expect("valid token amount"),
+            token_price_safe_range: Amount::percentage(rust_decimal_macros::dec!(1))
+                .expect("valid percentage"),
         }
     }
 }
+
+pub mod serde_utils {
+
+    use rust_decimal::Decimal;
+    use serde::{Deserialize as _, Deserializer, Serializer};
+
+    use crate::storage_pricing::Amount;
+
+    /// Deserialize a token amount from a string.
+    /// The string is expected to be in a format like "1.42".
+    pub fn token_amount<'de, T: std::fmt::Debug, D>(deserializer: D) -> Result<Amount<T>, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        amount_from_string(deserializer, |dec| Amount::<T>::token(dec))
+    }
+
+    /// Deserialize a percentage amount from a string.
+    ///
+    /// The string is expected to be:
+    /// - "0.1" (10%)
+    /// - "1.0" (100%)
+    pub fn percentage_amount<'de, T: std::fmt::Debug, D>(
+        deserializer: D,
+    ) -> Result<Amount<T>, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        amount_from_string(deserializer, |dec| Amount::<T>::percentage(dec))
+    }
+
+    fn amount_from_string<'de, T: std::fmt::Debug, D>(
+        deserializer: D,
+        dec_to_amount: impl Fn(Decimal) -> eyre::Result<Amount<T>>,
+    ) -> Result<Amount<T>, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        use core::str::FromStr as _;
+
+        let raw_string = String::deserialize(deserializer)?;
+        let decimal = Decimal::from_str(&raw_string).map_err(serde::de::Error::custom)?;
+        let amount = dec_to_amount(decimal).map_err(serde::de::Error::custom)?;
+        Ok(amount)
+    }
+
+    /// Deserialize a secp256k1 private key from a hex-encoded string slice
+    pub fn signing_key_from_hex<'de, D>(
+        deserializer: D,
+    ) -> Result<k256::ecdsa::SigningKey, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let bytes = String::deserialize(deserializer)?;
+        let decoded = hex::decode(bytes.as_bytes()).map_err(serde::de::Error::custom)?;
+        let key =
+            k256::ecdsa::SigningKey::from_slice(&decoded).map_err(serde::de::Error::custom)?;
+        Ok(key)
+    }
+
+    pub fn serializes_signing_key<S>(
+        key: &k256::ecdsa::SigningKey,
+        serializer: S,
+    ) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        // Convert to bytes and then hex-encode
+        let key_bytes = key.to_bytes();
+        let hex_string = hex::encode(key_bytes);
+        serializer.serialize_str(&hex_string)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use rust_decimal_macros::dec;
+    use toml;
+
+    #[test]
+    fn test_deserialize_config_from_toml() {
+        let toml_data = r#"
+block_time = 10
+max_data_txs_per_block = 20
+difficulty_adjustment_interval = 100
+max_difficulty_adjustment_factor = "4"
+min_difficulty_adjustment_factor = "0.25"
+chunk_size = 262144
+num_chunks_in_partition = 10
+num_chunks_in_recall_range = 2
+vdf_reset_frequency = 1200
+vdf_parallel_verification_thread_limit = 4
+num_checkpoints_in_vdf_step = 25
+vdf_sha_1s = 7000
+entropy_packing_iterations = 22500000
+chain_id = 1275
+capacity_scalar = 100
+num_blocks_in_epoch = 100
+submit_ledger_epoch_length = 5
+num_partitions_per_slot = 1
+num_writes_before_sync = 5
+reset_state_on_restart = false
+chunk_migration_depth = 1
+mining_key = "db793353b633df950842415065f769699541160845d73db902eadee6bc5042d0"
+num_capacity_partitions = 16
+port = 8080
+anchor_expiry_depth = 10
+genesis_price_valid_for_n_epochs = 2
+genesis_token_price = "1.0"
+token_price_safe_range = "0.25"
+"#;
+
+        // Attempt to deserialize the TOML string into a Config
+        let config: Config =
+            toml::from_str(toml_data).expect("Failed to deserialize Config from TOML");
+
+        // Basic assertions to verify deserialization succeeded
+        assert_eq!(config.block_time, 10);
+        assert_eq!(config.max_data_txs_per_block, 20);
+        assert_eq!(config.difficulty_adjustment_interval, 100);
+        assert_eq!(config.reset_state_on_restart, false);
+        assert_eq!(
+            config.genesis_token_price,
+            Amount::token(dec!(1.0)).unwrap()
+        );
+        assert_eq!(config.port, 8080);
+    }
+}
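
With the load_toml! macro removed, a node can deserialize its Config from a TOML file at startup through the serde impls above. A minimal sketch; the helper name and path are illustrative, not part of this change:

    use irys_types::Config;

    fn load_config(path: &std::path::Path) -> eyre::Result<Config> {
        let raw = std::fs::read_to_string(path)?; // e.g. "irys.toml"
        let config: Config = toml::from_str(&raw)?;
        Ok(config)
    }
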
diff --git a/crates/types/src/difficulty_adjustment_config.rs b/crates/types/src/difficulty_adjustment_config.rs
index 40c02eff..9c9901f6 100644
--- a/crates/types/src/difficulty_adjustment_config.rs
+++ b/crates/types/src/difficulty_adjustment_config.rs
@@ -1,6 +1,6 @@
 use std::time::Duration;
 
-use crate::{StorageConfig, CONFIG, U256};
+use crate::{Config, StorageConfig, U256};
 use rust_decimal::Decimal;
 use rust_decimal_macros::dec;
@@ -20,9 +20,16 @@
     pub max_difficulty: U256,
 }
 
-impl Default for DifficultyAdjustmentConfig {
-    fn default() -> Self {
-        CONFIG.into()
+impl DifficultyAdjustmentConfig {
+    pub fn new(config: &Config) -> Self {
+        DifficultyAdjustmentConfig {
+            target_block_time: config.block_time,
+            adjustment_interval: config.difficulty_adjustment_interval,
+            max_adjustment_factor: config.max_difficulty_adjustment_factor,
+            min_adjustment_factor: config.min_difficulty_adjustment_factor,
+            min_difficulty: U256::one(), // TODO: make this customizable if desirable
+            max_difficulty: U256::MAX,
+        }
     }
 }
@@ -145,9 +152,7 @@ pub fn next_cumulative_diff(previous_cumulative_diff: U256, new_diff: U256) -> U256 {
     let network_hash_rate = max_diff / (max_diff - new_diff);
     previous_cumulative_diff + network_hash_rate
 }
-//==============================================================================
-// Tests
-//------------------------------------------------------------------------------
+
 #[cfg(test)]
 mod tests {
     use std::time::Duration;
@@ -156,14 +161,13 @@
     use alloy_primitives::Address;
     use openssl::sha;
 
-    use crate::{
-        adjust_difficulty, calculate_initial_difficulty, StorageConfig, CONFIG, H256, U256,
-    };
+    use crate::{adjust_difficulty, calculate_initial_difficulty, Config, StorageConfig, H256, U256};
 
     use super::DifficultyAdjustmentConfig;
 
     #[test]
     fn test_adjustments() {
+        let config = Config::testnet();
         let difficulty_config = DifficultyAdjustmentConfig {
             target_block_time: 5,    // 5 seconds
             adjustment_interval: 10, // every X blocks
@@ -180,8 +184,9 @@
             num_partitions_in_slot: 1,
             miner_address: Address::random(),
             min_writes_before_sync: 1,
-            entropy_packing_iterations: CONFIG.entropy_packing_iterations,
+            entropy_packing_iterations: config.entropy_packing_iterations,
             chunk_migration_depth: 1, // Testnet / single node config
+            chain_id: config.chain_id,
         };
 
         let mut storage_module_count = 3;
diff --git a/crates/types/src/ingress.rs b/crates/types/src/ingress.rs
index 72bfc20f..16e7abe2 100644
--- a/crates/types/src/ingress.rs
+++ b/crates/types/src/ingress.rs
@@ -98,17 +98,18 @@ mod tests {
     use crate::{
         generate_data_root, generate_leaves, hash_sha256, ingress::verify_ingress_proof,
-        irys::IrysSigner, H256, MAX_CHUNK_SIZE,
+        irys::IrysSigner, Config, H256, MAX_CHUNK_SIZE,
     };
 
     use super::generate_ingress_proof;
 
     #[test]
     fn interleave_test() -> eyre::Result<()> {
+        let testnet_config = Config::testnet();
         let data_size = (MAX_CHUNK_SIZE as f64 * 2.5).round() as usize;
         let mut data_bytes = vec![0u8; data_size];
         rand::thread_rng().fill(&mut data_bytes[..]);
 
-        let signer = IrysSigner::random_signer();
+        let signer = IrysSigner::random_signer(&testnet_config);
         let leaves = generate_leaves(&data_bytes, MAX_CHUNK_SIZE)?;
         let interleave_value = signer.address();
         let interleave_hash = hash_sha256(&interleave_value.0 .0)?;
@@ -128,6 +129,7 @@
     #[test]
     fn basic() -> eyre::Result<()> {
         // Create some random data
+        let testnet_config = Config::testnet();
         let data_size = (MAX_CHUNK_SIZE as f64 * 2.5).round() as usize;
         let mut data_bytes = vec![0u8; data_size];
         rand::thread_rng().fill(&mut data_bytes[..]);
@@ -138,7 +140,7 @@
         let data_root = H256(root.id);
 
         // Generate an ingress proof
-        let signer = IrysSigner::random_signer();
+        let signer = IrysSigner::random_signer(&testnet_config);
         let chunks: Vec<&[u8]> = data_bytes.chunks(MAX_CHUNK_SIZE).collect();
         let proof = generate_ingress_proof(signer.clone(), data_root, &chunks)?;
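
Every derived sub-config is now seeded from the same Config value instead of a Default impl that read the global. A hedged sketch of the pattern, using the constructors introduced in this diff:

    let config = Config::testnet();
    let difficulty_config = DifficultyAdjustmentConfig::new(&config);
    let storage_config = StorageConfig::new(&config);
    assert_eq!(difficulty_config.target_block_time, config.block_time);
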
diff --git a/crates/types/src/irys.rs b/crates/types/src/irys.rs
index 42a2826e..0e4711fa 100644
--- a/crates/types/src/irys.rs
+++ b/crates/types/src/irys.rs
@@ -1,6 +1,6 @@
 use crate::{
-    generate_data_root, generate_leaves, resolve_proofs, Address, Base64, IrysBlockHeader,
-    IrysSignature, IrysTransaction, IrysTransactionHeader, Signature, CONFIG, H256, MAX_CHUNK_SIZE,
+    generate_data_root, generate_leaves, resolve_proofs, Address, Base64, Config, IrysBlockHeader,
+    IrysSignature, IrysTransaction, IrysTransactionHeader, Signature, H256,
 };
 use alloy_core::primitives::keccak256;
@@ -8,7 +8,6 @@ use alloy_signer::utils::secret_key_to_address;
 use alloy_signer_local::LocalSigner;
 use eyre::Result;
 use k256::ecdsa::SigningKey;
-use rand::rngs::OsRng;
 
 #[derive(Debug, Clone)]
@@ -21,32 +20,28 @@ pub struct IrysSigner {
 /// Encapsulates an Irys API for doing client type things, making transactions,
 /// signing them, posting them, etc.
 impl IrysSigner {
-    pub fn mainnet_from_slice(key_slice: &[u8]) -> Self {
+    pub fn from_config(config: &Config) -> Self {
         IrysSigner {
-            signer: k256::ecdsa::SigningKey::from_slice(key_slice).unwrap(),
-            chain_id: CONFIG.irys_chain_id,
-            chunk_size: CONFIG.chunk_size.try_into().unwrap(),
+            signer: config.mining_key.clone(),
+            chain_id: config.chain_id,
+            chunk_size: config
+                .chunk_size
+                .try_into()
+                .expect("invalid chunk size specified in the config"),
         }
     }
 
-    // DO NOT USE IN PROD
-    pub fn random_signer() -> Self {
-        IrysSigner {
-            signer: k256::ecdsa::SigningKey::random(&mut OsRng),
-            chain_id: CONFIG.irys_chain_id,
-            chunk_size: MAX_CHUNK_SIZE,
-        }
-    }
+    #[cfg(any(feature = "test-utils", test))]
+    pub fn random_signer(config: &Config) -> Self {
+        use rand::rngs::OsRng;
 
-    pub fn random_signer_with_chunk_size<T>(chunk_size: T) -> Self
-    where
-        T: TryInto<usize>,
-        <T as TryInto<usize>>::Error: std::fmt::Debug,
-    {
         IrysSigner {
             signer: k256::ecdsa::SigningKey::random(&mut OsRng),
-            chain_id: CONFIG.irys_chain_id,
-            chunk_size: chunk_size.try_into().unwrap(),
+            chain_id: config.chain_id,
+            chunk_size: config
+                .chunk_size
+                .try_into()
+                .expect("invalid chunk size specified"),
         }
     }
@@ -151,7 +146,6 @@ impl From<IrysSigner> for LocalSigner<SigningKey> {
 #[cfg(test)]
 mod tests {
     use crate::{hash_sha256, validate_chunk, MAX_CHUNK_SIZE};
-    use assert_matches::assert_matches;
     use rand::Rng;
     use reth_primitives::transaction::recover_signer;
@@ -160,12 +154,13 @@
     #[tokio::test]
     async fn create_and_sign_transaction() {
         // Create 2.5 chunks worth of data and fill it with random bytes
+        let config = crate::Config::testnet();
         let data_size = (MAX_CHUNK_SIZE as f64 * 2.5).round() as usize;
         let mut data_bytes = vec![0u8; data_size];
         rand::thread_rng().fill(&mut data_bytes[..]);
 
         // Create a new Irys API instance
-        let irys = IrysSigner::random_signer();
+        let irys = IrysSigner::random_signer(&config);
 
         // Create a transaction from the random bytes
         let mut tx = irys.create_transaction(data_bytes.clone(), None).unwrap();
@@ -206,7 +201,7 @@
             let root_id = tx.header.data_root.0;
             let proof = tx.proofs[index].clone();
             let proof_result = validate_chunk(root_id, chunk_node, &proof);
-            assert_matches!(proof_result, Ok(_));
+            assert!(proof_result.is_ok());
 
             // Ensure the data_hash is valid by hashing the chunk data
             let chunk_bytes: &[u8] = &data_bytes[min..max];
diff --git a/crates/types/src/signature.rs b/crates/types/src/signature.rs
index a669fb77..a47794f5 100644
--- a/crates/types/src/signature.rs
+++ b/crates/types/src/signature.rs
@@ -150,7 +150,7 @@ mod tests {
     use super::*;
     use crate::{
-        irys::IrysSigner, IrysTransaction, IrysTransactionHeader, CONFIG, H256, MAX_CHUNK_SIZE,
+        irys::IrysSigner, Config, IrysTransaction, IrysTransactionHeader, H256, MAX_CHUNK_SIZE,
     };
     use alloy_core::hex::{self};
     use alloy_primitives::Address;
@@ -168,10 +168,11 @@
     #[test]
     fn signature_signing_serialization() -> eyre::Result<()> {
+        let testnet_config = Config::testnet();
         let irys_signer = IrysSigner {
             signer: SigningKey::from_slice(hex::decode(DEV_PRIVATE_KEY).unwrap().as_slice())
                 .unwrap(),
-            chain_id: CONFIG.irys_chain_id,
+            chain_id: testnet_config.chain_id,
             chunk_size: MAX_CHUNK_SIZE,
         };
@@ -185,7 +186,7 @@
             perm_fee: Some(1),
             ledger_id: 0,
             bundle_format: Some(0),
-            chain_id: CONFIG.irys_chain_id,
+            chain_id: testnet_config.chain_id,
             version: 0,
             ingress_proofs: None,
             signature: Default::default(),
diff --git a/crates/types/src/storage.rs b/crates/types/src/storage.rs
index 95684cf6..f5a691c7 100644
--- a/crates/types/src/storage.rs
+++ b/crates/types/src/storage.rs
@@ -4,7 +4,7 @@ use std::{
     path::PathBuf,
 };
 
-use crate::{RelativeChunkOffset, CONFIG};
+use crate::{Config, RelativeChunkOffset};
 use derive_more::{Add, Div, From, Into, Mul, Sub};
 use nodit::{
     interval::{ie, ii},
@@ -390,15 +390,13 @@ pub struct PartitionStorageProviderConfig {
 pub struct StorageModuleConfig {
     pub directory_path: PathBuf,
     pub size_bytes: u64,
-    // pub chunks_per_lock_segment: u32,
 }
 
-impl Default for StorageModuleConfig {
-    fn default() -> Self {
+impl StorageModuleConfig {
+    pub fn new(config: &Config) -> Self {
         Self {
             directory_path: "/tmp".into(),
-            size_bytes: 100 * CONFIG.chunk_size,
-            // chunks_per_lock_segment: 800, // 200MB
+            size_bytes: 100 * config.chunk_size,
         }
     }
 }
diff --git a/crates/types/src/storage_config.rs b/crates/types/src/storage_config.rs
index 9000a5a9..ee78f753 100644
--- a/crates/types/src/storage_config.rs
+++ b/crates/types/src/storage_config.rs
@@ -2,11 +2,11 @@ use serde::{Deserialize, Serialize};
 
 use crate::*;
 
-/// This is hardcoded here to be used just by C packing related staff as it is also hardcoded right now in C sources
+/// Hardcoded here for use only by the C packing related code, as it is currently also hardcoded in the C sources
 pub const CHUNK_SIZE: u64 = 256 * 1024;
 
 /// Protocol storage sizing configuration
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 pub struct StorageConfig {
     /// Size of each chunk in bytes
     pub chunk_size: u64,
@@ -24,20 +24,23 @@
     pub entropy_packing_iterations: u32,
     /// Number of confirmations before storing tx data in `StorageModule`s
     pub chunk_migration_depth: u32,
+    /// Irys chain id
+    pub chain_id: u64,
 }
 
-impl Default for StorageConfig {
-    fn default() -> Self {
+impl StorageConfig {
+    pub fn new(config: &Config) -> Self {
         Self {
-            chunk_size: CONFIG.chunk_size,
-            num_chunks_in_partition: CONFIG.num_chunks_in_partition,
-            num_chunks_in_recall_range: CONFIG.num_chunks_in_recall_range,
-            num_partitions_in_slot: CONFIG.num_partitions_per_slot,
-            miner_address: Address::random(),
-            min_writes_before_sync: CONFIG.num_writes_before_sync,
+            chain_id: config.chain_id,
+            chunk_size: config.chunk_size,
+            num_chunks_in_partition: config.num_chunks_in_partition,
+            num_chunks_in_recall_range: config.num_chunks_in_recall_range,
+            num_partitions_in_slot: config.num_partitions_per_slot,
+            miner_address: Address::from_private_key(&config.mining_key),
+            min_writes_before_sync: config.num_writes_before_sync,
             // TODO: revert this back
             entropy_packing_iterations: 1_000, /* PACKING_SHA_1_5_S */
-            chunk_migration_depth: CONFIG.chunk_migration_depth,
+            chunk_migration_depth: config.chunk_migration_depth,
         }
     }
 }
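
Signer and miner identity are now both derived from mining_key, so they cannot drift apart the way the old Address::random() default allowed. A hedged sketch (in production the Config would come from TOML rather than testnet()):

    let config = Config::testnet();
    let signer = config.irys_signer();   // IrysSigner::from_config(&config)
    let miner = config.miner_address();  // Address::from_private_key(&config.mining_key)
    assert_eq!(signer.address(), miner); // same key, same address
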
diff --git a/crates/types/src/transaction.rs b/crates/types/src/transaction.rs
index 1542d0bf..0a82ca1a 100644
--- a/crates/types/src/transaction.rs
+++ b/crates/types/src/transaction.rs
@@ -1,6 +1,6 @@
 use crate::{
     address_base58_stringify, optional_string_u64, string_u64, Address, Arbitrary, Base64, Compact,
-    IrysSignature, Node, Proof, Signature, TxIngressProof, CONFIG, H256,
+    Config, IrysSignature, Node, Proof, Signature, TxIngressProof, H256,
 };
 use alloy_primitives::keccak256;
 use alloy_rlp::{Encodable, RlpDecodable, RlpEncodable};
@@ -13,6 +13,7 @@ pub type IrysTransactionId = H256;
     Debug,
     Eq,
     Serialize,
+    Default,
     Deserialize,
     PartialEq,
     Arbitrary,
@@ -143,8 +144,8 @@ impl IrysTransaction {
     }
 }
 
-impl Default for IrysTransactionHeader {
-    fn default() -> Self {
+impl IrysTransactionHeader {
+    pub fn new(config: &Config) -> Self {
         IrysTransactionHeader {
             id: H256::zero(),
             anchor: H256::zero(),
@@ -156,7 +157,7 @@
             ledger_id: 0,
             bundle_format: None,
             version: 0,
-            chain_id: CONFIG.irys_chain_id,
+            chain_id: config.chain_id,
             signature: Signature::test_signature().into(),
             ingress_proofs: None,
         }
@@ -181,7 +182,8 @@
     #[test]
     fn test_irys_transaction_header_rlp_round_trip() {
         // setup
-        let mut header = mock_header();
+        let config = Config::testnet();
+        let mut header = mock_header(&config);
 
         // action
         let mut buffer = vec![];
@@ -198,7 +200,8 @@
     #[test]
     fn test_irys_transaction_header_serde() {
         // Create a sample IrysTransactionHeader
-        let original_header = mock_header();
+        let config = Config::testnet();
+        let original_header = mock_header(&config);
 
         // Serialize the IrysTransactionHeader to JSON
         let serialized = serde_json::to_string(&original_header).expect("Failed to serialize");
@@ -214,40 +217,20 @@
     #[test]
     fn test_tx_encode_and_signing() {
-        // Create a sample IrysTransactionHeader
-        // commented out fields are defaulted by the RLP decoder
-        let original_header = IrysTransactionHeader {
-            // id: H256::from([255u8; 32]),
-            id: Default::default(),
-            anchor: H256::from([1u8; 32]),
-            signer: Address::ZERO,
-            data_root: H256::from([3u8; 32]),
-            data_size: 1024,
-            term_fee: 100,
-            // perm_fee: Some(200),
-            perm_fee: None,
-            ledger_id: 0,
-            chain_id: CONFIG.irys_chain_id,
-            bundle_format: None,
-            version: 0,
-            ingress_proofs: None,
-            signature: Default::default(),
-        };
-
+        // setup
+        let config = Config::testnet();
+        let original_header = mock_header(&config);
         let mut sig_data = Vec::new();
-
         original_header.encode(&mut sig_data);
-
         let dec: IrysTransactionHeader =
             IrysTransactionHeader::decode(&mut sig_data.as_slice()).unwrap();
-        assert_eq!(&dec, &original_header);
 
+        // action
         let signer = IrysSigner {
             signer: SigningKey::random(&mut rand::thread_rng()),
-            chain_id: CONFIG.irys_chain_id,
+            chain_id: config.chain_id,
             chunk_size: MAX_CHUNK_SIZE,
         };
-
         let tx = IrysTransaction {
             header: dec,
             ..Default::default()
@@ -258,7 +241,7 @@
         assert!(signed_tx.header.is_signature_valid());
     }
 
-    fn mock_header() -> IrysTransactionHeader {
+    fn mock_header(config: &Config) -> IrysTransactionHeader {
         let original_header = IrysTransactionHeader {
             id: H256::from([255u8; 32]),
             anchor: H256::from([1u8; 32]),
@@ -269,7 +252,7 @@
             perm_fee: Some(200),
             ledger_id: 1,
             bundle_format: None,
-            chain_id: CONFIG.irys_chain_id,
+            chain_id: config.chain_id,
             version: 0,
             ingress_proofs: None,
             signature: Signature::test_signature().into(),
diff --git a/crates/types/src/vdf_config.rs b/crates/types/src/vdf_config.rs
index c8c4fa89..50588f1b 100644
--- a/crates/types/src/vdf_config.rs
+++ b/crates/types/src/vdf_config.rs
@@ -5,7 +5,7 @@ use crate::*;
 pub type AtomicVdfStepNumber = Arc<AtomicU64>;
 
 /// Allows for overriding of the vdf steps generation parameters
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 pub struct VDFStepsConfig {
     pub num_checkpoints_in_vdf_step: usize,
     pub vdf_reset_frequency: usize,
@@ -13,13 +13,13 @@
     pub vdf_parallel_verification_thread_limit: usize,
 }
 
-impl Default for VDFStepsConfig {
-    fn default() -> Self {
+impl VDFStepsConfig {
+    pub fn new(config: &Config) -> Self {
         VDFStepsConfig {
-            num_checkpoints_in_vdf_step: CONFIG.num_checkpoints_in_vdf_step,
-            vdf_reset_frequency: CONFIG.vdf_reset_frequency,
-            vdf_difficulty: CONFIG.vdf_sha_1s,
-            vdf_parallel_verification_thread_limit: CONFIG.vdf_parallel_verification_thread_limit,
+            num_checkpoints_in_vdf_step: config.num_checkpoints_in_vdf_step,
+            vdf_reset_frequency: config.vdf_reset_frequency,
+            vdf_difficulty: config.vdf_sha_1s,
+            vdf_parallel_verification_thread_limit: config.vdf_parallel_verification_thread_limit,
         }
     }
 }
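
Tests that used to splice vdf_difficulty into VDFStepsConfig::default() now either mutate the value built by VDFStepsConfig::new or override vdf_sha_1s on the Config first; both variants appear below. A sketch of the latter:

    let mut testnet_config = Config::testnet();
    testnet_config.vdf_sha_1s = 100_000; // becomes vdf_difficulty via VDFStepsConfig::new
    let vdf_config = VDFStepsConfig::new(&testnet_config);
    assert_eq!(vdf_config.vdf_difficulty, 100_000);
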
diff --git a/crates/vdf/src/lib.rs b/crates/vdf/src/lib.rs
index d9111a8e..35881e6a 100644
--- a/crates/vdf/src/lib.rs
+++ b/crates/vdf/src/lib.rs
@@ -386,6 +386,7 @@ fn warn_mismatches(a: &H256List, b: &H256List) {
 #[cfg(test)]
 mod tests {
     use base58::{FromBase58, ToBase58};
+    use irys_types::Config;
 
     use super::*;
 
     fn _generate_next_vdf_step() {
@@ -396,21 +397,16 @@
             .unwrap(),
         );
 
+        let testnet_config = Config::testnet();
         let reset_seed = H256([0; 32]);
 
-        // seed = apply_reset_seed(seed, reset_seed);
-        // println!("seed after reset {:?}", seed);
-
         let start_step_number = 0;
 
         let mut hasher = Sha256::new();
-        let mut salt = U256::from(step_number_to_salt_number(
-            &VDFStepsConfig::default(),
-            start_step_number,
-        ));
-        let mut checkpoints: Vec<H256> = vec![H256::default(); 25];
-
-        //seed = apply_reset_seed(seed, reset_seed);
+        let config = VDFStepsConfig::new(&testnet_config);
+        let mut salt = U256::from(step_number_to_salt_number(&config, start_step_number));
+        let mut checkpoints: Vec<H256> = vec![H256::default(); 25];
 
         vdf_sha(
             &mut hasher,
             &mut salt,
@@ -445,6 +441,7 @@
     #[tokio::test]
     async fn test_checkpoints_for_single_step_block() {
         // step: 44398 output: 0x893d
+        let testnet_config = Config::testnet();
         let vdf_info = VDFLimiterInfo {
             output: to_hash("AEj76XfsPWoB2CjcDm3RXTwaM5AKs7SbWnkHR8umvgmW"),
             global_step_number: 44398,
@@ -487,10 +484,8 @@
             next_vdf_difficulty: None,
         };
 
-        let config = VDFStepsConfig {
-            vdf_difficulty: 100_000,
-            ..VDFStepsConfig::default()
-        };
+        let mut config = VDFStepsConfig::new(&testnet_config);
+        config.vdf_difficulty = 100_000;
 
         let x = last_step_checkpoints_is_valid(&vdf_info, &config).await;
         assert!(x.is_ok());
@@ -522,6 +517,7 @@
     #[tokio::test]
     async fn test_checkpoints_for_single_step_block_before_reset() {
+        let testnet_config = Config::testnet();
         // step: 44398 output: 0x893d
         let vdf_info = VDFLimiterInfo {
             output: H256(
@@ -711,10 +707,8 @@
             next_vdf_difficulty: None,
         };
 
-        let config = VDFStepsConfig {
-            vdf_difficulty: 100_000,
-            ..VDFStepsConfig::default()
-        };
+        let mut config = VDFStepsConfig::new(&testnet_config);
+        config.vdf_difficulty = 100_000;
 
         let x = last_step_checkpoints_is_valid(&vdf_info, &config).await;
         assert!(x.is_ok());
@@ -742,6 +736,9 @@
     #[tokio::test]
     async fn test_checkpoints_for_single_step_block_after_reset() {
+        let mut testnet_config = Config::testnet();
+        testnet_config.vdf_sha_1s = 100_000;
+
         // step: 44398 output: 0x893d
         let vdf_info = VDFLimiterInfo {
             output: H256(
@@ -931,10 +928,7 @@
             next_vdf_difficulty: None,
         };
 
-        let config = VDFStepsConfig {
-            vdf_difficulty: 100_000,
-            ..VDFStepsConfig::default()
-        };
+        let config = VDFStepsConfig::new(&testnet_config);
 
         let x = last_step_checkpoints_is_valid(&vdf_info, &config).await;
         assert!(x.is_ok());
@@ -943,6 +937,9 @@
     // one special case that does not apply the reset seed
     #[tokio::test]
     async fn test_checkpoints_for_single_step_one() {
+        let mut testnet_config = Config::testnet();
+        testnet_config.vdf_sha_1s = 100_000;
+
         let vdf_info = VDFLimiterInfo {
             output: H256(
                 hex::decode("68230a9b96fbd924982a3d29485ad2c67285d76f2c8fc0a4770d50ed5fd41efd")
@@ -1131,11 +1128,7 @@
             next_vdf_difficulty: None,
         };
 
-        let config = VDFStepsConfig {
-            vdf_difficulty: 100_000,
-            ..VDFStepsConfig::default()
-        };
-
+        let config = VDFStepsConfig::new(&testnet_config);
         let x = last_step_checkpoints_is_valid(&vdf_info, &config).await;
         assert!(x.is_ok());
diff --git a/crates/vdf/src/vdf_state.rs b/crates/vdf/src/vdf_state.rs
index be3bdbef..1e9fba4b 100644
--- a/crates/vdf/src/vdf_state.rs
+++ b/crates/vdf/src/vdf_state.rs
@@ -14,7 +14,7 @@ pub type AtomicVdfState = Arc<RwLock<VdfState>>;
 
 use tokio::time::sleep;
 
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 pub struct VdfState {
     /// last global step stored
    pub global_step: u64,