From 541ec427bfccea1032450bd99e3e7220755ce531 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 3 Jan 2025 20:25:25 -0500 Subject: [PATCH 01/16] Fix clippy::needless_borrow throughout stackslib Signed-off-by: Jacinta Ferrant --- stackslib/src/burnchains/affirmation.rs | 4 +- stackslib/src/burnchains/bitcoin/address.rs | 2 +- stackslib/src/burnchains/bitcoin/bits.rs | 6 +- stackslib/src/burnchains/bitcoin/blocks.rs | 19 +- stackslib/src/burnchains/bitcoin/indexer.rs | 6 +- stackslib/src/burnchains/bitcoin/spv.rs | 4 +- stackslib/src/burnchains/burnchain.rs | 20 +- stackslib/src/burnchains/db.rs | 10 +- stackslib/src/burnchains/mod.rs | 2 +- stackslib/src/burnchains/tests/affirmation.rs | 4 +- stackslib/src/burnchains/tests/db.rs | 20 +- stackslib/src/burnchains/tests/mod.rs | 8 +- .../src/chainstate/burn/db/processing.rs | 6 +- stackslib/src/chainstate/burn/db/sortdb.rs | 160 ++++---- stackslib/src/chainstate/burn/distribution.rs | 2 +- .../burn/operations/leader_block_commit.rs | 2 +- stackslib/src/chainstate/coordinator/mod.rs | 151 +++---- stackslib/src/chainstate/coordinator/tests.rs | 12 +- .../chainstate/nakamoto/coordinator/mod.rs | 10 +- .../chainstate/nakamoto/coordinator/tests.rs | 10 +- stackslib/src/chainstate/nakamoto/miner.rs | 30 +- stackslib/src/chainstate/nakamoto/mod.rs | 30 +- stackslib/src/chainstate/nakamoto/shadow.rs | 8 +- .../src/chainstate/nakamoto/signer_set.rs | 10 +- .../src/chainstate/nakamoto/staging_blocks.rs | 12 +- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- .../src/chainstate/nakamoto/test_signers.rs | 2 +- .../src/chainstate/nakamoto/tests/mod.rs | 12 +- .../src/chainstate/nakamoto/tests/node.rs | 34 +- stackslib/src/chainstate/stacks/auth.rs | 2 +- stackslib/src/chainstate/stacks/block.rs | 21 +- .../chainstate/stacks/boot/contract_tests.rs | 6 +- stackslib/src/chainstate/stacks/boot/mod.rs | 144 +++---- .../src/chainstate/stacks/boot/pox_2_tests.rs | 50 +-- .../src/chainstate/stacks/boot/pox_3_tests.rs | 14 +- 
.../src/chainstate/stacks/boot/pox_4_tests.rs | 296 +++++++------- .../chainstate/stacks/boot/signers_tests.rs | 4 +- .../src/chainstate/stacks/db/accounts.rs | 4 +- stackslib/src/chainstate/stacks/db/blocks.rs | 387 +++++++++--------- stackslib/src/chainstate/stacks/db/headers.rs | 4 +- stackslib/src/chainstate/stacks/db/mod.rs | 16 +- .../src/chainstate/stacks/db/transactions.rs | 60 ++- .../src/chainstate/stacks/db/unconfirmed.rs | 24 +- stackslib/src/chainstate/stacks/index/marf.rs | 8 +- stackslib/src/chainstate/stacks/index/node.rs | 2 +- .../src/chainstate/stacks/index/proofs.rs | 11 +- .../src/chainstate/stacks/index/storage.rs | 44 +- .../src/chainstate/stacks/index/test/file.rs | 6 +- .../src/chainstate/stacks/index/test/marf.rs | 2 +- .../src/chainstate/stacks/index/test/mod.rs | 2 +- .../chainstate/stacks/index/test/storage.rs | 14 +- stackslib/src/chainstate/stacks/index/trie.rs | 4 +- stackslib/src/chainstate/stacks/miner.rs | 76 ++-- .../src/chainstate/stacks/tests/accounting.rs | 20 +- .../stacks/tests/block_construction.rs | 126 +++--- .../stacks/tests/chain_histories.rs | 64 +-- stackslib/src/chainstate/stacks/tests/mod.rs | 14 +- .../src/chainstate/stacks/transaction.rs | 10 +- stackslib/src/clarity_cli.rs | 47 +-- stackslib/src/clarity_vm/clarity.rs | 44 +- stackslib/src/clarity_vm/database/marf.rs | 2 +- stackslib/src/clarity_vm/tests/contracts.rs | 32 +- stackslib/src/clarity_vm/tests/forking.rs | 8 +- .../src/clarity_vm/tests/simple_tests.rs | 2 +- stackslib/src/cli.rs | 26 +- stackslib/src/core/mempool.rs | 40 +- stackslib/src/core/tests/mod.rs | 4 +- stackslib/src/cost_estimates/fee_medians.rs | 4 +- stackslib/src/cost_estimates/fee_scalar.rs | 4 +- stackslib/src/cost_estimates/pessimistic.rs | 4 +- .../cost_estimates/tests/cost_estimators.rs | 8 +- stackslib/src/main.rs | 29 +- .../src/net/api/getmicroblocks_indexed.rs | 2 +- .../src/net/api/getmicroblocks_unconfirmed.rs | 2 +- stackslib/src/net/api/getpoxinfo.rs | 4 +- 
stackslib/src/net/api/gettenure.rs | 2 +- .../src/net/api/gettransaction_unconfirmed.rs | 2 +- stackslib/src/net/api/mod.rs | 4 +- stackslib/src/net/api/postblock.rs | 2 +- stackslib/src/net/api/postblock_proposal.rs | 2 +- stackslib/src/net/api/postblock_v3.rs | 2 +- stackslib/src/net/api/postfeerate.rs | 2 +- stackslib/src/net/api/postmicroblock.rs | 2 +- stackslib/src/net/api/tests/getheaders.rs | 3 +- .../net/api/tests/getmicroblocks_confirmed.rs | 2 +- .../net/api/tests/getmicroblocks_indexed.rs | 2 +- .../api/tests/getmicroblocks_unconfirmed.rs | 2 +- stackslib/src/net/api/tests/mod.rs | 10 +- .../src/net/api/tests/postblock_proposal.rs | 2 +- .../src/net/api/tests/postmempoolquery.rs | 2 +- stackslib/src/net/atlas/db.rs | 4 +- stackslib/src/net/atlas/download.rs | 14 +- stackslib/src/net/atlas/tests.rs | 4 +- stackslib/src/net/chat.rs | 164 ++++---- stackslib/src/net/connection.rs | 2 +- stackslib/src/net/db.rs | 26 +- stackslib/src/net/dns.rs | 2 +- stackslib/src/net/download/epoch2x.rs | 24 +- .../nakamoto/download_state_machine.rs | 8 +- .../nakamoto/tenure_downloader_set.rs | 10 +- stackslib/src/net/http/mod.rs | 2 +- stackslib/src/net/http/request.rs | 2 +- stackslib/src/net/http/response.rs | 2 +- stackslib/src/net/httpcore.rs | 16 +- stackslib/src/net/inv/epoch2x.rs | 28 +- stackslib/src/net/inv/nakamoto.rs | 4 +- stackslib/src/net/mempool/mod.rs | 2 +- stackslib/src/net/mod.rs | 34 +- stackslib/src/net/neighbors/comms.rs | 4 +- stackslib/src/net/neighbors/db.rs | 20 +- stackslib/src/net/neighbors/neighbor.rs | 6 +- stackslib/src/net/neighbors/walk.rs | 22 +- stackslib/src/net/p2p.rs | 38 +- stackslib/src/net/prune.rs | 16 +- stackslib/src/net/relay.rs | 29 +- stackslib/src/net/server.rs | 6 +- stackslib/src/net/stackerdb/db.rs | 32 +- stackslib/src/net/stackerdb/mod.rs | 4 +- stackslib/src/net/stackerdb/sync.rs | 18 +- stackslib/src/net/stackerdb/tests/config.rs | 4 +- stackslib/src/net/stackerdb/tests/db.rs | 24 +- 
stackslib/src/net/tests/convergence.rs | 16 +- stackslib/src/net/tests/download/epoch2x.rs | 26 +- stackslib/src/net/tests/inv/epoch2x.rs | 18 +- stackslib/src/net/tests/mempool/mod.rs | 32 +- stackslib/src/net/tests/mod.rs | 6 +- stackslib/src/net/tests/relay/epoch2x.rs | 54 +-- stackslib/src/net/unsolicited.rs | 48 ++- stackslib/src/util_lib/bloom.rs | 8 +- stackslib/src/util_lib/db.rs | 4 +- .../src/util_lib/signed_structured_data.rs | 6 +- stackslib/src/util_lib/strings.rs | 10 +- 132 files changed, 1532 insertions(+), 1603 deletions(-) diff --git a/stackslib/src/burnchains/affirmation.rs b/stackslib/src/burnchains/affirmation.rs index 88ad745800..b798d69f6a 100644 --- a/stackslib/src/burnchains/affirmation.rs +++ b/stackslib/src/burnchains/affirmation.rs @@ -557,7 +557,7 @@ pub fn read_prepare_phase_commits( let mut ret = vec![]; for header in headers.into_iter() { - let blk = BurnchainDB::get_burnchain_block(&burnchain_tx.conn(), &header.block_hash) + let blk = BurnchainDB::get_burnchain_block(burnchain_tx.conn(), &header.block_hash) .unwrap_or_else(|_| { panic!( "BUG: failed to load prepare-phase block {} ({})", @@ -1126,7 +1126,7 @@ pub fn find_pox_anchor_block( let prepare_ops_valid = inner_find_valid_prepare_phase_commits(burnchain_tx, reward_cycle, indexer, burnchain)?; let anchor_block_and_descendancy_opt = find_heaviest_block_commit( - &burnchain_tx, + burnchain_tx, indexer, &prepare_ops_valid, burnchain.pox_constants.anchor_threshold, diff --git a/stackslib/src/burnchains/bitcoin/address.rs b/stackslib/src/burnchains/bitcoin/address.rs index 24e0ef8f9d..56456b8398 100644 --- a/stackslib/src/burnchains/bitcoin/address.rs +++ b/stackslib/src/burnchains/bitcoin/address.rs @@ -290,7 +290,7 @@ impl SegwitBitcoinAddress { let mut bytes_u5: Vec = vec![u5::try_from_u8(self.witness_version()) .expect("FATAL: bad witness version does not fit into a u5")]; bytes_u5.extend_from_slice(&bytes.to_base32()); - let addr = bech32::encode(&hrp, bytes_u5, 
self.bech32_variant()) + let addr = bech32::encode(hrp, bytes_u5, self.bech32_variant()) .expect("FATAL: could not encode segwit address"); addr } diff --git a/stackslib/src/burnchains/bitcoin/bits.rs b/stackslib/src/burnchains/bitcoin/bits.rs index 4198bf3278..f3f90a15a4 100644 --- a/stackslib/src/burnchains/bitcoin/bits.rs +++ b/stackslib/src/burnchains/bitcoin/bits.rs @@ -59,7 +59,7 @@ impl BitcoinTxInputStructured { let i2 = &instructions[1]; match (i1, i2) { - (Instruction::PushBytes(ref _data1), Instruction::PushBytes(ref data2)) => { + (Instruction::PushBytes(_data1), Instruction::PushBytes(data2)) => { // data2 is a pubkey? match BitcoinPublicKey::from_slice(data2) { Ok(pubkey) => { @@ -1277,7 +1277,7 @@ mod tests { let raw_in = BitcoinTxInputRaw::from_bitcoin_witness_script_sig( &txin.script_sig, txin.witness.clone(), - to_txid(&txin), + to_txid(txin), ); assert_eq!(raw_in, inputs[i]); } @@ -1290,7 +1290,7 @@ mod tests { } let segwit_out = - BitcoinTxOutput::from_bitcoin_txout(BitcoinNetworkType::Mainnet, &txout) + BitcoinTxOutput::from_bitcoin_txout(BitcoinNetworkType::Mainnet, txout) .unwrap(); assert_eq!(segwit_out, outputs[j]); j += 1; diff --git a/stackslib/src/burnchains/bitcoin/blocks.rs b/stackslib/src/burnchains/bitcoin/blocks.rs index b9623bd210..bdd4a0f12f 100644 --- a/stackslib/src/burnchains/bitcoin/blocks.rs +++ b/stackslib/src/burnchains/bitcoin/blocks.rs @@ -253,8 +253,7 @@ impl BitcoinBlockParser { } // block transactions must match header merkle root - let tx_merkle_root = - bitcoin_merkle_root(block.txdata.iter().map(|ref tx| tx.txid()).collect()); + let tx_merkle_root = bitcoin_merkle_root(block.txdata.iter().map(|tx| tx.txid()).collect()); if block.header.merkle_root != tx_merkle_root { return false; @@ -275,7 +274,7 @@ impl BitcoinBlockParser { return None; } - let script_pieces = bits::parse_script(&data_output); + let script_pieces = bits::parse_script(data_output); if script_pieces.len() != 2 { // not OP_RETURN test_debug!("Data 
output does not encode a valid OP_RETURN"); @@ -283,7 +282,7 @@ impl BitcoinBlockParser { } match (&script_pieces[0], &script_pieces[1]) { - (Instruction::Op(ref opcode), Instruction::PushBytes(ref data)) => { + (Instruction::Op(ref opcode), Instruction::PushBytes(data)) => { if *opcode != btc_opcodes::OP_RETURN { test_debug!("Data output does not use a standard OP_RETURN"); return None; @@ -351,7 +350,7 @@ impl BitcoinBlockParser { fn parse_inputs_structured(tx: &Transaction) -> Option> { let mut ret = vec![]; for inp in &tx.input { - match BitcoinTxInput::from_bitcoin_txin_structured(&inp) { + match BitcoinTxInput::from_bitcoin_txin_structured(inp) { None => { test_debug!("Failed to parse input"); return None; @@ -369,7 +368,7 @@ impl BitcoinBlockParser { fn parse_inputs_raw(tx: &Transaction) -> Vec { let mut ret = vec![]; for inp in &tx.input { - ret.push(BitcoinTxInput::from_bitcoin_txin_raw(&inp)); + ret.push(BitcoinTxInput::from_bitcoin_txin_raw(inp)); } ret } @@ -388,9 +387,9 @@ impl BitcoinBlockParser { let mut ret = vec![]; for outp in &tx.output[1..tx.output.len()] { let out_opt = if BitcoinBlockParser::allow_segwit_outputs(epoch_id) { - BitcoinTxOutput::from_bitcoin_txout(self.network_id, &outp) + BitcoinTxOutput::from_bitcoin_txout(self.network_id, outp) } else { - BitcoinTxOutput::from_bitcoin_txout_legacy(self.network_id, &outp) + BitcoinTxOutput::from_bitcoin_txout_legacy(self.network_id, outp) }; match out_opt { None => { @@ -509,7 +508,7 @@ impl BitcoinBlockParser { } // parse it - let burn_block = self.parse_block(&block, height, epoch_id); + let burn_block = self.parse_block(block, height, epoch_id); Some(burn_block) } } @@ -525,7 +524,7 @@ impl BurnchainBlockParser for BitcoinBlockParser { match ipc_block.block_message { btc_message::NetworkMessage::Block(ref block) => { match self.process_block( - &block, + block, &ipc_block.header_data.block_header, ipc_block.header_data.block_height, epoch_id, diff --git 
a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index af9bc24864..93675b0fcb 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -469,7 +469,7 @@ impl BitcoinIndexer { network_id: BitcoinNetworkType, ) -> Result { SpvClient::new_without_migration( - &reorg_headers_path, + reorg_headers_path, start_block, end_block, network_id, @@ -486,7 +486,7 @@ impl BitcoinIndexer { network_id: BitcoinNetworkType, ) -> Result { SpvClient::new( - &reorg_headers_path, + reorg_headers_path, start_block, end_block, network_id, @@ -3476,7 +3476,7 @@ mod test { // set up SPV client so we don't have chain work at first let mut spv_client = SpvClient::new_without_migration( - &db_path, + db_path, 0, None, BitcoinNetworkType::Regtest, diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index 861baed580..55ad629412 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -724,13 +724,13 @@ impl SpvClient { .next() .map_err(|e| btc_error::DBError(db_error::SqliteError(e)))? 
{ - let height: u64 = u64::from_column(&row, "height")?; + let height: u64 = u64::from_column(row, "height")?; if height != next_height { break; } next_height += 1; - let next_header = BlockHeader::from_row(&row)?; + let next_header = BlockHeader::from_row(row)?; headers.push(LoneBlockHeader { header: next_header, tx_count: VarInt(0), diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index caeefe538c..ae344f706f 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -98,7 +98,7 @@ impl BurnchainStateTransition { /// Get the transaction IDs of all accepted burnchain operations in this block pub fn txids(&self) -> Vec { - self.accepted_ops.iter().map(|ref op| op.txid()).collect() + self.accepted_ops.iter().map(|op| op.txid()).collect() } /// Get the sum of all burnchain tokens spent in this burnchain block's accepted operations @@ -196,7 +196,7 @@ impl BurnchainStateTransition { // find all VRF leader keys that were consumed by the block commits of this block let consumed_leader_keys = - sort_tx.get_consumed_leader_keys(&parent_snapshot, &block_commits)?; + sort_tx.get_consumed_leader_keys(parent_snapshot, &block_commits)?; // assemble the commit windows let mut windowed_block_commits = vec![block_commits]; @@ -355,7 +355,7 @@ impl BurnchainStateTransition { ); } - accepted_ops.sort_by(|ref a, ref b| a.vtxindex().partial_cmp(&b.vtxindex()).unwrap()); + accepted_ops.sort_by(|a, b| a.vtxindex().partial_cmp(&b.vtxindex()).unwrap()); Ok(BurnchainStateTransition { burn_dist, @@ -425,7 +425,7 @@ impl BurnchainBlock { BurnchainBlock::Bitcoin(ref data) => data .txs .iter() - .map(|ref tx| BurnchainTransaction::Bitcoin((*tx).clone())) + .map(|tx| BurnchainTransaction::Bitcoin((*tx).clone())) .collect(), } } @@ -850,7 +850,7 @@ impl Burnchain { } x if x == Opcodes::TransferStx as u8 => { let pre_stx_txid = TransferStxOp::get_sender_txid(burn_tx).ok()?; - let pre_stx_tx = match 
pre_stx_op_map.get(&pre_stx_txid) { + let pre_stx_tx = match pre_stx_op_map.get(pre_stx_txid) { Some(tx_ref) => Some(BlockstackOperationType::PreStx(tx_ref.clone())), None => burnchain_db.find_burnchain_op(indexer, pre_stx_txid), }; @@ -879,7 +879,7 @@ impl Burnchain { } x if x == Opcodes::StackStx as u8 => { let pre_stx_txid = StackStxOp::get_sender_txid(burn_tx).ok()?; - let pre_stx_tx = match pre_stx_op_map.get(&pre_stx_txid) { + let pre_stx_tx = match pre_stx_op_map.get(pre_stx_txid) { Some(tx_ref) => Some(BlockstackOperationType::PreStx(tx_ref.clone())), None => burnchain_db.find_burnchain_op(indexer, pre_stx_txid), }; @@ -914,7 +914,7 @@ impl Burnchain { } x if x == Opcodes::DelegateStx as u8 => { let pre_stx_txid = DelegateStxOp::get_sender_txid(burn_tx).ok()?; - let pre_stx_tx = match pre_stx_op_map.get(&pre_stx_txid) { + let pre_stx_tx = match pre_stx_op_map.get(pre_stx_txid) { Some(tx_ref) => Some(BlockstackOperationType::PreStx(tx_ref.clone())), None => burnchain_db.find_burnchain_op(indexer, pre_stx_txid), }; @@ -943,7 +943,7 @@ impl Burnchain { } x if x == Opcodes::VoteForAggregateKey as u8 => { let pre_stx_txid = VoteForAggregateKeyOp::get_sender_txid(burn_tx).ok()?; - let pre_stx_tx = match pre_stx_op_map.get(&pre_stx_txid) { + let pre_stx_tx = match pre_stx_op_map.get(pre_stx_txid) { Some(tx_ref) => Some(BlockstackOperationType::PreStx(tx_ref.clone())), None => burnchain_db.find_burnchain_op(indexer, pre_stx_txid), }; @@ -1039,7 +1039,7 @@ impl Burnchain { ); let _blockstack_txs = - burnchain_db.store_new_burnchain_block(burnchain, indexer, &block, epoch_id)?; + burnchain_db.store_new_burnchain_block(burnchain, indexer, block, epoch_id)?; Burnchain::process_affirmation_maps( burnchain, burnchain_db, @@ -1111,7 +1111,7 @@ impl Burnchain { let blockstack_txs = burnchain_db.store_new_burnchain_block( burnchain, indexer, - &block, + block, cur_epoch.epoch_id, )?; diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 
1f42881ac2..d18e7d5d27 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -152,7 +152,7 @@ pub(crate) fn apply_blockstack_txs_safety_checks( ); // safety -- make sure these are in order - blockstack_txs.sort_by(|ref a, ref b| a.vtxindex().partial_cmp(&b.vtxindex()).unwrap()); + blockstack_txs.sort_by(|a, b| a.vtxindex().partial_cmp(&b.vtxindex()).unwrap()); // safety -- no duplicate vtxindex (shouldn't happen but crash if so) if blockstack_txs.len() > 1 { @@ -349,7 +349,7 @@ impl BurnchainDBTransaction<'_> { let args = params![affirmation_map.encode(), u64_to_sql(weight)?]; match self.sql_tx.execute(sql, args) { Ok(_) => { - let am_id = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &affirmation_map)? + let am_id = BurnchainDB::get_affirmation_map_id(&self.sql_tx, affirmation_map)? .expect("BUG: no affirmation ID for affirmation map we just inserted"); Ok(am_id) } @@ -1231,7 +1231,7 @@ impl BurnchainDB { self, block_header, epoch_id, - &tx, + tx, &pre_stx_ops, ); if let Some(classified_tx) = result { @@ -1409,7 +1409,7 @@ impl BurnchainDB { blockstack_ops.len() ); db_tx.store_burnchain_db_entry(block_header)?; - db_tx.store_blockstack_ops(burnchain, indexer, &block_header, blockstack_ops)?; + db_tx.store_blockstack_ops(burnchain, indexer, block_header, blockstack_ops)?; db_tx.commit()?; Ok(()) @@ -1459,7 +1459,7 @@ impl BurnchainDB { ) -> Result, DBError> { let qry = "SELECT txid FROM block_commit_metadata WHERE block_height = ?1 AND vtxindex = ?2 AND burn_block_hash = ?3"; let args = params![block_ptr, vtxindex, header_hash]; - let txid = match query_row(&conn, qry, args) { + let txid = match query_row(conn, qry, args) { Ok(Some(txid)) => txid, Ok(None) => { test_debug!( diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 3e153df53b..34cc0e3253 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -231,7 +231,7 @@ impl BurnchainTransaction { 
BurnchainTransaction::Bitcoin(ref btc) => btc .outputs .iter() - .map(|ref o| BurnchainRecipient::try_from_bitcoin_output(o)) + .map(BurnchainRecipient::try_from_bitcoin_output) .collect(), } } diff --git a/stackslib/src/burnchains/tests/affirmation.rs b/stackslib/src/burnchains/tests/affirmation.rs index eaa872876e..9d86e60fa5 100644 --- a/stackslib/src/burnchains/tests/affirmation.rs +++ b/stackslib/src/burnchains/tests/affirmation.rs @@ -331,7 +331,7 @@ pub fn make_reward_cycle_with_vote( let mut commits = vec![]; for i in 0..parent_commits.len() { let mut block_commit = make_simple_block_commit( - &burnchain, + burnchain, parent_commits[i].as_ref(), &block_header, next_block_hash(), @@ -388,7 +388,7 @@ pub fn make_reward_cycle_with_vote( block_commit.parent_vtxindex ); - if let Some(ref parent_commit) = parent_commits[i].as_ref() { + if let Some(parent_commit) = parent_commits[i].as_ref() { assert!(parent_commit.block_height != block_commit.block_height); assert!( parent_commit.block_height == u64::from(block_commit.parent_block_ptr) diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index c8f568b5bf..4576ca4863 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -554,7 +554,7 @@ pub fn make_simple_block_commit( new_op.commit_outs = vec![PoxAddress::standard_burn_address(false)]; } - if let Some(ref op) = parent { + if let Some(op) = parent { new_op.parent_block_ptr = op.block_height as u32; new_op.parent_vtxindex = op.vtxindex as u16; }; @@ -639,18 +639,14 @@ fn test_get_commit_at() { } for i in 0..5 { - let cmt = BurnchainDB::get_commit_at( - &burnchain_db.conn(), - &headers, - (first_height + i) as u32, - 0, - ) - .unwrap() - .unwrap(); + let cmt = + BurnchainDB::get_commit_at(burnchain_db.conn(), &headers, (first_height + i) as u32, 0) + .unwrap() + .unwrap(); assert_eq!(cmt, cmts[i as usize]); } - let cmt = BurnchainDB::get_commit_at(&burnchain_db.conn(), &headers, 5, 0) + 
let cmt = BurnchainDB::get_commit_at(burnchain_db.conn(), &headers, 5, 0) .unwrap() .unwrap(); assert_eq!(cmt, cmts[4]); @@ -681,12 +677,12 @@ fn test_get_commit_at() { ) .unwrap(); - let cmt = BurnchainDB::get_commit_at(&burnchain_db.conn(), &headers, 5, 0) + let cmt = BurnchainDB::get_commit_at(burnchain_db.conn(), &headers, 5, 0) .unwrap() .unwrap(); assert_eq!(cmt, cmts[4]); - let cmt = BurnchainDB::get_commit_at(&burnchain_db.conn(), &fork_headers, 5, 1) + let cmt = BurnchainDB::get_commit_at(burnchain_db.conn(), &fork_headers, 5, 1) .unwrap() .unwrap(); assert_eq!(cmt, fork_cmt); diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index ab3763dac0..dc02d0380d 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -240,7 +240,7 @@ impl TestMiner { last_sortition_hash ); match self.vrf_key_map.get(vrf_pubkey) { - Some(ref prover_key) => { + Some(prover_key) => { let proof = VRF::prove(prover_key, &last_sortition_hash.as_bytes().to_vec()); let valid = match VRF::verify(vrf_pubkey, &proof, &last_sortition_hash.as_bytes().to_vec()) @@ -422,7 +422,7 @@ impl TestBurnchainBlock { let pubks = miner .privks .iter() - .map(|ref pk| StacksPublicKey::from_private(pk)) + .map(StacksPublicKey::from_private) .collect(); let apparent_sender = BurnchainSigner::mock_parts(miner.hash_mode.clone(), miner.num_sigs as usize, pubks); @@ -623,7 +623,7 @@ impl TestBurnchainBlock { let blockstack_txs = self.txs.clone(); let burnchain_db = - BurnchainDB::connect(&burnchain.get_burnchaindb_path(), &burnchain, true).unwrap(); + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), burnchain, true).unwrap(); let new_snapshot = sortition_db_handle .process_block_txs( @@ -719,7 +719,7 @@ impl TestBurnchainFork { start_height, mined: 0, tip_header_hash: start_header_hash.clone(), - tip_sortition_id: SortitionId::stubbed(&start_header_hash), + tip_sortition_id: SortitionId::stubbed(start_header_hash), 
tip_index_root: start_index_root.clone(), blocks: vec![], pending_blocks: vec![], diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 17e2546389..d6c6dc4a6d 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -260,7 +260,7 @@ impl SortitionHandleTx<'_> { &block_header.block_hash ); - blockstack_txs.sort_by(|ref a, ref b| a.vtxindex().partial_cmp(&b.vtxindex()).unwrap()); + blockstack_txs.sort_by(|a, b| a.vtxindex().partial_cmp(&b.vtxindex()).unwrap()); // check each transaction, and filter out only the ones that are valid debug!( @@ -338,8 +338,8 @@ impl SortitionHandleTx<'_> { let new_snapshot = self.process_block_ops( mainnet, burnchain, - &parent_snapshot, - &this_block_header, + parent_snapshot, + this_block_header, blockstack_txs, next_pox_info, parent_pox, diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 74317ba639..5d753e23c3 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -875,7 +875,7 @@ pub fn get_ancestor_sort_id( None => return Ok(None), }; - ic.get_ancestor_block_hash(adjusted_height, &tip_block_hash) + ic.get_ancestor_block_hash(adjusted_height, tip_block_hash) } pub fn get_ancestor_sort_id_tx( @@ -888,7 +888,7 @@ pub fn get_ancestor_sort_id_tx( None => return Ok(None), }; - ic.get_ancestor_block_hash(adjusted_height, &tip_block_hash) + ic.get_ancestor_block_hash(adjusted_height, tip_block_hash) } /// Returns the difference between `block_height` and `context.first_block_height()`, if this @@ -1138,7 +1138,7 @@ pub trait SortitionHandle { // step back to the parent test_debug!("No parent sortition memo for {}", &sn.winning_block_txid); let block_commit = get_block_commit_by_txid( - &self.sqlite(), + self.sqlite(), &sn.sortition_id, &sn.winning_block_txid, )? 
@@ -1186,7 +1186,7 @@ impl<'a> SortitionHandleTx<'a> { chain_tip: &SortitionId, ) -> Result, db_error> { let sortition_identifier_key = db_keys::sortition_id_for_bhh(burn_header_hash); - let sortition_id = match self.get_indexed(&chain_tip, &sortition_identifier_key)? { + let sortition_id = match self.get_indexed(chain_tip, &sortition_identifier_key)? { None => return Ok(None), Some(x) => SortitionId::from_hex(&x).expect("FATAL: bad Sortition ID stored in DB"), }; @@ -1318,7 +1318,7 @@ impl<'a> SortitionHandleTx<'a> { } }; - let ancestor_hash = match self.get_indexed(&get_from, &db_keys::last_sortition())? { + let ancestor_hash = match self.get_indexed(&get_from, db_keys::last_sortition())? { Some(hex_str) => BurnchainHeaderHash::from_hex(&hex_str).unwrap_or_else(|_| { panic!( "FATAL: corrupt database: failed to parse {} into a hex string", @@ -1696,7 +1696,7 @@ impl SortitionHandleTx<'_> { } pub fn get_reward_set_size_at(&mut self, sortition_id: &SortitionId) -> Result { - self.get_indexed(sortition_id, &db_keys::pox_reward_set_size()) + self.get_indexed(sortition_id, db_keys::pox_reward_set_size()) .map(|x| { db_keys::reward_set_size_from_string( &x.expect("CORRUPTION: no current reward set size written"), @@ -1731,7 +1731,7 @@ impl SortitionHandleTx<'_> { pub fn get_last_anchor_block_hash(&mut self) -> Result, db_error> { let chain_tip = self.context.chain_tip.clone(); let anchor_block_hash = SortitionDB::parse_last_anchor_block_hash( - self.get_indexed(&chain_tip, &db_keys::pox_last_anchor())?, + self.get_indexed(&chain_tip, db_keys::pox_last_anchor())?, ); Ok(anchor_block_hash) } @@ -1739,14 +1739,14 @@ impl SortitionHandleTx<'_> { pub fn get_last_anchor_block_txid(&mut self) -> Result, db_error> { let chain_tip = self.context.chain_tip.clone(); let anchor_block_txid = SortitionDB::parse_last_anchor_block_txid( - self.get_indexed(&chain_tip, &db_keys::pox_last_anchor_txid())?, + self.get_indexed(&chain_tip, db_keys::pox_last_anchor_txid())?, ); 
Ok(anchor_block_txid) } pub fn get_sortition_affirmation_map(&mut self) -> Result { let chain_tip = self.context.chain_tip.clone(); - let affirmation_map = match self.get_indexed(&chain_tip, &db_keys::pox_affirmation_map())? { + let affirmation_map = match self.get_indexed(&chain_tip, db_keys::pox_affirmation_map())? { Some(am_str) => { AffirmationMap::decode(&am_str).expect("FATAL: corrupt affirmation map") } @@ -1760,7 +1760,7 @@ impl SortitionHandleTx<'_> { ) -> Result, db_error> { let chain_tip = self.context.chain_tip.clone(); let anchor_block_hash = SortitionDB::parse_last_anchor_block_hash( - self.get_indexed(&chain_tip, &db_keys::pox_last_selected_anchor())?, + self.get_indexed(&chain_tip, db_keys::pox_last_selected_anchor())?, ); Ok(anchor_block_hash) } @@ -1768,7 +1768,7 @@ impl SortitionHandleTx<'_> { pub fn get_last_selected_anchor_block_txid(&mut self) -> Result, db_error> { let chain_tip = self.context.chain_tip.clone(); let anchor_block_txid = SortitionDB::parse_last_anchor_block_txid( - self.get_indexed(&chain_tip, &db_keys::pox_last_selected_anchor_txid())?, + self.get_indexed(&chain_tip, db_keys::pox_last_selected_anchor_txid())?, ); Ok(anchor_block_txid) } @@ -1860,7 +1860,7 @@ impl SortitionHandleTx<'_> { let sn_current = SortitionDB::get_block_snapshot_consensus(self, &cur_ch)? .ok_or(db_error::NotFoundError)?; let sn_accepted = - SortitionDB::get_block_snapshot_consensus(self, &consensus_hash)? + SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? .ok_or(db_error::NotFoundError)?; sn_current.block_height < sn_accepted.block_height } @@ -1931,7 +1931,7 @@ impl<'a> SortitionHandleConn<'a> { connection: &'a SortitionDBConn<'a>, chain_tip: &ConsensusHash, ) -> Result, db_error> { - let sn = match SortitionDB::get_block_snapshot_consensus(&connection.conn(), chain_tip)? { + let sn = match SortitionDB::get_block_snapshot_consensus(connection.conn(), chain_tip)? 
{ Some(sn) => { if !sn.pox_valid { warn!( @@ -1952,7 +1952,7 @@ impl<'a> SortitionHandleConn<'a> { } pub fn get_reward_set_size_at(&self, sortition_id: &SortitionId) -> Result { - self.get_indexed(sortition_id, &db_keys::pox_reward_set_size()) + self.get_indexed(sortition_id, db_keys::pox_reward_set_size()) .map(|x| { db_keys::reward_set_size_from_string( &x.expect("CORRUPTION: no current reward set size written"), @@ -1962,21 +1962,21 @@ impl<'a> SortitionHandleConn<'a> { pub fn get_last_anchor_block_hash(&self) -> Result, db_error> { let anchor_block_hash = SortitionDB::parse_last_anchor_block_hash( - self.get_indexed(&self.context.chain_tip, &db_keys::pox_last_anchor())?, + self.get_indexed(&self.context.chain_tip, db_keys::pox_last_anchor())?, ); Ok(anchor_block_hash) } pub fn get_last_anchor_block_txid(&self) -> Result, db_error> { let anchor_block_txid = SortitionDB::parse_last_anchor_block_txid( - self.get_indexed(&self.context.chain_tip, &db_keys::pox_last_anchor_txid())?, + self.get_indexed(&self.context.chain_tip, db_keys::pox_last_anchor_txid())?, ); Ok(anchor_block_txid) } pub fn get_sortition_affirmation_map(&self) -> Result { let chain_tip = self.context.chain_tip.clone(); - let affirmation_map = match self.get_indexed(&chain_tip, &db_keys::pox_affirmation_map())? { + let affirmation_map = match self.get_indexed(&chain_tip, db_keys::pox_affirmation_map())? 
{ Some(am_str) => { AffirmationMap::decode(&am_str).expect("FATAL: corrupt affirmation map") } @@ -1986,17 +1986,16 @@ impl<'a> SortitionHandleConn<'a> { } pub fn get_last_selected_anchor_block_hash(&self) -> Result, db_error> { - let anchor_block_hash = SortitionDB::parse_last_anchor_block_hash(self.get_indexed( - &self.context.chain_tip, - &db_keys::pox_last_selected_anchor(), - )?); + let anchor_block_hash = SortitionDB::parse_last_anchor_block_hash( + self.get_indexed(&self.context.chain_tip, db_keys::pox_last_selected_anchor())?, + ); Ok(anchor_block_hash) } pub fn get_last_selected_anchor_block_txid(&self) -> Result, db_error> { let anchor_block_txid = SortitionDB::parse_last_anchor_block_txid(self.get_indexed( &self.context.chain_tip, - &db_keys::pox_last_selected_anchor_txid(), + db_keys::pox_last_selected_anchor_txid(), )?); Ok(anchor_block_txid) } @@ -2034,7 +2033,7 @@ impl<'a> SortitionHandleConn<'a> { pox_constants: connection.context.pox_constants.clone(), dryrun: connection.context.dryrun, }, - index: &connection.index, + index: connection.index, }) } @@ -2125,7 +2124,7 @@ impl<'a> SortitionHandleConn<'a> { let block_commit = match SortitionDB::get_block_commit_for_stacks_block( self.conn(), consensus_hash, - &block_hash, + block_hash, )? { Some(bc) => bc, None => { @@ -2197,7 +2196,7 @@ impl<'a> SortitionHandleConn<'a> { } }; - let ancestor_hash = match self.get_indexed(&get_from, &db_keys::last_sortition())? { + let ancestor_hash = match self.get_indexed(&get_from, db_keys::last_sortition())? { Some(hex_str) => BurnchainHeaderHash::from_hex(&hex_str).unwrap_or_else(|_| { panic!( "FATAL: corrupt database: failed to parse {} into a hex string", @@ -2219,7 +2218,7 @@ impl<'a> SortitionHandleConn<'a> { /// Get the latest block snapshot on this fork where a sortition occured. pub fn get_last_snapshot_with_sortition_from_tip(&self) -> Result { let ancestor_hash = - match self.get_indexed(&self.context.chain_tip, &db_keys::last_sortition())? 
{ + match self.get_indexed(&self.context.chain_tip, db_keys::last_sortition())? { Some(hex_str) => BurnchainHeaderHash::from_hex(&hex_str).unwrap_or_else(|_| { panic!( "FATAL: corrupt database: failed to parse {} into a hex string", @@ -2716,7 +2715,7 @@ impl SortitionDB { })?, }; - let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &burn_view)? + let snapshot = SortitionDB::get_block_snapshot_consensus(self.conn(), &burn_view)? .ok_or(db_error::NotFoundError)?; Ok(self.index_handle(&snapshot.sortition_id)) } @@ -2985,7 +2984,7 @@ impl SortitionDB { }); let new_epoch_idx = - StacksEpoch::find_epoch(&epochs, tip.block_height).unwrap_or_else(|| { + StacksEpoch::find_epoch(epochs, tip.block_height).unwrap_or_else(|| { panic!( "FATAL: Sortition tip {} has no epoch in the configured epochs list", tip.block_height @@ -3142,7 +3141,7 @@ impl SortitionDB { let index_path = db_mkdirs(path)?; let marf = SortitionDB::open_index(&index_path)?; let sql = "SELECT MAX(block_height) FROM snapshots"; - Ok(query_rows(&marf.sqlite_conn(), sql, NO_PARAMS)? + Ok(query_rows(marf.sqlite_conn(), sql, NO_PARAMS)? 
.pop() .expect("BUG: no snapshots in block_snapshots")) } @@ -3182,7 +3181,7 @@ impl SortitionDB { tx.execute_batch(sql_exec)?; } - SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + SortitionDB::validate_and_insert_epochs(tx, epochs)?; tx.execute( "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", @@ -3239,7 +3238,7 @@ impl SortitionDB { tx.execute_batch(sql_exec)?; } - SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + SortitionDB::validate_and_insert_epochs(tx, epochs)?; tx.execute( "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", @@ -3255,7 +3254,7 @@ impl SortitionDB { tx.execute_batch(sql_exec)?; } - SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + SortitionDB::validate_and_insert_epochs(tx, epochs)?; tx.execute( "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", @@ -3271,7 +3270,7 @@ impl SortitionDB { tx.execute_batch(sql_exec)?; } - SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + SortitionDB::validate_and_insert_epochs(tx, epochs)?; tx.execute( "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", @@ -3292,7 +3291,7 @@ impl SortitionDB { tx.execute_batch(sql_exec)?; } - SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + SortitionDB::validate_and_insert_epochs(tx, epochs)?; Ok(()) } @@ -3407,7 +3406,7 @@ impl SortitionDB { tx.execute_batch(sql_exec)?; } - SortitionDB::validate_and_replace_epochs(&tx, epochs)?; + SortitionDB::validate_and_replace_epochs(tx, epochs)?; tx.execute( "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", @@ -3448,38 +3447,38 @@ impl SortitionDB { Ok(Some(version)) => { if version == "1" { let tx = self.tx_begin()?; - SortitionDB::apply_schema_2(&tx.deref(), epochs)?; + SortitionDB::apply_schema_2(tx.deref(), epochs)?; tx.commit()?; } else if version == "2" { // add the tables of schema 3, but do not populate them. 
let tx = self.tx_begin()?; - SortitionDB::apply_schema_3(&tx.deref())?; + SortitionDB::apply_schema_3(tx.deref())?; tx.commit()?; } else if version == "3" { let tx = self.tx_begin()?; - SortitionDB::apply_schema_4(&tx.deref())?; + SortitionDB::apply_schema_4(tx.deref())?; tx.commit()?; } else if version == "4" { let tx = self.tx_begin()?; - SortitionDB::apply_schema_5(&tx.deref(), epochs)?; + SortitionDB::apply_schema_5(tx.deref(), epochs)?; tx.commit()?; } else if version == "5" { let tx = self.tx_begin()?; - SortitionDB::apply_schema_6(&tx.deref(), epochs)?; + SortitionDB::apply_schema_6(tx.deref(), epochs)?; tx.commit()?; } else if version == "6" { let tx = self.tx_begin()?; - SortitionDB::apply_schema_7(&tx.deref(), epochs)?; + SortitionDB::apply_schema_7(tx.deref(), epochs)?; tx.commit()?; } else if version == "7" { let tx = self.tx_begin()?; - SortitionDB::apply_schema_8_tables(&tx.deref(), epochs)?; + SortitionDB::apply_schema_8_tables(tx.deref(), epochs)?; tx.commit()?; self.apply_schema_8_migration(migrator.take())?; } else if version == "8" { let tx = self.tx_begin()?; - SortitionDB::apply_schema_9(&tx.deref(), epochs)?; + SortitionDB::apply_schema_9(tx.deref(), epochs)?; tx.commit()?; } else if version == expected_version { // this transaction is almost never needed @@ -3676,7 +3675,7 @@ impl SortitionDB { /// Get the number of entries in the reward set, given a sortition ID within the reward cycle /// for which this set is active. pub fn get_preprocessed_reward_set_size(&self, tip: &SortitionId) -> Option { - let Ok(reward_info) = &self.get_preprocessed_reward_set_of(&tip) else { + let Ok(reward_info) = &self.get_preprocessed_reward_set_of(tip) else { return None; }; let Some(reward_set) = reward_info.known_selected_anchor_block() else { @@ -3704,7 +3703,7 @@ impl SortitionDBTx<'_> { &mut self, chain_tip: &SortitionId, ) -> Result { - let affirmation_map = match self.get_indexed(chain_tip, &db_keys::pox_affirmation_map())? 
{ + let affirmation_map = match self.get_indexed(chain_tip, db_keys::pox_affirmation_map())? { Some(am_str) => { AffirmationMap::decode(&am_str).expect("FATAL: corrupt affirmation map") } @@ -3836,41 +3835,40 @@ impl SortitionDBConn<'_> { block_hash: &BlockHeaderHash, ) -> Result, db_error> { let db_handle = SortitionHandleConn::open_reader_consensus(self, consensus_hash)?; - let parent_block_snapshot = match db_handle - .get_block_snapshot_of_parent_stacks_block(consensus_hash, &block_hash) - { - Ok(Some((_, sn))) => { - debug!( - "Parent of {}/{} is {}/{}", - consensus_hash, block_hash, sn.consensus_hash, sn.winning_stacks_block_hash - ); - sn - } - Ok(None) => { - debug!( - "Received block with unknown parent snapshot: {}/{}", - consensus_hash, block_hash, - ); - return Ok(None); - } - Err(db_error::InvalidPoxSortition) => { - warn!( - "Received block {}/{} on a non-canonical PoX sortition", - consensus_hash, block_hash, - ); - return Ok(None); - } - Err(e) => { - return Err(e); - } - }; + let parent_block_snapshot = + match db_handle.get_block_snapshot_of_parent_stacks_block(consensus_hash, block_hash) { + Ok(Some((_, sn))) => { + debug!( + "Parent of {}/{} is {}/{}", + consensus_hash, block_hash, sn.consensus_hash, sn.winning_stacks_block_hash + ); + sn + } + Ok(None) => { + debug!( + "Received block with unknown parent snapshot: {}/{}", + consensus_hash, block_hash, + ); + return Ok(None); + } + Err(db_error::InvalidPoxSortition) => { + warn!( + "Received block {}/{} on a non-canonical PoX sortition", + consensus_hash, block_hash, + ); + return Ok(None); + } + Err(e) => { + return Err(e); + } + }; Ok(Some(parent_block_snapshot)) } #[cfg_attr(test, mutants::skip)] pub fn get_reward_set_size_at(&mut self, sortition_id: &SortitionId) -> Result { - self.get_indexed(sortition_id, &db_keys::pox_reward_set_size()) + self.get_indexed(sortition_id, db_keys::pox_reward_set_size()) .map(|x| { db_keys::reward_set_size_from_string( &x.expect("CORRUPTION: no current reward 
set size written"), @@ -4246,7 +4244,7 @@ impl SortitionDB { /// Compute the next PoX ID pub fn make_next_pox_id(parent_pox: PoxId, next_pox_info: Option<&RewardCycleInfo>) -> PoxId { let mut next_pox = parent_pox; - if let Some(ref next_pox_info) = next_pox_info { + if let Some(next_pox_info) = next_pox_info { if next_pox_info.is_reward_info_known() { info!( "Begin reward-cycle sortition with present anchor block={:?}", @@ -5370,7 +5368,7 @@ impl SortitionDB { } }; - let ancestor_hash = match tx.get_indexed(&get_from, &db_keys::last_sortition())? { + let ancestor_hash = match tx.get_indexed(&get_from, db_keys::last_sortition())? { Some(hex_str) => BurnchainHeaderHash::from_hex(&hex_str).unwrap_or_else(|_| { panic!( "FATAL: corrupt database: failed to parse {} into a hex string", @@ -5513,7 +5511,7 @@ impl SortitionHandleTx<'_> { &mut self, chain_tip: &SortitionId, ) -> Result { - self.get_indexed(&chain_tip, db_keys::initial_mining_bonus_remaining())? + self.get_indexed(chain_tip, db_keys::initial_mining_bonus_remaining())? .map(|s| Ok(s.parse().expect("BUG: bad mining bonus stored in DB"))) .unwrap_or(Ok(0)) } @@ -5523,7 +5521,7 @@ impl SortitionHandleTx<'_> { chain_tip: &SortitionId, ) -> Result, db_error> { Ok(self - .get_indexed(&chain_tip, db_keys::initial_mining_bonus_per_block())? + .get_indexed(chain_tip, db_keys::initial_mining_bonus_per_block())? 
.map(|s| s.parse().expect("BUG: bad mining bonus stored in DB"))) } @@ -6348,7 +6346,7 @@ impl SortitionHandleTx<'_> { .expect("FATAL: zero-length list of tied block IDs"); let winner_index = *mapping - .get(&winner) + .get(winner) .expect("FATAL: winning block ID not mapped"); Some(winner_index) @@ -6873,7 +6871,7 @@ pub mod tests { let leader_key_sql = "SELECT * FROM leader_keys WHERE txid = ?1 LIMIT 1"; let args = [&txid]; - let leader_key_res = query_row_panic(conn, &leader_key_sql, &args, || { + let leader_key_res = query_row_panic(conn, leader_key_sql, &args, || { "Multiple leader keys with same txid".to_string() })?; if let Some(leader_key) = leader_key_res { @@ -6883,7 +6881,7 @@ pub mod tests { // block commit? let block_commit_sql = "SELECT * FROM block_commits WHERE txid = ?1 LIMIT 1"; - let block_commit_res = query_row_panic(conn, &block_commit_sql, &args, || { + let block_commit_res = query_row_panic(conn, block_commit_sql, &args, || { "Multiple block commits with same txid".to_string() })?; if let Some(block_commit) = block_commit_res { diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index 0d94c7e78d..8d0b9cc216 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -379,7 +379,7 @@ impl BurnSamplePoint { // total burns for valid blocks? // NOTE: this can't overflow -- there's no way we get that many (u64) burns - let total_burns_u128 = BurnSamplePoint::get_total_burns(&burn_sample).unwrap() as u128; + let total_burns_u128 = BurnSamplePoint::get_total_burns(burn_sample).unwrap() as u128; let total_burns = Uint512::from_u128(total_burns_u128); // determine range start/end for each sample. 
diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 136e4d4a75..4e4f6d8cf1 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -547,7 +547,7 @@ impl RewardSetInfo { ) -> Result, op_error> { // did this block-commit pay to the correct PoX addresses? let intended_recipients = tx - .get_reward_set_payouts_at(&intended_sortition) + .get_reward_set_payouts_at(intended_sortition) .map_err(|_e| op_error::BlockCommitBadOutputs)? .0; let block_height = SortitionDB::get_block_snapshot(tx.tx(), intended_sortition) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 122aedbefb..0e0846db38 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -798,12 +798,12 @@ pub fn get_reward_cycle_info( None }; - ic.get_chosen_pox_anchor(burnchain_db_conn_opt, &parent_bhh, &burnchain.pox_constants) + ic.get_chosen_pox_anchor(burnchain_db_conn_opt, parent_bhh, &burnchain.pox_constants) }?; let reward_cycle_info = if let Some((consensus_hash, stacks_block_hash, txid)) = reward_cycle_info { let anchor_block_known = StacksChainState::is_stacks_block_processed( - &chain_state.db(), + chain_state.db(), &consensus_hash, &stacks_block_hash, )?; @@ -966,7 +966,7 @@ fn forget_orphan_stacks_blocks( burn_header: &BurnchainHeaderHash, invalidation_height: u64, ) -> Result<(), Error> { - if let Ok(sns) = SortitionDB::get_all_snapshots_for_burn_block(&sort_conn, &burn_header) { + if let Ok(sns) = SortitionDB::get_all_snapshots_for_burn_block(sort_conn, burn_header) { for sn in sns.into_iter() { // only retry blocks that are truly in descendant // sortitions. 
@@ -1140,12 +1140,12 @@ impl< let mut ret = Vec::with_capacity(sort_ids.len()); for sort_id in sort_ids.iter() { - let sn = SortitionDB::get_block_snapshot(self.sortition_db.conn(), &sort_id)? + let sn = SortitionDB::get_block_snapshot(self.sortition_db.conn(), sort_id)? .expect("FATAL: have sortition ID without snapshot"); let sort_am = self .sortition_db - .find_sortition_tip_affirmation_map(&sort_id)?; + .find_sortition_tip_affirmation_map(sort_id)?; ret.push((sn, sort_am)); } @@ -1475,16 +1475,14 @@ impl< let mut found = false; for (sn, sn_am) in snapshots_and_ams.into_iter() { debug!( - "Snapshot {} height {} has AM `{}` (is prefix of `{}`?: {})", + "Snapshot {} height {} has AM `{sn_am}` (is prefix of `{compare_am}`?: {})", &sn.sortition_id, sn.block_height, - &sn_am, - &compare_am, &compare_am.has_prefix(&sn_am), ); if compare_am.has_prefix(&sn_am) { // have already processed this sortitoin - debug!("Already processed sortition {} at height {} with AM `{}` on comparative affirmation map {}", &sn.sortition_id, sn.block_height, &sn_am, &compare_am); + debug!("Already processed sortition {} at height {} with AM `{sn_am}` on comparative affirmation map {compare_am}", &sn.sortition_id, sn.block_height); found = true; last_invalidate_start_block = height; debug!( @@ -1563,12 +1561,10 @@ impl< for sort_id in sort_ids.iter() { let sort_am = self .sortition_db - .find_sortition_tip_affirmation_map(&sort_id)?; + .find_sortition_tip_affirmation_map(sort_id)?; debug!( - "Compare {} as prefix of {}? {}", - &compare_am, - &sort_am, + "Compare {compare_am} as prefix of {sort_am}? {}", compare_am.has_prefix(&sort_am) ); if compare_am.has_prefix(&sort_am) { @@ -1590,14 +1586,14 @@ impl< if prior_compare_am.has_prefix(&prior_sort_am) { // this is the first reward cycle where history diverged. 
found_diverged = true; - debug!("{} diverges from {}", &sort_am, &compare_am); + debug!("{sort_am} diverges from {compare_am}"); // careful -- we might have already procesed sortitions in this // reward cycle with this PoX ID, but that were never confirmed // by a subsequent prepare phase. let (new_last_invalidate_start_block, mut next_valid_sortitions) = self .find_valid_sortitions( - &compare_am, + compare_am, last_invalidate_start_block, canonical_burnchain_tip.block_height, )?; @@ -1666,7 +1662,7 @@ impl< &burn_header.block_hash, burn_header.block_height ); forget_orphan_stacks_blocks( - &ic, + ic, chainstate_db_tx, &burn_header.block_hash, burn_height.saturating_sub(1), @@ -1728,8 +1724,8 @@ impl< let last_2_05_rc = self.sortition_db.get_last_epoch_2_05_reward_cycle()?; let sortition_height = - SortitionDB::get_block_snapshot(self.sortition_db.conn(), &sortition_tip)? - .unwrap_or_else(|| panic!("FATAL: no sortition {}", &sortition_tip)) + SortitionDB::get_block_snapshot(self.sortition_db.conn(), sortition_tip)? + .unwrap_or_else(|| panic!("FATAL: no sortition {sortition_tip}")) .block_height; let sortition_reward_cycle = self @@ -1737,19 +1733,18 @@ impl< .block_height_to_reward_cycle(sortition_height) .unwrap_or(0); - let heaviest_am = self.get_heaviest_affirmation_map(&sortition_tip)?; + let heaviest_am = self.get_heaviest_affirmation_map(sortition_tip)?; if let Some(changed_reward_cycle) = self.check_chainstate_against_burnchain_affirmations()? 
{ debug!( - "Canonical sortition tip is {} height {} (rc {}); changed reward cycle is {}", - &sortition_tip, sortition_height, sortition_reward_cycle, changed_reward_cycle + "Canonical sortition tip is {sortition_tip} height {sortition_height} (rc {sortition_reward_cycle}); changed reward cycle is {changed_reward_cycle}" ); if changed_reward_cycle >= sortition_reward_cycle { // nothing we can do - debug!("Changed reward cycle is {} but canonical sortition is in {}, so no affirmation reorg is possible", &changed_reward_cycle, sortition_reward_cycle); + debug!("Changed reward cycle is {changed_reward_cycle} but canonical sortition is in {sortition_reward_cycle}, so no affirmation reorg is possible"); return Ok(()); } @@ -1776,10 +1771,10 @@ impl< // If the sortition AM is not consistent with the canonical AM, then it // means that we have new anchor blocks to consider let canonical_affirmation_map = - self.get_canonical_affirmation_map(&sortition_tip)?; + self.get_canonical_affirmation_map(sortition_tip)?; let sort_am = self .sortition_db - .find_sortition_tip_affirmation_map(&sortition_tip)?; + .find_sortition_tip_affirmation_map(sortition_tip)?; let revalidation_params = if canonical_affirmation_map.len() == sort_am.len() && canonical_affirmation_map != sort_am @@ -1788,8 +1783,7 @@ impl< canonical_affirmation_map.find_divergence(&sort_am) { debug!( - "Sortition AM `{}` diverges from canonical AM `{}` at cycle {}", - &sort_am, &canonical_affirmation_map, diverged_rc + "Sortition AM `{sort_am}` diverges from canonical AM `{canonical_affirmation_map}` at cycle {diverged_rc}" ); let (last_invalid_sortition_height, valid_sortitions) = self .find_valid_sortitions( @@ -1811,8 +1805,7 @@ impl< }; if let Some(x) = revalidation_params { debug!( - "Sortition AM `{}` is not consistent with canonical AM `{}`", - &sort_am, &canonical_affirmation_map + "Sortition AM `{sort_am}` is not consistent with canonical AM `{canonical_affirmation_map}`" ); x } else { @@ -1826,16 +1819,16 
@@ impl< let (canonical_ch, canonical_bhh, canonical_height) = Self::find_highest_stacks_block_with_compatible_affirmation_map( &heaviest_am, - &sortition_tip, + sortition_tip, &self.burnchain_blocks_db, &mut sort_tx, - &self.chain_state_db.db(), + self.chain_state_db.db(), )?; let stacks_am = inner_static_get_stacks_tip_affirmation_map( &self.burnchain_blocks_db, last_2_05_rc, - &sort_tx.find_sortition_tip_affirmation_map(&sortition_tip)?, + &sort_tx.find_sortition_tip_affirmation_map(sortition_tip)?, &sort_tx, &canonical_ch, &canonical_bhh, @@ -1845,7 +1838,7 @@ impl< SortitionDB::revalidate_snapshot_with_block( &sort_tx, - &sortition_tip, + sortition_tip, &canonical_ch, &canonical_bhh, canonical_height, @@ -1859,7 +1852,7 @@ impl< // check valid_sortitions -- it may correspond to a range of sortitions beyond our // current highest-valid sortition (in which case, *do not* revalidate them) - let valid_sortitions = if let Some(ref first_sn) = valid_sortitions.first() { + let valid_sortitions = if let Some(first_sn) = valid_sortitions.first() { if first_sn.block_height > sortition_height { debug!("No sortitions to revalidate: highest is {},{}, first candidate is {},{}. Will not revalidate.", sortition_height, &sortition_tip, first_sn.block_height, &first_sn.sortition_id); vec![] @@ -1917,7 +1910,7 @@ impl< let invalidate_sn = SortitionDB::get_ancestor_snapshot( &ic, last_invalidate_start_block - 1, - &sortition_tip, + sortition_tip, )? .unwrap_or_else(|| { panic!( @@ -1953,7 +1946,7 @@ impl< |sort_tx| { // no more sortitions to invalidate -- all now-incompatible // sortitions have been invalidated. 
- let (canonical_ch, canonical_bhh, canonical_height) = Self::find_highest_stacks_block_with_compatible_affirmation_map(&heaviest_am, &highest_valid_sortition_id, &self.burnchain_blocks_db, sort_tx, &chainstate_db_conn) + let (canonical_ch, canonical_bhh, canonical_height) = Self::find_highest_stacks_block_with_compatible_affirmation_map(&heaviest_am, &highest_valid_sortition_id, &self.burnchain_blocks_db, sort_tx, chainstate_db_conn) .expect("FATAL: could not find a valid parent Stacks block"); let stacks_am = inner_static_get_stacks_tip_affirmation_map( @@ -1972,7 +1965,7 @@ impl< for valid_sn in valid_sortitions.iter() { test_debug!("Revalidate snapshot {},{}", valid_sn.block_height, &valid_sn.sortition_id); let block_known = StacksChainState::is_stacks_block_processed( - &chainstate_db_conn, + chainstate_db_conn, &valid_sn.consensus_hash, &valid_sn.winning_stacks_block_hash, ).expect("FATAL: failed to query chainstate DB"); @@ -1986,7 +1979,7 @@ impl< let invalidate_sn = SortitionDB::get_ancestor_snapshot_tx( sort_tx, last_invalidate_start_block - 1, - &sortition_tip, + sortition_tip, ) .expect("FATAL: failed to query the sortition DB") .unwrap_or_else(|| panic!("BUG: no ancestral sortition at height {}", @@ -2003,7 +1996,7 @@ impl< }; // recalculate highest valid stacks tip - let (canonical_ch, canonical_bhh, canonical_height) = Self::find_highest_stacks_block_with_compatible_affirmation_map(&heaviest_am, &highest_valid_sortition_id, &self.burnchain_blocks_db, sort_tx, &chainstate_db_conn) + let (canonical_ch, canonical_bhh, canonical_height) = Self::find_highest_stacks_block_with_compatible_affirmation_map(&heaviest_am, &highest_valid_sortition_id, &self.burnchain_blocks_db, sort_tx, chainstate_db_conn) .expect("FATAL: could not find a valid parent Stacks block"); let stacks_am = inner_static_get_stacks_tip_affirmation_map( @@ -2030,7 +2023,7 @@ impl< .expect("FATAL: no such dirty sortition"); let block_known = StacksChainState::is_stacks_block_processed( - 
&chainstate_db_conn, + chainstate_db_conn, &dirty_sort_sn.consensus_hash, &dirty_sort_sn.winning_stacks_block_hash, ).expect("FATAL: failed to query chainstate DB"); @@ -2040,7 +2033,7 @@ impl< } // recalculate highest valid stacks tip once more - let (canonical_ch, canonical_bhh, canonical_height) = Self::find_highest_stacks_block_with_compatible_affirmation_map(&heaviest_am, &highest_valid_sortition_id, &self.burnchain_blocks_db, sort_tx, &chainstate_db_conn) + let (canonical_ch, canonical_bhh, canonical_height) = Self::find_highest_stacks_block_with_compatible_affirmation_map(&heaviest_am, &highest_valid_sortition_id, &self.burnchain_blocks_db, sort_tx, chainstate_db_conn) .expect("FATAL: could not find a valid parent Stacks block"); let stacks_am = inner_static_get_stacks_tip_affirmation_map( @@ -2060,7 +2053,7 @@ impl< .expect("FATAL: highest valid sortition ID does not have a snapshot"); let block_known = StacksChainState::is_stacks_block_processed( - &chainstate_db_conn, + chainstate_db_conn, &highest_valid_sn.consensus_hash, &highest_valid_sn.winning_stacks_block_hash, ).expect("FATAL: failed to query chainstate DB"); @@ -2086,7 +2079,7 @@ impl< // un-orphan blocks that had been orphaned but were tied to this now-revalidated sortition history Self::undo_stacks_block_orphaning( - &self.burnchain_blocks_db.conn(), + self.burnchain_blocks_db.conn(), &self.burnchain_indexer, &ic, &mut chainstate_db_tx, @@ -2102,7 +2095,7 @@ impl< .map_err(|e| DBError::SqliteError(e))?; let highest_valid_snapshot = SortitionDB::get_block_snapshot( - &self.sortition_db.conn(), + self.sortition_db.conn(), &highest_valid_sortition_id, )? .expect("FATAL: highest valid sortition doesn't exist"); @@ -2131,7 +2124,7 @@ impl< self.canonical_sortition_tip = Some(highest_valid_snapshot.sortition_id); } else { let highest_valid_snapshot = - SortitionDB::get_block_snapshot(&self.sortition_db.conn(), &sortition_tip)? + SortitionDB::get_block_snapshot(self.sortition_db.conn(), sortition_tip)? 
.expect("FATAL: highest valid sortition doesn't exist"); let stacks_tip_affirmation_map = static_get_stacks_tip_affirmation_map( @@ -2181,7 +2174,7 @@ impl< test_debug!( "Verify affirmation against PoX info in reward cycle {} canonical affirmation map {}", new_reward_cycle, - &canonical_affirmation_map + canonical_affirmation_map ); let new_status = if new_reward_cycle > 0 @@ -2195,7 +2188,7 @@ impl< .at(affirmed_rc) .expect("BUG: checked index overflow") .to_owned(); - test_debug!("Affirmation '{}' for anchor block of previous reward cycle {} canonical affirmation map {}", &affirmation, affirmed_rc, &canonical_affirmation_map); + test_debug!("Affirmation '{affirmation}' for anchor block of previous reward cycle {affirmed_rc} canonical affirmation map {canonical_affirmation_map}"); // switch reward cycle info assessment based on what the network // affirmed. @@ -2213,7 +2206,7 @@ impl< AffirmationMapEntry::PoxAnchorBlockAbsent => { // network actually affirms that this anchor block // is absent. - warn!("Chose PoX anchor block for reward cycle {}, but it is affirmed absent by the network", affirmed_rc; "affirmation map" => %&canonical_affirmation_map); + warn!("Chose PoX anchor block for reward cycle {affirmed_rc}, but it is affirmed absent by the network"; "affirmation map" => %&canonical_affirmation_map); PoxAnchorBlockStatus::SelectedAndUnknown( block_hash.clone(), txid.clone(), @@ -2232,7 +2225,7 @@ impl< // exists, but we don't have it locally. Stop // processing here and wait for it to arrive, via // the downloader. 
- info!("Anchor block {} (txid {}) for reward cycle {} is affirmed by the network ({}), but must be downloaded", block_hash, txid, affirmed_rc, canonical_affirmation_map); + info!("Anchor block {block_hash} (txid {txid}) for reward cycle {affirmed_rc} is affirmed by the network ({canonical_affirmation_map}), but must be downloaded"); return Ok(Some(block_hash.clone())); } AffirmationMapEntry::PoxAnchorBlockAbsent => { @@ -2369,20 +2362,19 @@ impl< // NOTE: this mutates rc_info if it returns None if let Some(missing_anchor_block) = self.reinterpret_affirmed_pox_anchor_block_status( - &canonical_affirmation_map, - &header, + canonical_affirmation_map, + header, rc_info, )? { if self.config.require_affirmed_anchor_blocks { // missing this anchor block -- cannot proceed until we have it info!( - "Burnchain block processing stops due to missing affirmed anchor stacks block hash {}", - &missing_anchor_block + "Burnchain block processing stops due to missing affirmed anchor stacks block hash {missing_anchor_block}" ); return Ok(Some(missing_anchor_block)); } else { // this and descendant sortitions might already exist - info!("Burnchain block processing will continue in spite of missing affirmed anchor stacks block hash {}", &missing_anchor_block); + info!("Burnchain block processing will continue in spite of missing affirmed anchor stacks block hash {missing_anchor_block}"); } } } @@ -2429,7 +2421,7 @@ impl< sn_tip ) }), - None => SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())?, + None => SortitionDB::get_canonical_burn_chain_tip(self.sortition_db.conn())?, }; let target_epoch_index = StacksEpoch::find_epoch(&epochs, canonical_snapshot.block_height) .expect("FATAL: epoch not defined for BlockSnapshot height"); @@ -2491,7 +2483,7 @@ impl< sn_tip ) }), - None => SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())?, + None => SortitionDB::get_canonical_burn_chain_tip(self.sortition_db.conn())?, }; let cur_epoch = 
SortitionDB::get_stacks_epoch( self.sortition_db.conn(), @@ -2517,7 +2509,7 @@ impl< sn_tip ) }), - None => SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())?, + None => SortitionDB::get_canonical_burn_chain_tip(self.sortition_db.conn())?, }; let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; @@ -2537,15 +2529,12 @@ impl< // We halt the ancestry research as soon as we find a processed parent let mut last_processed_ancestor = loop { if let Some(found_sortition) = self.sortition_db.is_sortition_processed(&cursor)? { - debug!( - "Ancestor sortition {} of block {} is processed", - &found_sortition, &cursor - ); + debug!("Ancestor sortition {found_sortition} of block {cursor} is processed"); break found_sortition; } let current_block = - BurnchainDB::get_burnchain_block(&self.burnchain_blocks_db.conn(), &cursor) + BurnchainDB::get_burnchain_block(self.burnchain_blocks_db.conn(), &cursor) .map_err(|e| { warn!( "ChainsCoordinator: could not retrieve block burnhash={}", @@ -2665,7 +2654,7 @@ impl< if sortition.sortition { if let Some(stacks_block_header) = StacksChainState::get_stacks_block_header_info_by_index_block_hash( - &self.chain_state_db.db(), + self.chain_state_db.db(), &StacksBlockId::new( &sortition.consensus_hash, &sortition.winning_stacks_block_hash, @@ -2858,7 +2847,7 @@ impl< &highest_valid_sortition_id, &self.burnchain_blocks_db, &mut sort_tx, - &chainstate_db_conn, + chainstate_db_conn, ) .expect("FATAL: could not find a valid parent Stacks block"); @@ -2886,7 +2875,7 @@ impl< .expect("FATAL: no snapshot for highest valid sortition ID"); let block_known = StacksChainState::is_stacks_block_processed( - &chainstate_db_conn, + chainstate_db_conn, &highest_valid_sn.consensus_hash, &highest_valid_sn.winning_stacks_block_hash, ) @@ -3113,7 +3102,7 @@ impl< ); self.replay_stacks_blocks( - &canonical_snapshot, + canonical_snapshot, vec![next_snapshot.winning_stacks_block_hash.clone()], )?; } @@ -3212,11 +3201,11 
@@ impl< ) -> Result, Error> { // use affirmation maps even if they're not supported yet. // if the chain is healthy, this won't cause a chain split. - match self.check_pox_anchor_affirmation(pox_anchor, &pox_anchor_snapshot) { + match self.check_pox_anchor_affirmation(pox_anchor, pox_anchor_snapshot) { Ok(Some(pox_anchor)) => { // yup, affirmed. Report it for subsequent reward cycle calculation. let block_id = StacksBlockId::new(&pox_anchor_snapshot.consensus_hash, &pox_anchor); - if !StacksChainState::has_stacks_block(&self.chain_state_db.db(), &block_id)? { + if !StacksChainState::has_stacks_block(self.chain_state_db.db(), &block_id)? { debug!( "Have NOT processed anchor block {}/{}", &pox_anchor_snapshot.consensus_hash, pox_anchor @@ -3496,42 +3485,36 @@ pub fn check_chainstate_db_versions( let mut cur_epoch_opt = None; if fs::metadata(&sortdb_path).is_ok() { // check sortition DB and load up the current epoch - let max_height = SortitionDB::get_highest_block_height_from_path(&sortdb_path) + let max_height = SortitionDB::get_highest_block_height_from_path(sortdb_path) .expect("FATAL: could not query sortition DB for maximum block height"); let cur_epoch_idx = StacksEpoch::find_epoch(epochs, max_height) - .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", max_height)); + .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {max_height}")); let cur_epoch = epochs[cur_epoch_idx].epoch_id; // save for later cur_epoch_opt = Some(cur_epoch.clone()); - let db_version = SortitionDB::get_db_version_from_path(&sortdb_path)? + let db_version = SortitionDB::get_db_version_from_path(sortdb_path)? 
.expect("FATAL: could not load sortition DB version"); if !SortitionDB::is_db_version_supported_in_epoch(cur_epoch, &db_version) { - error!( - "Sortition DB at {} does not support epoch {}", - &sortdb_path, cur_epoch - ); + error!("Sortition DB at {sortdb_path} does not support epoch {cur_epoch}"); return Ok(false); } } else { warn!("Sortition DB {} does not exist; assuming it will be instantiated with the correct version", sortdb_path); } - if fs::metadata(&chainstate_path).is_ok() { + if fs::metadata(chainstate_path).is_ok() { let cur_epoch = cur_epoch_opt.expect( "FATAL: chainstate corruption: sortition DB does not exist, but chainstate does.", ); - let db_config = StacksChainState::get_db_config_from_path(&chainstate_path)?; + let db_config = StacksChainState::get_db_config_from_path(chainstate_path)?; if !db_config.supports_epoch(cur_epoch) { - error!( - "Chainstate DB at {} does not support epoch {}", - &chainstate_path, cur_epoch - ); + error!("Chainstate DB at {chainstate_path} does not support epoch {cur_epoch}"); return Ok(false); } } else { - warn!("Chainstate DB {} does not exist; assuming it will be instantiated with the correct version", chainstate_path); + warn!("Chainstate DB {chainstate_path} does not exist; assuming it will be instantiated with the correct version"); } Ok(true) @@ -3554,7 +3537,7 @@ impl SortitionDBMigrator { chainstate_path: &str, marf_opts: Option, ) -> Result { - let db_config = StacksChainState::get_db_config_from_path(&chainstate_path)?; + let db_config = StacksChainState::get_db_config_from_path(chainstate_path)?; let (chainstate, _) = StacksChainState::open( db_config.mainnet, db_config.chain_id, @@ -3647,11 +3630,11 @@ pub fn migrate_chainstate_dbs( chainstate_path, chainstate_marf_opts.clone(), )?; - SortitionDB::migrate_if_exists(&sortdb_path, epochs, migrator)?; + SortitionDB::migrate_if_exists(sortdb_path, epochs, migrator)?; } if fs::metadata(&chainstate_path).is_ok() { info!("Migrating chainstate DB to the latest 
schema version"); - let db_config = StacksChainState::get_db_config_from_path(&chainstate_path)?; + let db_config = StacksChainState::get_db_config_from_path(chainstate_path)?; // this does the migration internally let _ = StacksChainState::open( diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 0863708122..7c05a9537d 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -128,7 +128,7 @@ pub fn produce_burn_block<'a, I: Iterator>( ) -> BurnchainHeaderHash { let BurnchainBlockData { header: par_header, .. - } = BurnchainDB::get_burnchain_block(&burnchain_db.conn(), par).unwrap(); + } = BurnchainDB::get_burnchain_block(burnchain_db.conn(), par).unwrap(); assert_eq!(&par_header.block_hash, par); let block_height = par_header.block_height + 1; for op in ops.iter_mut() { @@ -159,7 +159,7 @@ fn produce_burn_block_do_not_set_height<'a, I: Iterator BurnchainHeaderHash { let BurnchainBlockData { header: par_header, .. 
- } = BurnchainDB::get_burnchain_block(&burnchain_db.conn(), par).unwrap(); + } = BurnchainDB::get_burnchain_block(burnchain_db.conn(), par).unwrap(); assert_eq!(&par_header.block_hash, par); let block_height = par_header.block_height + 1; let timestamp = par_header.timestamp + 1; @@ -902,7 +902,7 @@ fn make_stacks_block_with_input( eprintln!( "Find parents stacks header: {} in sortition {} (height {}, parent {}/{},{}, index block hash {})", - &parent_block, &parents_sortition.sortition_id, parents_sortition.block_height, &parents_sortition.consensus_hash, parent_block, parent_height, &StacksBlockHeader::make_index_block_hash(&parents_sortition.consensus_hash, &parent_block) + &parent_block, &parents_sortition.sortition_id, parents_sortition.block_height, &parents_sortition.consensus_hash, parent_block, parent_height, &StacksBlockHeader::make_index_block_hash(&parents_sortition.consensus_hash, parent_block) ); let parent_vtxindex = @@ -6409,7 +6409,7 @@ fn test_pox_no_anchor_selected() { path_blinded, &sort_db_blind, &mut coord_blind, - &sort_id, + sort_id, block, ); } @@ -6805,7 +6805,7 @@ fn reveal_block { F: FnMut(&mut NakamotoBlockBuilder), G: FnMut(&mut NakamotoBlock) -> bool, { - let sender_addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_key)); + let sender_addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_key)); let mut test_signers = self.config.test_signers.clone().unwrap(); let recipient_addr = StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); @@ -629,7 +629,7 @@ impl TestPeer<'_> { let stx_transfer = make_token_transfer( chainstate, sortdb, - &sender_key, + sender_key, sender_acct.nonce, 200, 1, @@ -987,7 +987,7 @@ fn block_info_tests(use_primary_testnet: bool) { let output = chainstate .clarity_eval_read_only( &sortdb_handle, - &tip_block_id, + tip_block_id, contract_id, &format!("(get-info u{query_ht})"), ) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs 
b/stackslib/src/chainstate/nakamoto/miner.rs index fe6b75f9cb..6c4392517b 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -265,7 +265,7 @@ impl NakamotoBlockBuilder { debug!("Nakamoto miner tenure begin"; "shadow" => shadow_block, "tenure_change" => ?cause); let Some(tenure_election_sn) = - SortitionDB::get_block_snapshot_consensus(&burn_dbconn, &self.header.consensus_hash)? + SortitionDB::get_block_snapshot_consensus(burn_dbconn, &self.header.consensus_hash)? else { warn!("Could not find sortition snapshot for burn block that elected the miner"; "consensus_hash" => %self.header.consensus_hash, @@ -279,7 +279,7 @@ impl NakamotoBlockBuilder { None } else { let Some(tenure_block_commit) = SortitionDB::get_block_commit( - &burn_dbconn, + burn_dbconn, &tenure_election_sn.winning_block_txid, &tenure_election_sn.sortition_id, )? @@ -674,7 +674,7 @@ impl BlockBuilder for NakamotoBlockBuilder { ast_rules: ASTRules, ) -> TransactionResult { if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { - return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); + return TransactionResult::skipped_due_to_error(tx, Error::BlockTooBigError); } let non_boot_code_contract_call = match &tx.payload { @@ -687,14 +687,14 @@ impl BlockBuilder for NakamotoBlockBuilder { BlockLimitFunction::CONTRACT_LIMIT_HIT => { if non_boot_code_contract_call { return TransactionResult::skipped( - &tx, + tx, "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), ); } } BlockLimitFunction::LIMIT_REACHED => { return TransactionResult::skipped( - &tx, + tx, "BlockLimitFunction::LIMIT_REACHED".to_string(), ) } @@ -707,14 +707,14 @@ impl BlockBuilder for NakamotoBlockBuilder { if let Err(e) = Relayer::static_check_problematic_relayed_tx( clarity_tx.config.mainnet, clarity_tx.get_epoch(), - &tx, + tx, ast_rules, ) { info!( "Detected problematic tx {} while mining; dropping from mempool", tx.txid() ); - return 
TransactionResult::problematic(&tx, Error::NetError(e)); + return TransactionResult::problematic(tx, Error::NetError(e)); } let cost_before = clarity_tx.cost_so_far(); @@ -745,7 +745,7 @@ impl BlockBuilder for NakamotoBlockBuilder { // save self.txs.push(tx.clone()); - TransactionResult::success_with_soft_limit(&tx, fee, receipt, soft_limit_reached) + TransactionResult::success_with_soft_limit(tx, fee, receipt, soft_limit_reached) }; self.bytes_so_far += tx_len; @@ -758,9 +758,9 @@ fn parse_process_transaction_error( tx: &StacksTransaction, e: Error, ) -> TransactionResult { - let (is_problematic, e) = TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + let (is_problematic, e) = TransactionResult::is_problematic(tx, e, clarity_tx.get_epoch()); if is_problematic { - TransactionResult::problematic(&tx, e) + TransactionResult::problematic(tx, e) } else { match e { Error::CostOverflowError(cost_before, cost_after, total_budget) => { @@ -781,18 +781,16 @@ fn parse_process_transaction_error( warn!("Failed to compute measured cost of a too big transaction"); None }; - TransactionResult::error(&tx, Error::TransactionTooBigError(measured_cost)) + TransactionResult::error(tx, Error::TransactionTooBigError(measured_cost)) } else { warn!( - "Transaction {} reached block cost {}; budget was {}", + "Transaction {} reached block cost {cost_after}; budget was {total_budget}", tx.txid(), - &cost_after, - &total_budget ); - TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError) + TransactionResult::skipped_due_to_error(tx, Error::BlockTooBigError) } } - _ => TransactionResult::error(&tx, e), + _ => TransactionResult::error(tx, e), } } } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 5f30ac51fc..6740916b38 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1765,7 +1765,7 @@ impl NakamotoChainState { continue; }; - let Ok(_) = 
staging_block_tx.set_block_orphaned(&block_id).map_err(|e| { + let Ok(_) = staging_block_tx.set_block_orphaned(block_id).map_err(|e| { warn!("Failed to mark {} as orphaned: {:?}", &block_id, &e); e }) else { @@ -2122,7 +2122,7 @@ impl NakamotoChainState { // succeeds, since *we have already processed* the block. Self::infallible_set_block_processed(stacks_chain_state, &block_id); - let signer_bitvec = (&next_ready_block).header.pox_treatment.clone(); + let signer_bitvec = (next_ready_block).header.pox_treatment.clone(); let block_timestamp = next_ready_block.header.timestamp; @@ -2172,7 +2172,7 @@ impl NakamotoChainState { dispatcher.announce_block( &block_event, &receipt.header.clone(), - &tx_receipts, + tx_receipts, &parent_block_id, next_ready_block_snapshot.winning_block_txid, &receipt.matured_rewards, @@ -2949,7 +2949,7 @@ impl NakamotoChainState { let parent_sortition_id = SortitionDB::get_block_commit_parent_sortition_id( sortdb_conn, - &block_commit_txid, + block_commit_txid, &sn.sortition_id, )? .ok_or(ChainstateError::InvalidStacksBlock( @@ -3153,7 +3153,7 @@ impl NakamotoChainState { let block_hash = header.block_hash(); - let index_block_hash = StacksBlockId::new(&consensus_hash, &block_hash); + let index_block_hash = StacksBlockId::new(consensus_hash, &block_hash); assert!(*stacks_block_height < u64::try_from(i64::MAX).unwrap()); @@ -3277,7 +3277,7 @@ impl NakamotoChainState { StacksBlockHeaderTypes::Epoch2(..) 
=> { assert_eq!( new_tip.parent_block_id, - StacksBlockId::new(&parent_consensus_hash, &parent_tip.block_hash()) + StacksBlockId::new(parent_consensus_hash, &parent_tip.block_hash()) ); } StacksBlockHeaderTypes::Nakamoto(nakamoto_header) => { @@ -3401,7 +3401,7 @@ impl NakamotoChainState { + if new_tenure { 0 } else { - Self::get_total_tenure_tx_fees_at(&headers_tx, &parent_hash)?.ok_or_else(|| { + Self::get_total_tenure_tx_fees_at(headers_tx, &parent_hash)?.ok_or_else(|| { warn!( "Failed to fetch parent block's total tx fees"; "parent_block_id" => %parent_hash, @@ -3432,7 +3432,7 @@ impl NakamotoChainState { Self::insert_stacks_block_header( headers_tx.deref_mut(), &new_tip_info, - &new_tip, + new_tip, new_vrf_proof, anchor_block_cost, total_tenure_cost, @@ -3530,7 +3530,7 @@ impl NakamotoChainState { let signer_sighash = block.header.signer_signature_hash(); for signer_signature in &block.header.signer_signature { let signer_pubkey = - StacksPublicKey::recover_to_pubkey(signer_sighash.bits(), &signer_signature) + StacksPublicKey::recover_to_pubkey(signer_sighash.bits(), signer_signature) .map_err(|e| ChainstateError::InvalidStacksBlock(e.to_string()))?; let sql = "INSERT INTO signer_stats(public_key,reward_cycle) VALUES(?1,?2) ON CONFLICT(public_key,reward_cycle) DO UPDATE SET blocks_signed=blocks_signed+1"; let params = params![signer_pubkey.to_hex(), reward_cycle]; @@ -4042,7 +4042,7 @@ impl NakamotoChainState { signer_set_calc = NakamotoSigners::check_and_handle_prepare_phase_start( &mut clarity_tx, first_block_height, - &pox_constants, + pox_constants, burn_header_height.into(), coinbase_height, )?; @@ -4091,7 +4091,7 @@ impl NakamotoChainState { miner_payouts: Option<&MaturedMinerRewards>, ) -> Result, ChainstateError> { // add miner payments - if let Some(ref rewards) = miner_payouts { + if let Some(rewards) = miner_payouts { // grant in order by miner, then users let matured_ustx = StacksChainState::process_matured_miner_rewards( clarity_tx, @@ -4220,7 
+4220,7 @@ impl NakamotoChainState { > { // get burn block stats, for the transaction receipt - let parent_sn = SortitionDB::get_block_snapshot_consensus(burn_dbconn, &parent_ch)? + let parent_sn = SortitionDB::get_block_snapshot_consensus(burn_dbconn, parent_ch)? .ok_or_else(|| { // shouldn't happen warn!( @@ -4477,7 +4477,7 @@ impl NakamotoChainState { burn_dbconn, first_block_height, pox_constants, - &parent_chain_tip, + parent_chain_tip, parent_ch, parent_block_hash, parent_chain_tip.burn_header_height, @@ -4639,7 +4639,7 @@ impl NakamotoChainState { &mut chainstate_tx.tx, &parent_chain_tip.anchored_header, &parent_chain_tip.consensus_hash, - &block, + block, vrf_proof_opt, chain_tip_burn_header_hash, chain_tip_burn_header_height, @@ -4849,7 +4849,7 @@ impl NakamotoChainState { tip: &BlockSnapshot, election_sortition: &ConsensusHash, ) -> Result>, ChainstateError> { - let (stackerdb_config, miners_info) = Self::make_miners_stackerdb_config(sortdb, &tip)?; + let (stackerdb_config, miners_info) = Self::make_miners_stackerdb_config(sortdb, tip)?; // find out which slot we're in let Some(signer_ix) = miners_info diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index dad10f62e0..3bf157d3e5 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -484,7 +484,7 @@ impl NakamotoBlockBuilder { tip: &StacksHeaderInfo, ) -> Result { let snapshot = - SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &tip.consensus_hash)? + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tip.consensus_hash)? 
.ok_or_else(|| Error::NoSuchBlockError)?; let account = chainstate @@ -734,7 +734,7 @@ impl NakamotoBlockBuilder { block_txs.append(&mut txs); let (mut shadow_block, _size, _cost) = Self::make_shadow_block_from_txs( builder, - &chainstate, + chainstate, &sortdb.index_handle(&burn_tip.sortition_id), &tenure_id_consensus_hash, block_txs, @@ -968,7 +968,7 @@ pub fn shadow_chainstate_repair( ) -> Result, ChainstateError> { let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?; - let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db)? + let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), sort_db)? .ok_or_else(|| ChainstateError::NoSuchBlockError)?; let header_sn = @@ -987,7 +987,7 @@ pub fn shadow_chainstate_repair( .get_block_snapshot_by_height(burn_height)? .ok_or_else(|| ChainstateError::InvalidStacksBlock("No sortition at height".into()))?; - let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db)? + let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), sort_db)? 
.ok_or_else(|| ChainstateError::NoSuchBlockError)?; let chain_tip = header.index_block_hash(); diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index 38e76f7e51..f947d4abc7 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -232,7 +232,7 @@ impl NakamotoSigners { let liquid_ustx = clarity.with_clarity_db_readonly(|db| db.get_total_liquid_ustx())?; let reward_slots = Self::get_reward_slots(clarity, reward_cycle, pox_contract)?; let (threshold, participation) = StacksChainState::get_reward_threshold_and_participation( - &pox_constants, + pox_constants, &reward_slots[..], liquid_ustx, ); @@ -322,13 +322,13 @@ impl NakamotoSigners { |vm_env| { vm_env.execute_in_env(sender_addr.clone(), None, None, |env| { env.execute_contract_allow_private( - &signers_contract, + signers_contract, "stackerdb-set-signer-slots", &set_stackerdb_args, false, )?; env.execute_contract_allow_private( - &signers_contract, + signers_contract, "set-signers", &set_signers_args, false, @@ -435,7 +435,7 @@ impl NakamotoSigners { .as_free_transaction(|clarity| { Self::handle_signer_stackerdb_update( clarity, - &pox_constants, + pox_constants, cycle_of_prepare_phase, active_pox_contract, coinbase_height, @@ -568,7 +568,7 @@ impl NakamotoSigners { transactions: Vec, ) { for transaction in transactions { - if NakamotoSigners::valid_vote_transaction(&account_nonces, &transaction, mainnet) { + if NakamotoSigners::valid_vote_transaction(account_nonces, &transaction, mainnet) { let origin_address = transaction.origin_address(); let origin_nonce = transaction.get_origin_nonce(); if let Some(entry) = filtered_transactions.get_mut(&origin_address) { diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 9190bf99af..d11d81fe7f 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ 
b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -520,7 +520,7 @@ impl NakamotoStagingBlocksTx<'_> { "UPDATE nakamoto_staging_blocks SET processed = 1, processed_time = ?2 WHERE index_block_hash = ?1"; self.execute( - &clear_staged_block, + clear_staged_block, params![block, u64_to_sql(get_epoch_time_secs())?], )?; @@ -534,13 +534,13 @@ impl NakamotoStagingBlocksTx<'_> { let update_dependents = "UPDATE nakamoto_staging_blocks SET orphaned = 1 WHERE parent_block_id = ?"; - self.execute(&update_dependents, &[&block])?; + self.execute(update_dependents, &[&block])?; let clear_staged_block = "UPDATE nakamoto_staging_blocks SET processed = 1, processed_time = ?2, orphaned = 1 WHERE index_block_hash = ?1"; self.execute( - &clear_staged_block, + clear_staged_block, params![block, u64_to_sql(get_epoch_time_secs())?], )?; @@ -555,7 +555,7 @@ impl NakamotoStagingBlocksTx<'_> { ) -> Result<(), ChainstateError> { let update_dependents = "UPDATE nakamoto_staging_blocks SET burn_attachable = 1 WHERE consensus_hash = ?"; - self.execute(&update_dependents, &[consensus_hash])?; + self.execute(update_dependents, &[consensus_hash])?; Ok(()) } @@ -743,13 +743,13 @@ impl StacksChainState { pub fn get_nakamoto_staging_blocks_db_version( conn: &Connection, ) -> Result { - let db_version_exists = table_exists(&conn, "db_version")?; + let db_version_exists = table_exists(conn, "db_version")?; if !db_version_exists { return Ok(1); } let qry = "SELECT version FROM db_version ORDER BY version DESC LIMIT 1"; let args = NO_PARAMS; - let version: Option = match query_row(&conn, qry, args) { + let version: Option = match query_row(conn, qry, args) { Ok(x) => x, Err(e) => { error!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index a0e516f283..3d218f85e0 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -372,7 
+372,7 @@ impl NakamotoChainState { let matured_coinbase_height = coinbase_height - MINER_REWARD_MATURITY; let matured_tenure_block_header = Self::get_header_by_coinbase_height( chainstate_tx.deref_mut(), - &tip_index_hash, + tip_index_hash, matured_coinbase_height, )? .ok_or_else(|| { diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 6fd559da69..46e4345333 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -128,7 +128,7 @@ impl TestSigners { self.generate_aggregate_key(cycle); } - let signer_signature = self.generate_block_signatures(&block); + let signer_signature = self.generate_block_signatures(block); test_debug!( "Signed Nakamoto block {} with {} signatures (rc {})", diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index bd415b68b0..aa3acc2546 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -136,7 +136,7 @@ pub fn get_account( &tip ); - let snapshot = SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &tip.consensus_hash) + let snapshot = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tip.consensus_hash) .unwrap() .unwrap(); chainstate @@ -577,7 +577,7 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { #[test] pub fn test_load_store_update_nakamoto_blocks() { let test_name = function_name!(); - let path = test_path(&test_name); + let path = test_path(test_name); let pox_constants = PoxConstants::new(5, 3, 3, 25, 5, 0, 0, 0, 0, 0, 0); let epochs = StacksEpoch::unit_test_3_0_only(1); let _ = std::fs::remove_dir_all(&path); @@ -2243,7 +2243,7 @@ fn test_make_miners_stackerdb_config() { let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let miner_privkey = &miner_keys[i]; let miner_pubkey = StacksPublicKey::from_private(miner_privkey); - 
let slot_id = NakamotoChainState::get_miner_slot(&sort_db, &tip, &tip.consensus_hash) + let slot_id = NakamotoChainState::get_miner_slot(sort_db, &tip, &tip.consensus_hash) .expect("Failed to get miner slot"); if sortition { let slot_id = slot_id.expect("No miner slot exists for this miner").start; @@ -2544,7 +2544,7 @@ fn parse_vote_for_aggregate_public_key_invalid() { .enumerate() { assert!( - NakamotoSigners::parse_vote_for_aggregate_public_key(&tx).is_none(), + NakamotoSigners::parse_vote_for_aggregate_public_key(tx).is_none(), "{}", format!("parsed the {i}th transaction: {tx:?}") ); @@ -3051,7 +3051,7 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { let filtered_txs: Vec<_> = filtered_transactions.into_values().collect(); txs.sort_by(|a, b| a.txid().cmp(&b.txid())); assert_eq!(filtered_txs.len(), 1); - assert!(filtered_txs.contains(&txs.first().expect("failed to get first tx"))); + assert!(filtered_txs.contains(txs.first().expect("failed to get first tx"))); } pub mod nakamoto_block_signatures { @@ -3066,7 +3066,7 @@ pub mod nakamoto_block_signatures { .map(|(s, w)| { let mut signing_key = [0u8; 33]; signing_key.copy_from_slice( - &Secp256k1PublicKey::from_private(s) + Secp256k1PublicKey::from_private(s) .to_bytes_compressed() .as_slice(), ); diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index cd21c7eeaa..384145b41a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -410,7 +410,7 @@ impl TestStacksNode { sortdb, burn_block, miner, - &last_tenure_id, + last_tenure_id, burn_amount, miner_key, parent_block_snapshot_opt, @@ -510,7 +510,7 @@ impl TestStacksNode { let mut cursor = first_parent.header.consensus_hash; let parent_sortition = loop { let parent_sortition = - SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &cursor) + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cursor) .unwrap() .unwrap(); 
@@ -618,7 +618,7 @@ impl TestStacksNode { ) } else { let hdr = - NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &sortdb) + NakamotoChainState::get_canonical_block_header(self.chainstate.db(), sortdb) .unwrap() .unwrap(); if hdr.anchored_header.as_stacks_nakamoto().is_some() { @@ -766,7 +766,7 @@ impl TestStacksNode { Some(nakamoto_parent) } else { warn!("Produced Tenure change transaction does not point to a real block"); - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? } } else if let Some(tenure_change) = tenure_change.as_ref() { // make sure parent tip is consistent with a tenure change @@ -782,13 +782,13 @@ impl TestStacksNode { Some(nakamoto_parent) } else { debug!("Use parent tip identified by canonical tip pointer (no parent block {})", &payload.previous_tenure_end); - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? } } else { panic!("Tenure change transaction does not have a TenureChange payload"); } } else { - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? }; let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; @@ -952,7 +952,7 @@ impl TestStacksNode { // canonical tip let stacks_chain_tip = NakamotoChainState::get_canonical_block_header( chainstate.db(), - &sortdb, + sortdb, )? 
.ok_or_else(|| ChainstateError::NoSuchBlockError)?; let nakamoto_chain_tip = stacks_chain_tip @@ -1628,7 +1628,7 @@ impl TestPeer<'_> { let tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); node.add_nakamoto_tenure_blocks(blocks.clone()); - for block in blocks.into_iter() { + for block in blocks.iter() { let mut sort_handle = sortdb.index_handle(&tip); let block_id = block.block_id(); debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header); @@ -1638,7 +1638,7 @@ impl TestPeer<'_> { &mut sort_handle, &mut node.chainstate, &self.network.stacks_tip.block_id(), - &block, + block, None, NakamotoBlockObtainMethod::Pushed, ) @@ -1648,7 +1648,7 @@ impl TestPeer<'_> { self.coord.handle_new_nakamoto_stacks_block().unwrap(); debug!("Begin check Nakamoto block {}", &block.block_id()); - TestPeer::check_processed_nakamoto_block(&mut sortdb, &mut node.chainstate, &block); + TestPeer::check_processed_nakamoto_block(&mut sortdb, &mut node.chainstate, block); debug!("Eegin check Nakamoto block {}", &block.block_id()); } else { test_debug!("Did NOT accept Nakamoto block {}", &block_id); @@ -1668,7 +1668,7 @@ impl TestPeer<'_> { ) -> StacksHeaderInfo { let Ok(Some(tenure_start_header)) = NakamotoChainState::get_tenure_start_block_header( &mut chainstate.index_conn(), - &tip_block_id, + tip_block_id, tenure_id_consensus_hash, ) else { panic!( @@ -1699,7 +1699,7 @@ impl TestPeer<'_> { // get the tenure-start block of the last tenure let Ok(Some(prev_tenure_start_header)) = NakamotoChainState::get_tenure_start_block_header( &mut chainstate.index_conn(), - &tip_block_id, + tip_block_id, prev_tenure_consensus_hash, ) else { panic!( @@ -1960,7 +1960,7 @@ impl TestPeer<'_> { let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof( &mut chainstate.index_conn(), &block.header.parent_block_id, - &sortdb.conn(), + sortdb.conn(), &block.header.consensus_hash, &tenure_block_commit.txid, ) @@ -2186,7 +2186,7 @@ impl TestPeer<'_> { 
assert!(NakamotoChainState::check_block_commit_vrf_seed( &mut chainstate.index_conn(), sortdb.conn(), - &block + block ) .is_ok()); @@ -2412,7 +2412,7 @@ impl TestPeer<'_> { chainstate.nakamoto_blocks_db(), &sortdb.index_handle_at_tip(), None, - &block, + block, false, 0x80000000, ) @@ -2423,7 +2423,7 @@ impl TestPeer<'_> { chainstate.nakamoto_blocks_db(), &sortdb.index_handle_at_tip(), Some(block.header.burn_spent), - &block, + block, false, 0x80000000, ) @@ -2435,7 +2435,7 @@ impl TestPeer<'_> { chainstate.nakamoto_blocks_db(), &sortdb.index_handle_at_tip(), Some(block.header.burn_spent + 1), - &block, + block, false, 0x80000000, ) diff --git a/stackslib/src/chainstate/stacks/auth.rs b/stackslib/src/chainstate/stacks/auth.rs index f867b82f53..47834b0e2a 100644 --- a/stackslib/src/chainstate/stacks/auth.rs +++ b/stackslib/src/chainstate/stacks/auth.rs @@ -459,7 +459,7 @@ impl OrderIndependentMultisigSpendingCondition { } let (pubkey, _next_sighash) = TransactionSpendingCondition::next_verification( - &initial_sighash, + initial_sighash, cond_code, self.tx_fee, self.nonce, diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 51c53c94de..02a8f285c6 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -388,10 +388,7 @@ impl StacksBlock { state_index_root: &TrieHash, microblock_pubkey_hash: &Hash160, ) -> StacksBlock { - let txids: Vec<_> = txs - .iter() - .map(|ref tx| tx.txid().as_bytes().to_vec()) - .collect(); + let txids: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txids); let tx_merkle_root = merkle_tree.root(); let header = StacksBlockHeader::from_parent( @@ -880,10 +877,7 @@ impl StacksMicroblock { parent_block_hash: &BlockHeaderHash, txs: Vec, ) -> StacksMicroblock { - let txids: Vec<_> = txs - .iter() - .map(|ref tx| tx.txid().as_bytes().to_vec()) - .collect(); + let txids: Vec<_> = 
txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txids); let tx_merkle_root = merkle_tree.root(); let header = StacksMicroblockHeader::first_unsigned(parent_block_hash, &tx_merkle_root); @@ -894,10 +888,7 @@ impl StacksMicroblock { parent_header: &StacksMicroblockHeader, txs: Vec, ) -> Option { - let txids: Vec<_> = txs - .iter() - .map(|ref tx| tx.txid().as_bytes().to_vec()) - .collect(); + let txids: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); let merkle_tree = MerkleTree::::new(&txids); let tx_merkle_root = merkle_tree.root(); let header = @@ -1770,17 +1761,17 @@ mod test { if *epoch_id < activation_epoch_id { assert!(!StacksBlock::validate_transactions_static_epoch( - &txs, + txs, epoch_id.clone(), )); } else if deactivation_epoch_id.is_none() || deactivation_epoch_id.unwrap() > *epoch_id { assert!(StacksBlock::validate_transactions_static_epoch( - &txs, *epoch_id, + txs, *epoch_id, )); } else { assert!(!StacksBlock::validate_transactions_static_epoch( - &txs, *epoch_id, + txs, *epoch_id, )); } } diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 58701a2861..31fab9f148 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -1778,9 +1778,9 @@ fn test_deploy_smart_contract( ) -> std::result::Result<(), ClarityError> { block.as_transaction(|tx| { let (ast, analysis) = - tx.analyze_smart_contract(&contract_id, version, content, ASTRules::PrecheckSize)?; - tx.initialize_smart_contract(&contract_id, version, &ast, content, None, |_, _| false)?; - tx.save_analysis(&contract_id, &analysis)?; + tx.analyze_smart_contract(contract_id, version, content, ASTRules::PrecheckSize)?; + tx.initialize_smart_contract(contract_id, version, &ast, content, None, |_, _| false)?; + tx.save_analysis(contract_id, &analysis)?; return Ok(()); }) } diff --git 
a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index b941bed938..1e047ffa49 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -108,17 +108,17 @@ pub mod docs; lazy_static! { pub static ref BOOT_CODE_POX_MAINNET: String = - format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, BOOT_CODE_POX_BODY); + format!("{BOOT_CODE_POX_MAINNET_CONSTS}\n{BOOT_CODE_POX_BODY}"); pub static ref BOOT_CODE_POX_TESTNET: String = - format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, BOOT_CODE_POX_BODY); + format!("{BOOT_CODE_POX_TESTNET_CONSTS}\n{BOOT_CODE_POX_BODY}"); pub static ref POX_2_MAINNET_CODE: String = - format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_2_BODY); + format!("{BOOT_CODE_POX_MAINNET_CONSTS}\n{POX_2_BODY}"); pub static ref POX_2_TESTNET_CODE: String = - format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_2_BODY); + format!("{BOOT_CODE_POX_TESTNET_CONSTS}\n{POX_2_BODY}"); pub static ref POX_3_MAINNET_CODE: String = - format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_3_BODY); + format!("{BOOT_CODE_POX_MAINNET_CONSTS}\n{POX_3_BODY}"); pub static ref POX_3_TESTNET_CODE: String = - format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_3_BODY); + format!("{BOOT_CODE_POX_TESTNET_CONSTS}\n{POX_3_BODY}"); pub static ref POX_4_CODE: String = POX_4_BODY.to_string(); pub static ref BOOT_CODE_COST_VOTING_TESTNET: String = make_testnet_cost_voting(); pub static ref STACKS_BOOT_CODE_MAINNET: [(&'static str, &'static str); 6] = [ @@ -126,16 +126,16 @@ lazy_static! 
{ ("lockup", BOOT_CODE_LOCKUP), ("costs", BOOT_CODE_COSTS), ("cost-voting", BOOT_CODE_COST_VOTING_MAINNET), - ("bns", &BOOT_CODE_BNS), - ("genesis", &BOOT_CODE_GENESIS), + ("bns", BOOT_CODE_BNS), + ("genesis", BOOT_CODE_GENESIS), ]; pub static ref STACKS_BOOT_CODE_TESTNET: [(&'static str, &'static str); 6] = [ ("pox", &BOOT_CODE_POX_TESTNET), ("lockup", BOOT_CODE_LOCKUP), ("costs", BOOT_CODE_COSTS), ("cost-voting", &BOOT_CODE_COST_VOTING_TESTNET), - ("bns", &BOOT_CODE_BNS), - ("genesis", &BOOT_CODE_GENESIS), + ("bns", BOOT_CODE_BNS), + ("genesis", BOOT_CODE_GENESIS), ]; } @@ -530,7 +530,7 @@ impl StacksChainState { // 4. delete the user's stacking-state entry. clarity.with_clarity_db(|db| { // lookup the Stacks account and alter their unlock height to next block - let mut balance = db.get_stx_balance_snapshot(&principal)?; + let mut balance = db.get_stx_balance_snapshot(principal)?; let canonical_locked = balance.canonical_balance_repr()?.amount_locked(); if canonical_locked < *amount_locked { panic!("Principal missed reward slots, but did not have as many locked tokens as expected. 
Actual: {}, Expected: {}", canonical_locked, *amount_locked); @@ -599,7 +599,7 @@ impl StacksChainState { let headers_db = HeadersDBConn(StacksDBConn::new(&ro_index, ())); self.clarity_state .eval_read_only( - &stacks_block_id, + stacks_block_id, &headers_db, &iconn, &boot::boot_code_id(boot_contract_name, self.mainnet), @@ -1871,7 +1871,7 @@ pub mod test { chainstate .with_read_only_clarity_tx( &sortdb - .index_handle_at_block(&chainstate, &stacks_block_id) + .index_handle_at_block(chainstate, &stacks_block_id) .unwrap(), &stacks_block_id, |clarity_tx| StacksChainState::get_account(clarity_tx, addr), @@ -2816,7 +2816,7 @@ pub mod test { } pub fn get_current_reward_cycle(peer: &TestPeer, burnchain: &Burnchain) -> u128 { - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); burnchain .block_height_to_reward_cycle(tip.block_height) @@ -2844,7 +2844,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -2971,7 +2971,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -3038,7 +3038,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - 
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { @@ -3155,7 +3155,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -3266,7 +3266,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -3335,14 +3335,14 @@ pub mod test { assert_eq!(alice_account.stx_balance.amount_locked(), 0); assert_eq!(alice_account.stx_balance.unlock_height(), 0); } - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); assert_eq!(min_ustx, total_liquid_ustx / TESTNET_STACKING_THRESHOLD_25); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -3379,11 +3379,11 @@ pub mod test { let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 0); - let min_ustx = 
with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -3392,7 +3392,7 @@ pub mod test { ) }) .unwrap(); - let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked( sortdb, &tip_index_block, @@ -3477,7 +3477,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let cur_reward_cycle = burnchain @@ -3575,10 +3575,10 @@ pub mod test { // No locks have taken place for key in keys.iter() { // has not locked up STX - let balance = get_balance(&mut peer, &key_to_stacks_addr(&key).into()); + let balance = get_balance(&mut peer, &key_to_stacks_addr(key).into()); assert_eq!(balance, 1024 * POX_THRESHOLD_STEPS_USTX); - let account = get_account(&mut peer, &key_to_stacks_addr(&key).into()); + let account = get_account(&mut peer, &key_to_stacks_addr(key).into()); assert_eq!( account.stx_balance.amount_unlocked(), 1024 * POX_THRESHOLD_STEPS_USTX @@ -3587,14 +3587,14 @@ pub mod test { assert_eq!(account.stx_balance.unlock_height(), 0); } } - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); assert_eq!(min_ustx, total_liquid_ustx / TESTNET_STACKING_THRESHOLD_25); // no reward 
addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -3633,11 +3633,11 @@ pub mod test { assert_eq!(balance, 0); } - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -3646,7 +3646,7 @@ pub mod test { ) }) .unwrap(); - let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked( sortdb, &tip_index_block, @@ -3672,24 +3672,24 @@ pub mod test { assert_eq!(reward_addrs.len(), 4); let mut all_addrbytes = HashSet::new(); for key in keys.iter() { - all_addrbytes.insert(key_to_stacks_addr(&key).bytes); + all_addrbytes.insert(key_to_stacks_addr(key).bytes); } for key in keys.iter() { let (amount_ustx, pox_addr, lock_period, first_reward_cycle) = - get_stacker_info(&mut peer, &key_to_stacks_addr(&key).into()).unwrap(); + get_stacker_info(&mut peer, &key_to_stacks_addr(key).into()).unwrap(); eprintln!("\n{}: {} uSTX stacked for {} cycle(s); addr is {:?}; first reward cycle is {}\n", key.to_hex(), amount_ustx, lock_period, &pox_addr, first_reward_cycle); assert_eq!( (reward_addrs[0].0).version(), AddressHashMode::SerializeP2PKH as u8 ); - assert!(all_addrbytes.contains(&key_to_stacks_addr(&key).bytes)); - all_addrbytes.remove(&key_to_stacks_addr(&key).bytes); + assert!(all_addrbytes.contains(&key_to_stacks_addr(key).bytes)); + all_addrbytes.remove(&key_to_stacks_addr(key).bytes); 
assert_eq!(reward_addrs[0].1, 1024 * POX_THRESHOLD_STEPS_USTX); // Lock-up is consistent with stacker state - let account = get_account(&mut peer, &key_to_stacks_addr(&key).into()); + let account = get_account(&mut peer, &key_to_stacks_addr(key).into()); assert_eq!(account.stx_balance.amount_unlocked(), 0); assert_eq!( account.stx_balance.amount_locked(), @@ -3738,7 +3738,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -3803,14 +3803,14 @@ pub mod test { let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); } - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); assert_eq!(min_ustx, total_liquid_ustx / TESTNET_STACKING_THRESHOLD_25); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -3846,11 +3846,11 @@ pub mod test { let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 0); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { 
get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -3859,7 +3859,7 @@ pub mod test { ) }) .unwrap(); - let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked( sortdb, &tip_index_block, @@ -4005,7 +4005,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -4083,14 +4083,14 @@ pub mod test { assert_eq!(bob_balance, 1024 * POX_THRESHOLD_STEPS_USTX); } - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); assert_eq!(min_ustx, total_liquid_ustx / TESTNET_STACKING_THRESHOLD_25); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -4133,11 +4133,11 @@ pub mod test { 1024 * POX_THRESHOLD_STEPS_USTX - (4 * 1024 * POX_THRESHOLD_STEPS_USTX) / 5 ); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -4221,7 +4221,7 @@ pub mod test { let microblock_pubkeyhash = 
Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { @@ -4328,7 +4328,7 @@ pub mod test { if tenure_id <= 1 { // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -4434,7 +4434,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -4496,14 +4496,14 @@ pub mod test { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); } - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); assert_eq!(min_ustx, total_liquid_ustx / TESTNET_STACKING_THRESHOLD_25); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -4538,11 +4538,11 @@ pub mod test { let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut 
chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -4551,7 +4551,7 @@ pub mod test { ) }) .unwrap(); - let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked( sortdb, &tip_index_block, @@ -4683,7 +4683,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -4800,15 +4800,15 @@ pub mod test { ); let charlie_balance = get_balance(&mut peer, &key_to_stacks_addr(&charlie).into()); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) }) .unwrap(); @@ -4822,7 +4822,7 @@ pub mod test { assert_eq!(charlie_contract_balance, 0); } - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = 
with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); @@ -4858,7 +4858,7 @@ pub mod test { // should have just re-locked // stacking minimum should be minimum, since we haven't // locked up 25% of the tokens yet - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); @@ -5205,7 +5205,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -5445,15 +5445,15 @@ pub mod test { 512 * POX_THRESHOLD_STEPS_USTX - 1, ]; - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); - let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) }) .unwrap(); @@ -5472,7 +5472,7 @@ pub mod test { assert_eq!(min_ustx, total_liquid_ustx / TESTNET_STACKING_THRESHOLD_25); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { 
get_reward_addresses_with_par_tip( chainstate, &burnchain, @@ -5651,7 +5651,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { @@ -5747,19 +5747,19 @@ pub mod test { .unwrap() as u128; let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); - let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) }) .unwrap(); - let total_stacked_next = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked_next = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked( sortdb, &tip_index_block, diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 47b57cdd2c..4168c9c8cc 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -73,7 +73,7 @@ const USTX_PER_HOLDER: u128 = 1_000_000; /// Return 
the BlockSnapshot for the latest sortition in the provided /// SortitionDB option-reference. Panics on any errors. fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { - SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() + SortitionDB::get_canonical_burn_chain_tip(sortdb.unwrap().conn()).unwrap() } /// Get the reward set entries if evaluated at the given StacksBlock @@ -83,7 +83,7 @@ pub fn get_reward_set_entries_at( at_burn_ht: u64, ) -> Vec { let burnchain = peer.config.burnchain.clone(); - with_sortdb(peer, |ref mut c, ref sortdb| { + with_sortdb(peer, |ref mut c, sortdb| { get_reward_set_entries_at_block(c, &burnchain, sortdb, tip, at_burn_ht).unwrap() }) } @@ -96,7 +96,7 @@ pub fn get_reward_set_entries_index_order_at( at_burn_ht: u64, ) -> Vec { let burnchain = peer.config.burnchain.clone(); - with_sortdb(peer, |ref mut c, ref sortdb| { + with_sortdb(peer, |ref mut c, sortdb| { c.get_reward_addresses(&burnchain, sortdb, at_burn_ht, tip) .unwrap() }) @@ -665,7 +665,7 @@ pub fn with_clarity_db_ro(peer: &mut TestPeer, tip: &StacksBlockId, todo: where F: FnOnce(&mut ClarityDatabase) -> R, { - with_sortdb(peer, |ref mut c, ref sortdb| { + with_sortdb(peer, |ref mut c, sortdb| { let headers_db = HeadersDBConn(StacksDBConn::new(&c.state_index, ())); let burn_db = sortdb.index_handle_at_tip(); let mut read_only_clar = c @@ -745,7 +745,7 @@ fn test_simple_pox_lockup_transition_pox_2() { .block_height_to_reward_cycle(tip_burn_block_height) .unwrap() as u128; let (min_ustx, reward_addrs, total_stacked) = - with_sortdb(&mut peer, |ref mut c, ref sortdb| { + with_sortdb(&mut peer, |ref mut c, sortdb| { ( c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block) @@ -844,7 +844,7 @@ fn test_simple_pox_lockup_transition_pox_2() { // check the stacking minimum let total_liquid_ustx = get_liquid_ustx(&mut peer); - let min_ustx = with_sortdb(&mut peer, |ref mut 
chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); @@ -854,7 +854,7 @@ fn test_simple_pox_lockup_transition_pox_2() { ); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); @@ -2127,7 +2127,7 @@ fn test_lock_period_invariant_extend_transition() { // check the stacking minimum let total_liquid_ustx = get_liquid_ustx(&mut peer); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); @@ -2137,7 +2137,7 @@ fn test_lock_period_invariant_extend_transition() { ); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); @@ -2264,7 +2264,7 @@ fn test_pox_extend_transition_pox_2() { let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip_burn_block_height) .unwrap() as u128; - let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, sortdb| { ( c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), @@ -2305,7 +2305,7 @@ fn test_pox_extend_transition_pox_2() { let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip_burn_block_height) .unwrap() as u128; - let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + let 
(min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, sortdb| { ( c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), @@ -2379,7 +2379,7 @@ fn test_pox_extend_transition_pox_2() { // check the stacking minimum let total_liquid_ustx = get_liquid_ustx(&mut peer); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); @@ -2389,7 +2389,7 @@ fn test_pox_extend_transition_pox_2() { ); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); @@ -2711,7 +2711,7 @@ fn test_delegate_extend_transition_pox_2() { let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip_burn_block_height) .unwrap() as u128; - let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, sortdb| { ( c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), @@ -2742,7 +2742,7 @@ fn test_delegate_extend_transition_pox_2() { let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip_burn_block_height) .unwrap() as u128; - let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, sortdb| { ( c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), @@ -2885,7 +2885,7 @@ fn test_delegate_extend_transition_pox_2() { // 
check the stacking minimum let total_liquid_ustx = get_liquid_ustx(&mut peer); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); @@ -2895,7 +2895,7 @@ fn test_delegate_extend_transition_pox_2() { ); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); @@ -3739,7 +3739,7 @@ fn test_get_pox_addrs() { let microblock_privkey = StacksPrivateKey::new(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let cur_reward_cycle = burnchain @@ -3857,15 +3857,15 @@ fn test_get_pox_addrs() { ); } if tenure_id > 1 { - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); - let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) }) .unwrap(); @@ -4013,7 +4013,7 @@ fn test_stack_with_segwit() { let microblock_privkey = StacksPrivateKey::new(); let 
microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let cur_reward_cycle = burnchain @@ -4153,15 +4153,15 @@ fn test_stack_with_segwit() { ); } if tenure_id > 1 { - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); - let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let total_stacked = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) }) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 5c52297969..52a95e2afd 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -78,7 +78,7 @@ const USTX_PER_HOLDER: u128 = 1_000_000; /// Return the BlockSnapshot for the latest sortition in the provided /// SortitionDB option-reference. Panics on any errors. 
fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { - SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() + SortitionDB::get_canonical_burn_chain_tip(sortdb.unwrap().conn()).unwrap() } fn make_test_epochs_pox() -> (EpochList, PoxConstants) { @@ -250,7 +250,7 @@ fn simple_pox_lockup_transition_pox_2() { // check the stacking minimum let total_liquid_ustx = get_liquid_ustx(&mut peer); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); @@ -260,7 +260,7 @@ fn simple_pox_lockup_transition_pox_2() { ); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); @@ -2108,7 +2108,7 @@ fn pox_extend_transition() { let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip_burn_block_height) .unwrap() as u128; - let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, sortdb| { ( c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), @@ -2149,7 +2149,7 @@ fn pox_extend_transition() { let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip_burn_block_height) .unwrap() as u128; - let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, sortdb| { ( c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), @@ -2213,7 +2213,7 @@ fn pox_extend_transition() { // check 
the stacking minimum let total_liquid_ustx = get_liquid_ustx(&mut peer); - let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { chainstate.get_stacking_minimum(sortdb, &tip_index_block) }) .unwrap(); @@ -2223,7 +2223,7 @@ fn pox_extend_transition() { ); // no reward addresses - let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, sortdb| { get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) }) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 072f1d33ef..a9472b03d6 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -93,7 +93,7 @@ const ERR_REUSED_SIGNER_KEY: i128 = 33; /// Return the BlockSnapshot for the latest sortition in the provided /// SortitionDB option-reference. Panics on any errors. 
pub fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { - SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() + SortitionDB::get_canonical_burn_chain_tip(sortdb.unwrap().conn()).unwrap() } /// Helper rstest template for running tests in both 2.5 @@ -112,7 +112,7 @@ fn make_simple_pox_4_lock( ) -> StacksTransaction { let addr = key_to_stacks_addr(key); let pox_addr = PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); - let signer_pk = StacksPublicKey::from_private(&key); + let signer_pk = StacksPublicKey::from_private(key); let tip = get_tip(peer.sortdb.as_ref()); let next_reward_cycle = peer .config @@ -124,7 +124,7 @@ fn make_simple_pox_4_lock( let signature = make_signer_key_signature( &pox_addr, - &key, + key, next_reward_cycle.into(), &Pox4SignatureTopic::StackStx, lock_period, @@ -313,7 +313,7 @@ fn pox_extend_transition() { let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip_burn_block_height) .unwrap() as u128; - let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, sortdb| { ( c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), @@ -354,7 +354,7 @@ fn pox_extend_transition() { let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip_burn_block_height) .unwrap() as u128; - let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, sortdb| { ( c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), @@ -965,7 +965,7 @@ fn pox_lock_unlock() { let signer_key = key; let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, 
&Pox4SignatureTopic::StackStx, lock_period.into(), @@ -978,7 +978,7 @@ fn pox_lock_unlock() { 1024 * POX_THRESHOLD_STEPS_USTX, &pox_addr, lock_period, - &StacksPublicKey::from_private(&signer_key), + &StacksPublicKey::from_private(signer_key), tip_height, Some(signature), u128::MAX, @@ -2976,7 +2976,7 @@ fn verify_signer_key_sig( ) -> Value { let result: Value = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { chainstate - .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &latest_block, |clarity_tx| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), latest_block, |clarity_tx| { clarity_tx .with_readonly_clarity_env( false, @@ -2992,7 +2992,7 @@ fn verify_signer_key_sig( reward_cycle, topic.get_name_str(), period, - to_hex(&signature), + to_hex(signature), signing_key.to_hex(), amount, max_amount, @@ -3314,10 +3314,10 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { let mut stacker_nonce = 0; let stacker_key = &keys[0]; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - let stacker_addr = key_to_stacks_addr(&stacker_key); + let stacker_addr = key_to_stacks_addr(stacker_key); let signer_key = &keys[1]; let signer_public_key = StacksPublicKey::from_private(signer_key); - let pox_addr = pox_addr_from(&stacker_key); + let pox_addr = pox_addr_from(stacker_key); let second_stacker = &keys[2]; let second_stacker_addr = key_to_stacks_addr(second_stacker); @@ -3333,7 +3333,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { // Test 1: invalid reward cycle let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle - 1, &topic, lock_period, @@ -3342,7 +3342,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { ); let invalid_cycle_nonce = stacker_nonce; let invalid_cycle_stack = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3358,7 +3358,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { stacker_nonce += 1; let signature = 
make_signer_key_signature( &second_stacker_pox_addr, - &signer_key, + signer_key, reward_cycle, &topic, lock_period, @@ -3367,7 +3367,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { ); let invalid_pox_addr_nonce = stacker_nonce; let invalid_pox_addr_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3383,7 +3383,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &second_stacker, + second_stacker, reward_cycle, &topic, lock_period, @@ -3392,7 +3392,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { ); let invalid_key_nonce = stacker_nonce; let invalid_key_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3408,7 +3408,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, &Pox4SignatureTopic::StackExtend, // wrong topic lock_period, @@ -3417,7 +3417,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { ); let invalid_topic_nonce = stacker_nonce; let invalid_topic_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3433,7 +3433,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, &topic, lock_period + 1, // wrong period @@ -3442,7 +3442,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { ); let invalid_period_nonce = stacker_nonce; let invalid_period_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3458,7 +3458,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, &topic, lock_period, @@ -3467,7 +3467,7 @@ fn 
stack_stx_verify_signer_sig(use_nakamoto: bool) { ); let invalid_auth_id_nonce = stacker_nonce; let invalid_auth_id_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3483,7 +3483,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, &topic, lock_period, @@ -3492,7 +3492,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { ); let invalid_amount_nonce = stacker_nonce; let invalid_amount_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3508,7 +3508,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, &topic, lock_period, @@ -3517,7 +3517,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { ); let invalid_max_amount_nonce = stacker_nonce; let invalid_max_amount_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3533,7 +3533,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, &topic, lock_period, @@ -3542,7 +3542,7 @@ fn stack_stx_verify_signer_sig(use_nakamoto: bool) { ); let valid_nonce = stacker_nonce; let valid_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3634,10 +3634,10 @@ fn stack_extend_verify_sig() { let mut stacker_nonce = 0; let stacker_key = &keys[0]; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - let stacker_addr = key_to_stacks_addr(&stacker_key); + let stacker_addr = key_to_stacks_addr(stacker_key); let signer_key = &keys[1]; let signer_public_key = StacksPublicKey::from_private(signer_key); - let pox_addr = pox_addr_from(&signer_key); + let pox_addr = pox_addr_from(signer_key); let 
reward_cycle = get_current_reward_cycle(&peer, &burnchain); let topic = Pox4SignatureTopic::StackExtend; @@ -3645,7 +3645,7 @@ fn stack_extend_verify_sig() { // Setup: stack-stx let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, &Pox4SignatureTopic::StackStx, lock_period, @@ -3654,7 +3654,7 @@ fn stack_extend_verify_sig() { ); let stack_nonce = stacker_nonce; let stack_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -3683,7 +3683,7 @@ fn stack_extend_verify_sig() { stacker_nonce += 1; let invalid_cycle_nonce = stacker_nonce; let invalid_cycle_tx = make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr.clone(), lock_period, @@ -3707,7 +3707,7 @@ fn stack_extend_verify_sig() { ); let invalid_pox_addr_nonce = stacker_nonce; let invalid_pox_addr_tx = make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr.clone(), lock_period, @@ -3731,7 +3731,7 @@ fn stack_extend_verify_sig() { ); let invalid_key_nonce = stacker_nonce; let invalid_key_tx = make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr.clone(), lock_period, @@ -3754,7 +3754,7 @@ fn stack_extend_verify_sig() { ); let invalid_auth_id_nonce = stacker_nonce; let invalid_auth_id_tx = make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr.clone(), lock_period, @@ -3777,7 +3777,7 @@ fn stack_extend_verify_sig() { ); let invalid_max_amount_nonce = stacker_nonce; let invalid_max_amount_tx = make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr.clone(), lock_period, @@ -3800,7 +3800,7 @@ fn stack_extend_verify_sig() { ); let valid_nonce = stacker_nonce; let valid_tx = make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr.clone(), lock_period, @@ -3889,15 +3889,15 @@ fn stack_agg_commit_verify_sig() { let min_ustx = get_stacking_minimum(&mut peer, &latest_block); let stacker_key = &keys[0]; - let stacker_addr = 
PrincipalData::from(key_to_stacks_addr(&stacker_key)); + let stacker_addr = PrincipalData::from(key_to_stacks_addr(stacker_key)); let signer_sk = &keys[1]; let signer_pk = StacksPublicKey::from_private(signer_sk); let delegate_key = &keys[2]; - let delegate_addr = key_to_stacks_addr(&delegate_key); + let delegate_addr = key_to_stacks_addr(delegate_key); - let pox_addr = pox_addr_from(&delegate_key); + let pox_addr = pox_addr_from(delegate_key); let reward_cycle = burnchain .block_height_to_reward_cycle(block_height) @@ -3907,7 +3907,7 @@ fn stack_agg_commit_verify_sig() { // Setup: delegate-stx and delegate-stack-stx let delegate_tx = make_pox_4_delegate_stx( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, delegate_addr.clone().into(), @@ -3917,7 +3917,7 @@ fn stack_agg_commit_verify_sig() { let delegate_stack_stx_nonce = delegate_nonce; let delegate_stack_stx_tx = make_pox_4_delegate_stack_stx( - &delegate_key, + delegate_key, delegate_nonce, stacker_addr, min_ustx, @@ -3933,7 +3933,7 @@ fn stack_agg_commit_verify_sig() { let next_reward_cycle = reward_cycle + 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle, // wrong cycle &topic, 1_u128, @@ -3942,7 +3942,7 @@ fn stack_agg_commit_verify_sig() { ); let invalid_cycle_nonce = delegate_nonce; let invalid_cycle_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -3957,7 +3957,7 @@ fn stack_agg_commit_verify_sig() { let other_pox_addr = pox_addr_from(&Secp256k1PrivateKey::new()); let signature = make_signer_key_signature( &other_pox_addr, - &signer_sk, + signer_sk, next_reward_cycle, &topic, 1_u128, @@ -3966,7 +3966,7 @@ fn stack_agg_commit_verify_sig() { ); let invalid_pox_addr_nonce = delegate_nonce; let invalid_pox_addr_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -3980,7 +3980,7 @@ fn 
stack_agg_commit_verify_sig() { delegate_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &delegate_key, + delegate_key, next_reward_cycle, &topic, 1_u128, @@ -3989,7 +3989,7 @@ fn stack_agg_commit_verify_sig() { ); let invalid_key_nonce = delegate_nonce; let invalid_key_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -4003,7 +4003,7 @@ fn stack_agg_commit_verify_sig() { delegate_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, next_reward_cycle, &topic, 2_u128, // wrong period @@ -4012,7 +4012,7 @@ fn stack_agg_commit_verify_sig() { ); let invalid_period_nonce = delegate_nonce; let invalid_period_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -4026,7 +4026,7 @@ fn stack_agg_commit_verify_sig() { delegate_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, next_reward_cycle, &Pox4SignatureTopic::StackStx, // wrong topic 1_u128, @@ -4035,7 +4035,7 @@ fn stack_agg_commit_verify_sig() { ); let invalid_topic_nonce = delegate_nonce; let invalid_topic_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -4049,7 +4049,7 @@ fn stack_agg_commit_verify_sig() { delegate_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, next_reward_cycle, &topic, 1_u128, @@ -4058,7 +4058,7 @@ fn stack_agg_commit_verify_sig() { ); let invalid_auth_id_nonce = delegate_nonce; let invalid_auth_id_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -4072,7 +4072,7 @@ fn stack_agg_commit_verify_sig() { delegate_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, next_reward_cycle, &topic, 1_u128, @@ -4081,7 +4081,7 
@@ fn stack_agg_commit_verify_sig() { ); let invalid_max_amount_nonce = delegate_nonce; let invalid_max_amount_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -4095,7 +4095,7 @@ fn stack_agg_commit_verify_sig() { delegate_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, next_reward_cycle, &topic, 1_u128, @@ -4104,7 +4104,7 @@ fn stack_agg_commit_verify_sig() { ); let invalid_amount_nonce = delegate_nonce; let invalid_amount_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -4118,7 +4118,7 @@ fn stack_agg_commit_verify_sig() { delegate_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, next_reward_cycle, &topic, 1_u128, @@ -4127,7 +4127,7 @@ fn stack_agg_commit_verify_sig() { ); let valid_nonce = delegate_nonce; let valid_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -4262,7 +4262,7 @@ fn advance_to_block_height( peer.get_burn_block_height(), passed_txs.len() ); - latest_block = Some(tenure_with_txs(peer, &passed_txs, peer_nonce, test_signers)); + latest_block = Some(tenure_with_txs(peer, passed_txs, peer_nonce, test_signers)); passed_txs = &[]; if tx_block.is_none() { tx_block = Some(observer.get_blocks().last().unwrap().clone()); @@ -4690,7 +4690,7 @@ fn stack_agg_increase() { burnchain_unlock_height: Value::UInt(0), }; - check_pox_print_event(&aggregation_increase_event, common_data, increase_op_data); + check_pox_print_event(aggregation_increase_event, common_data, increase_op_data); // Check that Bob's second pool has an assigned reward index of 1 let bob_aggregate_commit_reward_index = &tx_block @@ -4716,10 +4716,10 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { let mut stacker_nonce = 0; let stacker_key = &keys[0]; let min_ustx = 
get_stacking_minimum(&mut peer, &latest_block); - let stacker_addr = key_to_stacks_addr(&stacker_key); + let stacker_addr = key_to_stacks_addr(stacker_key); let signer_sk = &keys[1]; let signer_pk = StacksPublicKey::from_private(signer_sk); - let pox_addr = pox_addr_from(&signer_sk); + let pox_addr = pox_addr_from(signer_sk); let reward_cycle = get_current_reward_cycle(&peer, &burnchain); let topic = Pox4SignatureTopic::StackIncrease; @@ -4727,7 +4727,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { // Setup: stack-stx let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle, &Pox4SignatureTopic::StackStx, lock_period, @@ -4736,7 +4736,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let stack_nonce = stacker_nonce; let stack_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -4752,7 +4752,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle - 1, // invalid &topic, lock_period, @@ -4761,7 +4761,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let invalid_cycle_nonce = stacker_nonce; let invalid_cycle_tx = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -4775,7 +4775,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { let other_pox_addr = pox_addr_from(&Secp256k1PrivateKey::new()); let signature = make_signer_key_signature( &other_pox_addr, // different than existing - &signer_sk, + signer_sk, reward_cycle, &topic, lock_period, @@ -4784,7 +4784,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let invalid_pox_addr_nonce = stacker_nonce; let invalid_pox_addr_tx = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -4797,7 +4797,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { 
stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &stacker_key, // different than signer + stacker_key, // different than signer reward_cycle, &topic, lock_period, @@ -4806,7 +4806,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let invalid_key_nonce = stacker_nonce; let invalid_key_tx = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -4819,7 +4819,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle, &topic, lock_period + 1, // wrong @@ -4828,7 +4828,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let invalid_period_nonce = stacker_nonce; let invalid_period_tx = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -4841,7 +4841,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle, &Pox4SignatureTopic::StackExtend, // wrong topic lock_period, @@ -4850,7 +4850,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let invalid_topic_nonce = stacker_nonce; let invalid_topic_tx = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -4863,7 +4863,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle, &topic, lock_period, @@ -4872,7 +4872,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let invalid_auth_id_nonce = stacker_nonce; let invalid_auth_id_tx = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -4885,7 +4885,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { stacker_nonce += 1; let signature = 
make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle, &topic, lock_period, @@ -4894,7 +4894,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let invalid_max_amount_nonce = stacker_nonce; let invalid_max_amount_tx = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -4907,7 +4907,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle, &topic, lock_period, @@ -4916,7 +4916,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let invalid_amount_nonce = stacker_nonce; let invalid_amount_tx = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -4929,7 +4929,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle, &Pox4SignatureTopic::StackIncrease, lock_period, @@ -4938,7 +4938,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { ); let valid_nonce = stacker_nonce; let stack_increase = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -5006,10 +5006,10 @@ fn stack_increase_different_signer_keys(use_nakamoto: bool) { let mut stacker_nonce = 0; let stacker_key = &keys[0]; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - let stacker_addr = key_to_stacks_addr(&stacker_key); + let stacker_addr = key_to_stacks_addr(stacker_key); let signer_sk = &keys[1]; let signer_pk = StacksPublicKey::from_private(signer_sk); - let pox_addr = pox_addr_from(&signer_sk); + let pox_addr = pox_addr_from(signer_sk); // Second key is used in `stack-extend` let second_signer_sk = &keys[2]; @@ -5020,7 +5020,7 @@ fn stack_increase_different_signer_keys(use_nakamoto: bool) { // Setup: stack-stx let signature = make_signer_key_signature( 
&pox_addr, - &signer_sk, + signer_sk, reward_cycle, &Pox4SignatureTopic::StackStx, lock_period, @@ -5029,7 +5029,7 @@ fn stack_increase_different_signer_keys(use_nakamoto: bool) { ); let stack_nonce = stacker_nonce; let stack_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -5044,7 +5044,7 @@ fn stack_increase_different_signer_keys(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &second_signer_sk, + second_signer_sk, reward_cycle, &Pox4SignatureTopic::StackExtend, lock_period, @@ -5053,7 +5053,7 @@ fn stack_increase_different_signer_keys(use_nakamoto: bool) { ); let extend_nonce = stacker_nonce; let extend_tx = make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr.clone(), lock_period, @@ -5066,7 +5066,7 @@ fn stack_increase_different_signer_keys(use_nakamoto: bool) { stacker_nonce += 1; let signature = make_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle, &Pox4SignatureTopic::StackIncrease, 2, // 2 cycles total (1 from stack-stx, 1 from extend) @@ -5075,7 +5075,7 @@ fn stack_increase_different_signer_keys(use_nakamoto: bool) { ); let increase_nonce = stacker_nonce; let stack_increase = make_pox_4_stack_increase( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &signer_pk, @@ -5212,11 +5212,11 @@ fn stack_stx_signer_key(use_nakamoto: bool) { // (start-burn-ht uint) // (lock-period uint) // (signer-key (buff 33))) - let pox_addr = pox_addr_from(&stacker_key); + let pox_addr = pox_addr_from(stacker_key); let pox_addr_val = Value::Tuple(pox_addr.clone().as_clarity_tuple().unwrap()); let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, &Pox4SignatureTopic::StackStx, 2_u128, @@ -5250,7 +5250,7 @@ fn stack_stx_signer_key(use_nakamoto: bool) { .expect_tuple(); let stacker_txs = - get_last_block_sender_transactions(&observer, key_to_stacks_addr(&stacker_key)); + 
get_last_block_sender_transactions(&observer, key_to_stacks_addr(stacker_key)); let stacking_tx = stacker_txs.get(0).unwrap(); let events: Vec<&STXLockEventData> = stacking_tx @@ -5312,7 +5312,7 @@ fn stack_stx_signer_auth(use_nakamoto: bool) { let reward_cycle = get_current_reward_cycle(&peer, &burnchain); - let pox_addr = pox_addr_from(&stacker_key); + let pox_addr = pox_addr_from(stacker_key); let pox_addr_val = Value::Tuple(pox_addr.clone().as_clarity_tuple().unwrap()); let lock_period = 6; @@ -5320,7 +5320,7 @@ fn stack_stx_signer_auth(use_nakamoto: bool) { let failed_stack_nonce = stacker_nonce; let failed_stack_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -5335,7 +5335,7 @@ fn stack_stx_signer_auth(use_nakamoto: bool) { let enable_auth_nonce = signer_nonce; let enable_auth_tx = make_pox_4_set_signer_key_auth( &pox_addr, - &signer_key, + signer_key, reward_cycle, &topic, lock_period, @@ -5350,7 +5350,7 @@ fn stack_stx_signer_auth(use_nakamoto: bool) { stacker_nonce += 1; let successful_stack_nonce = stacker_nonce; let valid_stack_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -5374,7 +5374,7 @@ fn stack_stx_signer_auth(use_nakamoto: bool) { .expect_tuple(); let stacker_txs = - get_last_block_sender_transactions(&observer, key_to_stacks_addr(&stacker_key)); + get_last_block_sender_transactions(&observer, key_to_stacks_addr(stacker_key)); let expected_error = Value::error(Value::Int(19)).unwrap(); @@ -5391,7 +5391,7 @@ fn stack_stx_signer_auth(use_nakamoto: bool) { .expect_result_ok() .expect("Expected ok result from stack-stx tx"); - let signer_txs = get_last_block_sender_transactions(&observer, key_to_stacks_addr(&signer_key)); + let signer_txs = get_last_block_sender_transactions(&observer, key_to_stacks_addr(signer_key)); // enable auth worked let enable_tx_result = signer_txs @@ -5417,15 +5417,15 @@ fn stack_agg_commit_signer_auth(use_nakamoto: bool) { let 
min_ustx = get_stacking_minimum(&mut peer, &latest_block); let stacker_key = &keys[0]; - let stacker_addr = PrincipalData::from(key_to_stacks_addr(&stacker_key)); + let stacker_addr = PrincipalData::from(key_to_stacks_addr(stacker_key)); let signer_sk = &keys[1]; let signer_pk = StacksPublicKey::from_private(signer_sk); let delegate_key = &keys[2]; - let delegate_addr = key_to_stacks_addr(&delegate_key); + let delegate_addr = key_to_stacks_addr(delegate_key); - let pox_addr = pox_addr_from(&delegate_key); + let pox_addr = pox_addr_from(delegate_key); let reward_cycle = burnchain .block_height_to_reward_cycle(block_height) @@ -5435,7 +5435,7 @@ fn stack_agg_commit_signer_auth(use_nakamoto: bool) { // Setup: delegate-stx and delegate-stack-stx let delegate_tx = make_pox_4_delegate_stx( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, delegate_addr.clone().into(), @@ -5445,7 +5445,7 @@ fn stack_agg_commit_signer_auth(use_nakamoto: bool) { let delegate_stack_stx_nonce = delegate_nonce; let delegate_stack_stx_tx = make_pox_4_delegate_stack_stx( - &delegate_key, + delegate_key, delegate_nonce, stacker_addr, min_ustx, @@ -5460,7 +5460,7 @@ fn stack_agg_commit_signer_auth(use_nakamoto: bool) { delegate_nonce += 1; let invalid_agg_nonce = delegate_nonce; let invalid_agg_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -5474,7 +5474,7 @@ fn stack_agg_commit_signer_auth(use_nakamoto: bool) { let enable_auth_nonce = 0; let enable_auth_tx = make_pox_4_set_signer_key_auth( &pox_addr, - &signer_sk, + signer_sk, next_reward_cycle, &topic, 1, @@ -5489,7 +5489,7 @@ fn stack_agg_commit_signer_auth(use_nakamoto: bool) { delegate_nonce += 1; let valid_agg_nonce = delegate_nonce; let valid_agg_tx = make_pox_4_aggregation_commit_indexed( - &delegate_key, + delegate_key, delegate_nonce, &pox_addr, next_reward_cycle, @@ -5536,10 +5536,10 @@ fn stack_extend_signer_auth(use_nakamoto: bool) { let mut 
stacker_nonce = 0; let stacker_key = &keys[0]; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - let stacker_addr = key_to_stacks_addr(&stacker_key); + let stacker_addr = key_to_stacks_addr(stacker_key); let signer_key = &keys[1]; let signer_public_key = StacksPublicKey::from_private(signer_key); - let pox_addr = pox_addr_from(&signer_key); + let pox_addr = pox_addr_from(signer_key); let reward_cycle = get_current_reward_cycle(&peer, &burnchain); let topic = Pox4SignatureTopic::StackExtend; @@ -5547,7 +5547,7 @@ fn stack_extend_signer_auth(use_nakamoto: bool) { // Setup: stack-stx let signature = make_signer_key_signature( &pox_addr, - &signer_key, + signer_key, reward_cycle, &Pox4SignatureTopic::StackStx, lock_period, @@ -5556,7 +5556,7 @@ fn stack_extend_signer_auth(use_nakamoto: bool) { ); let stack_nonce = stacker_nonce; let stack_tx = make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -5572,7 +5572,7 @@ fn stack_extend_signer_auth(use_nakamoto: bool) { stacker_nonce += 1; let invalid_extend_nonce = stacker_nonce; let invalid_cycle_tx = make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr.clone(), lock_period, @@ -5586,7 +5586,7 @@ fn stack_extend_signer_auth(use_nakamoto: bool) { let enable_auth_nonce = 0; let enable_auth_tx = make_pox_4_set_signer_key_auth( &pox_addr, - &signer_key, + signer_key, reward_cycle, &topic, lock_period, @@ -5601,7 +5601,7 @@ fn stack_extend_signer_auth(use_nakamoto: bool) { stacker_nonce += 1; let valid_extend_nonce = stacker_nonce; let valid_tx = make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr, lock_period, @@ -5642,12 +5642,12 @@ fn test_set_signer_key_auth(use_nakamoto: bool) { let alice_nonce = 0; let alice_key = &keys[0]; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - let alice_addr = key_to_stacks_addr(&alice_key); + let alice_addr = key_to_stacks_addr(alice_key); let mut signer_nonce = 0; let signer_key = 
&keys[1]; let signer_public_key = StacksPublicKey::from_private(signer_key); - let signer_addr = key_to_stacks_addr(&signer_key); - let pox_addr = pox_addr_from(&signer_key); + let signer_addr = key_to_stacks_addr(signer_key); + let pox_addr = pox_addr_from(signer_key); let current_reward_cycle = get_current_reward_cycle(&peer, &burnchain); @@ -5655,13 +5655,13 @@ fn test_set_signer_key_auth(use_nakamoto: bool) { let invalid_enable_nonce = alice_nonce; let invalid_enable_tx = make_pox_4_set_signer_key_auth( &pox_addr, - &signer_key, + signer_key, 1, &Pox4SignatureTopic::StackStx, lock_period, true, invalid_enable_nonce, - Some(&alice_key), + Some(alice_key), u128::MAX, 1, ); @@ -5671,13 +5671,13 @@ fn test_set_signer_key_auth(use_nakamoto: bool) { signer_nonce += 1; let invalid_tx_period: StacksTransaction = make_pox_4_set_signer_key_auth( &pox_addr, - &signer_key, + signer_key, current_reward_cycle, &Pox4SignatureTopic::StackStx, 0, false, signer_invalid_period_nonce, - Some(&signer_key), + Some(signer_key), u128::MAX, 1, ); @@ -5687,13 +5687,13 @@ fn test_set_signer_key_auth(use_nakamoto: bool) { // Test that confirmed reward cycle is at least current reward cycle let invalid_tx_cycle: StacksTransaction = make_pox_4_set_signer_key_auth( &pox_addr, - &signer_key, + signer_key, 1, &Pox4SignatureTopic::StackStx, 1, false, signer_invalid_cycle_nonce, - Some(&signer_key), + Some(signer_key), u128::MAX, 1, ); @@ -5701,7 +5701,7 @@ fn test_set_signer_key_auth(use_nakamoto: bool) { // Disable auth for `signer-key` let disable_auth_tx: StacksTransaction = make_pox_4_set_signer_key_auth( &pox_addr, - &signer_key, + signer_key, current_reward_cycle, &Pox4SignatureTopic::StackStx, lock_period, @@ -5780,7 +5780,7 @@ fn test_set_signer_key_auth(use_nakamoto: bool) { let enable_auth_nonce = signer_nonce; let enable_auth_tx = make_pox_4_set_signer_key_auth( &pox_addr, - &signer_key, + signer_key, current_reward_cycle, &Pox4SignatureTopic::StackStx, lock_period, @@ -5817,7 
+5817,7 @@ fn test_set_signer_key_auth(use_nakamoto: bool) { let disable_auth_nonce = signer_nonce; let disable_auth_tx = make_pox_4_set_signer_key_auth( &pox_addr, - &signer_key, + signer_key, current_reward_cycle, &Pox4SignatureTopic::StackStx, lock_period, @@ -5867,7 +5867,7 @@ fn stack_extend_signer_key(use_nakamoto: bool) { let stacker_key = &keys[0]; let min_ustx = get_stacking_minimum(&mut peer, &latest_block) * 2; - let pox_addr = pox_addr_from(&stacker_key); + let pox_addr = pox_addr_from(stacker_key); let pox_addr_val = Value::Tuple(pox_addr.clone().as_clarity_tuple().unwrap()); let signer_sk = Secp256k1PrivateKey::from_seed(&[0]); @@ -5897,7 +5897,7 @@ fn stack_extend_signer_key(use_nakamoto: bool) { ); let txs = vec![make_pox_4_lockup( - &stacker_key, + stacker_key, stacker_nonce, min_ustx, &pox_addr, @@ -5924,7 +5924,7 @@ fn stack_extend_signer_key(use_nakamoto: bool) { ); let update_txs = vec![make_pox_4_extend( - &stacker_key, + stacker_key, stacker_nonce, pox_addr.clone(), 1, @@ -6015,7 +6015,7 @@ fn delegate_stack_stx_signer_key(use_nakamoto: bool) { // (delegate-to principal) // (until-burn-ht (optional uint)) // (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) }))) - let pox_addr = pox_addr_from(&stacker_key); + let pox_addr = pox_addr_from(stacker_key); let pox_addr_val = Value::Tuple(pox_addr.clone().as_clarity_tuple().unwrap()); let signer_sk = Secp256k1PrivateKey::from_seed(&[1, 1, 1]); let signer_key = Secp256k1PublicKey::from_private(&signer_sk); @@ -6476,7 +6476,7 @@ fn stack_increase(use_nakamoto: bool) { burnchain_unlock_height: Value::UInt(expected_unlock_height as u128), }; - check_pox_print_event(&increase_event, common_data, increase_op_data); + check_pox_print_event(increase_event, common_data, increase_op_data); // Testing stack_increase response is equal to expected response // Test is straightforward because 'stack-increase' in PoX-4 is the same as PoX-3 @@ -6695,7 +6695,7 @@ pub fn pox_4_scenario_test_setup<'a>( 
peer_config.burnchain.pox_constants.reward_cycle_length = 20; peer_config.burnchain.pox_constants.prepare_length = 5; - let mut peer = TestPeer::new_with_observer(peer_config.clone(), Some(&observer)); + let mut peer = TestPeer::new_with_observer(peer_config.clone(), Some(observer)); let mut peer_nonce = 0; @@ -8561,7 +8561,7 @@ fn delegate_stack_increase_err(use_nakamoto: bool) { // Bob's Aggregate Increase let bobs_aggregate_increase = make_pox_4_aggregation_increase( - &bob_delegate_key, + bob_delegate_key, bob_nonce, &pox_addr, next_reward_cycle.into(), @@ -8662,11 +8662,11 @@ pub fn get_signer_key_authorization_pox_4( ) -> Option { with_clarity_db_ro(peer, tip, |db| { let lookup_tuple = make_signer_key_authorization_lookup_key( - &pox_addr, + pox_addr, reward_cycle, - &topic, + topic, period, - &signer_key, + signer_key, max_amount, auth_id, ); @@ -8700,11 +8700,11 @@ pub fn get_signer_key_authorization_used_pox_4( ) -> bool { with_clarity_db_ro(peer, tip, |db| { let lookup_tuple = make_signer_key_authorization_lookup_key( - &pox_addr, + pox_addr, reward_cycle, - &topic, + topic, period, - &signer_key, + signer_key, max_amount, auth_id, ); @@ -8785,8 +8785,8 @@ pub fn get_delegation_state_pox_4( } pub fn get_stacking_minimum(peer: &mut TestPeer, latest_block: &StacksBlockId) -> u128 { - with_sortdb(peer, |ref mut chainstate, ref sortdb| { - chainstate.get_stacking_minimum(sortdb, &latest_block) + with_sortdb(peer, |ref mut chainstate, sortdb| { + chainstate.get_stacking_minimum(sortdb, latest_block) }) .unwrap() } @@ -8827,7 +8827,7 @@ pub fn prepare_pox4_test<'a>( signer_private_key: key.clone(), stacker_private_key: key.clone(), amount: 1024 * POX_THRESHOLD_STEPS_USTX, - pox_addr: Some(pox_addr_from(&key)), + pox_addr: Some(pox_addr_from(key)), max_amount: None, }) .collect::>(); @@ -8999,7 +8999,7 @@ fn missed_slots_no_unlock() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - &function_name!(), + function_name!(), 
Some(epochs.clone()), Some(&observer), ); @@ -9252,7 +9252,7 @@ fn no_lockups_2_5() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - &function_name!(), + function_name!(), Some(epochs.clone()), Some(&observer), ); diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index bf3b5f312c..efcf9ae6bd 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -445,12 +445,12 @@ fn advance_blocks( test_signers, |miner, chainstate, sortdb, blocks| { if blocks.len() < num_blocks as usize { - let addr = key_to_stacks_addr(&stacker_private_key); + let addr = key_to_stacks_addr(stacker_private_key); let account = get_account(chainstate, sortdb, &addr); let stx_transfer = make_token_transfer( chainstate, sortdb, - &stacker_private_key, + stacker_private_key, account.nonce, 1, 1, diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 0ad5687f12..d0ff67e2d4 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -420,7 +420,7 @@ impl StacksChainState { panic!(); }); - db.set_account_nonce(&principal, next_nonce)?; + db.set_account_nonce(principal, next_nonce)?; Ok(()) }) .unwrap_or_else(|e| { @@ -1190,7 +1190,7 @@ mod test { new_tip.burn_header_height, new_tip.burn_header_timestamp, new_tip.microblock_tail.clone(), - &block_reward, + block_reward, None, &ExecutionCost::ZERO, 123, diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index b29ffe022e..2e768e70ec 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -668,8 +668,7 @@ impl StacksChainState { consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result<(), Error> { - let block_path = - 
StacksChainState::make_block_dir(blocks_path, consensus_hash, &block_hash)?; + let block_path = StacksChainState::make_block_dir(blocks_path, consensus_hash, block_hash)?; StacksChainState::atomic_file_write(&block_path, &vec![]) } @@ -680,7 +679,7 @@ impl StacksChainState { block_header_hash: &BlockHeaderHash, ) { let block_path = - StacksChainState::make_block_dir(blocks_dir, consensus_hash, &block_header_hash) + StacksChainState::make_block_dir(blocks_dir, consensus_hash, block_header_hash) .expect("FATAL: failed to create block directory"); let sz = fs::metadata(&block_path) @@ -1156,7 +1155,7 @@ impl StacksChainState { ) -> Result, Error> { match StacksChainState::load_staging_microblock_info( blocks_conn, - &parent_index_hash, + parent_index_hash, microblock_hash, )? { Some(mut staging_microblock) => { @@ -1484,7 +1483,7 @@ impl StacksChainState { // find all blocks that we have that could be this block's parent let sql = "SELECT * FROM snapshots WHERE winning_stacks_block_hash = ?1"; let possible_parent_snapshots = - query_rows::(&sort_handle, &sql, &[parent_block_hash])?; + query_rows::(&sort_handle, sql, &[parent_block_hash])?; for possible_parent in possible_parent_snapshots.into_iter() { let burn_ancestor = sort_handle.get_block_snapshot(&possible_parent.burn_header_hash)?; @@ -1521,7 +1520,7 @@ impl StacksChainState { // find all blocks that we have that could be this block's parent let sql = "SELECT * FROM snapshots WHERE winning_stacks_block_hash = ?1"; let possible_parent_snapshots = - query_rows::(&sort_handle, &sql, &[&header.parent_block])?; + query_rows::(&sort_handle, sql, &[&header.parent_block])?; for possible_parent in possible_parent_snapshots.into_iter() { let burn_ancestor = sort_handle.get_block_snapshot(&possible_parent.burn_header_hash)?; @@ -1564,7 +1563,7 @@ impl StacksChainState { let block_hash = block.block_hash(); let index_block_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash); + 
StacksBlockHeader::make_index_block_hash(consensus_hash, &block_hash); let attachable = { // if this block has an unprocessed staging parent, then it's not attachable until its parent is. @@ -1572,14 +1571,14 @@ impl StacksChainState { let has_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 LIMIT 1"; let has_parent_args = params![block.header.parent_block, parent_consensus_hash]; let has_unprocessed_parent_rows = query_row_columns::( - &tx, + tx, has_unprocessed_parent_sql, has_parent_args, "anchored_block_hash", ) .map_err(Error::DBError)?; let has_parent_rows = query_row_columns::( - &tx, + tx, has_parent_sql, has_parent_args, "anchored_block_hash", @@ -1642,7 +1641,7 @@ impl StacksChainState { u64_to_sql(download_time)?, ]; - tx.execute(&sql, args) + tx.execute(sql, args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; StacksChainState::store_block(blocks_path, consensus_hash, block)?; @@ -1653,7 +1652,7 @@ impl StacksChainState { "UPDATE staging_blocks SET attachable = 0 WHERE parent_anchored_block_hash = ?1"; let children_args = [&block_hash]; - tx.execute(&children_sql, &children_args) + tx.execute(children_sql, &children_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; Ok(()) @@ -1707,7 +1706,7 @@ impl StacksChainState { 0, ]; - tx.execute(&sql, args) + tx.execute(sql, args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; // store microblock bytes @@ -1716,7 +1715,7 @@ impl StacksChainState { VALUES (?1, ?2)"; let block_args = params![microblock.block_hash(), microblock_bytes]; - tx.execute(&block_sql, block_args) + tx.execute(block_sql, block_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; Ok(()) @@ -1808,7 +1807,7 @@ impl StacksChainState { parent_block_hash: &BlockHeaderHash, microblock_hash: &BlockHeaderHash, ) -> Result, Error> { - StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE anchored_block_hash = 
?1 AND microblock_hash = ?2 AND consensus_hash = ?3", &[&parent_block_hash, microblock_hash, &parent_consensus_hash]) + StacksChainState::read_i64s(self.db(), "SELECT processed FROM staging_microblocks WHERE anchored_block_hash = ?1 AND microblock_hash = ?2 AND consensus_hash = ?3", &[&parent_block_hash, microblock_hash, &parent_consensus_hash]) .and_then(|processed| { if processed.is_empty() { Ok(None) @@ -1833,8 +1832,8 @@ impl StacksChainState { ) -> Result { let (parent_consensus_hash, parent_block_hash) = match StacksChainState::get_parent_block_header_hashes( - &self.db(), - &child_index_block_hash, + self.db(), + child_index_block_hash, )? { Some(x) => x, None => { @@ -1848,7 +1847,7 @@ impl StacksChainState { let parent_microblock_hash = match StacksChainState::get_staging_block_parent_microblock_hash( - &self.db(), + self.db(), child_index_block_hash, )? { Some(x) => x, @@ -1932,8 +1931,8 @@ impl StacksChainState { // TODO: just do a stat? cache this? match StacksChainState::load_block_header( &self.blocks_path, - &consensus_hash, - &stacks_header_hash, + consensus_hash, + stacks_header_hash, ) { Ok(Some(hdr)) => { test_debug!( @@ -2261,11 +2260,11 @@ impl StacksChainState { // and `heaviest_am` against each other depending on their lengths. 
if (stacks_tip_affirmation_map.len() > heaviest_am.len() && stacks_tip_affirmation_map - .find_divergence(&heaviest_am) + .find_divergence(heaviest_am) .is_some()) || (stacks_tip_affirmation_map.len() <= heaviest_am.len() && heaviest_am - .find_divergence(&stacks_tip_affirmation_map) + .find_divergence(stacks_tip_affirmation_map) .is_some()) { return Ok(false); @@ -2428,7 +2427,7 @@ impl StacksChainState { }; let stacks_block_id = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash); + StacksBlockHeader::make_index_block_hash(consensus_hash, anchored_block_hash); if !block.processed { if !has_stored_block { if accept { @@ -2620,7 +2619,7 @@ impl StacksChainState { // garbage-collect for mblock_hash in orphaned_microblock_hashes.iter() { - StacksChainState::delete_microblock_data(tx, &mblock_hash)?; + StacksChainState::delete_microblock_data(tx, mblock_hash)?; } for mblock_hash in orphaned_microblock_hashes.iter() { @@ -2704,8 +2703,8 @@ impl StacksChainState { ) -> Result { let (parent_consensus_hash, parent_block_hash) = match StacksChainState::get_parent_block_header_hashes( - &self.db(), - &child_index_block_hash, + self.db(), + child_index_block_hash, )? 
{ Some(x) => x, None => { @@ -2714,7 +2713,7 @@ impl StacksChainState { }; let parent_index_block_hash = StacksBlockHeader::make_index_block_hash(&parent_consensus_hash, &parent_block_hash); - StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence = ?2", &[&parent_index_block_hash, &seq]) + StacksChainState::read_i64s(self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence = ?2", &[&parent_index_block_hash, &seq]) .and_then(|processed| { if processed.is_empty() { Ok(false) @@ -2737,7 +2736,7 @@ impl StacksChainState { let sql = "SELECT 1 FROM staging_microblocks WHERE index_microblock_hash = ?1 AND processed = 1 AND orphaned = 0"; let args = params![index_microblock_hash]; let res = conn - .query_row(&sql, args, |_r| Ok(())) + .query_row(sql, args, |_r| Ok(())) .optional() .expect("DB CORRUPTION: block header DB corrupted!") .is_some(); @@ -2751,7 +2750,7 @@ impl StacksChainState { ) -> Result, Error> { // get parent's consensus hash and block hash let (parent_consensus_hash, _) = match StacksChainState::get_parent_block_header_hashes( - &self.db(), + self.db(), child_index_block_hash, )? { Some(x) => x, @@ -2763,7 +2762,7 @@ impl StacksChainState { // get the child's staging block info let child_block_info = - match StacksChainState::load_staging_block_info(&self.db(), child_index_block_hash)? { + match StacksChainState::load_staging_block_info(self.db(), child_index_block_hash)? 
{ Some(hdr) => hdr, None => { test_debug!("No such block: {:?}", &child_index_block_hash); @@ -2786,7 +2785,7 @@ impl StacksChainState { parent_index_block_hash: &StacksBlockId, min_seq: u16, ) -> Result { - StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 LIMIT 1", &[&parent_index_block_hash, &min_seq]) + StacksChainState::read_i64s(self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 LIMIT 1", &[&parent_index_block_hash, &min_seq]) .and_then(|processed| Ok(!processed.is_empty())) } @@ -2799,7 +2798,7 @@ impl StacksChainState { parent_index_block_hash: &StacksBlockId, microblock_hash: &BlockHeaderHash, ) -> Result { - StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 LIMIT 1", &[parent_index_block_hash, microblock_hash]) + StacksChainState::read_i64s(self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 LIMIT 1", &[parent_index_block_hash, microblock_hash]) .and_then(|processed| Ok(!processed.is_empty())) } @@ -2811,7 +2810,7 @@ impl StacksChainState { parent_index_block_hash: &StacksBlockId, ) -> Result { StacksChainState::read_i64s( - &self.db(), + self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 LIMIT 1", &[&parent_index_block_hash], ) @@ -2849,7 +2848,7 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result, Error> { StacksChainState::inner_get_block_header_hashes( - &self.db(), + self.db(), index_block_hash, "consensus_hash", "anchored_block_hash", @@ -3011,7 +3010,7 @@ impl StacksChainState { } let signed_microblocks = if verify_signatures { - StacksChainState::extract_signed_microblocks(&parent_anchored_block_header, microblocks) + StacksChainState::extract_signed_microblocks(parent_anchored_block_header, microblocks) } else { 
microblocks.to_owned() }; @@ -3319,7 +3318,7 @@ impl StacksChainState { let index_block_hash = StacksBlockHeader::make_index_block_hash(consensus_hash, &block.block_hash()); if StacksChainState::has_stored_block( - &conn, + conn, blocks_path, consensus_hash, &block.block_hash(), @@ -3339,7 +3338,7 @@ impl StacksChainState { &index_block_hash ); return Ok(true); - } else if StacksChainState::has_valid_block_indexed(&blocks_path, &index_block_hash)? { + } else if StacksChainState::has_valid_block_indexed(blocks_path, &index_block_hash)? { debug!( "Block already stored to chunk store: {}/{} ({})", consensus_hash, @@ -3459,7 +3458,7 @@ impl StacksChainState { &mut block_tx, &blocks_path, consensus_hash, - &block, + block, parent_consensus_hash, commit_burn, sortition_burn, @@ -3817,7 +3816,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT processed_time - arrival_time FROM staging_blocks WHERE processed = 1 AND height >= ?1 AND height < ?2"; let args = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; - let list = query_rows::(blocks_conn, &sql, args)?; + let list = query_rows::(blocks_conn, sql, args)?; Ok(list) } @@ -3830,7 +3829,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT download_time FROM staging_blocks WHERE height >= ?1 AND height < ?2"; let args = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; - let list = query_rows::(blocks_conn, &sql, args)?; + let list = query_rows::(blocks_conn, sql, args)?; Ok(list) } @@ -3853,9 +3852,9 @@ impl StacksChainState { // go through staging blocks and see if any of them match headers, are attachable, and are // recent (i.e. less than 10 minutes old) // pick randomly -- don't allow the network sender to choose the processing order! 
- let sql = "SELECT * FROM staging_blocks WHERE processed = 0 AND attachable = 1 AND orphaned = 0 ORDER BY RANDOM()".to_string(); + let sql = "SELECT * FROM staging_blocks WHERE processed = 0 AND attachable = 1 AND orphaned = 0 ORDER BY RANDOM()"; let mut stmt = blocks_tx - .prepare(&sql) + .prepare(sql) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; let mut rows = stmt @@ -3863,7 +3862,7 @@ impl StacksChainState { .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; while let Some(row) = rows.next().map_err(|e| db_error::SqliteError(e))? { - let mut candidate = StagingBlock::from_row(&row).map_err(Error::DBError)?; + let mut candidate = StagingBlock::from_row(row).map_err(Error::DBError)?; // block must correspond to a valid PoX snapshot let sn_opt = @@ -4713,7 +4712,7 @@ impl StacksChainState { mainnet: bool, latest_matured_miners: &[MinerPaymentSchedule], ) -> Result { - let parent_miner = if let Some(ref miner) = latest_matured_miners.first().as_ref() { + let parent_miner = if let Some(miner) = latest_matured_miners.first().as_ref() { StacksChainState::get_scheduled_block_rewards_at_block( conn, &StacksBlockHeader::make_index_block_hash( @@ -5093,7 +5092,7 @@ impl StacksChainState { // microblock stream is non-empty. let parent_block_cost = if miner_id_opt.is_none() || !parent_microblocks.is_empty() { let cost = StacksChainState::get_stacks_block_anchored_cost( - &chainstate_tx.deref().deref(), + chainstate_tx.deref().deref(), &parent_index_hash, )? 
.ok_or_else(|| { @@ -5160,7 +5159,7 @@ impl StacksChainState { let (microblock_fees, microblock_burns, microblock_txs_receipts) = match StacksChainState::process_microblocks_transactions( &mut clarity_tx, - &parent_microblocks, + parent_microblocks, microblock_ast_rules, ) { Ok((fees, burns, events)) => (fees, burns, events), @@ -5219,7 +5218,7 @@ impl StacksChainState { signer_set_calc = NakamotoSigners::check_and_handle_prepare_phase_start( &mut clarity_tx, first_block_height.into(), - &pox_constants, + pox_constants, burn_tip_height.into(), // this is the block height that the write occurs *during* chain_tip.stacks_block_height + 1, @@ -5495,8 +5494,8 @@ impl StacksChainState { // get the burnchain block that precedes this block's sortition let parent_burn_hash = SortitionDB::get_block_snapshot_consensus( - &burn_dbconn.tx(), - &chain_tip_consensus_hash, + burn_dbconn.tx(), + chain_tip_consensus_hash, )? .expect("BUG: Failed to load snapshot for block snapshot during Stacks block processing") .parent_burn_header_hash; @@ -5522,9 +5521,9 @@ impl StacksChainState { clarity_instance, burn_dbconn, burn_dbconn, - &burn_dbconn.tx(), + burn_dbconn.tx(), pox_constants, - &parent_chain_tip, + parent_chain_tip, parent_burn_hash, chain_tip_burn_header_height, parent_consensus_hash, @@ -6547,7 +6546,7 @@ impl StacksChainState { SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; let args = params![consensus_hash, block_bhh]; - query_row(&self.db(), sql, args).map_err(Error::DBError) + query_row(self.db(), sql, args).map_err(Error::DBError) } /// Get all possible canonical chain tips @@ -6557,7 +6556,7 @@ impl StacksChainState { let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; let args = params![consensus_hash, block_bhh]; let Some(staging_block): 
Option = - query_row(&self.db(), sql, args).map_err(Error::DBError)? + query_row(self.db(), sql, args).map_err(Error::DBError)? else { return Ok(vec![]); }; @@ -6569,7 +6568,7 @@ impl StacksChainState { let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND height = ?1"; let args = params![u64_to_sql(height)?]; - query_rows(&self.db(), sql, args).map_err(Error::DBError) + query_rows(self.db(), sql, args).map_err(Error::DBError) } /// Get the parent block of `staging_block`. @@ -6582,7 +6581,7 @@ impl StacksChainState { staging_block.parent_consensus_hash, staging_block.parent_anchored_block_hash, ]; - query_row(&self.db(), sql, args).map_err(Error::DBError) + query_row(self.db(), sql, args).map_err(Error::DBError) } /// Get the height of a staging block @@ -6593,7 +6592,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT height FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; let args = params![consensus_hash, block_hash]; - query_row(&self.db(), sql, args).map_err(Error::DBError) + query_row(self.db(), sql, args).map_err(Error::DBError) } /// This runs checks for the validity of a transaction that @@ -6673,7 +6672,7 @@ impl StacksChainState { // 2: it must be validly signed. 
let epoch = clarity_connection.get_epoch().clone(); - StacksChainState::process_transaction_precheck(&chainstate_config, &tx, epoch) + StacksChainState::process_transaction_precheck(chainstate_config, tx, epoch) .map_err(|e| MemPoolRejection::FailedToValidate(e))?; // 3: it must pay a tx fee @@ -6695,7 +6694,7 @@ impl StacksChainState { // 5: the account nonces must be correct let (origin, payer) = - match StacksChainState::check_transaction_nonces(clarity_connection, &tx, true) { + match StacksChainState::check_transaction_nonces(clarity_connection, tx, true) { Ok(x) => x, // if errored, check if MEMPOOL_TX_CHAINING would admit this TX Err((e, (origin, payer))) => { @@ -6853,7 +6852,7 @@ impl StacksChainState { let epoch = clarity_connection.get_epoch().clone(); clarity_connection.with_analysis_db_readonly(|db| { let function_type = db - .get_public_function_type(&contract_identifier, &function_name, &epoch) + .get_public_function_type(&contract_identifier, function_name, &epoch) .map_err(|_e| MemPoolRejection::NoSuchContract)? 
.ok_or_else(|| MemPoolRejection::NoSuchPublicFunction)?; let clarity_version = db @@ -6862,7 +6861,7 @@ impl StacksChainState { function_type .check_args_by_allowing_trait_cast( db, - &function_args, + function_args, epoch, clarity_version, ) @@ -7127,7 +7126,7 @@ pub mod test { for i in 0..49 { let random_bytes = rng.gen::<[u8; 8]>(); let random_bytes_str = to_hex(&random_bytes); - let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); + let auth = TransactionAuth::from_p2pkh(privk).unwrap(); // 16k + 8 contract let contract_16k = { @@ -7153,7 +7152,7 @@ pub mod test { tx_big_contract.anchor_mode = TransactionAnchorMode::OffChainOnly; let mut tx_signer = StacksTransactionSigner::new(&tx_big_contract); - tx_signer.sign_origin(&privk).unwrap(); + tx_signer.sign_origin(privk).unwrap(); let tx_big_contract_signed = tx_signer.get_tx().unwrap(); all_txs.push(tx_big_contract_signed); @@ -7224,7 +7223,7 @@ pub mod test { block: &StacksBlock, ) { assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -7233,7 +7232,7 @@ pub mod test { .is_some()); assert_eq!( StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -7244,7 +7243,7 @@ pub mod test { ); assert_eq!( StacksChainState::get_staging_block_status( - &chainstate.db(), + chainstate.db(), consensus_hash, &block.block_hash() ) @@ -7267,7 +7266,7 @@ pub mod test { block: &StacksBlock, ) { assert!(!StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -7275,7 +7274,7 @@ pub mod test { .unwrap()); assert_eq!( StacksChainState::load_staging_block_pubkey_hash( - &chainstate.db(), + chainstate.db(), consensus_hash, &block.block_hash() ) @@ -7291,7 +7290,7 @@ pub mod test { block: &StacksBlock, ) { assert!(StacksChainState::has_stored_block( - 
&chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -7312,7 +7311,7 @@ pub mod test { .unwrap() .is_none()); assert!(StacksChainState::load_staging_block_pubkey_hash( - &chainstate.db(), + chainstate.db(), consensus_hash, &block.block_hash() ) @@ -7321,7 +7320,7 @@ pub mod test { assert_eq!( StacksChainState::get_staging_block_status( - &chainstate.db(), + chainstate.db(), consensus_hash, &block.block_hash() ) @@ -7330,7 +7329,7 @@ pub mod test { true ); assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -7352,7 +7351,7 @@ pub mod test { block: &StacksBlock, ) { assert!(StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -7386,7 +7385,7 @@ pub mod test { block.header ); assert!(StacksChainState::load_staging_block_pubkey_hash( - &chainstate.db(), + chainstate.db(), consensus_hash, &block.block_hash() ) @@ -7395,7 +7394,7 @@ pub mod test { assert_eq!( StacksChainState::get_staging_block_status( - &chainstate.db(), + chainstate.db(), consensus_hash, &block.block_hash() ) @@ -7404,7 +7403,7 @@ pub mod test { true ); assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -7538,7 +7537,7 @@ pub mod test { ); let (parent_consensus_hash, parent_block_hash) = StacksChainState::get_parent_block_header_hashes( - &chainstate.db(), + chainstate.db(), &child_index_block_hash, ) .unwrap() @@ -7547,7 +7546,7 @@ pub mod test { StacksBlockHeader::make_index_block_hash(&parent_consensus_hash, &parent_block_hash); let parent_microblock_index_hash = - StacksBlockHeader::make_index_block_hash(&parent_consensus_hash, &tail_microblock_hash); + StacksBlockHeader::make_index_block_hash(&parent_consensus_hash, tail_microblock_hash); let mut tx = 
chainstate.db_tx_begin().unwrap(); @@ -7555,7 +7554,7 @@ pub mod test { &mut tx, child_consensus_hash, child_anchored_block_hash, - &tail_microblock_hash, + tail_microblock_hash, ) .unwrap(); tx.commit().unwrap(); @@ -7608,7 +7607,7 @@ pub mod test { .unwrap(); assert!(fs::metadata(&path).is_err()); assert!(!StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([1u8; 20]), &BlockHeaderHash([2u8; 32]) @@ -7625,7 +7624,7 @@ pub mod test { // empty block is considered _not_ stored assert!(!StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([1u8; 20]), &BlockHeaderHash([2u8; 32]) @@ -7662,7 +7661,7 @@ pub mod test { .unwrap(); assert!(fs::metadata(&path).is_err()); assert!(!StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([1u8; 20]), &block.block_hash() @@ -7670,7 +7669,7 @@ pub mod test { .unwrap()); assert!(!StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([1u8; 20]), &block.block_hash() @@ -7694,7 +7693,7 @@ pub mod test { ); assert!(StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([1u8; 20]), &block.block_hash() @@ -7737,7 +7736,7 @@ pub mod test { // database determines that it's still there assert!(StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([1u8; 20]), &block.block_hash() @@ -7760,7 +7759,7 @@ pub mod test { // still technically stored -- we processed it assert!(StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([1u8; 20]), &block.block_hash() @@ -7778,7 +7777,7 @@ pub mod test { // *now* it's not there assert!(!StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, 
&ConsensusHash([1u8; 20]), &block.block_hash() @@ -7812,7 +7811,7 @@ pub mod test { let block = make_empty_coinbase_block(&privk); assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([2u8; 20]), &block.block_hash() @@ -7863,7 +7862,7 @@ pub mod test { let block = make_empty_coinbase_block(&privk); assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([2u8; 20]), &block.block_hash() @@ -7915,7 +7914,7 @@ pub mod test { let microblocks = make_sample_microblock_stream(&privk, &block.block_hash()); assert!(!StacksChainState::has_stored_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &ConsensusHash([2u8; 20]), µblocks[0].block_hash() @@ -7923,7 +7922,7 @@ pub mod test { .unwrap()); assert!(StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks.last().as_ref().unwrap().block_hash(), @@ -7942,7 +7941,7 @@ pub mod test { assert_eq!( StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks.last().as_ref().unwrap().block_hash(), @@ -7954,7 +7953,7 @@ pub mod test { // not processed assert!(StacksChainState::load_processed_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks.last().as_ref().unwrap().block_hash(), @@ -7981,7 +7980,7 @@ pub mod test { microblocks.last().as_ref().unwrap().header.sequence; assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[0].block_hash() @@ -7990,7 +7989,7 @@ pub mod test { .is_none()); assert!(StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( 
&ConsensusHash([2u8; 20]), &block.block_hash() @@ -8036,7 +8035,7 @@ pub mod test { // microblock stream should be stored to staging assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[0].block_hash() @@ -8046,7 +8045,7 @@ pub mod test { assert_eq!( StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[0].block_hash() @@ -8059,7 +8058,7 @@ pub mod test { ); assert_eq!( StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &ConsensusHash([2u8; 20]), &block.block_hash() @@ -8078,7 +8077,7 @@ pub mod test { // microblocks present assert_eq!( StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks.last().as_ref().unwrap().block_hash(), @@ -8090,7 +8089,7 @@ pub mod test { // microblocks not processed yet assert!(StacksChainState::load_processed_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks.last().as_ref().unwrap().block_hash(), @@ -8123,7 +8122,7 @@ pub mod test { assert_eq!( StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks.last().as_ref().unwrap().block_hash(), @@ -8158,7 +8157,7 @@ pub mod test { // but we should still load the full stream if asked assert!(StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &ConsensusHash([2u8; 20]), &block.block_hash() @@ -8170,7 +8169,7 @@ pub mod test { .is_some()); assert_eq!( StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( 
&ConsensusHash([2u8; 20]), &block.block_hash() @@ -8202,7 +8201,7 @@ pub mod test { microblocks.first().as_ref().unwrap().header.sequence; assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[0].block_hash() @@ -8210,7 +8209,7 @@ pub mod test { .unwrap() .is_none()); assert!(StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &ConsensusHash([2u8; 20]), &block.block_hash() @@ -8258,7 +8257,7 @@ pub mod test { // microblock stream should be stored to staging assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[0].block_hash() @@ -8267,7 +8266,7 @@ pub mod test { .is_some()); assert_eq!( StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[0].block_hash() @@ -8280,7 +8279,7 @@ pub mod test { ); assert_eq!( StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &ConsensusHash([2u8; 20]), &block.block_hash() @@ -8294,7 +8293,7 @@ pub mod test { ); assert_eq!( StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks.last().as_ref().unwrap().block_hash(), @@ -8306,7 +8305,7 @@ pub mod test { // not processed assert!(StacksChainState::load_processed_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks.last().as_ref().unwrap().block_hash(), @@ -8342,7 +8341,7 @@ pub mod test { // microblocks should not be in the chunk store, except for block 0 which was confirmed assert_eq!( StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 
20]), &block.block_hash(), µblocks.last().as_ref().unwrap().block_hash(), @@ -8354,7 +8353,7 @@ pub mod test { assert_eq!( StacksChainState::load_processed_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks.first().as_ref().unwrap().block_hash(), @@ -8366,7 +8365,7 @@ pub mod test { assert_eq!( StacksChainState::load_processed_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[1].block_hash(), @@ -8416,7 +8415,7 @@ pub mod test { // can load the entire stream still assert!(StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &ConsensusHash([2u8; 20]), &block.block_hash() @@ -8428,7 +8427,7 @@ pub mod test { .is_some()); assert_eq!( StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &ConsensusHash([2u8; 20]), &block.block_hash() @@ -8460,7 +8459,7 @@ pub mod test { microblocks.first().as_ref().unwrap().header.sequence; assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[0].block_hash() @@ -8468,7 +8467,7 @@ pub mod test { .unwrap() .is_none()); assert!(StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &ConsensusHash([2u8; 20]), &block.block_hash() @@ -8520,7 +8519,7 @@ pub mod test { // missing head assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[0].block_hash() @@ -8530,7 +8529,7 @@ pub mod test { // subsequent microblock stream should be stored to staging assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), 
&block.block_hash(), µblocks[1].block_hash() @@ -8539,7 +8538,7 @@ pub mod test { .is_some()); assert_eq!( StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &ConsensusHash([2u8; 20]), &block.block_hash(), µblocks[1].block_hash() @@ -8553,7 +8552,7 @@ pub mod test { // can't load descendent stream because missing head assert!(StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &ConsensusHash([2u8; 20]), &block.block_hash() @@ -8929,7 +8928,7 @@ pub mod test { .zip(&parent_consensus_hashes) { assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -8950,7 +8949,7 @@ pub mod test { // first block is attachable, but all the rest are not assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[0], &block_1.block_hash() @@ -8964,7 +8963,7 @@ pub mod test { for (block, consensus_hash) in blocks[1..].iter().zip(&consensus_hashes[1..]) { assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -8984,7 +8983,7 @@ pub mod test { let child_block = &blocks[i + 1]; assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, child_consensus_hash, &child_block.block_hash() @@ -9010,7 +9009,7 @@ pub mod test { let child_block = &blocks[i + 1]; assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, child_consensus_hash, &child_block.block_hash() @@ -9066,7 +9065,7 @@ pub mod test { .rev() { assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -9087,7 +9086,7 @@ pub mod test { // first 
block is accepted, but all the rest are not assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[0], &block_1.block_hash() @@ -9101,7 +9100,7 @@ pub mod test { for (block, consensus_hash) in blocks[1..].iter().zip(&consensus_hashes[1..]) { assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -9121,7 +9120,7 @@ pub mod test { let child_block = &blocks[i + 1]; assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, child_consensus_hash, &child_block.block_hash() @@ -9147,7 +9146,7 @@ pub mod test { let child_block = &blocks[i + 1]; assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, child_consensus_hash, &child_block.block_hash() @@ -9211,7 +9210,7 @@ pub mod test { .rev() { assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -9237,7 +9236,7 @@ pub mod test { ]) { assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -9251,7 +9250,7 @@ pub mod test { // store block 1 assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[0], &block_1.block_hash() @@ -9271,7 +9270,7 @@ pub mod test { // first block is attachable assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[0], &block_1.block_hash() @@ -9289,7 +9288,7 @@ pub mod test { { assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -9315,7 +9314,7 @@ pub mod test { for (block, 
consensus_hash) in blocks[1..3].iter().zip(&consensus_hashes[1..3]) { assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, consensus_hash, &block.block_hash() @@ -9330,7 +9329,7 @@ pub mod test { // and block 4 is still not assert_eq!( StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[3], &block_4.block_hash() @@ -9402,7 +9401,7 @@ pub mod test { // store block 1 to staging assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[0], &blocks[0].block_hash() @@ -9412,12 +9411,12 @@ pub mod test { store_staging_block( &mut chainstate, &consensus_hashes[0], - &blocks[0], + blocks[0], &parent_consensus_hash, 1, 2, ); - assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[0], &blocks[0]); + assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[0], blocks[0]); set_block_processed( &mut chainstate, @@ -9425,35 +9424,34 @@ pub mod test { &blocks[0].block_hash(), true, ); - assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[0], &blocks[0]); + assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[0], blocks[0]); // process and store blocks 1 and N, as well as microblocks in-between - let len = blocks.len(); - for i in 1..len { + for (i, block) in blocks.iter().skip(1).enumerate() { // this is what happens at the end of append_block() // store block to staging and process it assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[i], - &blocks[i].block_hash() + &block.block_hash() ) .unwrap() .is_none()); store_staging_block( &mut chainstate, &consensus_hashes[i], - &blocks[i], + block, &consensus_hashes[0], 1, 2, ); - assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], &blocks[i]); + 
assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], block); set_block_processed( &mut chainstate, &consensus_hashes[i], - &blocks[i].block_hash(), + &block.block_hash(), true, ); @@ -9461,17 +9459,17 @@ pub mod test { set_microblocks_processed( &mut chainstate, &consensus_hashes[i], - &blocks[i].block_hash(), - &blocks[i].header.parent_microblock, + &block.block_hash(), + &block.header.parent_microblock, ); - assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[i], &blocks[i]); + assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[i], block); let mblocks_confirmed = StacksChainState::load_processed_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &consensus_hashes[0], &blocks[0].block_hash(), - &blocks[i].header.parent_microblock, + &block.header.parent_microblock, ) .unwrap() .unwrap(); @@ -9536,7 +9534,7 @@ pub mod test { mblock, ); assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), consensus_hash, &block.block_hash(), &mblock.block_hash() @@ -9547,24 +9545,24 @@ pub mod test { } // store blocks to staging - for i in 0..blocks.len() { + for (i, block) in blocks.iter().enumerate() { assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[i], - &blocks[i].block_hash() + &block.block_hash() ) .unwrap() .is_none()); store_staging_block( &mut chainstate, &consensus_hashes[i], - &blocks[i], + block, &parent_consensus_hashes[i], 1, 2, ); - assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], &blocks[i]); + assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], block); } // reject block 1 @@ -9583,7 +9581,7 @@ pub mod test { // block i's microblocks should all be marked as processed, orphaned, and deleted for mblock in microblocks[i].iter() { assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &consensus_hashes[i], 
&blocks[i].block_hash(), &mblock.block_hash() @@ -9592,7 +9590,7 @@ pub mod test { .is_none()); assert!(StacksChainState::load_staging_microblock_bytes( - &chainstate.db(), + chainstate.db(), &mblock.block_hash() ) .unwrap() @@ -9602,7 +9600,7 @@ pub mod test { if i + 1 < blocks.len() { // block i+1 should be marked as an orphan, but its data should still be there assert!(StacksChainState::load_staging_block( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[i + 1], &blocks[i + 1].block_hash() @@ -9620,7 +9618,7 @@ pub mod test { for mblock in microblocks[i + 1].iter() { let staging_mblock = StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &consensus_hashes[i + 1], &blocks[i + 1].block_hash(), &mblock.block_hash(), @@ -9670,7 +9668,7 @@ pub mod test { mblock, ); assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &consensus_hash, &block.block_hash(), &mblock.block_hash() @@ -9681,7 +9679,7 @@ pub mod test { // store block to staging assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hash, &block.block_hash() @@ -9710,7 +9708,7 @@ pub mod test { if i < len - 1 { assert_eq!( StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &consensus_hash, &block.block_hash() @@ -9726,7 +9724,7 @@ pub mod test { } else { // last time we do this, there will be no more stream assert!(StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash(&consensus_hash, &block.block_hash()), 0, u16::MAX @@ -9784,7 +9782,7 @@ pub mod test { .unwrap()); assert_eq!( - StacksChainState::stream_microblock_get_info(&chainstate.db(), &index_block_header) + StacksChainState::stream_microblock_get_info(chainstate.db(), &index_block_header) .unwrap() .len(), 
0 @@ -9793,7 +9791,7 @@ pub mod test { // store microblocks to staging for (i, mblock) in mblocks.iter().enumerate() { assert!(StacksChainState::stream_microblock_get_rowid( - &chainstate.db(), + chainstate.db(), &index_block_header, &mblock.header.block_hash(), ) @@ -9807,7 +9805,7 @@ pub mod test { mblock, ); assert!(StacksChainState::load_staging_microblock( - &chainstate.db(), + chainstate.db(), &consensus_hash, &block.block_hash(), &mblock.block_hash() @@ -9819,7 +9817,7 @@ pub mod test { .has_microblocks_indexed(&index_block_header) .unwrap()); assert!(StacksChainState::stream_microblock_get_rowid( - &chainstate.db(), + chainstate.db(), &index_block_header, &mblock.header.block_hash(), ) @@ -9833,7 +9831,7 @@ pub mod test { .unwrap()); let mblock_info = - StacksChainState::stream_microblock_get_info(&chainstate.db(), &index_block_header) + StacksChainState::stream_microblock_get_info(chainstate.db(), &index_block_header) .unwrap(); assert_eq!(mblock_info.len(), i + 1); @@ -9895,7 +9893,7 @@ pub mod test { for i in 0..mblocks.len() { assert!(StacksChainState::stream_microblock_get_rowid( - &chainstate.db(), + chainstate.db(), &index_block_header, &mblocks[i].block_hash(), ) @@ -9914,7 +9912,7 @@ pub mod test { .unwrap()); let mblock_info = - StacksChainState::stream_microblock_get_info(&chainstate.db(), &index_block_header) + StacksChainState::stream_microblock_get_info(chainstate.db(), &index_block_header) .unwrap(); assert_eq!(mblock_info.len(), mblocks.len()); @@ -9999,12 +9997,11 @@ pub mod test { microblocks.push(mblocks); } - let block_hashes: Vec = - blocks.iter().map(|ref b| b.block_hash()).collect(); + let block_hashes: Vec = blocks.iter().map(|b| b.block_hash()).collect(); let header_hashes_all: Vec<(ConsensusHash, Option)> = consensus_hashes .iter() .zip(block_hashes.iter()) - .map(|(ref burn, ref block)| ((*burn).clone(), Some((*block).clone()))) + .map(|(burn, block)| ((*burn).clone(), Some((*block).clone()))) .collect(); // nothing is stored, so 
our inventory should be empty @@ -10048,7 +10045,7 @@ pub mod test { for i in 0..blocks.len() { test_debug!("Store block {} to staging", i); assert!(StacksChainState::load_staging_block_data( - &chainstate.db(), + chainstate.db(), &chainstate.blocks_path, &consensus_hashes[i], &blocks[i].block_hash() @@ -10209,7 +10206,7 @@ pub mod test { // The first burnchain block with a Stacks block is at first_stacks_block_height + 1. let (first_stacks_block_height, canonical_sort_id) = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); (sn.block_height, sn.sortition_id) }; @@ -10239,7 +10236,7 @@ pub mod test { // make some blocks, up to and including a fractional reward cycle for tenure_id in 0..(last_stacks_block_height - first_stacks_block_height) { let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); assert_eq!(tip.block_height, first_stacks_block_height + tenure_id); @@ -10432,7 +10429,7 @@ pub mod test { set_block_orphaned( &mut chainstate, &header_hashes[block_height as usize].0, - &hdr_hash, + hdr_hash, ); test_debug!( "Orphaned {}/{}", @@ -10483,7 +10480,7 @@ pub mod test { let num_blocks = 10; let first_stacks_block_height = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -10492,7 +10489,7 @@ pub mod test { let mut last_parent_opt: Option = None; for tenure_id in 0..num_blocks { let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); assert_eq!( @@ -10720,7 +10717,7 @@ pub mod test { // both streams should be present 
assert_eq!( StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &consensus_hashes[0], &block_1.block_hash(), &mblocks_1.last().as_ref().unwrap().block_hash(), @@ -10732,7 +10729,7 @@ pub mod test { assert_eq!( StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &consensus_hashes[0], &block_1.block_hash(), &mblocks_2.last().as_ref().unwrap().block_hash(), @@ -10746,7 +10743,7 @@ pub mod test { // seq 0 assert_eq!( StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &consensus_hashes[0], &block_1.block_hash() @@ -10813,7 +10810,7 @@ pub mod test { store_staging_block( &mut chainstate, &consensus_hashes[i + 1], - &block, + block, &consensus_hashes[0], 1, 2, @@ -10868,7 +10865,7 @@ pub mod test { // all streams should be present assert_eq!( StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &consensus_hashes[0], &block_1.block_hash(), &mblocks.last().as_ref().unwrap().block_hash(), @@ -10887,7 +10884,7 @@ pub mod test { assert_eq!( StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &consensus_hashes[0], &block_1.block_hash(), &mblock_branch.last().as_ref().unwrap().block_hash() @@ -10902,7 +10899,7 @@ pub mod test { // seq 1 assert_eq!( StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &consensus_hashes[0], &block_1.block_hash() @@ -11031,7 +11028,7 @@ pub mod test { let first_stacks_block_height = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -11040,7 +11037,7 @@ pub mod test { for tenure_id in 0..num_blocks { let del_addr = del_addrs[tenure_id]; let tip = - 
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); assert_eq!( @@ -11201,7 +11198,7 @@ pub mod test { } let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let sortdb = peer.sortdb.take().unwrap(); @@ -11357,7 +11354,7 @@ pub mod test { let first_stacks_block_height = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -11366,7 +11363,7 @@ pub mod test { for tenure_id in 0..num_blocks { let del_addr = del_addrs[tenure_id]; let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); assert_eq!( @@ -11884,7 +11881,7 @@ pub mod test { } let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let sortdb = peer.sortdb.take().unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/headers.rs b/stackslib/src/chainstate/stacks/db/headers.rs index 92584e362a..686073a5fd 100644 --- a/stackslib/src/chainstate/stacks/db/headers.rs +++ b/stackslib/src/chainstate/stacks/db/headers.rs @@ -135,7 +135,7 @@ impl StacksChainState { let block_hash = header.block_hash(); let index_block_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash); + StacksBlockHeader::make_index_block_hash(consensus_hash, &block_hash); assert!(block_height < (i64::MAX as u64)); @@ -362,7 +362,7 @@ impl StacksChainState { for _i in 0..count { let parent_index_block_hash = { let cur_index_block_hash = ret.last().expect("FATAL: 
empty list of ancestors"); - match StacksChainState::get_parent_block_id(conn, &cur_index_block_hash)? { + match StacksChainState::get_parent_block_id(conn, cur_index_block_hash)? { Some(ibhh) => ibhh, None => { // out of ancestors diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 31159137ac..d483f17669 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -252,7 +252,7 @@ fn ExtendedStacksHeader_StacksBlockHeader_serialize( ) -> Result { let bytes = header.serialize_to_vec(); let header_hex = to_hex(&bytes); - s.serialize_str(&header_hex.as_str()) + s.serialize_str(header_hex.as_str()) } /// In ExtendedStacksHeader, encode the StacksBlockHeader as a hex string @@ -1009,10 +1009,10 @@ impl StacksChainState { )?; if migrate { - StacksChainState::apply_schema_migrations(&tx, mainnet, chain_id)?; + StacksChainState::apply_schema_migrations(tx, mainnet, chain_id)?; } - StacksChainState::add_indexes(&tx)?; + StacksChainState::add_indexes(tx)?; } dbtx.instantiate_index()?; @@ -1227,12 +1227,12 @@ impl StacksChainState { fn parse_genesis_address(addr: &str, mainnet: bool) -> PrincipalData { // Typical entries are BTC encoded addresses that need converted to STX - let mut stacks_address = match LegacyBitcoinAddress::from_b58(&addr) { + let mut stacks_address = match LegacyBitcoinAddress::from_b58(addr) { Ok(addr) => StacksAddress::from_legacy_bitcoin_address(&addr), // A few addresses (from legacy placeholder accounts) are already STX addresses _ => match StacksAddress::from_string(addr) { Some(addr) => addr, - None => panic!("Failed to parsed genesis address {}", addr), + None => panic!("Failed to parsed genesis address {addr}"), }, }; // Convert a given address to the currently running network mode (mainnet vs testnet). 
@@ -1518,7 +1518,7 @@ impl StacksChainState { let namespace = { let namespace_str = components[1]; - if !BNS_CHARS_REGEX.is_match(&namespace_str) { + if !BNS_CHARS_REGEX.is_match(namespace_str) { panic!("Invalid namespace characters"); } let buffer = namespace_str.as_bytes(); @@ -2172,7 +2172,7 @@ impl StacksChainState { where F: FnOnce(&mut ClarityReadOnlyConnection) -> R, { - if let Some(ref unconfirmed) = self.unconfirmed_state.as_ref() { + if let Some(unconfirmed) = self.unconfirmed_state.as_ref() { if !unconfirmed.is_readable() { return Ok(None); } @@ -2638,7 +2638,7 @@ impl StacksChainState { &vec![], &vec![], )?; - let index_block_hash = new_tip.index_block_hash(&new_consensus_hash); + let index_block_hash = new_tip.index_block_hash(new_consensus_hash); test_debug!( "Headers index_indexed_all finished {}-{}", &parent_hash, diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e56624b84f..4e3b920a90 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -719,11 +719,10 @@ impl StacksChainState { match asset_entry { AssetMapEntry::Asset(values) => { // this is a NFT - if let Some(ref checked_nft_asset_map) = + if let Some(checked_nft_asset_map) = checked_nonfungible_assets.get(&principal) { - if let Some(ref nfts) = checked_nft_asset_map.get(&asset_identifier) - { + if let Some(nfts) = checked_nft_asset_map.get(&asset_identifier) { // each value must be covered for v in values { if !nfts.contains(&v.clone().try_into()?) 
{ @@ -744,7 +743,7 @@ impl StacksChainState { } _ => { // This is STX or a fungible token - if let Some(ref checked_ft_asset_ids) = + if let Some(checked_ft_asset_ids) = checked_fungible_assets.get(&principal) { if !checked_ft_asset_ids.contains(&asset_identifier) { @@ -811,7 +810,7 @@ impl StacksChainState { // encodes MARF reads for loading microblock height and current height, and loading and storing a // poison-microblock report runtime_cost(ClarityCostFunction::PoisonMicroblock, env, 0) - .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?; + .map_err(|e| Error::from_cost_error(e, cost_before.clone(), env.global_context))?; let sender_principal = match &env.sender { Some(ref sender) => { @@ -840,11 +839,11 @@ impl StacksChainState { // for the microblock public key hash we had to process env.add_memory(20) - .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?; + .map_err(|e| Error::from_cost_error(e, cost_before.clone(), env.global_context))?; // for the block height we had to load env.add_memory(4) - .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?; + .map_err(|e| Error::from_cost_error(e, cost_before.clone(), env.global_context))?; // was the referenced public key hash used anytime in the past // MINER_REWARD_MATURITY blocks? 
@@ -892,11 +891,11 @@ impl StacksChainState { .size() .map_err(InterpreterError::from)?, )) - .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?; + .map_err(|e| Error::from_cost_error(e, cost_before.clone(), env.global_context))?; // u128 sequence env.add_memory(16) - .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?; + .map_err(|e| Error::from_cost_error(e, cost_before.clone(), env.global_context))?; if mblock_header_1.sequence < seq { // this sender reports a point lower in the stream where a fork occurred, and is now @@ -1981,7 +1980,7 @@ pub mod test { .iter() .zip(error_frags.clone()) { - let mut signer = StacksTransactionSigner::new(&tx_stx_transfer); + let mut signer = StacksTransactionSigner::new(tx_stx_transfer); signer.sign_origin(&privk).unwrap(); if tx_stx_transfer.auth.is_sponsored() { @@ -2352,8 +2351,7 @@ pub mod test { let mut tx_contract = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), - TransactionPayload::new_smart_contract(&contract_name, &contract, None) - .unwrap(), + TransactionPayload::new_smart_contract(contract_name, &contract, None).unwrap(), ); tx_contract.chain_id = 0x80000000; @@ -3947,7 +3945,7 @@ pub mod test { for tx_pass in post_conditions_pass.iter() { let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_pass, + tx_pass, false, ASTRules::PrecheckSize, ) @@ -3977,7 +3975,7 @@ pub mod test { for tx_pass in post_conditions_pass_payback.iter() { let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_pass, + tx_pass, false, ASTRules::PrecheckSize, ) @@ -4021,10 +4019,10 @@ pub mod test { assert_eq!(account_recv_publisher_after.nonce, expected_recv_nonce); } - for (_i, tx_pass) in post_conditions_pass_nft.iter().enumerate() { + for tx_pass in post_conditions_pass_nft.iter() { let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_pass, + tx_pass, false, ASTRules::PrecheckSize, ) @@ -4054,7 +4052,7 @@ 
pub mod test { for tx_fail in post_conditions_fail.iter() { let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_fail, + tx_fail, false, ASTRules::PrecheckSize, ) @@ -4097,7 +4095,7 @@ pub mod test { for tx_fail in post_conditions_fail_payback.iter() { let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_fail, + tx_fail, false, ASTRules::PrecheckSize, ) @@ -4142,10 +4140,10 @@ pub mod test { assert_eq!(account_publisher_after.nonce, expected_recv_nonce); } - for (_i, tx_fail) in post_conditions_fail_nft.iter().enumerate() { + for tx_fail in post_conditions_fail_nft.iter() { let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_fail, + tx_fail, false, ASTRules::PrecheckSize, ) @@ -4666,10 +4664,10 @@ pub mod test { let mut expected_recv_nonce = 0; let mut expected_payback_stackaroos_balance = 0; - for (_i, tx_pass) in post_conditions_pass.iter().enumerate() { + for tx_pass in post_conditions_pass.iter() { let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_pass, + tx_pass, false, ASTRules::PrecheckSize, ) @@ -4713,10 +4711,10 @@ pub mod test { assert_eq!(account_publisher_after.nonce, expected_nonce); } - for (_i, tx_pass) in post_conditions_pass_payback.iter().enumerate() { + for tx_pass in post_conditions_pass_payback.iter() { let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_pass, + tx_pass, false, ASTRules::PrecheckSize, ) @@ -4779,10 +4777,10 @@ pub mod test { assert_eq!(account_recv_publisher_after.nonce, expected_recv_nonce); } - for (_i, tx_fail) in post_conditions_fail.iter().enumerate() { + for tx_fail in post_conditions_fail.iter() { let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_fail, + tx_fail, false, ASTRules::PrecheckSize, ) @@ -4836,11 +4834,11 @@ pub mod test { assert_eq!(account_publisher_after.nonce, expected_nonce); } - for (_i, tx_fail) in post_conditions_fail_payback.iter().enumerate() { - eprintln!("tx fail {:?}", 
&tx_fail); + for tx_fail in post_conditions_fail_payback.iter() { + eprintln!("tx fail {tx_fail:?}"); let (_fee, _) = StacksChainState::process_transaction( &mut conn, - &tx_fail, + tx_fail, false, ASTRules::PrecheckSize, ) @@ -8212,7 +8210,7 @@ pub mod test { (stx-transfer? amount tx-sender recipient)) "#; - let auth = TransactionAuth::from_p2pkh(&tx_privk).unwrap(); + let auth = TransactionAuth::from_p2pkh(tx_privk).unwrap(); let addr = auth.origin().address_testnet(); let mut rng = rand::thread_rng(); @@ -8232,7 +8230,7 @@ pub mod test { tx_contract_create.set_tx_fee(0); let mut signer = StacksTransactionSigner::new(&tx_contract_create); - signer.sign_origin(&tx_privk).unwrap(); + signer.sign_origin(tx_privk).unwrap(); let signed_contract_tx = signer.get_tx().unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index b39de26c18..c8e3dc3756 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -382,7 +382,7 @@ impl UnconfirmedState { }; StacksChainState::load_descendant_staging_microblock_stream( - &chainstate.db(), + chainstate.db(), &StacksBlockId::new(&consensus_hash, &anchored_block_hash), 0, u16::MAX, @@ -684,7 +684,7 @@ mod test { let num_blocks = 10; let first_stacks_block_height = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -697,7 +697,7 @@ mod test { // send transactions to the mempool let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); assert_eq!( @@ -778,7 +778,7 @@ mod test { let microblocks = { let sortdb = peer.sortdb.take().unwrap(); let sort_iconn = sortdb - .index_handle_at_block(&peer.chainstate(), &canonical_tip) 
+ .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); peer.chainstate() .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) @@ -853,7 +853,7 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); let iconn = sortdb - .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); peer.chainstate() .reload_unconfirmed_state(&iconn, canonical_tip.clone()) @@ -879,7 +879,7 @@ mod test { let sortdb = peer.sortdb.take().unwrap(); let iconn = sortdb - .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); let confirmed_recv_balance = peer .chainstate() @@ -921,7 +921,7 @@ mod test { let num_blocks = 10; let first_stacks_block_height = { let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); tip.block_height }; @@ -934,7 +934,7 @@ mod test { // send transactions to the mempool let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); assert_eq!( @@ -1015,7 +1015,7 @@ mod test { let sortdb = peer.sortdb.take().unwrap(); let microblocks = { let sort_iconn = sortdb - .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); peer.chainstate() .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) @@ -1175,7 +1175,7 @@ mod test { let num_microblocks = 3; let first_stacks_block_height = { let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); tip.block_height }; @@ -1193,7 +1193,7 @@ mod test { 
// send transactions to the mempool let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); assert_eq!( @@ -1402,7 +1402,7 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); let iconn = sortdb - .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); peer.chainstate() .reload_unconfirmed_state(&iconn, canonical_tip.clone()) diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index b917dffe41..65d4f86833 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1173,7 +1173,7 @@ impl MARF { ) -> Result, Error> { let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); - let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { + let result = MARF::get_path(storage, block_hash, path).or_else(|e| match e { Error::NotFoundError => Ok(None), _ => Err(e), }); @@ -1233,7 +1233,7 @@ impl MARF { ) -> Result, Error> { let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); - let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { + let result = MARF::get_path(storage, block_hash, path).or_else(|e| match e { Error::NotFoundError => Ok(None), _ => Err(e), }); @@ -1427,11 +1427,11 @@ impl MARF { path: &TrieHash, ) -> Result)>, Error> { let mut conn = self.storage.connection(); - let marf_value = match MARF::get_by_path(&mut conn, block_hash, &path)? { + let marf_value = match MARF::get_by_path(&mut conn, block_hash, path)? 
{ None => return Ok(None), Some(x) => x, }; - let proof = TrieMerkleProof::from_path(&mut conn, &path, &marf_value, block_hash)?; + let proof = TrieMerkleProof::from_path(&mut conn, path, &marf_value, block_hash)?; Ok(Some((marf_value, proof))) } diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index b689035675..6c82127449 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -433,7 +433,7 @@ impl TrieCursor { for i in 0..node_path.len() { if node_path[i] != path_bytes[self.index] { // diverged - trace!("cursor: diverged({} != {}): i = {}, self.index = {}, self.node_path_index = {}", to_hex(&node_path), to_hex(path_bytes), i, self.index, self.node_path_index); + trace!("cursor: diverged({} != {}): i = {}, self.index = {}, self.node_path_index = {}", to_hex(node_path), to_hex(path_bytes), i, self.index, self.node_path_index); self.last_error = Some(CursorError::PathDiverged); return Err(CursorError::PathDiverged); } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 4d399c9f70..6361dfd044 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -857,13 +857,10 @@ impl TrieMerkleProof { let mut i = ptrs.len() - 1; loop { let ptr = &ptrs[i]; - let proof_node = TrieMerkleProof::ptr_to_segment_proof_node(storage, &ptr, prev_chr)?; + let proof_node = TrieMerkleProof::ptr_to_segment_proof_node(storage, ptr, prev_chr)?; trace!( - "make_segment_proof: Add proof node from {:?} child 0x{:02x}: {:?}", - &ptr, - prev_chr, - &proof_node + "make_segment_proof: Add proof node from {ptr:?} child 0x{prev_chr:02x}: {proof_node:?}" ); proof_segment.push(proof_node); @@ -1125,7 +1122,7 @@ impl TrieMerkleProof { root_hash: &TrieHash, root_to_block: &HashMap, ) -> bool { - if !TrieMerkleProof::is_proof_well_formed(&proof, path) { + if 
!TrieMerkleProof::is_proof_well_formed(proof, path) { test_debug!("Invalid proof -- proof is not well-formed"); return false; } @@ -1355,7 +1352,7 @@ impl TrieMerkleProof { root_hash: &TrieHash, root_to_block: &HashMap, ) -> bool { - TrieMerkleProof::::verify_proof(&self.0, &path, &marf_value, root_hash, root_to_block) + TrieMerkleProof::::verify_proof(&self.0, path, marf_value, root_hash, root_to_block) } /// Walk down the trie pointed to by s until we reach a backptr or a leaf diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index d8d1b9133a..3e0e024cfd 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -170,7 +170,7 @@ impl BlockMap for TrieSqlHashMapCursor<'_, T> { type TrieId = T; fn get_block_hash(&self, id: u32) -> Result { - trie_sql::get_block_hash(&self.db, id) + trie_sql::get_block_hash(self.db, id) } fn get_block_hash_caching(&mut self, id: u32) -> Result<&T, Error> { @@ -186,7 +186,7 @@ impl BlockMap for TrieSqlHashMapCursor<'_, T> { } fn get_block_id(&self, block_hash: &T) -> Result { - trie_sql::get_block_identifier(&self.db, block_hash) + trie_sql::get_block_identifier(self.db, block_hash) } fn get_block_id_caching(&mut self, block_hash: &T) -> Result { @@ -836,7 +836,7 @@ impl TrieRAM { while let Some(pointer) = frontier.pop_front() { let (node, _node_hash) = self.get_nodetype(pointer)?; // calculate size - let num_written = get_node_byte_len(&node); + let num_written = get_node_byte_len(node); ptr += num_written as u64; // queue each child @@ -1590,7 +1590,7 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { pub fn reopen_readonly(&self) -> Result, Error> { let db = marf_sqlite_open(&self.db_path, OpenFlags::SQLITE_OPEN_READ_ONLY, false)?; let blobs = if self.blobs.is_some() { - Some(TrieFile::from_db_path(&self.db_path, true)?) + Some(TrieFile::from_db_path(self.db_path, true)?) 
} else { None }; @@ -1679,10 +1679,10 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { return Err(Error::UnconfirmedError); } self.with_trie_blobs(|db, blobs| match blobs { - Some(blobs) => blobs.store_trie_blob(&db, &bhh, &buffer), + Some(blobs) => blobs.store_trie_blob(db, &bhh, &buffer), None => { - test_debug!("Stored trie blob {} to db", &bhh); - trie_sql::write_trie_blob(&db, &bhh, &buffer) + test_debug!("Stored trie blob {bhh} to db"); + trie_sql::write_trie_blob(db, &bhh, &buffer) } })? } @@ -2342,7 +2342,7 @@ impl TrieStorageConnection<'_, T> { let mut map = TrieSqlHashMapCursor { db: &self.db, - cache: &mut self.cache, + cache: self.cache, unconfirmed: self.data.unconfirmed, }; @@ -2356,7 +2356,7 @@ impl TrieStorageConnection<'_, T> { &mut map, node, w, - &mut self.bench, + self.bench, ); self.bench.write_children_hashes_finish(start_time, true); return res; @@ -2377,7 +2377,7 @@ impl TrieStorageConnection<'_, T> { &mut map, node, w, - &mut self.bench, + self.bench, ); self.bench.write_children_hashes_finish(start_time, false); res @@ -2396,7 +2396,7 @@ impl TrieStorageConnection<'_, T> { &mut map, node, w, - &mut self.bench, + self.bench, ); self.bench.write_children_hashes_finish(start_time, false); res @@ -2536,38 +2536,36 @@ impl TrieStorageConnection<'_, T> { read_hash: bool, ) -> Result<(TrieNodeType, TrieHash), Error> { trace!( - "inner_read_persisted_nodetype({}): {:?} (unconfirmed={:?},{})", - block_id, - ptr, + "inner_read_persisted_nodetype({block_id}): {ptr:?} (unconfirmed={:?},{})", &self.unconfirmed_block_id, self.unconfirmed() ); if self.unconfirmed_block_id == Some(block_id) { - trace!("Read persisted node from unconfirmed block id {}", block_id); + trace!("Read persisted node from unconfirmed block id {block_id}"); // read from unconfirmed trie if read_hash { - return trie_sql::read_node_type(&self.db, block_id, &ptr); + return trie_sql::read_node_type(&self.db, block_id, ptr); } else { - return 
trie_sql::read_node_type_nohash(&self.db, block_id, &ptr) + return trie_sql::read_node_type_nohash(&self.db, block_id, ptr) .map(|node| (node, TrieHash([0u8; TRIEHASH_ENCODED_SIZE]))); } } let (node_inst, node_hash) = match self.blobs.as_mut() { Some(blobs) => { if read_hash { - blobs.read_node_type(&self.db, block_id, &ptr)? + blobs.read_node_type(&self.db, block_id, ptr)? } else { blobs - .read_node_type_nohash(&self.db, block_id, &ptr) + .read_node_type_nohash(&self.db, block_id, ptr) .map(|node| (node, TrieHash([0u8; TRIEHASH_ENCODED_SIZE])))? } } None => { if read_hash { - trie_sql::read_node_type(&self.db, block_id, &ptr)? + trie_sql::read_node_type(&self.db, block_id, ptr)? } else { - trie_sql::read_node_type_nohash(&self.db, block_id, &ptr) + trie_sql::read_node_type_nohash(&self.db, block_id, ptr) .map(|node| (node, TrieHash([0u8; TRIEHASH_ENCODED_SIZE])))? } } @@ -2739,11 +2737,11 @@ impl TrieStorageConnection<'_, T> { #[cfg(test)] pub fn transient_data(&self) -> &TrieStorageTransientData { - &self.data + self.data } #[cfg(test)] pub fn transient_data_mut(&mut self) -> &mut TrieStorageTransientData { - &mut self.data + self.data } } diff --git a/stackslib/src/chainstate/stacks/index/test/file.rs b/stackslib/src/chainstate/stacks/index/test/file.rs index 19ac5e60e4..a4e5715a92 100644 --- a/stackslib/src/chainstate/stacks/index/test/file.rs +++ b/stackslib/src/chainstate/stacks/index/test/file.rs @@ -92,7 +92,7 @@ fn test_migrate_existing_trie_blobs() { let (data, last_block_header, root_header_map) = { let marf_opts = MARFOpenOpts::new(TrieHashCalculationMode::Deferred, "noop", false); - let f = TrieFileStorage::open(&test_file, marf_opts).unwrap(); + let f = TrieFileStorage::open(test_file, marf_opts).unwrap(); let mut marf = MARF::from_storage(f); // make data to insert @@ -124,7 +124,7 @@ fn test_migrate_existing_trie_blobs() { let mut marf_opts = MARFOpenOpts::new(TrieHashCalculationMode::Deferred, "noop", true); marf_opts.force_db_migrate = true; - 
let f = TrieFileStorage::open(&test_file, marf_opts).unwrap(); + let f = TrieFileStorage::open(test_file, marf_opts).unwrap(); let mut marf = MARF::from_storage(f); // blobs file exists @@ -132,7 +132,7 @@ fn test_migrate_existing_trie_blobs() { // verify that the new blob structure is well-formed let blob_root_header_map = { - let mut blobs = TrieFile::from_db_path(&test_file, false).unwrap(); + let mut blobs = TrieFile::from_db_path(test_file, false).unwrap(); let blob_root_header_map = blobs .read_all_block_hashes_and_roots::(marf.sqlite_conn()) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 7f92bb678d..a96e7ad34f 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -1479,7 +1479,7 @@ fn marf_insert_random_4096_128_merkle_proof() { m.begin(&prev_block_header, &block_header).unwrap(); - let marf_values = values.iter().map(|x| MARFValue::from_value(&x)).collect(); + let marf_values = values.iter().map(|x| MARFValue::from_value(x)).collect(); m.insert_batch(&keys, marf_values).unwrap(); m.commit().unwrap(); diff --git a/stackslib/src/chainstate/stacks/index/test/mod.rs b/stackslib/src/chainstate/stacks/index/test/mod.rs index f563d507a7..2fdc389c2b 100644 --- a/stackslib/src/chainstate/stacks/index/test/mod.rs +++ b/stackslib/src/chainstate/stacks/index/test/mod.rs @@ -188,7 +188,7 @@ pub fn merkle_test_marf_key_value( s.open_block(header).unwrap(); let (_, root_hash) = Trie::read_root(s).unwrap(); - let proof = TrieMerkleProof::from_entry(s, key, value, &header).unwrap(); + let proof = TrieMerkleProof::from_entry(s, key, value, header).unwrap(); test_debug!("---------"); test_debug!("MARF merkle verify: {:?}", &proof); diff --git a/stackslib/src/chainstate/stacks/index/test/storage.rs b/stackslib/src/chainstate/stacks/index/test/storage.rs index ebd97fd5c7..dfa795f5f9 100644 --- 
a/stackslib/src/chainstate/stacks/index/test/storage.rs +++ b/stackslib/src/chainstate/stacks/index/test/storage.rs @@ -121,13 +121,13 @@ fn trie_cmp( // search children for ptr in n1_data.ptrs() { if ptr.id != TrieNodeID::Empty as u8 && !is_backptr(ptr.id) { - let (child_data, child_hash) = t1.read_nodetype(&ptr).unwrap(); + let (child_data, child_hash) = t1.read_nodetype(ptr).unwrap(); frontier_1.push_back((child_data, child_hash)) } } for ptr in n2_data.ptrs() { if ptr.id != TrieNodeID::Empty as u8 && !is_backptr(ptr.id) { - let (child_data, child_hash) = t2.read_nodetype(&ptr).unwrap(); + let (child_data, child_hash) = t2.read_nodetype(ptr).unwrap(); frontier_2.push_back((child_data, child_hash)) } } @@ -254,7 +254,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { // verify that all new keys are there, off the unconfirmed tip for (path, expected_value) in new_inserted.iter() { - let value = MARF::get_path(&mut marf.borrow_storage_backend(), &unconfirmed_tip, &path) + let value = MARF::get_path(&mut marf.borrow_storage_backend(), &unconfirmed_tip, path) .unwrap() .unwrap(); assert_eq!(expected_value.data, value.data); @@ -280,9 +280,9 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { // test rollback for path in all_new_paths.iter() { - eprintln!("path present? {:?}", &path); + eprintln!("path present? {path:?}"); assert!( - MARF::get_path(&mut marf.borrow_storage_backend(), &unconfirmed_tip, &path) + MARF::get_path(&mut marf.borrow_storage_backend(), &unconfirmed_tip, path) .unwrap() .is_some() ); @@ -291,8 +291,8 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { marf.drop_unconfirmed(); for path in all_new_paths.iter() { - eprintln!("path absent? {:?}", &path); - assert!(MARF::get_path(&mut marf.borrow_storage_backend(), &confirmed_tip, &path).is_err()); + eprintln!("path absent? 
{path:?}"); + assert!(MARF::get_path(&mut marf.borrow_storage_backend(), &confirmed_tip, path).is_err()); } } diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 54026eb03a..b3e338273d 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -641,7 +641,7 @@ impl Trie { node.set_path(new_cur_node_path); - let new_cur_node_hash = get_nodetype_hash(storage, &node)?; + let new_cur_node_hash = get_nodetype_hash(storage, node)?; let mut new_node4 = TrieNode4::new(&shared_path_prefix); new_node4.insert(&leaf_ptr); @@ -684,7 +684,7 @@ impl Trie { ); cursor.repair_retarget(&new_node, &ret, &storage.get_cur_block()); - trace!("splice_leaf: node-X' at {:?}", &ret); + trace!("splice_leaf: node-X' at {ret:?}"); Ok(ret) } diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index eae3e1f14d..60edeb498a 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -884,7 +884,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let merkle_tree = MerkleTree::::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); - let mut next_microblock_header = if let Some(ref prev_microblock) = prev_microblock_header { + let mut next_microblock_header = if let Some(prev_microblock) = prev_microblock_header { StacksMicroblockHeader::from_parent_unsigned(prev_microblock, &tx_merkle_root) .ok_or(Error::MicroblockStreamTooLongError)? } else { @@ -1052,7 +1052,7 @@ impl<'a> StacksMicroblockBuilder<'a> { // note: this path _does_ not perform the tx block budget % heuristic, // because this code path is not directly called with a mempool handle. 
clarity_tx.reset_cost(cost_before.clone()); - if total_budget.proportion_largest_dimension(&cost_before) + if total_budget.proportion_largest_dimension(cost_before) < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC { warn!( @@ -1358,7 +1358,7 @@ impl<'a> StacksMicroblockBuilder<'a> { if let Some(measured_cost) = measured_cost { if let Err(e) = estimator.notify_event( &mempool_tx.tx.payload, - &measured_cost, + measured_cost, &block_limit, &stacks_epoch_id, ) { @@ -1525,7 +1525,7 @@ impl StacksBlockBuilder { parent_microblock_hash: parent_chain_tip .microblock_tail .as_ref() - .map(|ref hdr| hdr.block_hash()), + .map(|hdr| hdr.block_hash()), prev_microblock_header: StacksMicroblockHeader::first_unsigned( &EMPTY_MICROBLOCK_PARENT_HASH, &Sha512Trunc256Sum([0u8; 32]), @@ -1836,19 +1836,19 @@ impl StacksBlockBuilder { if let Some(microblock_parent_hash) = self.parent_microblock_hash.as_ref() { // load up a microblock fork let microblocks = StacksChainState::load_microblock_stream_fork( - &chainstate.db(), - &parent_consensus_hash, - &parent_header_hash, - µblock_parent_hash, + chainstate.db(), + parent_consensus_hash, + parent_header_hash, + microblock_parent_hash, )? 
.ok_or(Error::NoSuchBlockError)?; debug!( "Loaded {} microblocks made by {}/{} tipped at {}", microblocks.len(), - &parent_consensus_hash, - &parent_header_hash, - µblock_parent_hash + parent_consensus_hash, + parent_header_hash, + microblock_parent_hash ); Ok(microblocks) } else { @@ -1859,7 +1859,7 @@ impl StacksBlockBuilder { ); let (parent_microblocks, _) = match StacksChainState::load_descendant_staging_microblock_stream_with_poison( - &chainstate.db(), + chainstate.db(), &parent_index_hash, 0, u16::MAX, @@ -1871,8 +1871,8 @@ impl StacksBlockBuilder { debug!( "Loaded {} microblocks made by {}/{}", parent_microblocks.len(), - &parent_consensus_hash, - &parent_header_hash + parent_consensus_hash, + parent_header_hash ); Ok(parent_microblocks) } @@ -2480,7 +2480,7 @@ impl StacksBlockBuilder { if let Some(measured_cost) = measured_cost { if let Err(e) = estimator.notify_event( &txinfo.tx.payload, - &measured_cost, + measured_cost, &block_limit, &stacks_epoch_id, ) { @@ -2719,7 +2719,7 @@ impl BlockBuilder for StacksBlockBuilder { ast_rules: ASTRules, ) -> TransactionResult { if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { - return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); + return TransactionResult::skipped_due_to_error(tx, Error::BlockTooBigError); } match limit_behavior { @@ -2730,14 +2730,14 @@ impl BlockBuilder for StacksBlockBuilder { // other contract calls if !cc.address.is_boot_code_addr() { return TransactionResult::skipped( - &tx, + tx, "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), ); } } TransactionPayload::SmartContract(..) 
=> { return TransactionResult::skipped( - &tx, + tx, "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), ); } @@ -2746,7 +2746,7 @@ impl BlockBuilder for StacksBlockBuilder { } BlockLimitFunction::LIMIT_REACHED => { return TransactionResult::skipped( - &tx, + tx, "BlockLimitFunction::LIMIT_REACHED".to_string(), ) } @@ -2772,14 +2772,14 @@ impl BlockBuilder for StacksBlockBuilder { if let Err(e) = Relayer::static_check_problematic_relayed_tx( clarity_tx.config.mainnet, clarity_tx.get_epoch(), - &tx, + tx, ast_rules, ) { info!( "Detected problematic tx {} while mining; dropping from mempool", tx.txid() ); - return TransactionResult::problematic(&tx, Error::NetError(e)); + return TransactionResult::problematic(tx, Error::NetError(e)); } let (fee, receipt) = match StacksChainState::process_transaction( clarity_tx, tx, quiet, ast_rules, @@ -2787,9 +2787,9 @@ impl BlockBuilder for StacksBlockBuilder { Ok((fee, receipt)) => (fee, receipt), Err(e) => { let (is_problematic, e) = - TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + TransactionResult::is_problematic(tx, e, clarity_tx.get_epoch()); if is_problematic { - return TransactionResult::problematic(&tx, e); + return TransactionResult::problematic(tx, e); } else { match e { Error::CostOverflowError(cost_before, cost_after, total_budget) => { @@ -2813,7 +2813,7 @@ impl BlockBuilder for StacksBlockBuilder { None }; return TransactionResult::error( - &tx, + tx, Error::TransactionTooBigError(measured_cost), ); } else { @@ -2824,12 +2824,12 @@ impl BlockBuilder for StacksBlockBuilder { &total_budget ); return TransactionResult::skipped_due_to_error( - &tx, + tx, Error::BlockTooBigError, ); } } - _ => return TransactionResult::error(&tx, e), + _ => return TransactionResult::error(tx, e), } } } @@ -2843,7 +2843,7 @@ impl BlockBuilder for StacksBlockBuilder { self.txs.push(tx.clone()); self.total_anchored_fees += fee; - TransactionResult::success(&tx, fee, receipt) + TransactionResult::success(tx, fee, 
receipt) } else { // building up the microblocks if tx.anchor_mode != TransactionAnchorMode::OffChainOnly @@ -2862,14 +2862,14 @@ impl BlockBuilder for StacksBlockBuilder { if let Err(e) = Relayer::static_check_problematic_relayed_tx( clarity_tx.config.mainnet, clarity_tx.get_epoch(), - &tx, + tx, ast_rules, ) { info!( "Detected problematic tx {} while mining; dropping from mempool", tx.txid() ); - return TransactionResult::problematic(&tx, Error::NetError(e)); + return TransactionResult::problematic(tx, Error::NetError(e)); } let (fee, receipt) = match StacksChainState::process_transaction( clarity_tx, tx, quiet, ast_rules, @@ -2877,9 +2877,9 @@ impl BlockBuilder for StacksBlockBuilder { Ok((fee, receipt)) => (fee, receipt), Err(e) => { let (is_problematic, e) = - TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + TransactionResult::is_problematic(tx, e, clarity_tx.get_epoch()); if is_problematic { - return TransactionResult::problematic(&tx, e); + return TransactionResult::problematic(tx, e); } else { match e { Error::CostOverflowError(cost_before, cost_after, total_budget) => { @@ -2904,23 +2904,21 @@ impl BlockBuilder for StacksBlockBuilder { }; return TransactionResult::error( - &tx, + tx, Error::TransactionTooBigError(measured_cost), ); } else { warn!( - "Transaction {} reached block cost {}; budget was {}", - tx.txid(), - &cost_after, - &total_budget + "Transaction {} reached block cost {cost_after}; budget was {total_budget}", + tx.txid() ); return TransactionResult::skipped_due_to_error( - &tx, + tx, Error::BlockTooBigError, ); } } - _ => return TransactionResult::error(&tx, e), + _ => return TransactionResult::error(tx, e), } } } @@ -2935,7 +2933,7 @@ impl BlockBuilder for StacksBlockBuilder { self.micro_txs.push(tx.clone()); self.total_streamed_fees += fee; - TransactionResult::success(&tx, fee, receipt) + TransactionResult::success(tx, fee, receipt) }; self.bytes_so_far += tx_len; diff --git 
a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 9ca3016a1b..23a2fc5f2a 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -137,7 +137,7 @@ fn test_bad_microblock_fees_pre_v210() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -148,7 +148,7 @@ fn test_bad_microblock_fees_pre_v210() { let mut block_ids = vec![]; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -460,7 +460,7 @@ fn test_bad_microblock_fees_fix_transition() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -471,7 +471,7 @@ fn test_bad_microblock_fees_fix_transition() { let mut block_ids = vec![]; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -817,7 +817,7 @@ fn test_get_block_info_v210() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = 
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -827,7 +827,7 @@ fn test_get_block_info_v210() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -1189,7 +1189,7 @@ fn test_get_block_info_v210_no_microblocks() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -1199,7 +1199,7 @@ fn test_get_block_info_v210_no_microblocks() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -1510,7 +1510,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -1526,7 +1526,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = 
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index bcf7611695..c4b367055a 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -77,7 +77,7 @@ fn test_build_anchored_blocks_empty() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -85,7 +85,7 @@ fn test_build_anchored_blocks_empty() { let mut last_block: Option = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); assert_eq!( @@ -178,7 +178,7 @@ fn test_build_anchored_blocks_stx_transfers_single() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -190,7 +190,7 @@ fn test_build_anchored_blocks_stx_transfers_single() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, 
microblocks) = peer.make_tenure( @@ -315,7 +315,7 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -327,7 +327,7 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -453,7 +453,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -465,7 +465,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -658,7 +658,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); 
sn.block_height }; @@ -669,7 +669,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -894,7 +894,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -907,7 +907,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -1131,7 +1131,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { } last_block_ch = Some( - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap() .consensus_hash, ); @@ -1183,7 +1183,7 @@ fn test_build_anchored_blocks_incrementing_nonces() { // during the tenure, let's push transactions to the mempool let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); let (burn_ops, stacks_block, 
microblocks) = peer.make_tenure( |ref mut miner, @@ -1366,7 +1366,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -1378,7 +1378,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -1456,7 +1456,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { &privks_expensive[tenure_id], 0, (2 * contract.len()) as u64, - &format!("hello-world-{}", tenure_id), + &format!("hello-world-{tenure_id}"), &contract, ); @@ -1562,7 +1562,7 @@ fn test_build_anchored_blocks_mempool_fee_transaction_too_low() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1682,7 +1682,7 @@ fn test_build_anchored_blocks_zero_fee_transaction() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1789,7 +1789,7 @@ fn 
test_build_anchored_blocks_multiple_chaintips() { let mut blank_mempool = MemPoolDB::open_test(false, 1, &blank_chainstate.root_path).unwrap(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -1797,7 +1797,7 @@ fn test_build_anchored_blocks_multiple_chaintips() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -1846,8 +1846,8 @@ fn test_build_anchored_blocks_multiple_chaintips() { &privks[tenure_id], 0, (2 * contract.len()) as u64, - &format!("hello-world-{}", tenure_id), - &contract, + &format!("hello-world-{tenure_id}"), + contract, ); mempool .submit( @@ -1931,7 +1931,7 @@ fn test_build_anchored_blocks_empty_chaintips() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -1939,7 +1939,7 @@ fn test_build_anchored_blocks_empty_chaintips() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -2004,8 +2004,8 @@ fn test_build_anchored_blocks_empty_chaintips() { &privks[tenure_id], 0, 2000, - &format!("hello-world-{}", 
tenure_id), - &contract, + &format!("hello-world-{tenure_id}"), + contract, ); mempool .submit( @@ -2074,7 +2074,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -2082,7 +2082,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -2132,8 +2132,8 @@ fn test_build_anchored_blocks_too_expensive_transactions() { &privks[tenure_id], 0, 100000000 / 2 + 1, - &format!("hello-world-{}", tenure_id), - &contract, + &format!("hello-world-{tenure_id}"), + contract, ); let mut contract_tx_bytes = vec![]; contract_tx @@ -2160,8 +2160,8 @@ fn test_build_anchored_blocks_too_expensive_transactions() { &privks[tenure_id], 1, 100000000 / 2, - &format!("hello-world-{}-2", tenure_id), - &contract, + &format!("hello-world-{tenure_id}-2"), + contract, ); let mut contract_tx_bytes = vec![]; contract_tx @@ -2228,7 +2228,7 @@ fn test_build_anchored_blocks_invalid() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -2254,7 +2254,7 @@ fn test_build_anchored_blocks_invalid() { for tenure_id in 0..num_blocks { // send transactions to the mempool let mut tip = - 
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); if tenure_id == bad_block_ancestor_tenure { @@ -2303,7 +2303,7 @@ fn test_build_anchored_blocks_invalid() { Some(ref block) => { let ic = sortdb.index_conn(); let parent_block_hash = - if let Some(ref block) = last_valid_block.as_ref() { + if let Some(block) = last_valid_block.as_ref() { block.block_hash() } else { @@ -2439,7 +2439,7 @@ fn test_build_anchored_blocks_bad_nonces() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -2448,7 +2448,7 @@ fn test_build_anchored_blocks_bad_nonces() { for tenure_id in 0..num_blocks { eprintln!("Start tenure {:?}", tenure_id); // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -2498,8 +2498,8 @@ fn test_build_anchored_blocks_bad_nonces() { &privks[tenure_id], 0, 10000, - &format!("hello-world-{}", tenure_id), - &contract, + &format!("hello-world-{tenure_id}"), + contract, ); let mut contract_tx_bytes = vec![]; contract_tx @@ -2527,8 +2527,8 @@ fn test_build_anchored_blocks_bad_nonces() { &privks[tenure_id], 1, 10000, - &format!("hello-world-{}-2", tenure_id), - &contract, + &format!("hello-world-{tenure_id}-2"), + contract, ); let mut contract_tx_bytes = vec![]; contract_tx @@ -2564,8 +2564,8 @@ fn test_build_anchored_blocks_bad_nonces() { &privks[tenure_id], 0, 10000, - &format!("hello-world-{}", tenure_id), - &contract, + &format!("hello-world-{tenure_id}"), + 
contract, ); let mut contract_tx_bytes = vec![]; contract_tx @@ -2593,8 +2593,8 @@ fn test_build_anchored_blocks_bad_nonces() { &privks[tenure_id], 1, 10000, - &format!("hello-world-{}-2", tenure_id), - &contract, + &format!("hello-world-{tenure_id}-2"), + contract, ); let mut contract_tx_bytes = vec![]; contract_tx @@ -2691,7 +2691,7 @@ fn test_build_microblock_stream_forks() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -2702,7 +2702,7 @@ fn test_build_microblock_stream_forks() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -2826,7 +2826,7 @@ fn test_build_microblock_stream_forks() { // find the poison-microblock at seq 2 let (_, poison_opt) = match StacksChainState::load_descendant_staging_microblock_stream_with_poison( - &chainstate.db(), + chainstate.db(), &parent_index_hash, 0, u16::MAX @@ -2992,7 +2992,7 @@ fn test_build_microblock_stream_forks_with_descendants() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -3012,7 +3012,7 @@ fn test_build_microblock_stream_forks_with_descendants() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + 
let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (mut burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -3143,7 +3143,7 @@ fn test_build_microblock_stream_forks_with_descendants() { if mblock.header.sequence < 2 { tail = Some((mblock.block_hash(), mblock.header.sequence)); } - let stored = chainstate.preprocess_streamed_microblock(&parent_consensus_hash, &parent_header_hash, &mblock).unwrap(); + let stored = chainstate.preprocess_streamed_microblock(&parent_consensus_hash, &parent_header_hash, mblock).unwrap(); assert!(stored); } for mblock in forked_parent_microblock_stream[2..].iter() { @@ -3153,7 +3153,7 @@ fn test_build_microblock_stream_forks_with_descendants() { // find the poison-microblock at seq 2 let (_, poison_opt) = match StacksChainState::load_descendant_staging_microblock_stream_with_poison( - &chainstate.db(), + chainstate.db(), &parent_index_hash, 0, u16::MAX @@ -3505,7 +3505,7 @@ fn test_contract_call_across_clarity_versions() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -3515,7 +3515,7 @@ fn test_contract_call_across_clarity_versions() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -4056,7 +4056,7 @@ fn test_is_tx_problematic() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -4067,7 +4067,7 @@ fn test_is_tx_problematic() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -4124,7 +4124,7 @@ fn test_is_tx_problematic() { &privks_expensive[tenure_id], 0, (2 * contract_spends_too_much.len()) as u64, - &format!("hello-world-{}", &tenure_id), + &format!("hello-world-{tenure_id}"), &contract_spends_too_much ); let contract_spends_too_much_txid = contract_spends_too_much_tx.txid(); @@ -4273,7 +4273,7 @@ fn test_is_tx_problematic() { &privks_expensive[tenure_id], 4, (2 * contract_spends_too_much.len()) as u64, - &format!("hello-world-{}", &tenure_id), + &format!("hello-world-{tenure_id}"), &contract_spends_too_much ); let contract_spends_too_much_txid = contract_spends_too_much_tx.txid(); @@ -4539,7 +4539,7 @@ fn mempool_incorporate_pox_unlocks() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -4569,7 +4569,7 @@ fn mempool_incorporate_pox_unlocks() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( @@ -4754,7 +4754,7 @@ fn test_fee_order_mismatch_nonce_order() { let chainstate_path 
= peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -4766,7 +4766,7 @@ fn test_fee_order_mismatch_nonce_order() { let mut last_block = None; // send transactions to the mempool let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index b8441a3cbb..9240626e85 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -63,7 +63,7 @@ use crate::util_lib::db::Error as db_error; fn connect_burnchain_db(burnchain: &Burnchain) -> BurnchainDB { let burnchain_db = - BurnchainDB::connect(&burnchain.get_burnchaindb_path(), &burnchain, true).unwrap(); + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), burnchain, true).unwrap(); burnchain_db } @@ -140,7 +140,7 @@ where &last_key, parent_block_opt.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!("Produce anchored stacks block"); let mut miner_chainstate = open_chainstate(false, 0x80000000, &full_test_name); @@ -326,7 +326,7 @@ where &last_key, parent_block_opt.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!("Produce anchored stacks block"); let mut miner_chainstate = open_chainstate(false, 0x80000000, &full_test_name); @@ -470,7 +470,7 @@ where &last_key_1, parent_block_opt.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, 
ref mut miner, sortdb| { test_debug!( "Produce anchored stacks block in stacks fork 1 via {}", miner.origin_address().unwrap().to_string() @@ -518,7 +518,7 @@ where &last_key_2, parent_block_opt.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!( "Produce anchored stacks block in stacks fork 2 via {}", miner.origin_address().unwrap().to_string() @@ -807,7 +807,7 @@ where &last_key_1, parent_block_opt.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!( "Produce anchored stacks block in stacks fork 1 via {}", miner.origin_address().unwrap().to_string() @@ -855,7 +855,7 @@ where &last_key_2, parent_block_opt.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!( "Produce anchored stacks block in stacks fork 2 via {}", miner.origin_address().unwrap().to_string() @@ -1071,7 +1071,7 @@ where &last_key_1, parent_block_opt_1.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!( "Miner {}: Produce anchored stacks block in stacks fork 1 via {}", miner.id, @@ -1120,7 +1120,7 @@ where &last_key_2, parent_block_opt_2.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!( "Miner {}: Produce anchored stacks block in stacks fork 2 via {}", miner.id, @@ -1423,7 +1423,7 @@ where &last_key_1, parent_block_opt.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!("Produce anchored stacks block from miner 1"); let mut miner_chainstate = open_chainstate(false, 0x80000000, &full_test_name); @@ -1468,7 +1468,7 @@ where &last_key_2, parent_block_opt.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!("Produce anchored stacks block from miner 2"); let mut 
miner_chainstate = open_chainstate(false, 0x80000000, &full_test_name); @@ -1667,7 +1667,7 @@ where &last_key_1, parent_block_opt_1.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!( "Produce anchored stacks block in stacks fork 1 via {}", miner.origin_address().unwrap().to_string() @@ -1715,7 +1715,7 @@ where &last_key_2, parent_block_opt_2.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!( "Produce anchored stacks block in stacks fork 2 via {}", miner.origin_address().unwrap().to_string() @@ -1978,7 +1978,7 @@ where &last_key_1, parent_block_opt_1.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!("Produce anchored stacks block"); let mut miner_chainstate = open_chainstate(false, 0x80000000, &full_test_name); @@ -2023,7 +2023,7 @@ where &last_key_2, parent_block_opt_2.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!("Produce anchored stacks block"); let mut miner_chainstate = open_chainstate(false, 0x80000000, &full_test_name); @@ -2222,7 +2222,7 @@ where &last_key_1, parent_block_opt_1.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!( "Produce anchored stacks block in stacks fork 1 via {}", miner.origin_address().unwrap().to_string() @@ -2270,7 +2270,7 @@ where &last_key_2, parent_block_opt_2.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { test_debug!( "Produce anchored stacks block in stacks fork 2 via {}", miner.origin_address().unwrap().to_string() @@ -2436,8 +2436,8 @@ fn assert_chainstate_blocks_eq(test_name_1: &str, test_name_2: &str) { let ch2 = open_chainstate(false, 0x80000000, test_name_2); // check presence of anchored blocks - let mut all_blocks_1 = 
StacksChainState::list_blocks(&ch1.db()).unwrap(); - let mut all_blocks_2 = StacksChainState::list_blocks(&ch2.db()).unwrap(); + let mut all_blocks_1 = StacksChainState::list_blocks(ch1.db()).unwrap(); + let mut all_blocks_2 = StacksChainState::list_blocks(ch2.db()).unwrap(); all_blocks_1.sort(); all_blocks_2.sort(); @@ -2449,9 +2449,9 @@ fn assert_chainstate_blocks_eq(test_name_1: &str, test_name_2: &str) { // check presence and ordering of microblocks let mut all_microblocks_1 = - StacksChainState::list_microblocks(&ch1.db(), &ch1.blocks_path).unwrap(); + StacksChainState::list_microblocks(ch1.db(), &ch1.blocks_path).unwrap(); let mut all_microblocks_2 = - StacksChainState::list_microblocks(&ch2.db(), &ch2.blocks_path).unwrap(); + StacksChainState::list_microblocks(ch2.db(), &ch2.blocks_path).unwrap(); all_microblocks_1.sort(); all_microblocks_2.sort(); @@ -2470,14 +2470,14 @@ fn assert_chainstate_blocks_eq(test_name_1: &str, test_name_2: &str) { // compare block status (staging vs confirmed) and contents for i in 0..all_blocks_1.len() { let staging_1_opt = StacksChainState::load_staging_block( - &ch1.db(), + ch1.db(), &ch2.blocks_path, &all_blocks_1[i].0, &all_blocks_1[i].1, ) .unwrap(); let staging_2_opt = StacksChainState::load_staging_block( - &ch2.db(), + ch2.db(), &ch2.blocks_path, &all_blocks_2[i].0, &all_blocks_2[i].1, @@ -2518,7 +2518,7 @@ fn assert_chainstate_blocks_eq(test_name_1: &str, test_name_2: &str) { } let chunk_1_opt = StacksChainState::load_descendant_staging_microblock_stream( - &ch1.db(), + ch1.db(), &StacksBlockHeader::make_index_block_hash( &all_microblocks_1[i].0, &all_microblocks_1[i].1, @@ -2528,7 +2528,7 @@ fn assert_chainstate_blocks_eq(test_name_1: &str, test_name_2: &str) { ) .unwrap(); let chunk_2_opt = StacksChainState::load_descendant_staging_microblock_stream( - &ch1.db(), + ch1.db(), &StacksBlockHeader::make_index_block_hash( &all_microblocks_2[i].0, &all_microblocks_2[i].1, @@ -2550,14 +2550,14 @@ fn 
assert_chainstate_blocks_eq(test_name_1: &str, test_name_2: &str) { for j in 0..all_microblocks_1[i].2.len() { // staging status is the same let staging_1_opt = StacksChainState::load_staging_microblock( - &ch1.db(), + ch1.db(), &all_microblocks_1[i].0, &all_microblocks_1[i].1, &all_microblocks_1[i].2[j], ) .unwrap(); let staging_2_opt = StacksChainState::load_staging_microblock( - &ch2.db(), + ch2.db(), &all_microblocks_2[i].0, &all_microblocks_2[i].1, &all_microblocks_2[i].2[j], @@ -2600,7 +2600,7 @@ fn miner_trace_replay_randomized(miner_trace: &mut TestMinerTrace) { miner_trace .miners .iter() - .map(|ref miner| miner.origin_address().unwrap()) + .map(|miner| miner.origin_address().unwrap()) .collect(), ); nodes.insert(test_name, next_node); @@ -2638,11 +2638,11 @@ fn miner_trace_replay_randomized(miner_trace: &mut TestMinerTrace) { // "discover" the stacks block and its microblocks in all nodes // TODO: randomize microblock discovery order too - for (node_name, mut node) in nodes.iter_mut() { + for (node_name, node) in nodes.iter_mut() { microblocks.as_mut_slice().shuffle(&mut rng); preprocess_stacks_block_data( - &mut node, + node, &mut miner_trace.burn_node, &fork_snapshot, &stacks_block, @@ -2671,7 +2671,7 @@ fn miner_trace_replay_randomized(miner_trace: &mut TestMinerTrace) { } else { for mblock in microblocks.iter() { preprocess_stacks_block_data( - &mut node, + node, &mut miner_trace.burn_node, &fork_snapshot, &stacks_block, @@ -3605,7 +3605,7 @@ fn mine_anchored_invalid_token_transfer_blocks_single() { .unwrap() .unwrap(); assert!(StacksChainState::is_block_orphaned( - &chainstate.db(), + chainstate.db(), &sn.consensus_hash, &bc.block_header_hash ) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 9f5dd9c860..ba11a6ab97 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -88,8 +88,8 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> 
Result<(), io::Error> { while !dir_queue.is_empty() { let next_dir = dir_queue.pop_front().unwrap(); - let next_src_dir = path_join(&src_dir, &next_dir); - let next_dest_dir = path_join(&dest_dir, &next_dir); + let next_src_dir = path_join(src_dir, &next_dir); + let next_dest_dir = path_join(dest_dir, &next_dir); eprintln!("mkdir {next_dest_dir}"); fs::create_dir_all(&next_dest_dir)?; @@ -99,11 +99,11 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let path = dirent.path(); let md = fs::metadata(&path)?; if md.is_dir() { - let frontier = path_join(&next_dir, &dirent.file_name().to_str().unwrap()); + let frontier = path_join(&next_dir, dirent.file_name().to_str().unwrap()); eprintln!("push {frontier}"); dir_queue.push_back(frontier); } else { - let dest_path = path_join(&next_dest_dir, &dirent.file_name().to_str().unwrap()); + let dest_path = path_join(&next_dest_dir, dirent.file_name().to_str().unwrap()); eprintln!("copy {} to {dest_path}", &path.to_str().unwrap()); fs::copy(path, dest_path)?; } @@ -782,7 +782,7 @@ pub fn preprocess_stacks_block_data( .preprocess_anchored_block( &ic, &commit_snapshot.consensus_hash, - &stacks_block, + stacks_block, &parent_block_consensus_hash, 5, ) @@ -923,7 +923,7 @@ pub fn check_mining_reward( if confirmed_block_height as u64 > block_height - MINER_REWARD_MATURITY { continue; } - if let Some(ref parent_reward) = stream_rewards.get(&parent_block) { + if let Some(parent_reward) = stream_rewards.get(&parent_block) { if parent_reward.address == miner.origin_address().unwrap() { let streamed = match &parent_reward.tx_fees { MinerPaymentTxFees::Epoch2 { streamed, .. 
} => streamed, @@ -968,7 +968,7 @@ pub fn get_last_microblock_header( parent_block_opt: Option<&StacksBlock>, ) -> Option { let last_microblocks_opt = match parent_block_opt { - Some(ref block) => node.get_microblock_stream(&miner, &block.block_hash()), + Some(block) => node.get_microblock_stream(miner, &block.block_hash()), None => None, }; diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index d813dbcf01..f0fdf4f192 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -193,7 +193,7 @@ impl StacksMessageCodec for TransactionPayload { if let Some(version) = version_opt { // caller requests a specific Clarity version write_next(fd, &(TransactionPayloadID::VersionedSmartContract as u8))?; - ClarityVersion_consensus_serialize(&version, fd)?; + ClarityVersion_consensus_serialize(version, fd)?; sc.consensus_serialize(fd)?; } else { // caller requests to use whatever the current clarity version is @@ -1020,7 +1020,7 @@ impl StacksTransaction { /// Get a copy of the sending condition that will pay the tx fee pub fn get_payer(&self) -> TransactionSpendingCondition { match self.auth.sponsor() { - Some(ref tsc) => (*tsc).clone(), + Some(tsc) => tsc.clone(), None => self.auth.origin().clone(), } } @@ -3502,14 +3502,14 @@ mod test { // length asset_name.len(), ]; - asset_name_bytes.extend_from_slice(&asset_name.to_string().as_str().as_bytes()); + asset_name_bytes.extend_from_slice(asset_name.to_string().as_str().as_bytes()); let contract_name = ContractName::try_from("hello-world").unwrap(); let mut contract_name_bytes = vec![ // length contract_name.len(), ]; - contract_name_bytes.extend_from_slice(&contract_name.to_string().as_str().as_bytes()); + contract_name_bytes.extend_from_slice(contract_name.to_string().as_str().as_bytes()); let asset_info = AssetInfo { contract_address: addr.clone(), @@ -3863,7 +3863,7 @@ mod test { test_debug!("---------"); 
test_debug!("text tx bytes:\n{}", &to_hex(&tx_bytes)); - check_codec_and_corruption::(&tx, &tx_bytes); + check_codec_and_corruption::(tx, &tx_bytes); } } diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index d07399a027..6951ed062c 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -74,8 +74,8 @@ lazy_static! { ("lockup", BOOT_CODE_LOCKUP), ("costs", BOOT_CODE_COSTS), ("cost-voting", BOOT_CODE_COST_VOTING_MAINNET), - ("bns", &BOOT_CODE_BNS), - ("genesis", &BOOT_CODE_GENESIS), + ("bns", BOOT_CODE_BNS), + ("genesis", BOOT_CODE_GENESIS), ("costs-2", BOOT_CODE_COSTS_2), ("pox-2", &POX_2_MAINNET_CODE), ("costs-3", BOOT_CODE_COSTS_3), @@ -85,8 +85,8 @@ lazy_static! { ("lockup", BOOT_CODE_LOCKUP), ("costs", BOOT_CODE_COSTS), ("cost-voting", &BOOT_CODE_COST_VOTING_TESTNET), - ("bns", &BOOT_CODE_BNS), - ("genesis", &BOOT_CODE_GENESIS), + ("bns", BOOT_CODE_BNS), + ("genesis", BOOT_CODE_GENESIS), ("costs-2", BOOT_CODE_COSTS_2_TESTNET), ("pox-2", &POX_2_TESTNET_CODE), ("costs-3", BOOT_CODE_COSTS_3), @@ -300,7 +300,7 @@ fn get_cli_chain_tip(conn: &Connection) -> StacksBlockId { let mut hash_opt = None; while let Some(row) = rows.next().expect("FATAL: could not read block hash") { let bhh = friendly_expect( - StacksBlockId::from_column(&row, "block_hash"), + StacksBlockId::from_column(row, "block_hash"), "FATAL: could not parse block hash", ); hash_opt = Some(bhh); @@ -320,10 +320,7 @@ fn get_cli_block_height(conn: &Connection, block_id: &StacksBlockId) -> Option Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if let Some(_) = get_cli_block_height(conn, id_bhh) { let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); Some(BurnchainHeaderHash(hash_bytes.0)) } else { @@ -660,7 +657,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if let Some(_) = get_cli_block_height(conn, 
id_bhh) { let hash_bytes = Hash160::from_data(&id_bhh.0); Some(ConsensusHash(hash_bytes.0)) } else { @@ -674,7 +671,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if let Some(_) = get_cli_block_height(conn, id_bhh) { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); @@ -690,7 +687,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if let Some(_) = get_cli_block_height(conn, id_bhh) { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); @@ -707,7 +704,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: Option<&StacksEpochId>, ) -> Option { let conn = self.conn(); - if let Some(height) = get_cli_block_height(&conn, id_bhh) { + if let Some(height) = get_cli_block_height(conn, id_bhh) { Some(height * 600 + 1231006505) } else { None @@ -716,7 +713,7 @@ impl HeadersDB for CLIHeadersDB { fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { let conn = self.conn(); - if let Some(height) = get_cli_block_height(&conn, id_bhh) { + if let Some(height) = get_cli_block_height(conn, id_bhh) { Some(height * 10 + 1713799973) } else { None @@ -725,7 +722,7 @@ impl HeadersDB for CLIHeadersDB { fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { let conn = self.conn(); - if let Some(height) = get_cli_block_height(&conn, id_bhh) { + if let Some(height) = get_cli_block_height(conn, id_bhh) { Some(height as u32) } else { None @@ -746,7 +743,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { // if the block is defined at all, then return a constant - get_cli_block_height(&self.conn(), id_bhh).map(|_| 
2000) + get_cli_block_height(self.conn(), id_bhh).map(|_| 2000) } fn get_burnchain_tokens_spent_for_winning_block( @@ -755,7 +752,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { // if the block is defined at all, then return a constant - get_cli_block_height(&self.conn(), id_bhh).map(|_| 1000) + get_cli_block_height(self.conn(), id_bhh).map(|_| 1000) } fn get_tokens_earned_for_block( @@ -764,7 +761,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { // if the block is defined at all, then return a constant - get_cli_block_height(&self.conn(), id_bhh).map(|_| 3000) + get_cli_block_height(self.conn(), id_bhh).map(|_| 3000) } fn get_stacks_height_for_tenure_height( @@ -911,7 +908,7 @@ fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C) let mut ast = friendly_expect( parse( &contract_identifier, - &contract_content, + contract_content, ClarityVersion::Clarity2, ), "Failed to parse program.", @@ -931,7 +928,7 @@ fn install_boot_code(header_db: &CLIHeadersDB, marf: &mut C) .initialize_versioned_contract( contract_identifier, ClarityVersion::Clarity2, - &contract_content, + contract_content, None, ASTRules::PrecheckSize, ) @@ -987,7 +984,7 @@ pub fn add_assets(result: &mut serde_json::Value, assets: bool, asset_map: Asset pub fn add_serialized_output(result: &mut serde_json::Value, value: Value) { let result_raw = { - let bytes = (&value).serialize_to_vec().unwrap(); + let bytes = value.serialize_to_vec().unwrap(); bytes_to_hex(&bytes) }; result["output_serialized"] = serde_json::to_value(result_raw.as_str()).unwrap(); @@ -1055,8 +1052,8 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option { where F: FnOnce(&mut AnalysisDatabase) -> R, { - self.with_analysis_db(|mut db, cost_tracker| { + self.with_analysis_db(|db, cost_tracker| { db.begin(); - let result = to_do(&mut db); + let result = to_do(db); db.roll_back() .expect("FATAL: failed to rollback changes during read-only 
connection"); (cost_tracker, result) @@ -1927,7 +1927,7 @@ mod tests { tx.analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) }) @@ -1940,7 +1940,7 @@ mod tests { tx.analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) }) @@ -1988,7 +1988,7 @@ mod tests { .analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) .unwrap(); @@ -1996,7 +1996,7 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, &ct_ast, - &contract, + contract, None, |_, _| false, ) @@ -2041,7 +2041,7 @@ mod tests { .analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) .unwrap(); @@ -2049,7 +2049,7 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, &ct_ast, - &contract, + contract, None, |_, _| false, ) @@ -2069,7 +2069,7 @@ mod tests { .analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) .unwrap(); @@ -2077,7 +2077,7 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, &ct_ast, - &contract, + contract, None, |_, _| false, ) @@ -2099,7 +2099,7 @@ mod tests { .analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) .unwrap(); @@ -2109,7 +2109,7 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, &ct_ast, - &contract, + contract, None, |_, _| false ) @@ -2153,7 +2153,7 @@ mod tests { .analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) .unwrap(); @@ -2161,7 +2161,7 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, &ct_ast, - &contract, + contract, None, |_, _| false, ) @@ -2213,7 +2213,7 @@ mod tests { .analyze_smart_contract( &contract_identifier, 
ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) .unwrap(); @@ -2221,7 +2221,7 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, &ct_ast, - &contract, + contract, None, |_, _| false, ) @@ -2305,7 +2305,7 @@ mod tests { .analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) .unwrap(); @@ -2313,7 +2313,7 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, &ct_ast, - &contract, + contract, None, |_, _| false, ) @@ -2436,7 +2436,7 @@ mod tests { .analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) .unwrap(); @@ -2444,7 +2444,7 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, &ct_ast, - &contract, + contract, None, |_, _| false, ) @@ -2820,7 +2820,7 @@ mod tests { .analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity1, - &contract, + contract, ASTRules::PrecheckSize, ) .unwrap(); @@ -2828,7 +2828,7 @@ mod tests { &contract_identifier, ClarityVersion::Clarity1, &ct_ast, - &contract, + contract, None, |_, _| false, ) diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index 56a1fde107..7eaed3d1f7 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -67,7 +67,7 @@ impl MarfedKV { .map_err(|err| InterpreterError::MarfFailure(err.to_string()))? 
}; - if SqliteConnection::check_schema(&marf.sqlite_conn()).is_ok() { + if SqliteConnection::check_schema(marf.sqlite_conn()).is_ok() { // no need to initialize return Ok(marf); } diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index c7de36aa1c..394bd328e9 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -917,14 +917,14 @@ fn test_block_heights() { let (ast, analysis) = clarity_db.analyze_smart_contract( &contract_identifier1, ClarityVersion::Clarity1, - &contract_clarity1, + contract_clarity1, ASTRules::PrecheckSize, ).unwrap(); let res = clarity_db.analyze_smart_contract( &contract_identifier2, ClarityVersion::Clarity1, - &contract_clarity3, + contract_clarity3, ASTRules::PrecheckSize, ); if let Err(ClarityError::Analysis(check_error)) = res { @@ -952,14 +952,14 @@ fn test_block_heights() { let (ast, analysis) = clarity_db.analyze_smart_contract( &contract_identifier1, ClarityVersion::Clarity2, - &contract_clarity1, + contract_clarity1, ASTRules::PrecheckSize, ).unwrap(); let res = clarity_db.analyze_smart_contract( &contract_identifier2, ClarityVersion::Clarity2, - &contract_clarity3, + contract_clarity3, ASTRules::PrecheckSize, ); if let Err(ClarityError::Analysis(check_error)) = res { @@ -976,7 +976,7 @@ fn test_block_heights() { let res = clarity_db.analyze_smart_contract( &contract_identifier1, ClarityVersion::Clarity3, - &contract_clarity1, + contract_clarity1, ASTRules::PrecheckSize, ); if let Err(ClarityError::Analysis(check_error)) = res { @@ -992,7 +992,7 @@ fn test_block_heights() { let (ast, analysis) = clarity_db.analyze_smart_contract( &contract_identifier2, ClarityVersion::Clarity3, - &contract_clarity3, + contract_clarity3, ASTRules::PrecheckSize, ).unwrap(); @@ -1207,7 +1207,7 @@ fn test_block_heights_across_versions() { .analyze_smart_contract( &contract_id_e2c1, ClarityVersion::Clarity1, - &contract_e2c1_2, + contract_e2c1_2, 
ASTRules::PrecheckSize, ) .unwrap(); @@ -1237,7 +1237,7 @@ fn test_block_heights_across_versions() { .analyze_smart_contract( &contract_id_e2c2, ClarityVersion::Clarity2, - &contract_e2c1_2, + contract_e2c1_2, ASTRules::PrecheckSize, ) .unwrap(); @@ -1340,7 +1340,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { .analyze_smart_contract( &contract_id_e2c1, ClarityVersion::Clarity1, - &contract_e2c1_2, + contract_e2c1_2, ASTRules::PrecheckSize, ) .unwrap(); @@ -1367,7 +1367,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { .analyze_smart_contract( &contract_id_e2c2, ClarityVersion::Clarity2, - &contract_e2c1_2, + contract_e2c1_2, ASTRules::PrecheckSize, ) .unwrap(); @@ -1484,7 +1484,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { .analyze_smart_contract( &contract_id_e2c1, ClarityVersion::Clarity1, - &contract_e2c1_2, + contract_e2c1_2, ASTRules::PrecheckSize, ) .unwrap(); @@ -1511,7 +1511,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { .analyze_smart_contract( &contract_id_e2c2, ClarityVersion::Clarity2, - &contract_e2c1_2, + contract_e2c1_2, ASTRules::PrecheckSize, ) .unwrap(); @@ -1618,7 +1618,7 @@ fn test_block_heights_at_block() { let (ast, analysis) = clarity_db.analyze_smart_contract( &contract_identifier, ClarityVersion::Clarity3, - &contract, + contract, ASTRules::PrecheckSize, ).unwrap(); @@ -1679,7 +1679,7 @@ fn test_get_block_info_time() { .analyze_smart_contract( &contract_identifier2, ClarityVersion::Clarity2, - &contract2, + contract2, ASTRules::PrecheckSize, ) .unwrap(); @@ -1701,7 +1701,7 @@ fn test_get_block_info_time() { .analyze_smart_contract( &contract_identifier3, ClarityVersion::Clarity3, - &contract3, + contract3, ASTRules::PrecheckSize, ) .unwrap(); @@ -1723,7 +1723,7 @@ fn test_get_block_info_time() { .analyze_smart_contract( &contract_identifier3_3, ClarityVersion::Clarity3, - &contract3_3, + contract3_3, ASTRules::PrecheckSize, ) .unwrap(); diff --git 
a/stackslib/src/clarity_vm/tests/forking.rs b/stackslib/src/clarity_vm/tests/forking.rs index 22a3f07321..59e544195c 100644 --- a/stackslib/src/clarity_vm/tests/forking.rs +++ b/stackslib/src/clarity_vm/tests/forking.rs @@ -71,7 +71,7 @@ fn test_at_block_mutations(#[case] version: ClarityVersion, #[case] epoch: Stack eprintln!("Initializing contract..."); owned_env - .initialize_contract(c.clone(), &contract, None, ASTRules::PrecheckSize) + .initialize_contract(c.clone(), contract, None, ASTRules::PrecheckSize) .unwrap(); } @@ -150,7 +150,7 @@ fn test_at_block_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpoc eprintln!("Initializing contract..."); owned_env - .initialize_contract(c.clone(), &contract, None, ASTRules::PrecheckSize) + .initialize_contract(c.clone(), contract, None, ASTRules::PrecheckSize) .unwrap(); } @@ -224,7 +224,7 @@ fn test_at_block_missing_defines(#[case] version: ClarityVersion, #[case] epoch: eprintln!("Initializing contract..."); owned_env - .initialize_contract(c_a.clone(), &contract, None, ASTRules::PrecheckSize) + .initialize_contract(c_a.clone(), contract, None, ASTRules::PrecheckSize) .unwrap(); } @@ -239,7 +239,7 @@ fn test_at_block_missing_defines(#[case] version: ClarityVersion, #[case] epoch: eprintln!("Initializing contract..."); let e = owned_env - .initialize_contract(c_b.clone(), &contract, None, ASTRules::PrecheckSize) + .initialize_contract(c_b.clone(), contract, None, ASTRules::PrecheckSize) .unwrap_err(); e } diff --git a/stackslib/src/clarity_vm/tests/simple_tests.rs b/stackslib/src/clarity_vm/tests/simple_tests.rs index a73489bb95..0fb38cdf9e 100644 --- a/stackslib/src/clarity_vm/tests/simple_tests.rs +++ b/stackslib/src/clarity_vm/tests/simple_tests.rs @@ -55,7 +55,7 @@ fn test_at_unknown_block() { let err = owned_env .initialize_contract( QualifiedContractIdentifier::local("contract").unwrap(), - &contract, + contract, None, clarity::vm::ast::ASTRules::PrecheckSize, ) diff --git a/stackslib/src/cli.rs 
b/stackslib/src/cli.rs index 66e14d4b5d..2cca8ce601 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -84,7 +84,7 @@ pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts "config" => { let path = &argv[i]; i += 1; - let config_file = ConfigFile::from_path(&path).unwrap_or_else(|e| { + let config_file = ConfigFile::from_path(path).unwrap_or_else(|e| { panic!("Failed to read '{path}' as stacks-node config: {e}") }); let config = Config::from_config_file(config_file, false).unwrap_or_else(|e| { @@ -279,7 +279,7 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { if i % 100 == 0 { println!("Checked {i}..."); } - replay_naka_staging_block(db_path, index_block_hash, &conf); + replay_naka_staging_block(db_path, index_block_hash, conf); } println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); } @@ -374,7 +374,7 @@ pub fn command_replay_mock_mining(argv: &[String], conf: Option<&Config>) { "block_height" => bh, "block" => ?block ); - replay_mock_mined_block(&db_path, block, conf); + replay_mock_mined_block(db_path, block, conf); } } @@ -715,7 +715,7 @@ fn replay_block( let Some(next_microblocks) = StacksChainState::inner_find_parent_microblock_stream( &chainstate_tx.tx, - &block_hash, + block_hash, &parent_block_hash, &parent_header_info.consensus_hash, parent_microblock_hash, @@ -727,7 +727,7 @@ fn replay_block( }; let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) = - match SortitionDB::get_block_snapshot_consensus(&sort_tx, &block_consensus_hash).unwrap() { + match SortitionDB::get_block_snapshot_consensus(&sort_tx, block_consensus_hash).unwrap() { Some(sn) => ( sn.burn_header_hash, sn.block_height as u32, @@ -745,10 +745,10 @@ fn replay_block( block_consensus_hash, block_hash, &block_id, &burn_header_hash, parent_microblock_hash, ); - if !StacksChainState::check_block_attachment(&parent_block_header, &block.header) { + if 
!StacksChainState::check_block_attachment(parent_block_header, &block.header) { let msg = format!( "Invalid stacks block {}/{} -- does not attach to parent {}/{}", - &block_consensus_hash, + block_consensus_hash, block.block_hash(), parent_block_header.block_hash(), &parent_header_info.consensus_hash @@ -760,9 +760,9 @@ fn replay_block( // validation check -- validate parent microblocks and find the ones that connect the // block's parent to this block. let next_microblocks = StacksChainState::extract_connecting_microblocks( - &parent_header_info, - &block_consensus_hash, - &block_hash, + parent_header_info, + block_consensus_hash, + block_hash, block, next_microblocks, ) @@ -795,12 +795,12 @@ fn replay_block( clarity_instance, &mut sort_tx, &pox_constants, - &parent_header_info, + parent_header_info, block_consensus_hash, &burn_header_hash, burn_header_height, burn_header_timestamp, - &block, + block, block_size, &next_microblocks, block_commit_burn, @@ -1080,7 +1080,7 @@ fn replay_block_nakamoto( .try_into() .expect("Failed to downcast u64 to u32"), next_ready_block_snapshot.burn_header_timestamp, - &block, + block, block_size, commit_burn, sortition_burn, diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 865f99d3b0..d219699caf 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -333,7 +333,7 @@ impl MemPoolAdmitter { tx_size: u64, ) -> Result<(), MemPoolRejection> { let sortition_id = match SortitionDB::get_sortition_id_by_consensus( - &sortdb.conn(), + sortdb.conn(), &self.cur_consensus_hash, ) { Ok(Some(x)) => x, @@ -887,11 +887,11 @@ impl<'a> MemPoolTx<'a> { where F: FnOnce(&mut DBTx<'a>, &mut BloomCounter) -> R, { - let mut bc = tx + let bc = tx .bloom_counter .take() .expect("BUG: did not replace bloom filter"); - let res = f(&mut tx.tx, &mut bc); + let res = f(&mut tx.tx, bc); tx.bloom_counter.replace(bc); res } @@ -968,7 +968,7 @@ impl<'a> MemPoolTx<'a> { // keep the bloom counter un-saturated 
-- remove at most one transaction from it to keep // the error rate at or below the target error rate let evict_txid = { - let num_recents = MemPoolDB::get_num_recent_txs(&dbtx)?; + let num_recents = MemPoolDB::get_num_recent_txs(dbtx)?; if num_recents >= MAX_BLOOM_COUNTER_TXS.into() { // remove lowest-fee tx (they're paying the least, so replication is // deprioritized) @@ -976,7 +976,7 @@ impl<'a> MemPoolTx<'a> { let args = params![u64_to_sql( coinbase_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64), )?]; - let evict_txid: Option = query_row(&dbtx, sql, args)?; + let evict_txid: Option = query_row(dbtx, sql, args)?; if let Some(evict_txid) = evict_txid { bloom_counter.remove_raw(dbtx, &evict_txid.0)?; @@ -1144,7 +1144,7 @@ fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, d #[cfg(test)] pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM nonces"; - let mut stmt = conn.prepare(&sql).map_err(|e| db_error::SqliteError(e))?; + let mut stmt = conn.prepare(sql).map_err(|e| db_error::SqliteError(e))?; let mut iter = stmt .query(NO_PARAMS) .map_err(|e| db_error::SqliteError(e))?; @@ -1297,7 +1297,7 @@ impl MemPoolDB { /// Apply all schema migrations up to the latest schema. 
fn apply_schema_migrations(tx: &mut DBTx) -> Result<(), db_error> { loop { - let version = MemPoolDB::get_schema_version(&tx)?.unwrap_or(1); + let version = MemPoolDB::get_schema_version(tx)?.unwrap_or(1); match version { 1 => { MemPoolDB::instantiate_cost_estimator(tx)?; @@ -1672,7 +1672,7 @@ impl MemPoolDB { "; let mut query_stmt_null = self .db - .prepare(&sql) + .prepare(sql) .map_err(|err| Error::SqliteError(err))?; let mut null_iterator = query_stmt_null .query(NO_PARAMS) @@ -1686,7 +1686,7 @@ impl MemPoolDB { "; let mut query_stmt_fee = self .db - .prepare(&sql) + .prepare(sql) .map_err(|err| Error::SqliteError(err))?; let mut fee_iterator = query_stmt_fee .query(NO_PARAMS) @@ -1808,7 +1808,7 @@ impl MemPoolDB { }; // Read in and deserialize the transaction. - let tx_info_option = MemPoolDB::get_tx(&self.conn(), &candidate.txid)?; + let tx_info_option = MemPoolDB::get_tx(self.conn(), &candidate.txid)?; let tx_info = match tx_info_option { Some(tx) => tx, None => { @@ -1983,7 +1983,7 @@ impl MemPoolDB { #[cfg(test)] pub fn get_all_txs(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM mempool"; - let rows = query_rows::(conn, &sql, NO_PARAMS)?; + let rows = query_rows::(conn, sql, NO_PARAMS)?; Ok(rows) } @@ -1996,7 +1996,7 @@ impl MemPoolDB { ) -> Result { let sql = "SELECT * FROM mempool WHERE consensus_hash = ?1 AND block_header_hash = ?2"; let args = params![consensus_hash, block_header_hash]; - let rows = query_rows::(conn, &sql, args)?; + let rows = query_rows::(conn, sql, args)?; Ok(rows.len()) } @@ -2016,7 +2016,7 @@ impl MemPoolDB { block_header_hash, u64_to_sql(count)?, ]; - let rows = query_rows::(conn, &sql, args)?; + let rows = query_rows::(conn, sql, args)?; Ok(rows) } @@ -2385,7 +2385,7 @@ impl MemPoolDB { if do_admission_checks { mempool_tx .admitter - .set_block(&block_hash, (*consensus_hash).clone()); + .set_block(block_hash, (*consensus_hash).clone()); mempool_tx .admitter .will_admit_tx(chainstate, sortdb, tx, len)?; @@ -2394,8 
+2394,8 @@ impl MemPoolDB { MemPoolDB::try_add_tx( mempool_tx, chainstate, - &consensus_hash, - &block_hash, + consensus_hash, + block_hash, true, txid.clone(), tx_data, @@ -2734,7 +2734,7 @@ impl MemPoolDB { /// Get the bloom filter that represents the set of recent transactions we have pub fn get_txid_bloom_filter(&self) -> Result, db_error> { - self.bloom_counter.to_bloom_filter(&self.conn()) + self.bloom_counter.to_bloom_filter(self.conn()) } /// Find maximum Stacks coinbase height represented in the mempool. @@ -2752,7 +2752,7 @@ impl MemPoolDB { /// Get the transaction ID list that represents the set of transactions that are represented in /// the bloom counter. pub fn get_bloom_txids(&self) -> Result, db_error> { - let max_height = match MemPoolDB::get_max_coinbase_height(&self.conn())? { + let max_height = match MemPoolDB::get_max_coinbase_height(self.conn())? { Some(h) => h, None => { // mempool is empty @@ -2762,7 +2762,7 @@ impl MemPoolDB { let min_height = max_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64); let sql = "SELECT mempool.txid FROM mempool WHERE height > ?1 AND height <= ?2 AND NOT EXISTS (SELECT 1 FROM removed_txids WHERE txid = mempool.txid)"; let args = params![u64_to_sql(min_height)?, u64_to_sql(max_height)?]; - query_rows(&self.conn(), sql, args) + query_rows(self.conn(), sql, args) } /// Get the transaction tag list that represents the set of recent transactions we have. 
@@ -2811,7 +2811,7 @@ impl MemPoolDB { pub fn get_randomized_txid(&self, txid: &Txid) -> Result, db_error> { let sql = "SELECT hashed_txid FROM randomized_txids WHERE txid = ?1 LIMIT 1"; let args = params![txid]; - query_row(&self.conn(), sql, args) + query_row(self.conn(), sql, args) } pub fn find_next_missing_transactions( diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 4477c93b93..a563a2772a 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -2119,7 +2119,7 @@ fn test_make_mempool_sync_data() { assert!(in_bf >= recent_txids.len()); for txid in txids.iter() { - if !recent_set.contains(&txid) && bf.contains_raw(&txid.0) { + if !recent_set.contains(txid) && bf.contains_raw(&txid.0) { fp_count += 1; } if bf.contains_raw(&txid.0) { @@ -2166,7 +2166,7 @@ fn test_make_mempool_sync_data() { ); } - let total_count = MemPoolDB::get_num_recent_txs(&mempool.conn()).unwrap(); + let total_count = MemPoolDB::get_num_recent_txs(mempool.conn()).unwrap(); eprintln!( "present_count: {}, absent count: {}, total sent: {}, total recent: {}", present_count, diff --git a/stackslib/src/cost_estimates/fee_medians.rs b/stackslib/src/cost_estimates/fee_medians.rs index 38d200d8a2..b2a3c0dc74 100644 --- a/stackslib/src/cost_estimates/fee_medians.rs +++ b/stackslib/src/cost_estimates/fee_medians.rs @@ -192,7 +192,7 @@ impl FeeEstimator for WeightedMedianFeeRateEstimator { .tx_receipts .iter() .filter_map(|tx_receipt| { - fee_rate_and_weight_from_receipt(&self.metric, &tx_receipt, block_limit) + fee_rate_and_weight_from_receipt(&self.metric, tx_receipt, block_limit) }) .collect(); @@ -327,7 +327,7 @@ fn fee_rate_and_weight_from_receipt( | TransactionPayload::TenureChange(..) 
=> { // These transaction payload types all "work" the same: they have associated ExecutionCosts // and contibute to the block length limit with their tx_len - metric.from_cost_and_len(&tx_receipt.execution_cost, &block_limit, tx_size) + metric.from_cost_and_len(&tx_receipt.execution_cost, block_limit, tx_size) } }; let denominator = cmp::max(scalar_cost, 1) as f64; diff --git a/stackslib/src/cost_estimates/fee_scalar.rs b/stackslib/src/cost_estimates/fee_scalar.rs index c3ad8bd40c..4915dd529d 100644 --- a/stackslib/src/cost_estimates/fee_scalar.rs +++ b/stackslib/src/cost_estimates/fee_scalar.rs @@ -180,7 +180,7 @@ impl FeeEstimator for ScalarFeeRateEstimator { read_count: 2, runtime: 4640, // taken from .costs-3 }, - &block_limit, + block_limit, tx_size, ) } @@ -196,7 +196,7 @@ impl FeeEstimator for ScalarFeeRateEstimator { // and contibute to the block length limit with their tx_len self.metric.from_cost_and_len( &tx_receipt.execution_cost, - &block_limit, + block_limit, tx_size, ) } diff --git a/stackslib/src/cost_estimates/pessimistic.rs b/stackslib/src/cost_estimates/pessimistic.rs index 9894180480..04579331f1 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -266,9 +266,9 @@ impl CostEstimator for PessimisticEstimator { // only log the estimate error if an estimate could be constructed if let Ok(estimated_cost) = self.estimate_cost(tx, evaluated_epoch) { let estimated_scalar = - estimated_cost.proportion_dot_product(&block_limit, PROPORTION_RESOLUTION); + estimated_cost.proportion_dot_product(block_limit, PROPORTION_RESOLUTION); let actual_scalar = - actual_cost.proportion_dot_product(&block_limit, PROPORTION_RESOLUTION); + actual_cost.proportion_dot_product(block_limit, PROPORTION_RESOLUTION); info!("PessimisticEstimator received event"; "key" => %PessimisticEstimator::get_estimate_key(tx, &CostField::RuntimeCost, evaluated_epoch), "estimate" => estimated_scalar, diff --git 
a/stackslib/src/cost_estimates/tests/cost_estimators.rs b/stackslib/src/cost_estimates/tests/cost_estimators.rs index 1ed6b034e5..1d772e620e 100644 --- a/stackslib/src/cost_estimates/tests/cost_estimators.rs +++ b/stackslib/src/cost_estimates/tests/cost_estimators.rs @@ -827,11 +827,7 @@ fn test_cost_estimator_epochs_independent() { // Setup: "notify" cost_200 in Epoch20. estimator.notify_block( - &vec![make_dummy_cc_tx( - &contract_name, - &func_name, - cost_200.clone(), - )], + &vec![make_dummy_cc_tx(contract_name, func_name, cost_200.clone())], &BLOCK_LIMIT_MAINNET_20, &StacksEpochId::Epoch20, ); @@ -842,7 +838,7 @@ fn test_cost_estimator_epochs_independent() { make_dummy_coinbase_tx(), make_dummy_transfer_tx(), make_dummy_transfer_tx(), - make_dummy_cc_tx(&contract_name, &func_name, cost_205.clone()), + make_dummy_cc_tx(contract_name, func_name, cost_205.clone()), ], &BLOCK_LIMIT_MAINNET_20, &StacksEpochId::Epoch2_05, diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 730303cbd2..c2df93da91 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -507,7 +507,7 @@ fn main() { } let index_block_hash = &argv[3]; - let index_block_hash = StacksBlockId::from_hex(&index_block_hash).unwrap(); + let index_block_hash = StacksBlockId::from_hex(index_block_hash).unwrap(); let chain_state_path = format!("{}/mainnet/chainstate/", &argv[2]); let (chainstate, _) = @@ -686,11 +686,11 @@ check if the associated microblocks can be downloaded }; let index_block_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash); + StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); let start_load_header = get_epoch_time_ms(); let parent_header_opt = { let child_block_info = match StacksChainState::load_staging_block_info( - &chain_state.db(), + chain_state.db(), &index_block_hash, ) { Ok(Some(hdr)) => hdr, @@ -725,8 +725,8 @@ check if the associated microblocks can be downloaded &chain_state, &parent_consensus_hash, 
&parent_header.block_hash(), - &consensus_hash, - &block_hash, + consensus_hash, + block_hash, ) .unwrap(); } else { @@ -1029,7 +1029,7 @@ check if the associated microblocks can be downloaded let vals: Vec<_> = line.split(" => ").map(|x| x.trim()).collect(); let hex_string = &vals[0]; let expected_value_display = &vals[1]; - let value = clarity::vm::Value::try_deserialize_hex_untyped(&hex_string).unwrap(); + let value = clarity::vm::Value::try_deserialize_hex_untyped(hex_string).unwrap(); assert_eq!(&value.to_string(), expected_value_display); } @@ -1177,7 +1177,7 @@ check if the associated microblocks can be downloaded let txs = argv[5..] .iter() .map(|tx_str| { - let tx_bytes = hex_bytes(&tx_str).unwrap(); + let tx_bytes = hex_bytes(tx_str).unwrap(); let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); tx }) @@ -1345,7 +1345,7 @@ check if the associated microblocks can be downloaded ), ]; - let burnchain = Burnchain::regtest(&burnchain_db_path); + let burnchain = Burnchain::regtest(burnchain_db_path); let first_burnchain_block_height = burnchain.first_block_height; let first_burnchain_block_hash = burnchain.first_block_hash; let epochs = StacksEpoch::all(first_burnchain_block_height, u64::MAX, u64::MAX); @@ -1358,8 +1358,7 @@ check if the associated microblocks can be downloaded ) .unwrap(); - let old_burnchaindb = - BurnchainDB::connect(&old_burnchaindb_path, &burnchain, true).unwrap(); + let old_burnchaindb = BurnchainDB::connect(old_burnchaindb_path, &burnchain, true).unwrap(); let mut boot_data = ChainStateBootData { initial_balances, @@ -1385,7 +1384,7 @@ check if the associated microblocks can be downloaded let all_snapshots = old_sortition_db.get_all_snapshots().unwrap(); let all_stacks_blocks = - StacksChainState::get_all_staging_block_headers(&old_chainstate.db()).unwrap(); + StacksChainState::get_all_staging_block_headers(old_chainstate.db()).unwrap(); // order block hashes by arrival index let mut 
stacks_blocks_arrival_indexes = vec![]; @@ -1402,7 +1401,7 @@ check if the associated microblocks can be downloaded ); stacks_blocks_arrival_indexes.push((index_hash, snapshot.arrival_index)); } - stacks_blocks_arrival_indexes.sort_by(|ref a, ref b| a.1.partial_cmp(&b.1).unwrap()); + stacks_blocks_arrival_indexes.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap()); let stacks_blocks_arrival_order: Vec = stacks_blocks_arrival_indexes .into_iter() .map(|(h, _)| h) @@ -1464,7 +1463,7 @@ check if the associated microblocks can be downloaded header: burn_block_header, ops: blockstack_txs, } = BurnchainDB::get_burnchain_block( - &old_burnchaindb.conn(), + old_burnchaindb.conn(), &old_snapshot.burn_header_hash, ) .unwrap(); @@ -2071,10 +2070,10 @@ fn analyze_sortition_mev(argv: Vec) { for (winner, count) in all_wins_epoch3.into_iter() { let degradation = (count as f64) / (all_wins_epoch2 - .get(&winner) + .get(winner) .map(|cnt| *cnt as f64) .unwrap_or(0.00000000000001f64)); - println!("{},{},{}", &winner, count, degradation); + println!("{winner},{count},{degradation}"); } process::exit(0); diff --git a/stackslib/src/net/api/getmicroblocks_indexed.rs b/stackslib/src/net/api/getmicroblocks_indexed.rs index 4a1b310ae0..a13b26dbd9 100644 --- a/stackslib/src/net/api/getmicroblocks_indexed.rs +++ b/stackslib/src/net/api/getmicroblocks_indexed.rs @@ -73,7 +73,7 @@ impl StacksIndexedMicroblockStream { ) -> Result { // look up parent let mblock_info = StacksChainState::load_staging_microblock_info_indexed( - &chainstate.db(), + chainstate.db(), tail_index_microblock_hash, )? 
.ok_or(ChainError::NoSuchBlockError)?; diff --git a/stackslib/src/net/api/getmicroblocks_unconfirmed.rs b/stackslib/src/net/api/getmicroblocks_unconfirmed.rs index 41d0b77681..4eb2837022 100644 --- a/stackslib/src/net/api/getmicroblocks_unconfirmed.rs +++ b/stackslib/src/net/api/getmicroblocks_unconfirmed.rs @@ -77,7 +77,7 @@ impl StacksUnconfirmedMicroblockStream { seq: u16, ) -> Result { let mblock_info = StacksChainState::load_next_descendant_microblock( - &chainstate.db(), + chainstate.db(), parent_block_id, seq, )? diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs index 81868c81f8..f576229110 100644 --- a/stackslib/src/net/api/getpoxinfo.rs +++ b/stackslib/src/net/api/getpoxinfo.rs @@ -318,7 +318,7 @@ impl RPCPoxInfoData { .active_pox_contract(burnchain.reward_cycle_to_block_height(reward_cycle_id + 1)); let cur_cycle_stacked_ustx = chainstate.get_total_ustx_stacked( - &sortdb, + sortdb, tip, reward_cycle_id as u128, cur_cycle_pox_contract, @@ -326,7 +326,7 @@ impl RPCPoxInfoData { let next_cycle_stacked_ustx = // next_cycle_pox_contract might not be instantiated yet match chainstate.get_total_ustx_stacked( - &sortdb, + sortdb, tip, reward_cycle_id as u128 + 1, next_cycle_pox_contract, diff --git a/stackslib/src/net/api/gettenure.rs b/stackslib/src/net/api/gettenure.rs index 9888b5563f..f569407c22 100644 --- a/stackslib/src/net/api/gettenure.rs +++ b/stackslib/src/net/api/gettenure.rs @@ -188,7 +188,7 @@ impl HttpRequest for RPCNakamotoTenureRequestHandler { let req_contents = HttpRequestContents::new().query_string(query); let last_block_id = req_contents .get_query_arg("stop") - .map(|last_block_id_hex| StacksBlockId::from_hex(&last_block_id_hex)) + .map(|last_block_id_hex| StacksBlockId::from_hex(last_block_id_hex)) .transpose() .map_err(|e| { Error::DecodeError(format!("Failed to parse stop= query parameter: {:?}", &e)) diff --git a/stackslib/src/net/api/gettransaction_unconfirmed.rs 
b/stackslib/src/net/api/gettransaction_unconfirmed.rs index 9628817b40..110bf063b4 100644 --- a/stackslib/src/net/api/gettransaction_unconfirmed.rs +++ b/stackslib/src/net/api/gettransaction_unconfirmed.rs @@ -123,7 +123,7 @@ impl RPCRequestHandler for RPCGetTransactionUnconfirmedRequestHandler { let txinfo_res = node.with_node_state(|_network, _sortdb, chainstate, mempool, _rpc_args| { // present in the unconfirmed state? - if let Some(ref unconfirmed) = chainstate.unconfirmed_state.as_ref() { + if let Some(unconfirmed) = chainstate.unconfirmed_state.as_ref() { if let Some((transaction, mblock_hash, seq)) = unconfirmed.get_unconfirmed_transaction(&txid) { diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 8d32308d9d..9604b3eb69 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -192,7 +192,7 @@ pub mod prefix_opt_hex { &"at least length 2 string", )); }; - let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; + let val = T::try_from(hex_str).map_err(serde::de::Error::custom)?; Ok(Some(val)) } } @@ -218,7 +218,7 @@ pub mod prefix_hex { &"at least length 2 string", )); }; - T::try_from(&hex_str).map_err(serde::de::Error::custom) + T::try_from(hex_str).map_err(serde::de::Error::custom) } } diff --git a/stackslib/src/net/api/postblock.rs b/stackslib/src/net/api/postblock.rs index 4fc50244f9..90d4e166e0 100644 --- a/stackslib/src/net/api/postblock.rs +++ b/stackslib/src/net/api/postblock.rs @@ -164,7 +164,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - match SortitionDB::get_sortition_id_by_consensus(&sortdb.conn(), &consensus_hash) { + match SortitionDB::get_sortition_id_by_consensus(sortdb.conn(), &consensus_hash) { Ok(Some(_)) => { // we recognize this consensus hash let ic = sortdb.index_conn(); diff --git a/stackslib/src/net/api/postblock_proposal.rs 
b/stackslib/src/net/api/postblock_proposal.rs index ca0d71815f..32152e90a3 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -525,7 +525,7 @@ impl NakamotoBlockProposal { let tx_len = tx.tx_len(); let tx_result = builder.try_mine_tx_with_len( &mut tenure_tx, - &tx, + tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index e1c794ea2d..1290cc8e8b 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -164,7 +164,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { let stacks_tip = network.stacks_tip.block_id(); Relayer::process_new_nakamoto_block_ext( &network.burnchain, - &sortdb, + sortdb, &mut handle_conn, chainstate, &stacks_tip, diff --git a/stackslib/src/net/api/postfeerate.rs b/stackslib/src/net/api/postfeerate.rs index 376d8bf3da..cb012bbc6c 100644 --- a/stackslib/src/net/api/postfeerate.rs +++ b/stackslib/src/net/api/postfeerate.rs @@ -119,7 +119,7 @@ impl RPCPostFeeRateRequestHandler { metric.from_cost_and_len(&estimated_cost, &stacks_epoch.block_limit, estimated_len); let fee_rates = fee_estimator.get_rate_estimates().map_err(|e| { StacksHttpResponse::new_error( - &preamble, + preamble, &HttpBadRequest::new(format!( "Estimator RPC endpoint failed to estimate fees for tx: {:?}", &e diff --git a/stackslib/src/net/api/postmicroblock.rs b/stackslib/src/net/api/postmicroblock.rs index 370ba1f34d..fa434d7c65 100644 --- a/stackslib/src/net/api/postmicroblock.rs +++ b/stackslib/src/net/api/postmicroblock.rs @@ -118,7 +118,7 @@ impl HttpRequest for RPCPostMicroblockRequestHandler { )); } - let microblock = Self::parse_postmicroblock_octets(&body)?; + let microblock = Self::parse_postmicroblock_octets(body)?; self.microblock = Some(microblock); Ok(HttpRequestContents::new().query_string(query)) diff --git 
a/stackslib/src/net/api/tests/getheaders.rs b/stackslib/src/net/api/tests/getheaders.rs index 4ea4480082..43e6383a27 100644 --- a/stackslib/src/net/api/tests/getheaders.rs +++ b/stackslib/src/net/api/tests/getheaders.rs @@ -386,8 +386,7 @@ fn test_stream_getheaders() { // ask for only a few let mut stream = - StacksHeaderStream::new(&chainstate, &blocks_fork_index_hashes.last().unwrap(), 10) - .unwrap(); + StacksHeaderStream::new(&chainstate, blocks_fork_index_hashes.last().unwrap(), 10).unwrap(); let header_bytes = stream_headers_to_vec(&mut stream); let headers: Vec = serde_json::from_reader(&mut &header_bytes[..]).unwrap(); diff --git a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs index 421264fd9a..e37b5749be 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs +++ b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs @@ -143,7 +143,7 @@ fn test_try_make_response() { rpc_test.peer_2.chainstate(), &parent_consensus_hash, &parent_block.block_hash(), - &mblock, + mblock, ); } diff --git a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs index aba7fd5c23..ca879034c4 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs +++ b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs @@ -147,7 +147,7 @@ fn test_try_make_response() { rpc_test.peer_2.chainstate(), &parent_consensus_hash, &parent_block.block_hash(), - &mblock, + mblock, ); } diff --git a/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs index f4facf717c..3f31613e67 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs +++ b/stackslib/src/net/api/tests/getmicroblocks_unconfirmed.rs @@ -106,7 +106,7 @@ fn test_try_make_response() { rpc_test.peer_2.chainstate(), &consensus_hash, &anchored_block_hash, - &mblock, + mblock, ); } diff --git 
a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 35e12b5593..b737a9d56f 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -428,9 +428,8 @@ impl<'a> TestRPC<'a> { tx.commit().unwrap(); } - let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); let mut anchor_cost = ExecutionCost::ZERO; let mut anchor_size = 0; @@ -699,9 +698,8 @@ impl<'a> TestRPC<'a> { .unwrap(); // next tip, coinbase - let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 231ffe3366..30fb9de432 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -234,7 +234,7 @@ fn test_try_make_response() { let mut requests = vec![]; let tip = - SortitionDB::get_canonical_burn_chain_tip(&rpc_test.peer_1.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(rpc_test.peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); let (stacks_tip_ch, stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash( diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs index 8f921525a3..76583d4c01 100644 --- a/stackslib/src/net/api/tests/postmempoolquery.rs +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -275,7 +275,7 @@ fn test_stream_mempool_txs() { decoded_txs.append(&mut next_txs); // for fun, use a page ID that is actually a well-formed prefix of a transaction - if let Some(ref tx) = 
decoded_txs.last() { + if let Some(tx) = decoded_txs.last() { let mut evil_buf = tx.serialize_to_vec(); let mut evil_page_id = [0u8; 32]; evil_page_id.copy_from_slice(&evil_buf[0..32]); diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index d11dd9995d..1bb0c7aab6 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -462,7 +462,7 @@ impl AtlasDB { let max = (page_index + 1) * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let qry = "SELECT MIN(block_height) as min, MAX(block_height) as max FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2"; let args = params![min, max]; - let mut stmt = self.conn.prepare(&qry)?; + let mut stmt = self.conn.prepare(qry)?; let mut rows = stmt.query(args)?; match rows.next() { @@ -502,7 +502,7 @@ impl AtlasDB { .ok_or(db_error::Overflow)?; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; let args = params![min, max, block_id,]; - let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; + let rows = query_rows::<(u32, u32), _>(&self.conn, qry, args)?; let mut bool_vector = vec![true; AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE as usize]; for (attachment_index, is_available) in rows.into_iter() { diff --git a/stackslib/src/net/atlas/download.rs b/stackslib/src/net/atlas/download.rs index f877a0da3a..9f958f7d26 100644 --- a/stackslib/src/net/atlas/download.rs +++ b/stackslib/src/net/atlas/download.rs @@ -78,7 +78,7 @@ impl AttachmentsDownloader { /// Because AttachmentBatches are ordered first by their retry deadlines, it follows that if /// there are any ready AttachmentBatches, they'll be at the head of the queue. 
pub fn pop_next_ready_batch(&mut self) -> Option { - let next_is_ready = if let Some(ref next) = self.priority_queue.peek() { + let next_is_ready = if let Some(next) = self.priority_queue.peek() { next.retry_deadline < get_epoch_time_secs() } else { false @@ -305,10 +305,10 @@ impl AttachmentsDownloader { atlas_db, new_attachments, |atlas_db, attachment_instance| { - atlas_db.mark_attachment_instance_checked(&attachment_instance, true) + atlas_db.mark_attachment_instance_checked(attachment_instance, true) }, |atlas_db, attachment_instance| { - atlas_db.mark_attachment_instance_checked(&attachment_instance, false) + atlas_db.mark_attachment_instance_checked(attachment_instance, false) }, ) } @@ -331,7 +331,7 @@ impl AttachmentsDownloader { atlas_db, initial_batch, |atlas_db, attachment_instance| { - atlas_db.insert_initial_attachment_instance(&attachment_instance) + atlas_db.insert_initial_attachment_instance(attachment_instance) }, |_atlas_db, _attachment_instance| { // If attachment not found, don't insert attachment instance @@ -411,7 +411,7 @@ impl AttachmentsBatchStateContext { let missing_attachments = match self .attachments_batch .attachments_instances - .get(&contract_id) + .get(contract_id) { None => continue, Some(missing_attachments) => missing_attachments, @@ -1108,7 +1108,7 @@ impl Ord for AttachmentRequest { other.sources.len().cmp(&self.sources.len()).then_with(|| { let (_, report) = self.get_most_reliable_source(); let (_, other_report) = other.get_most_reliable_source(); - report.cmp(&other_report) + report.cmp(other_report) }) } } @@ -1219,7 +1219,7 @@ impl AttachmentsBatch { contract_id: &QualifiedContractIdentifier, ) -> Vec { let mut pages_indexes = HashSet::new(); - if let Some(missing_attachments) = self.attachments_instances.get(&contract_id) { + if let Some(missing_attachments) = self.attachments_instances.get(contract_id) { for (attachment_index, _) in missing_attachments.iter() { let page_index = attachment_index / 
AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; pages_indexes.insert(page_index); diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index 8094c77799..8000db776b 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -71,7 +71,7 @@ fn new_attachments_batch_from( ) -> AttachmentsBatch { let mut attachments_batch = AttachmentsBatch::new(); for attachment_instance in attachment_instances.iter() { - attachments_batch.track_attachment(&attachment_instance); + attachments_batch.track_attachment(attachment_instance); } for _ in 0..retry_count { attachments_batch.bump_retry_count(); @@ -287,7 +287,7 @@ fn test_attachment_instance_parsing() { for value in values.iter() { assert!(AttachmentInstance::try_new_from_value( - &value, + value, &contract_id, index_block_hash.clone(), stacks_block_height, diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 5d9c67227b..c9418c2b61 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -525,7 +525,7 @@ impl Neighbor { }; } - neighbor.handshake_update(conn, &handshake_data)?; + neighbor.handshake_update(conn, handshake_data)?; Ok((neighbor, present)) } @@ -636,7 +636,7 @@ impl ConversationP2P { } pub fn to_neighbor_address(&self) -> NeighborAddress { - let pubkh = if let Some(ref pubk) = self.ref_public_key() { + let pubkh = if let Some(pubk) = self.ref_public_key() { Hash160::from_node_public_key(pubk) } else { Hash160([0u8; 20]) @@ -650,7 +650,7 @@ impl ConversationP2P { } pub fn to_handshake_neighbor_address(&self) -> NeighborAddress { - let pubkh = if let Some(ref pubk) = self.ref_public_key() { + let pubkh = if let Some(pubk) = self.ref_public_key() { Hash160::from_node_public_key(pubk) } else { Hash160([0u8; 20]) @@ -1411,7 +1411,7 @@ impl ConversationP2P { StacksMessageType::Ping(ref data) => data, _ => panic!("Message is not a ping"), }; - let pong_data = PongData::from_ping(&ping_data); + let pong_data = 
PongData::from_ping(ping_data); Ok(Some(StacksMessage::from_chain_view( self.version, self.network_id, @@ -1562,7 +1562,7 @@ impl ConversationP2P { } let base_snapshot_or_nack = Self::validate_consensus_hash_reward_cycle_start( - &_local_peer, + _local_peer, sortdb, &get_blocks_inv.consensus_hash, )?; @@ -1617,7 +1617,7 @@ impl ConversationP2P { Err(db_error::NotFoundError) | Err(db_error::InvalidPoxSortition) => { debug!( "{:?}: Failed to load ancestor hashes from {}", - &_local_peer, &tip_snapshot.consensus_hash + _local_peer, &tip_snapshot.consensus_hash ); // make this into a NACK @@ -1722,7 +1722,7 @@ impl ConversationP2P { let _local_peer = network.get_local_peer(); let base_snapshot_or_nack = Self::validate_consensus_hash_reward_cycle_start( - &_local_peer, + _local_peer, sortdb, &get_nakamoto_inv.consensus_hash, )?; @@ -2518,7 +2518,7 @@ impl ConversationP2P { burnchain_view: &BurnchainView, ) -> Result { // validate message preamble - if let Err(e) = self.is_preamble_valid(&msg, burnchain_view) { + if let Err(e) = self.is_preamble_valid(msg, burnchain_view) { match e { net_error::InvalidMessage => { // Disconnect from this peer. If it thinks nothing's wrong, it'll @@ -2634,7 +2634,7 @@ impl ConversationP2P { // // Anything else will be nack'ed -- the peer will first need to handshake. 
let mut consume = false; - let solicited = self.connection.is_solicited(&msg); + let solicited = self.connection.is_solicited(msg); let reply_opt = match msg.payload { StacksMessageType::Handshake(_) => { monitoring::increment_msg_counter("p2p_unauthenticated_handshake".to_string()); @@ -3137,8 +3137,8 @@ mod test { PeerAddress::from_ipv4(127, 0, 0, 1), NETWORK_P2P_PORT, data_url.clone(), - &asn4_entries, - Some(&initial_neighbors), + asn4_entries, + Some(initial_neighbors), &vec![ QualifiedContractIdentifier::parse("SP000000000000000000002Q6VF78.sbtc").unwrap(), ], @@ -3165,7 +3165,7 @@ mod test { let first_burnchain_block_height = burnchain.first_block_height; let first_burnchain_block_hash = burnchain.first_block_hash; - let mut boot_data = ChainStateBootData::new(&burnchain, vec![], None); + let mut boot_data = ChainStateBootData::new(burnchain, vec![], None); let (chainstate, _) = StacksChainState::open_and_exec( false, @@ -3445,8 +3445,8 @@ mod test { &chain_view_2, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); peerdb_1 .update_local_peer( @@ -3474,8 +3474,8 @@ mod test { ) .unwrap(); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); assert_eq!( local_peer_1.stacker_dbs, @@ -3754,7 +3754,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -3763,7 +3763,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -3772,8 +3772,8 @@ mod test { 
&chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -3934,7 +3934,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -3943,7 +3943,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -3952,8 +3952,8 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -4079,7 +4079,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -4088,7 +4088,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -4097,8 +4097,8 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -4223,7 +4223,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -4232,7 +4232,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -4241,8 +4241,8 @@ mod test { &chain_view, ); 
- let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -4380,7 +4380,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -4389,7 +4389,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -4398,8 +4398,8 @@ mod test { &chain_view, ); - let mut local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let mut local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -4579,7 +4579,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -4588,7 +4588,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -4597,8 +4597,8 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -4723,7 +4723,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -4732,7 +4732,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -4741,8 +4741,8 @@ mod test { &chain_view, ); - let 
local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -4899,7 +4899,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -4908,7 +4908,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -4917,8 +4917,8 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -5126,7 +5126,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -5135,7 +5135,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -5144,8 +5144,8 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -5276,7 +5276,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -5285,7 +5285,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -5294,8 +5294,8 @@ mod test { &chain_view, ); - let local_peer_1 = 
PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -5447,7 +5447,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -5456,7 +5456,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -5465,8 +5465,8 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -5726,7 +5726,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -5735,7 +5735,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -5744,8 +5744,8 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -6006,7 +6006,7 @@ mod test { ); let mut net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain_1, 0x9abcdef0, &mut peerdb_1, @@ -6015,7 +6015,7 @@ mod test { &chain_view, ); let mut net_2 = db_setup( - &test_name_2, + test_name_2, &burnchain_2, 0x9abcdef0, &mut peerdb_2, @@ -6024,8 +6024,8 @@ mod test { &chain_view, ); - let local_peer_1 = 
PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); - let local_peer_2 = PeerDB::get_local_peer(&peerdb_2.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); + let local_peer_2 = PeerDB::get_local_peer(peerdb_2.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -6130,7 +6130,7 @@ mod test { ); let net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain, 0x9abcdef0, &mut peerdb_1, @@ -6139,7 +6139,7 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); // network ID check { @@ -6798,7 +6798,7 @@ mod test { ); let net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain, 0x9abcdef0, &mut peerdb_1, @@ -6807,7 +6807,7 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -6916,7 +6916,7 @@ mod test { ); let net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain, 0x9abcdef0, &mut peerdb_1, @@ -6925,7 +6925,7 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -6983,7 +6983,7 @@ mod test { ); let net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain, 0x9abcdef0, &mut peerdb_1, @@ -6992,7 +6992,7 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -7117,7 +7117,7 @@ mod test { ); let net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain, 0x9abcdef0, &mut peerdb_1, @@ -7126,7 +7126,7 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + 
let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -7251,7 +7251,7 @@ mod test { ); let net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain, 0x9abcdef0, &mut peerdb_1, @@ -7260,7 +7260,7 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, @@ -7385,7 +7385,7 @@ mod test { ); let net_1 = db_setup( - &test_name_1, + test_name_1, &burnchain, 0x9abcdef0, &mut peerdb_1, @@ -7394,7 +7394,7 @@ mod test { &chain_view, ); - let local_peer_1 = PeerDB::get_local_peer(&peerdb_1.conn()).unwrap(); + let local_peer_1 = PeerDB::get_local_peer(peerdb_1.conn()).unwrap(); let mut convo_1 = ConversationP2P::new( 123, diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 954b16ced8..9449d29b5c 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -2016,7 +2016,7 @@ mod test { // the combined ping buffers should be the serialized ping let mut combined_ping_buf = vec![]; combined_ping_buf.append(&mut half_ping); - combined_ping_buf.extend_from_slice(&write_buf_05.get_mut()); + combined_ping_buf.extend_from_slice(write_buf_05.get_mut()); assert_eq!(combined_ping_buf, serialized_ping); diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 35471183f3..641f240c72 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -485,7 +485,7 @@ impl PeerDB { } for asn4 in asn4_entries { - PeerDB::asn4_insert(&tx, &asn4)?; + PeerDB::asn4_insert(&tx, asn4)?; } for neighbor in initial_neighbors { @@ -673,7 +673,7 @@ impl PeerDB { if create_flag { // instantiate! 
match initial_neighbors { - Some(ref neighbors) => { + Some(neighbors) => { db.instantiate( network_id, parent_network_id, @@ -823,8 +823,8 @@ impl PeerDB { /// Read the local peer record pub fn get_local_peer(conn: &DBConn) -> Result { - let qry = "SELECT * FROM local_peer LIMIT 1".to_string(); - let rows = query_rows::(conn, &qry, NO_PARAMS)?; + let qry = "SELECT * FROM local_peer LIMIT 1"; + let rows = query_rows::(conn, qry, NO_PARAMS)?; match rows.len() { 1 => Ok(rows[0].clone()), @@ -979,7 +979,7 @@ impl PeerDB { ) -> Result { let qry = "SELECT 1 FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; let args = params![network_id, peer_addr.to_bin(), peer_port]; - Ok(query_row::(conn, &qry, args)? + Ok(query_row::(conn, qry, args)? .map(|x| x == 1) .unwrap_or(false)) } @@ -1006,14 +1006,14 @@ impl PeerDB { let args = params![network_id, slot]; // N.B. we don't use Self::query_peer() here because `slot` is the primary key - query_row::(conn, &qry, args) + query_row::(conn, qry, args) } /// Is there any peer at a particular slot? pub fn has_peer_at(conn: &DBConn, network_id: u32, slot: u32) -> Result { let qry = "SELECT 1 FROM frontier WHERE network_id = ?1 AND slot = ?2"; let args = params![network_id, slot]; - Ok(query_row::(conn, &qry, args)? + Ok(query_row::(conn, qry, args)? .map(|x| x == 1) .unwrap_or(false)) } @@ -1036,7 +1036,7 @@ impl PeerDB { return Ok(false); } None => { - if PeerDB::is_address_denied(conn, &peer_addr)? { + if PeerDB::is_address_denied(conn, peer_addr)? 
{ return Ok(true); } return Ok(false); @@ -1703,7 +1703,7 @@ impl PeerDB { u64_to_sql(now_secs)?, network_epoch, ]; - let mut allow_rows = Self::query_peers(conn, &allow_qry, allow_args)?; + let mut allow_rows = Self::query_peers(conn, allow_qry, allow_args)?; if allow_rows.len() >= (count as usize) { // return a random subset @@ -1807,7 +1807,7 @@ impl PeerDB { let qry = "SELECT * FROM asn4 WHERE prefix = (?1 & ~((1 << (32 - mask)) - 1)) ORDER BY prefix DESC LIMIT 1"; let args = params![addr_u32]; - let rows = query_rows::(conn, &qry, args)?; + let rows = query_rows::(conn, qry, args)?; match rows.len() { 0 => Ok(None), _ => Ok(Some(rows[0].asn)), @@ -1830,20 +1830,20 @@ impl PeerDB { pub fn asn_count(conn: &DBConn, asn: u32) -> Result { let qry = "SELECT COUNT(*) FROM frontier WHERE asn = ?1"; let args = params![asn]; - let count = query_count(conn, &qry, args)?; + let count = query_count(conn, qry, args)?; Ok(count as u64) } #[cfg_attr(test, mutants::skip)] pub fn get_frontier_size(conn: &DBConn) -> Result { let qry = "SELECT COUNT(*) FROM frontier"; - let count = query_count(conn, &qry, NO_PARAMS)?; + let count = query_count(conn, qry, NO_PARAMS)?; Ok(count as u64) } pub fn get_all_peers(conn: &DBConn) -> Result, db_error> { let qry = "SELECT * FROM frontier ORDER BY addrbytes ASC, port ASC"; - let rows = Self::query_peers(conn, &qry, NO_PARAMS)?; + let rows = Self::query_peers(conn, qry, NO_PARAMS)?; Ok(rows) } diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index b610f2a156..ddc37ff516 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -130,7 +130,7 @@ impl DNSResolver { } pub fn resolve(&self, req: DNSRequest) -> DNSResponse { - if let Some(ref addrs) = self.hardcoded.get(&(req.host.clone(), req.port)) { + if let Some(addrs) = self.hardcoded.get(&(req.host.clone(), req.port)) { return DNSResponse::new(req, Ok(addrs.to_vec())); } diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs 
index d58321118e..4494399bed 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -455,7 +455,7 @@ impl BlockDownloader { self.requested_blocks.remove(&block_key.index_block_hash); let is_always_allowed = match PeerDB::get_peer( - &network.peerdb.conn(), + network.peerdb.conn(), block_key.neighbor.network_id, &block_key.neighbor.addrbytes, block_key.neighbor.port, @@ -582,7 +582,7 @@ impl BlockDownloader { .remove(&block_key.index_block_hash); let is_always_allowed = match PeerDB::get_peer( - &network.peerdb.conn(), + network.peerdb.conn(), block_key.neighbor.network_id, &block_key.neighbor.addrbytes, block_key.neighbor.port, @@ -1058,8 +1058,8 @@ impl PeerNetwork { /// Get the data URL for a neighbor pub fn get_data_url(&self, neighbor_key: &NeighborKey) -> Option { match self.events.get(neighbor_key) { - Some(ref event_id) => match self.peers.get(event_id) { - Some(ref convo) => { + Some(event_id) => match self.peers.get(event_id) { + Some(convo) => { if convo.data_url.is_empty() { None } else { @@ -1107,9 +1107,9 @@ impl PeerNetwork { // if the child is processed, then we have all the microblocks we need. // this is the overwhelmingly likely case. if let Ok(Some(true)) = StacksChainState::get_staging_block_status( - &chainstate.db(), - &child_consensus_hash, - &child_block_hash, + chainstate.db(), + child_consensus_hash, + child_block_hash, ) { test_debug!( "{:?}: Already processed block {}/{}, so must have stream between it and {}/{}", @@ -1167,7 +1167,7 @@ impl PeerNetwork { // try and load the connecting stream. If we have it, then we're good to go. // SLOW match StacksChainState::load_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), parent_consensus_hash, parent_block_hash, &child_header.parent_microblock, @@ -1337,7 +1337,7 @@ impl PeerNetwork { // does this anchor block _confirm_ a microblock stream that we don't know about? 
let parent_header_opt = { let child_block_info = match StacksChainState::load_staging_block_info( - &chainstate.db(), + chainstate.db(), &index_block_hash, )? { Some(hdr) => hdr, @@ -1444,7 +1444,7 @@ impl PeerNetwork { neighbors.len() ); - (&mut neighbors[..]).shuffle(&mut thread_rng()); + neighbors[..].shuffle(&mut thread_rng()); let mut requests = VecDeque::new(); for nk in neighbors.into_iter() { @@ -1731,7 +1731,7 @@ impl PeerNetwork { &requests.front().as_ref().unwrap().consensus_hash, &requests.front().as_ref().unwrap().anchor_block_hash, &index_block_hash, - requests.iter().map(|ref r| &r.data_url).collect::>() + requests.iter().map(|r| &r.data_url).collect::>() ); downloader.blocks_to_try.insert(height, requests); @@ -1795,7 +1795,7 @@ impl PeerNetwork { debug!("{:?}: will request microblock stream confirmed by sortition {}: {}/{} ({}) from {:?}", &network.local_peer, mblock_height, &requests.front().as_ref().unwrap().consensus_hash, &requests.front().as_ref().unwrap().anchor_block_hash, &index_block_hash, - requests.iter().map(|ref r| &r.data_url).collect::>() + requests.iter().map(|r| &r.data_url).collect::>() ); downloader diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 4c509ed5c1..84586540a1 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -182,7 +182,7 @@ impl NakamotoDownloadStateMachine { StacksBlockId(cursor.winning_stacks_block_hash.0), cursor.block_height, )); - cursor = SortitionDB::get_block_snapshot(&ih, &cursor.parent_sortition_id)? + cursor = SortitionDB::get_block_snapshot(ih, &cursor.parent_sortition_id)? 
.ok_or(DBError::NotFoundError)?; } wanted_tenures.reverse(); @@ -1179,8 +1179,8 @@ impl NakamotoDownloadStateMachine { finished.push(naddr.clone()); continue; } - if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", naddr); + if neighbor_rpc.has_inflight(naddr) { + debug!("Peer {naddr} has an inflight request"); continue; } @@ -1565,7 +1565,7 @@ impl NakamotoDownloadStateMachine { ) -> Result>, NetError> { self.nakamoto_tip = network.stacks_tip.block_id(); debug!("Downloader: Nakamoto tip is {:?}", &self.nakamoto_tip); - self.update_wanted_tenures(&network, sortdb)?; + self.update_wanted_tenures(network, sortdb)?; self.update_processed_tenures(chainstate)?; let new_blocks = self.run_downloads(burnchain_height, network, sortdb, chainstate, ibd); self.last_sort_tip = Some(network.burnchain_tip.clone()); diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index e5b796181a..f8054828b6 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -434,7 +434,7 @@ impl NakamotoTenureDownloaderSet { if self.try_resume_peer(naddr.clone()) { continue; }; - if self.has_downloader_for_tenure(&ch) { + if self.has_downloader_for_tenure(ch) { schedule.pop_front(); continue; } @@ -491,11 +491,11 @@ impl NakamotoTenureDownloaderSet { continue; }; - let attempt_count = *self.attempted_tenures.get(&ch).unwrap_or(&0); + let attempt_count = *self.attempted_tenures.get(ch).unwrap_or(&0); self.attempted_tenures .insert(ch.clone(), attempt_count.saturating_add(1)); - let attempt_failed_count = *self.attempt_failed_tenures.get(&ch).unwrap_or(&0); + let attempt_failed_count = *self.attempt_failed_tenures.get(ch).unwrap_or(&0); info!("Download tenure {ch}"; "peer" => %naddr, @@ -551,7 +551,7 @@ impl NakamotoTenureDownloaderSet { // send requests for (naddr, index) in self.peers.iter() { - if 
neighbor_rpc.has_inflight(&naddr) { + if neighbor_rpc.has_inflight(naddr) { debug!("Peer {naddr} has an inflight request"); continue; } @@ -608,7 +608,7 @@ impl NakamotoTenureDownloaderSet { for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { debug!("Remove dead/broken downloader for {naddr}"); - self.clear_downloader(&naddr); + self.clear_downloader(naddr); } } for done_naddr in finished.drain(..) { diff --git a/stackslib/src/net/http/mod.rs b/stackslib/src/net/http/mod.rs index ca7a97c5be..fb6d96c0e0 100644 --- a/stackslib/src/net/http/mod.rs +++ b/stackslib/src/net/http/mod.rs @@ -195,7 +195,7 @@ pub fn write_headers( fd: &mut W, headers: &BTreeMap, ) -> Result<(), CodecError> { - for (ref key, ref value) in headers.iter() { + for (key, value) in headers.iter() { fd.write_all(key.as_str().as_bytes()) .map_err(CodecError::WriteError)?; fd.write_all(": ".as_bytes()) diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index 6535f4a14a..192de1fa4f 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -655,7 +655,7 @@ impl HttpRequestContents { let buf = "".to_string(); let mut serializer = form_urlencoded::Serializer::new(buf); for (k, v) in self.query_args.iter() { - serializer.append_pair(&k, &v); + serializer.append_pair(k, v); } serializer.finish() } diff --git a/stackslib/src/net/http/response.rs b/stackslib/src/net/http/response.rs index 3ebed7e9d2..97a828e387 100644 --- a/stackslib/src/net/http/response.rs +++ b/stackslib/src/net/http/response.rs @@ -668,7 +668,7 @@ impl HttpResponsePayload { match self { Self::Empty => Ok(()), Self::JSON(value) => serde_json::to_writer(fd, &value).map_err(Error::JsonError), - Self::Bytes(value) => fd.write_all(&value).map_err(Error::WriteError), + Self::Bytes(value) => fd.write_all(value).map_err(Error::WriteError), Self::Text(value) => fd.write_all(value.as_bytes()).map_err(Error::WriteError), } } diff --git 
a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index b7f9d15602..d3a521123e 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -189,7 +189,7 @@ pub mod request { contract_key: &str, ) -> Result { let address = if let Some(address_str) = captures.name(address_key) { - if let Some(addr) = StacksAddress::from_string(&address_str.as_str()) { + if let Some(addr) = StacksAddress::from_string(address_str.as_str()) { addr } else { return Err(HttpError::Http( @@ -383,7 +383,7 @@ pub trait RPCRequestHandler: HttpRequest + HttpResponse + RPCRequestHandlerClone ) -> Result { SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map_err(|e| { StacksHttpResponse::new_error( - &preamble, + preamble, &HttpServerError::new(format!("Failed to load canonical burnchain tip: {:?}", &e)), ) }) @@ -398,7 +398,7 @@ pub trait RPCRequestHandler: HttpRequest + HttpResponse + RPCRequestHandlerClone ) -> Result { SortitionDB::get_stacks_epoch(sortdb.conn(), block_height) .map_err(|e| { - StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Could not load Stacks epoch for canonical burn height: {:?}", &e))) + StacksHttpResponse::new_error(preamble, &HttpServerError::new(format!("Could not load Stacks epoch for canonical burn height: {:?}", &e))) })? .ok_or_else(|| { let msg = format!( @@ -406,7 +406,7 @@ pub trait RPCRequestHandler: HttpRequest + HttpResponse + RPCRequestHandlerClone block_height ); warn!("{}", &msg); - StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + StacksHttpResponse::new_error(preamble, &HttpServerError::new(msg)) }) } @@ -421,14 +421,14 @@ pub trait RPCRequestHandler: HttpRequest + HttpResponse + RPCRequestHandlerClone .map_err(|e| { let msg = format!("Failed to load stacks chain tip header: {:?}", &e); warn!("{}", &msg); - StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + StacksHttpResponse::new_error(preamble, &HttpServerError::new(msg)) })? 
.ok_or_else(|| { let msg = "No stacks tip exists yet. Perhaps no blocks have been processed by this node" .to_string(); warn!("{}", &msg); - StacksHttpResponse::new_error(&preamble, &HttpNotFound::new(msg)) + StacksHttpResponse::new_error(preamble, &HttpNotFound::new(msg)) }) } } @@ -1332,7 +1332,7 @@ impl StacksHttp { /// This can only return a finite set of identifiers, which makes it safer to use for Prometheus metrics /// For details see https://github.com/stacks-network/stacks-core/issues/4574 pub fn metrics_identifier(&self, req: &mut StacksHttpRequest) -> &str { - let Ok((decoded_path, _)) = decode_request_path(&req.request_path()) else { + let Ok((decoded_path, _)) = decode_request_path(req.request_path()) else { return ""; }; @@ -1385,7 +1385,7 @@ impl StacksHttp { )), } } else { - let (message, _) = http.read_payload(&preamble, &mut message_bytes)?; + let (message, _) = http.read_payload(&preamble, message_bytes)?; Ok(message) } } diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 782ce8a876..bbc8312f85 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -693,7 +693,7 @@ impl NeighborBlockStats { self.status = NeighborBlockStats::diagnose_nack( &self.nk, nack_data, - &chain_view, + chain_view, preamble_burn_block_height, preamble_burn_stable_block_height, preamble_burn_block_hash, @@ -792,7 +792,7 @@ impl NeighborBlockStats { StacksMessageType::Nack(nack_data) => { debug!("Remote neighbor {:?} nack'ed our GetPoxInv at reward cycle {}: NACK code {}", &self.nk, self.target_pox_reward_cycle, nack_data.error_code); let is_bootstrap_peer = PeerDB::is_initial_peer( - &network.peerdb.conn(), + network.peerdb.conn(), self.nk.network_id, &self.nk.addrbytes, self.nk.port, @@ -892,7 +892,7 @@ impl NeighborBlockStats { StacksMessageType::Nack(nack_data) => { debug!("Remote neighbor {:?} nack'ed our GetBlocksInv at reward cycle {}: NACK code {}", &self.nk, self.target_block_reward_cycle, 
nack_data.error_code); let is_bootstrap_peer = PeerDB::is_initial_peer( - &network.peerdb.conn(), + network.peerdb.conn(), self.nk.network_id, &self.nk.addrbytes, self.nk.port, @@ -1024,7 +1024,7 @@ impl InvState { if let Some(stats) = self.block_stats.get_mut(peer) { debug!("Already tracking inventories of peer {:?}", &peer); stats.reset_pox_scan(0); - stats.is_bootstrap_peer = bootstrap_peers.contains(&peer); + stats.is_bootstrap_peer = bootstrap_peers.contains(peer); } else if self.block_stats.len() < max_neighbors { debug!("Will track inventories of new peer {:?}", &peer); self.block_stats.insert( @@ -1032,7 +1032,7 @@ impl InvState { NeighborBlockStats::new( peer.clone(), self.first_block_height, - bootstrap_peers.contains(&peer), + bootstrap_peers.contains(peer), ), ); added += 1; @@ -1051,7 +1051,7 @@ impl InvState { // if we're still connected to these peers, then keep them pinned self.pinned.clear(); for peer in peers.iter() { - if let Some(event_id) = network.get_event_id(&peer) { + if let Some(event_id) = network.get_event_id(peer) { self.pinned.insert(event_id); } } @@ -1175,7 +1175,7 @@ impl InvState { } pub fn del_peer(&mut self, nk: &NeighborKey) { - self.block_stats.remove(&nk); + self.block_stats.remove(nk); } /// Is there any downloader-actionable data available? @@ -1211,7 +1211,7 @@ impl InvState { consensus_hash: &ConsensusHash, microblocks: bool, ) -> Result, net_error> { - let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &consensus_hash)? { + let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash)? 
{ Some(sn) => { if !sn.pox_valid { debug!( @@ -1845,7 +1845,7 @@ impl PeerNetwork { }; let payload = StacksMessageType::GetPoxInv(getpoxinv); - let event_id_opt = self.get_event_id(&nk); + let event_id_opt = self.get_event_id(nk); let message = self.sign_for_neighbor(nk, payload)?; let request = self @@ -2277,7 +2277,7 @@ impl PeerNetwork { let mut ibd_diverged_height: Option = None; let bootstrap_peers: HashSet<_> = - PeerDB::get_bootstrap_peers(&network.peerdb.conn(), network.local_peer.network_id) + PeerDB::get_bootstrap_peers(network.peerdb.conn(), network.local_peer.network_id) .unwrap_or(vec![]) .into_iter() .map(|neighbor| neighbor.addr) @@ -2343,7 +2343,7 @@ impl PeerNetwork { // if this node diverged from us, and we're in ibd, and this is an // always-allowed peer, then start scanning here (or lower) if ibd - && bootstrap_peers.contains(&nk) + && bootstrap_peers.contains(nk) && stats.status == NodeStatus::Diverged { inv_state.last_change_at = get_epoch_time_secs(); @@ -2719,7 +2719,7 @@ impl PeerNetwork { // only count an inv_sync as passing if there's an always-allowed node // in our inv state let always_allowed: HashSet<_> = - PeerDB::get_always_allowed_peers(&self.peerdb.conn(), self.local_peer.network_id) + PeerDB::get_always_allowed_peers(self.peerdb.conn(), self.local_peer.network_id) .unwrap_or(vec![]) .into_iter() .map(|neighbor| neighbor.addr) @@ -2742,7 +2742,7 @@ impl PeerNetwork { }; for (nk, stats) in inv_state.block_stats.iter() { - if self.is_bound(&nk) { + if self.is_bound(nk) { // this is the same address we're bound to continue; } @@ -2750,7 +2750,7 @@ impl PeerNetwork { // this is a peer at our address continue; } - if !always_allowed.contains(&nk) { + if !always_allowed.contains(nk) { // this peer isn't in the always-allowed set continue; } diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index e832b70184..5ca3c10127 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs 
@@ -294,7 +294,7 @@ impl InvGenerator { // we have not loaded the tenure info for this tip, or it was cleared via cache // maintenance. Either way, got get it from disk. let loaded_info_opt = - InvTenureInfo::load(chainstate, &tip_block_id, &tenure_id_consensus_hash)?; + InvTenureInfo::load(chainstate, &tip_block_id, tenure_id_consensus_hash)?; tenure_infos.insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); self.cache_misses = self.cache_misses.saturating_add(1); @@ -873,7 +873,7 @@ impl NakamotoInvStateMachine { if ibd { // in IBD, only connect to initial peers let is_initial = PeerDB::is_initial_peer( - &network.peerdb_conn(), + network.peerdb_conn(), convo.peer_network_id, &convo.peer_addrbytes, convo.peer_port, diff --git a/stackslib/src/net/mempool/mod.rs b/stackslib/src/net/mempool/mod.rs index 27253180d4..54c3210360 100644 --- a/stackslib/src/net/mempool/mod.rs +++ b/stackslib/src/net/mempool/mod.rs @@ -499,7 +499,7 @@ impl MempoolSync { // 3. ask for the remote peer's mempool's novel txs // address must be resolvable if !network.get_connection_opts().private_neighbors - && PeerAddress::from_socketaddr(&addr).is_in_private_range() + && PeerAddress::from_socketaddr(addr).is_in_private_range() { debug!( "{:?}: Mempool sync skips {}, which has private IP", diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 415f74c739..4cc943300c 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1575,7 +1575,7 @@ impl NetworkResult { let mut blocks: HashSet<_> = self .blocks .iter() - .map(|(ch, blk, _)| StacksBlockId::new(&ch, &blk.block_hash())) + .map(|(ch, blk, _)| StacksBlockId::new(ch, &blk.block_hash())) .collect(); let pushed_blocks: HashSet<_> = self @@ -1778,7 +1778,7 @@ impl NetworkResult { // only retain blocks not found in `newer` self.blocks.retain(|(ch, blk, _)| { - let block_id = StacksBlockId::new(&ch, &blk.block_hash()); + let block_id = StacksBlockId::new(ch, &blk.block_hash()); let retain = 
!newer_blocks.contains(&block_id); if !retain { debug!("Drop duplicate downloaded block {}", &block_id); @@ -2810,7 +2810,7 @@ pub mod test { } pub fn make_test_path(config: &TestPeerConfig) -> String { - let test_path = TestPeer::test_path(&config); + let test_path = TestPeer::test_path(config); match fs::metadata(&test_path) { Ok(_) => { fs::remove_dir_all(&test_path).unwrap(); @@ -2835,7 +2835,7 @@ pub mod test { let initial_peers = PeerDB::find_stacker_db_replicas( peerdb.conn(), local_peer.network_id, - &contract_id, + contract_id, 0, 10000000, ) @@ -2848,7 +2848,7 @@ pub mod test { let stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let stacker_db_sync = StackerDBSync::new( contract_id.clone(), - &db_config, + db_config, PeerNetworkComms::new(), stacker_dbs, ); @@ -3115,7 +3115,7 @@ pub mod test { let local_peer = PeerDB::get_local_peer(peerdb.conn()).unwrap(); let burnchain_view = { - let chaintip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let chaintip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); SortitionDB::get_burnchain_view(&sortdb.index_conn(), &config.burnchain, &chaintip) .unwrap() }; @@ -3664,7 +3664,7 @@ pub mod test { indexer.raw_store_header(block_header.clone()).unwrap(); burnchain_db .raw_store_burnchain_block( - &burnchain, + burnchain, &indexer, block_header.clone(), blockstack_ops, @@ -3672,7 +3672,7 @@ pub mod test { .unwrap(); Burnchain::process_affirmation_maps( - &burnchain, + burnchain, &mut burnchain_db, &indexer, block_header.block_height, @@ -3707,8 +3707,8 @@ pub mod test { ) { let sortdb = self.sortdb.take().unwrap(); let (block_height, block_hash, epoch_id) = { - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - let epoch_id = SortitionDB::get_stacks_epoch(&sortdb.conn(), tip.block_height + 1) + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), 
tip.block_height + 1) .unwrap() .unwrap() .epoch_id; @@ -3769,7 +3769,7 @@ pub mod test { &pox_id ); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); self.sortdb = Some(sortdb); ( block_height, @@ -4184,7 +4184,7 @@ pub mod test { let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = - SortitionDB::get_canonical_burn_chain_tip(&self.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(self.sortdb.as_ref().unwrap().conn()) .unwrap(); let burnchain = self.config.burnchain.clone(); @@ -4429,7 +4429,7 @@ pub mod test { &last_key, parent_block_opt.as_ref(), 1000, - |mut builder, ref mut miner, ref sortdb| { + |mut builder, ref mut miner, sortdb| { let (mut miner_chainstate, _) = StacksChainState::open(false, network_id, &chainstate_path, None).unwrap(); let sort_iconn = sortdb.index_handle_at_tip(); @@ -4479,7 +4479,7 @@ pub mod test { } pub fn get_public_key(&self) -> Secp256k1PublicKey { - let local_peer = PeerDB::get_local_peer(&self.network.peerdb.conn()).unwrap(); + let local_peer = PeerDB::get_local_peer(self.network.peerdb.conn()).unwrap(); Secp256k1PublicKey::from_private(&local_peer.private_key) } @@ -4555,7 +4555,7 @@ pub mod test { pub fn get_burn_block_height(&self) -> u64 { SortitionDB::get_canonical_burn_chain_tip( - &self.sortdb.as_ref().expect("Failed to get sortdb").conn(), + self.sortdb.as_ref().expect("Failed to get sortdb").conn(), ) .expect("Failed to get canonical burn chain tip") .block_height @@ -4657,7 +4657,7 @@ pub mod test { .unwrap() .into_iter() .filter(|(sort_id, rc_info)| { - let sn = SortitionDB::get_block_snapshot(sortdb.conn(), &sort_id) + let sn = SortitionDB::get_block_snapshot(sortdb.conn(), sort_id) .unwrap() .unwrap(); let rc_sn = sortdb @@ -4695,7 +4695,7 @@ pub mod test { .unwrap() .into_iter() .filter(|(sort_id, rc_info)| { - let sn = 
SortitionDB::get_block_snapshot(sortdb.conn(), &sort_id) + let sn = SortitionDB::get_block_snapshot(sortdb.conn(), sort_id) .unwrap() .unwrap(); sn.block_height < epoch_3.start_height diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index ed0e03f5c6..7bd973c024 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -232,7 +232,7 @@ pub trait NeighborComms { neighbor_pubkh: &Hash160, ) -> Result, net_error> { let nk = neighbor_addr.to_neighbor_key(network); - match network.can_register_peer_with_pubkey(&nk, true, &neighbor_pubkh) { + match network.can_register_peer_with_pubkey(&nk, true, neighbor_pubkh) { Ok(_) => self.neighbor_connect_and_handshake(network, &nk), Err(net_error::AlreadyConnected(event_id, handshake_nk)) => { // already connected, but on a possibly-different address. @@ -242,7 +242,7 @@ pub trait NeighborComms { if let Some(convo) = network.get_p2p_convo(event_id) { if !convo.is_outbound() { test_debug!("{:?}: Already connected to {:?} on inbound event {} (address {:?}). 
Try to establish outbound connection to {:?} {:?}.", - network.get_local_peer(), &nk, &event_id, &handshake_nk, &neighbor_pubkh, &nk); + network.get_local_peer(), &nk, &event_id, &handshake_nk, neighbor_pubkh, &nk); self.remove_connecting(network, &nk); return self diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index ebf83af962..f448d545a6 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -186,7 +186,7 @@ pub trait NeighborWalkDB { let block_height = network.get_chain_view().burn_block_height; let cur_epoch = network.get_current_epoch(); let neighbors = PeerDB::get_random_walk_neighbors( - &network.peerdb_conn(), + network.peerdb_conn(), network.get_local_peer().network_id, cur_epoch.network_epoch, min_age, @@ -202,7 +202,7 @@ pub trait NeighborWalkDB { min_age ); let seed_nodes = PeerDB::get_bootstrap_peers( - &network.peerdb_conn(), + network.peerdb_conn(), network.get_local_peer().network_id, )?; if seed_nodes.is_empty() { @@ -436,10 +436,7 @@ impl NeighborWalkDB for PeerDBNeighborWalk { ) -> Result, net_error> { let allowed_peers = if ibd { // only get bootstrap peers (will be randomized) - PeerDB::get_bootstrap_peers( - &network.peerdb_conn(), - network.get_local_peer().network_id, - )? + PeerDB::get_bootstrap_peers(network.peerdb_conn(), network.get_local_peer().network_id)? } else { // can be any peer marked 'always-allowed' (will be randomized) PeerDB::get_always_allowed_peers( @@ -456,12 +453,7 @@ impl NeighborWalkDB for PeerDBNeighborWalk { nk: &NeighborKey, ) -> Result<(), net_error> { // don't proceed if denied - if PeerDB::is_peer_denied( - &network.peerdb_conn(), - nk.network_id, - &nk.addrbytes, - nk.port, - )? { + if PeerDB::is_peer_denied(network.peerdb_conn(), nk.network_id, &nk.addrbytes, nk.port)? 
{ debug!( "{:?}: neighbor {:?} is denied", network.get_local_peer(), @@ -504,7 +496,7 @@ impl NeighborWalkDB for PeerDBNeighborWalk { local_peer_str, &replaced.addr, &replacement.addr ); - PeerDB::insert_or_replace_peer(&tx, &replacement, *slot)?; + PeerDB::insert_or_replace_peer(&tx, replacement, *slot)?; result.add_replaced(replaced.addr.clone()); } } @@ -519,7 +511,7 @@ impl NeighborWalkDB for PeerDBNeighborWalk { data: &HandshakeAcceptData, ) -> Result { Neighbor::load_and_update( - &network.peerdb_conn(), + network.peerdb_conn(), preamble.peer_version, preamble.network_id, &data.handshake, diff --git a/stackslib/src/net/neighbors/neighbor.rs b/stackslib/src/net/neighbors/neighbor.rs index 64a033ce9c..dbefeca7c0 100644 --- a/stackslib/src/net/neighbors/neighbor.rs +++ b/stackslib/src/net/neighbors/neighbor.rs @@ -55,9 +55,9 @@ impl Neighbor { stacker_dbs: Option<&[QualifiedContractIdentifier]>, ) -> Result<(), net_error> { self.last_contact_time = get_epoch_time_secs(); - PeerDB::update_peer(tx, &self).map_err(net_error::DBError)?; + PeerDB::update_peer(tx, self).map_err(net_error::DBError)?; if let Some(stacker_dbs) = stacker_dbs { - PeerDB::update_peer_stacker_dbs(tx, &self, stacker_dbs).map_err(net_error::DBError)?; + PeerDB::update_peer_stacker_dbs(tx, self, stacker_dbs).map_err(net_error::DBError)?; } Ok(()) } @@ -72,7 +72,7 @@ impl Neighbor { stacker_dbs: Option<&[QualifiedContractIdentifier]>, ) -> Result { self.last_contact_time = get_epoch_time_secs(); - PeerDB::try_insert_peer(tx, &self, stacker_dbs.unwrap_or(&[])).map_err(net_error::DBError) + PeerDB::try_insert_peer(tx, self, stacker_dbs.unwrap_or(&[])).map_err(net_error::DBError) } /// Attempt to load a neighbor from our peer DB, given its NeighborAddress reported by another diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index f16483b361..da48ad4ebd 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -275,7 +275,7 @@ 
impl NeighborWalk { &first_neighbor, true, network.get_walk_pingbacks().clone(), - &network.get_connection_opts(), + network.get_connection_opts(), ); debug!( @@ -326,7 +326,7 @@ impl NeighborWalk { &allowed_peer, true, network.get_walk_pingbacks().clone(), - &network.get_connection_opts(), + network.get_connection_opts(), ); debug!( @@ -397,7 +397,7 @@ impl NeighborWalk { &empty_neighbor, false, network.get_walk_pingbacks().clone(), - &network.get_connection_opts(), + network.get_connection_opts(), ); debug!( @@ -454,7 +454,7 @@ impl NeighborWalk { let nk = NeighborKey::from_neighbor_address( pingback_peer.peer_version, pingback_peer.network_id, - &addr, + addr, ); // don't proceed if denied @@ -469,7 +469,7 @@ impl NeighborWalk { &empty_neighbor, false, network.get_walk_pingbacks().clone(), - &network.get_connection_opts(), + network.get_connection_opts(), ); debug!( @@ -909,7 +909,7 @@ impl NeighborWalk { debug!( "{:?}: will handshake with {} neighbors out of {} reported by {:?}", network.get_local_peer(), - &network.get_connection_opts().max_neighbors_of_neighbor, + network.get_connection_opts().max_neighbors_of_neighbor, neighbor_addrs_to_resolve.len(), &self.cur_neighbor.addr ); @@ -1078,7 +1078,7 @@ impl NeighborWalk { // Do we know about this peer already? let (new, neighbor) = self.neighbor_db.add_or_schedule_replace_neighbor( network, - &preamble, + preamble, &data.handshake, db_data, &mut self.neighbor_replacements, @@ -1477,7 +1477,7 @@ impl NeighborWalk { // won the coin toss; will take a step. // take care not to step back to the neighbor from which we // stepped previously - if let Some(ref prev_neighbor) = self.prev_neighbor.as_ref() { + if let Some(prev_neighbor) = self.prev_neighbor.as_ref() { if prev_neighbor.addr == next_neighbor.addr { // oops, backtracked. Try to pick a different neighbor, if possible. 
if self.frontier.len() == 1 { @@ -1488,14 +1488,14 @@ impl NeighborWalk { // acceptance by probabilistically deciding to step to an alternative // instead of backtracking. let alt_next_neighbor = - Self::pick_random_neighbor(&self.frontier, Some(&prev_neighbor)) + Self::pick_random_neighbor(&self.frontier, Some(prev_neighbor)) .expect("BUG: empty frontier size"); let alt_prob: f64 = rnd.gen(); let cur_to_alt = self.degree_ratio(network, &self.cur_neighbor, &alt_next_neighbor); let prev_to_cur = - self.degree_ratio(network, &prev_neighbor, &self.cur_neighbor); + self.degree_ratio(network, prev_neighbor, &self.cur_neighbor); let trans_prob = fmin!( fmin!(1.0, cur_to_alt * cur_to_alt), fmax!(1.0, prev_to_cur * prev_to_cur) @@ -1722,7 +1722,7 @@ impl NeighborWalk { if let Err(e) = self.comms.neighbor_send( network, - &naddr, + naddr, StacksMessageType::Handshake(HandshakeData::from_local_peer( network.get_local_peer(), )), diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 8d25907cb2..e6a7f4134e 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1008,10 +1008,10 @@ impl PeerNetwork { neighbor_key: &NeighborKey, message: StacksMessage, ) -> Result<(), net_error> { - let event_id = if let Some(event_id) = self.events.get(&neighbor_key) { + let event_id = if let Some(event_id) = self.events.get(neighbor_key) { *event_id } else { - info!("Not connected to {:?}", &neighbor_key); + info!("Not connected to {:?}", neighbor_key); return Err(net_error::NoSuchNeighbor); }; @@ -1202,7 +1202,7 @@ impl PeerNetwork { // don't talk if denied if PeerDB::is_peer_denied( - &self.peerdb.conn(), + self.peerdb.conn(), neighbor.network_id, &neighbor.addrbytes, neighbor.port, @@ -1467,7 +1467,7 @@ impl PeerNetwork { // receive all in-bound requests for i in 0..self.handles.len() { match self.handles.get(i) { - Some(ref handle) => { + Some(handle) => { loop { // drain all inbound requests let inbound_request_res = handle.chan_in.try_recv(); @@ -1686,7 
+1686,7 @@ impl PeerNetwork { // denied? if PeerDB::is_peer_denied( - &self.peerdb.conn(), + self.peerdb.conn(), neighbor_key.network_id, &neighbor_key.addrbytes, neighbor_key.port, @@ -1699,10 +1699,10 @@ impl PeerNetwork { } // already connected? - if let Some(event_id) = self.get_event_id(&neighbor_key) { + if let Some(event_id) = self.get_event_id(neighbor_key) { debug!( "{:?}: already connected to {:?} on event {}", - &self.local_peer, &neighbor_key, event_id + &self.local_peer, neighbor_key, event_id ); return Err(net_error::AlreadyConnected(event_id, neighbor_key.clone())); } @@ -1711,7 +1711,7 @@ impl PeerNetwork { if !self.connection_opts.private_neighbors && neighbor_key.addrbytes.is_in_private_range() { debug!("{:?}: Peer {:?} is in private range and we are configured to drop private neighbors", &self.local_peer, - &neighbor_key + neighbor_key ); return Err(net_error::Denied); } @@ -1957,7 +1957,7 @@ impl PeerNetwork { /// Deregister by neighbor key pub fn deregister_neighbor(&mut self, neighbor_key: &NeighborKey) { debug!("Disconnect from {:?}", neighbor_key); - let event_id = match self.events.get(&neighbor_key) { + let event_id = match self.events.get(neighbor_key) { None => { return; } @@ -1987,7 +1987,7 @@ impl PeerNetwork { peer_key: &NeighborKey, message_payload: StacksMessageType, ) -> Result { - match self.events.get(&peer_key) { + match self.events.get(peer_key) { None => { // not connected debug!("Could not sign for peer {:?}: not connected", peer_key); @@ -2280,11 +2280,11 @@ impl PeerNetwork { /// Get stats for a neighbor pub fn get_neighbor_stats(&self, nk: &NeighborKey) -> Option { - match self.events.get(&nk) { + match self.events.get(nk) { None => None, - Some(eid) => match self.peers.get(&eid) { + Some(eid) => match self.peers.get(eid) { None => None, - Some(ref convo) => Some(convo.stats.clone()), + Some(convo) => Some(convo.stats.clone()), }, } } @@ -3130,7 +3130,7 @@ impl PeerNetwork { }; let block_info = match 
StacksChainState::load_staging_block_info( - &chainstate.db(), + chainstate.db(), &StacksBlockHeader::make_index_block_hash( &ancestor_sn.consensus_hash, &ancestor_sn.winning_stacks_block_hash, @@ -3159,7 +3159,7 @@ impl PeerNetwork { }; let microblocks = match StacksChainState::load_processed_microblock_stream_fork( - &chainstate.db(), + chainstate.db(), &block_info.parent_consensus_hash, &block_info.parent_anchored_block_hash, &block_info.parent_microblock_hash, @@ -4062,7 +4062,7 @@ impl PeerNetwork { // drop one at random let idx = thread_rng().gen::() % self.walk_pingbacks.len(); let drop_addr = match self.walk_pingbacks.keys().skip(idx).next() { - Some(ref addr) => (*addr).clone(), + Some(addr) => (*addr).clone(), None => { continue; } @@ -4117,7 +4117,7 @@ impl PeerNetwork { /// Get the local peer from the peer DB, but also preserve the public IP address pub fn load_local_peer(&self) -> Result { - let mut lp = PeerDB::get_local_peer(&self.peerdb.conn())?; + let mut lp = PeerDB::get_local_peer(self.peerdb.conn())?; lp.public_ip_address = self.local_peer.public_ip_address.clone(); Ok(lp) } @@ -4907,7 +4907,7 @@ impl PeerNetwork { } // update our relay statistics, so we know who to forward messages to - self.update_relayer_stats(&network_result); + self.update_relayer_stats(network_result); // finally, handle network I/O requests from other threads, and get back reply handles to them. // do this after processing new sockets, so we don't accidentally re-use an event ID. 
@@ -5006,7 +5006,7 @@ impl PeerNetwork { ) }; - let sn = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn())?; + let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; let mut ret: HashMap, StacksTransaction)>> = HashMap::new(); diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index 96edb12c2a..2d14568742 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -54,7 +54,7 @@ impl PeerNetwork { None => { continue; } - Some(ref convo) => { + Some(convo) => { if !convo.stats.outbound { continue; } @@ -88,7 +88,7 @@ impl PeerNetwork { "==== ORG NEIGHBOR DISTRIBUTION OF {:?} ===", &self.local_peer ); - for (ref _org, ref neighbor_infos) in org_neighbor.iter() { + for (ref _org, neighbor_infos) in org_neighbor.iter() { let _neighbors: Vec = neighbor_infos.iter().map(|ni| ni.0.clone()).collect(); test_debug!( @@ -196,7 +196,7 @@ impl PeerNetwork { // likely to be up for X more seconds, so we only really want to distinguish between nodes that // have wildly different uptimes. // Within uptime buckets, sort by health. - match org_neighbors.get_mut(&org) { + match org_neighbors.get_mut(org) { None => {} Some(ref mut neighbor_infos) => { neighbor_infos.sort_unstable_by( @@ -211,7 +211,7 @@ impl PeerNetwork { // don't let a single organization have more than // soft_max_neighbors_per_org neighbors. 
for org in orgs.iter() { - match org_neighbors.get_mut(&org) { + match org_neighbors.get_mut(org) { None => {} Some(ref mut neighbor_infos) => { if neighbor_infos.len() as u64 > self.connection_opts.soft_max_neighbors_per_org @@ -324,8 +324,8 @@ impl PeerNetwork { if preserve.contains(event_id) { continue; } - match self.peers.get(&event_id) { - Some(ref convo) => { + match self.peers.get(event_id) { + Some(convo) => { if !convo.stats.outbound { let stats = convo.stats.clone(); if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { @@ -415,7 +415,7 @@ impl PeerNetwork { for prune in pruned_by_ip.iter() { debug!("{:?}: prune by IP: {:?}", &self.local_peer, prune); - self.deregister_neighbor(&prune); + self.deregister_neighbor(prune); if !self.prune_inbound_counts.contains_key(prune) { self.prune_inbound_counts.insert(prune.clone(), 1); @@ -437,7 +437,7 @@ impl PeerNetwork { for prune in pruned_by_org.iter() { debug!("{:?}: prune by Org: {:?}", &self.local_peer, prune); - self.deregister_neighbor(&prune); + self.deregister_neighbor(prune); if !self.prune_outbound_counts.contains_key(prune) { self.prune_outbound_counts.insert(prune.clone(), 1); diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 4e1d7eaf18..b07e070ca1 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1002,7 +1002,7 @@ impl Relayer { if !Relayer::static_check_problematic_relayed_nakamoto_block( chainstate.mainnet, epoch_id, - &block, + block, ASTRules::PrecheckSize, ) { warn!( @@ -1230,9 +1230,8 @@ impl Relayer { &block.block_hash() ); if chainstate.fault_injection.hide_blocks { - if let Some(sn) = - SortitionDB::get_block_snapshot_consensus(sort_ic, &consensus_hash) - .expect("FATAL: failed to query downloaded block snapshot") + if let Some(sn) = SortitionDB::get_block_snapshot_consensus(sort_ic, consensus_hash) + .expect("FATAL: failed to query downloaded block snapshot") { if Self::fault_injection_is_block_hidden(&block.header, sn.block_height) { 
continue; @@ -1345,15 +1344,13 @@ impl Relayer { } for BlocksDatum(consensus_hash, block) in blocks_data.blocks.iter() { - match SortitionDB::get_block_snapshot_consensus( - sort_ic.conn(), - &consensus_hash, - )? { + match SortitionDB::get_block_snapshot_consensus(sort_ic.conn(), consensus_hash)? + { Some(sn) => { if !sn.pox_valid { warn!( "Consensus hash {} is not on the valid PoX fork", - &consensus_hash + consensus_hash ); continue; } @@ -1367,14 +1364,14 @@ impl Relayer { } } None => { - warn!("Consensus hash {} not known to this node", &consensus_hash); + warn!("Consensus hash {} not known to this node", consensus_hash); continue; } }; debug!( "Received pushed block {}/{} from {}", - &consensus_hash, + consensus_hash, block.block_hash(), neighbor_key ); @@ -1382,7 +1379,7 @@ impl Relayer { match Relayer::process_new_anchored_block( sort_ic, chainstate, - &consensus_hash, + consensus_hash, block, 0, ) { @@ -1390,20 +1387,20 @@ impl Relayer { if BlockAcceptResponse::Accepted == accept_response { debug!( "Accepted block {}/{} from {}", - &consensus_hash, &bhh, &neighbor_key + consensus_hash, &bhh, &neighbor_key ); new_blocks.insert(consensus_hash.clone(), block.clone()); } else { debug!( "Rejected block {}/{} from {}: {:?}", - &consensus_hash, &bhh, &neighbor_key, &accept_response + consensus_hash, &bhh, &neighbor_key, &accept_response ); } } Err(chainstate_error::InvalidStacksBlock(msg)) => { warn!( "Invalid pushed Stacks block {}/{}: {}", - &consensus_hash, + consensus_hash, block.block_hash(), msg ); @@ -1412,7 +1409,7 @@ impl Relayer { Err(e) => { warn!( "Could not process pushed Stacks block {}/{}: {:?}", - &consensus_hash, + consensus_hash, block.block_hash(), &e ); diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 78f0f6fbb5..7410e5afa1 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -91,8 +91,8 @@ impl HttpPeer { #[cfg_attr(test, mutants::skip)] pub fn find_free_conversation(&self, data_url: 
&UrlString) -> Option { for (event_id, convo) in self.peers.iter() { - if let Some(ref url) = convo.get_url() { - if *url == data_url && !convo.is_request_inflight() { + if let Some(url) = convo.get_url() { + if url == data_url && !convo.is_request_inflight() { return Some(*event_id); } } @@ -560,7 +560,7 @@ impl HttpPeer { let mut to_remove = vec![]; let mut msgs = vec![]; for event_id in &poll_state.ready { - let Some(client_sock) = self.sockets.get_mut(&event_id) else { + let Some(client_sock) = self.sockets.get_mut(event_id) else { debug!("Rogue socket event {}", event_id); to_remove.push(*event_id); continue; diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 0faf5bbe03..59ab9fe4eb 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -173,7 +173,7 @@ fn inner_get_slot_metadata( let stackerdb_id = inner_get_stackerdb_id(conn, smart_contract)?; let sql = "SELECT slot_id,version,data_hash,signature FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; let args = params![stackerdb_id, slot_id]; - query_row(conn, &sql, args).map_err(|e| e.into()) + query_row(conn, sql, args).map_err(|e| e.into()) } /// Load up validation information from the database, keyed by the chunk's database's smart @@ -188,7 +188,7 @@ fn inner_get_slot_validation( let sql = "SELECT signer,write_time,version FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; let args = params![stackerdb_id, slot_id]; - query_row(conn, &sql, args).map_err(|e| e.into()) + query_row(conn, sql, args).map_err(|e| e.into()) } impl StackerDBTx<'_> { @@ -218,7 +218,7 @@ impl StackerDBTx<'_> { &self, ) -> Result, net_error> { let sql = "SELECT smart_contract_id FROM databases ORDER BY smart_contract_id"; - query_rows(&self.conn(), sql, NO_PARAMS).map_err(|e| e.into()) + query_rows(self.conn(), sql, NO_PARAMS).map_err(|e| e.into()) } /// Get the Stacker DB ID for a smart contract @@ -226,7 +226,7 @@ impl StackerDBTx<'_> { &self, 
smart_contract: &QualifiedContractIdentifier, ) -> Result { - inner_get_stackerdb_id(&self.conn(), smart_contract) + inner_get_stackerdb_id(self.conn(), smart_contract) } /// Set up a database's storage slots. @@ -246,14 +246,14 @@ impl StackerDBTx<'_> { } let qry = "INSERT OR REPLACE INTO databases (smart_contract_id) VALUES (?1)"; - let mut stmt = self.sql_tx.prepare(&qry)?; + let mut stmt = self.sql_tx.prepare(qry)?; let args = params![smart_contract.to_string()]; stmt.execute(args)?; let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "INSERT OR REPLACE INTO chunks (stackerdb_id,signer,slot_id,version,write_time,data,data_hash,signature) VALUES (?1,?2,?3,?4,?5,?6,?7,?8)"; - let mut stmt = self.sql_tx.prepare(&qry)?; + let mut stmt = self.sql_tx.prepare(qry)?; let mut slot_id = 0u32; for (principal, slot_count) in slots.iter() { @@ -288,7 +288,7 @@ impl StackerDBTx<'_> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1"; let args = params![stackerdb_id]; - let mut stmt = self.sql_tx.prepare(&qry)?; + let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; Ok(()) } @@ -297,7 +297,7 @@ impl StackerDBTx<'_> { fn shrink_stackerdb(&self, stackerdb_id: i64, first_slot_id: u32) -> Result<(), net_error> { let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1 AND slot_id >= ?2"; let args = params![&stackerdb_id, &first_slot_id]; - let mut stmt = self.sql_tx.prepare(&qry)?; + let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; Ok(()) } @@ -337,7 +337,7 @@ impl StackerDBTx<'_> { // new slot, or existing slot with a different signer let qry = "INSERT OR REPLACE INTO chunks (stackerdb_id,signer,slot_id,version,write_time,data,data_hash,signature) VALUES (?1,?2,?3,?4,?5,?6,?7,?8)"; - let mut stmt = self.sql_tx.prepare(&qry)?; + let mut stmt = self.sql_tx.prepare(qry)?; let args = params![ stackerdb_id, principal.to_string(), @@ -386,7 +386,7 @@ impl StackerDBTx<'_> { ) -> 
Result<(), net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "UPDATE chunks SET version = ?1, data_hash = ?2, signature = ?3, data = ?4, write_time = ?5 WHERE stackerdb_id = ?6 AND slot_id = ?7"; - let mut stmt = self.sql_tx.prepare(&sql)?; + let mut stmt = self.sql_tx.prepare(sql)?; let args = params![ slot_desc.slot_version, @@ -560,7 +560,7 @@ impl StackerDBs { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT signer FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; let args = params![stackerdb_id, slot_id]; - query_row(&self.conn, &sql, args).map_err(|e| e.into()) + query_row(&self.conn, sql, args).map_err(|e| e.into()) } /// Get all principals who can write to a particular stacker DB. @@ -573,7 +573,7 @@ impl StackerDBs { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT signer FROM chunks WHERE stackerdb_id = ?1 GROUP BY signer"; let args = params![stackerdb_id]; - query_rows(&self.conn, &sql, args).map_err(|e| e.into()) + query_rows(&self.conn, sql, args).map_err(|e| e.into()) } /// Get the slot metadata @@ -594,7 +594,7 @@ impl StackerDBs { let stackerdb_id = inner_get_stackerdb_id(&self.conn, smart_contract)?; let sql = "SELECT slot_id,version,data_hash,signature FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id ASC"; let args = params![stackerdb_id]; - query_rows(&self.conn, &sql, args).map_err(|e| e.into()) + query_rows(&self.conn, sql, args).map_err(|e| e.into()) } /// Get a slot's validation data @@ -633,7 +633,7 @@ impl StackerDBs { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT version FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id"; let args = params![stackerdb_id]; - query_rows(&self.conn, &sql, args).map_err(|e| e.into()) + query_rows(&self.conn, sql, args).map_err(|e| e.into()) } /// Get the list of slot write timestamps for a given DB instance at a given reward cycle @@ -644,7 +644,7 @@ impl StackerDBs { let 
stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT write_time FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id"; let args = params![stackerdb_id]; - query_rows(&self.conn, &sql, args).map_err(|e| e.into()) + query_rows(&self.conn, sql, args).map_err(|e| e.into()) } /// Get the latest chunk out of the database. @@ -692,6 +692,6 @@ impl StackerDBs { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT slot_id,version,signature,data FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2 AND version = ?3"; let args = params![stackerdb_id, slot_id, slot_version]; - query_row(&self.conn, &qry, args).map_err(|e| e.into()) + query_row(&self.conn, qry, args).map_err(|e| e.into()) } } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 899990402d..f4a9d1a302 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -313,7 +313,7 @@ impl StackerDBs { // attempt to load the config from the contract itself StackerDBConfig::from_smart_contract( chainstate, - &sortdb, + sortdb, &stackerdb_contract_id, num_neighbors, connection_opts @@ -546,7 +546,7 @@ impl PeerNetwork { if let Ok(Some(_)) = NakamotoChainState::get_tenure_start_block_header( &mut chainstate.index_conn(), &tip_block_id, - &rc_consensus_hash, + rc_consensus_hash, ) { debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk from {} since {} != {} (remote is stale)", self.get_local_peer(), &naddr, &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); return StacksMessageType::Nack(NackData::new(NackErrorCodes::StaleView)); diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 7dfeb809c7..4115827c58 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -289,7 +289,7 @@ impl StackerDBSync { if let Some(event_id) = network.get_event_id(&nk) { self.comms.unpin_connection(event_id); } - 
self.connected_replicas.remove(&naddr); + self.connected_replicas.remove(naddr); } /// Make a chunk inv request @@ -531,7 +531,7 @@ impl StackerDBSync { // validate -- must be a valid chunk if !network.validate_received_chunk( &self.smart_contract_id, - &config, + config, data, &self.expected_versions, )? { @@ -984,7 +984,7 @@ impl StackerDBSync { } // got everything. Calculate download priority - let priorities = self.make_chunk_request_schedule(&network, None)?; + let priorities = self.make_chunk_request_schedule(network, None)?; let expected_versions = self.stackerdbs.get_slot_versions(&self.smart_contract_id)?; self.chunk_fetch_priorities = priorities; @@ -1050,7 +1050,7 @@ impl StackerDBSync { if let Err(e) = self.comms.neighbor_send( network, - &selected_neighbor, + selected_neighbor, StacksMessageType::StackerDBGetChunk(chunk_request.clone()), ) { info!( @@ -1058,7 +1058,7 @@ impl StackerDBSync { network.get_local_peer(), &self.smart_contract_id, chunk_request.slot_id, - &selected_neighbor, + selected_neighbor, &e ); unpin.insert(selected_neighbor.clone()); @@ -1159,7 +1159,7 @@ impl StackerDBSync { pub fn pushchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { if self.chunk_push_priorities.is_empty() && self.push_round != self.rounds { // only do this once per round - let priorities = self.make_chunk_push_schedule(&network)?; + let priorities = self.make_chunk_push_schedule(network)?; self.chunk_push_priorities = priorities; self.push_round = self.rounds; } @@ -1224,7 +1224,7 @@ impl StackerDBSync { let slot_version = chunk_push.chunk_data.slot_version; if let Err(e) = self.comms.neighbor_send( network, - &selected_neighbor, + selected_neighbor, StacksMessageType::StackerDBPushChunk(chunk_push), ) { info!( @@ -1232,7 +1232,7 @@ impl StackerDBSync { network.get_local_peer(), &self.smart_contract_id, slot_id, - &selected_neighbor, + selected_neighbor, &e ); continue; @@ -1342,7 +1342,7 @@ impl StackerDBSync { } let priorities = - 
self.make_chunk_request_schedule(&network, Some(expected_versions.clone()))?; + self.make_chunk_request_schedule(network, Some(expected_versions.clone()))?; self.chunk_fetch_priorities = priorities; self.expected_versions = expected_versions; diff --git a/stackslib/src/net/stackerdb/tests/config.rs b/stackslib/src/net/stackerdb/tests/config.rs index cff4ca1059..932193acdc 100644 --- a/stackslib/src/net/stackerdb/tests/config.rs +++ b/stackslib/src/net/stackerdb/tests/config.rs @@ -53,7 +53,7 @@ fn make_smart_contract( tx_contract.set_tx_fee(fee); let mut tx_signer = StacksTransactionSigner::new(&tx_contract); - tx_signer.sign_origin(&pk).unwrap(); + tx_signer.sign_origin(pk).unwrap(); let tx_contract_signed = tx_signer.get_tx().unwrap(); tx_contract_signed @@ -646,7 +646,7 @@ fn test_hint_replicas_override() { max_neighbors: 7, }; - let tx = make_smart_contract("test-0", &config_contract, &contract_owner, 0, 10000); + let tx = make_smart_contract("test-0", config_contract, &contract_owner, 0, 10000); txs.push(tx); peer.tenure_with_txs(&txs, &mut coinbase_nonce); diff --git a/stackslib/src/net/stackerdb/tests/db.rs b/stackslib/src/net/stackerdb/tests/db.rs index 9bcf800529..a8b7617a13 100644 --- a/stackslib/src/net/stackerdb/tests/db.rs +++ b/stackslib/src/net/stackerdb/tests/db.rs @@ -217,7 +217,7 @@ fn test_stackerdb_create_list_delete() { // each DB's single chunk exists for sc in dbs.iter() { - db.get_latest_chunk(&sc, 0).unwrap().expect("missing chunk"); + db.get_latest_chunk(sc, 0).unwrap().expect("missing chunk"); } // remove a db @@ -260,7 +260,7 @@ fn test_stackerdb_create_list_delete() { // only existing DBs still have chunks for sc in dbs.iter() { - db.get_latest_chunk(&sc, 0).unwrap().expect("missing chunk"); + db.get_latest_chunk(sc, 0).unwrap().expect("missing chunk"); } // deletion is idempotent @@ -302,7 +302,7 @@ fn test_stackerdb_create_list_delete() { ); // only existing DBs still have chunks for sc in dbs.iter() { - db.get_latest_chunk(&sc, 
0).unwrap().expect("missing chunk"); + db.get_latest_chunk(sc, 0).unwrap().expect("missing chunk"); } } @@ -448,7 +448,7 @@ fn test_stackerdb_insert_query_chunks() { C32_ADDRESS_VERSION_MAINNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, 1, - &vec![StacksPublicKey::from_private(&pk)], + &vec![StacksPublicKey::from_private(pk)], ) .unwrap() }) @@ -473,7 +473,7 @@ fn test_stackerdb_insert_query_chunks() { data: vec![i as u8; 128], }; - chunk_data.sign(&pk).unwrap(); + chunk_data.sign(pk).unwrap(); let slot_metadata = tx.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); assert_eq!(slot_metadata.slot_id, i as u32); @@ -505,7 +505,7 @@ fn test_stackerdb_insert_query_chunks() { // should fail -- too many writes version chunk_data.slot_version = db_config.max_writes + 1; - chunk_data.sign(&pk).unwrap(); + chunk_data.sign(pk).unwrap(); if let Err(net_error::TooManySlotWrites { supplied_version, max_writes, @@ -549,7 +549,7 @@ fn test_stackerdb_insert_query_chunks() { assert_eq!(chunk.data, vec![i as u8; 128]); assert_eq!(chunk.slot_version, 1); assert_eq!(chunk.slot_id, i as u32); - assert!(chunk.verify(&addr).unwrap()); + assert!(chunk.verify(addr).unwrap()); // incorrect version let chunk = db.get_chunk(&sc, i as u32, 0).unwrap(); @@ -560,7 +560,7 @@ fn test_stackerdb_insert_query_chunks() { assert!(chunk.is_none()); let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); - assert!(slot_metadata.verify(&addr).unwrap()); + assert!(slot_metadata.verify(addr).unwrap()); } let versions = db.get_slot_versions(&sc).unwrap(); @@ -603,7 +603,7 @@ fn test_reconfigure_stackerdb() { C32_ADDRESS_VERSION_MAINNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, 1, - &vec![StacksPublicKey::from_private(&pk)], + &vec![StacksPublicKey::from_private(pk)], ) .unwrap() }) @@ -629,7 +629,7 @@ fn test_reconfigure_stackerdb() { data: vec![i as u8; 128], }; - chunk_data.sign(&pk).unwrap(); + chunk_data.sign(pk).unwrap(); let slot_metadata = tx.get_slot_metadata(&sc, i as 
u32).unwrap().unwrap(); assert_eq!(slot_metadata.slot_id, i as u32); @@ -677,7 +677,7 @@ fn test_reconfigure_stackerdb() { C32_ADDRESS_VERSION_MAINNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, 1, - &vec![StacksPublicKey::from_private(&pk)], + &vec![StacksPublicKey::from_private(pk)], ) .unwrap() }) @@ -759,7 +759,7 @@ fn test_reconfigure_stackerdb() { C32_ADDRESS_VERSION_MAINNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, 1, - &vec![StacksPublicKey::from_private(&pk)], + &vec![StacksPublicKey::from_private(pk)], ) .unwrap() }) diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index 627db94758..af5afaea11 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -861,7 +861,7 @@ fn dump_peers(peers: &Vec) { } let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); - let num_allowed = all_neighbors.iter().fold(0, |mut sum, ref n2| { + let num_allowed = all_neighbors.iter().fold(0, |mut sum, n2| { sum += if n2.allowed < 0 { 1 } else { 0 }; sum }); @@ -1002,7 +1002,7 @@ fn run_topology_test_ex( // allowed peers are still connected match initial_allowed.get(&nk) { - Some(ref peer_list) => { + Some(peer_list) => { for pnk in peer_list.iter() { if !peers[i].network.events.contains_key(&pnk.clone()) { error!( @@ -1018,7 +1018,7 @@ fn run_topology_test_ex( // denied peers are never connected match initial_denied.get(&nk) { - Some(ref peer_list) => { + Some(peer_list) => { for pnk in peer_list.iter() { if peers[i].network.events.contains_key(&pnk.clone()) { error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); @@ -1041,7 +1041,7 @@ fn run_topology_test_ex( // done? 
let now_finished = if use_finished_check { - finished_check(&peers) + finished_check(peers) } else { let mut done = true; let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); @@ -1082,13 +1082,13 @@ fn run_topology_test_ex( } test_debug!("Finished walking the network {} times", count); - dump_peers(&peers); - dump_peer_histograms(&peers); + dump_peers(peers); + dump_peer_histograms(peers); } test_debug!("Converged after {} calls to network.run()", count); - dump_peers(&peers); - dump_peer_histograms(&peers); + dump_peers(peers); + dump_peer_histograms(peers); // each peer learns each other peer's stacker DBs for (i, peer) in peers.iter().enumerate() { diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 9c995f1f32..d80e6f3de2 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -92,7 +92,7 @@ fn test_get_block_availability() { let num_blocks = 10; let first_stacks_block_height = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -111,7 +111,7 @@ fn test_get_block_availability() { peer_1.next_burnchain_block_raw(burn_ops); let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_2.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer_2.sortdb.as_ref().unwrap().conn()) .unwrap(); block_data.push((sn.consensus_hash.clone(), stacks_block, microblocks)); } @@ -289,7 +289,7 @@ where let mut num_blocks = 10; let first_stacks_block_height = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peers[0].sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -553,7 +553,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { 
peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -834,7 +834,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -925,7 +925,7 @@ pub fn test_get_blocks_and_microblocks_5_peers_star() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -999,7 +999,7 @@ pub fn test_get_blocks_and_microblocks_5_peers_line() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1081,7 +1081,7 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1160,7 +1160,7 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_sockets() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1248,7 +1248,7 @@ pub fn test_get_blocks_and_microblocks_ban_url() { peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1376,7 +1376,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc peers[0].next_burnchain_block_raw(burn_ops); let sn = 
SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].sortdb.as_ref().unwrap().conn(), ) .unwrap(); @@ -1391,7 +1391,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc } else { test_debug!("Build child block {}", i); let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].sortdb.as_ref().unwrap().conn(), ) .unwrap(); @@ -1466,7 +1466,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].sortdb.as_ref().unwrap().conn(), ) .unwrap(); diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index aed43bdcba..cbf3a14449 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -527,7 +527,7 @@ fn test_sync_inv_set_blocks_microblocks_available() { ] .iter_mut() { - let working_dir = get_burnchain(&test_path, None).working_dir; + let working_dir = get_burnchain(test_path, None).working_dir; // pre-populate headers let mut indexer = BitcoinIndexer::new_unit_test(&working_dir); @@ -582,7 +582,7 @@ fn test_sync_inv_set_blocks_microblocks_available() { let num_blocks = 5; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -752,7 +752,7 @@ fn test_sync_inv_make_inv_messages() { let mut peer_1 = TestPeer::new(peer_1_config); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -1343,7 +1343,7 
@@ fn test_sync_inv_2_peers_plain() { let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height + 1 }; @@ -1520,7 +1520,7 @@ fn test_sync_inv_2_peers_stale() { let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height + 1 }; @@ -1560,7 +1560,7 @@ fn test_sync_inv_2_peers_stale() { assert_eq!(inv.get_dead_peers().len(), 0); assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(ref peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { + if let Some(peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { if peer_2_inv.inv.num_sortitions == first_stacks_block_height - peer_1.config.burnchain.first_block_height @@ -1583,7 +1583,7 @@ fn test_sync_inv_2_peers_stale() { assert_eq!(inv.get_dead_peers().len(), 0); assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(ref peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { + if let Some(peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { if peer_1_inv.inv.num_sortitions == first_stacks_block_height - peer_1.config.burnchain.first_block_height @@ -1629,7 +1629,7 @@ fn test_sync_inv_2_peers_unstable() { let first_stacks_block_height = { let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height + 1 }; @@ -1842,7 +1842,7 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let first_stacks_block_height = { let sn = - 
SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height + 1 }; diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 436e5a315a..9efc405bd1 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -71,7 +71,7 @@ fn test_mempool_sync_2_peers() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height + 1 }; @@ -104,7 +104,7 @@ fn test_mempool_sync_2_peers() { let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + auth: TransactionAuth::from_p2pkh(pk).unwrap(), anchor_mode: TransactionAnchorMode::Any, post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], @@ -118,7 +118,7 @@ fn test_mempool_sync_2_peers() { tx.set_origin_nonce(0); let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); + tx_signer.sign_origin(pk).unwrap(); let tx = tx_signer.get_tx().unwrap(); @@ -184,7 +184,7 @@ fn test_mempool_sync_2_peers() { let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + auth: TransactionAuth::from_p2pkh(pk).unwrap(), anchor_mode: TransactionAnchorMode::Any, post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], @@ -198,7 +198,7 @@ fn test_mempool_sync_2_peers() { tx.set_origin_nonce(1); let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); + tx_signer.sign_origin(pk).unwrap(); let tx = tx_signer.get_tx().unwrap(); @@ -339,7 +339,7 
@@ fn test_mempool_sync_2_peers_paginated() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height + 1 }; @@ -371,7 +371,7 @@ fn test_mempool_sync_2_peers_paginated() { let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + auth: TransactionAuth::from_p2pkh(pk).unwrap(), anchor_mode: TransactionAnchorMode::Any, post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], @@ -385,7 +385,7 @@ fn test_mempool_sync_2_peers_paginated() { tx.set_origin_nonce(0); let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); + tx_signer.sign_origin(pk).unwrap(); let tx = tx_signer.get_tx().unwrap(); @@ -530,7 +530,7 @@ fn test_mempool_sync_2_peers_blacklisted() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height + 1 }; @@ -563,7 +563,7 @@ fn test_mempool_sync_2_peers_blacklisted() { let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + auth: TransactionAuth::from_p2pkh(pk).unwrap(), anchor_mode: TransactionAnchorMode::Any, post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], @@ -577,7 +577,7 @@ fn test_mempool_sync_2_peers_blacklisted() { tx.set_origin_nonce(0); let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); + tx_signer.sign_origin(pk).unwrap(); let tx = tx_signer.get_tx().unwrap(); @@ -741,7 +741,7 @@ fn 
test_mempool_sync_2_peers_problematic() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height + 1 }; @@ -777,7 +777,7 @@ fn test_mempool_sync_2_peers_problematic() { let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); let tx = make_contract_tx( - &pk, + pk, 0, (tx_exceeds_body.len() * 100) as u64, "test-exceeds", @@ -1022,7 +1022,7 @@ pub fn test_mempool_storage_nakamoto() { &sortdb, &tip.consensus_hash, &tip.anchored_header.block_hash(), - &mempool_tx, + mempool_tx, None, &epoch.block_limit, &epoch.epoch_id, @@ -1176,7 +1176,7 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + auth: TransactionAuth::from_p2pkh(pk).unwrap(), anchor_mode: TransactionAnchorMode::Any, post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], @@ -1190,7 +1190,7 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { tx.set_origin_nonce(0); let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); + tx_signer.sign_origin(pk).unwrap(); let tx = tx_signer.get_tx().unwrap(); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 3a07ed006c..7b6379db22 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -280,7 +280,7 @@ impl NakamotoBootPlan { &mut sort_handle, &mut node.chainstate, &peer.network.stacks_tip.block_id(), - &block, + block, None, NakamotoBlockObtainMethod::Pushed, ) @@ -317,7 +317,7 @@ impl NakamotoBootPlan { &mut sort_handle, &mut node.chainstate, &peer.network.stacks_tip.block_id(), - &block, + block, None, NakamotoBlockObtainMethod::Pushed, ) @@ -1147,7 +1147,7 
@@ fn test_boot_nakamoto_peer() { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, ]); - let plan = NakamotoBootPlan::new(&function_name!()) + let plan = NakamotoBootPlan::new(function_name!()) .with_private_key(private_key) .with_pox_constants(10, 3) .with_initial_balances(vec![(addr.into(), 1_000_000)]) diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 1106721e38..22be02bd78 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -577,7 +577,7 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { }, |num_blocks, ref mut peers| { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); let this_reward_cycle = peers[0] @@ -594,7 +594,7 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { // announcements in reward cycles the remote // peer doesn't know about won't work. 
let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); if peers[0] @@ -620,7 +620,7 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -669,7 +669,7 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { } let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); let this_reward_cycle = peers[0] @@ -1107,7 +1107,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( }, |num_blocks, ref mut peers| { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); let this_reward_cycle = peers[0] @@ -1120,7 +1120,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( let mut block_data = vec![]; for _ in 0..num_blocks { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); if peers[0] @@ -1145,7 +1145,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1415,7 +1415,7 @@ fn test_get_blocks_and_microblocks_upload_blocks_http() { }, |num_blocks, ref mut peers| { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); let this_reward_cycle = peers[0] @@ -1432,7 +1432,7 @@ fn test_get_blocks_and_microblocks_upload_blocks_http() { // announcements in 
reward cycles the remote // peer doesn't know about won't work. let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); if peers[0] @@ -1458,7 +1458,7 @@ fn test_get_blocks_and_microblocks_upload_blocks_http() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1638,7 +1638,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { }, |num_blocks, ref mut peers| { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); let this_reward_cycle = peers[0] @@ -1651,7 +1651,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { let mut block_data = vec![]; for b in 0..num_blocks { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); if peers[0] @@ -1680,7 +1680,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -2015,7 +2015,7 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { }, |num_blocks, ref mut peers| { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); let this_reward_cycle = peers[0] @@ -2028,7 +2028,7 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { let mut block_data = vec![]; for _ in 0..num_blocks { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); if peers[0] @@ -2053,7 +2053,7 @@ fn 
test_get_blocks_and_microblocks_peers_broadcast() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); @@ -2327,7 +2327,7 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { }, |num_blocks, ref mut peers| { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); let this_reward_cycle = peers[0] @@ -2340,7 +2340,7 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { let mut block_data = vec![]; for _ in 0..num_blocks { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); if peers[0] @@ -2365,7 +2365,7 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -2382,7 +2382,7 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { peers[i].next_burnchain_block_raw(vec![]); } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push((sn.consensus_hash.clone(), None, None)); @@ -2459,7 +2459,7 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { }, |num_blocks, ref mut peers| { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); let this_reward_cycle = peers[0] @@ -2472,7 +2472,7 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { let mut block_data = vec![]; for block_num in 0..num_blocks { let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + 
peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); @@ -2494,7 +2494,7 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { } let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -2794,7 +2794,7 @@ fn process_new_blocks_rejects_problematic_asts() { let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) .unwrap(); sn.block_height }; @@ -2822,7 +2822,7 @@ fn process_new_blocks_rejects_problematic_asts() { }; let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); let mblock_privk = StacksPrivateKey::new(); @@ -2885,7 +2885,7 @@ fn process_new_blocks_rejects_problematic_asts() { peer.process_stacks_epoch(&block, &consensus_hash, &vec![]); let tip = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); let (burn_ops, bad_block, mut microblocks) = peer.make_tenure( |ref mut miner, @@ -3156,7 +3156,7 @@ fn process_new_blocks_rejects_problematic_asts() { &mut network_result, &sortdb, &mut peer.stacks_node.as_mut().unwrap().chainstate, - &mut peer.mempool.as_mut().unwrap(), + peer.mempool.as_mut().unwrap(), None, ) .unwrap(); diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index e7f1c256a4..1dbd3d7c37 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -138,7 +138,7 @@ impl PeerNetwork { // punish this peer info!( 
"Peer {:?} sent an invalid update for {}", - &outbound_neighbor_key, + outbound_neighbor_key, if microblocks { "streamed microblocks" } else { @@ -147,7 +147,7 @@ impl PeerNetwork { ); self.bans.insert(event_id); - if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { + if let Some(outbound_event_id) = self.events.get(outbound_neighbor_key) { self.bans.insert(*outbound_event_id); } return Ok(None); @@ -155,7 +155,7 @@ impl PeerNetwork { Err(e) => { warn!( "Failed to update inv state for {:?}: {:?}", - &outbound_neighbor_key, &e + outbound_neighbor_key, &e ); return Ok(None); } @@ -368,7 +368,7 @@ impl PeerNetwork { consensus_hash: &ConsensusHash, is_microblock: bool, ) -> Result { - let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &consensus_hash)? + let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash)? .ok_or(ChainstateError::NoSuchBlockError)?; let block_hash_opt = if sn.sortition { Some(sn.winning_stacks_block_hash) @@ -421,7 +421,7 @@ impl PeerNetwork { debug!( "{:?}: Process BlocksAvailable from {:?} with {} entries", &self.get_local_peer(), - &outbound_neighbor_key, + outbound_neighbor_key, new_blocks.available.len() ); @@ -449,9 +449,9 @@ impl PeerNetwork { info!( "{:?}: Failed to handle BlocksAvailable({}/{}) from {}: {:?}", &self.get_local_peer(), - &consensus_hash, + consensus_hash, &block_hash, - &outbound_neighbor_key, + outbound_neighbor_key, &e ); continue; @@ -461,14 +461,14 @@ impl PeerNetwork { let need_block = match PeerNetwork::need_block_or_microblock_stream( sortdb, chainstate, - &consensus_hash, + consensus_hash, false, ) { Ok(x) => x, Err(e) => { warn!( "Failed to determine if we need block for consensus hash {}: {:?}", - &consensus_hash, &e + consensus_hash, &e ); false } @@ -476,7 +476,7 @@ impl PeerNetwork { debug!( "Need block {}/{}? 
{}", - &consensus_hash, &block_hash, need_block + consensus_hash, &block_hash, need_block ); if need_block { @@ -565,9 +565,9 @@ impl PeerNetwork { info!( "{:?}: Failed to handle MicroblocksAvailable({}/{}) from {:?}: {:?}", &self.get_local_peer(), - &consensus_hash, + consensus_hash, &block_hash, - &outbound_neighbor_key, + outbound_neighbor_key, &e ); continue; @@ -577,7 +577,7 @@ impl PeerNetwork { let need_microblock_stream = match PeerNetwork::need_block_or_microblock_stream( sortdb, chainstate, - &consensus_hash, + consensus_hash, true, ) { Ok(x) => x, @@ -589,7 +589,7 @@ impl PeerNetwork { debug!( "Need microblock stream {}/{}? {}", - &consensus_hash, &block_hash, need_microblock_stream + consensus_hash, &block_hash, need_microblock_stream ); if need_microblock_stream { @@ -648,20 +648,18 @@ impl PeerNetwork { let mut to_buffer = false; for BlocksDatum(consensus_hash, block) in new_blocks.blocks.iter() { - let sn = match SortitionDB::get_block_snapshot_consensus( - &sortdb.conn(), - &consensus_hash, - ) { + let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) + { Ok(Some(sn)) => sn, Ok(None) => { if buffer { debug!( "{:?}: Will buffer unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", &self.get_local_peer(), - &consensus_hash, + consensus_hash, &block.block_hash(), StacksBlockHeader::make_index_block_hash( - &consensus_hash, + consensus_hash, &block.block_hash() ) ); @@ -670,10 +668,10 @@ impl PeerNetwork { debug!( "{:?}: Will drop unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", &self.get_local_peer(), - &consensus_hash, + consensus_hash, &block.block_hash(), StacksBlockHeader::make_index_block_hash( - &consensus_hash, + consensus_hash, &block.block_hash() ) ); @@ -717,7 +715,7 @@ impl PeerNetwork { let _ = self.handle_unsolicited_inv_update_epoch2x( sortdb, event_id, - &outbound_neighbor_key, + outbound_neighbor_key, &sn.consensus_hash, false, ); @@ -846,7 +844,7 @@ impl 
PeerNetwork { nakamoto_block: &NakamotoBlock, ) -> (Option, bool) { let (reward_set_sn, can_process) = match SortitionDB::get_block_snapshot_consensus( - &sortdb.conn(), + sortdb.conn(), &nakamoto_block.header.consensus_hash, ) { Ok(Some(sn)) => (sn, true), @@ -1217,7 +1215,7 @@ impl PeerNetwork { && !self.can_buffer_data_message( *event_id, self.pending_messages.get(&(*event_id, neighbor_key.clone())).unwrap_or(&vec![]), - &message, + message, ) { // unable to store this due to quota being exceeded diff --git a/stackslib/src/util_lib/bloom.rs b/stackslib/src/util_lib/bloom.rs index bd9706fd59..d7cf93fa9d 100644 --- a/stackslib/src/util_lib/bloom.rs +++ b/stackslib/src/util_lib/bloom.rs @@ -351,7 +351,7 @@ impl BloomCounter { max_items: u32, hasher: H, ) -> Result, db_error> { - let sql = format!("CREATE TABLE IF NOT EXISTS {}(counts BLOB NOT NULL, num_bins INTEGER NOT NULL, num_hashes INTEGER NOT NULL, hasher BLOB NOT NULL);", table_name); + let sql = format!("CREATE TABLE IF NOT EXISTS {table_name}(counts BLOB NOT NULL, num_bins INTEGER NOT NULL, num_hashes INTEGER NOT NULL, hasher BLOB NOT NULL);"); tx.execute(&sql, NO_PARAMS).map_err(db_error::SqliteError)?; let (num_bins, num_hashes) = bloom_hash_count(error_rate, max_items); @@ -366,8 +366,8 @@ impl BloomCounter { tx.execute(&sql, args).map_err(db_error::SqliteError)?; - let sql = format!("SELECT rowid FROM {}", table_name); - let counts_rowid: u64 = query_expect_row(&tx, &sql, NO_PARAMS)? + let sql = format!("SELECT rowid FROM {table_name}"); + let counts_rowid: u64 = query_expect_row(tx, &sql, NO_PARAMS)? 
.expect("BUG: inserted bloom counter but can't find row ID"); Ok(BloomCounter { @@ -380,7 +380,7 @@ impl BloomCounter { } pub fn try_load(conn: &DBConn, table_name: &str) -> Result>, db_error> { - let sql = format!("SELECT rowid,* FROM {}", table_name); + let sql = format!("SELECT rowid,* FROM {table_name}"); let result = conn.query_row_and_then(&sql, NO_PARAMS, |row| { let mut hasher_blob = row .get_ref("hasher")? diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 2e9e5c4b1c..6006f8521d 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -490,7 +490,7 @@ where // gather let mut row_data = vec![]; while let Some(row) = rows.next().map_err(|e| Error::SqliteError(e))? { - let next_row = T::from_column(&row, column_name)?; + let next_row = T::from_column(row, column_name)?; row_data.push(next_row); } @@ -918,7 +918,7 @@ impl<'a, C: Clone, T: MarfTrieId> IndexDBTx<'a, C, T> { marf_values.push(marf_value); } - self.index_mut().insert_batch(&keys, marf_values)?; + self.index_mut().insert_batch(keys, marf_values)?; let root_hash = self.index_mut().seal()?; Ok(root_hash) } diff --git a/stackslib/src/util_lib/signed_structured_data.rs b/stackslib/src/util_lib/signed_structured_data.rs index 14882c2fb9..0826f262be 100644 --- a/stackslib/src/util_lib/signed_structured_data.rs +++ b/stackslib/src/util_lib/signed_structured_data.rs @@ -30,7 +30,7 @@ pub const STRUCTURED_DATA_PREFIX: [u8; 6] = [0x53, 0x49, 0x50, 0x30, 0x31, 0x38] pub fn structured_data_hash(value: Value) -> Sha256Sum { let mut bytes = vec![]; value.serialize_write(&mut bytes).unwrap(); - Sha256Sum::from_data(&bytes.as_slice()) + Sha256Sum::from_data(bytes.as_slice()) } /// Generate a message hash for signing structured Clarity data. 
@@ -241,7 +241,7 @@ pub mod pox4 { .analyze_smart_contract( &pox_contract_id, clarity_version, - &body, + body, ASTRules::PrecheckSize, ) .unwrap(); @@ -250,7 +250,7 @@ pub mod pox4 { &pox_contract_id, clarity_version, &ast, - &body, + body, None, |_, _| false, ) diff --git a/stackslib/src/util_lib/strings.rs b/stackslib/src/util_lib/strings.rs index d1fb48c86b..ae9ea3e4f7 100644 --- a/stackslib/src/util_lib/strings.rs +++ b/stackslib/src/util_lib/strings.rs @@ -74,13 +74,13 @@ impl fmt::Display for VecDisplay<'_, T> { impl fmt::Display for StacksString { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(String::from_utf8_lossy(&self).into_owned().as_str()) + f.write_str(String::from_utf8_lossy(self).into_owned().as_str()) } } impl fmt::Debug for StacksString { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(String::from_utf8_lossy(&self).into_owned().as_str()) + f.write_str(String::from_utf8_lossy(self).into_owned().as_str()) } } @@ -330,7 +330,7 @@ mod test { #[test] fn tx_stacks_strings_codec() { let s = "hello-world"; - let stacks_str = StacksString::from_str(&s).unwrap(); + let stacks_str = StacksString::from_str(s).unwrap(); let clarity_str = ClarityName::try_from(s).unwrap(); let contract_str = ContractName::try_from(s).unwrap(); @@ -359,10 +359,10 @@ mod test { #[test] fn tx_stacks_string_invalid() { let s = "hello\rworld"; - assert!(StacksString::from_str(&s).is_none()); + assert!(StacksString::from_str(s).is_none()); let s = "hello\x01world"; - assert!(StacksString::from_str(&s).is_none()); + assert!(StacksString::from_str(s).is_none()); } #[test] From 154ffbafa3ffdf5d6c2048739992f83dfc47e82e Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 8 Jan 2025 12:28:11 +0100 Subject: [PATCH 02/16] fixed race condition in tests assuming TEST_EVENT_OBSERVER_SKIP_RETRY is disabled --- testnet/stacks-node/src/event_dispatcher.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index da1668cdd2..d712092ecf 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -2058,6 +2058,9 @@ mod test { let url = &format!("{}/api", &server.url()); + // Ensure retrying is enabled on the test (as other tests will run in parallel) + TEST_EVENT_OBSERVER_SKIP_RETRY.lock().unwrap().replace(false); + // Insert payload EventObserver::insert_payload(&conn, url, &payload, timeout) .expect("Failed to insert payload"); @@ -2129,6 +2132,9 @@ mod test { let observer = EventObserver::new(Some(working_dir.clone()), endpoint, timeout); + // Ensure retrying is enabled on the test (as other tests will run in parallel) + TEST_EVENT_OBSERVER_SKIP_RETRY.lock().unwrap().replace(false); + // Call send_payload observer.send_payload(&payload, "/test"); From e8f003a0b50282aa9a174b9bd8b34f1bcd7d466a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 8 Jan 2025 13:56:12 +0100 Subject: [PATCH 03/16] stacks-fmt --- testnet/stacks-node/src/event_dispatcher.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index d712092ecf..4c01ae4ac3 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -2059,7 +2059,10 @@ mod test { let url = &format!("{}/api", &server.url()); // Ensure retrying is enabled on the test (as other tests will run in parallel) - TEST_EVENT_OBSERVER_SKIP_RETRY.lock().unwrap().replace(false); + TEST_EVENT_OBSERVER_SKIP_RETRY + .lock() + .unwrap() + .replace(false); // Insert payload EventObserver::insert_payload(&conn, url, &payload, timeout) @@ -2133,7 +2136,10 @@ mod test { let observer = EventObserver::new(Some(working_dir.clone()), endpoint, timeout); // Ensure retrying is enabled on the test (as other tests will run in parallel) - 
TEST_EVENT_OBSERVER_SKIP_RETRY.lock().unwrap().replace(false); + TEST_EVENT_OBSERVER_SKIP_RETRY + .lock() + .unwrap() + .replace(false); // Call send_payload observer.send_payload(&payload, "/test"); From e45112a938e8b7f10f38e7c12b1317fecf4807d5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 9 Jan 2025 11:14:29 -0500 Subject: [PATCH 04/16] CRC: cleanup Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/stacks/db/blocks.rs | 5 +++-- stackslib/src/chainstate/stacks/tests/mod.rs | 6 ++---- stackslib/src/net/api/gettenure.rs | 2 +- stackslib/src/net/p2p.rs | 11 ++++------- stackslib/src/net/tests/convergence.rs | 5 +---- 5 files changed, 11 insertions(+), 18 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 736a446652..29f633f43d 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -10020,8 +10020,9 @@ pub mod test { let block_hashes: Vec = blocks.iter().map(|b| b.block_hash()).collect(); let header_hashes_all: Vec<(ConsensusHash, Option)> = consensus_hashes .iter() - .zip(block_hashes.iter()) - .map(|(burn, block)| ((*burn).clone(), Some((*block).clone()))) + .cloned() + .zip(block_hashes.iter().cloned()) + .map(|(burn, block)| (burn, Some(block))) .collect(); // nothing is stored, so our inventory should be empty diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 1f347b4a53..49eddc57d8 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -967,10 +967,8 @@ pub fn get_last_microblock_header( miner: &TestMiner, parent_block_opt: Option<&StacksBlock>, ) -> Option { - let last_microblocks_opt = match parent_block_opt { - Some(block) => node.get_microblock_stream(miner, &block.block_hash()), - None => None, - }; + let last_microblocks_opt = + parent_block_opt.and_then(|block| 
node.get_microblock_stream(miner, &block.block_hash())); let last_microblock_header_opt = match last_microblocks_opt { Some(last_microblocks) => { diff --git a/stackslib/src/net/api/gettenure.rs b/stackslib/src/net/api/gettenure.rs index f569407c22..bfa314b686 100644 --- a/stackslib/src/net/api/gettenure.rs +++ b/stackslib/src/net/api/gettenure.rs @@ -191,7 +191,7 @@ impl HttpRequest for RPCNakamotoTenureRequestHandler { .map(|last_block_id_hex| StacksBlockId::from_hex(last_block_id_hex)) .transpose() .map_err(|e| { - Error::DecodeError(format!("Failed to parse stop= query parameter: {:?}", &e)) + Error::DecodeError(format!("Failed to parse stop= query parameter: {e:?}")) })?; self.last_block_id = last_block_id; diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 18cef6aafa..4bafd3447d 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -2280,13 +2280,10 @@ impl PeerNetwork { /// Get stats for a neighbor pub fn get_neighbor_stats(&self, nk: &NeighborKey) -> Option { - match self.events.get(nk) { - None => None, - Some(eid) => match self.peers.get(eid) { - None => None, - Some(convo) => Some(convo.stats.clone()), - }, - } + self.events + .get(nk) + .and_then(|eid| self.peers.get(eid)) + .map(|convo| convo.stats.clone()) } /// Update peer connections as a result of a peer graph walk. 
diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index af5afaea11..a86828d095 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -861,10 +861,7 @@ fn dump_peers(peers: &Vec) { } let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); - let num_allowed = all_neighbors.iter().fold(0, |mut sum, n2| { - sum += if n2.allowed < 0 { 1 } else { 0 }; - sum - }); + let num_allowed = all_neighbors.iter().filter(|n2| n2.allowed < 0).count(); test_debug!("Neighbor {} (all={}, outbound={}) (total neighbors = {}, total allowed = {}): outbound={:?} all={:?}", i, neighbor_index.len(), outbound_neighbor_index.len(), all_neighbors.len(), num_allowed, &outbound_neighbor_index, &neighbor_index); } test_debug!("\n"); From b8a51e386d3277d310b5a0c44d185027e930b7ef Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 21 Jan 2025 16:12:29 -0800 Subject: [PATCH 05/16] Fix test Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/stacks/db/blocks.rs | 49 +++++++++----------- 1 file changed, 23 insertions(+), 26 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index df37f46d07..681e4b2973 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -757,8 +757,8 @@ impl StacksChainState { /// Get all stacks block headers. Great for testing! 
pub fn get_all_staging_block_headers(blocks_conn: &DBConn) -> Result, Error> { - let sql = "SELECT * FROM staging_blocks ORDER BY height".to_string(); - query_rows::(blocks_conn, &sql, NO_PARAMS).map_err(Error::DBError) + let sql = "SELECT * FROM staging_blocks ORDER BY height"; + query_rows::(blocks_conn, sql, NO_PARAMS).map_err(Error::DBError) } /// Get a list of all microblocks' hashes, and their anchored blocks' hashes @@ -929,7 +929,7 @@ impl StacksChainState { table: &str, block_hash: &BlockHeaderHash, ) -> Result>, Error> { - let sql = format!("SELECT block_data FROM {} WHERE block_hash = ?1", table); + let sql = format!("SELECT block_data FROM {table} WHERE block_hash = ?1"); let args = [&block_hash]; let mut blobs = StacksChainState::load_block_data_blobs(block_conn, &sql, &args)?; let len = blobs.len(); @@ -982,10 +982,10 @@ impl StacksChainState { consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result, Error> { - let sql = "SELECT * FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND orphaned = 0 AND processed = 0".to_string(); + let sql = "SELECT * FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND orphaned = 0 AND processed = 0"; let args = params![block_hash, consensus_hash]; let mut rows = - query_rows::(block_conn, &sql, args).map_err(Error::DBError)?; + query_rows::(block_conn, sql, args).map_err(Error::DBError)?; let len = rows.len(); match len { 0 => Ok(None), @@ -1330,22 +1330,18 @@ impl StacksChainState { let sql = if start_seq == last_seq { // takes the same arguments as the range case below, but will - "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence == ?2 AND sequence == ?3 AND orphaned = 0 ORDER BY sequence ASC".to_string() + "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence == ?2 AND sequence == ?3 AND orphaned = 0 ORDER BY sequence ASC" } else { - "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND 
sequence >= ?2 AND sequence < ?3 AND orphaned = 0 ORDER BY sequence ASC".to_string() + "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 AND sequence < ?3 AND orphaned = 0 ORDER BY sequence ASC" }; let args = params![parent_index_block_hash, start_seq, last_seq]; let staging_microblocks = - query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; + query_rows::(blocks_conn, sql, args).map_err(Error::DBError)?; if staging_microblocks.is_empty() { // haven't seen any microblocks that descend from this block yet - test_debug!( - "No microblocks built on {} up to {}", - &parent_index_block_hash, - last_seq - ); + test_debug!("No microblocks built on {parent_index_block_hash} up to {last_seq}"); return Ok(None); } @@ -9444,31 +9440,32 @@ pub mod test { assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[0], blocks[0]); // process and store blocks 1 and N, as well as microblocks in-between - for (i, block) in blocks.iter().skip(1).enumerate() { + let len = blocks.len(); + for i in 1..len { // this is what happens at the end of append_block() // store block to staging and process it assert!(StacksChainState::load_staging_block_data( chainstate.db(), &chainstate.blocks_path, &consensus_hashes[i], - &block.block_hash() + &blocks[i].block_hash() ) .unwrap() .is_none()); store_staging_block( &mut chainstate, &consensus_hashes[i], - block, + blocks[i], &consensus_hashes[0], 1, 2, ); - assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], block); + assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], blocks[i]); set_block_processed( &mut chainstate, &consensus_hashes[i], - &block.block_hash(), + &blocks[i].block_hash(), true, ); @@ -9476,17 +9473,17 @@ pub mod test { set_microblocks_processed( &mut chainstate, &consensus_hashes[i], - &block.block_hash(), - &block.header.parent_microblock, + &blocks[i].block_hash(), + &blocks[i].header.parent_microblock, ); - 
assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[i], block); + assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[i], blocks[i]); let mblocks_confirmed = StacksChainState::load_processed_microblock_stream_fork( chainstate.db(), &consensus_hashes[0], &blocks[0].block_hash(), - &block.header.parent_microblock, + &blocks[i].header.parent_microblock, ) .unwrap() .unwrap(); @@ -9562,24 +9559,24 @@ pub mod test { } // store blocks to staging - for (i, block) in blocks.iter().enumerate() { + for i in 0..blocks.len() { assert!(StacksChainState::load_staging_block_data( chainstate.db(), &chainstate.blocks_path, &consensus_hashes[i], - &block.block_hash() + &blocks[i].block_hash() ) .unwrap() .is_none()); store_staging_block( &mut chainstate, &consensus_hashes[i], - block, + &blocks[i], &parent_consensus_hashes[i], 1, 2, ); - assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], block); + assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], &blocks[i]); } // reject block 1 From 4585774893c54e3d1787563c5e345dc108865da5 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 18:31:23 +0100 Subject: [PATCH 06/16] use LazyLock + TestFlag --- testnet/stacks-node/src/event_dispatcher.rs | 36 ++++++--------------- 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 4c01ae4ac3..89ac5e75de 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -73,6 +73,9 @@ use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; +#[cfg(test)] +use std::sync::LazyLock; + #[cfg(any(test, feature = "testing"))] lazy_static! { /// Do not announce a signed/mined block to the network when set to true. 
@@ -330,7 +333,7 @@ impl RewardSetEventPayload { } #[cfg(test)] -static TEST_EVENT_OBSERVER_SKIP_RETRY: std::sync::Mutex> = std::sync::Mutex::new(None); +static TEST_EVENT_OBSERVER_SKIP_RETRY: LazyLock> = LazyLock::new(TestFlag::default); impl EventObserver { fn init_db(db_path: &str) -> Result { @@ -440,11 +443,7 @@ impl EventObserver { Self::send_payload_directly(&payload, &url, timeout); #[cfg(test)] - if TEST_EVENT_OBSERVER_SKIP_RETRY - .lock() - .unwrap() - .unwrap_or(false) - { + if TEST_EVENT_OBSERVER_SKIP_RETRY.get() { warn!("Fault injection: delete_payload"); return; } @@ -509,11 +508,7 @@ impl EventObserver { } #[cfg(test)] - if TEST_EVENT_OBSERVER_SKIP_RETRY - .lock() - .unwrap() - .unwrap_or(false) - { + if TEST_EVENT_OBSERVER_SKIP_RETRY.get() { warn!("Fault injection: skipping retry of payload"); return; } @@ -2058,11 +2053,7 @@ mod test { let url = &format!("{}/api", &server.url()); - // Ensure retrying is enabled on the test (as other tests will run in parallel) - TEST_EVENT_OBSERVER_SKIP_RETRY - .lock() - .unwrap() - .replace(false); + TEST_EVENT_OBSERVER_SKIP_RETRY.set(false); // Insert payload EventObserver::insert_payload(&conn, url, &payload, timeout) @@ -2135,11 +2126,7 @@ mod test { let observer = EventObserver::new(Some(working_dir.clone()), endpoint, timeout); - // Ensure retrying is enabled on the test (as other tests will run in parallel) - TEST_EVENT_OBSERVER_SKIP_RETRY - .lock() - .unwrap() - .replace(false); + TEST_EVENT_OBSERVER_SKIP_RETRY.set(false); // Call send_payload observer.send_payload(&payload, "/test"); @@ -2400,7 +2387,7 @@ mod test { // Disable retrying so that it sends the payload only once // and that payload will be ignored by the test server. 
- TEST_EVENT_OBSERVER_SKIP_RETRY.lock().unwrap().replace(true); + TEST_EVENT_OBSERVER_SKIP_RETRY.set(true); info!("Sending payload 1"); @@ -2408,10 +2395,7 @@ mod test { observer.send_payload(&payload, "/test"); // Re-enable retrying - TEST_EVENT_OBSERVER_SKIP_RETRY - .lock() - .unwrap() - .replace(false); + TEST_EVENT_OBSERVER_SKIP_RETRY.set(false); info!("Sending payload 2"); From d892a8996b506ba8672f4343725aebec166e2b9f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 22 Jan 2025 16:31:42 -0500 Subject: [PATCH 07/16] chore: improved loops --- stackslib/src/chainstate/stacks/db/blocks.rs | 46 ++++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 681e4b2973..d9fba0ea48 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -9440,32 +9440,31 @@ pub mod test { assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[0], blocks[0]); // process and store blocks 1 and N, as well as microblocks in-between - let len = blocks.len(); - for i in 1..len { + for (i, block) in blocks.iter().enumerate().skip(1) { // this is what happens at the end of append_block() // store block to staging and process it assert!(StacksChainState::load_staging_block_data( chainstate.db(), &chainstate.blocks_path, &consensus_hashes[i], - &blocks[i].block_hash() + &block.block_hash() ) .unwrap() .is_none()); store_staging_block( &mut chainstate, &consensus_hashes[i], - blocks[i], + block, &consensus_hashes[0], 1, 2, ); - assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], blocks[i]); + assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], block); set_block_processed( &mut chainstate, &consensus_hashes[i], - &blocks[i].block_hash(), + &block.block_hash(), true, ); @@ -9473,17 +9472,17 @@ pub mod test { set_microblocks_processed( &mut chainstate, 
&consensus_hashes[i], - &blocks[i].block_hash(), - &blocks[i].header.parent_microblock, + &block.block_hash(), + &block.header.parent_microblock, ); - assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[i], blocks[i]); + assert_block_stored_not_staging(&mut chainstate, &consensus_hashes[i], block); let mblocks_confirmed = StacksChainState::load_processed_microblock_stream_fork( chainstate.db(), &consensus_hashes[0], &blocks[0].block_hash(), - &blocks[i].header.parent_microblock, + &block.header.parent_microblock, ) .unwrap() .unwrap(); @@ -9559,24 +9558,24 @@ pub mod test { } // store blocks to staging - for i in 0..blocks.len() { + for (i, block) in blocks.iter().enumerate() { assert!(StacksChainState::load_staging_block_data( chainstate.db(), &chainstate.blocks_path, &consensus_hashes[i], - &blocks[i].block_hash() + &block.block_hash() ) .unwrap() .is_none()); store_staging_block( &mut chainstate, &consensus_hashes[i], - &blocks[i], + &block, &parent_consensus_hashes[i], 1, 2, ); - assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], &blocks[i]); + assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], &block); } // reject block 1 @@ -9588,16 +9587,16 @@ pub mod test { ); // destroy all descendants - for i in 0..blocks.len() { + for (i, block) in blocks.iter().enumerate() { // confirm that block i is deleted, as are its microblocks - assert_block_stored_rejected(&mut chainstate, &consensus_hashes[i], &blocks[i]); + assert_block_stored_rejected(&mut chainstate, &consensus_hashes[i], block); // block i's microblocks should all be marked as processed, orphaned, and deleted - for mblock in microblocks[i].iter() { + for mblock in µblocks[i] { assert!(StacksChainState::load_staging_microblock( chainstate.db(), &consensus_hashes[i], - &blocks[i].block_hash(), + &block.block_hash(), &mblock.block_hash() ) .unwrap() @@ -9611,30 +9610,31 @@ pub mod test { .is_none()); } - if i + 1 < blocks.len() { + // Check block i+1 
if it exists + if let Some(next_block) = blocks.get(i + 1) { // block i+1 should be marked as an orphan, but its data should still be there assert!(StacksChainState::load_staging_block( chainstate.db(), &chainstate.blocks_path, &consensus_hashes[i + 1], - &blocks[i + 1].block_hash() + &next_block.block_hash() ) .unwrap() .is_none()); assert!(!StacksChainState::load_block_bytes( &chainstate.blocks_path, &consensus_hashes[i + 1], - &blocks[i + 1].block_hash() + &next_block.block_hash() ) .unwrap() .unwrap() .is_empty()); - for mblock in microblocks[i + 1].iter() { + for mblock in µblocks[i + 1] { let staging_mblock = StacksChainState::load_staging_microblock( chainstate.db(), &consensus_hashes[i + 1], - &blocks[i + 1].block_hash(), + &next_block.block_hash(), &mblock.block_hash(), ) .unwrap() From af9f7eabf6e024b94a1998fe2cc2c6f87fb848f5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 22 Jan 2025 13:54:24 -0800 Subject: [PATCH 08/16] Fix last of needless refs in loops Signed-off-by: Jacinta Ferrant --- clarity/src/vm/ast/parser/v2/mod.rs | 4 +--- stackslib/src/chainstate/stacks/db/blocks.rs | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/clarity/src/vm/ast/parser/v2/mod.rs b/clarity/src/vm/ast/parser/v2/mod.rs index d6d1500e54..dd5a900364 100644 --- a/clarity/src/vm/ast/parser/v2/mod.rs +++ b/clarity/src/vm/ast/parser/v2/mod.rs @@ -1121,9 +1121,7 @@ mod tests { use super::*; use crate::vm::diagnostic::Level; use crate::vm::representations::PreSymbolicExpressionType; - use crate::vm::types::{ - ASCIIData, CharType, PrincipalData, SequenceData, StandardPrincipalData, UTF8Data, - }; + use crate::vm::types::{ASCIIData, CharType, PrincipalData, SequenceData}; #[test] fn test_parse_int() { diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 89e6e4b436..4e2c702471 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -9570,12 
+9570,12 @@ pub mod test { store_staging_block( &mut chainstate, &consensus_hashes[i], - &block, + block, &parent_consensus_hashes[i], 1, 2, ); - assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], &block); + assert_block_staging_not_processed(&mut chainstate, &consensus_hashes[i], block); } // reject block 1 From f95abc55762d205984e2660b704295930b3d7e4e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 23 Jan 2025 14:47:34 -0800 Subject: [PATCH 09/16] Apply clippy::unnecessary_to_owned and clippy::unwrap_or_default Signed-off-by: Jacinta Ferrant --- stackslib/src/burnchains/bitcoin/blocks.rs | 2 +- stackslib/src/burnchains/bitcoin/indexer.rs | 4 +- stackslib/src/burnchains/tests/mod.rs | 16 ++--- stackslib/src/chainstate/coordinator/tests.rs | 2 +- .../chainstate/nakamoto/coordinator/tests.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 4 +- .../src/chainstate/nakamoto/tests/mod.rs | 6 +- stackslib/src/chainstate/stacks/block.rs | 8 +-- .../src/chainstate/stacks/boot/pox_4_tests.rs | 4 +- .../src/chainstate/stacks/db/accounts.rs | 30 +++------ stackslib/src/chainstate/stacks/db/blocks.rs | 12 ++-- stackslib/src/chainstate/stacks/db/mod.rs | 2 +- .../src/chainstate/stacks/index/test/marf.rs | 62 +++++++++---------- .../src/chainstate/stacks/index/test/node.rs | 2 +- .../src/chainstate/stacks/index/test/trie.rs | 2 +- stackslib/src/chainstate/stacks/mod.rs | 6 +- .../stacks/tests/chain_histories.rs | 2 +- .../src/chainstate/stacks/transaction.rs | 4 +- stackslib/src/main.rs | 2 +- stackslib/src/net/api/getneighbors.rs | 4 +- .../src/net/api/tests/postblock_proposal.rs | 2 +- stackslib/src/net/db.rs | 7 +-- stackslib/src/net/dns.rs | 7 ++- .../nakamoto/download_state_machine.rs | 8 +-- stackslib/src/net/http/request.rs | 2 +- stackslib/src/net/inv/epoch2x.rs | 4 +- stackslib/src/net/p2p.rs | 4 +- stackslib/src/net/prune.rs | 2 +- stackslib/src/net/relay.rs | 10 +-- stackslib/src/net/stackerdb/tests/sync.rs | 2 +- 
stackslib/src/net/tests/download/nakamoto.rs | 14 ++--- stackslib/src/net/tests/mod.rs | 2 +- .../burnchains/bitcoin_regtest_controller.rs | 2 +- 33 files changed, 116 insertions(+), 126 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/blocks.rs b/stackslib/src/burnchains/bitcoin/blocks.rs index 879df2e82b..3436224e81 100644 --- a/stackslib/src/burnchains/bitcoin/blocks.rs +++ b/stackslib/src/burnchains/bitcoin/blocks.rs @@ -437,7 +437,7 @@ impl BitcoinBlockParser { match (inputs_opt, outputs_opt) { (Some(inputs), Some(outputs)) => { Some(BitcoinTransaction { - txid: Txid::from_vec_be(&tx.txid().as_bytes().to_vec()).unwrap(), // this *should* panic if it fails + txid: Txid::from_vec_be(tx.txid().as_bytes().as_ref()).unwrap(), // this *should* panic if it fails vtxindex: vtxindex as u32, opcode, data, diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 81e08c9017..0fd59eda6e 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -227,7 +227,7 @@ impl BitcoinIndexer { // instantiate headers DB let _ = SpvClient::new( - &working_dir_path.to_str().unwrap().to_string(), + working_dir_path.to_str().unwrap(), 0, None, BitcoinNetworkType::Regtest, @@ -236,7 +236,7 @@ impl BitcoinIndexer { ) .expect(&format!( "Failed to open {:?}", - &working_dir_path.to_str().unwrap().to_string() + working_dir_path.to_str().unwrap() )); BitcoinIndexer { diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 23232ac3b4..ec67bc1c5e 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -241,13 +241,15 @@ impl TestMiner { ); match self.vrf_key_map.get(vrf_pubkey) { Some(ref prover_key) => { - let proof = VRF::prove(prover_key, &last_sortition_hash.as_bytes().to_vec()); - let valid = - match VRF::verify(vrf_pubkey, &proof, &last_sortition_hash.as_bytes().to_vec()) - { - Ok(v) => v, - Err(e) 
=> false, - }; + let proof = VRF::prove(prover_key, last_sortition_hash.as_bytes().as_ref()); + let valid = match VRF::verify( + vrf_pubkey, + &proof, + last_sortition_hash.as_bytes().as_ref(), + ) { + Ok(v) => v, + Err(e) => false, + }; assert!(valid); Some(proof) } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index d1a1506e18..be42a14f0a 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -369,7 +369,7 @@ pub fn setup_states_with_epochs( ); let block_limit = ExecutionCost::max_value(); - let initial_balances = initial_balances.unwrap_or(vec![]); + let initial_balances = initial_balances.unwrap_or_default(); for path in paths.iter() { let burnchain = get_burnchain(path, pox_consts.clone()); diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 3c4a2a4057..f624168b4d 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -237,7 +237,7 @@ pub fn make_replay_peer<'a>(peer: &mut TestPeer<'a>) -> TestPeer<'a> { replay_config.http_port = 0; replay_config.test_stackers = peer.config.test_stackers.clone(); - let test_stackers = replay_config.test_stackers.clone().unwrap_or(vec![]); + let test_stackers = replay_config.test_stackers.clone().unwrap_or_default(); let mut test_signers = replay_config.test_signers.clone().unwrap(); let mut replay_peer = TestPeer::new(replay_config); let observer = TestEventObserver::new(); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e574af50c2..2273c56773 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4123,7 +4123,7 @@ impl NakamotoChainState { .iter() .enumerate() .fold(HashMap::new(), |mut map, (ix, addr)| { - 
map.entry(addr).or_insert_with(Vec::new).push(ix); + map.entry(addr).or_default().push(ix); map }); @@ -4524,7 +4524,7 @@ impl NakamotoChainState { let matured_rewards = matured_miner_rewards_opt .as_ref() .map(|matured_miner_rewards| matured_miner_rewards.consolidate()) - .unwrap_or(vec![]); + .unwrap_or_default(); let mut lockup_events = match Self::finish_block(&mut clarity_tx, matured_miner_rewards_opt.as_ref()) { diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index dc8ebb453a..990cc2aff9 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -259,7 +259,7 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { }; let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let coinbase_payload = TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof.clone())); @@ -591,10 +591,10 @@ pub fn test_load_store_update_nakamoto_blocks() { let private_key = StacksPrivateKey::new(); let epoch2_proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let epoch2_proof = VRFProof::from_bytes(&epoch2_proof_bytes[..].to_vec()).unwrap(); + let epoch2_proof = VRFProof::from_bytes(&epoch2_proof_bytes[..]).unwrap(); let nakamoto_proof_bytes = hex_bytes("973c815ac3e81a4aff3243f3d8310d24ab9783acd6caa4dcfab20a3744584b2f966acf08140e1a7e1e685695d51b1b511f4f19260a21887244a6c47f7637b8bdeaf5eafe85c1975bab75bc0668fe8a0b").unwrap(); - let nakamoto_proof = VRFProof::from_bytes(&nakamoto_proof_bytes[..].to_vec()).unwrap(); + 
let nakamoto_proof = VRFProof::from_bytes(&nakamoto_proof_bytes[..]).unwrap(); let coinbase_payload = TransactionPayload::Coinbase( CoinbasePayload([0x12; 32]), diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 71956c6886..47df44644e 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -969,7 +969,7 @@ mod test { #[test] fn codec_stacks_block_ecvrf_proof() { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); check_codec_and_corruption::(&proof, &proof_bytes); } @@ -991,7 +991,7 @@ mod test { #[test] fn codec_stacks_block_header() { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let header = StacksBlockHeader { version: 0x12, @@ -1710,7 +1710,7 @@ mod test { tx_merkle_root }; let mut block_header_dup_tx = header.clone(); - block_header_dup_tx.tx_merkle_root = get_tx_root(&txs.to_vec()); + block_header_dup_tx.tx_merkle_root = get_tx_root(txs); let block = StacksBlock { header: block_header_dup_tx, @@ -1957,7 +1957,7 @@ mod test { ); let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let tx_coinbase_proof = 
StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 58de8664b7..28d5e731ef 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -7592,8 +7592,8 @@ fn test_deser_abort() { "; let tx_payload = TransactionPayload::new_smart_contract( - &format!("hello-world"), - &contract.to_string(), + "hello-world", + contract, Some(ClarityVersion::Clarity2), ) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index bf00b00b54..6cf29cd7d7 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -1209,11 +1209,9 @@ mod test { fn get_tip_ancestor() { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); let miner_1 = - StacksAddress::from_string(&"SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5".to_string()) - .unwrap(); + StacksAddress::from_string("SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5").unwrap(); let user_1 = - StacksAddress::from_string(&"SP2837ZMC89J40K4YTS64B00M7065C6X46JX6ARG0".to_string()) - .unwrap(); + StacksAddress::from_string("SP2837ZMC89J40K4YTS64B00M7065C6X46JX6ARG0").unwrap(); let mut miner_reward = make_dummy_miner_payment_schedule(&miner_1, 500, 0, 0, 1000, 1000); let user_reward = make_dummy_user_payment_schedule(&user_1, 500, 0, 0, 750, 1000, 1); @@ -1276,8 +1274,7 @@ mod test { fn load_store_miner_payment_schedule() { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); let miner_1 = - StacksAddress::from_string(&"SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5".to_string()) - .unwrap(); + StacksAddress::from_string("SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5").unwrap(); let mut miner_reward = make_dummy_miner_payment_schedule(&miner_1, 500, 0, 0, 1000, 1000); @@ -1322,8 
+1319,7 @@ mod test { fn load_store_miner_payment_schedule_pay_contract() { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); let miner_1 = - StacksAddress::from_string(&"SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5".to_string()) - .unwrap(); + StacksAddress::from_string("SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5").unwrap(); let mut miner_reward = make_dummy_miner_payment_schedule(&miner_1, 500, 0, 0, 1000, 1000); miner_reward.recipient = PrincipalData::Contract(QualifiedContractIdentifier::transient()); @@ -1368,8 +1364,7 @@ mod test { #[test] fn miner_reward_one_miner_no_tx_fees_no_users() { let miner_1 = - StacksAddress::from_string(&"SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5".to_string()) - .unwrap(); + StacksAddress::from_string("SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5").unwrap(); let participant = make_dummy_miner_payment_schedule(&miner_1, 500, 0, 0, 1000, 1000); let (parent_reward, miner_reward) = StacksChainState::calculate_miner_reward( @@ -1398,8 +1393,7 @@ mod test { #[test] fn miner_reward_one_miner_no_tx_fees_no_users_pay_contract() { let miner_1 = - StacksAddress::from_string(&"SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5".to_string()) - .unwrap(); + StacksAddress::from_string("SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5").unwrap(); let mut participant = make_dummy_miner_payment_schedule(&miner_1, 500, 0, 0, 1000, 1000); participant.recipient = PrincipalData::Contract(QualifiedContractIdentifier::transient()); @@ -1437,11 +1431,9 @@ mod test { #[test] fn miner_reward_one_miner_one_user_no_tx_fees() { let miner_1 = - StacksAddress::from_string(&"SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5".to_string()) - .unwrap(); + StacksAddress::from_string("SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5").unwrap(); let user_1 = - StacksAddress::from_string(&"SP2837ZMC89J40K4YTS64B00M7065C6X46JX6ARG0".to_string()) - .unwrap(); + StacksAddress::from_string("SP2837ZMC89J40K4YTS64B00M7065C6X46JX6ARG0").unwrap(); let miner = 
make_dummy_miner_payment_schedule(&miner_1, 500, 0, 0, 250, 1000); let user = make_dummy_user_payment_schedule(&user_1, 500, 0, 0, 750, 1000, 1); @@ -1485,12 +1477,10 @@ mod test { #[test] fn miner_reward_tx_fees() { let miner_1 = - StacksAddress::from_string(&"SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5".to_string()) - .unwrap(); + StacksAddress::from_string("SP1A2K3ENNA6QQ7G8DVJXM24T6QMBDVS7D0TRTAR5").unwrap(); let parent_miner_1 = - StacksAddress::from_string(&"SP2QDF700V0FWXVNQJJ4XFGBWE6R2Y4APTSFQNBVE".to_string()) - .unwrap(); + StacksAddress::from_string("SP2QDF700V0FWXVNQJJ4XFGBWE6R2Y4APTSFQNBVE").unwrap(); let participant = make_dummy_miner_payment_schedule(&miner_1, 500, 100, 105, 1000, 1000); let parent_participant = diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 2ea8604bc9..10c026423d 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -475,7 +475,7 @@ impl StacksChainState { let _ = StacksChainState::mkdirs(&block_path)?; - block_path.push(to_hex(block_hash_bytes).to_string()); + block_path.push(to_hex(block_hash_bytes)); let blocks_path_str = block_path .to_str() .ok_or_else(|| Error::DBError(db_error::ParseError))? @@ -996,7 +996,7 @@ impl StacksChainState { // load up associated block data staging_block.block_data = StacksChainState::load_block_bytes(blocks_path, consensus_hash, block_hash)? - .unwrap_or(vec![]); + .unwrap_or_default(); Ok(Some(staging_block)) } _ => { @@ -1162,7 +1162,7 @@ impl StacksChainState { // load associated block data staging_microblock.block_data = StacksChainState::load_staging_microblock_bytes(blocks_conn, microblock_hash)? 
- .unwrap_or(vec![]); + .unwrap_or_default(); Ok(Some(staging_microblock)) } None => { @@ -6980,7 +6980,7 @@ pub mod test { .unwrap(); let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, @@ -7045,7 +7045,7 @@ pub mod test { .unwrap(); let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, @@ -8593,7 +8593,7 @@ pub mod test { let num_mblocks = microblocks.len(); let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let child_block_header = StacksBlockHeader { version: 0x01, diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index b70eaf0d0c..a90d9830cf 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2511,7 +2511,7 @@ impl StacksChainState { Ok(txids) }) .optional()? 
- .unwrap_or(vec![]); + .unwrap_or_default(); Ok(txids) } diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index c08e230fc5..350f4e25cc 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -77,8 +77,8 @@ fn marf_insert_different_leaf_same_block_100() { merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, - &path_bytes.to_vec(), - &[99; 40].to_vec(), + path_bytes.as_ref(), + [99; 40].as_ref(), None, ); @@ -151,8 +151,8 @@ fn marf_insert_different_leaf_different_path_different_block_100() { merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, - &path_bytes.to_vec(), - &[i; 40].to_vec(), + path_bytes.as_ref(), + [i; 40].as_ref(), None, ); } @@ -232,8 +232,8 @@ fn marf_insert_same_leaf_different_block_100() { merkle_test_marf( &mut marf.borrow_storage_backend(), &next_block_header, - &path_bytes.to_vec(), - &[i; 40].to_vec(), + path_bytes.as_ref(), + [i; 40].as_ref(), None, ); } @@ -312,8 +312,8 @@ fn marf_insert_leaf_sequence_2() { merkle_test_marf( &mut marf.borrow_storage_backend(), &last_block_header, - &path_bytes.to_vec(), - &[i; 40].to_vec(), + path_bytes.as_ref(), + [i; 40].as_ref(), None, ); } @@ -383,8 +383,8 @@ fn marf_insert_leaf_sequence_100() { merkle_test_marf( &mut f, &last_block_header, - &path_bytes.to_vec(), - &[i; 40].to_vec(), + path_bytes.as_ref(), + [i; 40].as_ref(), None, ); } @@ -616,15 +616,15 @@ where debug!( "MARF verify {:?} {:?} from current block header (immediate) {:?}", &prev_path, - &[j as u8; 40].to_vec(), + [j as u8; 40].as_ref(), &next_block_header ); debug!("----------------------------------------"); merkle_test_marf( &mut marf.borrow_storage_backend(), &next_block_header, - &prev_path.to_vec(), - &[j as u8; 40].to_vec(), + prev_path.as_ref(), + [j as u8; 40].as_ref(), None, ); } @@ -641,15 +641,15 @@ where debug!( "MARF verify {:?} {:?} from current block header 
(deferred) {:?}", &prev_path, - &[j as u8; 40].to_vec(), + [j as u8; 40].as_ref(), &next_block_header ); debug!("----------------------------------------"); merkle_test_marf( &mut marf.borrow_storage_backend(), &next_block_header, - &prev_path.to_vec(), - &[j as u8; 40].to_vec(), + prev_path.as_ref(), + [j as u8; 40].as_ref(), None, ); } @@ -662,8 +662,8 @@ where merkle_test_marf( &mut marf.borrow_storage_backend(), &next_block_header, - &next_path.to_vec(), - &[i as u8; 40].to_vec(), + next_path.as_ref(), + [i as u8; 40].as_ref(), None, ); } @@ -692,15 +692,15 @@ where debug!( "MARF verify {:?} {:?} from last block header {:?}", &next_path, - &[i as u8; 40].to_vec(), + [i as u8; 40].as_ref(), &last_block_header ); debug!("----------------------------------------"); merkle_test_marf( &mut marf.borrow_storage_backend(), &last_block_header, - &next_path.to_vec(), - &[i as u8; 40].to_vec(), + next_path.as_ref(), + [i as u8; 40].as_ref(), None, ); } @@ -882,7 +882,7 @@ fn marf_merkle_verify_backptrs() { &mut marf.borrow_storage_backend(), &block_header_3, &path_3, - &[21; 40].to_vec(), + [21; 40].as_ref(), None, ); if let Some(root_hashes) = last_root_hashes.take() { @@ -957,7 +957,7 @@ where root_table_cache = Some(merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, - &path.to_vec(), + path.as_ref(), &value.data.to_vec(), root_table_cache, )); @@ -1018,7 +1018,7 @@ where root_table_cache = Some(merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, - &path.to_vec(), + path.as_ref(), &value.data.to_vec(), root_table_cache, )); @@ -1331,11 +1331,11 @@ fn marf_insert_random_10485760_4096_file_storage() { let key = to_hex(&path); let value = to_hex( - &[ + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, i0 as u8, i1 as u8, i2 as u8, i3 as u8, ] - .to_vec(), + .as_ref(), ); keys.push(key); @@ -1389,11 +1389,11 @@ fn marf_insert_random_10485760_4096_file_storage() { let key = 
to_hex(&path); let value = to_hex( - &[ + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, i0 as u8, i1 as u8, i2 as u8, i3 as u8, ] - .to_vec(), + .as_ref(), ); keys.push(key); @@ -1621,7 +1621,7 @@ fn marf_read_random_1048576_4096_file_storage() { merkle_test_marf( &mut f, &block_header, - &path.to_vec(), + path.as_ref(), &value.data.to_vec(), None, ); @@ -1927,7 +1927,7 @@ fn marf_insert_flush_to_different_block() { root_table_cache = Some(merkle_test_marf( &mut marf.borrow_storage_backend(), &target_block, - &path.to_vec(), + path.as_ref(), &value.data.to_vec(), root_table_cache, )); @@ -2047,7 +2047,7 @@ fn marf_insert_flush_to_different_block() { root_table_cache = Some(merkle_test_marf( &mut marf.borrow_storage_backend(), &read_from_block, - &path.to_vec(), + path.as_ref(), &value.data.to_vec(), root_table_cache, )); diff --git a/stackslib/src/chainstate/stacks/index/test/node.rs b/stackslib/src/chainstate/stacks/index/test/node.rs index dc9518267a..9246689d74 100644 --- a/stackslib/src/chainstate/stacks/index/test/node.rs +++ b/stackslib/src/chainstate/stacks/index/test/node.rs @@ -4253,7 +4253,7 @@ fn trie_cursor_walk_full() { assert_eq!(ptr, node_ptrs[31]); assert_eq!( node, - TrieNodeType::Leaf(TrieLeaf::new(&[], &[31u8; 40].to_vec())) + TrieNodeType::Leaf(TrieLeaf::new(&[], [31u8; 40].as_ref())) ); assert_eq!(hash, hashes[31]); diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs index 53b86bad83..f4be2fdfd0 100644 --- a/stackslib/src/chainstate/stacks/index/test/trie.rs +++ b/stackslib/src/chainstate/stacks/index/test/trie.rs @@ -172,7 +172,7 @@ fn trie_cursor_try_attach_leaf() { assert!(leaf_opt.is_some()); let leaf = leaf_opt.unwrap(); - assert_eq!(leaf, TrieLeaf::new(&path[i + 1..].to_vec(), &[i as u8; 40])); + assert_eq!(leaf, TrieLeaf::new(&path[i + 1..], &[i as u8; 40])); // without a MARF commit, merkle tests will fail in 
deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index dd4191a578..c7df211857 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1395,7 +1395,7 @@ pub mod test { let stx_address = StacksAddress::new(1, Hash160([0xff; 20])).unwrap(); let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let mut tx_payloads = vec![ TransactionPayload::TokenTransfer( stx_address.into(), @@ -1531,7 +1531,7 @@ pub mod test { pub fn make_codec_test_block(num_txs: usize, epoch_id: StacksEpochId) -> StacksBlock { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let privk = StacksPrivateKey::from_hex( "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", @@ -1626,7 +1626,7 @@ pub mod test { miner_privk: &StacksPrivateKey, ) -> NakamotoBlock { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let privk = StacksPrivateKey::from_hex( "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", diff --git 
a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index e9063df4bb..815d9357f0 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -2634,7 +2634,7 @@ fn miner_trace_replay_randomized(miner_trace: &mut TestMinerTrace) { match stacks_block_opt { Some(stacks_block) => { - let mut microblocks = microblocks_opt.unwrap_or(vec![]); + let mut microblocks = microblocks_opt.unwrap_or_default(); // "discover" the stacks block and its microblocks in all nodes // TODO: randomize microblock discovery order too diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 177e7bf3f2..a4212d9b0b 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -2160,7 +2160,7 @@ mod test { #[test] fn tx_stacks_transaction_payload_nakamoto_coinbase() { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let coinbase_payload = TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof)); @@ -2291,7 +2291,7 @@ mod test { #[test] fn tx_stacks_transaction_payload_nakamoto_coinbase_alt_recipient() { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let recipient = PrincipalData::from(QualifiedContractIdentifier { issuer: StacksAddress::new(1, Hash160([0xff; 
20])).unwrap().into(), diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 2e63d0d128..afaa9b575f 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -540,7 +540,7 @@ fn main() { let microblocks = StacksChainState::find_parent_microblock_stream(chainstate.db(), &block_info) .unwrap() - .unwrap_or(vec![]); + .unwrap_or_default(); let mut mblock_report = vec![]; for mblock in microblocks.iter() { diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs index 06f01e6e85..d0631ced72 100644 --- a/stackslib/src/net/api/getneighbors.rs +++ b/stackslib/src/net/api/getneighbors.rs @@ -136,7 +136,7 @@ impl RPCNeighborsInfo { .into_iter() .map(|n| { let stackerdb_contract_ids = - PeerDB::static_get_peer_stacker_dbs(peerdb_conn, &n).unwrap_or(vec![]); + PeerDB::static_get_peer_stacker_dbs(peerdb_conn, &n).unwrap_or_default(); RPCNeighbor::from_neighbor_key_and_pubkh( n.addr.clone(), Hash160::from_node_public_key(&n.public_key), @@ -163,7 +163,7 @@ impl RPCNeighborsInfo { .into_iter() .map(|n| { let stackerdb_contract_ids = - PeerDB::static_get_peer_stacker_dbs(peerdb_conn, &n).unwrap_or(vec![]); + PeerDB::static_get_peer_stacker_dbs(peerdb_conn, &n).unwrap_or_default(); RPCNeighbor::from_neighbor_key_and_pubkh( n.addr.clone(), Hash160::from_node_public_key(&n.public_key), diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index af239ee078..11158070ef 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -253,7 +253,7 @@ fn test_try_make_response() { .unwrap(); let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = 
VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let privk = StacksPrivateKey::from_hex( "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index d9ebfcb40d..172adb051c 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -140,7 +140,7 @@ impl LocalPeer { data_url: UrlString, stacker_dbs: Vec, ) -> LocalPeer { - let mut pkey = privkey.unwrap_or(Secp256k1PrivateKey::new()); + let mut pkey = privkey.unwrap_or_default(); pkey.set_compress_public(true); let mut rng = thread_rng(); @@ -2869,10 +2869,7 @@ mod test { let n15_fresh = PeerDB::get_initial_neighbors(db.conn(), 0x9abcdef0, 0x78, 15, 23456 + 14).unwrap(); - assert!(are_present( - &n15_fresh[10..15].to_vec(), - &initial_neighbors[10..20].to_vec() - )); + assert!(are_present(&n15_fresh[10..15], &initial_neighbors[10..20])); for n in &n15_fresh[10..15] { assert!(n.expire_block > 23456 + 14); assert!(n.allowed == 0); diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index 6529001d7d..11a70e3c5a 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -420,13 +420,14 @@ mod test { client.try_recv().unwrap(); for name in names.iter() { - if resolved_addrs.contains_key(&name.to_string()) { + let name_string = name.to_string(); + if resolved_addrs.contains_key(&name_string) { continue; } match client.poll_lookup(name, 80).unwrap() { Some(addrs) => { - test_debug!("name {} addrs: {:?}", name, &addrs); - resolved_addrs.insert(name.to_string(), addrs); + test_debug!("name {name} addrs: {addrs:?}"); + resolved_addrs.insert(name_string, addrs); break; } None => {} diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 53e5aec0af..6c0b2da19b 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -759,7 +759,7 @@ impl 
NakamotoDownloadStateMachine { inventories.iter(), ) }) - .unwrap_or(HashMap::new()); + .unwrap_or_default(); let mut available = Self::find_available_tenures( self.reward_cycle, @@ -783,7 +783,7 @@ impl NakamotoDownloadStateMachine { inventories.iter(), ) }) - .unwrap_or(HashMap::new()); + .unwrap_or_default(); let mut tenure_block_ids = { debug!( @@ -822,7 +822,7 @@ impl NakamotoDownloadStateMachine { &available, ) }) - .unwrap_or(VecDeque::new()); + .unwrap_or_default(); let schedule = Self::make_ibd_download_schedule( self.nakamoto_start_height, @@ -843,7 +843,7 @@ impl NakamotoDownloadStateMachine { &available, ) }) - .unwrap_or(VecDeque::new()); + .unwrap_or_default(); let schedule = Self::make_rarest_first_download_schedule( self.nakamoto_start_height, diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index 014ab8ef49..35b4c6293a 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -543,7 +543,7 @@ impl HttpRequestContents { } kv }) - .unwrap_or(HashMap::new()) + .unwrap_or_default() } /// chain constructor -- add a query strings' values to the existing values, and also diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 9b9e7b3682..9e25e687d3 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -2275,7 +2275,7 @@ impl PeerNetwork { let bootstrap_peers: HashSet<_> = PeerDB::get_bootstrap_peers(&network.peerdb.conn(), network.local_peer.network_id) - .unwrap_or(vec![]) + .unwrap_or_default() .into_iter() .map(|neighbor| neighbor.addr) .collect(); @@ -2717,7 +2717,7 @@ impl PeerNetwork { // in our inv state let always_allowed: HashSet<_> = PeerDB::get_always_allowed_peers(&self.peerdb.conn(), self.local_peer.network_id) - .unwrap_or(vec![]) + .unwrap_or_default() .into_iter() .map(|neighbor| neighbor.addr) .collect(); diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index c4cb86f88e..0fdfd469a3 100644 --- 
a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -3256,7 +3256,7 @@ impl PeerNetwork { .inv_state .as_ref() .map(|inv_state| inv_state.block_stats.keys().cloned().collect()) - .unwrap_or(vec![]); + .unwrap_or_default(); if self.antientropy_start_reward_cycle == 0 { debug!( @@ -3680,7 +3680,7 @@ impl PeerNetwork { ); e }) - .unwrap_or(HashMap::new()); + .unwrap_or_default(); network_result.consume_nakamoto_blocks(new_blocks); diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index f178ea719a..cfb31fae5a 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -423,7 +423,7 @@ impl PeerNetwork { let pruned_by_org = self .prune_frontier_outbound_orgs(preserve) - .unwrap_or(vec![]); + .unwrap_or_default(); debug!( "{:?}: remove {} outbound peers by shared Org", diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 4c351db408..2a74c7b730 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2608,8 +2608,8 @@ impl Relayer { ) { // have the p2p thread tell our neighbors about newly-discovered blocks let new_block_chs = new_blocks.keys().cloned().collect(); - let available = Relayer::load_blocks_available_data(sortdb, new_block_chs) - .unwrap_or(BlocksAvailableMap::new()); + let available = + Relayer::load_blocks_available_data(sortdb, new_block_chs).unwrap_or_default(); if !available.is_empty() { debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); if let Err(e) = self.p2p.advertize_blocks(available, new_blocks) { @@ -2619,8 +2619,8 @@ impl Relayer { // have the p2p thread tell our neighbors about newly-discovered confirmed microblock streams let new_mblock_chs = new_confirmed_microblocks.keys().cloned().collect(); - let mblocks_available = Relayer::load_blocks_available_data(sortdb, new_mblock_chs) - .unwrap_or(BlocksAvailableMap::new()); + let mblocks_available = + Relayer::load_blocks_available_data(sortdb, new_mblock_chs).unwrap_or_default(); if 
!mblocks_available.is_empty() { debug!( "{:?}: Confirmed microblock streams available: {}", @@ -2929,7 +2929,7 @@ impl Relayer { mempool, event_observer.map(|obs| obs.as_mempool_event_dispatcher()), ) - .unwrap_or(vec![]); + .unwrap_or_default(); if !new_txs.is_empty() { debug!( diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index d4660803d2..6071d0c697 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -177,7 +177,7 @@ fn load_stackerdb(peer: &TestPeer, idx: usize) -> Vec<(SlotMetadata, Vec)> { .stackerdbs .get_latest_chunk(&peer.config.stacker_dbs[idx], i) .unwrap() - .unwrap_or(vec![]); + .unwrap_or_default(); ret.push((chunk_metadata, chunk)); } ret diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 8485a6f3e0..108045a427 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -173,7 +173,7 @@ fn test_nakamoto_tenure_downloader() { pubkey_hash: Hash160([0x02; 20]), }; let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..]).unwrap(); let coinbase_payload = TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof)); @@ -2149,7 +2149,7 @@ fn test_nakamoto_download_run_2_peers() { for height in 25..tip.block_height { let ops = peer .get_burnchain_block_ops_at_height(height + 1) - .unwrap_or(vec![]); + .unwrap_or_default(); let sn = { let ih = peer.sortdb().index_handle(&tip.sortition_id); let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); @@ -2256,7 +2256,7 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { for height in 
25..tip.block_height { let ops = peer .get_burnchain_block_ops_at_height(height + 1) - .unwrap_or(vec![]); + .unwrap_or_default(); let sn = { let ih = peer.sortdb().index_handle(&tip.sortition_id); let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); @@ -2436,7 +2436,7 @@ fn test_nakamoto_microfork_download_run_2_peers() { for height in 25..tip.block_height { let ops = peer .get_burnchain_block_ops_at_height(height + 1) - .unwrap_or(vec![]); + .unwrap_or_default(); let sn = { let ih = peer.sortdb().index_handle(&tip.sortition_id); let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); @@ -2611,7 +2611,7 @@ fn test_nakamoto_download_run_2_peers_with_one_shadow_block() { for height in 25..tip.block_height { let ops = peer .get_burnchain_block_ops_at_height(height + 1) - .unwrap_or(vec![]); + .unwrap_or_default(); let sn = { let ih = peer.sortdb().index_handle(&tip.sortition_id); let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); @@ -2813,7 +2813,7 @@ fn test_nakamoto_download_run_2_peers_shadow_prepare_phase() { for height in 25..tip.block_height { let ops = peer .get_burnchain_block_ops_at_height(height + 1) - .unwrap_or(vec![]); + .unwrap_or_default(); let sn = { let ih = peer.sortdb().index_handle(&tip.sortition_id); let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); @@ -3018,7 +3018,7 @@ fn test_nakamoto_download_run_2_peers_shadow_reward_cycles() { for height in 25..tip.block_height { let ops = peer .get_burnchain_block_ops_at_height(height + 1) - .unwrap_or(vec![]); + .unwrap_or_default(); let sn = { let ih = peer.sortdb().index_handle(&tip.sortition_id); let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 0a26839ca4..91ad5ac38c 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -515,7 +515,7 @@ impl NakamotoBootPlan { .config .test_stackers .clone() - .unwrap_or(vec![]) 
+ .unwrap_or_default() .iter() .map(|test_stacker| { let pox_addr = test_stacker diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 6dbb43c1cc..21076f1d69 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2301,7 +2301,7 @@ pub struct SerializedTx { impl SerializedTx { pub fn new(tx: Transaction) -> SerializedTx { - let txid = Txid::from_vec_be(&tx.txid().as_bytes().to_vec()).unwrap(); + let txid = Txid::from_vec_be(tx.txid().as_bytes().as_ref()).unwrap(); let mut encoder = RawEncoder::new(Cursor::new(vec![])); tx.consensus_encode(&mut encoder) .expect("BUG: failed to serialize to a vec"); From fe244d94513065276b8846b93d76fcc356339c4d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 24 Jan 2025 08:07:20 +0100 Subject: [PATCH 10/16] added serial_test to dependancies, use test serialization for event_dispatcher tests --- Cargo.lock | 41 +++++++++++++++++++++ testnet/stacks-node/Cargo.toml | 1 + testnet/stacks-node/src/event_dispatcher.rs | 5 +++ 3 files changed, 47 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 3b05c44ef1..5569bf1f88 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2668,6 +2668,15 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +[[package]] +name = "scc" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28e1c91382686d21b5ac7959341fcb9780fa7c03773646995a87c950fa7be640" +dependencies = [ + "sdd", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -2690,6 +2699,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sdd" +version = "3.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"478f121bb72bbf63c52c93011ea1791dca40140dfe13f8336c4c5ac952c33aa9" + [[package]] name = "secp256k1" version = "0.24.3" @@ -2794,6 +2809,31 @@ dependencies = [ "serde", ] +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "sha1" version = "0.6.1" @@ -3049,6 +3089,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serial_test", "slog", "stacks-common", "stacks-signer", diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 3d253c8b89..249e05ac30 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -32,6 +32,7 @@ async-h1 = { version = "2.3.2", optional = true } async-std = { version = "1.6", optional = true, features = ["attributes"] } http-types = { version = "2.12", optional = true } thiserror = { workspace = true } +serial_test = "3.2.0" [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index c8ba1426fe..6a36deeb71 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1754,6 +1754,7 @@ mod test { use std::time::Instant; use clarity::vm::costs::ExecutionCost; + use serial_test::serial; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use 
stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; @@ -2037,6 +2038,7 @@ mod test { } #[test] + #[serial] fn test_process_pending_payloads() { use mockito::Matcher; @@ -2112,6 +2114,7 @@ mod test { } #[test] + #[serial] fn test_send_payload_with_db() { use mockito::Matcher; @@ -2261,6 +2264,7 @@ mod test { } #[test] + #[serial] fn test_send_payload_timeout() { let port = get_random_port(); let timeout = Duration::from_secs(3); @@ -2323,6 +2327,7 @@ mod test { } #[test] + #[serial] fn test_send_payload_with_db_force_restart() { let port = get_random_port(); let timeout = Duration::from_secs(3); From 0aae8cb4ae21b95f06a10a8583d9bb1ffce8ab86 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 24 Jan 2025 08:15:36 +0100 Subject: [PATCH 11/16] fixed formatting --- testnet/stacks-node/src/event_dispatcher.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 6a36deeb71..91adb167b9 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -18,6 +18,8 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::path::PathBuf; use std::sync::mpsc::{channel, Receiver, Sender}; +#[cfg(test)] +use std::sync::LazyLock; use std::sync::{Arc, Mutex}; use std::thread::sleep; use std::time::Duration; @@ -73,9 +75,6 @@ use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; -#[cfg(test)] -use std::sync::LazyLock; - #[cfg(any(test, feature = "testing"))] lazy_static! { /// Do not announce a signed/mined block to the network when set to true. 
From 64733dbb3ca554cc2340a14ca7fa0be8a5a26a07 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 24 Jan 2025 18:18:11 +0100 Subject: [PATCH 12/16] moved serial_test to dev_dependencies --- testnet/stacks-node/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 249e05ac30..56d79e0289 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -32,7 +32,6 @@ async-h1 = { version = "2.3.2", optional = true } async-std = { version = "1.6", optional = true, features = ["attributes"] } http-types = { version = "2.12", optional = true } thiserror = { workspace = true } -serial_test = "3.2.0" [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} @@ -52,6 +51,7 @@ tiny_http = "0.12.0" http-types = "2.12" tempfile = "3.3" mockito = "1.5" +serial_test = "3.2.0" [[bin]] name = "stacks-node" From 4a6bc747fbc0bff07e80d18cd343686d24e23d29 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 24 Jan 2025 10:19:06 -0800 Subject: [PATCH 13/16] CRC: cleanup Signed-off-by: Jacinta Ferrant --- stackslib/src/burnchains/bitcoin/blocks.rs | 2 +- stackslib/src/burnchains/tests/mod.rs | 8 +- .../src/chainstate/stacks/index/test/marf.rs | 92 +++++++------------ .../src/chainstate/stacks/index/test/node.rs | 5 +- .../burnchains/bitcoin_regtest_controller.rs | 2 +- 5 files changed, 39 insertions(+), 70 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/blocks.rs b/stackslib/src/burnchains/bitcoin/blocks.rs index 2f8b28bcc8..46cda957d9 100644 --- a/stackslib/src/burnchains/bitcoin/blocks.rs +++ b/stackslib/src/burnchains/bitcoin/blocks.rs @@ -436,7 +436,7 @@ impl BitcoinBlockParser { match (inputs_opt, outputs_opt) { (Some(inputs), Some(outputs)) => { Some(BitcoinTransaction { - txid: Txid::from_vec_be(tx.txid().as_bytes().as_ref()).unwrap(), // this *should* panic if it 
fails + txid: Txid::from_vec_be(tx.txid().as_bytes()).unwrap(), // this *should* panic if it fails vtxindex: vtxindex as u32, opcode, data, diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 6f81737d8b..5af5848ed2 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -241,12 +241,8 @@ impl TestMiner { ); match self.vrf_key_map.get(vrf_pubkey) { Some(prover_key) => { - let proof = VRF::prove(prover_key, last_sortition_hash.as_bytes().as_ref()); - let valid = match VRF::verify( - vrf_pubkey, - &proof, - last_sortition_hash.as_bytes().as_ref(), - ) { + let proof = VRF::prove(prover_key, last_sortition_hash.as_bytes()); + let valid = match VRF::verify(vrf_pubkey, &proof, last_sortition_hash.as_bytes()) { Ok(v) => v, Err(e) => false, }; diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index a605ed43b2..ec099ef7cd 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -77,8 +77,8 @@ fn marf_insert_different_leaf_same_block_100() { merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, - path_bytes.as_ref(), - [99; 40].as_ref(), + &path_bytes, + &[99; 40], None, ); @@ -151,8 +151,8 @@ fn marf_insert_different_leaf_different_path_different_block_100() { merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, - path_bytes.as_ref(), - [i; 40].as_ref(), + &path_bytes, + &[i; 40], None, ); } @@ -232,8 +232,8 @@ fn marf_insert_same_leaf_different_block_100() { merkle_test_marf( &mut marf.borrow_storage_backend(), &next_block_header, - path_bytes.as_ref(), - [i; 40].as_ref(), + &path_bytes, + &[i; 40], None, ); } @@ -312,8 +312,8 @@ fn marf_insert_leaf_sequence_2() { merkle_test_marf( &mut marf.borrow_storage_backend(), &last_block_header, - path_bytes.as_ref(), - [i; 40].as_ref(), + &path_bytes, + &[i; 40], None, ); } @@ -380,13 
+380,7 @@ fn marf_insert_leaf_sequence_100() { assert_eq!(leaf.data.to_vec(), [i; 40].to_vec()); - merkle_test_marf( - &mut f, - &last_block_header, - path_bytes.as_ref(), - [i; 40].as_ref(), - None, - ); + merkle_test_marf(&mut f, &last_block_header, &path_bytes, &[i; 40], None); } if let Some(root_hashes) = last_root_hashes.take() { let next_root_hashes = f.read_root_to_block_table().unwrap(); @@ -615,16 +609,14 @@ where debug!("---------------------------------------"); debug!( "MARF verify {:?} {:?} from current block header (immediate) {:?}", - &prev_path, - [j as u8; 40].as_ref(), - &next_block_header + &prev_path, &[j as u8; 40], &next_block_header ); debug!("----------------------------------------"); merkle_test_marf( &mut marf.borrow_storage_backend(), &next_block_header, - prev_path.as_ref(), - [j as u8; 40].as_ref(), + &prev_path, + &[j as u8; 40], None, ); } @@ -640,16 +632,14 @@ where debug!("---------------------------------------"); debug!( "MARF verify {:?} {:?} from current block header (deferred) {:?}", - &prev_path, - [j as u8; 40].as_ref(), - &next_block_header + &prev_path, &[j as u8; 40], &next_block_header ); debug!("----------------------------------------"); merkle_test_marf( &mut marf.borrow_storage_backend(), &next_block_header, - prev_path.as_ref(), - [j as u8; 40].as_ref(), + &prev_path, + &[j as u8; 40], None, ); } @@ -662,8 +652,8 @@ where merkle_test_marf( &mut marf.borrow_storage_backend(), &next_block_header, - next_path.as_ref(), - [i as u8; 40].as_ref(), + &next_path, + &[i as u8; 40], None, ); } @@ -691,16 +681,14 @@ where debug!("---------------------------------------"); debug!( "MARF verify {:?} {:?} from last block header {:?}", - &next_path, - [i as u8; 40].as_ref(), - &last_block_header + &next_path, &[i as u8; 40], &last_block_header ); debug!("----------------------------------------"); merkle_test_marf( &mut marf.borrow_storage_backend(), &last_block_header, - next_path.as_ref(), - [i as u8; 40].as_ref(), + &next_path, 
+ &[i as u8; 40], None, ); } @@ -882,7 +870,7 @@ fn marf_merkle_verify_backptrs() { &mut marf.borrow_storage_backend(), &block_header_3, &path_3, - [21; 40].as_ref(), + &[21; 40], None, ); if let Some(root_hashes) = last_root_hashes.take() { @@ -957,7 +945,7 @@ where root_table_cache = Some(merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, - path.as_ref(), + &path, &value.data.to_vec(), root_table_cache, )); @@ -1018,7 +1006,7 @@ where root_table_cache = Some(merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, - path.as_ref(), + &path, &value.data.to_vec(), root_table_cache, )); @@ -1330,13 +1318,10 @@ fn marf_insert_random_10485760_4096_file_storage() { seed = path.clone(); let key = to_hex(&path); - let value = to_hex( - [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, i0 as u8, i1 as u8, i2 as u8, i3 as u8, - ] - .as_ref(), - ); + let value = to_hex(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, i0 as u8, i1 as u8, i2 as u8, i3 as u8, + ]); keys.push(key); values.push(value); @@ -1388,13 +1373,10 @@ fn marf_insert_random_10485760_4096_file_storage() { seed = path.clone(); let key = to_hex(&path); - let value = to_hex( - [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, i0 as u8, i1 as u8, i2 as u8, i3 as u8, - ] - .as_ref(), - ); + let value = to_hex(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, i0 as u8, i1 as u8, i2 as u8, i3 as u8, + ]); keys.push(key); values.push(value); @@ -1618,13 +1600,7 @@ fn marf_read_random_1048576_4096_file_storage() { // can make a merkle proof to each one if do_merkle_check { - merkle_test_marf( - &mut f, - &block_header, - path.as_ref(), - &value.data.to_vec(), - None, - ); + merkle_test_marf(&mut f, &block_header, &path, 
&value.data.to_vec(), None); } if i % 128 == 0 { let end_time = get_epoch_time_ms(); @@ -1927,7 +1903,7 @@ fn marf_insert_flush_to_different_block() { root_table_cache = Some(merkle_test_marf( &mut marf.borrow_storage_backend(), &target_block, - path.as_ref(), + &path, &value.data.to_vec(), root_table_cache, )); @@ -2047,7 +2023,7 @@ fn marf_insert_flush_to_different_block() { root_table_cache = Some(merkle_test_marf( &mut marf.borrow_storage_backend(), &read_from_block, - path.as_ref(), + &path, &value.data.to_vec(), root_table_cache, )); diff --git a/stackslib/src/chainstate/stacks/index/test/node.rs b/stackslib/src/chainstate/stacks/index/test/node.rs index 9246689d74..0c8a92f21c 100644 --- a/stackslib/src/chainstate/stacks/index/test/node.rs +++ b/stackslib/src/chainstate/stacks/index/test/node.rs @@ -4251,10 +4251,7 @@ fn trie_cursor_walk_full() { let (ptr, node, hash) = fields_opt.unwrap(); assert_eq!(ptr, node_ptrs[31]); - assert_eq!( - node, - TrieNodeType::Leaf(TrieLeaf::new(&[], [31u8; 40].as_ref())) - ); + assert_eq!(node, TrieNodeType::Leaf(TrieLeaf::new(&[], &[31u8; 40]))); assert_eq!(hash, hashes[31]); // cursor's last-visited node points at the penultimate node (the last node4), diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 21076f1d69..d76c16641c 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2301,7 +2301,7 @@ pub struct SerializedTx { impl SerializedTx { pub fn new(tx: Transaction) -> SerializedTx { - let txid = Txid::from_vec_be(tx.txid().as_bytes().as_ref()).unwrap(); + let txid = Txid::from_vec_be(tx.txid().as_bytes()).unwrap(); let mut encoder = RawEncoder::new(Cursor::new(vec![])); tx.consensus_encode(&mut encoder) .expect("BUG: failed to serialize to a vec"); From ccbf1af8fb6934bc1254b570638f500abb9e5505 Mon Sep 17 00:00:00 2001 From: 
Jacinta Ferrant Date: Fri, 24 Jan 2025 10:30:46 -0800 Subject: [PATCH 14/16] CRC: replace StacksPrivateKey::new with StacksPrivateKey::random and remove default implementation Signed-off-by: Jacinta Ferrant --- libsigner/src/tests/mod.rs | 2 +- libsigner/src/v0/messages.rs | 20 +-- libstackerdb/src/tests/mod.rs | 2 +- stacks-common/src/util/secp256k1.rs | 14 +-- stacks-signer/src/client/mod.rs | 4 +- stacks-signer/src/client/stackerdb.rs | 2 +- stacks-signer/src/client/stacks_client.rs | 2 +- stacks-signer/src/main.rs | 4 +- stacks-signer/src/monitor_signers.rs | 2 +- stacks-signer/src/runloop.rs | 3 +- stacks-signer/src/signerdb.rs | 4 +- stacks-signer/src/tests/chainstate.rs | 2 +- stackslib/src/blockstack_cli.rs | 4 +- stackslib/src/burnchains/tests/burnchain.rs | 2 +- stackslib/src/chainstate/coordinator/tests.rs | 90 ++++++------- stackslib/src/chainstate/nakamoto/shadow.rs | 2 +- .../src/chainstate/nakamoto/test_signers.rs | 2 +- .../src/chainstate/nakamoto/tests/mod.rs | 88 ++++++------- .../chainstate/stacks/boot/contract_tests.rs | 16 +-- stackslib/src/chainstate/stacks/boot/mod.rs | 28 ++--- .../src/chainstate/stacks/boot/pox_2_tests.rs | 4 +- .../src/chainstate/stacks/boot/pox_4_tests.rs | 18 +-- stackslib/src/chainstate/stacks/db/blocks.rs | 14 +-- .../src/chainstate/stacks/db/transactions.rs | 2 +- .../src/chainstate/stacks/db/unconfirmed.rs | 12 +- stackslib/src/chainstate/stacks/miner.rs | 2 +- .../src/chainstate/stacks/tests/accounting.rs | 22 ++-- .../stacks/tests/block_construction.rs | 34 ++--- .../src/chainstate/stacks/transaction.rs | 30 ++--- stackslib/src/cli.rs | 2 +- stackslib/src/config/mod.rs | 2 +- stackslib/src/core/tests/mod.rs | 14 +-- stackslib/src/main.rs | 4 +- stackslib/src/net/api/tests/getsigner.rs | 4 +- stackslib/src/net/api/tests/mod.rs | 4 +- .../src/net/api/tests/postblock_proposal.rs | 2 +- .../src/net/api/tests/postmempoolquery.rs | 4 +- stackslib/src/net/chat.rs | 12 +- stackslib/src/net/codec.rs | 4 +- 
stackslib/src/net/connection.rs | 10 +- stackslib/src/net/db.rs | 34 ++--- stackslib/src/net/dns.rs | 5 +- stackslib/src/net/mod.rs | 2 +- stackslib/src/net/stackerdb/tests/db.rs | 8 +- stackslib/src/net/tests/download/epoch2x.rs | 2 +- stackslib/src/net/tests/download/nakamoto.rs | 10 +- stackslib/src/net/tests/inv/nakamoto.rs | 10 +- stackslib/src/net/tests/mempool/mod.rs | 10 +- stackslib/src/net/tests/mod.rs | 14 +-- stackslib/src/net/tests/relay/epoch2x.rs | 10 +- testnet/stacks-node/src/tests/epoch_205.rs | 12 +- testnet/stacks-node/src/tests/epoch_21.rs | 54 ++++---- testnet/stacks-node/src/tests/epoch_22.rs | 18 +-- testnet/stacks-node/src/tests/epoch_23.rs | 2 +- testnet/stacks-node/src/tests/epoch_24.rs | 8 +- testnet/stacks-node/src/tests/epoch_25.rs | 4 +- testnet/stacks-node/src/tests/mempool.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 118 +++++++++--------- .../src/tests/neon_integrations.rs | 74 +++++------ testnet/stacks-node/src/tests/signer/mod.rs | 8 +- testnet/stacks-node/src/tests/signer/v0.rs | 110 ++++++++-------- 61 files changed, 505 insertions(+), 503 deletions(-) diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index 8ef6d38eee..9c04eb09ad 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -128,7 +128,7 @@ fn test_simple_signer() { reward_cycle: 1, }; for i in 0..max_events { - let privk = Secp256k1PrivateKey::new(); + let privk = Secp256k1PrivateKey::random(); let message = SignerMessage::BlockProposal(block_proposal.clone()); let message_bytes = message.serialize_to_vec(); let mut chunk = StackerDBChunkData::new(i as u32, 1, message_bytes); diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 7565b3bd7e..7d2daf560a 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -1192,7 +1192,7 @@ mod test { let rejection = BlockRejection::new( Sha512Trunc256Sum([0u8; 32]), RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), - 
&StacksPrivateKey::new(), + &StacksPrivateKey::random(), thread_rng().gen_bool(0.5), thread_rng().next_u64(), ); @@ -1204,7 +1204,7 @@ mod test { let rejection = BlockRejection::new( Sha512Trunc256Sum([1u8; 32]), RejectCode::ConnectivityIssues, - &StacksPrivateKey::new(), + &StacksPrivateKey::random(), thread_rng().gen_bool(0.5), thread_rng().next_u64(), ); @@ -1231,7 +1231,7 @@ mod test { let response = BlockResponse::Rejected(BlockRejection::new( Sha512Trunc256Sum([1u8; 32]), RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), - &StacksPrivateKey::new(), + &StacksPrivateKey::random(), thread_rng().gen_bool(0.5), thread_rng().next_u64(), )); @@ -1318,10 +1318,10 @@ mod test { #[test] fn verify_sign_mock_proposal() { - let private_key = StacksPrivateKey::new(); + let private_key = StacksPrivateKey::random(); let public_key = StacksPublicKey::from_private(&private_key); - let bad_private_key = StacksPrivateKey::new(); + let bad_private_key = StacksPrivateKey::random(); let bad_public_key = StacksPublicKey::from_private(&bad_private_key); let mut mock_proposal = random_mock_proposal(); @@ -1353,7 +1353,7 @@ mod test { #[test] fn serde_mock_proposal() { let mut mock_signature = random_mock_proposal(); - mock_signature.sign(&StacksPrivateKey::new()).unwrap(); + mock_signature.sign(&StacksPrivateKey::random()).unwrap(); let serialized_signature = mock_signature.serialize_to_vec(); let deserialized_signature = read_next::(&mut &serialized_signature[..]) .expect("Failed to deserialize MockSignature"); @@ -1368,7 +1368,7 @@ mod test { metadata: SignerMessageMetadata::default(), }; mock_signature - .sign(&StacksPrivateKey::new()) + .sign(&StacksPrivateKey::random()) .expect("Failed to sign MockSignature"); let serialized_signature = mock_signature.serialize_to_vec(); let deserialized_signature = read_next::(&mut &serialized_signature[..]) @@ -1379,8 +1379,10 @@ mod test { #[test] fn serde_mock_block() { let mock_proposal = random_mock_proposal(); - let 
mock_signature_1 = MockSignature::new(mock_proposal.clone(), &StacksPrivateKey::new()); - let mock_signature_2 = MockSignature::new(mock_proposal.clone(), &StacksPrivateKey::new()); + let mock_signature_1 = + MockSignature::new(mock_proposal.clone(), &StacksPrivateKey::random()); + let mock_signature_2 = + MockSignature::new(mock_proposal.clone(), &StacksPrivateKey::random()); let mock_block = MockBlock { mock_proposal, mock_signatures: vec![mock_signature_1, mock_signature_2], diff --git a/libstackerdb/src/tests/mod.rs b/libstackerdb/src/tests/mod.rs index f0e166a67b..fe94f70c60 100644 --- a/libstackerdb/src/tests/mod.rs +++ b/libstackerdb/src/tests/mod.rs @@ -24,7 +24,7 @@ use crate::*; #[test] fn test_stackerdb_slot_metadata_sign_verify() { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_MAINNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index 5c64838855..e569a8ba0d 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -123,7 +123,7 @@ impl Default for Secp256k1PublicKey { impl Secp256k1PublicKey { #[cfg(any(test, feature = "testing"))] pub fn new() -> Secp256k1PublicKey { - Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()) + Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::random()) } pub fn from_hex(hex_string: &str) -> Result { @@ -249,14 +249,8 @@ impl PublicKey for Secp256k1PublicKey { } } -impl Default for Secp256k1PrivateKey { - fn default() -> Self { - Self::new() - } -} - impl Secp256k1PrivateKey { - pub fn new() -> Secp256k1PrivateKey { + pub fn random() -> Secp256k1PrivateKey { let mut rng = rand::thread_rng(); loop { // keep trying to generate valid bytes @@ -460,7 +454,7 @@ mod tests { #[test] fn test_parse_serialize_compressed() { - let mut t1 = Secp256k1PrivateKey::new(); + let mut t1 = 
Secp256k1PrivateKey::random(); t1.set_compress_public(true); let h_comp = t1.to_hex(); t1.set_compress_public(false); @@ -654,7 +648,7 @@ mod tests { let mut rng = rand::thread_rng(); for i in 0..100 { - let privk = Secp256k1PrivateKey::new(); + let privk = Secp256k1PrivateKey::random(); let pubk = Secp256k1PublicKey::from_private(&privk); let mut msg = [0u8; 32]; diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index bdaa368567..a3d9bed159 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -302,7 +302,7 @@ pub(crate) mod tests { pox_consensus_hash: Option, ) -> (String, RPCPeerInfoData) { // Generate some random info - let private_key = StacksPrivateKey::new(); + let private_key = StacksPrivateKey::random(); let public_key = StacksPublicKey::from_private(&private_key); let public_key_buf = StacksPublicKeyBuffer::from_public_key(&public_key); let public_key_hash = Hash160::from_node_public_key(&public_key); @@ -376,7 +376,7 @@ pub(crate) mod tests { let private_key = if signer_id == 0 { config.stacks_private_key } else { - StacksPrivateKey::new() + StacksPrivateKey::random() }; let public_key = StacksPublicKey::from_private(&private_key); diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 0316976a4c..dc6525b144 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -248,7 +248,7 @@ mod tests { #[test] fn send_signer_message_should_succeed() { let signer_config = build_signer_config_tomls( - &[StacksPrivateKey::new()], + &[StacksPrivateKey::random()], "localhost:20443", Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. 
&Network::Testnet, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index f6cb9c6d8b..db0b356fb4 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1197,7 +1197,7 @@ mod tests { #[test] fn get_reward_set_should_succeed() { let mock = MockServerClient::new(); - let private_key = StacksPrivateKey::new(); + let private_key = StacksPrivateKey::random(); let public_key = StacksPublicKey::from_private(&private_key); let mut bytes = [0u8; 33]; bytes.copy_from_slice(&public_key.to_bytes_compressed()); diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index eac60cc53f..821f2e1c6e 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -409,10 +409,10 @@ pub mod tests { #[test] fn test_verify_vote() { let mut rand = rand::thread_rng(); - let private_key = Secp256k1PrivateKey::new(); + let private_key = Secp256k1PrivateKey::random(); let public_key = StacksPublicKey::from_private(&private_key); - let invalid_private_key = Secp256k1PrivateKey::new(); + let invalid_private_key = Secp256k1PrivateKey::random(); let invalid_public_key = StacksPublicKey::from_private(&invalid_private_key); let sip = rand.next_u32(); diff --git a/stacks-signer/src/monitor_signers.rs b/stacks-signer/src/monitor_signers.rs index 4bc017fa27..65b4fdda3e 100644 --- a/stacks-signer/src/monitor_signers.rs +++ b/stacks-signer/src/monitor_signers.rs @@ -55,7 +55,7 @@ impl SignerMonitor { pub fn new(args: MonitorSignersArgs) -> Self { url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); let stacks_client = StacksClient::try_from_host( - StacksPrivateKey::new(), // We don't need a private key to read + StacksPrivateKey::random(), // We don't need a private key to read args.host.clone(), "FOO".to_string(), // We don't care about authorized paths. 
Just accessing public info ) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 69dc2dd843..84c1c592f5 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -544,7 +544,8 @@ mod tests { let weight = 10; let mut signer_entries = Vec::with_capacity(nmb_signers); for _ in 0..nmb_signers { - let key = StacksPublicKey::from_private(&StacksPrivateKey::new()).to_bytes_compressed(); + let key = + StacksPublicKey::from_private(&StacksPrivateKey::random()).to_bytes_compressed(); let mut signing_key = [0u8; 33]; signing_key.copy_from_slice(&key); signer_entries.push(NakamotoSignerEntry { diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index a2b7c7fe37..79325d1d13 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -1674,13 +1674,13 @@ mod tests { previous_tenure_blocks: 1, cause: TenureChangeCause::BlockFound, pubkey_hash: Hash160::from_node_public_key(&StacksPublicKey::from_private( - &StacksPrivateKey::new(), + &StacksPrivateKey::random(), )), }; let tenure_change_tx_payload = TransactionPayload::TenureChange(tenure_change_payload); let tenure_change_tx = StacksTransaction::new( TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&StacksPrivateKey::new()).unwrap(), + TransactionAuth::from_p2pkh(&StacksPrivateKey::random()).unwrap(), tenure_change_tx_payload, ); diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 92b7a6ed53..19f0d843c8 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -95,7 +95,7 @@ fn setup_test_environment( }; let stacks_client = StacksClient::new( - StacksPrivateKey::new(), + StacksPrivateKey::random(), SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).to_string(), "FOO".into(), false, diff --git a/stackslib/src/blockstack_cli.rs b/stackslib/src/blockstack_cli.rs index 06ea43359f..13bf8596cc 100644 --- a/stackslib/src/blockstack_cli.rs +++ 
b/stackslib/src/blockstack_cli.rs @@ -579,7 +579,7 @@ fn generate_secret_key(args: &[String], version: TransactionVersion) -> Result C32_ADDRESS_VERSION_MAINNET_SINGLESIG, @@ -1157,7 +1157,7 @@ mod test { .contains("Failed to decode hex") ); - let sk = StacksPrivateKey::new(); + let sk = StacksPrivateKey::random(); let s = format!( "{}", sign_transaction_single_sig_standard("01zz", &sk).unwrap_err() diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index d06c7e4358..7cd80c7a38 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -710,7 +710,7 @@ fn test_burn_snapshot_sequence() { let pubkey_hex = vrf_pubkey.to_hex(); leader_public_keys.push(pubkey_hex); - let bitcoin_privkey = Secp256k1PrivateKey::new(); + let bitcoin_privkey = Secp256k1PrivateKey::random(); let bitcoin_publickey = BitcoinPublicKey::from_private(&bitcoin_privkey); leader_bitcoin_public_keys.push(to_hex(&bitcoin_publickey.to_bytes())); diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index c8c7e56d06..b0ddcba585 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -1013,10 +1013,10 @@ fn missed_block_commits_2_05() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::random()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let rewards = pox_addr_from(&StacksPrivateKey::new()); + let stacker = p2pkh_from(&StacksPrivateKey::random()); + let rewards = pox_addr_from(&StacksPrivateKey::random()); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); let 
initial_balances = vec![(stacker.clone().into(), balance)]; @@ -1333,10 +1333,10 @@ fn missed_block_commits_2_1() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::random()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let rewards = pox_addr_from(&StacksPrivateKey::new()); + let stacker = p2pkh_from(&StacksPrivateKey::random()); + let rewards = pox_addr_from(&StacksPrivateKey::random()); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); let initial_balances = vec![(stacker.clone().into(), balance)]; @@ -1677,10 +1677,10 @@ fn late_block_commits_2_1() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::random()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let rewards = pox_addr_from(&StacksPrivateKey::new()); + let stacker = p2pkh_from(&StacksPrivateKey::random()); + let rewards = pox_addr_from(&StacksPrivateKey::random()); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); let initial_balances = vec![(stacker.clone().into(), balance)]; @@ -2005,7 +2005,7 @@ fn test_simple_setup() { let _r = std::fs::remove_dir_all(path_blinded); let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::random()).collect(); setup_states( &[path, path_blinded], @@ 
-2216,11 +2216,11 @@ fn test_sortition_with_reward_set() { let _r = std::fs::remove_dir_all(path); let mut vrf_keys: Vec<_> = (0..150).map(|_| VRFPrivateKey::new()).collect(); - let mut committers: Vec<_> = (0..150).map(|_| StacksPrivateKey::new()).collect(); + let mut committers: Vec<_> = (0..150).map(|_| StacksPrivateKey::random()).collect(); let reward_set_size = 4; let reward_set: Vec<_> = (0..reward_set_size) - .map(|_| pox_addr_from(&StacksPrivateKey::new())) + .map(|_| pox_addr_from(&StacksPrivateKey::random())) .collect(); setup_states( @@ -2390,7 +2390,7 @@ fn test_sortition_with_reward_set() { vec![(pox_addr_from(miner_wrong_out), 0)] } else { (0..OUTPUTS_PER_COMMIT) - .map(|ix| (pox_addr_from(&StacksPrivateKey::new()), ix as u16)) + .map(|ix| (pox_addr_from(&StacksPrivateKey::random()), ix as u16)) .collect() }; let bad_block_recipients = Some(RewardSetInfo { @@ -2487,13 +2487,13 @@ fn test_sortition_with_burner_reward_set() { let _r = std::fs::remove_dir_all(path); let mut vrf_keys: Vec<_> = (0..150).map(|_| VRFPrivateKey::new()).collect(); - let mut committers: Vec<_> = (0..150).map(|_| StacksPrivateKey::new()).collect(); + let mut committers: Vec<_> = (0..150).map(|_| StacksPrivateKey::random()).collect(); let reward_set_size = 3; let mut reward_set: Vec<_> = (0..reward_set_size - 1) .map(|_| PoxAddress::standard_burn_address(false)) .collect(); - reward_set.push(pox_addr_from(&StacksPrivateKey::new())); + reward_set.push(pox_addr_from(&StacksPrivateKey::random())); setup_states( &[path], @@ -2636,7 +2636,7 @@ fn test_sortition_with_burner_reward_set() { vec![(pox_addr_from(miner_wrong_out), 0)] } else { (0..OUTPUTS_PER_COMMIT) - .map(|ix| (pox_addr_from(&StacksPrivateKey::new()), ix as u16)) + .map(|ix| (pox_addr_from(&StacksPrivateKey::random()), ix as u16)) .collect() }; let bad_block_recipients = Some(RewardSetInfo { @@ -2751,10 +2751,10 @@ fn test_pox_btc_ops() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = 
(0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::random()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let rewards = pox_addr_from(&StacksPrivateKey::new()); + let stacker = p2pkh_from(&StacksPrivateKey::random()); + let rewards = pox_addr_from(&StacksPrivateKey::random()); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); let initial_balances = vec![(stacker.clone().into(), balance)]; @@ -3043,10 +3043,10 @@ fn test_stx_transfer_btc_ops() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::random()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let recipient = p2pkh_from(&StacksPrivateKey::new()); + let stacker = p2pkh_from(&StacksPrivateKey::random()); + let recipient = p2pkh_from(&StacksPrivateKey::random()); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let transfer_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); let initial_balances = vec![(stacker.clone().into(), balance)]; @@ -3474,11 +3474,11 @@ fn test_delegate_stx_btc_ops() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::random()).collect(); - let first_del = p2pkh_from(&StacksPrivateKey::new()); - let second_del = p2pkh_from(&StacksPrivateKey::new()); - let delegator_addr = p2pkh_from(&StacksPrivateKey::new()); + let first_del = 
p2pkh_from(&StacksPrivateKey::random()); + let second_del = p2pkh_from(&StacksPrivateKey::random()); + let delegator_addr = p2pkh_from(&StacksPrivateKey::random()); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let delegated_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); let initial_balances = vec![ @@ -3781,10 +3781,10 @@ fn test_initial_coinbase_reward_distributions() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::random()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let rewards = p2pkh_from(&StacksPrivateKey::new()); + let stacker = p2pkh_from(&StacksPrivateKey::random()); + let rewards = p2pkh_from(&StacksPrivateKey::random()); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); let initial_balances = vec![(stacker.clone().into(), balance)]; @@ -4022,7 +4022,7 @@ fn test_epoch_switch_cost_contract_instantiation() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..10).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); setup_states( &[path], @@ -4225,7 +4225,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..15).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::random()).collect(); setup_states( &[path], @@ -4431,7 +4431,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { 
let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..25).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..25).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..25).map(|_| StacksPrivateKey::random()).collect(); setup_states( &[path], @@ -4653,9 +4653,9 @@ fn atlas_stop_start() { let atlas_name: clarity::vm::ContractName = "atlas-test".into(); let vrf_keys: Vec<_> = (0..15).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::random()).collect(); - let signer_sk = StacksPrivateKey::new(); + let signer_sk = StacksPrivateKey::random(); let signer_pk = p2pkh_from(&signer_sk); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); @@ -4948,11 +4948,11 @@ fn test_epoch_verify_active_pox_contract() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..20).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..20).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..20).map(|_| StacksPrivateKey::random()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let stacker_2 = p2pkh_from(&StacksPrivateKey::new()); - let rewards = pox_addr_from(&StacksPrivateKey::new()); + let stacker = p2pkh_from(&StacksPrivateKey::random()); + let stacker_2 = p2pkh_from(&StacksPrivateKey::random()); + let rewards = pox_addr_from(&StacksPrivateKey::random()); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); let initial_balances = vec![ @@ -5250,12 +5250,12 @@ fn test_sortition_with_sunset() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let mut vrf_keys: Vec<_> = (0..200).map(|_| 
VRFPrivateKey::new()).collect(); - let mut committers: Vec<_> = (0..200).map(|_| StacksPrivateKey::new()).collect(); + let mut committers: Vec<_> = (0..200).map(|_| StacksPrivateKey::random()).collect(); let reward_set_size = pox_consts.as_ref().unwrap().reward_slots() as usize; assert_eq!(reward_set_size, 6); let reward_set: Vec<_> = (0..reward_set_size) - .map(|_| pox_addr_from(&StacksPrivateKey::new())) + .map(|_| pox_addr_from(&StacksPrivateKey::random())) .collect(); setup_states( @@ -5562,12 +5562,12 @@ fn test_sortition_with_sunset_and_epoch_switch() { let burnchain_conf = get_burnchain(path, pox_consts.clone()); let mut vrf_keys: Vec<_> = (0..200).map(|_| VRFPrivateKey::new()).collect(); - let mut committers: Vec<_> = (0..200).map(|_| StacksPrivateKey::new()).collect(); + let mut committers: Vec<_> = (0..200).map(|_| StacksPrivateKey::random()).collect(); let reward_set_size = pox_consts.as_ref().unwrap().reward_slots() as usize; assert_eq!(reward_set_size, 6); let reward_set: Vec<_> = (0..reward_set_size) - .map(|_| pox_addr_from(&StacksPrivateKey::new())) + .map(|_| pox_addr_from(&StacksPrivateKey::random())) .collect(); setup_states_with_epochs( @@ -5913,7 +5913,7 @@ fn test_pox_processable_block_in_different_pox_forks() { let b_blind = get_burnchain(path_blinded, pox_consts.clone()); let vrf_keys: Vec<_> = (0..20).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..20).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..20).map(|_| StacksPrivateKey::random()).collect(); setup_states_with_epochs( &[path, path_blinded], @@ -6203,7 +6203,7 @@ fn test_pox_no_anchor_selected() { let _r = std::fs::remove_dir_all(path_blinded); let vrf_keys: Vec<_> = (0..10).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); setup_states( &[path, path_blinded], @@ -6418,7 +6418,7 @@ fn 
test_pox_fork_out_of_order() { let _r = std::fs::remove_dir_all(path_blinded); let vrf_keys: Vec<_> = (0..15).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::new()).collect(); + let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::random()).collect(); setup_states( &[path, path_blinded], diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 67a57a2ca0..bd9b28fac7 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -643,7 +643,7 @@ impl NakamotoBlockBuilder { let coinbase_payload = CoinbasePayload(naka_tip_tenure_start_header.index_block_hash().0); // the miner key is irrelevant - let miner_key = StacksPrivateKey::new(); + let miner_key = StacksPrivateKey::random(); let miner_addr = StacksAddress::p2pkh(mainnet, &StacksPublicKey::from_private(&miner_key)); let miner_tx_auth = TransactionAuth::from_p2pkh(&miner_key).ok_or_else(|| { Error::InvalidStacksBlock( diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 03bcc0e0b6..56a868dbd3 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -86,7 +86,7 @@ impl Default for TestSigners { let mut signer_keys = Vec::::new(); for _ in 0..num_signers { - signer_keys.push(Secp256k1PrivateKey::default()); + signer_keys.push(Secp256k1PrivateKey::random()); } Self { threshold, diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 06b7703206..5e525f2f88 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -210,7 +210,7 @@ fn codec_nakamoto_header() { #[test] pub fn test_nakamoto_first_tenure_block_syntactic_validation() { - let private_key = StacksPrivateKey::new(); + let private_key = 
StacksPrivateKey::random(); let header = NakamotoBlockHeader { version: 1, chain_length: 2, @@ -589,7 +589,7 @@ pub fn test_load_store_update_nakamoto_blocks() { Some(epochs), ); - let private_key = StacksPrivateKey::new(); + let private_key = StacksPrivateKey::random(); let epoch2_proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let epoch2_proof = VRFProof::from_bytes(&epoch2_proof_bytes[..]).unwrap(); @@ -1664,8 +1664,8 @@ pub fn test_load_store_update_nakamoto_blocks() { /// * NakamotoBlockHeader::check_shadow_coinbase_tx #[test] fn test_nakamoto_block_static_verification() { - let private_key = StacksPrivateKey::new(); - let private_key_2 = StacksPrivateKey::new(); + let private_key = StacksPrivateKey::random(); + let private_key_2 = StacksPrivateKey::random(); let vrf_privkey = VRFPrivateKey::new(); let vrf_pubkey = VRFPublicKey::from_private(&vrf_privkey); @@ -2044,7 +2044,7 @@ fn test_make_miners_stackerdb_config() { ); let naka_miner_hash160 = peer.miner.nakamoto_miner_hash160(); - let miner_keys: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let miner_keys: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let miner_hash160s: Vec<_> = miner_keys .iter() .map(|miner_privkey| { @@ -2312,7 +2312,7 @@ fn test_make_miners_stackerdb_config() { #[test] fn parse_vote_for_aggregate_public_key_valid() { - let signer_private_key = StacksPrivateKey::new(); + let signer_private_key = StacksPrivateKey::random(); let mainnet = false; let chainid = CHAIN_ID_TESTNET; let vote_contract_id = boot_code_id(SIGNERS_VOTING_NAME, mainnet); @@ -2359,7 +2359,7 @@ fn parse_vote_for_aggregate_public_key_valid() { #[test] fn parse_vote_for_aggregate_public_key_invalid() { - let signer_private_key = StacksPrivateKey::new(); + let signer_private_key = StacksPrivateKey::random(); let mainnet = false; let 
chainid = CHAIN_ID_TESTNET; let vote_contract_id = boot_code_id(SIGNERS_VOTING_NAME, mainnet); @@ -2542,7 +2542,7 @@ fn parse_vote_for_aggregate_public_key_invalid() { #[test] fn valid_vote_transaction() { - let signer_private_key = StacksPrivateKey::new(); + let signer_private_key = StacksPrivateKey::random(); let mainnet = false; let chainid = CHAIN_ID_TESTNET; let vote_contract_id = boot_code_id(SIGNERS_VOTING_NAME, mainnet); @@ -2592,7 +2592,7 @@ fn valid_vote_transaction() { #[test] fn valid_vote_transaction_malformed_transactions() { - let signer_private_key = StacksPrivateKey::new(); + let signer_private_key = StacksPrivateKey::random(); let mainnet = false; let chainid = CHAIN_ID_TESTNET; let vote_contract_id = boot_code_id(SIGNERS_VOTING_NAME, mainnet); @@ -2825,8 +2825,8 @@ fn valid_vote_transaction_malformed_transactions() { #[test] fn filter_one_transaction_per_signer_multiple_addresses() { - let signer_private_key_1 = StacksPrivateKey::new(); - let signer_private_key_2 = StacksPrivateKey::new(); + let signer_private_key_1 = StacksPrivateKey::random(); + let signer_private_key_2 = StacksPrivateKey::random(); let mainnet = false; let chainid = CHAIN_ID_TESTNET; let vote_contract_id = boot_code_id(SIGNERS_VOTING_NAME, mainnet); @@ -2954,7 +2954,7 @@ fn filter_one_transaction_per_signer_multiple_addresses() { #[test] fn filter_one_transaction_per_signer_duplicate_nonces() { - let signer_private_key = StacksPrivateKey::new(); + let signer_private_key = StacksPrivateKey::random(); let mainnet = false; let chainid = CHAIN_ID_TESTNET; let vote_contract_id = boot_code_id(SIGNERS_VOTING_NAME, mainnet); @@ -3074,9 +3074,9 @@ pub mod nakamoto_block_signatures { // Test that signatures succeed with exactly 70% of the votes pub fn test_exactly_enough_votes() { let signers = [ - (Secp256k1PrivateKey::default(), 35), - (Secp256k1PrivateKey::default(), 35), - (Secp256k1PrivateKey::default(), 30), + (Secp256k1PrivateKey::random(), 35), + (Secp256k1PrivateKey::random(), 
35), + (Secp256k1PrivateKey::random(), 30), ]; let reward_set = make_reward_set(&signers); @@ -3101,9 +3101,9 @@ pub mod nakamoto_block_signatures { /// Test that signatures fail with just under 70% of the votes pub fn test_just_not_enough_votes() { let signers = [ - (Secp256k1PrivateKey::default(), 3500), - (Secp256k1PrivateKey::default(), 3499), - (Secp256k1PrivateKey::default(), 3001), + (Secp256k1PrivateKey::random(), 3500), + (Secp256k1PrivateKey::random(), 3499), + (Secp256k1PrivateKey::random(), 3001), ]; let reward_set = make_reward_set(&signers); @@ -3132,9 +3132,9 @@ pub mod nakamoto_block_signatures { /// Base success case - 3 signers of equal weight, all signing the block pub fn test_nakamoto_block_verify_signatures() { let signers = [ - Secp256k1PrivateKey::default(), - Secp256k1PrivateKey::default(), - Secp256k1PrivateKey::default(), + Secp256k1PrivateKey::random(), + Secp256k1PrivateKey::random(), + Secp256k1PrivateKey::random(), ]; let reward_set = @@ -3162,9 +3162,9 @@ pub mod nakamoto_block_signatures { /// Fully signed block, but not in order fn test_out_of_order_signer_signatures() { let signers = [ - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), ]; let reward_set = make_reward_set(&signers); @@ -3193,9 +3193,9 @@ pub mod nakamoto_block_signatures { // Test with 3 equal signers, and only two sign fn test_insufficient_signatures() { let signers = [ - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), ]; let reward_set = make_reward_set(&signers); @@ -3225,10 +3225,10 @@ pub mod nakamoto_block_signatures { // and the block is valid fn test_single_signature_threshold() { 
let signers = [ - (Secp256k1PrivateKey::default(), 75), - (Secp256k1PrivateKey::default(), 10), - (Secp256k1PrivateKey::default(), 5), - (Secp256k1PrivateKey::default(), 10), + (Secp256k1PrivateKey::random(), 75), + (Secp256k1PrivateKey::random(), 10), + (Secp256k1PrivateKey::random(), 5), + (Secp256k1PrivateKey::random(), 10), ]; let reward_set = make_reward_set(&signers); @@ -3252,7 +3252,7 @@ pub mod nakamoto_block_signatures { #[test] // Test with a signature that didn't come from the signer set fn test_invalid_signer() { - let signers = [(Secp256k1PrivateKey::default(), 100)]; + let signers = [(Secp256k1PrivateKey::random(), 100)]; let reward_set = make_reward_set(&signers); @@ -3266,7 +3266,7 @@ .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) .collect::<Vec<_>>(); - let invalid_signature = Secp256k1PrivateKey::default() + let invalid_signature = Secp256k1PrivateKey::random() .sign(&message) .expect("Failed to sign block sighash"); @@ -3286,9 +3286,9 @@ #[test] fn test_duplicate_signatures() { let signers = [ - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), ]; let reward_set = make_reward_set(&signers); @@ -3326,10 +3326,10 @@ // Test where a signature used a different message fn test_signature_invalid_message() { let signers = [ - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), ]; let reward_set = make_reward_set(&signers); @@ -3367,10 +3367,10 @@ pub mod 
nakamoto_block_signatures { // Test where a signature is not recoverable fn test_unrecoverable_signature() { let signers = [ - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), - (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), + (Secp256k1PrivateKey::random(), 100), ]; let reward_set = make_reward_set(&signers); diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 17102771ab..9387c02bff 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -63,14 +63,14 @@ lazy_static! { pub static ref COST_VOTING_CONTRACT_TESTNET: QualifiedContractIdentifier = boot_code_id("cost-voting", false); pub static ref USER_KEYS: Vec = - (0..50).map(|_| StacksPrivateKey::new()).collect(); + (0..50).map(|_| StacksPrivateKey::random()).collect(); pub static ref POX_ADDRS: Vec = (0..50u64) .map(|ix| execute(&format!( "{{ version: 0x00, hashbytes: 0x000000000000000000000000{} }}", &to_hex(&ix.to_le_bytes()) ))) .collect(); - pub static ref MINER_KEY: StacksPrivateKey = StacksPrivateKey::new(); + pub static ref MINER_KEY: StacksPrivateKey = StacksPrivateKey::random(); pub static ref MINER_ADDR: StacksAddress = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -663,7 +663,7 @@ impl HeadersDB for TestSimHeadersDB { fn pox_2_contract_caller_units() { let mut sim = ClarityTestSim::new(); sim.epoch_bounds = vec![0, 1, 2]; - let delegator = StacksPrivateKey::new(); + let delegator = StacksPrivateKey::random(); let expected_unlock_height = POX_TESTNET_CYCLE_LENGTH * 4; @@ -893,7 +893,7 @@ fn pox_2_contract_caller_units() { fn pox_2_lock_extend_units() { let mut sim = ClarityTestSim::new(); sim.epoch_bounds = 
vec![0, 1, 2]; - let delegator = StacksPrivateKey::new(); + let delegator = StacksPrivateKey::random(); let reward_cycle_len = 5; let expected_user_1_unlock = 4 * reward_cycle_len + 9 * reward_cycle_len; @@ -1146,7 +1146,7 @@ fn pox_2_lock_extend_units() { fn pox_2_delegate_extend_units() { let mut sim = ClarityTestSim::new(); sim.epoch_bounds = vec![0, 1, 2]; - let delegator = StacksPrivateKey::new(); + let delegator = StacksPrivateKey::random(); // execute past 2.1 epoch initialization sim.execute_next_block(|_env| {}); @@ -1682,7 +1682,7 @@ fn pox_2_delegate_extend_units() { fn simple_epoch21_test() { let mut sim = ClarityTestSim::new(); sim.epoch_bounds = vec![0, 1, 3]; - let delegator = StacksPrivateKey::new(); + let delegator = StacksPrivateKey::random(); let clarity_2_0_id = QualifiedContractIdentifier::new(StandardPrincipalData::transient(), "contract-2-0".into()); @@ -1813,7 +1813,7 @@ fn max_stackerdb_list() { #[test] fn recency_tests() { let mut sim = ClarityTestSim::new(); - let delegator = StacksPrivateKey::new(); + let delegator = StacksPrivateKey::random(); sim.execute_next_block(|env| { env.initialize_versioned_contract( @@ -1890,7 +1890,7 @@ fn recency_tests() { #[test] fn delegation_tests() { let mut sim = ClarityTestSim::new(); - let delegator = StacksPrivateKey::new(); + let delegator = StacksPrivateKey::random(); const REWARD_CYCLE_LENGTH: u128 = 1050; sim.execute_next_block(|env| { diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 49b790a5b0..0277ceb586 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1610,7 +1610,7 @@ pub mod test { } fn rand_addr() -> StacksAddress { - key_to_stacks_addr(&StacksPrivateKey::new()) + key_to_stacks_addr(&StacksPrivateKey::random()) } pub fn key_to_stacks_addr(key: &StacksPrivateKey) -> StacksAddress { @@ -2820,7 +2820,7 @@ pub mod test { let mut missed_initial_blocks = 0; for tenure_id in 
0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); let tip = @@ -2947,7 +2947,7 @@ pub mod test { assert_eq!(bob_balance, 4000); } } - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); let tip = @@ -3014,7 +3014,7 @@ pub mod test { let alice = keys.pop().unwrap(); for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); let tip = @@ -3131,7 +3131,7 @@ pub mod test { let alice = keys.pop().unwrap(); for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); let tip = @@ -3242,7 +3242,7 @@ pub mod test { let mut alice_reward_cycle = 0; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); let tip = @@ -3453,7 +3453,7 @@ pub mod test { let mut rewarded = false; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); let tip = @@ -3714,7 +3714,7 @@ pub mod test { let mut alice_reward_cycle = 0; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let 
microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = @@ -3981,7 +3981,7 @@ pub mod test { let mut first_reward_cycle = 0; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = @@ -4197,7 +4197,7 @@ pub mod test { let mut first_reward_cycle = 0; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = @@ -4410,7 +4410,7 @@ pub mod test { let mut alice_reward_cycle = 0; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = @@ -4659,7 +4659,7 @@ pub mod test { let mut test_after_second_reward_cycle = false; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = @@ -5181,7 +5181,7 @@ pub mod test { let mut test_between_reward_cycles = false; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = @@ -5627,7 +5627,7 @@ pub mod test { let mut alice_reward_cycle = 0; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let 
microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 446ad615de..67485301ad 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -3724,7 +3724,7 @@ fn test_get_pox_addrs() { let mut all_reward_addrs = vec![]; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -3997,7 +3997,7 @@ fn test_stack_with_segwit() { let mut all_reward_addrs = vec![]; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 8433afec07..2fd23fb2e6 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -557,7 +557,7 @@ fn pox_extend_transition() { let tip = get_tip(peer.sortdb.as_ref()); - let alice_signer_private = Secp256k1PrivateKey::new(); + let alice_signer_private = Secp256k1PrivateKey::random(); let alice_signer_key = Secp256k1PublicKey::from_private(&alice_signer_private); let reward_cycle = get_current_reward_cycle(&peer, &burnchain); @@ -636,7 +636,7 @@ fn pox_extend_transition() { latest_block = peer.tenure_with_txs(&[], &mut 
coinbase_nonce); } - let bob_signer_private = Secp256k1PrivateKey::new(); + let bob_signer_private = Secp256k1PrivateKey::random(); let reward_cycle = get_current_reward_cycle(&peer, &burnchain); @@ -670,7 +670,7 @@ fn pox_extend_transition() { ); // new signing key needed - let alice_signer_private = Secp256k1PrivateKey::default(); + let alice_signer_private = Secp256k1PrivateKey::random(); let alice_signer_key = StacksPublicKey::from_private(&alice_signer_private); let alice_signature = make_signer_key_signature( @@ -3674,7 +3674,7 @@ fn stack_extend_verify_sig() { ); // We need a new signer-key for the extend tx - let signer_key = Secp256k1PrivateKey::new(); + let signer_key = Secp256k1PrivateKey::random(); let signer_public_key = StacksPublicKey::from_private(&signer_key); // Test 1: invalid reward cycle @@ -3702,7 +3702,7 @@ fn stack_extend_verify_sig() { // Test 2: invalid pox-addr stacker_nonce += 1; - let other_pox_addr = pox_addr_from(&Secp256k1PrivateKey::new()); + let other_pox_addr = pox_addr_from(&Secp256k1PrivateKey::random()); let signature = make_signer_key_signature( &other_pox_addr, &signer_key, @@ -3726,7 +3726,7 @@ fn stack_extend_verify_sig() { // Test 3: invalid key used to sign stacker_nonce += 1; - let other_key = Secp256k1PrivateKey::new(); + let other_key = Secp256k1PrivateKey::random(); let signature = make_signer_key_signature( &pox_addr, &other_key, @@ -3961,7 +3961,7 @@ fn stack_agg_commit_verify_sig() { // Test 2: invalid pox addr delegate_nonce += 1; - let other_pox_addr = pox_addr_from(&Secp256k1PrivateKey::new()); + let other_pox_addr = pox_addr_from(&Secp256k1PrivateKey::random()); let signature = make_signer_key_signature( &other_pox_addr, signer_sk, @@ -4227,7 +4227,7 @@ struct StackerSignerInfo { impl StackerSignerInfo { fn new() -> Self { - let private_key = StacksPrivateKey::new(); + let private_key = StacksPrivateKey::random(); let public_key = StacksPublicKey::from_private(&private_key); let address = 
key_to_stacks_addr(&private_key); let pox_address = @@ -4779,7 +4779,7 @@ fn stack_increase_verify_signer_key(use_nakamoto: bool) { // invalid pox addr stacker_nonce += 1; - let other_pox_addr = pox_addr_from(&Secp256k1PrivateKey::new()); + let other_pox_addr = pox_addr_from(&Secp256k1PrivateKey::random()); let signature = make_signer_key_signature( &other_pox_addr, // different than existing signer_sk, diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index d84044bcfd..46682b6f86 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -9983,7 +9983,7 @@ pub mod test { for i in 0..32 { test_debug!("Making block {}", i); - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let block = make_empty_coinbase_block(&privk); blocks.push(block); @@ -10198,7 +10198,7 @@ pub mod test { fn stacks_db_get_blocks_inventory_for_reward_cycle() { let mut peer_config = TestPeerConfig::new(function_name!(), 21313, 21314); - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -10290,7 +10290,7 @@ pub mod test { let coinbase_tx = make_coinbase_with_nonce(miner, tenure_id as usize, tenure_id, None); - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key( &StacksPublicKey::from_private(µblock_privkey), ); @@ -11008,13 +11008,13 @@ pub mod test { C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, 1, - &vec![StacksPublicKey::from_private(&StacksPrivateKey::new())], + &vec![StacksPublicKey::from_private(&StacksPrivateKey::random())], ) .unwrap() }) .collect(); - let recipient_privk = StacksPrivateKey::new(); + let recipient_privk = StacksPrivateKey::random(); let recipient_addr = 
StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -11333,13 +11333,13 @@ pub mod test { C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, 1, - &vec![StacksPublicKey::from_private(&StacksPrivateKey::new())], + &vec![StacksPublicKey::from_private(&StacksPrivateKey::random())], ) .unwrap() }) .collect(); - let recipient_privk = StacksPrivateKey::new(); + let recipient_privk = StacksPrivateKey::random(); let recipient_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 3ddf771f77..17e5a3c6e3 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1673,7 +1673,7 @@ pub mod test { ); let mut tx_conn = next_block.start_transaction_processing(); - let sk = secp256k1::Secp256k1PrivateKey::new(); + let sk = secp256k1::Secp256k1PrivateKey::random(); let tx = StacksTransaction { version: TransactionVersion::Testnet, diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index af0393eafa..53f174974a 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -663,7 +663,7 @@ mod test { #[test] fn test_unconfirmed_refresh_one_microblock_stx_transfer() { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -691,7 +691,7 @@ mod test { let mut last_block: Option = None; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = 
Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); @@ -900,7 +900,7 @@ mod test { #[test] fn test_unconfirmed_refresh_10_microblocks_10_stx_transfers() { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -928,7 +928,7 @@ let mut last_block: Option = None; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); @@ -1147,7 +1147,7 @@ #[test] fn test_unconfirmed_refresh_invalid_microblock() { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -1187,7 +1187,7 @@ let mut recv_balance = 0; for tenure_id in 0..num_blocks { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 3c3211672d..5897995d68 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1526,7 +1526,7 @@ impl StacksBlockBuilder { &EMPTY_MICROBLOCK_PARENT_HASH, &Sha512Trunc256Sum([0u8; 32]), ), // will be updated - miner_privkey: StacksPrivateKey::new(), // caller should overwrite this, or refrain from mining microblocks + miner_privkey: StacksPrivateKey::random(), // caller should overwrite this, or refrain from mining microblocks miner_payouts: None, miner_id, } diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs 
b/stackslib/src/chainstate/stacks/tests/accounting.rs index a6f9a986b4..80df67d592 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -128,7 +128,7 @@ fn test_bad_microblock_fees_pre_v210() { let mut mblock_privks = vec![]; for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::new(); + let mblock_privk = StacksPrivateKey::random(); mblock_privks.push(mblock_privk); } @@ -196,7 +196,7 @@ fn test_bad_microblock_fees_pre_v210() { make_coinbase(miner, tenure_id / 2) } else { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), @@ -451,7 +451,7 @@ fn test_bad_microblock_fees_fix_transition() { let mut mblock_privks = vec![]; for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::new(); + let mblock_privk = StacksPrivateKey::random(); mblock_privks.push(mblock_privk); } @@ -519,7 +519,7 @@ fn test_bad_microblock_fees_fix_transition() { make_coinbase(miner, tenure_id / 2) } else { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), @@ -808,7 +808,7 @@ fn test_get_block_info_v210() { let mut mblock_privks = vec![]; for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::new(); + let mblock_privk = StacksPrivateKey::random(); mblock_privks.push(mblock_privk); } @@ -875,7 +875,7 @@ fn test_get_block_info_v210() { make_coinbase(miner, tenure_id / 2) } else { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), @@ -1180,7 +1180,7 @@ fn test_get_block_info_v210_no_microblocks() { let mut mblock_privks = vec![]; for _ in 0..num_blocks { - let mblock_privk = 
StacksPrivateKey::new(); + let mblock_privk = StacksPrivateKey::random(); mblock_privks.push(mblock_privk); } @@ -1247,7 +1247,7 @@ fn test_get_block_info_v210_no_microblocks() { make_coinbase(miner, tenure_id / 2) } else { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), @@ -1414,7 +1414,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { "f67c7437f948ca1834602b28595c12ac744f287a4efaf70d437042a6afed81bc01", ) .unwrap(); - let privk_recipient = StacksPrivateKey::new(); + let privk_recipient = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -1501,7 +1501,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { let mut mblock_privks = vec![]; for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::new(); + let mblock_privk = StacksPrivateKey::random(); mblock_privks.push(mblock_privk); } @@ -1613,7 +1613,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { make_coinbase(miner, tenure_id) } } else { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 5e368054fa..06cf84db6f 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -431,7 +431,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let num_blocks = 10; for _ in 0..num_blocks { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, 
@@ -649,7 +649,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { let mut mblock_privks = vec![]; for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::new(); + let mblock_privk = StacksPrivateKey::random(); mblock_privks.push(mblock_privk); } @@ -885,7 +885,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { let mut mblock_privks = vec![]; for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::new(); + let mblock_privk = StacksPrivateKey::random(); mblock_privks.push(mblock_privk); } @@ -1152,7 +1152,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { /// to consider an origin's "next" transaction immediately. Prior behavior would /// only do so after processing any other origin's transactions. fn test_build_anchored_blocks_incrementing_nonces() { - let private_keys: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let private_keys: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let addresses: Vec<_> = private_keys .iter() .map(|sk| { @@ -1310,7 +1310,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { let mut initial_balances = vec![]; let num_blocks = 10; for i in 0..num_blocks { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -1762,7 +1762,7 @@ fn test_build_anchored_blocks_multiple_chaintips() { let num_blocks = 10; for _ in 0..num_blocks { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -1909,7 +1909,7 @@ fn test_build_anchored_blocks_empty_chaintips() { let num_blocks = 10; for _ in 0..num_blocks { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = 
StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -2052,7 +2052,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { let num_blocks = 3; for _ in 0..num_blocks { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -2417,7 +2417,7 @@ fn test_build_anchored_blocks_bad_nonces() { let num_blocks = 10; for _ in 0..num_blocks { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -2665,8 +2665,8 @@ fn test_build_microblock_stream_forks() { let initial_balance = 100000000; for _ in 0..num_blocks { - let privk = StacksPrivateKey::new(); - let mblock_privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); + let mblock_privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -2965,8 +2965,8 @@ fn test_build_microblock_stream_forks_with_descendants() { let initial_balance = 100000000; for _ in 0..num_blocks { - let privk = StacksPrivateKey::new(); - let mblock_privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); + let mblock_privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -3496,7 +3496,7 @@ fn test_contract_call_across_clarity_versions() { let mut mblock_privks = vec![]; for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::new(); + let mblock_privk = StacksPrivateKey::random(); mblock_privks.push(mblock_privk); } @@ -3999,7 +3999,7 @@ fn test_is_tx_problematic() { let mut initial_balances = vec![]; let num_blocks = 10; for i in 0..num_blocks { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let addr 
= StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -4493,7 +4493,7 @@ fn test_is_tx_problematic() { fn mempool_incorporate_pox_unlocks() { let mut initial_balances = vec![]; let total_balance = 10_000_000_000; - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -4918,7 +4918,7 @@ fn paramaterized_mempool_walk_test( ) { let key_address_pairs: Vec<(Secp256k1PrivateKey, StacksAddress)> = (0..num_users) .map(|_user_index| { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 8871bb5197..10a09c883e 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -4400,7 +4400,7 @@ mod test { ) .unwrap(); - let mut random_sponsor = StacksPrivateKey::new(); // what the origin sees + let mut random_sponsor = StacksPrivateKey::random(); // what the origin sees random_sponsor.set_compress_public(true); let auth = TransactionAuth::Sponsored( @@ -4625,7 +4625,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -4867,7 +4867,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor 
= StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -5104,7 +5104,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -5300,7 +5300,7 @@ mod test { ) .unwrap(); - let random_sponsor = StacksPrivateKey::new(); + let random_sponsor = StacksPrivateKey::random(); let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -5522,7 +5522,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); + let random_sponsor = StacksPrivateKey::random(); let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -5852,7 +5852,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -6097,7 +6097,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( 
TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -6496,7 +6496,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -6661,7 +6661,7 @@ mod test { let pubk_4 = StacksPublicKey::from_private(&privk_4); let pubk_5 = StacksPublicKey::from_private(&privk_5); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -7074,7 +7074,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -7250,7 +7250,7 @@ mod test { let pubk_6 = StacksPublicKey::from_private(&privk_6); let pubk_7 = StacksPublicKey::from_private(&privk_7); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -7892,7 +7892,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin 
sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -8141,7 +8141,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( @@ -8399,7 +8399,7 @@ mod test { let pubk_2 = StacksPublicKey::from_private(&privk_2); let pubk_3 = StacksPublicKey::from_private(&privk_3); - let random_sponsor = StacksPrivateKey::new(); // what the origin sees + let random_sponsor = StacksPrivateKey::random(); // what the origin sees let auth = TransactionAuth::Sponsored( TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index dcd6e62127..286e7f1854 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -453,7 +453,7 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { let result = match &parent_stacks_header.anchored_header { StacksBlockHeaderTypes::Epoch2(..) 
=> { - let sk = StacksPrivateKey::new(); + let sk = StacksPrivateKey::random(); let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); tx_auth.set_origin_nonce(0); diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index f4472d8fbc..ca800db3c1 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2061,7 +2061,7 @@ impl NodeConfig { let sockaddr = deny_node.to_socket_addrs().unwrap().next().unwrap(); let neighbor = NodeConfig::default_neighbor( sockaddr, - Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), + Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::random()), chain_id, peer_version, ); diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 842c06e5b1..d5a655d980 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -1783,7 +1783,7 @@ fn test_add_txs_bloom_filter() { let bf = mempool.get_txid_bloom_filter().unwrap(); let mut mempool_tx = mempool.tx_begin().unwrap(); for i in 0..128 { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, @@ -1889,7 +1889,7 @@ fn test_txtags() { let mut mempool_tx = mempool.tx_begin().unwrap(); for i in 0..128 { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, @@ -1981,7 +1981,7 @@ fn test_make_mempool_sync_data() { for i in 0..((MAX_BLOOM_COUNTER_TXS + 128) as usize) { let mut mempool_tx = mempool.tx_begin().unwrap(); for j in 0..128 { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, @@ -2154,7 +2154,7 @@ fn test_find_next_missing_transactions() { let mut mempool_tx = mempool.tx_begin().unwrap(); for i in 0..(2 * MAX_BLOOM_COUNTER_TXS) { - let pk = 
StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, @@ -2421,7 +2421,7 @@ fn test_drop_and_blacklist_txs_by_time() { let mut mempool_tx = mempool.tx_begin().unwrap(); for i in 0..10 { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, @@ -2538,7 +2538,7 @@ fn test_drop_and_blacklist_txs_by_size() { let mut mempool_tx = mempool.tx_begin().unwrap(); for i in 0..10 { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, @@ -2651,7 +2651,7 @@ fn test_filter_txs_by_type() { let mut mempool_tx = mempool.tx_begin().unwrap(); for i in 0..10 { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 00daea5d94..8731b78f42 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -211,7 +211,7 @@ impl P2PSession { peer_info.parent_network_id, PeerAddress::from_socketaddr(&peer_addr), peer_addr.port(), - Some(StacksPrivateKey::new()), + Some(StacksPrivateKey::random()), u64::MAX, UrlString::try_from(format!("http://127.0.0.1:{}", data_port).as_str()).unwrap(), vec![], @@ -1811,7 +1811,7 @@ simulating a miner. 
.expect("Failed to load chain tip header info") .expect("Failed to load chain tip header info"); - let sk = StacksPrivateKey::new(); + let sk = StacksPrivateKey::random(); let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); tx_auth.set_origin_nonce(0); diff --git a/stackslib/src/net/api/tests/getsigner.rs b/stackslib/src/net/api/tests/getsigner.rs index a3b112d0e3..381706c50e 100644 --- a/stackslib/src/net/api/tests/getsigner.rs +++ b/stackslib/src/net/api/tests/getsigner.rs @@ -48,7 +48,7 @@ fn make_preamble(query: &str) -> HttpRequestPreamble { fn test_try_parse_request() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - let private_key = StacksPrivateKey::new(); + let private_key = StacksPrivateKey::random(); let signer_pubkey = StacksPublicKey::from_private(&private_key); let signer_pubkey_hex = signer_pubkey.to_hex(); let cycle_num = thread_rng().next_u32() as u64; @@ -108,7 +108,7 @@ fn test_try_make_response() { ) .unwrap(); - let random_private_key = StacksPrivateKey::new(); + let random_private_key = StacksPrivateKey::random(); let random_public_key = StacksPublicKey::from_private(&random_private_key); let nakamoto_chain_tip = rpc_test.canonical_tip.clone(); diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 794808ea33..14034e3eaf 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -241,7 +241,7 @@ impl<'a> TestRPC<'a> { "94c319327cc5cd04da7147d32d836eb2e4c44f4db39aa5ede7314a761183d0c701", ) .unwrap(); - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); @@ -561,7 +561,7 @@ impl<'a> TestRPC<'a> { let mut mempool_tx = mempool.tx_begin().unwrap(); let mut sendable_txs = vec![]; for i in 0..20 { - let pk = 
StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 41792c1839..9347d8384b 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -61,7 +61,7 @@ fn test_try_parse_request() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - let block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &StacksPrivateKey::new()); + let block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &StacksPrivateKey::random()); let proposal = NakamotoBlockProposal { block: block.clone(), chain_id: 0x80000000, diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs index b8f594a90d..b0033493fd 100644 --- a/stackslib/src/net/api/tests/postmempoolquery.rs +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -138,7 +138,7 @@ fn test_stream_mempool_txs() { let mut mempool_tx = mempool.tx_begin().unwrap(); for i in 0..10 { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, @@ -351,7 +351,7 @@ fn test_decode_tx_stream() { let addr = StacksAddress::new(1, Hash160([0xff; 20])).unwrap(); let mut txs = vec![]; for _i in 0..10 { - let pk = StacksPrivateKey::new(); + let pk = StacksPrivateKey::random(); let mut tx = StacksTransaction { version: TransactionVersion::Testnet, chain_id: 0x80000000, diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 740d81b254..fe45b62f12 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -4474,7 +4474,7 @@ mod test { let 
old_peer_1_pubkey = Secp256k1PublicKey::from_private(&old_peer_1_privkey); // peer 1 updates their private key - local_peer_1.private_key = Secp256k1PrivateKey::new(); + local_peer_1.private_key = Secp256k1PrivateKey::random(); // peer 1 re-handshakes // convo_1 sends a handshake to convo_2 @@ -5058,7 +5058,7 @@ mod test { ); // regenerate keys and expiries in peer 1 - let new_privkey = Secp256k1PrivateKey::new(); + let new_privkey = Secp256k1PrivateKey::random(); { let tx = peerdb_1.tx_begin().unwrap(); PeerDB::set_local_private_key(&tx, &new_privkey, (12350 + i) as u64).unwrap(); @@ -7045,7 +7045,7 @@ mod test { // mock a second local peer with a different private key let mut local_peer_2 = local_peer_1.clone(); - local_peer_2.private_key = Secp256k1PrivateKey::new(); + local_peer_2.private_key = Secp256k1PrivateKey::random(); // NOTE: payload can be anything since we only look at premable length here let payload = StacksMessageType::Nack(NackData { error_code: 123 }); @@ -7174,7 +7174,7 @@ mod test { // mock a second local peer with a different private key let mut local_peer_2 = local_peer_1.clone(); - local_peer_2.private_key = Secp256k1PrivateKey::new(); + local_peer_2.private_key = Secp256k1PrivateKey::random(); // NOTE: payload can be anything since we only look at premable length here let payload = StacksMessageType::Nack(NackData { error_code: 123 }); @@ -7303,7 +7303,7 @@ mod test { // mock a second local peer with a different private key let mut local_peer_2 = local_peer_1.clone(); - local_peer_2.private_key = Secp256k1PrivateKey::new(); + local_peer_2.private_key = Secp256k1PrivateKey::random(); // NOTE: payload can be anything since we only look at premable length here let payload = StacksMessageType::Nack(NackData { error_code: 123 }); @@ -7432,7 +7432,7 @@ mod test { // mock a second local peer with a different private key let mut local_peer_2 = local_peer_1.clone(); - local_peer_2.private_key = Secp256k1PrivateKey::new(); + local_peer_2.private_key 
= Secp256k1PrivateKey::random(); // NOTE: payload can be anything since we only look at premable length here let payload = StacksMessageType::Nack(NackData { error_code: 123 }); diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index f431ff91ab..7db39834d7 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -2776,7 +2776,7 @@ pub mod test { #[test] fn codec_sign_and_verify() { - let privkey = Secp256k1PrivateKey::new(); + let privkey = Secp256k1PrivateKey::random(); let pubkey_buf = StacksPublicKeyBuffer::from_public_key(&Secp256k1PublicKey::from_private(&privkey)); @@ -2797,7 +2797,7 @@ pub mod test { #[test] fn codec_stacks_public_key_roundtrip() { for i in 0..100 { - let privkey = Secp256k1PrivateKey::new(); + let privkey = Secp256k1PrivateKey::random(); let pubkey = Secp256k1PublicKey::from_private(&privkey); let pubkey_buf = StacksPublicKeyBuffer::from_public_key(&pubkey); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index eb65e02e61..09465721ba 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -1873,7 +1873,7 @@ mod test { &BurnchainHeaderHash([0x22; 32]), StacksMessageType::Ping(PingData { nonce }), ); - let privkey = Secp256k1PrivateKey::new(); + let privkey = Secp256k1PrivateKey::random(); ping.sign(request_id, &privkey).unwrap(); ping } @@ -1919,7 +1919,7 @@ mod test { StacksMessageType::Ping(PingData { nonce: 0x01020304 }), ); - let privkey = Secp256k1PrivateKey::new(); + let privkey = Secp256k1PrivateKey::random(); ping.sign(1, &privkey).unwrap(); let mut pipes = vec![]; // keep pipes in-scope @@ -2041,7 +2041,7 @@ mod test { #[test] fn connection_relay_send_recv() { - let privkey = Secp256k1PrivateKey::new(); + let privkey = Secp256k1PrivateKey::random(); let pubkey = Secp256k1PublicKey::from_private(&privkey); let neighbor = Neighbor { @@ -2139,7 +2139,7 @@ mod test { #[test] fn connection_send_recv() { with_timeout(100, || { - let privkey 
= Secp256k1PrivateKey::new(); + let privkey = Secp256k1PrivateKey::random(); let pubkey = Secp256k1PublicKey::from_private(&privkey); let neighbor = Neighbor { @@ -2254,7 +2254,7 @@ mod test { #[test] fn connection_send_recv_timeout() { - let privkey = Secp256k1PrivateKey::new(); + let privkey = Secp256k1PrivateKey::random(); let pubkey = Secp256k1PublicKey::from_private(&privkey); let neighbor = Neighbor { diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 74ab57f211..3c31ce41d4 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -140,7 +140,7 @@ impl LocalPeer { data_url: UrlString, stacker_dbs: Vec, ) -> LocalPeer { - let mut pkey = privkey.unwrap_or_default(); + let mut pkey = privkey.unwrap_or(Secp256k1PrivateKey::random()); pkey.set_compress_public(true); let mut rng = thread_rng(); @@ -880,7 +880,7 @@ impl PeerDB { return Err(db_error::Overflow); } - let new_key = Secp256k1PrivateKey::new(); + let new_key = Secp256k1PrivateKey::random(); { let tx = self.tx_begin()?; @@ -1241,7 +1241,7 @@ impl PeerDB { addrbytes: peer_addr.clone(), port: peer_port, }; - let empty_key = StacksPublicKey::from_private(&StacksPrivateKey::new()); + let empty_key = StacksPublicKey::from_private(&StacksPrivateKey::random()); let mut empty_neighbor = Neighbor::empty(&nk, &empty_key, 0); empty_neighbor.allowed = allow_deadline; @@ -1287,7 +1287,7 @@ impl PeerDB { addrbytes: peer_addr.clone(), port: peer_port, }; - let empty_key = StacksPublicKey::from_private(&StacksPrivateKey::new()); + let empty_key = StacksPublicKey::from_private(&StacksPrivateKey::random()); let mut empty_neighbor = Neighbor::empty(&nk, &empty_key, 0); empty_neighbor.denied = deny_deadline as i64; @@ -2307,7 +2307,7 @@ mod test { out_degree: 1, }; - let key1 = Secp256k1PrivateKey::new(); + let key1 = Secp256k1PrivateKey::random(); let path = "/tmp/test-peerdb-try_insert_peer_with_stackerdbs.db".to_string(); if fs::metadata(&path).is_ok() { @@ -2508,7 +2508,7 @@ mod test { 
out_degree: 1, }; - let key1 = Secp256k1PrivateKey::new(); + let key1 = Secp256k1PrivateKey::random(); let path = "/tmp/test-peerdb-find_stacker_db_replicas.db".to_string(); if fs::metadata(&path).is_ok() { @@ -2800,7 +2800,7 @@ mod test { addrbytes: PeerAddress([i as u8; 16]), port: i, }, - public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), + public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::random()), expire_block: (i + 23456) as u64, last_contact_time: (1552509642 + (i as u64)), allowed: (now_secs + 600) as i64, @@ -2820,7 +2820,7 @@ mod test { addrbytes: PeerAddress([i as u8; 16]), port: i, }, - public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), + public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::random()), expire_block: (i + 23456) as u64, last_contact_time: (1552509642 + (i as u64)), allowed: 0, @@ -2901,7 +2901,7 @@ mod test { addrbytes: PeerAddress([i as u8; 16]), port: i, }, - public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), + public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::random()), expire_block: (i + 23456) as u64, last_contact_time: (1552509642 + (i as u64)), allowed: -1, @@ -2922,7 +2922,7 @@ mod test { addrbytes: PeerAddress([i as u8; 16]), port: i, }, - public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), + public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::random()), expire_block: (i + 23456) as u64, last_contact_time: (1552509642 + (i as u64)), allowed: -1, @@ -3503,8 +3503,8 @@ mod test { /// latest key. #[test] fn test_connect_new_key() { - let key1 = Secp256k1PrivateKey::new(); - let key2 = Secp256k1PrivateKey::new(); + let key1 = Secp256k1PrivateKey::random(); + let key2 = Secp256k1PrivateKey::random(); let path = "/tmp/test-connect-new-key.db".to_string(); if fs::metadata(&path).is_ok() { @@ -3571,7 +3571,7 @@ mod test { /// Test DB instantiation -- it must work. 
#[test] fn test_db_instantiation() { - let key1 = Secp256k1PrivateKey::new(); + let key1 = Secp256k1PrivateKey::random(); let path = "/tmp/test-peerdb-instantiation.db".to_string(); if fs::metadata(&path).is_ok() { @@ -3598,7 +3598,7 @@ mod test { /// Test `public` setting in DB migration #[test] fn test_db_schema_3_public_ip_migration() { - let key = Secp256k1PrivateKey::new(); + let key = Secp256k1PrivateKey::random(); let path = "/tmp/test-peerdb-schema-3-public-ip-migration.db".to_string(); if fs::metadata(&path).is_ok() { @@ -3669,12 +3669,12 @@ mod test { for private in private_addrbytes.iter() { neighbor.addr.addrbytes = private.clone(); - neighbor.public_key = Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()); + neighbor.public_key = Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::random()); assert!(PeerDB::try_insert_peer(&tx, &neighbor, &[]).unwrap()); } for public in public_addrbytes.iter() { neighbor.addr.addrbytes = public.clone(); - neighbor.public_key = Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()); + neighbor.public_key = Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::random()); assert!(PeerDB::try_insert_peer(&tx, &neighbor, &[]).unwrap()); } tx.execute("UPDATE frontier SET public = 1", params![]) @@ -3745,7 +3745,7 @@ mod test { /// Verify that multiple peers with the same public key are coalesced by last-contact-time #[test] fn test_query_peers() { - let key = Secp256k1PrivateKey::new(); + let key = Secp256k1PrivateKey::random(); let path = "/tmp/test-query-peers.db".to_string(); if fs::metadata(&path).is_ok() { diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index e64a51626d..d94f45d4d6 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -420,14 +420,13 @@ mod test { client.try_recv().unwrap(); for name in names.iter() { - let name_string = name.to_string(); - if resolved_addrs.contains_key(&name_string) { + if resolved_addrs.contains_key(*name) { continue; } match 
client.poll_lookup(name, 80).unwrap() { Some(addrs) => { test_debug!("name {name} addrs: {addrs:?}"); - resolved_addrs.insert(name_string, addrs); + resolved_addrs.insert(name.to_string(), addrs); break; } None => {} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 1f48c06f0a..29b0e17bdc 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2622,7 +2622,7 @@ pub mod test { network_id: 0x80000000, peer_version: 0x01020304, current_block: start_block + (burnchain.consensus_hash_lifetime + 1) as u64, - private_key: Secp256k1PrivateKey::new(), + private_key: Secp256k1PrivateKey::random(), private_key_expire: start_block + conn_opts.private_key_lifetime, initial_neighbors: vec![], asn4_entries: vec![], diff --git a/stackslib/src/net/stackerdb/tests/db.rs b/stackslib/src/net/stackerdb/tests/db.rs index 287dc0ac71..0153803395 100644 --- a/stackslib/src/net/stackerdb/tests/db.rs +++ b/stackslib/src/net/stackerdb/tests/db.rs @@ -353,7 +353,7 @@ fn test_stackerdb_insert_query_chunks() { let tx = db.tx_begin(db_config.clone()).unwrap(); - let pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let addrs: Vec<_> = pks .iter() .map(|pk| { @@ -506,7 +506,7 @@ fn test_reconfigure_stackerdb() { let tx = db.tx_begin(db_config).unwrap(); - let pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let addrs: Vec<_> = pks .iter() .map(|pk| { @@ -567,7 +567,7 @@ fn test_reconfigure_stackerdb() { } let tx = db.tx_begin(StackerDBConfig::noop()).unwrap(); - let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let reconfigured_pks = vec![ // first five slots are unchanged pks[0], pks[1], pks[2], pks[3], pks[4], @@ -648,7 +648,7 @@ fn test_reconfigure_stackerdb() { } // 
reconfigure with fewer slots - let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let reconfigured_pks = vec![ // first five slots are unchanged pks[0], pks[1], pks[2], pks[3], pks[4], diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index d90f1e72ad..9d0bdbc6b7 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -782,7 +782,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { 4, ); - let mblock_privkey = StacksPrivateKey::new(); + let mblock_privkey = StacksPrivateKey::random(); let mblock_pubkey_hash_bytes = Hash160::from_data( &StacksPublicKey::from_private(&mblock_privkey).to_bytes(), diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 108045a427..0577ef3019 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -144,7 +144,7 @@ impl NakamotoStagingBlocksConnRef<'_> { #[test] fn test_nakamoto_tenure_downloader() { let ch = ConsensusHash([0x11; 20]); - let private_key = StacksPrivateKey::new(); + let private_key = StacksPrivateKey::random(); let mut test_signers = TestSigners::new(vec![]); let reward_set = test_signers.synthesize_reward_set(); @@ -2328,7 +2328,7 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { /// tenure _T + 1_. The unconfirmed downloader should be able to handle this case. 
#[test] fn test_nakamoto_microfork_download_run_2_peers() { - let sender_key = StacksPrivateKey::new(); + let sender_key = StacksPrivateKey::random(); let sender_addr = to_addr(&sender_key); let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; @@ -2509,7 +2509,7 @@ fn test_nakamoto_microfork_download_run_2_peers() { #[test] fn test_nakamoto_download_run_2_peers_with_one_shadow_block() { let observer = TestEventObserver::new(); - let sender_key = StacksPrivateKey::new(); + let sender_key = StacksPrivateKey::random(); let sender_addr = to_addr(&sender_key); let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; let bitvecs = vec![vec![true, true, false, false]]; @@ -2689,7 +2689,7 @@ fn test_nakamoto_download_run_2_peers_with_one_shadow_block() { #[test] fn test_nakamoto_download_run_2_peers_shadow_prepare_phase() { let observer = TestEventObserver::new(); - let sender_key = StacksPrivateKey::new(); + let sender_key = StacksPrivateKey::random(); let sender_addr = to_addr(&sender_key); let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; let bitvecs = vec![vec![true, true]]; @@ -2892,7 +2892,7 @@ fn test_nakamoto_download_run_2_peers_shadow_prepare_phase() { #[test] fn test_nakamoto_download_run_2_peers_shadow_reward_cycles() { let observer = TestEventObserver::new(); - let sender_key = StacksPrivateKey::new(); + let sender_key = StacksPrivateKey::random(); let sender_addr = to_addr(&sender_key); let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; let bitvecs = vec![vec![true, true]]; diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index b8a4987100..c0b0f4ff81 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -60,7 +60,7 @@ pub fn peer_get_nakamoto_invs<'a>( mut peer: TestPeer<'a>, reward_cycles: &[u64], ) -> (TestPeer<'a>, Vec) { - let privk = StacksPrivateKey::new(); 
+ let privk = StacksPrivateKey::random(); let mut convo = peer.make_client_convo(); let client_peer = peer.make_client_local_peer(privk.clone()); let peer_addr = peer.p2p_socketaddr(); @@ -1122,7 +1122,7 @@ fn test_nakamoto_inv_sync_across_epoch_change() { #[test] fn test_nakamoto_make_tenure_inv_in_forks() { - let sender_key = StacksPrivateKey::new(); + let sender_key = StacksPrivateKey::random(); let sender_addr = to_addr(&sender_key); let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; @@ -1739,7 +1739,7 @@ fn test_nakamoto_make_tenure_inv_in_forks() { #[test] fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { - let sender_key = StacksPrivateKey::new(); + let sender_key = StacksPrivateKey::random(); let sender_addr = to_addr(&sender_key); let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; @@ -2187,7 +2187,7 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { #[test] fn test_nakamoto_make_tenure_inv_from_old_tips() { - let sender_key = StacksPrivateKey::new(); + let sender_key = StacksPrivateKey::random(); let sender_addr = to_addr(&sender_key); let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; @@ -2362,7 +2362,7 @@ fn test_nakamoto_make_tenure_inv_from_old_tips() { #[test] fn test_nakamoto_invs_shadow_blocks() { let observer = TestEventObserver::new(); - let sender_key = StacksPrivateKey::new(); + let sender_key = StacksPrivateKey::random(); let sender_addr = to_addr(&sender_key); let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; let mut bitvecs = vec![vec![ diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 8ca254fa75..700a64a739 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -53,7 +53,7 @@ fn test_mempool_sync_2_peers() { peer_2_config.connection_opts.mempool_sync_interval = 1; let num_txs = 10; - let pks: Vec<_> = 
(0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::random()).collect(); let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() @@ -319,7 +319,7 @@ fn test_mempool_sync_2_peers_paginated() { peer_2_config.connection_opts.mempool_sync_interval = 1; let num_txs = 1024; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::random()).collect(); let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() @@ -508,7 +508,7 @@ fn test_mempool_sync_2_peers_blacklisted() { peer_2_config.connection_opts.mempool_sync_interval = 1; let num_txs = 1024; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::random()).collect(); let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() @@ -717,7 +717,7 @@ fn test_mempool_sync_2_peers_problematic() { peer_2_config.connection_opts.mempool_sync_interval = 1; let num_txs = 128; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::random()).collect(); let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() @@ -1089,7 +1089,7 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { vec![true, true, true, true, true, true, true, true, true, true], ]; let num_txs = 1024; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::random()).collect(); let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 7a78a82544..f21aba3cad 100644 --- 
a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -1216,16 +1216,16 @@ fn test_network_result_update() { &BurnchainHeaderHash([0x22; 32]), StacksMessageType::Ping(PingData { nonce: 2 }), ); - msg2.sign(2, &StacksPrivateKey::new()).unwrap(); + msg2.sign(2, &StacksPrivateKey::random()).unwrap(); - let pkey_1 = StacksPrivateKey::new(); - let pkey_2 = StacksPrivateKey::new(); + let pkey_1 = StacksPrivateKey::random(); + let pkey_2 = StacksPrivateKey::random(); - let pushed_pkey_1 = StacksPrivateKey::new(); - let pushed_pkey_2 = StacksPrivateKey::new(); + let pushed_pkey_1 = StacksPrivateKey::random(); + let pushed_pkey_2 = StacksPrivateKey::random(); - let uploaded_pkey_1 = StacksPrivateKey::new(); - let uploaded_pkey_2 = StacksPrivateKey::new(); + let uploaded_pkey_1 = StacksPrivateKey::random(); + let uploaded_pkey_2 = StacksPrivateKey::random(); let blk1 = make_empty_coinbase_block(&pkey_1); let blk2 = make_empty_coinbase_block(&pkey_2); diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 3857be9399..9d3de7aacd 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -1944,7 +1944,7 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { let sent_txs = RefCell::new(vec![]); let done = RefCell::new(false); let num_peers = 3; - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let peers = run_get_blocks_and_microblocks( "test_get_blocks_and_microblocks_peers_broadcast", @@ -2642,9 +2642,9 @@ pub fn make_contract_tx( #[test] fn test_static_problematic_tests() { - let spender_sk_1 = StacksPrivateKey::new(); - let spender_sk_2 = StacksPrivateKey::new(); - let spender_sk_3 = StacksPrivateKey::new(); + let spender_sk_1 = StacksPrivateKey::random(); + let spender_sk_2 = StacksPrivateKey::random(); + let spender_sk_3 = StacksPrivateKey::random(); let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as 
u64) - 1; let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); @@ -2814,7 +2814,7 @@ fn process_new_blocks_rejects_problematic_asts() { let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); - let mblock_privk = StacksPrivateKey::new(); + let mblock_privk = StacksPrivateKey::random(); // make one tenure with a valid block, but problematic microblocks let (burn_ops, block, microblocks) = peer.make_tenure( diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 4505ef8da3..7d36b64310 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -45,7 +45,7 @@ fn test_exact_block_costs() { return; } - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); let spender_addr_c32 = to_addr(&spender_sk); @@ -280,7 +280,7 @@ fn test_dynamic_db_method_costs() { return; } - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); let spender_addr_c32 = to_addr(&spender_sk); let contract_name = "test-contract"; @@ -694,15 +694,15 @@ fn test_cost_limit_switch_version205() { .to_string(); // Create three characters, `creator`, `alice` and `bob`. 
- let creator_sk = StacksPrivateKey::new(); + let creator_sk = StacksPrivateKey::random(); let creator_addr = to_addr(&creator_sk); let creator_pd: PrincipalData = creator_addr.into(); - let alice_sk = StacksPrivateKey::new(); + let alice_sk = StacksPrivateKey::random(); let alice_addr = to_addr(&alice_sk); let alice_pd: PrincipalData = alice_addr.into(); - let bob_sk = StacksPrivateKey::new(); + let bob_sk = StacksPrivateKey::random(); let bob_addr = to_addr(&bob_sk); let bob_pd: PrincipalData = bob_addr.into(); @@ -909,7 +909,7 @@ fn bigger_microblock_streams_in_2_05() { return; } - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 3b8b93f154..68e5f60fd1 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -282,7 +282,7 @@ fn transition_adds_burn_block_height() { // very simple test to verify that after the 2.1 transition, get-burn-block-info? works as // expected - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); let spender_addr_c32 = to_addr(&spender_sk); @@ -682,7 +682,7 @@ fn transition_fixes_bitcoin_rigidity() { // applied, even though it's within 6 blocks of the next Stacks block, which will be in epoch // 2.1. This verifies that the new burnchain consideration window only applies to sortitions // that happen in Stacks 2.1. 
- let recipient_sk = StacksPrivateKey::new(); + let recipient_sk = StacksPrivateKey::random(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { sender: spender_stx_addr, @@ -834,7 +834,7 @@ fn transition_fixes_bitcoin_rigidity() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // let's fire off our transfer op. - let recipient_sk = StacksPrivateKey::new(); + let recipient_sk = StacksPrivateKey::random(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { sender: spender_stx_addr, @@ -1052,7 +1052,7 @@ fn transition_adds_get_pox_addr_recipients() { let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); for _i in 0..7 { - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); spender_sks.push(spender_sk); @@ -1360,7 +1360,7 @@ fn transition_adds_mining_from_segwit() { let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); for _i in 0..7 { - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); spender_sks.push(spender_sk); @@ -1443,7 +1443,7 @@ fn transition_removes_pox_sunset() { return; } - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); let first_bal = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -2026,9 +2026,9 @@ fn test_pox_reorgs_three_flaps() { epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); - let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let stack_privks: Vec<_> = (0..5).map(|_| 
StacksPrivateKey::random()).collect(); let balances: Vec<_> = privks .iter() @@ -2059,7 +2059,7 @@ fn test_pox_reorgs_three_flaps() { let mut miner_status = vec![]; for i in 0..num_miners { - let seed = StacksPrivateKey::new().to_bytes(); + let seed = StacksPrivateKey::random().to_bytes(); let (mut conf, _) = neon_integration_test_conf_with_seed(seed); conf.initial_balances.clear(); @@ -2546,9 +2546,9 @@ fn test_pox_reorg_one_flap() { epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); - let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); let balances: Vec<_> = privks .iter() @@ -2579,7 +2579,7 @@ fn test_pox_reorg_one_flap() { let mut miner_status = vec![]; for i in 0..num_miners { - let seed = StacksPrivateKey::new().to_bytes(); + let seed = StacksPrivateKey::random().to_bytes(); let (mut conf, _) = neon_integration_test_conf_with_seed(seed); conf.initial_balances.clear(); @@ -2950,9 +2950,9 @@ fn test_pox_reorg_flap_duel() { epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); - let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); let balances: Vec<_> = privks .iter() @@ -2983,7 +2983,7 @@ fn test_pox_reorg_flap_duel() { let mut miner_status = vec![]; for i in 0..num_miners { - let seed = StacksPrivateKey::new().to_bytes(); + let seed = StacksPrivateKey::random().to_bytes(); let (mut conf, _) = neon_integration_test_conf_with_seed(seed); 
conf.initial_balances.clear(); @@ -3369,9 +3369,9 @@ fn test_pox_reorg_flap_reward_cycles() { epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); - let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); let balances: Vec<_> = privks .iter() @@ -3402,7 +3402,7 @@ fn test_pox_reorg_flap_reward_cycles() { let mut miner_status = vec![]; for i in 0..num_miners { - let seed = StacksPrivateKey::new().to_bytes(); + let seed = StacksPrivateKey::random().to_bytes(); let (mut conf, _) = neon_integration_test_conf_with_seed(seed); conf.initial_balances.clear(); @@ -3779,9 +3779,9 @@ fn test_pox_missing_five_anchor_blocks() { epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); - let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); let balances: Vec<_> = privks .iter() @@ -3812,7 +3812,7 @@ fn test_pox_missing_five_anchor_blocks() { let mut miner_status = vec![]; for i in 0..num_miners { - let seed = StacksPrivateKey::new().to_bytes(); + let seed = StacksPrivateKey::random().to_bytes(); let (mut conf, _) = neon_integration_test_conf_with_seed(seed); conf.initial_balances.clear(); @@ -4157,9 +4157,9 @@ fn test_sortition_divergence_pre_21() { epochs[StacksEpochId::Epoch21].start_height = 241; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let privks: Vec<_> = (0..5).map(|_| 
StacksPrivateKey::random()).collect(); - let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); let balances: Vec<_> = privks .iter() @@ -4190,7 +4190,7 @@ fn test_sortition_divergence_pre_21() { let mut miner_status = vec![]; for i in 0..num_miners { - let seed = StacksPrivateKey::new().to_bytes(); + let seed = StacksPrivateKey::random().to_bytes(); let (mut conf, _) = neon_integration_test_conf_with_seed(seed); conf.initial_balances.clear(); @@ -4558,7 +4558,7 @@ fn trait_invocation_cross_epoch() { return; } - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); let spender_addr_c32 = to_addr(&spender_sk); @@ -4812,7 +4812,7 @@ fn test_v1_unlock_height_with_current_stackers() { let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); let mut initial_balances = vec![]; @@ -5065,7 +5065,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); let mut initial_balances = vec![]; diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 4d759cba77..c111da98f6 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -59,13 +59,13 @@ fn disable_pox() { let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); 
let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let spender_2_sk = StacksPrivateKey::new(); + let spender_2_sk = StacksPrivateKey::random(); let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); - let spender_3_sk = StacksPrivateKey::new(); + let spender_3_sk = StacksPrivateKey::random(); let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); let mut initial_balances = vec![]; @@ -580,13 +580,13 @@ fn pox_2_unlock_all() { let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let spender_2_sk = StacksPrivateKey::new(); + let spender_2_sk = StacksPrivateKey::random(); let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); - let spender_3_sk = StacksPrivateKey::new(); + let spender_3_sk = StacksPrivateKey::random(); let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); let mut initial_balances = vec![]; @@ -1268,9 +1268,9 @@ fn test_pox_reorg_one_flap() { epochs.truncate_after(StacksEpochId::Epoch22); conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); - let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); let balances: Vec<_> = privks .iter() @@ -1301,7 +1301,7 @@ fn test_pox_reorg_one_flap() { let mut miner_status = vec![]; for i in 0..num_miners { - let seed = StacksPrivateKey::new().to_bytes(); + let seed = StacksPrivateKey::random().to_bytes(); let (mut conf, _) = neon_integration_test_conf_with_seed(seed); conf.initial_balances.clear(); diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index c95d59f797..057669547a 100644 --- 
a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -46,7 +46,7 @@ fn trait_invocation_behavior() { let epoch_2_2 = 235; let epoch_2_3 = 241; - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let contract_addr = to_addr(&spender_sk); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 97ae050759..ffe9572045 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -88,10 +88,10 @@ fn fix_to_pox_contract() { let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let spender_2_sk = StacksPrivateKey::new(); + let spender_2_sk = StacksPrivateKey::random(); let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); let mut initial_balances = vec![]; @@ -695,11 +695,11 @@ fn verify_auto_unlock_behavior() { let first_stacked_incr = 40_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let small_stacked = 17_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let spender_2_sk = StacksPrivateKey::new(); + let spender_2_sk = StacksPrivateKey::random(); let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 7c6fb7a707..3864d9c350 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ 
b/testnet/stacks-node/src/tests/epoch_25.rs @@ -52,10 +52,10 @@ fn microblocks_disabled() { let spender_1_bal = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_2_bal = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let spender_1_sk = StacksPrivateKey::new(); + let spender_1_sk = StacksPrivateKey::random(); let spender_1_addr: PrincipalData = to_addr(&spender_1_sk).into(); - let spender_2_sk = StacksPrivateKey::new(); + let spender_2_sk = StacksPrivateKey::random(); let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); let mut initial_balances = vec![]; diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index ab5989867b..fa83181529 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -61,7 +61,7 @@ pub fn make_bad_stacks_transfer( let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); - tx_signer.sign_origin(&StacksPrivateKey::new()).unwrap(); + tx_signer.sign_origin(&StacksPrivateKey::random()).unwrap(); let mut buf = vec![]; tx_signer diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index be3a4213f6..30c3cfed3b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -855,7 +855,7 @@ pub fn next_block_and_wait_for_commits( } pub fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey { - let stacker_sk = Secp256k1PrivateKey::new(); + let stacker_sk = Secp256k1PrivateKey::random(); let stacker_address = tests::to_addr(&stacker_sk); naka_conf.add_initial_balance( PrincipalData::from(stacker_address).to_string(), @@ -1501,7 +1501,7 @@ fn simple_neon_integration() { let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); - let sender_sk = 
Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; @@ -1510,7 +1510,7 @@ fn simple_neon_integration() { PrincipalData::from(sender_addr).to_string(), send_amt * 2 + send_fee, ); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk]); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); @@ -1760,7 +1760,7 @@ fn flash_blocks_on_epoch_3() { let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; @@ -1769,7 +1769,7 @@ fn flash_blocks_on_epoch_3() { PrincipalData::from(sender_addr).to_string(), send_amt * 2 + send_fee, ); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk]); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); @@ -2015,8 +2015,8 @@ fn mine_multiple_per_tenure_integration() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let 
sender_signer_addr = tests::to_addr(&sender_signer_sk); let tenure_count = 5; let inter_blocks_per_tenure = 9; @@ -2200,8 +2200,8 @@ fn multiple_miners() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.node.pox_sync_sample_secs = 30; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let tenure_count = 15; let inter_blocks_per_tenure = 6; @@ -2469,7 +2469,7 @@ fn correct_burn_outs() { } let stacker_accounts = accounts[0..3].to_vec(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); @@ -2787,7 +2787,7 @@ fn block_proposal_api_endpoint() { conf.connection_options.auth_token = Some(password.clone()); let account_keys = add_initial_balances(&mut conf, 10, 1_000_000); let stacker_sk = setup_stacker(&mut conf); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); @@ -3150,7 +3150,7 @@ fn miner_writes_proposed_block_to_stackerdb() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; @@ -3161,7 +3161,7 @@ fn miner_writes_proposed_block_to_stackerdb() { ); let 
stacker_sk = setup_stacker(&mut naka_conf); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); @@ -3273,7 +3273,7 @@ fn vote_for_aggregate_key_burn_op() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let _http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let signer_sk = Secp256k1PrivateKey::new(); + let signer_sk = Secp256k1PrivateKey::random(); let signer_addr = tests::to_addr(&signer_sk); let mut signers = TestSigners::new(vec![signer_sk]); @@ -3503,8 +3503,8 @@ fn follower_bootup_simple() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; @@ -3824,8 +3824,8 @@ fn follower_bootup_across_multiple_cycles() { naka_conf.node.pox_sync_sample_secs = 180; naka_conf.burnchain.max_rbf = 10_000_000; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; @@ -4025,8 +4025,8 @@ fn follower_bootup_custom_chain_id() { naka_conf.burnchain.chain_id = 0x87654321; let http_origin = 
format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; @@ -4363,16 +4363,16 @@ fn burn_ops_integration_test() { let signer_sk_1 = setup_stacker(&mut naka_conf); let signer_addr_1 = tests::to_addr(&signer_sk_1); - let signer_sk_2 = Secp256k1PrivateKey::new(); + let signer_sk_2 = Secp256k1PrivateKey::random(); let signer_addr_2 = tests::to_addr(&signer_sk_2); - let stacker_sk_1 = Secp256k1PrivateKey::new(); + let stacker_sk_1 = Secp256k1PrivateKey::random(); let stacker_addr_1 = tests::to_addr(&stacker_sk_1); - let stacker_sk_2 = Secp256k1PrivateKey::new(); + let stacker_sk_2 = Secp256k1PrivateKey::random(); let stacker_addr_2 = tests::to_addr(&stacker_sk_2); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let mut sender_nonce = 0; @@ -4972,7 +4972,7 @@ fn forked_tenure_is_ignored() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(10); naka_conf.miner.block_commit_delay = Duration::from_secs(0); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; @@ -4981,7 +4981,7 @@ fn forked_tenure_is_ignored() { PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let 
mut signers = TestSigners::new(vec![sender_signer_sk]); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); @@ -5328,8 +5328,8 @@ fn check_block_heights() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let tenure_count = 5; let inter_blocks_per_tenure = 9; @@ -5755,11 +5755,11 @@ fn nakamoto_attempt_time() { naka_conf.miner.nakamoto_attempt_time_ms = nakamoto_attempt_time_ms; let stacker_sk = setup_stacker(&mut naka_conf); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1_000_000_000); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100_000); @@ -6072,8 +6072,8 @@ fn clarity_burn_state() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let tenure_count = 5; let inter_blocks_per_tenure = 9; @@ -6344,7 +6344,7 @@ fn signer_chainstate() { let http_origin = 
format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; @@ -6353,7 +6353,7 @@ fn signer_chainstate() { PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); @@ -6936,7 +6936,7 @@ fn continue_tenure_extend() { naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.connection_options.block_proposal_max_age_secs = u64::MAX; let http_origin = naka_conf.node.data_url.clone(); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; @@ -6945,7 +6945,7 @@ fn continue_tenure_extend() { PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); @@ -7431,8 +7431,8 @@ fn check_block_times() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; - let 
sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); // setup sender + recipient for some test stx transfers @@ -7830,8 +7830,8 @@ fn check_block_info() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.miner.tenure_cost_limit_per_block_percentage = None; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); // setup sender + recipient for some test stx transfers @@ -8462,8 +8462,8 @@ fn check_block_info_rewards() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); // setup sender + recipient for some test stx transfers @@ -8797,8 +8797,8 @@ fn mock_mining() { naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.node.pox_sync_sample_secs = 30; naka_conf.miner.tenure_cost_limit_per_block_percentage = None; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 3; @@ 
-9237,10 +9237,10 @@ fn v3_signer_api_endpoint() { conf.connection_options.auth_token = Some(password); conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let stacker_sk = setup_stacker(&mut conf); - let signer_sk = Secp256k1PrivateKey::new(); + let signer_sk = Secp256k1PrivateKey::random(); let signer_addr = tests::to_addr(&signer_sk); let signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); @@ -9411,9 +9411,9 @@ fn v3_blockbyheight_api_endpoint() { conf.connection_options.auth_token = Some(password); conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let stacker_sk = setup_stacker(&mut conf); - let signer_sk = Secp256k1PrivateKey::new(); + let signer_sk = Secp256k1PrivateKey::random(); let signer_addr = tests::to_addr(&signer_sk); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); @@ -9536,10 +9536,10 @@ fn nakamoto_lockup_events() { conf.connection_options.auth_token = Some(password); conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let stacker_sk = setup_stacker(&mut conf); - let signer_sk = Secp256k1PrivateKey::new(); + let signer_sk = Secp256k1PrivateKey::random(); let signer_addr = tests::to_addr(&signer_sk); let _signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); @@ -9725,7 +9725,7 @@ 
fn skip_mining_long_tx() { send_amt * 15 + send_fee * 15, ); naka_conf.add_initial_balance(PrincipalData::from(sender_2_addr).to_string(), 10000); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk]); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); @@ -10099,8 +10099,8 @@ fn sip029_coinbase_change() { naka_conf.node.pox_sync_sample_secs = 180; naka_conf.burnchain.max_rbf = 10_000_000; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; @@ -10297,10 +10297,10 @@ fn clarity_cost_spend_down() { let num_signers = 30; naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sks: Vec<_> = (0..num_signers) - .map(|_| Secp256k1PrivateKey::new()) + .map(|_| Secp256k1PrivateKey::random()) .collect(); let sender_signer_sks: Vec<_> = (0..num_signers) - .map(|_| Secp256k1PrivateKey::new()) + .map(|_| Secp256k1PrivateKey::random()) .collect(); let sender_signer_addrs: Vec<_> = sender_signer_sks.iter().map(tests::to_addr).collect(); let sender_addrs: Vec<_> = sender_sks.iter().map(tests::to_addr).collect(); @@ -10589,9 +10589,9 @@ fn consensus_hash_event_dispatcher() { conf.connection_options.auth_token = Some(password.clone()); conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let stacker_sk = setup_stacker(&mut conf); - let signer_sk = Secp256k1PrivateKey::new(); + let signer_sk = Secp256k1PrivateKey::random(); let signer_addr = tests::to_addr(&signer_sk); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = 
Secp256k1PrivateKey::random(); // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); @@ -11140,9 +11140,9 @@ fn mine_invalid_principal_from_consensus_buff() { conf.connection_options.auth_token = Some(password.clone()); conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let stacker_sk = setup_stacker(&mut conf); - let signer_sk = Secp256k1PrivateKey::new(); + let signer_sk = Secp256k1PrivateKey::random(); let signer_addr = tests::to_addr(&signer_sk); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 9c2c71997d..2c224c8e34 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1476,7 +1476,7 @@ fn deep_contract() { ")".repeat(stack_limit + 1) ); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = to_addr(&spender_sk); let spender_princ: PrincipalData = spender_addr.into(); @@ -1656,7 +1656,7 @@ fn liquid_ustx_integration() { (ok stx-liquid-supply)) "; - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = to_addr(&spender_sk); let spender_princ: PrincipalData = spender_addr.into(); @@ -1987,7 +1987,7 @@ fn stx_transfer_btc_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // let's fire off our transfer op. 
- let recipient_sk = StacksPrivateKey::new(); + let recipient_sk = StacksPrivateKey::random(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { sender: spender_stx_addr, @@ -2128,7 +2128,7 @@ fn stx_delegate_btc_integration_test() { let spender_stx_addr: StacksAddress = to_addr(&spender_sk); let spender_addr: PrincipalData = spender_stx_addr.into(); - let recipient_sk = StacksPrivateKey::new(); + let recipient_sk = StacksPrivateKey::random(); let recipient_addr = to_addr(&recipient_sk); let pox_pubkey = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", @@ -2383,7 +2383,7 @@ fn stack_stx_burn_op_test() { let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_stx_addr_2: StacksAddress = to_addr(&spender_sk_2); - let recipient_sk = StacksPrivateKey::new(); + let recipient_sk = StacksPrivateKey::random(); let recipient_addr = to_addr(&recipient_sk); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -4193,7 +4193,7 @@ fn filter_low_fee_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4292,7 +4292,7 @@ fn filter_long_runtime_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4379,7 +4379,7 @@ fn miner_submit_twice() { return; } - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); let contract_content 
= " (define-public (foo (a int)) @@ -4487,7 +4487,7 @@ fn size_check_integration_test() { giant_contract.push(' '); } - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4656,7 +4656,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); + let spender_sks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4860,7 +4860,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..20).map(|_| StacksPrivateKey::new()).collect(); + let spender_sks: Vec<_> = (0..20).map(|_| StacksPrivateKey::random()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5042,7 +5042,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..25).map(|_| StacksPrivateKey::new()).collect(); + let spender_sks: Vec<_> = (0..25).map(|_| StacksPrivateKey::random()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5198,7 +5198,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { return; } - let spender_sks: Vec<_> = (0..4).map(|_| StacksPrivateKey::new()).collect(); + let spender_sks: Vec<_> = (0..4).map(|_| StacksPrivateKey::random()).collect(); let spender_addrs: Vec = 
spender_sks.iter().map(|x| to_addr(x).into()).collect(); let spender_addrs_c32: Vec = spender_sks.iter().map(to_addr).collect(); @@ -5626,7 +5626,7 @@ fn cost_voting_integration() { (ok proposal-id))) "; - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = to_addr(&spender_sk); let spender_princ: PrincipalData = spender_addr.into(); @@ -6221,11 +6221,11 @@ fn block_limit_hit_integration_test() { .join(" "), ); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let addr = to_addr(&spender_sk); - let second_spender_sk = StacksPrivateKey::new(); + let second_spender_sk = StacksPrivateKey::random(); let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into(); - let third_spender_sk = StacksPrivateKey::new(); + let third_spender_sk = StacksPrivateKey::random(); let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -6432,11 +6432,11 @@ fn microblock_limit_hit_integration_test() { .join(" "), ); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let addr = to_addr(&spender_sk); - let second_spender_sk = StacksPrivateKey::new(); + let second_spender_sk = StacksPrivateKey::random(); let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into(); - let third_spender_sk = StacksPrivateKey::new(); + let third_spender_sk = StacksPrivateKey::random(); let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); let (mut conf, _) = neon_integration_test_conf(); @@ -6675,7 +6675,7 @@ fn block_large_tx_integration_test() { .join(" ") ); - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = to_addr(&spender_sk); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -6807,7 +6807,7 @@ fn microblock_large_tx_integration_test_FLAKY() { .join(" ") ); - let 
spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let addr = to_addr(&spender_sk); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -6924,13 +6924,13 @@ fn pox_integration_test() { return; } - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let spender_2_sk = StacksPrivateKey::new(); + let spender_2_sk = StacksPrivateKey::random(); let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); - let spender_3_sk = StacksPrivateKey::new(); + let spender_3_sk = StacksPrivateKey::random(); let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); let pox_pubkey = Secp256k1PublicKey::from_hex( @@ -6939,7 +6939,7 @@ fn pox_integration_test() { .unwrap(); let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); - let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::new()); + let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::random()); let pox_2_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_2_pubkey).to_bytes()); let pox_2_address = BitcoinAddress::from_bytes_legacy( @@ -7449,7 +7449,7 @@ fn atlas_integration_test() { return; } - let user_1 = StacksPrivateKey::new(); + let user_1 = StacksPrivateKey::random(); let initial_balance_user_1 = InitialBalance { address: to_addr(&user_1).into(), amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), @@ -7865,7 +7865,7 @@ fn atlas_integration_test() { // executing the transactions, once mined. 
let namespace = "passport"; for i in 1..10 { - let user = StacksPrivateKey::new(); + let user = StacksPrivateKey::random(); let zonefile_hex = format!("facade0{i}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); let name = format!("johndoe{i}"); @@ -7968,7 +7968,7 @@ fn antientropy_integration_test() { return; } - let user_1 = StacksPrivateKey::new(); + let user_1 = StacksPrivateKey::random(); let initial_balance_user_1 = InitialBalance { address: to_addr(&user_1).into(), amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), @@ -8248,7 +8248,7 @@ fn atlas_stress_integration_test() { let batch_size = 20; for _i in 0..(2 * batches * batch_size + 1) { - let user = StacksPrivateKey::new(); + let user = StacksPrivateKey::random(); let initial_balance_user = InitialBalance { address: to_addr(&user).into(), amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), @@ -8993,7 +8993,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value (ok (var-get counter)))) "#; - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = to_addr(&spender_sk); let (mut conf, _) = neon_integration_test_conf(); @@ -11521,7 +11521,7 @@ fn test_competing_miners_build_on_same_chain( return; } - let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::new()).collect(); + let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::random()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -11538,7 +11538,7 @@ fn test_competing_miners_build_on_same_chain( let mut blocks_processed = vec![]; for _i in 0..num_miners { - let seed = StacksPrivateKey::new().to_bytes(); + let seed = StacksPrivateKey::random().to_bytes(); let (mut conf, _) = neon_integration_test_conf_with_seed(seed); conf.initial_balances.append(&mut balances.clone()); @@ -11777,7 +11777,7 @@ fn microblock_miner_multiple_attempts() { conf.burnchain.max_rbf = 1000000; 
conf.node.wait_time_for_blocks = 1_000; - let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::new()).collect(); + let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::random()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -11859,7 +11859,7 @@ fn min_txs() { return; } - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = to_addr(&spender_sk); let spender_princ: PrincipalData = spender_addr.into(); @@ -11962,7 +11962,7 @@ fn filter_txs_by_type() { return; } - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = to_addr(&spender_sk); let spender_princ: PrincipalData = spender_addr.into(); @@ -12075,7 +12075,7 @@ fn filter_txs_by_origin() { return; } - let spender_sk = StacksPrivateKey::new(); + let spender_sk = StacksPrivateKey::random(); let spender_addr = to_addr(&spender_sk); let spender_princ: PrincipalData = spender_addr.into(); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index af33cf6841..6ef2431a3a 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -147,7 +147,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockResponse { let mut stackerdb = StackerDB::new( &self.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is + StacksPrivateKey::random(), // We are just reading so don't care what the key is false, self.get_current_reward_cycle(), SignerSlotID(0), // We are just reading so again, don't care about index. 
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6869b598d7..f9050644dc 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -825,7 +825,7 @@ fn reloads_signer_set_in() { .init(); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -941,7 +941,7 @@ fn forked_tenure_testing( .init(); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -1279,7 +1279,7 @@ fn bitcoind_forking_test() { } let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -1511,7 +1511,7 @@ fn multiple_miners() { } let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -1784,7 +1784,7 @@ fn miner_forking() { } let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -2248,7 +2248,7 @@ fn end_of_tenure() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -2396,7 +2396,7 @@ fn retry_on_rejection() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = 
Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -2528,7 +2528,7 @@ fn signers_broadcast_signed_blocks() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -2623,7 +2623,7 @@ fn tenure_extend_after_idle_signers() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -2675,7 +2675,7 @@ fn tenure_extend_after_idle_miner() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -2751,7 +2751,7 @@ fn tenure_extend_succeeds_after_rejected_attempt() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -2841,7 +2841,7 @@ fn stx_transfers_dont_effect_idle_timeout() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -2964,9 +2964,9 @@ fn idle_tenure_extend_active_mining() { info!("------------------------- Test Setup -------------------------"); 
let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); - let deployer_sk = Secp256k1PrivateKey::new(); + let deployer_sk = Secp256k1PrivateKey::random(); let deployer_addr = tests::to_addr(&deployer_sk); let send_amt = 100; let send_fee = 180; @@ -3224,7 +3224,7 @@ fn empty_sortition() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -3332,7 +3332,7 @@ fn empty_sortition() { let reward_cycle = signer_test.get_current_reward_cycle(); let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is + StacksPrivateKey::random(), // We are just reading so don't care what the key is false, reward_cycle, SignerSlotID(0), // We are just reading so again, don't care about index. 
@@ -3408,7 +3408,7 @@ fn empty_sortition_before_approval() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -3563,7 +3563,7 @@ fn empty_sortition_before_proposal() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -3715,7 +3715,7 @@ fn mock_sign_epoch_25() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -3839,7 +3839,7 @@ fn multiple_miners_mock_sign_epoch_25() { } let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -4046,14 +4046,14 @@ fn signer_set_rollover() { let new_num_signers = 4; let new_signer_private_keys: Vec<_> = (0..new_num_signers) - .map(|_| StacksPrivateKey::new()) + .map(|_| StacksPrivateKey::random()) .collect(); let new_signer_public_keys: Vec<_> = new_signer_private_keys .iter() .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) .collect(); let new_signer_addresses: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -4342,7 +4342,7 @@ fn min_gap_between_blocks() { info!("------------------------- 
Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -4463,7 +4463,7 @@ fn duplicate_signers() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let mut signer_stacks_private_keys = (0..num_signers) - .map(|_| StacksPrivateKey::new()) + .map(|_| StacksPrivateKey::random()) .collect::>(); // First two signers have same private key @@ -4558,7 +4558,7 @@ fn multiple_miners_with_nakamoto_blocks() { let inter_blocks_per_tenure = 5; // setup sender + recipient for a test stx transfer - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; let send_fee = 180; @@ -4826,7 +4826,7 @@ fn partial_tenure_fork() { let inter_blocks_per_tenure = 5; // setup sender + recipient for a test stx transfer - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; let send_fee = 180; @@ -5306,7 +5306,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -5487,7 +5487,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -5705,7 +5705,7 @@ fn 
reorg_locally_accepted_blocks_across_tenures_succeeds() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -5925,7 +5925,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -6141,7 +6141,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -6460,7 +6460,7 @@ fn continue_after_fast_block_no_sortition() { let num_signers = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -7006,7 +7006,7 @@ fn continue_after_tenure_extend() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let send_amt = 100; @@ -7238,7 +7238,7 @@ fn multiple_miners_with_custom_chain_id() { let inter_blocks_per_tenure = 5; // setup sender + recipient for a test stx transfer - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = 
Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; let send_fee = 180; @@ -7639,7 +7639,7 @@ fn block_validation_response_timeout() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let timeout = Duration::from_secs(30); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -7814,7 +7814,7 @@ fn block_validation_pending_table() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let timeout = Duration::from_secs(30); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -7974,7 +7974,7 @@ fn new_tenure_while_validating_previous_scenario() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let timeout = Duration::from_secs(30); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -8141,7 +8141,7 @@ fn tenure_extend_after_failed_miner() { let num_signers = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -8503,7 +8503,7 @@ fn tenure_extend_after_bad_commit() { let num_signers = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -8985,7 +8985,7 @@ fn tenure_extend_after_2_bad_commits() { let 
num_signers = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -9676,7 +9676,7 @@ fn global_acceptance_depends_on_block_announcement() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -9931,7 +9931,7 @@ fn no_reorg_due_to_successive_block_validation_ok() { let num_signers = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -10464,7 +10464,7 @@ fn incoming_signers_ignore_block_proposals() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -10526,7 +10526,7 @@ fn incoming_signers_ignore_block_proposals() { let blocks_before = mined_blocks.load(Ordering::SeqCst); let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is + StacksPrivateKey::random(), // We are just reading so don't care what the key is false, next_reward_cycle, SignerSlotID(0), // We are just reading so again, don't care about index. 
@@ -10636,7 +10636,7 @@ fn outgoing_signers_ignore_block_proposals() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -10701,7 +10701,7 @@ fn outgoing_signers_ignore_block_proposals() { let blocks_before = mined_blocks.load(Ordering::SeqCst); let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is + StacksPrivateKey::random(), // We are just reading so don't care what the key is false, old_reward_cycle, SignerSlotID(0), // We are just reading so again, don't care about index. @@ -10808,8 +10808,10 @@ fn injected_signatures_are_ignored_across_boundaries() { info!("------------------------- Test Setup -------------------------"); let num_signers = 4; let new_num_signers = 5_usize; - let signer_private_keys: Vec<_> = (0..num_signers).map(|_| StacksPrivateKey::new()).collect(); - let new_signer_private_key = StacksPrivateKey::new(); + let signer_private_keys: Vec<_> = (0..num_signers) + .map(|_| StacksPrivateKey::random()) + .collect(); + let new_signer_private_key = StacksPrivateKey::random(); let mut new_signer_private_keys = signer_private_keys.clone(); new_signer_private_keys.push(new_signer_private_key); @@ -10818,7 +10820,7 @@ fn injected_signatures_are_ignored_across_boundaries() { .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) .collect(); let new_signer_addresses: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -11211,7 +11213,7 @@ 
fn rejected_blocks_count_towards_miner_validity() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; @@ -11372,7 +11374,7 @@ fn fast_sortition() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let mut sender_nonce = 0; @@ -11455,7 +11457,7 @@ fn multiple_miners_empty_sortition() { return; } let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_fee = 180; @@ -11729,7 +11731,7 @@ fn single_miner_empty_sortition() { return; } let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_fee = 180; @@ -12014,7 +12016,7 @@ fn allow_reorg_within_first_proposal_burn_block_timing_secs() { let num_signers = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let mut sender_nonce = 0; let send_amt = 100; From 0c5c2c52f23ba2f3a43990ca49e06816f6fc2027 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 12:43:54 -0500 Subject: [PATCH 15/16] chore: Apply Clippy lint `match_like_matches_macro` --- stackslib/src/burnchains/bitcoin/address.rs | 18 +--- stackslib/src/burnchains/mod.rs | 8 +- .../burn/operations/delegate_stx.rs | 20 +---- .../burn/operations/leader_block_commit.rs | 6 +- stackslib/src/chainstate/stacks/auth.rs | 10 +-- 
stackslib/src/chainstate/stacks/index/node.rs | 25 ++---- stackslib/src/chainstate/stacks/miner.rs | 10 +-- stackslib/src/chainstate/stacks/mod.rs | 10 +-- .../stacks/tests/block_construction.rs | 10 +-- .../src/chainstate/stacks/transaction.rs | 13 ++- stackslib/src/clarity_cli.rs | 90 ++++--------------- stackslib/src/net/atlas/download.rs | 14 +-- stackslib/src/net/codec.rs | 7 +- stackslib/src/net/http/common.rs | 6 +- stackslib/src/net/tests/inv/nakamoto.rs | 10 +-- 15 files changed, 59 insertions(+), 198 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/address.rs b/stackslib/src/burnchains/bitcoin/address.rs index 4cbc1ce80d..ae63aa98c3 100644 --- a/stackslib/src/burnchains/bitcoin/address.rs +++ b/stackslib/src/burnchains/bitcoin/address.rs @@ -396,27 +396,15 @@ impl SegwitBitcoinAddress { } pub fn is_p2wpkh(&self) -> bool { - if let SegwitBitcoinAddress::P2WPKH(..) = self { - true - } else { - false - } + matches!(self, SegwitBitcoinAddress::P2WPKH(..)) } pub fn is_p2wsh(&self) -> bool { - if let SegwitBitcoinAddress::P2WSH(..) = self { - true - } else { - false - } + matches!(self, SegwitBitcoinAddress::P2WSH(..)) } pub fn is_p2tr(&self) -> bool { - if let SegwitBitcoinAddress::P2TR(..) 
= self { - true - } else { - false - } + matches!(self, SegwitBitcoinAddress::P2TR(..)) } } diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 0ec69454c4..b1d4a103ce 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -150,10 +150,10 @@ impl BurnchainParameters { } pub fn is_testnet(network_id: u32) -> bool { - match network_id { - BITCOIN_NETWORK_ID_TESTNET | BITCOIN_NETWORK_ID_REGTEST => true, - _ => false, - } + matches!( + network_id, + BITCOIN_NETWORK_ID_TESTNET | BITCOIN_NETWORK_ID_REGTEST + ) } } diff --git a/stackslib/src/chainstate/burn/operations/delegate_stx.rs b/stackslib/src/chainstate/burn/operations/delegate_stx.rs index dd9badba22..93c254cca3 100644 --- a/stackslib/src/chainstate/burn/operations/delegate_stx.rs +++ b/stackslib/src/chainstate/burn/operations/delegate_stx.rs @@ -457,10 +457,7 @@ mod tests { &sender, ) .unwrap_err(); - assert!(match err { - op_error::ParseError => true, - _ => false, - }); + assert!(matches!(err, op_error::ParseError)); // Data is length 17. The 16th byte is set to 1, which signals that until_burn_height // is Some(u64), so the deserialize function expects another 8 bytes @@ -496,10 +493,7 @@ mod tests { &sender, ) .unwrap_err(); - assert!(match err { - op_error::ParseError => true, - _ => false, - }); + assert!(matches!(err, op_error::ParseError)); } // This test sets the op code to the op code of the StackStx @@ -540,10 +534,7 @@ mod tests { ) .unwrap_err(); - assert!(match err { - op_error::InvalidInput => true, - _ => false, - }); + assert!(matches!(err, op_error::InvalidInput)); } // This test constructs a tx with zero outputs, which causes @@ -576,10 +567,7 @@ mod tests { ) .unwrap_err(); - assert!(match err { - op_error::InvalidInput => true, - _ => false, - }); + assert!(matches!(err, op_error::InvalidInput)); } // Parse a normal DelegateStx op in which the reward_addr is set to output index 2. 
diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index f996fd295a..33f8dd3af0 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -1280,11 +1280,7 @@ mod tests { ) .unwrap_err(); - assert!(if let op_error::BlockCommitBadOutputs = err { - true - } else { - false - }); + assert!(matches!(err, op_error::BlockCommitBadOutputs)); // should succeed in epoch 2.1 -- can be PoX in 2.1 let _op = LeaderBlockCommitOp::parse_from_tx( diff --git a/stackslib/src/chainstate/stacks/auth.rs b/stackslib/src/chainstate/stacks/auth.rs index a10925b5a4..386902b1d1 100644 --- a/stackslib/src/chainstate/stacks/auth.rs +++ b/stackslib/src/chainstate/stacks/auth.rs @@ -1256,17 +1256,11 @@ impl TransactionAuth { } pub fn is_standard(&self) -> bool { - match *self { - TransactionAuth::Standard(_) => true, - _ => false, - } + matches!(self, TransactionAuth::Standard(_)) } pub fn is_sponsored(&self) -> bool { - match *self { - TransactionAuth::Sponsored(_, _) => true, - _ => false, - } + matches!(self, TransactionAuth::Sponsored(..)) } /// When beginning to sign a sponsored transaction, the origin account will not commit to any diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index 54480b43bd..2f577f0cb0 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -1240,38 +1240,23 @@ macro_rules! 
with_node { impl TrieNodeType { pub fn is_leaf(&self) -> bool { - match self { - TrieNodeType::Leaf(_) => true, - _ => false, - } + matches!(self, TrieNodeType::Leaf(_)) } pub fn is_node4(&self) -> bool { - match self { - TrieNodeType::Node4(_) => true, - _ => false, - } + matches!(self, TrieNodeType::Node4(_)) } pub fn is_node16(&self) -> bool { - match self { - TrieNodeType::Node16(_) => true, - _ => false, - } + matches!(self, TrieNodeType::Node16(_)) } pub fn is_node48(&self) -> bool { - match self { - TrieNodeType::Node48(_) => true, - _ => false, - } + matches!(self, TrieNodeType::Node48(_)) } pub fn is_node256(&self) -> bool { - match self { - TrieNodeType::Node256(_) => true, - _ => false, - } + matches!(self, TrieNodeType::Node256(_)) } pub fn id(&self) -> u8 { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 3c3211672d..49789bb8eb 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -551,10 +551,7 @@ impl TransactionResult { /// Returns true iff this enum is backed by `TransactionSuccess`. pub fn is_ok(&self) -> bool { - match &self { - TransactionResult::Success(_) => true, - _ => false, - } + matches!(self, TransactionResult::Success(_)) } /// Returns a TransactionSuccess result as a pair of 1) fee and 2) receipt. @@ -568,10 +565,7 @@ impl TransactionResult { /// Returns true iff this enum is backed by `Error`. pub fn is_err(&self) -> bool { - match &self { - TransactionResult::ProcessingError(_) => true, - _ => false, - } + matches!(self, TransactionResult::ProcessingError(_)) } /// Returns an Error result as an Error. 
diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index dd4191a578..24434d4e95 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -461,17 +461,11 @@ pub enum TransactionAuthField { impl TransactionAuthField { pub fn is_public_key(&self) -> bool { - match *self { - TransactionAuthField::PublicKey(_) => true, - _ => false, - } + matches!(self, TransactionAuthField::PublicKey(_)) } pub fn is_signature(&self) -> bool { - match *self { - TransactionAuthField::Signature(_, _) => true, - _ => false, - } + matches!(self, TransactionAuthField::Signature(..)) } pub fn as_public_key(&self) -> Option { diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 5e368054fa..6829b7860d 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -1277,20 +1277,14 @@ fn test_build_anchored_blocks_incrementing_nonces() { // because the tx fee for each transaction increases with the nonce for (i, tx) in stacks_block.txs.iter().enumerate() { if i == 0 { - let okay = if let TransactionPayload::Coinbase(..) = tx.payload { - true - } else { - false - }; + let okay = matches!(tx.payload, TransactionPayload::Coinbase(..)); assert!(okay, "Coinbase should be first tx"); } else { let expected_nonce = (i - 1) % 25; assert_eq!( tx.get_origin_nonce(), expected_nonce as u64, - "{}th transaction should have nonce = {}", - i, - expected_nonce + "{i}th transaction should have nonce = {expected_nonce}", ); } } diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index d778ea0068..3c7a1c3385 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -1030,10 +1030,7 @@ impl StacksTransaction { /// Is this a mainnet transaction? 
false means 'testnet' pub fn is_mainnet(&self) -> bool { - match self.version { - TransactionVersion::Mainnet => true, - _ => false, - } + self.version == TransactionVersion::Mainnet } /// Is this a phantom transaction? @@ -3993,10 +3990,10 @@ mod test { TransactionAuth::Standard(origin) => origin, TransactionAuth::Sponsored(_, sponsor) => sponsor, }; - match spending_condition { - TransactionSpendingCondition::OrderIndependentMultisig(..) => true, - _ => false, - } + matches!( + spending_condition, + TransactionSpendingCondition::OrderIndependentMultisig(..) + ) } fn check_oversign_origin_multisig(signed_tx: &StacksTransaction) { diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 45bc67efa9..32ef034098 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -1001,11 +1001,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let mut argv = args.to_vec(); - let mainnet = if let Ok(Some(_)) = consume_arg(&mut argv, &["--testnet"], false) { - false - } else { - true - }; + let mainnet = !matches!(consume_arg(&mut argv, &["--testnet"], false), Ok(Some(_))); let (db_name, allocations) = if argv.len() == 3 { let filename = &argv[1]; @@ -1147,11 +1143,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option (i32, Option { let mut argv = args.to_vec(); - let mainnet = if let Ok(Some(_)) = consume_arg(&mut argv, &["--testnet"], false) { - false - } else { - true - }; + let mainnet = !matches!(consume_arg(&mut argv, &["--testnet"], false), Ok(Some(_))); let mut marf = MemoryBackingStore::new(); let mut vm_env = OwnedEnvironment::new_free( mainnet, @@ -1384,11 +1372,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let mut argv = args.to_vec(); - let costs = if let Ok(Some(_)) = consume_arg(&mut argv, &["--costs"], false) { - true - } else { - false - }; + let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); let evalInput 
= get_eval_input(invoked_by, &argv); let vm_filename = if argv.len() == 3 { &argv[2] } else { &argv[3] }; @@ -1447,16 +1431,8 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let mut argv = args.to_vec(); - let costs = if let Ok(Some(_)) = consume_arg(&mut argv, &["--costs"], false) { - true - } else { - false - }; - let coverage_folder = if let Ok(covarg) = consume_arg(&mut argv, &["--c"], true) { - covarg - } else { - None - }; + let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); + let coverage_folder = consume_arg(&mut argv, &["--c"], true).unwrap_or(None); let evalInput = get_eval_input(invoked_by, &argv); let vm_filename = if argv.len() == 3 { &argv[2] } else { &argv[3] }; @@ -1529,11 +1505,7 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let mut argv = args.to_vec(); - let costs = if let Ok(Some(_)) = consume_arg(&mut argv, &["--costs"], false) { - true - } else { - false - }; + let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); if argv.len() != 4 { eprintln!( @@ -1610,27 +1582,15 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let mut argv = args.to_vec(); - let coverage_folder = if let Ok(covarg) = consume_arg(&mut argv, &["--c"], true) { - covarg - } else { - None - }; - let costs = if let Ok(Some(_)) = consume_arg(&mut argv, &["--costs"], false) { - true - } else { - false - }; - let assets = if let Ok(Some(_)) = consume_arg(&mut argv, &["--assets"], false) { - true - } else { - false - }; - let output_analysis = - if let Ok(Some(_)) = consume_arg(&mut argv, &["--output_analysis"], false) { - true - } else { - false - }; + let coverage_folder = consume_arg(&mut argv, &["--c"], true).unwrap_or(None); + + let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); + let assets = matches!(consume_arg(&mut argv, &["--assets"], false), Ok(Some(_))); + let output_analysis = matches!( + 
consume_arg(&mut argv, &["--output_analysis"], false), + Ok(Some(_)) + ); + if argv.len() < 4 { eprintln!( "Usage: {} {} [--costs] [--assets] [--output_analysis] [contract-identifier] [contract-definition.clar] [vm-state.db]", @@ -1765,22 +1725,10 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option { let mut argv = args.to_vec(); - let coverage_folder = if let Ok(covarg) = consume_arg(&mut argv, &["--c"], true) { - covarg - } else { - None - }; + let coverage_folder = consume_arg(&mut argv, &["--c"], true).unwrap_or(None); - let costs = if let Ok(Some(_)) = consume_arg(&mut argv, &["--costs"], false) { - true - } else { - false - }; - let assets = if let Ok(Some(_)) = consume_arg(&mut argv, &["--assets"], false) { - true - } else { - false - }; + let costs = matches!(consume_arg(&mut argv, &["--costs"], false), Ok(Some(_))); + let assets = matches!(consume_arg(&mut argv, &["--assets"], false), Ok(Some(_))); if argv.len() < 5 { eprintln!("Usage: {} {} [--costs] [--assets] [vm-state.db] [contract-identifier] [public-function-name] [sender-address] [args...]", invoked_by, argv[0]); diff --git a/stackslib/src/net/atlas/download.rs b/stackslib/src/net/atlas/download.rs index 7282ef0f5a..77f414dcb0 100644 --- a/stackslib/src/net/atlas/download.rs +++ b/stackslib/src/net/atlas/download.rs @@ -442,16 +442,10 @@ impl AttachmentsBatchStateContext { .iter() .position(|page| page.index == page_index); - let has_attachment = match index { - Some(index) => match response.pages[index] - .inventory - .get(position_in_page as usize) - { - Some(result) if *result == 1 => true, - _ => false, - }, - None => false, - }; + let has_attachment = index + .and_then(|i| response.pages[i].inventory.get(position_in_page as usize)) + .map(|result| *result == 1) + .unwrap_or(false); if !has_attachment { debug!( diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index f431ff91ab..c2ff06206a 100644 --- a/stackslib/src/net/codec.rs +++ 
b/stackslib/src/net/codec.rs @@ -1641,13 +1641,10 @@ pub mod test { fn check_deserialize(r: Result) -> bool { match r { Ok(m) => { - test_debug!("deserialized {:?}", &m); + test_debug!("deserialized {m:?}"); false } - Err(e) => match e { - codec_error::DeserializeError(_) => true, - _ => false, - }, + Err(e) => matches!(e, codec_error::DeserializeError(_)), } } diff --git a/stackslib/src/net/http/common.rs b/stackslib/src/net/http/common.rs index ced3d9a52c..b289c18ae7 100644 --- a/stackslib/src/net/http/common.rs +++ b/stackslib/src/net/http/common.rs @@ -46,11 +46,7 @@ pub enum HttpReservedHeader { impl HttpReservedHeader { pub fn is_reserved(header: &str) -> bool { - let hdr = header.to_string(); - match hdr.as_str() { - "content-length" | "content-type" | "host" => true, - _ => false, - } + matches!(header, "content-length" | "content-type" | "host") } pub fn try_from_str(header: &str, value: &str) -> Option { diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index b8a4987100..0c383a6518 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -126,16 +126,12 @@ pub fn peer_get_nakamoto_invs<'a>( loop { // read back the message let msg: StacksMessage = read_next(&mut tcp_socket).unwrap(); - let is_inv_reply = if let StacksMessageType::NakamotoInv(..) 
= &msg.payload { - true - } else { - false - }; - if is_inv_reply { + + if matches!(&msg.payload, StacksMessageType::NakamotoInv(..)) { replies.push(msg.payload); break; } else { - debug!("Got spurious meessage {:?}", &msg); + debug!("Got spurious meessage {msg:?}"); } } } From 14157325d6a6741d4ea1bec7c13c80b6ba34c359 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 21:00:09 -0500 Subject: [PATCH 16/16] fix: `cargo clippy-stacks` errors --- clarity/src/libclarity.rs | 3 +-- clarity/src/vm/contexts.rs | 4 ++-- clarity/src/vm/test_util/mod.rs | 2 +- clarity/src/vm/tests/simple_apply_eval.rs | 10 ++++------ clarity/src/vm/types/mod.rs | 5 ++--- 5 files changed, 10 insertions(+), 14 deletions(-) diff --git a/clarity/src/libclarity.rs b/clarity/src/libclarity.rs index 9f1a0a06ba..7ce2a4f903 100644 --- a/clarity/src/libclarity.rs +++ b/clarity/src/libclarity.rs @@ -60,8 +60,7 @@ pub mod boot_util { pub fn boot_code_id(name: &str, mainnet: bool) -> QualifiedContractIdentifier { let addr = boot_code_addr(mainnet); QualifiedContractIdentifier::new( - addr.try_into() - .expect("FATAL: boot contract addr is not a legal principal"), + addr.into(), ContractName::try_from(name.to_string()) .expect("FATAL: boot contract name is not a legal ContractName"), ) diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index c716538f6d..a9779e96e6 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -2140,8 +2140,8 @@ mod test { // not simply rollback the tx and squelch the error as includable. 
let e = env .stx_transfer( - &PrincipalData::try_from(u1).unwrap(), - &PrincipalData::try_from(u2).unwrap(), + &PrincipalData::from(u1), + &PrincipalData::from(u2), 1000, &BuffData::empty(), ) diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 07e557119c..37a40182eb 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -108,7 +108,7 @@ impl From<&StacksPrivateKey> for StandardPrincipalData { &vec![StacksPublicKey::from_private(o)], ) .unwrap(); - StandardPrincipalData::try_from(stacks_addr).unwrap() + StandardPrincipalData::from(stacks_addr) } } diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 861cf60224..ceeb7f9ddb 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -430,7 +430,7 @@ fn test_secp256k1() { ) .unwrap(); eprintln!("addr from privk {:?}", &addr); - let principal = addr.try_into().unwrap(); + let principal = addr.into(); if let PrincipalData::Standard(data) = principal { eprintln!("test_secp256k1 principal {:?}", data.to_address()); } @@ -446,7 +446,7 @@ fn test_secp256k1() { ) .unwrap(); eprintln!("addr from hex {:?}", addr); - let principal: PrincipalData = addr.try_into().unwrap(); + let principal: PrincipalData = addr.into(); if let PrincipalData::Standard(data) = principal.clone() { eprintln!("test_secp256k1 principal {:?}", data.to_address()); } @@ -491,8 +491,7 @@ fn test_principal_of_fix() { .unwrap()], ) .unwrap() - .try_into() - .unwrap(); + .into(); let testnet_principal: PrincipalData = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -503,8 +502,7 @@ fn test_principal_of_fix() { .unwrap()], ) .unwrap() - .try_into() - .unwrap(); + .into(); // Clarity2, mainnet, should have a mainnet principal. 
assert_eq!( diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 804d5f2eb1..d34a9cdf70 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -1531,9 +1531,8 @@ impl From for StacksAddress { fn from(o: StandardPrincipalData) -> StacksAddress { // should be infallible because it's impossible to construct a StandardPrincipalData with // an unsupported version byte - StacksAddress::new(o.version(), hash::Hash160(o.1)).unwrap_or_else(|_| { - panic!("FATAL: could not convert a StandardPrincipalData to StacksAddress") - }) + StacksAddress::new(o.version(), hash::Hash160(o.1)) + .expect("FATAL: could not convert a StandardPrincipalData to StacksAddress") } }