diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..94621b33d3 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,16 @@ +# These owners will be the default owners for everything in +# the repo. Unless a later match takes precedence, +# +# require both blockchain-team-codeowners and blockchain-team to review all PR's not specifically matched below +* @stacks-network/blockchain-team-codeowners @stacks-network/blockchain-team + + +# Signer code +# require both blockchain-team-codeowners and blockchain-team-signer to review PR's for the signer folder(s) +libsigner/**/*.rs @stacks-network/blockchain-team-codeowners @stacks-network/blockchain-team-signer +stacks-signer/**/*.rs @stacks-network/blockchain-team-codeowners @stacks-network/blockchain-team-signer + +# CI workflows +# require both blockchain-team and blockchain-team-ci teams to review PR's modifying CI workflows +/.github/workflows/ @stacks-network/blockchain-team @stacks-network/blockchain-team-ci +/.github/actions/ @stacks-network/blockchain-team @stacks-network/blockchain-team-ci diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6446197e42..cac4627619 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -94,6 +94,7 @@ jobs: needs: - rustfmt - check-release + secrets: inherit uses: ./.github/workflows/github-release.yml with: node_tag: ${{ needs.check-release.outputs.node_tag }} diff --git a/.github/workflows/clarity-js-sdk-pr.yml b/.github/workflows/clarity-js-sdk-pr.yml deleted file mode 100644 index e369f8a583..0000000000 --- a/.github/workflows/clarity-js-sdk-pr.yml +++ /dev/null @@ -1,65 +0,0 @@ -## -## Auto-opens a PR on the clarity-js-sdk repo to update the binary reference when a new release is published. 
-## - -name: Open Clarity JS SDK PR - -defaults: - run: - shell: bash - -env: - CLARITY_JS_SDK_REPOSITORY: stacks-network/clarity-js-sdk - COMMIT_USER: Hiro DevOps - COMMIT_EMAIL: 45208873+blockstack-devops@users.noreply.github.com -on: - release: - types: - - released - workflow_dispatch: - -jobs: - run: - name: Open Clarity JS SDK PR - runs-on: ubuntu-latest - # This condition can be removed once the main `stacks-core` workflow creates pre-releases - # when appropriate, instead of full releases for every tag passed in. - if: "!contains(github.ref, '-rc')" - steps: - - name: Checkout latest clarity js sdk - id: git_checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - token: ${{ secrets.GH_TOKEN }} - repository: ${{ env.CLARITY_JS_SDK_REPOSITORY }} - ref: master - - - name: Determine Release Version - id: get_release_version - run: | - RELEASE_VERSION=$(echo ${GITHUB_REF#refs/*/} | tr / -) - echo "RELEASE_VERSION=$RELEASE_VERSION" >> $GITHUB_ENV - - - name: Update SDK Tag - id: update_sdk_tag - run: sed -i "s@CORE_SDK_TAG = \".*\"@CORE_SDK_TAG = \"$RELEASE_VERSION\"@g" packages/clarity-native-bin/src/index.ts - - - name: Create Pull Request - id: create_pr - uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7.0.6 - with: - token: ${{ secrets.GH_TOKEN }} - commit-message: "chore: update clarity-native-bin tag" - committer: ${{ env.COMMIT_USER }} <${{ env.COMMIT_EMAIL }}> - author: ${{ env.COMMIT_USER }} <${{ env.COMMIT_EMAIL }}> - branch: auto/update-bin-tag - delete-branch: true - title: "clarity-native-bin tag update: ${{ env.RELEASE_VERSION }}" - labels: | - dependencies - body: | - :robot: This is an automated pull request created from a new release in [stacks-core](https://github.com/stacks-network/stacks-core/releases). - - Updates the clarity-native-bin tag. 
- assignees: zone117x - reviewers: zone117x diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index 6474ad2c07..ef0620c4c7 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -38,9 +38,9 @@ concurrency: run-name: ${{ inputs.node_tag || inputs.signer_tag }} jobs: - ## This job's sole purpose is trigger a secondary approval outside of the matrix jobs below. + ## This job's sole purpose is trigger a secondary approval outside of the matrix jobs below. ## - If this job isn't approved to run, then the subsequent jobs will also not run - for this reason, we always exit 0 - ## - `andon-cord` requires the repo environment "Build Release", which will trigger a secondary approval step before running this workflow. + ## - `andon-cord` requires the repo environment "Build Release", which will trigger a secondary approval step before running this workflow. andon-cord: if: | inputs.node_tag != '' || @@ -65,6 +65,9 @@ jobs: runs-on: ubuntu-latest needs: - andon-cord + permissions: + id-token: write + attestations: write strategy: ## Run a maximum of 10 builds concurrently, using the matrix defined in inputs.arch max-parallel: 10 @@ -135,6 +138,10 @@ jobs: inputs.signer_tag != '' name: Docker Image (Binary) runs-on: ubuntu-latest + environment: "Push to Docker" + permissions: + id-token: write + attestations: write needs: - andon-cord - build-binaries @@ -164,7 +171,6 @@ jobs: ## Create the downstream PR for the release branch to master,develop create-pr: if: | - !contains(github.ref, '-rc') && ( inputs.node_tag != '' || inputs.signer_tag != '' diff --git a/.github/workflows/image-build-source.yml b/.github/workflows/image-build-source.yml index 45f04b93c4..2535b812e2 100644 --- a/.github/workflows/image-build-source.yml +++ b/.github/workflows/image-build-source.yml @@ -23,6 +23,9 @@ jobs: runs-on: ubuntu-latest ## Requires the repo environment "Push to Docker", which will trigger a secondary 
approval step before running this workflow. environment: "Push to Docker" + permissions: + id-token: write + attestations: write steps: ## Increase swapfile - name: Increase swapfile @@ -78,3 +81,22 @@ jobs: GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} TARGET_CPU=x86-64-v3 push: ${{ env.DOCKER_PUSH }} + + ## Generate docker image attestation(s) + - name: Generate artifact attestation (${{ github.event.repository.name }}) + id: attest_primary + uses: actions/attest-build-provenance@c074443f1aee8d4aeeae555aebba3282517141b2 # v2.2.3 + with: + subject-name: | + index.docker.io/${{env.docker-org}}/${{ github.event.repository.name }} + subject-digest: ${{ steps.docker_build.outputs.digest }} + push-to-registry: true + + - name: Generate artifact attestation (stacks-blockchain) + id: attest_secondary + uses: actions/attest-build-provenance@c074443f1aee8d4aeeae555aebba3282517141b2 # v2.2.3 + with: + subject-name: | + index.docker.io/${{env.docker-org}}/stacks-blockchain + subject-digest: ${{ steps.docker_build.outputs.digest }} + push-to-registry: true diff --git a/CHANGELOG.md b/CHANGELOG.md index ed53ae581d..bc8c0e1f92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,25 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Added + +- Added field `vm_error` to EventObserver transaction outputs +- Added new `ValidateRejectCode` values to the `/v3/block_proposal` endpoint +- Added `StateMachineUpdateContent::V1` to support a vector of `StacksTransaction` expected to be replayed in subsequent Stacks blocks +- Include a reason string in the transaction receipt when a transaction is rolled back due to a post-condition. This should help users in understanding what went wrong. 
+ +### Changed + +- Reduce the default `block_rejection_timeout_steps` configuration so that miners will retry faster when blocks fail to reach 70% approved or 30% rejected. +- Added index for `next_ready_nakamoto_block()` which improves block processing performance. +- Added a new field, `parent_burn_block_hash`, to the payload that is included in the `/new_burn_block` event observer payload. + +### Fixed + +- Fix regression in mock-mining, allowing the mock miner to continue mining blocks throughout a tenure instead of failing after mining the tenure change block. + ## [3.1.0.0.8] ### Added @@ -19,8 +38,9 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - When a miner times out waiting for signatures, it will re-propose the same block instead of building a new block ([#5877](https://github.com/stacks-network/stacks-core/pull/5877)) - Improve tenure downloader trace verbosity applying proper logging level depending on the tenure state ("debug" if unconfirmed, "info" otherwise) ([#5871](https://github.com/stacks-network/stacks-core/issues/5871)) -- Remove warning log about missing UTXOs when a node is configured as `miner` with `mock_mining` mode enabled ([#5841](https://github.com/stacks-network/stacks-core/issues/5841)) -- Deprecated the `wait_on_interim_blocks` option in the miner config file. This option is no longer needed, as the miner will always wait for interim blocks to be processed before mining a new block. To wait extra time in between blocks, use the `min_time_between_blocks_ms` option instead. +- Remove warning log about missing UTXOs when a node is configured as `miner` with `mock_mining` mode enabled ([#5841](https://github.com/stacks-network/stacks-core/issues/5841)) +- Deprecated the `wait_on_interim_blocks` option in the miner config file. This option is no longer needed, as the miner will always wait for interim blocks to be processed before mining a new block. 
To wait extra time in between blocks, use the `min_time_between_blocks_ms` option instead. ([#5979](https://github.com/stacks-network/stacks-core/pull/5979)) +- Added `empty_mempool_sleep_ms` to the miner config file to control the time to wait in between mining attempts when the mempool is empty. If not set, the default sleep time is 2.5s. ([#5997](https://github.com/stacks-network/stacks-core/pull/5997)) ## [3.1.0.0.7] diff --git a/CODEOWNERS b/CODEOWNERS deleted file mode 100644 index 59bf3d1cde..0000000000 --- a/CODEOWNERS +++ /dev/null @@ -1,3 +0,0 @@ -# For this branch (`feat/clarity-wasm-develop`) the code owners are simplified -# to only require approvals from the Clarity Wasm team. -* @stacks-network/clarity-wasm diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 577d417c2c..86291f170c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -361,17 +361,31 @@ A non-exhaustive list of examples of consensus-critical changes include: - Every consensus-critical change needs an integration test to verify that the feature activates only when the hard fork activates. -PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel +- PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel (which is the default operation of the `cargo test` command), these tests should be decorated with `#[ignore]`. - A test should be marked `#[ignore]` if: -1. It does not _always_ pass `cargo test` in a vanilla environment + 1. It does not _always_ pass `cargo test` in a vanilla environment (i.e., it does not need to run with `--test-threads 1`). -2. Or, it runs for over a minute via a normal `cargo test` execution + 2. Or, it runs for over a minute via a normal `cargo test` execution (the `cargo test` command will warn if this is not the case). +- **Integration tests need to be properly tagged** using [pinny-rs](https://github.com/BitcoinL2-Labs/pinny-rs/) crate. 
Tagging requires two fundamental steps: + 1. Define allowed tags in the package `Cargo.toml` file (if needed). + 2. Apply relevant tags to the tests, picking from the allowed set. + + Then it will be possible to run tests with filtering based on the tags using `cargo test` and `cargo nextest` runner. + > For more information and examples on how tagging works, refer to the [pinny-rs](https://github.com/BitcoinL2-Labs/pinny-rs/) readme. + + Below the tag set currently defined with related purpose: + + | Tag | Description | + |-----------------|----------------------------------------------| + | `slow` | tests running over a minute | + | `bitcoind` | tests requiring bitcoin daemon | + | `flaky` | tests that exhibit flaky behavior | + ## Formatting PRs will be checked against `rustfmt` and will _fail_ if not properly formatted. diff --git a/Cargo.lock b/Cargo.lock index 0bb217a0d3..0f279dca54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2460,12 +2460,20 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD name = "mach" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" dependencies = [ "libc", +======= +name = "madhouse" +version = "0.1.0" +source = "git+https://github.com/stacks-network/madhouse-rs.git?rev=fc651ddcbaf85e888b06d4a87aa788c4b7ba9309#fc651ddcbaf85e888b06d4a87aa788c4b7ba9309" +dependencies = [ + "proptest 1.6.0 (git+https://github.com/proptest-rs/proptest.git?rev=c9bdf18c232665b2b740c667c81866b598d06dc7)", +>>>>>>> develop ] [[package]] @@ -2792,6 +2800,19 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pinny" +version = "0.0.2" +source = "git+https://github.com/BitcoinL2-Labs/pinny-rs.git?rev=54ba9d533a7b84525a5e65a3eae1a3ae76b9ea49#54ba9d533a7b84525a5e65a3eae1a3ae76b9ea49" +dependencies = [ + "proc-macro2", + 
"quote", + "regex", + "syn 2.0.58", + "thiserror", + "toml", +] + [[package]] name = "piper" version = "0.2.4" @@ -2968,6 +2989,25 @@ dependencies = [ "unarray", ] +[[package]] +name = "proptest" +version = "1.6.0" +source = "git+https://github.com/proptest-rs/proptest.git?rev=c9bdf18c232665b2b740c667c81866b598d06dc7#c9bdf18c232665b2b740c667c81866b598d06dc7" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.4.2", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift", + "regex-syntax 0.8.2", + "rusty-fork", + "tempfile", + "unarray", +] + [[package]] name = "protobuf" version = "2.28.0" @@ -3880,8 +3920,12 @@ dependencies = [ "libsecp256k1 0.5.0", "libsecp256k1 0.7.2", "nix", +<<<<<<< HEAD "percent-encoding", "proptest", +======= + "proptest 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", +>>>>>>> develop "rand 0.8.5", "rand_core 0.6.4", "ripemd", @@ -3915,9 +3959,12 @@ dependencies = [ "lazy_static", "libc", "libsigner", + "madhouse", "mockito", "mutants", "pico-args", + "pinny", + "proptest 1.6.0 (git+https://github.com/proptest-rs/proptest.git?rev=c9bdf18c232665b2b740c667c81866b598d06dc7)", "rand 0.8.5", "regex", "reqwest", diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 94633e14ae..e937fe42b0 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -24,18 +24,30 @@ pub enum Error { Wasm(WasmError), BadTransaction(String), CostError(ExecutionCost, ExecutionCost), - AbortedByCallback(Option, AssetMap, Vec), + AbortedByCallback { + /// What the output value of the transaction would have been. + /// This will be a Some for contract-calls, and None for contract initialization txs. 
+ output: Option, + /// The asset map which was evaluated by the abort callback + assets_modified: AssetMap, + /// The events from the transaction processing + tx_events: Vec, + /// A human-readable explanation for aborting the transaction + reason: String, + }, } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { + match self { Error::CostError(ref a, ref b) => { - write!(f, "Cost Error: {} cost exceeded budget of {} cost", a, b) + write!(f, "Cost Error: {a} cost exceeded budget of {b} cost") } Error::Analysis(ref e) => fmt::Display::fmt(e, f), Error::Parse(ref e) => fmt::Display::fmt(e, f), - Error::AbortedByCallback(..) => write!(f, "Post condition aborted transaction"), + Error::AbortedByCallback { reason, .. } => { + write!(f, "Post condition aborted transaction: {reason}") + } Error::Interpreter(ref e) => fmt::Display::fmt(e, f), Error::BadTransaction(ref s) => fmt::Display::fmt(s, f), #[cfg(feature = "clarity-wasm")] @@ -48,7 +60,7 @@ impl std::error::Error for Error { fn cause(&self) -> Option<&dyn std::error::Error> { match *self { Error::CostError(ref _a, ref _b) => None, - Error::AbortedByCallback(..) => None, + Error::AbortedByCallback { .. } => None, Error::Analysis(ref e) => Some(e), Error::Parse(ref e) => Some(e), Error::Interpreter(ref e) => Some(e), @@ -176,16 +188,17 @@ pub trait TransactionConnection: ClarityConnection { /// * the asset changes during `to_do` in an `AssetMap` /// * the Stacks events during the transaction /// - /// and a `bool` value which is `true` if the `abort_call_back` caused the changes to abort. + /// and an optional string value which is the result of `abort_call_back`, + /// containing a human-readable reason for aborting the transaction. /// /// If `to_do` returns an `Err` variant, then the changes are aborted. 
fn with_abort_callback( &mut self, to_do: F, abort_call_back: A, - ) -> Result<(R, AssetMap, Vec, bool), E> + ) -> Result<(R, AssetMap, Vec, Option), E> where - A: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool, + A: FnOnce(&AssetMap, &mut ClarityDatabase) -> Option, F: FnOnce(&mut OwnedEnvironment) -> Result<(R, AssetMap, Vec), E>, E: From; @@ -291,16 +304,16 @@ pub trait TransactionConnection: ClarityConnection { .stx_transfer(from, to, amount, memo) .map_err(Error::from) }, - |_, _| false, + |_, _| None, ) .map(|(value, assets, events, _)| (value, assets, events)) } /// Execute a contract call in the current block. - /// If an error occurs while processing the transaction, its modifications will be rolled back. - /// abort_call_back is called with an AssetMap and a ClarityDatabase reference, - /// if abort_call_back returns true, all modifications from this transaction will be rolled back. - /// otherwise, they will be committed (though they may later be rolled back if the block itself is rolled back). + /// If an error occurs while processing the transaction, its modifications will be rolled back. + /// `abort_call_back` is called with an `AssetMap` and a `ClarityDatabase` reference, + /// If `abort_call_back` returns `Some(reason)`, all modifications from this transaction will be rolled back. + /// Otherwise, they will be committed (though they may later be rolled back if the block itself is rolled back). 
#[allow(clippy::too_many_arguments)] fn run_contract_call( &mut self, @@ -313,7 +326,7 @@ pub trait TransactionConnection: ClarityConnection { max_execution_time: Option, ) -> Result<(Value, AssetMap, Vec), Error> where - F: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool, + F: FnOnce(&AssetMap, &mut ClarityDatabase) -> Option, { let expr_args: Vec<_> = args .iter() @@ -339,20 +352,25 @@ pub trait TransactionConnection: ClarityConnection { }, abort_call_back, ) - .and_then(|(value, assets, events, aborted)| { - if aborted { - Err(Error::AbortedByCallback(Some(value), assets, events)) + .and_then(|(value, assets_modified, tx_events, reason)| { + if let Some(reason) = reason { + Err(Error::AbortedByCallback { + output: Some(value), + assets_modified, + tx_events, + reason, + }) } else { - Ok((value, assets, events)) + Ok((value, assets_modified, tx_events)) } }) } /// Initialize a contract in the current block. /// If an error occurs while processing the initialization, it's modifications will be rolled back. - /// abort_call_back is called with an AssetMap and a ClarityDatabase reference, - /// if abort_call_back returns true, all modifications from this transaction will be rolled back. - /// otherwise, they will be committed (though they may later be rolled back if the block itself is rolled back). + /// `abort_call_back` is called with an `AssetMap` and a `ClarityDatabase` reference, + /// If `abort_call_back` returns `Some(reason)`, all modifications from this transaction will be rolled back. + /// Otherwise, they will be committed (though they may later be rolled back if the block itself is rolled back). 
#[allow(clippy::too_many_arguments)] fn initialize_smart_contract( &mut self, @@ -366,9 +384,9 @@ pub trait TransactionConnection: ClarityConnection { max_execution_time: Option, ) -> Result<(AssetMap, Vec), Error> where - F: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool, + F: FnOnce(&AssetMap, &mut ClarityDatabase) -> Option, { - let (_, asset_map, events, aborted) = self.with_abort_callback( + let (_, assets_modified, tx_events, reason) = self.with_abort_callback( |vm_env| { if let Some(max_execution_time_duration) = max_execution_time { vm_env @@ -388,10 +406,15 @@ pub trait TransactionConnection: ClarityConnection { }, abort_call_back, )?; - if aborted { - Err(Error::AbortedByCallback(None, asset_map, events)) + if let Some(reason) = reason { + Err(Error::AbortedByCallback { + output: None, + assets_modified, + tx_events, + reason, + }) } else { - Ok((asset_map, events)) + Ok((assets_modified, tx_events)) } } } diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index b5da5efedf..b92e79cbf2 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -254,16 +254,13 @@ impl SqliteConnection { Ok(()) } + pub fn memory() -> Result { let contract_db = SqliteConnection::inner_open(":memory:")?; SqliteConnection::initialize_conn(&contract_db)?; Ok(contract_db) } - pub fn open(filename: &str) -> Result { - let contract_db = SqliteConnection::inner_open(filename)?; - SqliteConnection::check_schema(&contract_db)?; - Ok(contract_db) - } + pub fn check_schema(conn: &Connection) -> Result<()> { let sql = "SELECT sql FROM sqlite_master WHERE name=?"; let _: String = conn @@ -272,10 +269,13 @@ impl SqliteConnection { let _: String = conn .query_row(sql, params!["metadata_table"], |row| row.get(0)) .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; + let _: String = conn + .query_row(sql, params!["md_blockhashes"], |row| row.get(0)) + .map_err(|x| 
InterpreterError::SqliteError(IncomparableError { err: x }))?; Ok(()) } - pub fn inner_open(filename: &str) -> Result { + fn inner_open(filename: &str) -> Result { let conn = Connection::open(filename) .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; diff --git a/docs/event-dispatcher.md b/docs/event-dispatcher.md index da11740922..5cf063bf69 100644 --- a/docs/event-dispatcher.md +++ b/docs/event-dispatcher.md @@ -27,13 +27,13 @@ These events are sent to the configured endpoint at two URLs: This payload includes data related to a newly processed block, and any events emitted from Stacks transactions during the block. -If the transaction originally comes from the parent microblock stream +If the transaction originally comes from the parent microblock stream preceding this block, the microblock related fields will be filled in. If the `raw_tx` field for a particular transaction is "0x00", that indicates -that it is a burnchain operation. A burnchain operation is a transaction that +that it is a burnchain operation. A burnchain operation is a transaction that is executed on the Stacks network, but was sent through the Bitcoin network. -The Stacks network supports a few specific burnchain operations. You can read +The Stacks network supports a few specific burnchain operations. You can read more about them [here](https://github.com/stacksgov/sips/blob/main/sips/sip-007/sip-007-stacking-consensus.md#stx-operations-on-bitcoin). The section below has example json encodings for each of the burnchain operations. 
@@ -152,8 +152,8 @@ Example: } ``` -#### Example json values for burnchain operations -- TransferStx +#### Example json values for burnchain operations +- TransferStx ```json { "transfer_stx": { @@ -233,6 +233,8 @@ Example: ```json { "burn_block_hash": "0x4eaabcd105865e471f697eff5dd5bd85d47ecb5a26a3379d74fae0ae87c40904", + "consensus_hash": "0x53c166a709a9abd64a92a57f928a8b26aad08992", + "parent_burn_block_hash": "0x6eaebcd105865e471f697eff5dd5bd85d47ecb5a26a3379d74fae0ae87c40904", "burn_block_height": 331, "reward_recipients": [ { @@ -258,8 +260,8 @@ Example: ### `POST /new_microblocks` -This payload includes data related to one or more microblocks that are either emmitted by the -node itself, or received through the network. +This payload includes data related to one or more microblocks that are either emitted by the +node itself, or received through the network. Example: @@ -311,9 +313,9 @@ Example: } ``` -* `burn_block_{}` are the stats related to the burn block that is associated with the stacks +* `burn_block_{}` are the stats related to the burn block that is associated with the stacks block that precedes this microblock stream. -* Each transaction json object includes information about the microblock the transaction was packaged into. +* Each transaction json object includes information about the microblock the transaction was packaged into. 
### `POST /new_mempool_tx` @@ -384,23 +386,23 @@ Example: "tx_events": [ { "Success": { - "txid": "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6", - "fee": 0, - "execution_cost": { - "write_length": 0, - "write_count": 0, - "read_length": 0, - "read_count": 0, + "txid": "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6", + "fee": 0, + "execution_cost": { + "write_length": 0, + "write_count": 0, + "read_length": 0, + "read_count": 0, "runtime": 0 - }, + }, "result": { - "ResponseData": + "ResponseData": { "committed": true, "data": true } } - }}, + }}, { "ProcessingError": { "txid": "eef9f46b20fb637bd07ec92ad3ec175a5a4bdf3e8799259fc5b16a272090d4de", @@ -432,23 +434,23 @@ Example: "tx_events": [ { "Success": { - "txid": "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6", - "fee": 0, - "execution_cost": { - "write_length": 10, - "write_count": 10, - "read_length": 20, - "read_count": 10, + "txid": "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6", + "fee": 0, + "execution_cost": { + "write_length": 10, + "write_count": 10, + "read_length": 20, + "read_count": 10, "runtime": 1290 - }, + }, "result": { - "ResponseData": + "ResponseData": { "committed": true, "data": true } } - }}, + }}, { "Skipped": { "txid": "eef9f46b20fb637bd07ec92ad3ec175a5a4bdf3e8799259fc5b16a272090d4de", diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index ad0583a057..bba3df86d8 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -192,8 +192,14 @@ pub enum SignerEvent { /// The `Vec` will contain any signer messages made by the miner. 
MinerMessages(Vec), /// The signer messages for other signers and miners to observe - /// The u32 is the signer set to which the message belongs (either 0 or 1) - SignerMessages(u32, Vec), + SignerMessages { + /// The signer set to which the message belongs (either 0 or 1) + signer_set: u32, + /// Each message of type `T` is paired with the `StacksPublicKey` of the slot from which it was retrieved + messages: Vec<(StacksPublicKey, T)>, + /// the time at which this event was received by the signer's event processor + received_time: SystemTime, + }, /// A new block proposal validation response from the node BlockValidationResponse(BlockValidateResponse), /// Status endpoint request @@ -518,6 +524,7 @@ impl TryFrom for SignerEvent { type Error = EventError; fn try_from(event: StackerDBChunksEvent) -> Result { + let received_time = SystemTime::now(); let signer_event = if event.contract_id.name.as_str() == MINERS_NAME && event.contract_id.is_boot() { @@ -536,12 +543,21 @@ impl TryFrom for SignerEvent { return Err(EventError::UnrecognizedStackerDBContract(event.contract_id)); }; // signer-XXX-YYY boot contract - let signer_messages: Vec = event + let messages: Vec<(StacksPublicKey, T)> = event .modified_slots .iter() - .filter_map(|chunk| read_next::(&mut &chunk.data[..]).ok()) + .filter_map(|chunk| { + Some(( + chunk.recover_pk().ok()?, + read_next::(&mut &chunk.data[..]).ok()?, + )) + }) .collect(); - SignerEvent::SignerMessages(signer_set, signer_messages) + SignerEvent::SignerMessages { + signer_set, + messages, + received_time, + } } else { return Err(EventError::UnrecognizedStackerDBContract(event.contract_id)); }; @@ -589,6 +605,7 @@ struct BlockEvent { #[serde(with = "prefix_hex")] index_block_hash: StacksBlockId, #[serde(with = "prefix_opt_hex")] + #[serde(default)] signer_signature_hash: Option, #[serde(with = "prefix_hex")] consensus_hash: ConsensusHash, diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index b94eb7608a..e52a6f1355 100644
--- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -20,7 +20,7 @@ use std::fmt::Debug; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; use std::sync::mpsc::{channel, Receiver, Sender}; -use std::time::Duration; +use std::time::{Duration, SystemTime}; use std::{mem, thread}; use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -28,7 +28,7 @@ use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; use blockstack_lib::util_lib::boot::boot_code_id; -use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; +use clarity::types::chainstate::{ConsensusHash, StacksBlockId, StacksPublicKey, TrieHash}; use clarity::util::hash::Sha512Trunc256Sum; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::QualifiedContractIdentifier; @@ -177,23 +177,48 @@ fn test_simple_signer() { sleep_ms(5000); let accepted_events = running_signer.stop().unwrap(); - chunks.sort_by(|ev1, ev2| { - ev1.modified_slots[0] - .slot_id - .partial_cmp(&ev2.modified_slots[0].slot_id) - .unwrap() - }); - let sent_events: Vec> = chunks .iter() - .map(|chunk| { - let msg = chunk.modified_slots[0].data.clone(); - let signer_message = read_next::(&mut &msg[..]).unwrap(); - SignerEvent::SignerMessages(0, vec![signer_message]) + .map(|event| { + let messages: Vec<(StacksPublicKey, SignerMessage)> = event + .modified_slots + .iter() + .filter_map(|chunk| { + Some(( + chunk.recover_pk().ok()?, + read_next::(&mut &chunk.data[..]).ok()?, + )) + }) + .collect(); + SignerEvent::SignerMessages { + signer_set: 0, + messages, + received_time: SystemTime::now(), + } }) .collect(); - assert_eq!(sent_events, accepted_events); + for event in sent_events { + let SignerEvent::SignerMessages { + signer_set: sent_signer_set, + messages: sent_messages, + .. 
+ } = event + else { + panic!("We expect ONLY signer messages"); + }; + assert!(accepted_events.iter().any(|e| { + let SignerEvent::SignerMessages { + signer_set: accepted_signer_set, + messages: accepted_messages, + .. + } = e + else { + panic!("We expect ONLY signer messages"); + }; + *accepted_signer_set == sent_signer_set && *accepted_messages == sent_messages + })) + } mock_stacks_node.join().unwrap(); } diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index e09920e573..ccd098a8b3 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -24,6 +24,7 @@ //! and the `SignerMessage` enum. use std::fmt::{Debug, Display}; +use std::hash::{Hash, Hasher}; use std::io::{Read, Write}; use std::marker::PhantomData; use std::net::{SocketAddr, TcpListener, TcpStream}; @@ -570,10 +571,21 @@ pub enum StateMachineUpdateContent { /// The signer's view of who the current miner should be (and their tenure building info) current_miner: StateMachineUpdateMinerState, }, + /// Version 1 + V1 { + /// The tip burn block (i.e., the latest bitcoin block) seen by this signer + burn_block: ConsensusHash, + /// The tip burn block height (i.e., the latest bitcoin block) seen by this signer + burn_block_height: u64, + /// The signer's view of who the current miner should be (and their tenure building info) + current_miner: StateMachineUpdateMinerState, + /// The replay transactions + replay_transactions: Vec, + }, } /// Message for update the Signer State infos -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, Eq, Hash)] pub enum StateMachineUpdateMinerState { /// There is an active miner ActiveMiner { @@ -675,6 +687,7 @@ impl StateMachineUpdateContent { fn is_protocol_version_compatible(&self, version: u64) -> bool { match self { Self::V0 { .. } => version == 0, + Self::V1 { .. 
} => version == 1, } } @@ -689,6 +702,17 @@ impl StateMachineUpdateContent { burn_block_height.consensus_serialize(fd)?; current_miner.consensus_serialize(fd)?; } + Self::V1 { + burn_block, + burn_block_height, + current_miner, + replay_transactions, + } => { + burn_block.consensus_serialize(fd)?; + burn_block_height.consensus_serialize(fd)?; + current_miner.consensus_serialize(fd)?; + replay_transactions.consensus_serialize(fd)?; + } } Ok(()) } @@ -704,6 +728,18 @@ impl StateMachineUpdateContent { current_miner, }) } + 1 => { + let burn_block = read_next(fd)?; + let burn_block_height = read_next(fd)?; + let current_miner = read_next(fd)?; + let replay_transactions = read_next(fd)?; + Ok(Self::V1 { + burn_block, + burn_block_height, + current_miner, + replay_transactions, + }) + } other => Err(CodecError::DeserializeError(format!( "Unknown state machine update version: {other}" ))), @@ -1715,15 +1751,17 @@ impl From for SignerMessage { mod test { use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; use blockstack_lib::chainstate::stacks::{ - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionSmartContract, TransactionVersion, + TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, + TransactionPostConditionMode, TransactionSmartContract, TransactionSpendingCondition, + TransactionVersion, }; use blockstack_lib::util_lib::strings::StacksString; use clarity::consts::CHAIN_ID_MAINNET; - use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; + use clarity::types::chainstate::{ConsensusHash, StacksAddress, StacksBlockId, TrieHash}; use clarity::types::PrivateKey; use clarity::util::hash::{hex_bytes, MerkleTree}; use clarity::util::secp256k1::MessageSignature; + use clarity::vm::{ClarityName, ContractName}; use rand::rngs::mock; use rand::{thread_rng, Rng, RngCore}; use rand_core::OsRng; @@ -2315,4 +2353,84 @@ mod test { assert_eq!(signer_message, 
signer_message_deserialized); } + + #[test] + fn deserialize_state_machine_update_v1() { + let signer_message = StateMachineUpdate::new( + 1, + 3, + StateMachineUpdateContent::V1 { + burn_block: ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh: Hash160([0xab; 20]), + tenure_id: ConsensusHash([0x44; 20]), + parent_tenure_id: ConsensusHash([0x22; 20]), + parent_tenure_last_block: StacksBlockId([0x33; 32]), + parent_tenure_last_block_height: 1, + }, + replay_transactions: vec![], + }, + ) + .unwrap(); + + let mut bytes = vec![]; + signer_message.consensus_serialize(&mut bytes).unwrap(); + + // check for raw content for avoiding regressions when structure changes + let raw_signer_message: Vec<&[u8]> = vec![ + /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 1], + /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 3], + /* content_len*/ &[0, 0, 0, 133], + /* burn_block*/ &[0x55; 20], + /* burn_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 100], + /* current_miner_variant */ &[0x01], + /* current_miner_pkh */ &[0xab; 20], + /* tenure_id*/ &[0x44; 20], + /* parent_tenure_id*/ &[0x22; 20], + /* parent_tenure_last_block */ &[0x33; 32], + /* parent_tenure_last_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 1], + /* replay_transactions */ &[0, 0, 0, 0], + ]; + + assert_eq!(bytes, raw_signer_message.concat()); + + let signer_message_deserialized = + StateMachineUpdate::consensus_deserialize(&mut &bytes[..]).unwrap(); + + assert_eq!(signer_message, signer_message_deserialized); + + let signer_message = StateMachineUpdate::new( + 1, + 4, + StateMachineUpdateContent::V1 { + burn_block: ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::NoValidMiner, + replay_transactions: vec![], + }, + ) + .unwrap(); + + let mut bytes = vec![]; + signer_message.consensus_serialize(&mut bytes).unwrap(); + + // check for raw content for avoiding 
regressions when structure changes + let raw_signer_message: Vec<&[u8]> = vec![ + /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 1], + /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 4], + /* content_len*/ &[0, 0, 0, 33], + /* burn_block*/ &[0x55; 20], + /* burn_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 100], + /* current_miner_variant */ &[0x00], + /* replay_transactions */ &[0, 0, 0, 0], + ]; + + assert_eq!(bytes, raw_signer_message.concat()); + + let signer_message_deserialized = + StateMachineUpdate::consensus_deserialize(&mut &bytes[..]).unwrap(); + + assert_eq!(signer_message, signer_message_deserialized); + } } diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index ca471474a4..33679040ec 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -5,6 +5,19 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Changed + +- Upgraded `SUPPORTED_SIGNER_PROTOCOL_VERSION` to 1 + +## [3.1.0.0.8.1] + +### Added + +- The signer will now check if their associated stacks-node has processed the parent block for a block proposal before submitting that block proposal. If it cannot confirm that the parent block has been processed, it waits a default time of 15s before submitting, configurable via `proposal_wait_for_parent_time_secs` in the signer config.toml. 
+ + ## [3.1.0.0.8.0] ### Changed diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 0f51c05fc5..614a4be4ae 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -141,6 +141,8 @@ pub struct ProposalEvalConfig { /// Time following the last block of the previous tenure's global acceptance that a signer will consider an attempt by /// the new miner to reorg it as valid towards miner activity pub reorg_attempts_activity_timeout: Duration, + /// Time to wait before submitting a block proposal to the stacks-node + pub proposal_wait_for_parent_time: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { @@ -152,6 +154,7 @@ impl From<&SignerConfig> for ProposalEvalConfig { tenure_idle_timeout: value.tenure_idle_timeout, reorg_attempts_activity_timeout: value.reorg_attempts_activity_timeout, tenure_idle_timeout_buffer: value.tenure_idle_timeout_buffer, + proposal_wait_for_parent_time: value.proposal_wait_for_parent_time, } } } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 59b8309d53..c1f6d5e1d6 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -418,6 +418,7 @@ pub(crate) mod tests { tenure_idle_timeout_buffer: config.tenure_idle_timeout_buffer, block_proposal_max_age_secs: config.block_proposal_max_age_secs, reorg_attempts_activity_timeout: config.reorg_attempts_activity_timeout, + proposal_wait_for_parent_time: config.proposal_wait_for_parent_time, } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 38c0a66bbb..47b538679c 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -20,11 +20,7 @@ use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use 
blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; -use blockstack_lib::chainstate::stacks::{ - StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, - TransactionContractCall, TransactionPayload, TransactionPostConditionMode, - TransactionSpendingCondition, TransactionVersion, -}; +use blockstack_lib::chainstate::stacks::TransactionVersion; use blockstack_lib::net::api::callreadonly::CallReadOnlyResponse; use blockstack_lib::net::api::get_tenures_fork_info::{ TenureForkingInfo, RPC_TENURE_FORKING_INFO_PATH, @@ -61,8 +57,6 @@ use crate::runloop::RewardCycleInfo; pub struct StacksClient { /// The stacks address of the signer stacks_address: StacksAddress, - /// The private key used in all stacks node communications - stacks_private_key: StacksPrivateKey, /// The stacks node HTTP base endpoint http_origin: String, /// The types of transactions @@ -94,7 +88,6 @@ pub struct CurrentAndLastSortition { impl From<&GlobalConfig> for StacksClient { fn from(config: &GlobalConfig) -> Self { Self { - stacks_private_key: config.stacks_private_key, stacks_address: config.stacks_address, http_origin: format!("http://{}", config.node_host), tx_version: config.network.to_transaction_version(), @@ -123,7 +116,6 @@ impl StacksClient { }; let stacks_address = StacksAddress::p2pkh(mainnet, &pubkey); Self { - stacks_private_key, stacks_address, http_origin: format!("http://{}", node_host), tx_version, @@ -321,6 +313,7 @@ impl StacksClient { let block_proposal = NakamotoBlockProposal { block, chain_id: self.chain_id, + replay_txs: None, }; let timer = crate::monitoring::actions::new_rpc_call_timer( &self.block_proposal_path(), @@ -756,60 +749,6 @@ impl StacksClient { fn tenure_tip_path(&self, consensus_hash: &ConsensusHash) -> String { format!("{}/v3/tenures/tip/{}", self.http_origin, consensus_hash) } - - /// Helper function to create a stacks transaction for a modifying contract call - #[allow(clippy::too_many_arguments)] - pub fn 
build_unsigned_contract_call_transaction( - contract_addr: &StacksAddress, - contract_name: ContractName, - function_name: ClarityName, - function_args: &[ClarityValue], - stacks_private_key: &StacksPrivateKey, - tx_version: TransactionVersion, - chain_id: u32, - nonce: u64, - ) -> Result { - let tx_payload = TransactionPayload::ContractCall(TransactionContractCall { - address: *contract_addr, - contract_name, - function_name, - function_args: function_args.to_vec(), - }); - let public_key = StacksPublicKey::from_private(stacks_private_key); - let tx_auth = TransactionAuth::Standard( - TransactionSpendingCondition::new_singlesig_p2pkh(public_key).ok_or( - ClientError::TransactionGenerationFailure(format!( - "Failed to create spending condition from public key: {}", - public_key.to_hex() - )), - )?, - ); - - let mut unsigned_tx = StacksTransaction::new(tx_version, tx_auth, tx_payload); - unsigned_tx.set_origin_nonce(nonce); - - unsigned_tx.anchor_mode = TransactionAnchorMode::Any; - unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; - unsigned_tx.chain_id = chain_id; - Ok(unsigned_tx) - } - - /// Sign an unsigned transaction - pub fn sign_transaction( - &self, - unsigned_tx: StacksTransaction, - ) -> Result { - let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); - tx_signer - .sign_origin(&self.stacks_private_key) - .map_err(|e| ClientError::TransactionGenerationFailure(e.to_string()))?; - - tx_signer - .get_tx() - .ok_or(ClientError::TransactionGenerationFailure( - "Failed to generate transaction from a transaction signer".to_string(), - )) - } } #[cfg(test)] diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 4b4c990c85..e642288593 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -45,6 +45,10 @@ const DEFAULT_REORG_ATTEMPTS_ACTIVITY_TIMEOUT_MS: u64 = 200_000; /// Default number of seconds to add to the tenure extend time, after computing the idle timeout, /// to allow for clock 
skew between the signer and the miner const DEFAULT_TENURE_IDLE_TIMEOUT_BUFFER_SECS: u64 = 2; +/// Default time (in ms) to wait before submitting a proposal if we +/// cannot determine that our stacks-node has processed the parent +/// block +const DEFAULT_PROPOSAL_WAIT_TIME_FOR_PARENT_SECS: u64 = 15; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -175,6 +179,9 @@ pub struct SignerConfig { pub reorg_attempts_activity_timeout: Duration, /// The running mode for the signer (dry-run or normal) pub signer_mode: SignerConfigMode, + /// Time to wait before submitting a block proposal to the stacks-node if we cannot + /// determine that the stacks-node has processed the parent + pub proposal_wait_for_parent_time: Duration, } /// The parsed configuration for the signer @@ -221,6 +228,9 @@ pub struct GlobalConfig { /// Time following the last block of the previous tenure's global acceptance that a signer will consider an attempt by /// the new miner to reorg it as valid towards miner activity pub reorg_attempts_activity_timeout: Duration, + /// Time to wait before submitting a block proposal to the stacks-node if we cannot + /// determine that the stacks-node has processed the parent + pub proposal_wait_for_parent_time: Duration, /// Is this signer binary going to be running in dry-run mode? pub dry_run: bool, } @@ -268,6 +278,8 @@ struct RawConfigFile { /// Time (in millisecs) following a block's global acceptance that a signer will consider an attempt by a miner /// to reorg the block as valid towards miner activity pub reorg_attempts_activity_timeout_ms: Option, + /// Time to wait (in millisecs) before submitting a block proposal to the stacks-node + pub proposal_wait_for_parent_time_secs: Option, /// Is this signer binary going to be running in dry-run mode? 
pub dry_run: Option, } @@ -385,6 +397,12 @@ impl TryFrom for GlobalConfig { .unwrap_or(DEFAULT_TENURE_IDLE_TIMEOUT_BUFFER_SECS), ); + let proposal_wait_for_parent_time = Duration::from_secs( + raw_data + .proposal_wait_for_parent_time_secs + .unwrap_or(DEFAULT_PROPOSAL_WAIT_TIME_FOR_PARENT_SECS), + ); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -405,6 +423,7 @@ impl TryFrom for GlobalConfig { reorg_attempts_activity_timeout, dry_run, tenure_idle_timeout_buffer, + proposal_wait_for_parent_time, }) } } diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 1c69132e79..71727653d8 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -78,6 +78,8 @@ pub trait Signer: Debug + Display { fn has_unprocessed_blocks(&self) -> bool; /// Get a reference to the local state machine of the signer fn get_local_state_machine(&self) -> &LocalStateMachine; + /// Get the number of pending block proposals + fn get_pending_proposals_count(&self) -> u64; } /// A wrapper around the running signer type for the signer diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 27029241ff..dabf529b6b 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -33,6 +33,21 @@ SignerAgreementStateChangeReason { InactiveMiner("inactive_miner"), /// Signer agreement protocol version has been upgraded ProtocolUpgrade("protocol_upgrade"), + /// An update related to the Miner view + MinerViewUpdate("miner_view_update"), + /// A specific Miner View update related to the parent tenure + MinerParentTenureUpdate("miner_parent_tenure_update"), +}); + +define_named_enum!( +/// Represent different conflict types on signer agreement protocol +SignerAgreementStateConflict { + /// Waiting for burn block propagation to be aligned with the signer set + BurnBlockDelay("burn_block_delay"), + /// Waiting for stacks block propagation to be aligned with the signer set + 
StacksBlockDelay("stacks_block_delay"), + /// No agreement on miner view with the signer set + MinerView("miner_view"), }); /// Actions for updating metrics @@ -44,7 +59,7 @@ pub mod actions { use crate::config::GlobalConfig; use crate::monitoring::prometheus::*; - use crate::monitoring::SignerAgreementStateChangeReason; + use crate::monitoring::{SignerAgreementStateChangeReason, SignerAgreementStateConflict}; use crate::v0::signer_state::LocalStateMachine; /// Update stacks tip height gauge @@ -134,6 +149,21 @@ pub mod actions { .inc(); } + /// Increment signer agreement state conflict counter + pub fn increment_signer_agreement_state_conflict(conflict: SignerAgreementStateConflict) { + let label_value = conflict.get_name(); + SIGNER_AGREEMENT_STATE_CONFLICTS + .with_label_values(&[&label_value]) + .inc(); + } + + /// Record the time (seconds) taken for a signer to agree with the signer set + pub fn record_signer_agreement_capitulation_latency(latency_s: u64) { + SIGNER_AGREEMENT_CAPITULATION_LATENCIES_HISTOGRAM + .with_label_values(&[]) + .observe(latency_s as f64); + } + /// Start serving monitoring metrics. /// This will only serve the metrics if the `monitoring_prom` feature is enabled. 
pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), String> { @@ -157,7 +187,7 @@ pub mod actions { use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use stacks_common::info; - use crate::monitoring::SignerAgreementStateChangeReason; + use crate::monitoring::{SignerAgreementStateChangeReason, SignerAgreementStateConflict}; use crate::v0::signer_state::LocalStateMachine; use crate::GlobalConfig; @@ -212,6 +242,12 @@ pub mod actions { ) { } + /// Increment signer agreement state conflict counter + pub fn increment_signer_agreement_state_conflict(_conflict: SignerAgreementStateConflict) {} + + /// Record the time (seconds) taken for a signer to agree with the signer set + pub fn record_signer_agreement_capitulation_latency(_latency_s: u64) {} + /// Start serving monitoring metrics. /// This will only serve the metrics if the `monitoring_prom` feature is enabled. pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), String> { diff --git a/stacks-signer/src/monitoring/prometheus.rs b/stacks-signer/src/monitoring/prometheus.rs index be9f89191e..2114b7f4cd 100644 --- a/stacks-signer/src/monitoring/prometheus.rs +++ b/stacks-signer/src/monitoring/prometheus.rs @@ -81,10 +81,22 @@ lazy_static! { pub static ref SIGNER_AGREEMENT_STATE_CHANGE_REASONS: IntCounterVec = register_int_counter_vec!( "stacks_signer_agreement_state_change_reasons", - "The number of state machine changes in signer agreement protocol. `reason` can be one of: 'burn_block_arrival', 'stacks_block_arrival', 'inactive_miner', 'protocol_upgrade'", + "The number of state machine changes in signer agreement protocol. 
`reason` can be one of: 'burn_block_arrival', 'stacks_block_arrival', 'inactive_miner', 'protocol_upgrade', 'miner_view_update', 'miner_parent_tenure_update'", &["reason"] ).unwrap(); + pub static ref SIGNER_AGREEMENT_STATE_CONFLICTS: IntCounterVec = register_int_counter_vec!( + "stacks_signer_agreement_state_conflicts", + "The number of state machine conflicts in signer agreement protocol. `conflict` can be one of: 'burn_block_delay', 'stacks_block_delay', 'miner_view'", + &["conflict"] + ).unwrap(); + + pub static ref SIGNER_AGREEMENT_CAPITULATION_LATENCIES_HISTOGRAM: HistogramVec = register_histogram_vec!(histogram_opts!( + "stacks_signer_agreement_capitulation_latencies_histogram", + "Measuring the time (in seconds) for the signer to agree (capitulate) with the signer set", + vec![0.0, 1.0, 3.0, 5.0, 10.0, 20.0, 30.0, 60.0, 120.0] + ), &[]).unwrap(); + pub static ref SIGNER_LOCAL_STATE_MACHINE: Mutex> = Mutex::new(None); } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 0bb8cd651b..0cc2510923 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -56,6 +56,8 @@ pub struct StateInfo { /// The local state machines for the running signers /// as a pair of (reward-cycle, state-machine) pub signer_state_machines: Vec<(u64, Option)>, + /// The number of pending block proposals for this signer + pub pending_proposals_count: u64, } /// The signer result that can be sent across threads @@ -320,6 +322,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo tenure_idle_timeout_buffer: self.config.tenure_idle_timeout_buffer, block_proposal_max_age_secs: self.config.block_proposal_max_age_secs, reorg_attempts_activity_timeout: self.config.reorg_attempts_activity_timeout, + proposal_wait_for_parent_time: self.config.proposal_wait_for_parent_time, })) } @@ -466,9 +469,15 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo // We are either the current or a future reward cycle, so we are not stale. 
continue; } - if let ConfiguredSigner::RegisteredSigner(signer) = signer { - if !signer.has_unprocessed_blocks() { - debug!("{signer}: Signer's tenure has completed."); + match signer { + ConfiguredSigner::RegisteredSigner(signer) => { + if !signer.has_unprocessed_blocks() { + debug!("{signer}: Signer's tenure has completed."); + to_delete.push(*idx); + } + } + ConfiguredSigner::NotRegistered { .. } => { + debug!("{signer}: Unregistered signer's tenure has completed."); to_delete.push(*idx); } } @@ -523,6 +532,17 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> ) }) .collect(), + pending_proposals_count: self + .stacks_signers + .values() + .find_map(|signer| { + if let ConfiguredSigner::RegisteredSigner(signer) = signer { + Some(signer.get_pending_proposals_count()) + } else { + None + } + }) + .unwrap_or(0), }; info!("Signer status check requested: {state_info:?}"); diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 1be7a40f40..5de793edc0 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::collections::HashMap; use std::fmt::Display; use std::path::Path; use std::time::{Duration, SystemTime}; @@ -28,7 +29,7 @@ use blockstack_lib::util_lib::db::{ use blockstack_lib::util_lib::db::{FromColumn, FromRow}; use clarity::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use clarity::types::Address; -use libsigner::v0::messages::{RejectReason, RejectReasonPrefix}; +use libsigner::v0::messages::{RejectReason, RejectReasonPrefix, StateMachineUpdate}; use libsigner::BlockProposal; use rusqlite::functions::FunctionFlags; use rusqlite::{ @@ -366,6 +367,10 @@ CREATE INDEX IF NOT EXISTS blocks_consensus_hash_status_height ON blocks (consen CREATE INDEX IF NOT EXISTS blocks_reward_cycle_state on blocks (reward_cycle, state); "#; +static CREATE_INDEXES_11: &str = r#" +CREATE INDEX IF NOT EXISTS signer_state_machine_updates_reward_cycle_received_time ON signer_state_machine_updates (reward_cycle, received_time ASC); +"#; + static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ -518,6 +523,15 @@ static ADD_CONSENSUS_HASH_INDEX: &str = r#" CREATE INDEX IF NOT EXISTS burn_blocks_ch on burn_blocks (consensus_hash); "#; +static CREATE_SIGNER_STATE_MACHINE_UPDATES_TABLE: &str = r#" +CREATE TABLE IF NOT EXISTS signer_state_machine_updates ( + signer_addr TEXT NOT NULL, + reward_cycle INTEGER NOT NULL, + state_update TEXT NOT NULL, + received_time INTEGER NOT NULL, + PRIMARY KEY (signer_addr, reward_cycle) +) STRICT;"#; + static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, CREATE_DB_CONFIG, @@ -593,9 +607,15 @@ static SCHEMA_10: &[&str] = &[ "INSERT INTO db_config (version) VALUES (10);", ]; +static SCHEMA_11: &[&str] = &[ + CREATE_SIGNER_STATE_MACHINE_UPDATES_TABLE, + CREATE_INDEXES_11, + "INSERT INTO db_config (version) VALUES (11);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. 
- pub const SCHEMA_VERSION: u32 = 10; + pub const SCHEMA_VERSION: u32 = 11; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -765,6 +785,20 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 10 to schema 11 + fn schema_11_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 11 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_11.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Register custom scalar functions used by the database fn register_scalar_functions(&self) -> Result<(), DBError> { // Register helper function for determining if a block is a tenure change transaction @@ -808,7 +842,8 @@ impl SignerDb { 7 => Self::schema_8_migration(&sql_tx)?, 8 => Self::schema_9_migration(&sql_tx)?, 9 => Self::schema_10_migration(&sql_tx)?, - 10 => break, + 10 => Self::schema_11_migration(&sql_tx)?, + 11 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -1180,12 +1215,18 @@ impl SignerDb { /// If found, remove it from the pending table. 
pub fn get_and_remove_pending_block_validation( &self, - ) -> Result, DBError> { - let qry = "DELETE FROM block_validations_pending WHERE signer_signature_hash = (SELECT signer_signature_hash FROM block_validations_pending ORDER BY added_time ASC LIMIT 1) RETURNING signer_signature_hash"; + ) -> Result, DBError> { + let qry = "DELETE FROM block_validations_pending WHERE signer_signature_hash = (SELECT signer_signature_hash FROM block_validations_pending ORDER BY added_time ASC LIMIT 1) RETURNING signer_signature_hash, added_time"; let args = params![]; let mut stmt = self.db.prepare(qry)?; - let sighash: Option = stmt.query_row(args, |row| row.get(0)).optional()?; - Ok(sighash.and_then(|sighash| Sha512Trunc256Sum::from_hex(&sighash).ok())) + let result: Option<(String, i64)> = stmt + .query_row(args, |row| Ok((row.get(0)?, row.get(1)?))) + .optional()?; + Ok(result.and_then(|(sighash, ts_i64)| { + let signer_sighash = Sha512Trunc256Sum::from_hex(&sighash).ok()?; + let ts = u64::try_from(ts_i64).ok()?; + Some((signer_sighash, ts)) + })) } /// Remove a pending block validation @@ -1319,6 +1360,74 @@ impl SignerDb { })?; Ok(Some(last_activity_time)) } + + /// Insert the signer state machine update + pub fn insert_state_machine_update( + &mut self, + reward_cycle: u64, + address: &StacksAddress, + update: &StateMachineUpdate, + received_time: &SystemTime, + ) -> Result<(), DBError> { + let received_ts = received_time + .duration_since(std::time::UNIX_EPOCH) + .map_err(|e| DBError::Other(format!("Bad system time: {e}")))? 
+ .as_secs(); + let update_str = + serde_json::to_string(&update).expect("Unable to serialize state machine update"); + debug!("Inserting update."; + "reward_cycle" => reward_cycle, + "address" => %address, + "active_signer_protocol_version" => update.active_signer_protocol_version, + "local_supported_signer_protocol_version" => update.local_supported_signer_protocol_version + ); + self.db.execute("INSERT OR REPLACE INTO signer_state_machine_updates (signer_addr, reward_cycle, state_update, received_time) VALUES (?1, ?2, ?3, ?4)", params![ + address.to_string(), + u64_to_sql(reward_cycle)?, + update_str, + u64_to_sql(received_ts)? + ])?; + Ok(()) + } + + /// Get all signer states from the signer state machine for the given reward cycle + pub fn get_signer_state_machine_updates( + &mut self, + reward_cycle: u64, + ) -> Result, DBError> { + let query = "SELECT signer_addr, state_update FROM signer_state_machine_updates WHERE reward_cycle = ?"; + let args = params![u64_to_sql(reward_cycle)?]; + let mut stmt = self.db.prepare(query)?; + let rows = stmt.query_map(args, |row| { + let address_str: String = row.get(0)?; + let update_str: String = row.get(1)?; + Ok((address_str, update_str)) + })?; + let mut result = HashMap::new(); + for row in rows { + let (address_str, update_str) = row?; + let address = StacksAddress::from_string(&address_str).ok_or(DBError::Corruption)?; + let update: StateMachineUpdate = serde_json::from_str(&update_str)?; + result.insert(address, update); + } + Ok(result) + } + + /// Retrieve the elapsed time (in seconds) between + /// the oldest and the newest state machine update messages + /// produced by the signer set + pub fn get_signer_state_machine_updates_latency( + &self, + reward_cycle: u64, + ) -> Result { + let query = "SELECT COALESCE( (MAX(received_time) - MIN(received_time)), 0 ) AS elapsed_time FROM signer_state_machine_updates WHERE reward_cycle = ?1"; + let args = params![u64_to_sql(reward_cycle)?]; + let elapsed_time_opt: Option = 
query_row(&self.db, query, args)?; + match elapsed_time_opt { + Some(seconds) => Ok(seconds), + None => Ok(0), + } + } } fn try_deserialize(s: Option) -> Result, DBError> @@ -1376,7 +1485,7 @@ impl SignerDb { /// Tests for SignerDb #[cfg(test)] -mod tests { +pub mod tests { use std::fs; use std::path::PathBuf; @@ -1388,6 +1497,7 @@ mod tests { use clarity::types::chainstate::{StacksBlockId, StacksPrivateKey, StacksPublicKey}; use clarity::util::hash::Hash160; use clarity::util::secp256k1::MessageSignature; + use libsigner::v0::messages::{StateMachineUpdateContent, StateMachineUpdateMinerState}; use libsigner::{BlockProposal, BlockProposalData}; use super::*; @@ -1399,7 +1509,8 @@ mod tests { } } - fn create_block_override( + /// Override the creation of a block from a block proposal with the provided function + pub fn create_block_override( overrides: impl FnOnce(&mut BlockProposal), ) -> (BlockInfo, BlockProposal) { let header = NakamotoBlockHeader::empty(); @@ -1421,7 +1532,8 @@ mod tests { create_block_override(|_| {}) } - fn tmp_db_path() -> PathBuf { + /// Create a temporary db path for testing purposes + pub fn tmp_db_path() -> PathBuf { std::env::temp_dir().join(format!( "stacks-signer-test-{}.sqlite", rand::random::() @@ -2093,20 +2205,29 @@ mod tests { db.insert_pending_block_validation(&Sha512Trunc256Sum([0x03; 32]), 3000) .unwrap(); - let pending_hash = db.get_and_remove_pending_block_validation().unwrap(); - assert_eq!(pending_hash, Some(Sha512Trunc256Sum([0x01; 32]))); + let (pending_hash, _) = db + .get_and_remove_pending_block_validation() + .unwrap() + .unwrap(); + assert_eq!(pending_hash, Sha512Trunc256Sum([0x01; 32])); let pendings = db.get_all_pending_block_validations().unwrap(); assert_eq!(pendings.len(), 2); - let pending_hash = db.get_and_remove_pending_block_validation().unwrap(); - assert_eq!(pending_hash, Some(Sha512Trunc256Sum([0x02; 32]))); + let (pending_hash, _) = db + .get_and_remove_pending_block_validation() + .unwrap() + .unwrap(); 
+ assert_eq!(pending_hash, Sha512Trunc256Sum([0x02; 32])); let pendings = db.get_all_pending_block_validations().unwrap(); assert_eq!(pendings.len(), 1); - let pending_hash = db.get_and_remove_pending_block_validation().unwrap(); - assert_eq!(pending_hash, Some(Sha512Trunc256Sum([0x03; 32]))); + let (pending_hash, _) = db + .get_and_remove_pending_block_validation() + .unwrap() + .unwrap(); + assert_eq!(pending_hash, Sha512Trunc256Sum([0x03; 32])); let pendings = db.get_all_pending_block_validations().unwrap(); assert!(pendings.is_empty()); @@ -2319,4 +2440,183 @@ mod tests { assert_eq!(block_info.ext, block_info_prev.ext); assert!(block_info.reject_reason.is_none()); } + + #[test] + fn insert_and_get_state_machine_updates() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let reward_cycle_1 = 1; + let address_1 = StacksAddress::p2pkh(false, &StacksPublicKey::new()); + let update_1 = StateMachineUpdate::new( + 0, + 3, + StateMachineUpdateContent::V0 { + burn_block: ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh: Hash160([0xab; 20]), + tenure_id: ConsensusHash([0x44; 20]), + parent_tenure_id: ConsensusHash([0x22; 20]), + parent_tenure_last_block: StacksBlockId([0x33; 32]), + parent_tenure_last_block_height: 1, + }, + }, + ) + .unwrap(); + + let address_2 = StacksAddress::p2pkh(false, &StacksPublicKey::new()); + let update_2 = StateMachineUpdate::new( + 0, + 4, + StateMachineUpdateContent::V0 { + burn_block: ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::NoValidMiner, + }, + ) + .unwrap(); + + let address_3 = StacksAddress::p2pkh(false, &StacksPublicKey::new()); + let update_3 = StateMachineUpdate::new( + 0, + 2, + StateMachineUpdateContent::V0 { + burn_block: ConsensusHash([0x66; 20]), + burn_block_height: 101, + current_miner: 
StateMachineUpdateMinerState::NoValidMiner, + }, + ) + .unwrap(); + + assert!( + db.get_signer_state_machine_updates(reward_cycle_1) + .unwrap() + .is_empty(), + "The database should be empty for reward_cycle {reward_cycle_1}" + ); + + db.insert_state_machine_update(reward_cycle_1, &address_1, &update_1, &SystemTime::now()) + .expect("Unable to insert block into db"); + db.insert_state_machine_update(reward_cycle_1, &address_2, &update_2, &SystemTime::now()) + .expect("Unable to insert block into db"); + db.insert_state_machine_update( + reward_cycle_1 + 1, + &address_3, + &update_3, + &SystemTime::now(), + ) + .expect("Unable to insert block into db"); + + let updates = db.get_signer_state_machine_updates(reward_cycle_1).unwrap(); + assert_eq!(updates.len(), 2); + + assert_eq!(updates.get(&address_1), Some(&update_1)); + assert_eq!(updates.get(&address_2), Some(&update_2)); + assert_eq!(updates.get(&address_3), None); + + db.insert_state_machine_update(reward_cycle_1, &address_2, &update_3, &SystemTime::now()) + .expect("Unable to insert block into db"); + let updates = db.get_signer_state_machine_updates(reward_cycle_1).unwrap(); + assert_eq!(updates.len(), 2); + + assert_eq!(updates.get(&address_1), Some(&update_1)); + assert_eq!(updates.get(&address_2), Some(&update_3)); + assert_eq!(updates.get(&address_3), None); + + let updates = db + .get_signer_state_machine_updates(reward_cycle_1 + 1) + .unwrap(); + assert_eq!(updates.len(), 1); + assert_eq!(updates.get(&address_1), None); + assert_eq!(updates.get(&address_2), None); + assert_eq!(updates.get(&address_3), Some(&update_3)); + } + + #[test] + fn retrieve_latency_for_signer_state_machine_updates() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let reward_cycle_1 = 1; + let address_1 = StacksAddress::p2pkh(false, &StacksPublicKey::new()); + let update_1 = StateMachineUpdate::new( + 0, + 3, + StateMachineUpdateContent::V0 { + burn_block: 
ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh: Hash160([0xab; 20]), + tenure_id: ConsensusHash([0x44; 20]), + parent_tenure_id: ConsensusHash([0x22; 20]), + parent_tenure_last_block: StacksBlockId([0x33; 32]), + parent_tenure_last_block_height: 1, + }, + }, + ) + .unwrap(); + let time_1 = SystemTime::UNIX_EPOCH; + + let address_2 = StacksAddress::p2pkh(false, &StacksPublicKey::new()); + let update_2 = StateMachineUpdate::new( + 0, + 4, + StateMachineUpdateContent::V0 { + burn_block: ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::NoValidMiner, + }, + ) + .unwrap(); + let time_2 = SystemTime::UNIX_EPOCH + Duration::from_secs(1); + + let address_3 = StacksAddress::p2pkh(false, &StacksPublicKey::new()); + let update_3 = StateMachineUpdate::new( + 0, + 2, + StateMachineUpdateContent::V0 { + burn_block: ConsensusHash([0x66; 20]), + burn_block_height: 101, + current_miner: StateMachineUpdateMinerState::NoValidMiner, + }, + ) + .unwrap(); + let time_3 = SystemTime::UNIX_EPOCH + Duration::from_secs(10); + + assert_eq!( + 0, + db.get_signer_state_machine_updates_latency(reward_cycle_1) + .unwrap(), + "latency on empty database should be 0 seconds for reward_cycle {reward_cycle_1}" + ); + + db.insert_state_machine_update(reward_cycle_1, &address_1, &update_1, &time_1) + .expect("Unable to insert block into db"); + + assert_eq!( + 0, + db.get_signer_state_machine_updates_latency(reward_cycle_1) + .unwrap(), + "latency between same update should be 0 seconds" + ); + + db.insert_state_machine_update(reward_cycle_1, &address_2, &update_2, &time_2) + .expect("Unable to insert block into db"); + + assert_eq!( + 1, + db.get_signer_state_machine_updates_latency(reward_cycle_1) + .unwrap(), + "latency between updates should be 1 second" + ); + + db.insert_state_machine_update(reward_cycle_1, &address_3, &update_3, &time_3) + .expect("Unable to 
insert block into db"); + + assert_eq!( + 10, + db.get_signer_state_machine_updates_latency(reward_cycle_1) + .unwrap(), + "latency between updates should be 10 second" + ); + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 17450501c6..422b0c84d4 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -91,6 +91,7 @@ fn setup_test_environment( tenure_idle_timeout: Duration::from_secs(300), tenure_idle_timeout_buffer: Duration::from_secs(2), reorg_attempts_activity_timeout: Duration::from_secs(3), + proposal_wait_for_parent_time: Duration::from_secs(0), }, }; diff --git a/stacks-signer/src/tests/mod.rs b/stacks-signer/src/tests/mod.rs index a92c85da71..b879f903bb 100644 --- a/stacks-signer/src/tests/mod.rs +++ b/stacks-signer/src/tests/mod.rs @@ -1 +1,2 @@ mod chainstate; +mod signer_state; diff --git a/stacks-signer/src/tests/signer_state.rs b/stacks-signer/src/tests/signer_state.rs new file mode 100644 index 0000000000..6b888f3bc1 --- /dev/null +++ b/stacks-signer/src/tests/signer_state.rs @@ -0,0 +1,376 @@ +use std::collections::HashMap; + +use clarity::types::chainstate::{ + ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, +}; +use clarity::util::hash::Hash160; +use clarity::util::secp256k1::MessageSignature; +use libsigner::v0::messages::{ + StateMachineUpdate as StateMachineUpdateMessage, StateMachineUpdateContent, + StateMachineUpdateMinerState, +}; + +use crate::signerdb::tests::{create_block_override, tmp_db_path}; +use crate::signerdb::SignerDb; +use crate::v0::signer_state::{GlobalStateEvaluator, SignerStateMachine}; + +fn generate_global_state_evaluator(num_addresses: u32) -> GlobalStateEvaluator { + let address_weights = generate_random_address_with_equal_weights(num_addresses); + let active_protocol_version = 0; + let local_supported_signer_protocol_version = 1; + + let update = StateMachineUpdateMessage::new( + 
active_protocol_version, + local_supported_signer_protocol_version, + StateMachineUpdateContent::V0 { + burn_block: ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh: Hash160([0xab; 20]), + tenure_id: ConsensusHash([0x44; 20]), + parent_tenure_id: ConsensusHash([0x22; 20]), + parent_tenure_last_block: StacksBlockId([0x33; 32]), + parent_tenure_last_block_height: 1, + }, + }, + ) + .unwrap(); + + let mut address_updates = HashMap::new(); + for address in address_weights.keys() { + address_updates.insert(*address, update.clone()); + } + GlobalStateEvaluator::new(address_updates, address_weights) +} + +fn generate_random_address_with_equal_weights(num_addresses: u32) -> HashMap { + let mut address_weights = HashMap::new(); + for _ in 0..num_addresses { + let stacks_address = StacksAddress::p2pkh( + false, + &StacksPublicKey::from_private(&StacksPrivateKey::random()), + ); + address_weights.insert(stacks_address, 10); + } + address_weights +} + +#[test] +fn determine_latest_supported_signer_protocol_versions() { + let mut global_eval = generate_global_state_evaluator(5); + + let addresses: Vec<_> = global_eval.address_weights.keys().cloned().collect(); + let local_address = addresses[0]; + + let local_update = global_eval + .address_updates + .get(&local_address) + .unwrap() + .clone(); + assert_eq!( + global_eval + .determine_latest_supported_signer_protocol_version(local_address, &local_update,) + .unwrap(), + local_update.local_supported_signer_protocol_version + ); + + let StateMachineUpdateMessage { + active_signer_protocol_version, + local_supported_signer_protocol_version, + content: + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + }, + .. 
+ } = local_update.clone() + else { + panic!("Unexpected state machine update message version"); + }; + + // Let's update 3 signers (60 percent) to support seperate but greater protocol versions + for (i, address) in addresses.into_iter().skip(1).take(3).enumerate() { + let new_version = local_update.local_supported_signer_protocol_version + i as u64 + 1; + let new_update = StateMachineUpdateMessage::new( + active_signer_protocol_version, + new_version, + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner: current_miner.clone(), + }, + ) + .unwrap(); + global_eval.insert_update(address, new_update); + } + + assert_eq!( + global_eval + .determine_latest_supported_signer_protocol_version(local_address, &local_update) + .unwrap(), + local_supported_signer_protocol_version + ); + + // Let's tip the scales over to version number 2 by updating the local signer's version... + // i.e. > 70% will have version 2 or higher in their map + let local_update = StateMachineUpdateMessage::new( + active_signer_protocol_version, + 3, + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + }, + ) + .unwrap(); + + assert_eq!( + global_eval + .determine_latest_supported_signer_protocol_version(local_address, &local_update) + .unwrap(), + local_supported_signer_protocol_version + 1 + ); +} + +#[test] +fn determine_global_burn_views() { + let mut global_eval = generate_global_state_evaluator(5); + + let addresses: Vec<_> = global_eval.address_weights.keys().cloned().collect(); + let local_address = addresses[0]; + let local_update = global_eval + .address_updates + .get(&local_address) + .unwrap() + .clone(); + let StateMachineUpdateMessage { + active_signer_protocol_version, + local_supported_signer_protocol_version, + content: + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + }, + .. 
+ } = local_update.clone() + else { + panic!("Unexpected state machine update message version"); + }; + + assert_eq!( + global_eval + .determine_global_burn_view(local_address, &local_update) + .unwrap(), + (burn_block, burn_block_height) + ); + + // Let's update 3 signers (60 percent) to support a new burn block view + let new_update = StateMachineUpdateMessage::new( + active_signer_protocol_version, + local_supported_signer_protocol_version, + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height: burn_block_height.wrapping_add(1), + current_miner: current_miner.clone(), + }, + ) + .unwrap(); + for address in addresses.into_iter().skip(1).take(3) { + global_eval.insert_update(address, new_update.clone()); + } + + assert!( + global_eval + .determine_global_burn_view(local_address, &local_update) + .is_none(), + "We should not have reached agreement on the burn block height" + ); + + // Let's tip the scales over to burn block height + 1 + assert_eq!( + global_eval + .determine_global_burn_view(local_address, &new_update) + .unwrap(), + (burn_block, burn_block_height.wrapping_add(1)) + ); +} + +#[test] +fn determine_global_states() { + let mut global_eval = generate_global_state_evaluator(5); + + let addresses: Vec<_> = global_eval.address_weights.keys().cloned().collect(); + let local_address = addresses[0]; + let local_update = global_eval + .address_updates + .get(&local_address) + .unwrap() + .clone(); + let StateMachineUpdateMessage { + active_signer_protocol_version, + local_supported_signer_protocol_version, + content: + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + }, + .. 
+ } = local_update.clone() + else { + panic!("Unexpected state machine update message version"); + }; + + let state_machine = SignerStateMachine { + burn_block, + burn_block_height, + current_miner: (¤t_miner).into(), + active_signer_protocol_version: local_supported_signer_protocol_version, // a majority of signers are saying they support version the same local_supported_signer_protocol_version, so update it here... + tx_replay_set: None, + }; + + assert_eq!( + global_eval + .determine_global_state(local_address, &local_update) + .unwrap(), + state_machine + ); + let new_miner = StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh: Hash160([0x00; 20]), + tenure_id: ConsensusHash([0x44; 20]), + parent_tenure_id: ConsensusHash([0x22; 20]), + parent_tenure_last_block: StacksBlockId([0x33; 32]), + parent_tenure_last_block_height: 1, + }; + + let new_update = StateMachineUpdateMessage::new( + active_signer_protocol_version, + local_supported_signer_protocol_version, + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner: new_miner.clone(), + }, + ) + .unwrap(); + + // Let's update 3 signers to some new miner key (60 percent) + for address in addresses.into_iter().skip(1).take(3) { + global_eval.insert_update(address, new_update.clone()); + } + + assert!( + global_eval + .determine_global_state(local_address, &local_update) + .is_none(), + "We should have a disagreement about the current miner" + ); + + let state_machine = SignerStateMachine { + burn_block, + burn_block_height, + current_miner: (&new_miner).into(), + active_signer_protocol_version: local_supported_signer_protocol_version, // a majority of signers are saying they support version the same local_supported_signer_protocol_version, so update it here... 
+ tx_replay_set: None, + }; + + // Let's tip the scales over to a different miner + assert_eq!( + global_eval + .determine_global_state(local_address, &new_update) + .unwrap(), + state_machine + ) +} + +#[test] +fn check_capitulate_miner_view() { + let mut global_eval = generate_global_state_evaluator(5); + + let addresses: Vec<_> = global_eval.address_weights.keys().cloned().collect(); + let local_address = addresses[0]; + let local_update = global_eval + .address_updates + .get(&local_address) + .unwrap() + .clone(); + let StateMachineUpdateMessage { + active_signer_protocol_version, + local_supported_signer_protocol_version, + content: + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + }, + .. + } = local_update.clone() + else { + panic!("Unexpected state machine update message version"); + }; + // Let's create a new miner view + let new_tenure_id = ConsensusHash([0x00; 20]); + let new_miner = StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh: Hash160([0x00; 20]), + tenure_id: new_tenure_id, + parent_tenure_id: ConsensusHash([0x22; 20]), + parent_tenure_last_block: StacksBlockId([0x33; 32]), + parent_tenure_last_block_height: 1, + }; + + let new_update = StateMachineUpdateMessage::new( + active_signer_protocol_version, + local_supported_signer_protocol_version, + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner: new_miner.clone(), + }, + ) + .unwrap(); + + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let (mut block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = new_tenure_id; + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + + db.insert_block(&block_info_1).unwrap(); + // Let's update only our own view: the evaluator will tell me to revert my viewpoint to the original miner + assert_eq!( + 
global_eval + .capitulate_miner_view(&mut db, local_address, &new_update) + .unwrap(), + current_miner + ); + + // Let's set a blocking minority to this different view: evaluator should see no global blocks for the blocking majority and return none + // I.e. only if the blocking minority is attempting to reject an reorg should it take priority over the rest. + // Let's update 1 other signer to some new miner key (60 percent) + for address in addresses.into_iter().skip(1).take(1) { + global_eval.insert_update(address, new_update.clone()); + } + assert!( + global_eval + .capitulate_miner_view(&mut db, local_address, &new_update) + .is_none(), + "Evaluator should have been unable to determine a majority view and return none" + ); + + db.mark_block_globally_accepted(&mut block_info_1).unwrap(); + + db.insert_block(&block_info_1).unwrap(); + + // Now that the blocking minority references a tenure which would actually get reorged, lets capitulate to their view + assert_eq!( + global_eval + .capitulate_miner_view(&mut db, local_address, &new_update) + .unwrap(), + new_miner + ); +} diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 4442df6d67..1080b0543a 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -17,7 +17,7 @@ use std::fmt::Debug; use std::sync::mpsc::Sender; #[cfg(any(test, feature = "testing"))] use std::sync::LazyLock; -use std::time::{Duration, Instant}; +use std::time::{Duration, Instant, SystemTime}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ @@ -25,9 +25,7 @@ use blockstack_lib::net::api::postblock_proposal::{ TOO_MANY_REQUESTS_STATUS, }; use blockstack_lib::util_lib::db::Error as DBError; -use clarity::types::chainstate::StacksPrivateKey; -#[cfg(any(test, feature = "testing"))] -use clarity::types::chainstate::StacksPublicKey; +use clarity::types::chainstate::{StacksBlockId, StacksPrivateKey}; use 
clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use clarity::util::secp256k1::Secp256k1PublicKey; @@ -37,15 +35,17 @@ use clarity::util::sleep_ms; use clarity::util::tests::TestFlag; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, - RejectReason, RejectReasonPrefix, SignerMessage, + RejectReason, RejectReasonPrefix, SignerMessage, StateMachineUpdate, }; use libsigner::{BlockProposal, SignerEvent}; -use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::{StacksAddress, StacksPublicKey}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, error, info, warn}; -use super::signer_state::LocalStateMachine; +#[cfg(not(any(test, feature = "testing")))] +use super::signer_state::SUPPORTED_SIGNER_PROTOCOL_VERSION; +use super::signer_state::{GlobalStateEvaluator, LocalStateMachine}; use crate::chainstate::{ProposalEvalConfig, SortitionMinerStatus, SortitionsView}; use crate::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use crate::config::{SignerConfig, SignerConfigMode}; @@ -73,6 +73,12 @@ pub enum SignerMode { }, } +/// Track N most recently processed block identifiers +pub struct RecentlyProcessedBlocks { + blocks: Vec, + write_head: usize, +} + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -82,6 +88,8 @@ pub struct Signer { #[cfg(not(any(test, feature = "testing")))] /// The private key of the signer private_key: StacksPrivateKey, + /// The signer address + pub stacks_address: StacksAddress, /// The stackerdb client pub stackerdb: StackerDB, /// Whether the signer is a mainnet signer or not @@ -109,6 +117,10 @@ pub struct Signer { pub block_proposal_max_age_secs: u64, /// The signer's local state machine used in signer set agreement pub local_state_machine: 
LocalStateMachine, + /// Cache of stacks block IDs for blocks recently processed by our stacks-node + recently_processed: RecentlyProcessedBlocks<100>, + /// The signer's global state evaluator + pub global_state_evaluator: GlobalStateEvaluator, } impl std::fmt::Display for SignerMode { @@ -126,6 +138,51 @@ impl std::fmt::Display for Signer { } } +impl std::fmt::Debug for RecentlyProcessedBlocks { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "RecentlyProcessed({:?})", self.blocks) + } +} + +impl Default for RecentlyProcessedBlocks { + fn default() -> Self { + Self::new() + } +} + +impl RecentlyProcessedBlocks { + /// Construct a new recently processed blocks cache + pub fn new() -> Self { + Self { + blocks: Vec::with_capacity(N), + write_head: 0, + } + } + + /// Is `block` known to have been processed by our stacks-node? + pub fn is_processed(&self, block: &StacksBlockId) -> bool { + self.blocks.contains(block) + } + + /// Add a block that we know has been processed by our stacks-node + pub fn add_block(&mut self, block: StacksBlockId) { + if self.blocks.len() < N { + self.blocks.push(block); + return; + } + let Some(location) = self.blocks.get_mut(self.write_head) else { + warn!( + "Failed to cache processing information about {block}, write_head {} was improperly set for cache size {N} with blocks length {}", + self.write_head, + self.blocks.len() + ); + return; + }; + *location = block; + self.write_head = (self.write_head + 1) % self.blocks.len(); + } +} + impl SignerTrait for Signer { /// Create a new signer from the given configuration fn new(stacks_client: &StacksClient, signer_config: SignerConfig) -> Self { @@ -137,7 +194,7 @@ impl SignerTrait for Signer { debug!("Reward cycle #{} {mode}", signer_config.reward_cycle); - let signer_db = + let mut signer_db = SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); let proposal_config = ProposalEvalConfig::from(&signer_config); @@ -146,8 +203,24 @@ 
impl SignerTrait for Signer { warn!("Failed to initialize local state machine for signer: {e:?}"); LocalStateMachine::Uninitialized }); + let stacks_address = StacksAddress::p2pkh( + signer_config.mainnet, + &StacksPublicKey::from_private(&signer_config.stacks_private_key), + ); + + let updates = signer_db + .get_signer_state_machine_updates(signer_config.reward_cycle) + .inspect_err(|e| { + warn!("An error occurred retrieving state machine updates from the db: {e}") + }) + .unwrap_or_default(); + let global_state_evaluator = GlobalStateEvaluator::new( + updates, + signer_config.signer_entries.signer_addr_to_weight.clone(), + ); Self { private_key: signer_config.stacks_private_key, + stacks_address, stackerdb, mainnet: signer_config.mainnet, mode, @@ -161,6 +234,8 @@ impl SignerTrait for Signer { block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, block_proposal_max_age_secs: signer_config.block_proposal_max_age_secs, local_state_machine: signer_state, + recently_processed: RecentlyProcessedBlocks::new(), + global_state_evaluator, } } @@ -187,13 +262,16 @@ impl SignerTrait for Signer { | Some(SignerEvent::NewBlock { .. }) | Some(SignerEvent::StatusCheck) | None => None, - Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), + Some(SignerEvent::SignerMessages { signer_set, .. }) => { + Some(u64::from(*signer_set) % 2) + } }; let other_signer_parity = (self.reward_cycle + 1) % 2; if event_parity == Some(other_signer_parity) { return; } self.check_submitted_block_proposal(); + self.check_pending_block_validations(stacks_client); debug!("{self}: Processing event: {event:?}"); let Some(event) = event else { // No event. Do nothing. 
@@ -218,6 +296,92 @@ impl SignerTrait for Signer { .unwrap_or_else(|e| error!("{self}: failed to update local state machine for pending update"; "err" => ?e)); } + self.handle_event_match(stacks_client, sortition_state, event, current_reward_cycle); + + self.check_submitted_block_proposal(); + self.check_pending_block_validations(stacks_client); + + if prior_state != self.local_state_machine { + let version = self.get_signer_protocol_version(); + self.local_state_machine + .send_signer_update_message(&mut self.stackerdb, version); + } + } + + fn has_unprocessed_blocks(&self) -> bool { + self.signer_db + .has_unprocessed_blocks(self.reward_cycle) + .unwrap_or_else(|e| { + error!("{self}: Failed to check for pending blocks: {e:?}",); + // Assume we have pending blocks to prevent premature cleanup + true + }) + } + + fn get_local_state_machine(&self) -> &LocalStateMachine { + &self.local_state_machine + } + + #[cfg(not(any(test, feature = "testing")))] + fn get_pending_proposals_count(&self) -> u64 { + 0 + } + + #[cfg(any(test, feature = "testing"))] + fn get_pending_proposals_count(&self) -> u64 { + self.signer_db + .get_all_pending_block_validations() + .map(|results| u64::try_from(results.len()).unwrap()) + .unwrap_or(0) + } +} + +impl Signer { + /// Determine this signers response to a proposed block + /// Returns a BlockResponse if we have already validated the block + /// Returns None otherwise + fn determine_response(&mut self, block_info: &BlockInfo) -> Option { + let valid = block_info.valid?; + let response = if valid { + debug!("{self}: Accepting block {}", block_info.block.block_id()); + self.create_block_acceptance(&block_info.block) + } else { + debug!("{self}: Rejecting block {}", block_info.block.block_id()); + self.create_block_rejection(RejectReason::RejectedInPriorRound, &block_info.block) + }; + Some(response) + } + + /// Create a block acceptance response for a block + pub fn create_block_acceptance(&self, block: &NakamotoBlock) -> BlockResponse { 
+ let signature = self + .private_key + .sign(block.header.signer_signature_hash().bits()) + .expect("Failed to sign block"); + BlockResponse::accepted( + block.header.signer_signature_hash(), + signature, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config + .tenure_idle_timeout + .saturating_add(self.proposal_config.tenure_idle_timeout_buffer), + block, + true, + ), + ) + } + + /// The actual switch-on-event processing of an event. + /// This is separated from the Signer trait implementation of process_event + /// so that the "do on every event" functionality can run after every event processing + /// (i.e. even if the event_match does an early return). + fn handle_event_match( + &mut self, + stacks_client: &StacksClient, + sortition_state: &mut Option, + event: &SignerEvent, + current_reward_cycle: u64, + ) { match event { SignerEvent::BlockValidationResponse(block_validate_response) => { debug!("{self}: Received a block proposal result from the stacks node..."); @@ -227,22 +391,25 @@ impl SignerTrait for Signer { sortition_state, ) } - SignerEvent::SignerMessages(_signer_set, messages) => { + SignerEvent::SignerMessages { + received_time, + messages, + .. 
+ } => { debug!( "{self}: Received {} messages from the other signers", messages.len() ); // try and gather signatures - for message in messages { + for (signer_public_key, message) in messages { match message { SignerMessage::BlockResponse(block_response) => self.handle_block_response( stacks_client, block_response, sortition_state, ), - SignerMessage::StateMachineUpdate(_update) => { - // TODO: should make note of this update view point to determine if there is an agreed upon global state - } + SignerMessage::StateMachineUpdate(update) => self + .handle_state_machine_update(signer_public_key, update, received_time), _ => {} } } @@ -352,6 +519,7 @@ impl SignerTrait for Signer { debug!("{self}: received a new block event for a pre-nakamoto block, no processing necessary"); return; }; + self.recently_processed.add_block(*block_id); debug!( "{self}: Received a new block event."; "block_id" => %block_id, @@ -382,60 +550,6 @@ impl SignerTrait for Signer { } } } - if prior_state != self.local_state_machine { - self.local_state_machine - .send_signer_update_message(&mut self.stackerdb); - } - } - - fn has_unprocessed_blocks(&self) -> bool { - self.signer_db - .has_unprocessed_blocks(self.reward_cycle) - .unwrap_or_else(|e| { - error!("{self}: Failed to check for pending blocks: {e:?}",); - // Assume we have pending blocks to prevent premature cleanup - true - }) - } - - fn get_local_state_machine(&self) -> &LocalStateMachine { - &self.local_state_machine - } -} - -impl Signer { - /// Determine this signers response to a proposed block - /// Returns a BlockResponse if we have already validated the block - /// Returns None otherwise - fn determine_response(&mut self, block_info: &BlockInfo) -> Option { - let valid = block_info.valid?; - let response = if valid { - debug!("{self}: Accepting block {}", block_info.block.block_id()); - self.create_block_acceptance(&block_info.block) - } else { - debug!("{self}: Rejecting block {}", block_info.block.block_id()); - 
self.create_block_rejection(RejectReason::RejectedInPriorRound, &block_info.block) - }; - Some(response) - } - - /// Create a block acceptance response for a block - pub fn create_block_acceptance(&self, block: &NakamotoBlock) -> BlockResponse { - let signature = self - .private_key - .sign(block.header.signer_signature_hash().bits()) - .expect("Failed to sign block"); - BlockResponse::accepted( - block.header.signer_signature_hash(), - signature, - self.signer_db.calculate_tenure_extend_timestamp( - self.proposal_config - .tenure_idle_timeout - .saturating_add(self.proposal_config.tenure_idle_timeout_buffer), - block, - true, - ), - ) } /// Create a block rejection response for a block with the given reject code @@ -459,6 +573,29 @@ impl Signer { ) } + /// Check some heuristics to see if our stacks-node has processed the parent of `block`. + /// Note: this can be wrong in both directions. It may return false for some blocks that + /// have been processed, and it may return true for some blocks that have not been processed. + /// The caller should not depend on this being 100% accurate. + fn maybe_processed_parent(&self, client: &StacksClient, block: &NakamotoBlock) -> bool { + let parent_block_id = &block.header.parent_block_id; + if self.recently_processed.is_processed(parent_block_id) { + return true; + } + let Ok(peer_info) = client.get_peer_info().inspect_err(|e| { + warn!( + "Failed to fetch stacks-node peer info, assuming block not processed yet"; + "error" => ?e + ) + }) else { + return false; + }; + + // if our stacks node has processed block height >= block proposal's parent + // return true + peer_info.stacks_tip_height >= block.header.chain_length.saturating_sub(1) + } + /// Check if block should be rejected based on sortition state /// Will return a BlockResponse::Rejection if the block is invalid, none otherwise. fn check_block_against_sortition_state( @@ -528,22 +665,43 @@ impl Signer { /// The actual `send_block_response` implementation. 
Declared so that we do /// not need to duplicate in testing. - fn impl_send_block_response(&mut self, block_response: BlockResponse) { - let res = self + fn impl_send_block_response( + &mut self, + block: Option<&NakamotoBlock>, + block_response: BlockResponse, + ) { + info!( + "{self}: Broadcasting a block response to stacks node: {block_response:?}"; + ); + let accepted = matches!(block_response, BlockResponse::Accepted(..)); + match self .stackerdb - .send_message_with_retry::(block_response.clone().into()); - match res { - Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), - Ok(ack) if !ack.accepted => warn!( - "{self}: Block rejection not accepted by stacker-db: {:?}", - ack.reason - ), - Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + .send_message_with_retry::(block_response.into()) + { + Ok(ack) => { + if !ack.accepted { + warn!( + "{self}: Block response not accepted by stacker-db: {:?}", + ack.reason + ); + } + crate::monitoring::actions::increment_block_responses_sent(accepted); + if let Some(block) = block { + crate::monitoring::actions::record_block_response_latency(block); + } + } + Err(e) => { + warn!("{self}: Failed to send block response to stacker-db: {e:?}",); + } } } #[cfg(any(test, feature = "testing"))] - fn send_block_response(&mut self, block_response: BlockResponse) { + fn send_block_response( + &mut self, + block: Option<&NakamotoBlock>, + block_response: BlockResponse, + ) { const NUM_REPEATS: usize = 1; let mut count = 0; let public_keys = TEST_REPEAT_PROPOSAL_RESPONSE.get(); @@ -553,7 +711,7 @@ impl Signer { count = NUM_REPEATS; } while count <= NUM_REPEATS { - self.impl_send_block_response(block_response.clone()); + self.impl_send_block_response(block, block_response.clone()); count += 1; sleep_ms(1000); @@ -561,8 +719,43 @@ impl Signer { } #[cfg(not(any(test, feature = "testing")))] - fn send_block_response(&mut self, block_response: BlockResponse) { - 
self.impl_send_block_response(block_response) + fn send_block_response( + &mut self, + block: Option<&NakamotoBlock>, + block_response: BlockResponse, + ) { + self.impl_send_block_response(block, block_response) + } + + /// Handle signer state update message + fn handle_state_machine_update( + &mut self, + signer_public_key: &Secp256k1PublicKey, + update: &StateMachineUpdate, + received_time: &SystemTime, + ) { + let address = StacksAddress::p2pkh(self.mainnet, signer_public_key); + // Store the state machine update so we can reload it if we crash + if let Err(e) = self.signer_db.insert_state_machine_update( + self.reward_cycle, + &address, + update, + received_time, + ) { + warn!("{self}: Failed to update global state in signerdb: {e}"); + } + self.global_state_evaluator + .insert_update(address, update.clone()); + + // See if this update means we should capitulate our viewpoint... + let version = self.get_signer_protocol_version(); + self.local_state_machine.capitulate_viewpoint( + &mut self.signer_db, + &mut self.global_state_evaluator, + self.stacks_address, + version, + self.reward_cycle, + ); } /// Handle block proposal messages submitted to signers stackerdb @@ -661,8 +854,7 @@ impl Signer { if let Some(block_response) = block_response { // We know proposal is invalid. Send rejection message, do not do further validation and do not store it. - debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - self.send_block_response(block_response); + self.send_block_response(Some(&block_info.block), block_response); } else { // Just in case check if the last block validation submission timed out. 
self.check_submitted_block_proposal(); @@ -678,7 +870,11 @@ impl Signer { #[cfg(any(test, feature = "testing"))] self.test_stall_block_validation_submission(); - self.submit_block_for_validation(stacks_client, &block_proposal.block); + self.submit_block_for_validation( + stacks_client, + &block_proposal.block, + get_epoch_time_secs(), + ); } else { // Still store the block but log we can't submit it for validation. We may receive enough signatures/rejections // from other signers to push the proposed block into a global rejection/acceptance regardless of our participation. @@ -712,19 +908,7 @@ impl Signer { return; }; - // Submit a proposal response to the .signers contract for miners - debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - - let accepted = matches!(block_response, BlockResponse::Accepted(..)); - if let Err(e) = self - .stackerdb - .send_message_with_retry::(block_response.into()) - { - warn!("{self}: Failed to send block response to stacker-db: {e:?}"); - } else { - crate::monitoring::actions::increment_block_responses_sent(accepted); - crate::monitoring::actions::record_block_response_latency(&block_info.block); - } + self.impl_send_block_response(Some(&block_info.block), block_response); } /// Handle block response messages from a signer @@ -860,21 +1044,7 @@ impl Signer { warn!("{self}: Failed to mark block as locally rejected: {e:?}"); } }; - debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - let res = self - .stackerdb - .send_message_with_retry::(block_response.into()); - - crate::monitoring::actions::record_block_response_latency(&block_info.block); - - match res { - Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), - Ok(ack) if !ack.accepted => warn!( - "{self}: Block rejection not accepted by stacker-db: {:?}", - ack.reason - ), - Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), - } + 
self.impl_send_block_response(Some(&block_info.block), block_response); self.signer_db .insert_block(&block_info) .unwrap_or_else(|e| self.handle_insert_block_error(e)); @@ -981,51 +1151,47 @@ impl Signer { .unwrap_or_else(|e| warn!("{self}: Failed to remove pending block validation: {e:?}")); if let Some(response) = block_response { - // Submit a proposal response to the .signers contract for miners - info!( - "{self}: Broadcasting a block response to stacks node: {response:?}"; - ); - let accepted = matches!(response, BlockResponse::Accepted(..)); - match self - .stackerdb - .send_message_with_retry::(response.into()) - { - Ok(_) => { - crate::monitoring::actions::increment_block_responses_sent(accepted); - if let Ok(Some(block_info)) = self - .signer_db - .block_lookup(&block_validate_response.signer_signature_hash()) - { - crate::monitoring::actions::record_block_response_latency( - &block_info.block, - ); - } - } - Err(e) => { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - } + let block = self + .signer_db + .block_lookup(&signer_sig_hash) + .unwrap_or_default() + .map(|info| info.block); + self.impl_send_block_response(block.as_ref(), response); }; // Check if there is a pending block validation that we need to submit to the node - match self.signer_db.get_and_remove_pending_block_validation() { - Ok(Some(signer_sig_hash)) => { - info!("{self}: Found a pending block validation: {signer_sig_hash:?}"); - match self.signer_db.block_lookup(&signer_sig_hash) { - Ok(Some(block_info)) => { - self.submit_block_for_validation(stacks_client, &block_info.block); - } - Ok(None) => { - // This should never happen - error!( - "{self}: Pending block validation not found in DB: {signer_sig_hash:?}" - ); - } - Err(e) => error!("{self}: Failed to get block info: {e:?}"), + self.check_pending_block_validations(stacks_client); + } + + /// Check if we can submit a block validation, and do so if we have pending block proposals + fn 
check_pending_block_validations(&mut self, stacks_client: &StacksClient) { + // if we're already waiting on a submitted block proposal, we cannot submit yet. + if self.submitted_block_proposal.is_some() { + return; + } + + let (signer_sig_hash, insert_ts) = + match self.signer_db.get_and_remove_pending_block_validation() { + Ok(Some(x)) => x, + Ok(None) => { + return; + } + Err(e) => { + warn!("{self}: Failed to get pending block validation: {e:?}"); + return; } + }; + + info!("{self}: Found a pending block validation: {signer_sig_hash:?}"); + match self.signer_db.block_lookup(&signer_sig_hash) { + Ok(Some(block_info)) => { + self.submit_block_for_validation(stacks_client, &block_info.block, insert_ts); + } + Ok(None) => { + // This should never happen + error!("{self}: Pending block validation not found in DB: {signer_sig_hash:?}"); } - Ok(None) => {} - Err(e) => warn!("{self}: Failed to get pending block validation: {e:?}"), + Err(e) => error!("{self}: Failed to get block info: {e:?}"), } } @@ -1083,21 +1249,7 @@ impl Signer { warn!("{self}: Failed to mark block as locally rejected: {e:?}"); } }; - debug!("{self}: Broadcasting a block response to stacks node: {rejection:?}"); - let res = self - .stackerdb - .send_message_with_retry::(rejection.into()); - - crate::monitoring::actions::record_block_response_latency(&block_info.block); - - match res { - Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), - Ok(ack) if !ack.accepted => warn!( - "{self}: Block rejection not accepted by stacker-db: {:?}", - ack.reason - ), - Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), - } + self.impl_send_block_response(Some(&block_info.block), rejection); self.signer_db .insert_block(&block_info) @@ -1405,8 +1557,31 @@ impl Signer { /// Submit a block for validation, and mark it as pending if the node /// is busy with a previous request. 
- fn submit_block_for_validation(&mut self, stacks_client: &StacksClient, block: &NakamotoBlock) { + fn submit_block_for_validation( + &mut self, + stacks_client: &StacksClient, + block: &NakamotoBlock, + added_epoch_time: u64, + ) { let signer_signature_hash = block.header.signer_signature_hash(); + if !self.maybe_processed_parent(stacks_client, block) { + let time_elapsed = get_epoch_time_secs().saturating_sub(added_epoch_time); + if Duration::from_secs(time_elapsed) + < self.proposal_config.proposal_wait_for_parent_time + { + info!("{self}: Have not processed parent of block proposal yet, inserting pending block validation and will try again later"; + "signer_signature_hash" => %signer_signature_hash, + ); + self.signer_db + .insert_pending_block_validation(&signer_signature_hash, added_epoch_time) + .unwrap_or_else(|e| { + warn!("{self}: Failed to insert pending block validation: {e:?}") + }); + return; + } else { + debug!("{self}: Cannot confirm that we have processed parent, but we've waited proposal_wait_for_parent_time, will submit proposal"); + } + } match stacks_client.submit_block_for_validation(block.clone()) { Ok(_) => { self.submitted_block_proposal = Some((signer_signature_hash, Instant::now())); @@ -1417,10 +1592,7 @@ impl Signer { "signer_signature_hash" => %signer_signature_hash, ); self.signer_db - .insert_pending_block_validation( - &signer_signature_hash, - get_epoch_time_secs(), - ) + .insert_pending_block_validation(&signer_signature_hash, added_epoch_time) .unwrap_or_else(|e| { warn!("{self}: Failed to insert pending block validation: {e:?}") }); @@ -1472,6 +1644,16 @@ impl Signer { None } } + + #[cfg(not(any(test, feature = "testing")))] + fn get_signer_protocol_version(&self) -> u64 { + SUPPORTED_SIGNER_PROTOCOL_VERSION + } + + #[cfg(any(test, feature = "testing"))] + fn get_signer_protocol_version(&self) -> u64 { + self.test_get_signer_protocol_version() + } } /// Determine if a block should be re-evaluated based on its rejection reason˝ 
diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs index 4dcf3abcad..c14cb775d6 100644 --- a/stacks-signer/src/v0/signer_state.rs +++ b/stacks-signer/src/v0/signer_state.rs @@ -13,10 +13,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::HashMap; use std::time::{Duration, UNIX_EPOCH}; use blockstack_lib::chainstate::burn::ConsensusHashExtensions; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use blockstack_lib::chainstate::stacks::StacksTransaction; +use clarity::types::chainstate::StacksAddress; use libsigner::v0::messages::{ MessageSlotID, SignerMessage, StateMachineUpdate as StateMachineUpdateMessage, StateMachineUpdateContent, StateMachineUpdateMinerState, @@ -27,7 +30,7 @@ use stacks_common::codec::Error as CodecError; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::{info, warn}; +use stacks_common::{debug, info, warn}; use crate::chainstate::{ ProposalEvalConfig, SignerChainstateError, SortitionState, SortitionsView, @@ -36,12 +39,240 @@ use crate::client::{ClientError, CurrentAndLastSortition, StackerDB, StacksClien use crate::signerdb::SignerDb; /// This is the latest supported protocol version for this signer binary -pub static SUPPORTED_SIGNER_PROTOCOL_VERSION: u64 = 0; +pub static SUPPORTED_SIGNER_PROTOCOL_VERSION: u64 = 1; + +/// A struct used to determine the current global state +#[derive(Debug)] +pub struct GlobalStateEvaluator { + /// A mapping of signer addresses to their corresponding vote weight + pub address_weights: HashMap, + /// A mapping of signer addresses to their corresponding updates + pub address_updates: HashMap, + /// The total weight of all signers + pub total_weight: u32, +} + +impl GlobalStateEvaluator { + 
/// Create a new state evaluator + pub fn new( + address_updates: HashMap, + address_weights: HashMap, + ) -> Self { + let total_weight = address_weights + .values() + .fold(0u32, |acc, val| acc.saturating_add(*val)); + Self { + address_weights, + address_updates, + total_weight, + } + } + + /// Determine what the maximum signer protocol version that a majority of signers can support + pub fn determine_latest_supported_signer_protocol_version( + &mut self, + local_address: StacksAddress, + local_update: &StateMachineUpdateMessage, + ) -> Option { + self.insert_update(local_address, local_update.clone()); + let mut protocol_versions = HashMap::new(); + for (address, update) in &self.address_updates { + let Some(weight) = self.address_weights.get(address) else { + continue; + }; + let entry = protocol_versions + .entry(update.local_supported_signer_protocol_version) + .or_insert_with(|| 0); + *entry += weight; + } + // find the highest version number supported by a threshold number of signers + let mut protocol_versions: Vec<_> = protocol_versions.into_iter().collect(); + protocol_versions.sort_by_key(|(version, _)| *version); + let mut total_weight_support = 0; + for (version, weight_support) in protocol_versions.into_iter().rev() { + total_weight_support += weight_support; + if total_weight_support >= self.total_weight * 7 / 10 { + return Some(version); + } + } + None + } + + /// Determine what the global burn view is if there is one + pub fn determine_global_burn_view( + &mut self, + local_address: StacksAddress, + local_update: &StateMachineUpdateMessage, + ) -> Option<(ConsensusHash, u64)> { + self.insert_update(local_address, local_update.clone()); + let mut burn_blocks = HashMap::new(); + for (address, update) in &self.address_updates { + let Some(weight) = self.address_weights.get(address) else { + continue; + }; + let (burn_block, burn_block_height) = match update.content { + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + .. 
+ } + | StateMachineUpdateContent::V1 { + burn_block, + burn_block_height, + .. + } => (burn_block, burn_block_height), + }; + + let entry = burn_blocks + .entry((burn_block, burn_block_height)) + .or_insert_with(|| 0); + *entry += weight; + if self.reached_agreement(*entry) { + return Some((burn_block, burn_block_height)); + } + } + None + } + + /// Check if there is an agreed upon global state + pub fn determine_global_state( + &mut self, + local_address: StacksAddress, + local_update: &StateMachineUpdateMessage, + ) -> Option { + let active_signer_protocol_version = + self.determine_latest_supported_signer_protocol_version(local_address, local_update)?; + let mut state_views = HashMap::new(); + for (address, update) in &self.address_updates { + let Some(weight) = self.address_weights.get(address) else { + continue; + }; + let (burn_block, burn_block_height, current_miner, tx_replay_set) = + match &update.content { + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + .. + } => (burn_block, burn_block_height, current_miner, None), + StateMachineUpdateContent::V1 { + burn_block, + burn_block_height, + current_miner, + replay_transactions, + } => ( + burn_block, + burn_block_height, + current_miner, + Some(replay_transactions.clone()), + ), + }; + let state_machine = SignerStateMachine { + burn_block: *burn_block, + burn_block_height: *burn_block_height, + current_miner: current_miner.into(), + active_signer_protocol_version, + tx_replay_set, + }; + let entry = state_views + .entry(state_machine.clone()) + .or_insert_with(|| 0); + *entry += weight; + if self.reached_agreement(*entry) { + return Some(state_machine); + } + } + None + } + + /// Determines whether a signer with the `local_address` and `local_update` should capitulate + /// its current miner view to a new state. This is not necessarily the same as the current global + /// view of the miner as it is up to signers to capitulate before this becomes the finalized view. 
+ pub fn capitulate_miner_view( + &mut self, + signerdb: &mut SignerDb, + local_address: StacksAddress, + local_update: &StateMachineUpdateMessage, + ) -> Option { + let current_burn_block = match local_update.content { + StateMachineUpdateContent::V0 { burn_block, .. } + | StateMachineUpdateContent::V1 { burn_block, .. } => burn_block, + }; + let (global_burn_view, _) = self.determine_global_burn_view(local_address, local_update)?; + if current_burn_block != global_burn_view { + crate::monitoring::actions::increment_signer_agreement_state_conflict( + crate::monitoring::SignerAgreementStateConflict::BurnBlockDelay, + ); + return None; + } + let mut current_miners = HashMap::new(); + for (address, update) in &self.address_updates { + let Some(weight) = self.address_weights.get(address) else { + continue; + }; + let (burn_block, current_miner) = match &update.content { + StateMachineUpdateContent::V0 { + burn_block, + current_miner, + .. + } + | StateMachineUpdateContent::V1 { + burn_block, + current_miner, + .. + } => (burn_block, current_miner), + }; + + if *burn_block != global_burn_view { + continue; + } + + let StateMachineUpdateMinerState::ActiveMiner { tenure_id, .. 
} = current_miner else { + continue; + }; + + let entry = current_miners.entry(current_miner).or_insert_with(|| 0); + *entry += weight; + + if *entry >= self.total_weight * 3 / 10 { + let nmb_blocks = signerdb + .get_globally_accepted_block_count_in_tenure(tenure_id) + .unwrap_or(0); + if nmb_blocks > 0 || self.reached_agreement(*entry) { + return Some(current_miner.clone()); + } + } + } + crate::monitoring::actions::increment_signer_agreement_state_conflict( + crate::monitoring::SignerAgreementStateConflict::MinerView, + ); + None + } + + /// Will insert the update for the given address and weight only if the GlobalStateMachineEvaluator already is aware of this address + pub fn insert_update( + &mut self, + address: StacksAddress, + update: StateMachineUpdateMessage, + ) -> bool { + if !self.address_weights.contains_key(&address) { + return false; + } + self.address_updates.insert(address, update); + true + } + + /// Check if the supplied vote weight crosses the global agreement threshold. + /// Returns true if it has, false otherwise. + fn reached_agreement(&self, vote_weight: u32) -> bool { + vote_weight >= self.total_weight * 7 / 10 + } +} /// A signer state machine view. This struct can /// be used to encode the local signer's view or /// the global view. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq, Hash)] pub struct SignerStateMachine { /// The tip burn block (i.e., the latest bitcoin block) seen by this signer pub burn_block: ConsensusHash, @@ -51,9 +282,11 @@ pub struct SignerStateMachine { pub current_miner: MinerState, /// The active signing protocol version pub active_signer_protocol_version: u64, + /// Transaction replay set + pub tx_replay_set: Option>, } -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq, Hash)] /// Enum for capturing the signer state machine's view of who /// should be the active miner and what their tenure should be /// built on top of. @@ -77,6 +310,27 @@ pub enum MinerState { NoValidMiner, } +impl From<&StateMachineUpdateMinerState> for MinerState { + fn from(val: &StateMachineUpdateMinerState) -> Self { + match *val { + StateMachineUpdateMinerState::NoValidMiner => MinerState::NoValidMiner, + StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh, + tenure_id, + parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + } => MinerState::ActiveMiner { + current_miner_pkh, + tenure_id, + parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + }, + } + } +} + /// The local signer state machine #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum LocalStateMachine { @@ -100,10 +354,25 @@ pub enum StateMachineUpdate { BurnBlock(u64), } -impl TryInto for &LocalStateMachine { - type Error = CodecError; +impl LocalStateMachine { + /// Initialize a local state machine by querying the local stacks-node + /// and signerdb for the current sortition information + pub fn new( + db: &SignerDb, + client: &StacksClient, + proposal_config: &ProposalEvalConfig, + ) -> Result { + let mut instance = Self::Uninitialized; + instance.bitcoin_block_arrival(db, client, proposal_config, 
None)?; + + Ok(instance) + } - fn try_into(self) -> Result { + /// Convert the local state machine into update message with the specificed supported protocol version + pub fn try_into_update_message_with_version( + &self, + local_supported_signer_protocol_version: u64, + ) -> Result { let LocalStateMachine::Initialized(state_machine) = self else { return Err(CodecError::SerializeError( "Local state machine is not ready to be serialized into an update message".into(), @@ -127,31 +396,30 @@ impl TryInto for &LocalStateMachine { MinerState::NoValidMiner => StateMachineUpdateMinerState::NoValidMiner, }; - StateMachineUpdateMessage::new( - state_machine.active_signer_protocol_version, - SUPPORTED_SIGNER_PROTOCOL_VERSION, - StateMachineUpdateContent::V0 { + let content = match state_machine.active_signer_protocol_version { + 0 => StateMachineUpdateContent::V0 { burn_block: state_machine.burn_block, burn_block_height: state_machine.burn_block_height, current_miner, }, + 1 => StateMachineUpdateContent::V1 { + burn_block: state_machine.burn_block, + burn_block_height: state_machine.burn_block_height, + current_miner, + replay_transactions: state_machine.tx_replay_set.clone().unwrap_or_default(), + }, + other => { + return Err(CodecError::DeserializeError(format!( + "Active signer ptocol version is unknown: {other}" + ))) + } + }; + StateMachineUpdateMessage::new( + state_machine.active_signer_protocol_version, + local_supported_signer_protocol_version, + content, ) } -} - -impl LocalStateMachine { - /// Initialize a local state machine by querying the local stacks-node - /// and signerdb for the current sortition information - pub fn new( - db: &SignerDb, - client: &StacksClient, - proposal_config: &ProposalEvalConfig, - ) -> Result { - let mut instance = Self::Uninitialized; - instance.bitcoin_block_arrival(db, client, proposal_config, None)?; - - Ok(instance) - } fn place_holder() -> SignerStateMachine { SignerStateMachine { @@ -159,14 +427,21 @@ impl LocalStateMachine { 
burn_block_height: 0, current_miner: MinerState::NoValidMiner, active_signer_protocol_version: SUPPORTED_SIGNER_PROTOCOL_VERSION, + tx_replay_set: None, } } /// Send the local state machine as a signer update message to stackerdb - pub fn send_signer_update_message(&self, stackerdb: &mut StackerDB) { - let update: Result = self.try_into(); + pub fn send_signer_update_message( + &self, + stackerdb: &mut StackerDB, + version: u64, + ) { + let update: Result = + self.try_into_update_message_with_version(version); match update { Ok(update) => { + debug!("Sending signer update message to stackerdb: {update:?}"); if let Err(e) = stackerdb.send_message_with_retry::(update.into()) { warn!("Failed to send signer update to stacker-db: {e:?}",); } @@ -534,6 +809,7 @@ impl LocalStateMachine { burn_block_height: next_burn_block_height, current_miner: miner_state, active_signer_protocol_version: prior_state_machine.active_signer_protocol_version, + tx_replay_set: prior_state_machine.tx_replay_set, }); if prior_state != *self { @@ -544,4 +820,165 @@ impl LocalStateMachine { Ok(()) } + + /// Updates the local state machine's viewpoint as necessary based on the global state + pub fn capitulate_viewpoint( + &mut self, + signerdb: &mut SignerDb, + eval: &mut GlobalStateEvaluator, + local_address: StacksAddress, + local_supported_signer_protocol_version: u64, + reward_cycle: u64, + ) { + // Before we ever access eval...we should make sure to include our own local state machine update message in the evaluation + let Ok(mut local_update) = + self.try_into_update_message_with_version(local_supported_signer_protocol_version) + else { + return; + }; + + let old_protocol_version = local_update.active_signer_protocol_version; + // First check if we should update our active protocol version + let active_signer_protocol_version = eval + .determine_latest_supported_signer_protocol_version(local_address, &local_update) + .unwrap_or(old_protocol_version); + + let (burn_block, burn_block_height, 
current_miner, tx_replay_set) = + match &local_update.content { + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + .. + } => (burn_block, burn_block_height, current_miner, None), + StateMachineUpdateContent::V1 { + burn_block, + burn_block_height, + current_miner, + replay_transactions, + } => ( + burn_block, + burn_block_height, + current_miner, + Some(replay_transactions), + ), + }; + + if active_signer_protocol_version != old_protocol_version { + info!("Updating active signer protocol version from {old_protocol_version} to {active_signer_protocol_version}"); + crate::monitoring::actions::increment_signer_agreement_state_change_reason( + crate::monitoring::SignerAgreementStateChangeReason::ProtocolUpgrade, + ); + *self = Self::Initialized(SignerStateMachine { + burn_block: *burn_block, + burn_block_height: *burn_block_height, + current_miner: current_miner.into(), + active_signer_protocol_version, + tx_replay_set: tx_replay_set.cloned(), + }); + // Because we updated our active signer protocol version, update local_update so its included in the subsequent evaluations + let Ok(update) = + self.try_into_update_message_with_version(local_supported_signer_protocol_version) + else { + return; + }; + local_update = update; + } + + // Check if we should also capitulate our miner viewpoint + let Some(new_miner) = eval.capitulate_miner_view(signerdb, local_address, &local_update) + else { + return; + }; + + let (burn_block, burn_block_height, current_miner, tx_replay_set) = + match local_update.content { + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + .. 
+ } => (burn_block, burn_block_height, current_miner, None), + StateMachineUpdateContent::V1 { + burn_block, + burn_block_height, + current_miner, + replay_transactions, + } => ( + burn_block, + burn_block_height, + current_miner, + Some(replay_transactions), + ), + }; + + if current_miner != new_miner { + info!("Capitulating local state machine's current miner viewpoint"; + "current_miner" => ?current_miner, + "new_miner" => ?new_miner, + ); + crate::monitoring::actions::increment_signer_agreement_state_change_reason( + crate::monitoring::SignerAgreementStateChangeReason::MinerViewUpdate, + ); + Self::monitor_miner_parent_tenure_update(¤t_miner, &new_miner); + Self::monitor_capitulation_latency(signerdb, reward_cycle); + + *self = Self::Initialized(SignerStateMachine { + burn_block, + burn_block_height, + current_miner: (&new_miner).into(), + active_signer_protocol_version, + tx_replay_set, + }); + } + } + + #[allow(unused_variables)] + fn monitor_miner_parent_tenure_update( + current_miner: &StateMachineUpdateMinerState, + new_miner: &StateMachineUpdateMinerState, + ) { + #[cfg(feature = "monitoring_prom")] + if let ( + StateMachineUpdateMinerState::ActiveMiner { + parent_tenure_id: current_parent_tenure, + .. + }, + StateMachineUpdateMinerState::ActiveMiner { + parent_tenure_id: new_parent_tenure, + .. 
+ }, + ) = (¤t_miner, &new_miner) + { + if current_parent_tenure != new_parent_tenure { + crate::monitoring::actions::increment_signer_agreement_state_change_reason( + crate::monitoring::SignerAgreementStateChangeReason::MinerParentTenureUpdate, + ); + } + } + } + + #[allow(unused_variables)] + fn monitor_capitulation_latency(signer_db: &SignerDb, reward_cycle: u64) { + #[cfg(feature = "monitoring_prom")] + { + let latency_result = signer_db.get_signer_state_machine_updates_latency(reward_cycle); + match latency_result { + Ok(seconds) => { + crate::monitoring::actions::record_signer_agreement_capitulation_latency( + seconds, + ) + } + Err(e) => warn!("Failed to retrieve state updates latency in signerdb: {e}"), + } + } + } + + /// Extract out the tx replay set if it exists + pub fn get_tx_replay_set(&self) -> Option> { + let Self::Initialized(state) = self else { + return None; + }; + state.tx_replay_set.clone() + } } diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs index 6fb7ffa9fe..9580468c9a 100644 --- a/stacks-signer/src/v0/tests.rs +++ b/stacks-signer/src/v0/tests.rs @@ -13,6 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::collections::HashMap; use std::sync::LazyLock; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; @@ -24,8 +25,14 @@ use stacks_common::util::tests::TestFlag; use stacks_common::{info, warn}; use super::signer::Signer; +use super::signer_state::SUPPORTED_SIGNER_PROTOCOL_VERSION; use crate::signerdb::BlockInfo; +/// A global variable that can be used to pin a signer's highest supported protocol version if the signer's public key is in the provided list +pub static TEST_PIN_SUPPORTED_SIGNER_PROTOCOL_VERSION: LazyLock< + TestFlag>, +> = LazyLock::new(TestFlag::default); + /// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: LazyLock>> = LazyLock::new(TestFlag::default); @@ -151,4 +158,16 @@ impl Signer { warn!("{self}: Block validation submission is no longer stalled due to testing directive. Continuing..."); } } + + /// Get the pinned signer version for the signer + pub fn test_get_signer_protocol_version(&self) -> u64 { + let public_keys = TEST_PIN_SUPPORTED_SIGNER_PROTOCOL_VERSION.get(); + if let Some(version) = public_keys.get( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: signer version is pinned to {version}"); + return *version; + } + SUPPORTED_SIGNER_PROTOCOL_VERSION + } } diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index c5ffc04772..035048304c 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -14,13 +14,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::cell::RefCell; use std::cmp::Ordering; use std::collections::{HashMap, HashSet}; use std::io::{ErrorKind, Write}; use std::ops::{Deref, DerefMut}; use std::str::FromStr; +use std::sync::{Arc, LazyLock, Mutex, MutexGuard}; use std::{cmp, fmt, fs}; +use clarity::util::lru_cache::LruCache; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::representations::{ClarityName, ContractName}; @@ -94,6 +97,10 @@ pub const REWARD_WINDOW_END: u64 = 144 * 90 + REWARD_WINDOW_START; pub type BlockHeaderCache = HashMap, ConsensusHash)>; +const DESCENDANCY_CACHE_SIZE: usize = 2000; +static DESCENDANCY_CACHE: LazyLock>>> = + LazyLock::new(|| Arc::new(Mutex::new(LruCache::new(DESCENDANCY_CACHE_SIZE)))); + pub enum FindIter { Found(R), Continue, @@ -1084,6 +1091,38 @@ pub trait SortitionHandle { Ok(Some(StacksBlockId::new(&ch, &bhh))) } + /// Check if the descendancy cache has an entry for whether or not the winning block in `key.0` + /// descends from `key.1` + /// + /// If it does, return the cached entry + fn descendancy_cache_get( + cache: &mut MutexGuard<'_, LruCache<(SortitionId, BlockHeaderHash), bool>>, + key: &(SortitionId, BlockHeaderHash), + ) -> Option { + match cache.get(key) { + Ok(result) => result, + // cache is broken, create a new one + Err(e) => { + error!("SortitionDB's descendant cache errored. Will continue operation with cleared cache"; "err" => %e); + **cache = LruCache::new(DESCENDANCY_CACHE_SIZE); + None + } + } + } + + /// Cache the result of the descendancy check on whether or not the winning block in `key.0` + /// descends from `key.1` + fn descendancy_cache_put( + cache: &mut MutexGuard<'_, LruCache<(SortitionId, BlockHeaderHash), bool>>, + key: (SortitionId, BlockHeaderHash), + is_descended: bool, + ) { + if let Err(e) = cache.insert_clean(key, is_descended) { + error!("SortitionDB's descendant cache errored. 
Will continue operation with cleared cache"; "err" => %e); + **cache = LruCache::new(DESCENDANCY_CACHE_SIZE); + } + } + /// is the given block a descendant of `potential_ancestor`? /// * block_at_burn_height: the burn height of the sortition that chose the stacks block to check /// * potential_ancestor: the stacks block hash of the potential ancestor @@ -1111,12 +1150,43 @@ pub trait SortitionHandle { test_debug!("No snapshot at height {}", block_at_burn_height); db_error::NotFoundError })?; + let top_sortition_id = sn.sortition_id; + + let mut cache = DESCENDANCY_CACHE + .lock() + .expect("FATAL: lock poisoned in SortitionDB"); while sn.block_height >= earliest_block_height { + let cache_check_key = (sn.sortition_id, potential_ancestor.clone()); + match Self::descendancy_cache_get(&mut cache, &cache_check_key) { + Some(result) => { + if sn.sortition_id != top_sortition_id { + Self::descendancy_cache_put( + &mut cache, + (top_sortition_id, cache_check_key.1), + result, + ); + } + return Ok(result); + } + // not cached, don't need to do anything. + None => {} + } + if !sn.sortition { + Self::descendancy_cache_put( + &mut cache, + (top_sortition_id, cache_check_key.1), + false, + ); return Ok(false); } if &sn.winning_stacks_block_hash == potential_ancestor { + Self::descendancy_cache_put( + &mut cache, + (top_sortition_id, cache_check_key.1), + true, + ); return Ok(true); } @@ -1152,6 +1222,11 @@ pub trait SortitionHandle { } } } + Self::descendancy_cache_put( + &mut cache, + (top_sortition_id, potential_ancestor.clone()), + false, + ); return Ok(false); } } @@ -1188,13 +1263,25 @@ impl<'a> SortitionHandleTx<'a> { burn_header_hash: &BurnchainHeaderHash, chain_tip: &SortitionId, ) -> Result, db_error> { + let Some(sortition_id) = self.get_sortition_id_for_bhh(burn_header_hash, chain_tip)? 
else { + return Ok(None); + }; + + SortitionDB::get_block_snapshot(self.tx(), &sortition_id) + } + + fn get_sortition_id_for_bhh( + &mut self, + burn_header_hash: &BurnchainHeaderHash, + chain_tip: &SortitionId, + ) -> Result, db_error> { let sortition_identifier_key = db_keys::sortition_id_for_bhh(burn_header_hash); let sortition_id = match self.get_indexed(chain_tip, &sortition_identifier_key)? { None => return Ok(None), Some(x) => SortitionId::from_hex(&x).expect("FATAL: bad Sortition ID stored in DB"), }; - SortitionDB::get_block_snapshot(self.tx(), &sortition_id) + Ok(Some(sortition_id)) } /// Get a leader key at a specific location in the burn chain's fork history, given the @@ -2027,15 +2114,15 @@ impl<'a> SortitionHandleConn<'a> { connection: &'a SortitionDBConn<'a>, chain_tip: &SortitionId, ) -> Result, db_error> { - Ok(SortitionHandleConn { - context: SortitionHandleContext { + Ok(SortitionHandleConn::new( + &connection.index, + SortitionHandleContext { chain_tip: chain_tip.clone(), first_block_height: connection.context.first_block_height, pox_constants: connection.context.pox_constants.clone(), dryrun: connection.context.dryrun, }, - index: connection.index, - }) + )) } fn get_tip_indexed(&self, key: &str) -> Result, db_error> { @@ -3722,15 +3809,15 @@ impl SortitionDBTx<'_> { impl SortitionDBConn<'_> { pub fn as_handle<'b>(&'b self, chain_tip: &SortitionId) -> SortitionHandleConn<'b> { - SortitionHandleConn { - index: self.index, - context: SortitionHandleContext { + SortitionHandleConn::new( + &self.index, + SortitionHandleContext { first_block_height: self.context.first_block_height.clone(), chain_tip: chain_tip.clone(), pox_constants: self.context.pox_constants.clone(), dryrun: self.context.dryrun, }, - } + ) } /// Given a burnchain consensus hash, @@ -6452,25 +6539,25 @@ impl SortitionHandleTx<'_> { } // must be an ancestor of this tip, or must be this tip - if let Some(sn) = - self.get_block_snapshot(&arrival_sn.burn_header_hash, 
&parent_tip.sortition_id)? + if let Some(sortition_id) = self + .get_sortition_id_for_bhh(&arrival_sn.burn_header_hash, &parent_tip.sortition_id)? { - if !sn.pox_valid || sn != arrival_sn { + if sortition_id != arrival_sn.sortition_id { continue; } debug!( "New Stacks anchored block arrived: block {}/{} ({}) ari={} tip={}", - &sn.consensus_hash, - &sn.winning_stacks_block_hash, - sn.stacks_block_height, + &arrival_sn.consensus_hash, + &arrival_sn.winning_stacks_block_hash, + arrival_sn.stacks_block_height, ari, &parent_tip.burn_header_hash ); new_block_arrivals.push(( - sn.consensus_hash, - sn.winning_stacks_block_hash, - sn.stacks_block_height, + arrival_sn.consensus_hash, + arrival_sn.winning_stacks_block_hash, + arrival_sn.stacks_block_height, )); } else { // this block did not arrive on an ancestor block diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index 3d032d4c8a..0eb0ab62b8 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -17,7 +17,8 @@ use std::{error, fmt, fs, io}; use clarity::vm::types::PrincipalData; -use serde::Deserialize; +use serde::de::Error as DeError; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_json::json; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, TrieHash, VRFSeed, @@ -374,6 +375,132 @@ pub fn stacks_addr_serialize(addr: &StacksAddress) -> serde_json::Value { }) } +fn normalize_stacks_addr_fields<'de, D>( + inner: &mut serde_json::Map, +) -> Result<(), D::Error> +where + D: Deserializer<'de>, +{ + // Rename `address_version` to `version` + if let Some(address_version) = inner.remove("address_version") { + inner.insert("version".to_string(), address_version); + } + + // Rename `address_hash_bytes` to `bytes` and convert to bytes + if let Some(address_bytes) = inner + .remove("address_hash_bytes") + .and_then(|addr| 
serde_json::Value::as_str(&addr).map(|x| x.to_string())) + { + let address_hex: String = address_bytes.chars().skip(2).collect(); // Remove "0x" prefix + inner.insert( + "bytes".to_string(), + serde_json::to_value(&address_hex).map_err(DeError::custom)?, + ); + } + + Ok(()) +} + +/// Serialization function for serializing extended information within the BlockstackOperationType +/// that is not printed via the standard serde implementation. Specifically, serializes additional +/// StacksAddress information. +pub fn blockstack_op_extended_serialize_opt( + op: &Option, + s: S, +) -> Result { + match op { + Some(op) => { + let value = op.blockstack_op_to_json(); + value.serialize(s) + } + None => s.serialize_none(), + } +} + +/// Deserialize the burnchain op that was serialized with blockstack_op_to_json +pub fn blockstack_op_extended_deserialize<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + use serde::de::Error as DeError; + use serde_json::{Map, Value}; + + let raw: Option = Option::deserialize(deserializer)?; + let Some(Value::Object(mut obj)) = raw else { + return Ok(None); + }; + + let Some((key, value)) = obj.iter_mut().next() else { + return Ok(None); + }; + + let inner = value + .as_object_mut() + .ok_or_else(|| DeError::custom("Expected blockstack op to be an object"))?; + + let normalized_key = match key.as_str() { + "pre_stx" => { + BlockstackOperationType::normalize_pre_stx_fields::(inner)?; + "PreStx" + } + "stack_stx" => { + BlockstackOperationType::normalize_stack_stx_fields::(inner)?; + "StackStx" + } + "transfer_stx" => { + BlockstackOperationType::normalize_transfer_stx_fields::(inner)?; + "TransferStx" + } + "delegate_stx" => { + BlockstackOperationType::normalize_delegate_stx_fields::(inner)?; + "DelegateStx" + } + "vote_for_aggregate_key" => { + BlockstackOperationType::normalize_vote_for_aggregate_key_fields::(inner)?; + "VoteForAggregateKey" + } + "leader_key_register" => "LeaderKeyRegister", + 
"leader_block_commit" => "LeaderBlockCommit", + other => other, + }; + + let mut map = Map::new(); + map.insert(normalized_key.to_string(), value.clone()); + + let normalized = Value::Object(map); + + serde_json::from_value(normalized) + .map(Some) + .map_err(serde::de::Error::custom) +} + +fn normalize_common_fields<'de, D>( + map: &mut serde_json::Map, +) -> Result<(), D::Error> +where + D: Deserializer<'de>, +{ + if let Some(hex_str) = map + .get("burn_header_hash") + .and_then(serde_json::Value::as_str) + { + let cleaned = hex_str.strip_prefix("0x").unwrap_or(hex_str); + let val = BurnchainHeaderHash::from_hex(cleaned).map_err(DeError::custom)?; + let ser_val = serde_json::to_value(val).map_err(DeError::custom)?; + map.insert("burn_header_hash".to_string(), ser_val); + } + + if let Some(val) = map.remove("burn_txid") { + map.insert("txid".to_string(), val); + } + if let Some(val) = map.remove("burn_block_height") { + map.insert("block_height".to_string(), val); + } + Ok(()) +} + impl BlockstackOperationType { pub fn opcode(&self) -> Opcodes { match *self { @@ -475,6 +602,114 @@ impl BlockstackOperationType { }; } + // Replace all the normalize_* functions with minimal implementations + fn normalize_pre_stx_fields<'de, D>( + map: &mut serde_json::Map, + ) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + normalize_common_fields::(map)?; + if let Some(serde_json::Value::Object(obj)) = map.get_mut("output") { + normalize_stacks_addr_fields::(obj)?; + } + Ok(()) + } + + fn normalize_stack_stx_fields<'de, D>( + map: &mut serde_json::Map, + ) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + normalize_common_fields::(map)?; + if let Some(serde_json::Value::Object(obj)) = map.get_mut("sender") { + normalize_stacks_addr_fields::(obj)?; + } + if let Some(reward_val) = map.get("reward_addr") { + let b58_str = reward_val + .as_str() + .ok_or_else(|| DeError::custom("Expected base58 string in reward_addr"))?; + let addr = 
PoxAddress::from_b58(b58_str) + .ok_or_else(|| DeError::custom("Invalid stacks address"))?; + let val = serde_json::to_value(addr).map_err(DeError::custom)?; + map.insert("reward_addr".into(), val); + } + Ok(()) + } + + fn normalize_transfer_stx_fields<'de, D>( + map: &mut serde_json::Map, + ) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + normalize_common_fields::(map)?; + for field in ["recipient", "sender"] { + if let Some(serde_json::Value::Object(obj)) = map.get_mut(field) { + normalize_stacks_addr_fields::(obj)?; + } + } + if let Some(memo_str) = map.get("memo").and_then(serde_json::Value::as_str) { + let memo_hex = memo_str.trim_start_matches("0x"); + let memo_bytes = hex_bytes(memo_hex).map_err(DeError::custom)?; + let val = serde_json::to_value(memo_bytes).map_err(DeError::custom)?; + map.insert("memo".into(), val); + } + Ok(()) + } + + fn normalize_delegate_stx_fields<'de, D>( + map: &mut serde_json::Map, + ) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + normalize_common_fields::(map)?; + if let Some(serde_json::Value::Array(arr)) = map.get("reward_addr") { + if arr.len() == 2 { + let index = arr[0] + .as_u64() + .ok_or_else(|| DeError::custom("Expected u64 index"))? 
+ as u32; + let b58_str = arr[1] + .as_str() + .ok_or_else(|| DeError::custom("Expected base58 string"))?; + let addr = PoxAddress::from_b58(b58_str) + .ok_or_else(|| DeError::custom("Invalid stacks address"))?; + let val = serde_json::to_value((index, addr)).map_err(DeError::custom)?; + map.insert("reward_addr".into(), val); + } + } + for field in ["delegate_to", "sender"] { + if let Some(serde_json::Value::Object(obj)) = map.get_mut(field) { + normalize_stacks_addr_fields::(obj)?; + } + } + Ok(()) + } + + fn normalize_vote_for_aggregate_key_fields<'de, D>( + map: &mut serde_json::Map, + ) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + normalize_common_fields::(map)?; + for field in ["aggregate_key", "signer_key"] { + if let Some(hex_str) = map.get(field).and_then(serde_json::Value::as_str) { + let cleaned = hex_str.strip_prefix("0x").unwrap_or(hex_str); + let val = StacksPublicKeyBuffer::from_hex(cleaned).map_err(DeError::custom)?; + let ser_val = serde_json::to_value(val).map_err(DeError::custom)?; + map.insert(field.to_string(), ser_val); + } + } + if let Some(serde_json::Value::Object(obj)) = map.get_mut("sender") { + normalize_stacks_addr_fields::(obj)?; + } + Ok(()) + } + pub fn pre_stx_to_json(op: &PreStxOp) -> serde_json::Value { json!({ "pre_stx": { diff --git a/stackslib/src/chainstate/burn/operations/test/mod.rs b/stackslib/src/chainstate/burn/operations/test/mod.rs index a27afaffd0..8524dcd94d 100644 --- a/stackslib/src/chainstate/burn/operations/test/mod.rs +++ b/stackslib/src/chainstate/burn/operations/test/mod.rs @@ -1,5 +1,11 @@ +use clarity::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksPublicKey, VRFSeed, +}; +use clarity::types::StacksPublicKeyBuffer; +use clarity::util::vrf::{VRFPrivateKey, VRFPublicKey}; use rand::rngs::StdRng; use rand::SeedableRng; +use stacks_common::address::AddressHashMode; use stacks_common::util::hash::Hash160; use crate::burnchains::bitcoin::address::{ 
@@ -9,8 +15,14 @@ use crate::burnchains::bitcoin::{ BitcoinInputType, BitcoinNetworkType, BitcoinTransaction, BitcoinTxInputStructured, BitcoinTxOutput, }; -use crate::burnchains::{BurnchainBlockHeader, BurnchainTransaction, Txid}; +use crate::burnchains::{BurnchainBlockHeader, BurnchainSigner, BurnchainTransaction, Txid}; +use crate::chainstate::burn::operations::{ + blockstack_op_extended_deserialize, blockstack_op_extended_serialize_opt, + BlockstackOperationType, DelegateStxOp, LeaderBlockCommitOp, LeaderKeyRegisterOp, PreStxOp, + StackStxOp, TransferStxOp, VoteForAggregateKeyOp, +}; use crate::chainstate::burn::Opcodes; +use crate::chainstate::stacks::address::PoxAddress; mod serialization; @@ -85,3 +97,169 @@ impl Output { } } } + +#[test] +fn serde_blockstack_ops() { + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] + struct TestOpHolder { + #[serde( + serialize_with = "blockstack_op_extended_serialize_opt", + deserialize_with = "blockstack_op_extended_deserialize" + )] + burnchain_op: Option, + } + let holder = TestOpHolder { + burnchain_op: Some(BlockstackOperationType::PreStx(PreStxOp { + output: StacksAddress::new(0, Hash160([2u8; 20])) + .expect("Unable to create StacksAddress"), + txid: Txid([3u8; 32]), + vtxindex: 1, + block_height: 20, + burn_header_hash: BurnchainHeaderHash([4u8; 32]), + })), + }; + let json_str = serde_json::to_string_pretty(&holder).expect("Failed to convert to json string"); + + let deserialized: TestOpHolder = + serde_json::from_str(&json_str).expect("Failed to deserialize PreStxOp"); + assert_eq!(holder, deserialized); + + let holder = TestOpHolder { + burnchain_op: Some(BlockstackOperationType::DelegateStx(DelegateStxOp { + sender: StacksAddress::new(0, Hash160([2u8; 20])) + .expect("Unable to create StacksAddress"), + delegate_to: StacksAddress::new(1, Hash160([10u8; 20])) + .expect("Unable to create StacksAddress"), + reward_addr: Some(( + 30, + PoxAddress::Standard(StacksAddress::new(22, Hash160([0x01; 
20])).unwrap(), None), + )), + delegated_ustx: 200, + until_burn_height: None, + txid: Txid([3u8; 32]), + vtxindex: 1, + block_height: 20, + burn_header_hash: BurnchainHeaderHash([4u8; 32]), + })), + }; + let json_str = serde_json::to_string_pretty(&holder).expect("Failed to convert to json string"); + + let deserialized: TestOpHolder = + serde_json::from_str(&json_str).expect("Failed to deserialize DelegateStxOp"); + assert_eq!(holder, deserialized); + + let holder = TestOpHolder { + burnchain_op: Some(BlockstackOperationType::StackStx(StackStxOp { + sender: StacksAddress::new(0, Hash160([2u8; 20])) + .expect("Unable to create StacksAddress"), + reward_addr: PoxAddress::Standard( + StacksAddress::new(22, Hash160([0x01; 20])).unwrap(), + None, + ), + stacked_ustx: 42, + num_cycles: 3, + max_amount: None, + signer_key: None, + auth_id: None, + txid: Txid([3u8; 32]), + vtxindex: 1, + block_height: 20, + burn_header_hash: BurnchainHeaderHash([4u8; 32]), + })), + }; + let json_str = serde_json::to_string_pretty(&holder).expect("Failed to convert to json string"); + + let deserialized: TestOpHolder = + serde_json::from_str(&json_str).expect("Failed to deserialize json value into StackStxOp"); + assert_eq!(holder, deserialized); + + let holder = TestOpHolder { + burnchain_op: Some(BlockstackOperationType::TransferStx(TransferStxOp { + sender: StacksAddress::new(0, Hash160([2u8; 20])) + .expect("Unable to create StacksAddress"), + recipient: StacksAddress::new(0, Hash160([6u8; 20])) + .expect("Unable to create StacksAddress"), + transfered_ustx: 20, + memo: vec![], + txid: Txid([3u8; 32]), + vtxindex: 1, + block_height: 20, + burn_header_hash: BurnchainHeaderHash([4u8; 32]), + })), + }; + let json_str = serde_json::to_string_pretty(&holder).expect("Failed to convert to json string"); + + let deserialized: TestOpHolder = serde_json::from_str(&json_str) + .expect("Failed to deserialize json value into TransferStxOp"); + assert_eq!(holder, deserialized); + + let holder = 
TestOpHolder { + burnchain_op: Some(BlockstackOperationType::VoteForAggregateKey( + VoteForAggregateKeyOp { + sender: StacksAddress::new(0, Hash160([2u8; 20])) + .expect("Unable to create StacksAddress"), + aggregate_key: StacksPublicKeyBuffer([3u8; 33]), + round: 10, + signer_index: 11, + reward_cycle: 2, + signer_key: StacksPublicKeyBuffer([2u8; 33]), + txid: Txid([3u8; 32]), + vtxindex: 1, + block_height: 20, + burn_header_hash: BurnchainHeaderHash([4u8; 32]), + }, + )), + }; + let json_str = serde_json::to_string_pretty(&holder).expect("Failed to convert to json string"); + + let deserialized: TestOpHolder = serde_json::from_str(&json_str) + .expect("Failed to deserialize json value into VoteForAggregateKeyOp"); + assert_eq!(holder, deserialized); + + let holder = TestOpHolder { + burnchain_op: Some(BlockstackOperationType::LeaderBlockCommit( + LeaderBlockCommitOp { + block_header_hash: BlockHeaderHash([8u8; 32]), + new_seed: VRFSeed([12u8; 32]), + txid: Txid([3u8; 32]), + parent_block_ptr: 1, + parent_vtxindex: 2, + key_block_ptr: 3, + key_vtxindex: 4, + memo: vec![], + burn_fee: 5, + vtxindex: 1, + input: (Txid([1u8; 32]), 1), + block_height: 20, + burn_parent_modulus: 6, + apparent_sender: BurnchainSigner("Hello there".into()), + commit_outs: vec![], + treatment: vec![], + sunset_burn: 6, + burn_header_hash: BurnchainHeaderHash([4u8; 32]), + }, + )), + }; + let json_str = serde_json::to_string_pretty(&holder).expect("Failed to convert to json string"); + let deserialized: TestOpHolder = serde_json::from_str(&json_str) + .expect("Failed to deserialize json value into LeaderBlockCommitOp"); + assert!(deserialized.burnchain_op.is_none()); + + let holder = TestOpHolder { + burnchain_op: Some(BlockstackOperationType::LeaderKeyRegister( + LeaderKeyRegisterOp { + consensus_hash: ConsensusHash([0u8; 20]), + public_key: VRFPublicKey::from_private(&VRFPrivateKey::new()), + memo: vec![], + txid: Txid([3u8; 32]), + vtxindex: 0, + block_height: 1, + burn_header_hash: 
BurnchainHeaderHash([9u8; 32]), + }, + )), + }; + let json_str = serde_json::to_string_pretty(&holder).expect("Failed to convert to json string"); + let deserialized: TestOpHolder = serde_json::from_str(&json_str) + .expect("Failed to deserialize json value into LeaderKeyRegisterOp"); + assert!(deserialized.burnchain_op.is_none()); +} diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 233a2ad0e6..81f99f816d 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -195,6 +195,7 @@ pub trait BlockEventDispatcher { burns: u64, reward_recipients: Vec, consensus_hash: &ConsensusHash, + parent_burn_block_hash: &BurnchainHeaderHash, ); } @@ -964,6 +965,7 @@ pub fn dispatcher_announce_burn_ops( paid_rewards.burns, recipients, consensus_hash, + &burn_header.parent_block_hash, ); } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 39d692474d..f2bee0af47 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -391,7 +391,7 @@ pub fn setup_states_with_epochs( Value::UInt(burnchain.pox_constants.reward_cycle_length as u128), Value::UInt(burnchain.pox_constants.pox_rejection_fraction as u128), ], - |_, _| false, + |_, _| None, None, ) .expect("Failed to set burnchain parameters in PoX contract"); @@ -446,6 +446,7 @@ impl BlockEventDispatcher for NullEventDispatcher { _burns: u64, _slot_holders: Vec, _consensus_hash: &ConsensusHash, + _parent_burn_block_hash: &BurnchainHeaderHash, ) { } } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d1c05f71f2..5056297646 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4946,7 +4946,7 @@ impl NakamotoChainState { &analysis, &contract_content, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); diff 
--git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index 931e6f6144..e6856fb6b0 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -335,7 +335,7 @@ impl NakamotoSigners { ) }) }, - |_, _| false, + |_, _| None, ) .expect("FATAL: failed to update signer stackerdb"); diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 32d79ceb13..3505d41ebd 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -160,7 +160,12 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_3: &[&str] = &[ r#"UPDATE db_version SET version = 3"#, ]; -pub const NAKAMOTO_STAGING_DB_SCHEMA_LATEST: u32 = 3; +pub const NAKAMOTO_STAGING_DB_SCHEMA_4: &[&str] = &[ + r#"CREATE INDEX nakamoto_staging_blocks_by_ready_and_height ON nakamoto_staging_blocks(burn_attachable, orphaned, processed, height);"#, + r#"UPDATE db_version SET version = 4"#, +]; + +pub const NAKAMOTO_STAGING_DB_SCHEMA_LATEST: u32 = 4; pub struct NakamotoStagingBlocksConn(rusqlite::Connection); @@ -796,6 +801,15 @@ impl StacksChainState { assert_eq!(version, 3, "Nakamoto staging DB migration failure"); debug!("Migrated Nakamoto staging blocks DB to schema 3"); } + 3 => { + debug!("Migrate Nakamoto staging blocks DB to schema 4"); + for cmd in NAKAMOTO_STAGING_DB_SCHEMA_4.iter() { + conn.execute(cmd, NO_PARAMS)?; + } + let version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + assert_eq!(version, 4, "Nakamoto staging DB migration failure"); + debug!("Migrated Nakamoto staging blocks DB to schema 4"); + } NAKAMOTO_STAGING_DB_SCHEMA_LATEST => { break; } diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 4f5029cb9c..d03625e387 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ 
b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -1177,7 +1177,7 @@ fn pox_2_delegate_extend_units() { Value::UInt(25), Value::UInt(0), ], - |_, _| false, + |_, _| None, None, ) }) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 8afb1ef55e..42d2fddee0 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -562,7 +562,7 @@ impl StacksChainState { ) }) }, - |_, _| false, + |_, _| None, ) .expect("FATAL: failed to handle PoX unlock"); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 8d6c5de1f3..d9f53df272 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -205,6 +205,7 @@ impl BlockEventDispatcher for DummyEventDispatcher { _burns: u64, _slot_holders: Vec, _consensus_hash: &ConsensusHash, + _parent_burn_block_hash: &BurnchainHeaderHash, ) { error!("We should never try to announce to the dummy dispatcher"); panic!(); @@ -4175,7 +4176,7 @@ impl StacksChainState { &boot_code_id(active_pox_contract, mainnet), "stack-stx", &args, - |_, _| false, + |_, _| None, None, ) }); @@ -4384,7 +4385,7 @@ impl StacksChainState { until_burn_height_val, reward_addr_val, ], - |_, _| false, + |_, _| None, None, ) }); @@ -4491,7 +4492,7 @@ impl StacksChainState { Value::UInt(round.clone().into()), Value::UInt(reward_cycle.clone().into()), ], - |_, _| false, + |_, _| None, None, ) }); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 11dc377c37..86bf57f46c 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -294,15 +294,15 @@ impl DBConfig { }); match epoch_id { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 8, - StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 8, - 
StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 8, - StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 8, - StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 8, - StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 8, - StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 8, - StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 8, - StacksEpochId::Epoch31 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 9, + StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 9, + StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 9, + StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 9, + StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 9, + StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 9, + StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 9, + StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 9, + StacksEpochId::Epoch31 => version_u32 >= 3 && version_u32 <= 9, } } } @@ -654,7 +654,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &str = "8"; +pub const CHAINSTATE_VERSION: &str = "9"; const CHAINSTATE_INITIAL_SCHEMA: &[&str] = &[ "PRAGMA foreign_keys = ON;", @@ -853,6 +853,15 @@ const CHAINSTATE_SCHEMA_3: &[&str] = &[ "#, ]; +const CHAINSTATE_SCHEMA_4: &[&str] = &[ + // schema change is JUST a new index, so just bump db_config.version + // and add the index to `CHAINSTATE_INDEXES` (which gets re-execed + // on every schema change) + r#" + UPDATE db_config SET version = "9"; + "#, +]; + const CHAINSTATE_INDEXES: &[&str] = &[ "CREATE INDEX IF NOT EXISTS index_block_hash_to_primary_key ON block_headers(index_block_hash,consensus_hash,block_hash);", "CREATE INDEX IF NOT EXISTS block_headers_hash_index ON block_headers(block_hash,block_height);", @@ -877,6 +886,7 @@ const CHAINSTATE_INDEXES: &[&str] = &[ "CREATE INDEX IF NOT EXISTS 
index_block_header_by_affirmation_weight ON block_headers(affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_block_header_by_height_and_affirmation_weight ON block_headers(block_height,affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_headers_by_consensus_hash ON block_headers(consensus_hash);", + "CREATE INDEX IF NOT EXISTS processable_block ON staging_blocks(processed, orphaned, attachable);", ]; pub use stacks_common::consts::MINER_REWARD_MATURITY; @@ -1108,6 +1118,14 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "8" => { + info!( + "Migrating chainstate schema from version 8 to 9: add index for staging_blocks" + ); + for cmd in CHAINSTATE_SCHEMA_4.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: expected version = {}, got {}", @@ -1628,7 +1646,7 @@ impl StacksChainState { &contract, "set-burnchain-parameters", ¶ms, - |_, _| false, + |_, _| None, None, ) .expect("Failed to set burnchain parameters in PoX contract"); diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index ae8ae60b34..88314c5b6f 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -109,6 +109,7 @@ impl StacksTransactionReceipt { result: Value, burned: u128, cost: ExecutionCost, + vm_error: Option, ) -> StacksTransactionReceipt { StacksTransactionReceipt { transaction: tx.into(), @@ -120,7 +121,7 @@ impl StacksTransactionReceipt { execution_cost: cost, microblock_header: None, tx_index: 0, - vm_error: None, + vm_error, } } @@ -130,6 +131,7 @@ impl StacksTransactionReceipt { result: Value, burned: u128, cost: ExecutionCost, + reason: String, ) -> StacksTransactionReceipt { StacksTransactionReceipt { transaction: tx.into(), @@ -141,7 +143,7 @@ impl StacksTransactionReceipt { execution_cost: cost, microblock_header: None, tx_index: 0, - vm_error: None, + vm_error: Some(reason), } } @@ -172,6 +174,7 @@ 
impl StacksTransactionReceipt { burned: u128, analysis: ContractAnalysis, cost: ExecutionCost, + reason: String, ) -> StacksTransactionReceipt { StacksTransactionReceipt { transaction: tx.into(), @@ -183,7 +186,7 @@ impl StacksTransactionReceipt { execution_cost: cost, microblock_header: None, tx_index: 0, - vm_error: None, + vm_error: Some(reason), } } @@ -279,7 +282,7 @@ impl StacksTransactionReceipt { execution_cost: cost, microblock_header: None, tx_index: 0, - vm_error: Some(format!("{}", &error)), + vm_error: Some(error.to_string()), } } @@ -298,7 +301,7 @@ impl StacksTransactionReceipt { execution_cost: cost, microblock_header: None, tx_index: 0, - vm_error: Some(format!("{}", &error)), + vm_error: Some(error.to_string()), } } @@ -369,7 +372,17 @@ pub enum ClarityRuntimeTxError { error: clarity_error, err_type: &'static str, }, - AbortedByCallback(Option, AssetMap, Vec), + AbortedByCallback { + /// What the output value of the transaction would have been. + /// This will be a Some for contract-calls, and None for contract initialization txs. 
+ output: Option, + /// The asset map which was evaluated by the abort callback + assets_modified: AssetMap, + /// The events from the transaction processing + tx_events: Vec, + /// A human-readable explanation for aborting the transaction + reason: String, + }, CostError(ExecutionCost, ExecutionCost), AnalysisError(CheckErrors), Rejectable(clarity_error), @@ -399,9 +412,17 @@ pub fn handle_clarity_runtime_error(error: clarity_error) -> ClarityRuntimeTxErr ClarityRuntimeTxError::AnalysisError(check_error) } } - clarity_error::AbortedByCallback(val, assets, events) => { - ClarityRuntimeTxError::AbortedByCallback(val, assets, events) - } + clarity_error::AbortedByCallback { + output, + assets_modified, + tx_events, + reason, + } => ClarityRuntimeTxError::AbortedByCallback { + output, + assets_modified, + tx_events, + reason, + }, clarity_error::CostError(cost, budget) => ClarityRuntimeTxError::CostError(cost, budget), unhandled_error => ClarityRuntimeTxError::Rejectable(unhandled_error), } @@ -574,15 +595,16 @@ impl StacksChainState { } /// Apply a post-conditions check. - /// Return true if they all pass. - /// Return false if at least one fails. + /// Return `Ok(None)` if the check passes. + /// Return `Ok(Some(reason))` if the check fails. + /// Return `Err` if the check cannot be performed. 
fn check_transaction_postconditions( post_conditions: &[TransactionPostCondition], post_condition_mode: &TransactionPostConditionMode, origin_account: &StacksAccount, asset_map: &AssetMap, txid: Txid, - ) -> Result { + ) -> Result, InterpreterError> { let mut checked_fungible_assets: HashMap> = HashMap::new(); let mut checked_nonfungible_assets: HashMap< @@ -608,11 +630,11 @@ impl StacksChainState { .expect("FATAL: sent waaaaay too much STX"); if !condition_code.check(u128::from(*amount_sent_condition), amount_sent) { - info!( - "Post-condition check failure on STX owned by {}: {:?} {:?} {}", - account_principal, amount_sent_condition, condition_code, amount_sent; "txid" => %txid + let reason = format!( + "Post-condition check failure on STX owned by {account_principal}: {amount_sent_condition:?} {condition_code:?} {amount_sent}", ); - return Ok(false); + info!("{reason}"; "txid" => %txid); + return Ok(Some(reason)); } if let Some(ref mut asset_ids) = @@ -654,8 +676,9 @@ impl StacksChainState { .get_fungible_tokens(&account_principal, &asset_id) .unwrap_or(0); if !condition_code.check(u128::from(*amount_sent_condition), amount_sent) { - info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, amount_sent_condition, condition_code, amount_sent; "txid" => %txid); - return Ok(false); + let reason = format!("Post-condition check failure on fungible asset {asset_id} owned by {account_principal}: {amount_sent_condition} {condition_code:?} {amount_sent}"); + info!("{reason}"; "txid" => %txid); + return Ok(Some(reason)); } if let Some(ref mut asset_ids) = @@ -688,8 +711,11 @@ impl StacksChainState { .get_nonfungible_tokens(&account_principal, &asset_id) .unwrap_or(&empty_assets); if !condition_code.check(asset_value, assets_sent) { - info!("Post-condition check failure on non-fungible asset {} owned by {}: {:?} {:?}", &asset_id, account_principal, &asset_value, condition_code; "txid" => %txid); - return Ok(false); + 
let reason = format!( + "Post-condition check failure on non-fungible asset {asset_id} owned by {account_principal}: {asset_value:?} {condition_code:?} {assets_sent:?}" + ); + info!("{reason}"; "txid" => %txid); + return Ok(Some(reason)); } if let Some(ref mut asset_id_map) = @@ -729,19 +755,28 @@ impl StacksChainState { // each value must be covered for v in values { if !nfts.contains(&v.clone().try_into()?) { - info!("Post-condition check failure: Non-fungible asset {} value {:?} was moved by {} but not checked", &asset_identifier, &v, &principal; "txid" => %txid); - return Ok(false); + let reason = format!( + "Post-condition check failure: Non-fungible asset {asset_identifier} value {v:?} was moved by {principal} but not checked" + ); + info!("{reason}"; "txid" => %txid); + return Ok(Some(reason)); } } } else { // no values covered - info!("Post-condition check failure: No checks for non-fungible asset type {} moved by {}", &asset_identifier, &principal; "txid" => %txid); - return Ok(false); + let reason = format!( + "Post-condition check failure: Non-fungible asset {asset_identifier} was moved by {principal} but not checked" + ); + info!("{reason}"; "txid" => %txid); + return Ok(Some(reason)); } } else { // no NFT for this principal - info!("Post-condition check failure: No checks for any non-fungible assets, but moved {} by {}", &asset_identifier, &principal; "txid" => %txid); - return Ok(false); + let reason = format!( + "Post-condition check failure: No checks for non-fungible asset {asset_identifier} moved by {principal}" + ); + info!("{reason}"; "txid" => %txid); + return Ok(Some(reason)); } } _ => { @@ -750,19 +785,25 @@ impl StacksChainState { checked_fungible_assets.get(&principal) { if !checked_ft_asset_ids.contains(&asset_identifier) { - info!("Post-condition check failure: checks did not cover transfer of {} by {}", &asset_identifier, &principal; "txid" => %txid); - return Ok(false); + let reason = format!( + "Post-condition check failure: Fungible 
asset {asset_identifier} was moved by {principal} but not checked" + ); + info!("{reason}"; "txid" => %txid); + return Ok(Some(reason)); } } else { - info!("Post-condition check failure: No checks for fungible token type {} moved by {}", &asset_identifier, &principal; "txid" => %txid); - return Ok(false); + let reason = format!( + "Post-condition check failure: Fungible asset {asset_identifier} was moved by {principal} but not checked" + ); + info!("{reason}"; "txid" => %txid); + return Ok(Some(reason)); } } } } } } - return Ok(true); + return Ok(None); } /// Given two microblock headers, were they signed by the same key? @@ -1038,7 +1079,7 @@ impl StacksChainState { &contract_call.function_name, &contract_call.function_args, |asset_map, _| { - !StacksChainState::check_transaction_postconditions( + StacksChainState::check_transaction_postconditions( &tx.post_conditions, &tx.post_condition_mode, origin_account, @@ -1055,7 +1096,7 @@ impl StacksChainState { .sub(&cost_before) .expect("BUG: total block cost decreased"); - let (result, asset_map, events) = match contract_call_resp { + let (result, asset_map, events, vm_error) = match contract_call_resp { Ok((return_value, asset_map, events)) => { info!("Contract-call successfully processed"; "txid" => %tx.txid(), @@ -1066,7 +1107,7 @@ impl StacksChainState { "function_args" => %VecDisplay(&contract_call.function_args), "return_value" => %return_value, "cost" => ?total_cost); - (return_value, asset_map, events) + (return_value, asset_map, events, None) } Err(e) => match handle_clarity_runtime_error(e) { ClarityRuntimeTxError::Acceptable { error, err_type } => { @@ -1078,9 +1119,19 @@ impl StacksChainState { "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), "error" => ?error); - (Value::err_none(), AssetMap::new(), vec![]) + ( + Value::err_none(), + AssetMap::new(), + vec![], + Some(error.to_string()), + ) } - ClarityRuntimeTxError::AbortedByCallback(value, 
assets, events) => { + ClarityRuntimeTxError::AbortedByCallback { + output, + assets_modified, + tx_events, + reason, + } => { info!("Contract-call aborted by post-condition"; "txid" => %tx.txid(), "origin" => %origin_account.principal, @@ -1090,10 +1141,12 @@ impl StacksChainState { "function_args" => %VecDisplay(&contract_call.function_args)); let receipt = StacksTransactionReceipt::from_condition_aborted_contract_call( tx.clone(), - events, - value.expect("BUG: Post condition contract call must provide would-have-been-returned value"), - assets.get_stx_burned_total()?, - total_cost); + tx_events, + output.expect("BUG: Post condition contract call must provide would-have-been-returned value"), + assets_modified.get_stx_burned_total()?, + total_cost, + reason, + ); return Ok(receipt); } ClarityRuntimeTxError::CostError(cost_after, budget) => { @@ -1155,6 +1208,7 @@ impl StacksChainState { result, asset_map.get_stx_burned_total()?, total_cost, + vm_error, ); Ok(receipt) } @@ -1266,18 +1320,15 @@ impl StacksChainState { .expect("BUG: total block cost decreased"); let sponsor = tx.sponsor_address().map(|a| a.to_account_principal()); - // Beginning in epoch 3.0, smart contracts are compiled to Wasm - // and executed using the Wasm runtime. - debug!("Before Epoch 3.0 check"); - if epoch_id >= StacksEpochId::Epoch30 { - debug!("Compiling the contract to wasm binary"); - let mut module = compile_contract(contract_analysis.clone()).map_err(|e| { - Error::ClarityError(clarity_error::Wasm(WasmError::WasmGeneratorError( - e.message(), - ))) - })?; - contract_ast.wasm_module = Some(module.emit_wasm()); - } + // Compile contracts to Wasm and + // execute using the Wasm runtime. 
+ debug!("Compiling the contract to wasm binary"); + let mut module = compile_contract(contract_analysis.clone()).map_err(|e| { + Error::ClarityError(clarity_error::Wasm(WasmError::WasmGeneratorError( + e.message(), + ))) + })?; + contract_ast.wasm_module = Some(module.emit_wasm()); // execution -- if this fails due to a runtime error, then the transaction is still // accepted, but the contract does not materialize (but the sender is out their fee). @@ -1289,7 +1340,7 @@ impl StacksChainState { &contract_code_str, sponsor, |asset_map, _| { - !StacksChainState::check_transaction_postconditions( + StacksChainState::check_transaction_postconditions( &tx.post_conditions, &tx.post_condition_mode, origin_account, @@ -1338,14 +1389,20 @@ impl StacksChainState { }; return Ok(receipt); } - ClarityRuntimeTxError::AbortedByCallback(_, assets, events) => { + ClarityRuntimeTxError::AbortedByCallback { + assets_modified, + tx_events, + reason, + .. + } => { let receipt = StacksTransactionReceipt::from_condition_aborted_smart_contract( tx.clone(), - events, - assets.get_stx_burned_total()?, + tx_events, + assets_modified.get_stx_burned_total()?, contract_analysis, total_cost, + reason, ); return Ok(receipt); } @@ -6878,7 +6935,8 @@ pub mod test { ) .unwrap(); assert_eq!( - result, expected_result, + result.is_none(), + expected_result, "test failed:\nasset map: {ft_transfer_2:?}\nscenario: {test:?}" ); } @@ -7223,7 +7281,8 @@ pub mod test { ) .unwrap(); assert_eq!( - result, expected_result, + result.is_none(), + expected_result, "test failed:\nasset map: {nft_transfer_2:?}\nscenario: {test:?}" ); } @@ -8035,7 +8094,8 @@ pub mod test { ) .unwrap(); assert_eq!( - result, expected_result, + result.is_none(), + expected_result, "test failed:\nasset map: {asset_map:?}\nscenario: {test:?}" ); } diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index 1a1e9673ae..5ac94ec12d 100644 --- 
a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . use std::char::from_digit; +use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet, VecDeque}; use std::hash::{Hash, Hasher}; use std::io::{BufWriter, Cursor, Read, Seek, SeekFrom, Write}; @@ -137,6 +138,25 @@ impl TrieCacheState { self.block_hash_cache.get(&block_id).cloned() } + /// Get cached entry for a block hash, given its ID, or, if not + /// found, use `lookup` to get the corresponding block hash and + /// store it in the cache + pub fn get_block_hash_caching Result>( + &mut self, + id: u32, + lookup: F, + ) -> Result<&T, E> { + match self.block_hash_cache.entry(id) { + Entry::Occupied(occupied_entry) => Ok(occupied_entry.into_mut()), + Entry::Vacant(vacant_entry) => { + let block_hash = lookup(id)?; + let block_hash_ref = vacant_entry.insert(block_hash.clone()); + self.block_id_cache.insert(block_hash, id); + Ok(block_hash_ref) + } + } + } + /// Cache a block hash, given its ID pub fn store_block_hash(&mut self, block_id: u32, block_hash: T) { assert!(!self.block_hash_cache.contains_key(&block_id)); @@ -309,6 +329,17 @@ impl TrieCache { self.state_mut().load_block_hash(block_id) } + /// Get cached entry for a block hash, given its ID, or, if not + /// found, use `lookup` to get the corresponding block hash and + /// store it in the cache + pub fn get_block_hash_caching Result>( + &mut self, + id: u32, + lookup: F, + ) -> Result<&T, E> { + self.state_mut().get_block_hash_caching(id, lookup) + } + /// Store a block's ID and hash to teh cache. 
pub fn store_block_hash(&mut self, block_id: u32, block_hash: T) { self.state_mut().store_block_hash(block_id, block_hash) diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 7da5e856e7..f8c81eef49 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -25,6 +25,7 @@ use stacks_common::types::chainstate::{BlockHeaderHash, TrieHash, TRIEHASH_ENCOD use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::log; +use super::storage::ReopenedTrieStorageConnection; use crate::chainstate::stacks::index::bits::{get_leaf_hash, get_node_hash, read_root_hash}; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, CursorError, TrieCursor, TrieNode, TrieNode16, @@ -251,6 +252,20 @@ impl MarfConnection for MarfTransaction<'_, T> { } } +impl MarfConnection for ReopenedTrieStorageConnection<'_, T> { + fn with_conn(&mut self, exec: F) -> R + where + F: FnOnce(&mut TrieStorageConnection) -> R, + { + let mut conn = self.connection(); + exec(&mut conn) + } + + fn sqlite_conn(&self) -> &Connection { + self.db_conn() + } +} + impl MarfConnection for MARF { fn with_conn(&mut self, exec: F) -> R where @@ -1620,6 +1635,21 @@ impl MARF { }) } + /// Build a read-only storage connection which can be used for reads without modifying the + /// calling MARF struct (i.e., the tip pointer is only changed in the connection) + /// but reusing self's existing SQLite Connection (avoiding the overhead of + /// `reopen_readonly`). 
+ pub fn reopen_connection(&self) -> Result, Error> { + if self.open_chain_tip.is_some() { + error!( + "MARF at {} is already in the process of writing", + &self.storage.db_path + ); + return Err(Error::InProgressError); + } + self.storage.reopen_connection() + } + /// Get the root trie hash at a particular block pub fn get_root_hash_at(&mut self, block_hash: &T) -> Result { self.storage.connection().get_root_hash_at(block_hash) diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index efc19b0afb..8029453523 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . use std::char::from_digit; +use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet, VecDeque}; use std::hash::{Hash, Hasher}; use std::io::{BufWriter, Cursor, Read, Seek, SeekFrom, Write}; @@ -72,11 +73,8 @@ impl BlockMap for TrieFileStorage { } fn get_block_hash_caching(&mut self, id: u32) -> Result<&T, Error> { - if !self.is_block_hash_cached(id) { - let block_hash = self.get_block_hash(id)?; - self.cache.store_block_hash(id, block_hash); - } - self.cache.ref_block_hash(id).ok_or(Error::NotFoundError) + self.cache + .get_block_hash_caching(id, |id| trie_sql::get_block_hash(&self.db, id)) } fn is_block_hash_cached(&self, id: u32) -> bool { @@ -108,12 +106,9 @@ impl BlockMap for TrieStorageConnection<'_, T> { trie_sql::get_block_hash(&self.db, id) } - fn get_block_hash_caching(&mut self, id: u32) -> Result<&T, Error> { - if !self.is_block_hash_cached(id) { - let block_hash = self.get_block_hash(id)?; - self.cache.store_block_hash(id, block_hash); - } - self.cache.ref_block_hash(id).ok_or(Error::NotFoundError) + fn get_block_hash_caching<'a>(&'a mut self, id: u32) -> Result<&'a T, Error> { + self.cache + .get_block_hash_caching(id, |id| trie_sql::get_block_hash(&self.db, id)) } fn 
is_block_hash_cached(&self, id: u32) -> bool { @@ -170,11 +165,8 @@ impl BlockMap for TrieSqlHashMapCursor<'_, T> { } fn get_block_hash_caching(&mut self, id: u32) -> Result<&T, Error> { - if !self.is_block_hash_cached(id) { - let block_hash = self.get_block_hash(id)?; - self.cache.store_block_hash(id, block_hash); - } - self.cache.ref_block_hash(id).ok_or(Error::NotFoundError) + self.cache + .get_block_hash_caching(id, |id| trie_sql::get_block_hash(&self.db, id)) } fn is_block_hash_cached(&self, id: u32) -> bool { @@ -1310,6 +1302,50 @@ impl TrieStorageTransientData { } } +pub struct ReopenedTrieStorageConnection<'a, T: MarfTrieId> { + pub db_path: &'a str, + db: &'a Connection, + blobs: Option, + data: TrieStorageTransientData, + cache: TrieCache, + bench: TrieBenchmark, + pub hash_calculation_mode: TrieHashCalculationMode, + + /// row ID of a trie that represents unconfirmed state (i.e. trie state that will never become + /// part of the MARF, but nevertheless represents a persistent scratch space). If this field + /// is Some(..), then the storage connection here was used to (re-)open an unconfirmed trie + /// (via `open_unconfirmed()` or `open_block()` when `self.unconfirmed()` is `true`), or used + /// to create an unconfirmed trie (via `extend_to_unconfirmed_block()`). 
+ unconfirmed_block_id: Option, + + // used in testing in order to short-circuit block-height lookups + // when the trie struct is tested outside of marf.rs usage + #[cfg(test)] + pub test_genesis_block: Option, +} + +impl<'a, T: MarfTrieId> ReopenedTrieStorageConnection<'a, T> { + pub fn db_conn(&self) -> &Connection { + self.db + } + + pub fn connection(&mut self) -> TrieStorageConnection<'_, T> { + TrieStorageConnection { + db: SqliteConnection::ConnRef(&self.db), + db_path: self.db_path, + data: &mut self.data, + blobs: self.blobs.as_mut(), + cache: &mut self.cache, + bench: &mut self.bench, + hash_calculation_mode: self.hash_calculation_mode, + unconfirmed_block_id: None, + + #[cfg(test)] + test_genesis_block: &mut self.test_genesis_block, + } + } +} + impl TrieFileStorage { pub fn connection(&mut self) -> TrieStorageConnection<'_, T> { TrieStorageConnection { @@ -1327,6 +1363,54 @@ impl TrieFileStorage { } } + /// Build a read-only storage connection which can be used for reads without modifying the + /// calling TrieFileStorage struct (i.e., the tip pointer is only changed in the connection) + /// but reusing the TrieFileStorage's existing SQLite Connection (avoiding the overhead of + /// `reopen_readonly`). + pub fn reopen_connection(&self) -> Result, Error> { + let data = TrieStorageTransientData { + uncommitted_writes: self.data.uncommitted_writes.clone(), + cur_block: self.data.cur_block.clone(), + cur_block_id: self.data.cur_block_id.clone(), + + read_count: 0, + read_backptr_count: 0, + read_node_count: 0, + read_leaf_count: 0, + + write_count: 0, + write_node_count: 0, + write_leaf_count: 0, + + trie_ancestor_hash_bytes_cache: None, + + readonly: true, + unconfirmed: self.unconfirmed(), + }; + // perf note: should we attempt to clone the cache + let cache = TrieCache::default(); + let blobs = if self.blobs.is_some() { + Some(TrieFile::from_db_path(&self.db_path, true)?) 
+ } else { + None + }; + let bench = TrieBenchmark::new(); + let hash_calculation_mode = self.hash_calculation_mode; + let unconfirmed_block_id = None; + Ok(ReopenedTrieStorageConnection { + db_path: &self.db_path, + db: &self.db, + blobs, + data, + cache, + bench, + hash_calculation_mode, + unconfirmed_block_id, + #[cfg(test)] + test_genesis_block: self.test_genesis_block.clone(), + }) + } + pub fn transaction(&mut self) -> Result, Error> { if self.readonly() { return Err(Error::ReadOnlyError); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index a2aef06485..386a59ce03 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -650,9 +650,17 @@ impl TransactionResult { clarity_err } } - ClarityRuntimeTxError::AbortedByCallback(val, assets, events) => { - Error::ClarityError(clarity_error::AbortedByCallback(val, assets, events)) - } + ClarityRuntimeTxError::AbortedByCallback { + output, + assets_modified, + tx_events, + reason, + } => Error::ClarityError(clarity_error::AbortedByCallback { + output, + assets_modified, + tx_events, + reason, + }), }, Error::InvalidFee => { // The transaction didn't have enough STX left over after it was run. diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index f82da31499..115383d566 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::hash::{Hash, Hasher}; use std::io::prelude::*; use std::io::{Read, Write}; use std::ops::{Deref, DerefMut}; @@ -1011,6 +1012,14 @@ pub struct StacksTransaction { pub payload: TransactionPayload, } +impl Hash for StacksTransaction { + fn hash(&self, state: &mut H) { + self.txid().hash(state) + } +} + +impl Eq for StacksTransaction {} + #[derive(Debug, Clone, PartialEq)] pub struct StacksTransactionSigner { pub tx: StacksTransaction, diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index ce1af752be..528d74945f 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -387,7 +387,7 @@ impl ClarityInstance { &analysis, BOOT_CODE_COSTS, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -410,7 +410,7 @@ impl ClarityInstance { &analysis, &*BOOT_CODE_COST_VOTING, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -437,7 +437,7 @@ impl ClarityInstance { &analysis, &*BOOT_CODE_POX_TESTNET, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -490,7 +490,7 @@ impl ClarityInstance { &analysis, BOOT_CODE_COSTS_2, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -513,7 +513,7 @@ impl ClarityInstance { &analysis, BOOT_CODE_COSTS_3, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -536,7 +536,7 @@ impl ClarityInstance { &analysis, &*POX_2_TESTNET_CODE, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1025,7 +1025,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &pox_2_contract_id, "set-burnchain-parameters", ¶ms, - |_, _| false, + |_, _| None, None, ) .expect("Failed to set burnchain parameters in PoX-2 contract"); @@ -1269,7 +1269,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &pox_3_contract_id, "set-burnchain-parameters", ¶ms, - |_, _| false, + |_, _| None, None, ) .expect("Failed to set burnchain parameters in PoX-3 contract"); @@ -1387,7 +1387,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &pox_4_contract_id, "set-burnchain-parameters", ¶ms, 
- |_, _| false, + |_, _| None, None, ) .expect("Failed to set burnchain parameters in PoX-3 contract"); @@ -1725,9 +1725,9 @@ impl TransactionConnection for ClarityTransactionConnection<'_, '_> { &mut self, to_do: F, abort_call_back: A, - ) -> Result<(R, AssetMap, Vec, bool), E> + ) -> Result<(R, AssetMap, Vec, Option), E> where - A: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool, + A: FnOnce(&AssetMap, &mut ClarityDatabase) -> Option, F: FnOnce(&mut OwnedEnvironment) -> Result<(R, AssetMap, Vec), E>, E: From, { @@ -1759,7 +1759,11 @@ impl TransactionConnection for ClarityTransactionConnection<'_, '_> { let result = match result { Ok((value, asset_map, events)) => { let aborted = abort_call_back(&asset_map, &mut db); - let db_result = if aborted { db.roll_back() } else { db.commit() }; + let db_result = if aborted.is_some() { + db.roll_back() + } else { + db.commit() + }; match db_result { Ok(_) => Ok((value, asset_map, events, aborted)), Err(e) => Err(e.into()), @@ -1854,7 +1858,7 @@ impl ClarityTransactionConnection<'_, '_> { }) .map_err(Error::from) }, - |_, _| false, + |_, _| None, ) .map(|(value, ..)| value) } @@ -1914,7 +1918,7 @@ impl ClarityTransactionConnection<'_, '_> { ) .map_err(Error::from) }, - |_, _| true, + |_, _| Some("read-only".to_string()), )?; Ok(result) } @@ -1924,7 +1928,7 @@ impl ClarityTransactionConnection<'_, '_> { pub fn clarity_eval_raw(&mut self, code: &str) -> Result { let (result, _, _, _) = self.with_abort_callback( |vm_env| vm_env.eval_raw(code).map_err(Error::from), - |_, _| false, + |_, _| None, )?; Ok(result) } @@ -1937,7 +1941,7 @@ impl ClarityTransactionConnection<'_, '_> { ) -> Result { let (result, _, _, _) = self.with_abort_callback( |vm_env| vm_env.eval_read_only(contract, code).map_err(Error::from), - |_, _| false, + |_, _| None, )?; Ok(result) } @@ -1946,6 +1950,7 @@ impl ClarityTransactionConnection<'_, '_> { #[cfg(test)] mod tests { use std::fs; + use std::path::PathBuf; use clarity::vm::analysis::errors::CheckErrors; 
use clarity::vm::database::{ClarityBackingStore, STXBalance}; @@ -1956,10 +1961,57 @@ mod tests { use stacks_common::types::sqlite::NO_PARAMS; use super::*; + use crate::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection as _}; use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::clarity_vm::database::marf::MarfedKV; use crate::core::{PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05}; + #[test] + pub fn create_md_index() { + let path_db = "/tmp/stacks-node-tests/creat_md_index"; + let _ = std::fs::remove_dir_all(path_db); + let mut path = PathBuf::from(path_db); + + std::fs::create_dir_all(&path).unwrap(); + + path.push("marf.sqlite"); + let marf_path = path.to_str().unwrap().to_string(); + + let mut marf_opts = MARFOpenOpts::default(); + marf_opts.external_blobs = true; + + let mut marf: MARF = MARF::from_path(&marf_path, marf_opts).unwrap(); + + let tx = marf.storage_tx().unwrap(); + + tx.query_row("PRAGMA journal_mode = WAL;", NO_PARAMS, |_row| Ok(())) + .unwrap(); + + tx.execute( + "CREATE TABLE IF NOT EXISTS data_table + (key TEXT PRIMARY KEY, value TEXT)", + NO_PARAMS, + ) + .unwrap(); + + tx.execute( + "CREATE TABLE IF NOT EXISTS metadata_table + (key TEXT NOT NULL, blockhash TEXT, value TEXT, + UNIQUE (key, blockhash))", + NO_PARAMS, + ) + .unwrap(); + + tx.commit().unwrap(); + + assert!(SqliteConnection::check_schema(marf.sqlite_conn()).is_err()); + + MarfedKV::open(path_db, None, None).unwrap(); + + // schema should be good now + assert!(SqliteConnection::check_schema(marf.sqlite_conn()).is_ok()); + } + #[test] pub fn bad_syntax_test() { let marf = MarfedKV::temporary(); @@ -2063,7 +2115,7 @@ mod tests { &ct_analysis, contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -2118,7 +2170,7 @@ mod tests { &ct_analysis, contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -2148,7 +2200,7 @@ mod tests { &ct_analysis, contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); 
@@ -2182,7 +2234,7 @@ mod tests { &ct_analysis, contract, None, - |_, _| false, + |_, _| None, None ) .unwrap_err() @@ -2236,7 +2288,7 @@ mod tests { &ct_analysis, contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -2251,7 +2303,7 @@ mod tests { &contract_identifier, "foo", &[Value::Int(1)], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -2299,7 +2351,7 @@ mod tests { &ct_analysis, contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -2393,7 +2445,7 @@ mod tests { &ct_analysis, contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -2526,7 +2578,7 @@ mod tests { &ct_analysis, contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -2541,7 +2593,7 @@ mod tests { &contract_identifier, "get-bar", &[], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -2556,7 +2608,7 @@ mod tests { &contract_identifier, "set-bar", &[Value::Int(1), Value::Int(1)], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -2572,13 +2624,13 @@ mod tests { &contract_identifier, "set-bar", &[Value::Int(10), Value::Int(1)], - |_, _| true, + |_, _| Some("testing rollback".to_string()), None, ) }) .unwrap_err(); - let result_value = if let Error::AbortedByCallback(v, ..) = e { - v.unwrap() + let result_value = if let Error::AbortedByCallback { output, .. 
} = e { + output.unwrap() } else { panic!("Expects a AbortedByCallback error") }; @@ -2593,7 +2645,7 @@ mod tests { &contract_identifier, "get-bar", &[], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -2609,7 +2661,7 @@ mod tests { &contract_identifier, "set-bar", &[Value::Int(10), Value::Int(0)], - |_, _| true, + |_, _| Some("testing rollback".to_string()), None )) .unwrap_err() @@ -2624,7 +2676,7 @@ mod tests { &contract_identifier, "get-bar", &[], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -2919,7 +2971,7 @@ mod tests { &ct_analysis, contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -2944,7 +2996,7 @@ mod tests { &contract_identifier, "do-expand", &[], - |_, _| false, + |_, _| None, None )) .unwrap_err() diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index a5dcefc529..6271183ec4 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -263,10 +263,7 @@ impl MarfedKV { } pub fn index_conn(&self, context: C) -> IndexDBConn<'_, C, StacksBlockId> { - IndexDBConn { - index: &self.marf, - context, - } + IndexDBConn::new(&self.marf, context) } } diff --git a/stackslib/src/clarity_vm/tests/analysis_costs.rs b/stackslib/src/clarity_vm/tests/analysis_costs.rs index 685999f7d2..db2f489af1 100644 --- a/stackslib/src/clarity_vm/tests/analysis_costs.rs +++ b/stackslib/src/clarity_vm/tests/analysis_costs.rs @@ -132,7 +132,7 @@ fn setup_tracked_cost_test( &ct_analysis, contract_trait, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -167,7 +167,7 @@ fn setup_tracked_cost_test( &ct_analysis, contract_other, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -244,7 +244,7 @@ fn test_tracked_costs( &ct_analysis, &contract_self, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index b575cb1882..3446faeb5e 100644 --- 
a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -132,7 +132,7 @@ fn test_get_burn_block_info_eval() { &analysis, contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -335,7 +335,7 @@ fn publish_contract( &analysis, contract, None, - |_, _| false, + |_, _| None, None, )?; tx.save_analysis(contract_id, &analysis)?; @@ -458,7 +458,7 @@ fn trait_invocation_cross_epoch() { &invoke_contract_id, "invocation-1", &[], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -477,7 +477,8 @@ fn trait_invocation_cross_epoch() { &invoke_contract_id, "invocation-1", &[], - |_, _| false, None + |_, _| None, + None ) .unwrap_err(); @@ -500,7 +501,8 @@ fn trait_invocation_cross_epoch() { &invoke_contract_id, "invocation-2", &[Value::Principal(impl_contract_id.clone().into())], - |_, _| false, None + |_, _| None, + None ) .unwrap_err(); @@ -524,7 +526,7 @@ fn trait_invocation_cross_epoch() { &invoke_contract_id, "invocation-1", &[], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -542,7 +544,7 @@ fn trait_invocation_cross_epoch() { &invoke_contract_id, "invocation-2", &[Value::Principal(impl_contract_id.clone().into())], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -627,7 +629,7 @@ fn trait_with_trait_invocation_cross_epoch() { &analysis, math_trait, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -653,7 +655,7 @@ fn trait_with_trait_invocation_cross_epoch() { &analysis, compute_trait, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -679,7 +681,7 @@ fn trait_with_trait_invocation_cross_epoch() { &analysis, impl_compute, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -705,7 +707,7 @@ fn trait_with_trait_invocation_cross_epoch() { &analysis, impl_math, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -731,7 +733,7 @@ fn trait_with_trait_invocation_cross_epoch() { &analysis, use_compute, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -764,7 +766,7 @@ fn 
trait_with_trait_invocation_cross_epoch() { &analysis, use_compute, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -790,7 +792,7 @@ fn trait_with_trait_invocation_cross_epoch() { &analysis, use_compute, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -811,7 +813,7 @@ fn trait_with_trait_invocation_cross_epoch() { &use_compute_20_id, "do-it-static", &[], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -832,7 +834,7 @@ fn trait_with_trait_invocation_cross_epoch() { Value::Principal(impl_math_id.clone().into()), Value::UInt(1), ], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -850,7 +852,7 @@ fn trait_with_trait_invocation_cross_epoch() { &use_compute_21_c1_id, "do-it-static", &[], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -871,7 +873,7 @@ fn trait_with_trait_invocation_cross_epoch() { Value::Principal(impl_math_id.clone().into()), Value::UInt(1), ], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -889,7 +891,7 @@ fn trait_with_trait_invocation_cross_epoch() { &use_compute_21_c2_id, "do-it-static", &[], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -910,7 +912,7 @@ fn trait_with_trait_invocation_cross_epoch() { Value::Principal(impl_math_id.into()), Value::UInt(1), ], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -980,7 +982,7 @@ fn test_block_heights() { &analysis, contract_clarity1, None, - |_, _| false, + |_, _| None, None ).unwrap(); @@ -1041,7 +1043,7 @@ fn test_block_heights() { &analysis, contract_clarity3, None, - |_, _| false, + |_, _| None, None ).unwrap(); }); @@ -1262,7 +1264,7 @@ fn test_block_heights_across_versions() { &analysis, contract_e2c1_2, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1294,7 +1296,7 @@ fn test_block_heights_across_versions() { &analysis, contract_e2c1_2, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1328,7 +1330,7 @@ fn test_block_heights_across_versions() { &analysis, &contract_e3c3, None, - |_, _| false, + |_, _| None, None, ) 
.unwrap(); @@ -1397,7 +1399,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { &analysis, contract_e2c1_2, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1426,7 +1428,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { &analysis, contract_e2c1_2, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1460,7 +1462,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { &analysis, &contract_e3c3, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1477,7 +1479,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { &contract_id_e2c1, "get-it", &[Value::Principal(contract_id_e3c3.clone().into())], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1490,7 +1492,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { &contract_id_e2c2, "get-it", &[Value::Principal(contract_id_e3c3.clone().into())], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1548,7 +1550,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { &analysis, contract_e2c1_2, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1577,7 +1579,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { &analysis, contract_e2c1_2, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1611,7 +1613,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { &analysis, &contract_e3c3, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1628,7 +1630,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { &contract_id_e3c3, "get-it", &[Value::Principal(contract_id_e2c1.clone().into())], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1641,7 +1643,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { &contract_id_e3c3, "get-it", &[Value::Principal(contract_id_e2c2.clone().into())], - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1689,7 +1691,7 @@ fn test_block_heights_at_block() { &analysis, contract, None, - |_, _| false, + |_, _| None, None ).unwrap(); }); @@ -1753,7 +1755,7 
@@ fn test_get_block_info_time() { &analysis, contract2, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1777,7 +1779,7 @@ fn test_get_block_info_time() { &analysis, contract3, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1801,7 +1803,7 @@ fn test_get_block_info_time() { &analysis, contract3_3, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index 1659879daa..b153fa1d8d 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -1184,7 +1184,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity &analysis, contract_src, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1470,7 +1470,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi &analysis, contract_src, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 0f2817333e..69bb0b78ec 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -160,7 +160,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &analysis, BOOT_CODE_COSTS_2, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -187,7 +187,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &analysis, BOOT_CODE_COSTS_3, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -225,7 +225,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_analysis, tokens_contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap() @@ -239,7 +239,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "token-transfer", &[p1.clone().into(), Value::UInt(210)], - |_, _| false, + |_, 
_| None, None )) .unwrap() @@ -253,7 +253,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "token-transfer", &[p2.clone().into(), Value::UInt(9000)], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -268,7 +268,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "token-transfer", &[p2.clone().into(), Value::UInt(1001)], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -277,7 +277,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac assert!(is_committed( & // send to self! block.as_transaction(|tx| tx.run_contract_call(&p1, None, &contract_identifier, "token-transfer", - &[p1.clone().into(), Value::UInt(1000)], |_, _| false, None)).unwrap().0 + &[p1.clone().into(), Value::UInt(1000)], |_, _| None, None)).unwrap().0 )); assert_eq!( @@ -307,7 +307,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -322,7 +322,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -337,7 +337,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -362,7 +362,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "mint-after", &[Value::UInt(25)], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -400,7 +400,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "mint-after", &[Value::UInt(25)], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -415,7 +415,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", 
&[], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -439,7 +439,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "my-get-token-balance", &[p1.clone().into()], - |_, _| false, + |_, _| None, None )) .unwrap() @@ -763,7 +763,7 @@ pub fn rollback_log_memory_test( &ct_analysis, &contract, None, - |_, _| { false }, + |_, _| None, None ) .unwrap_err() @@ -841,7 +841,7 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id &ct_analysis, &contract, None, - |_, _| { false }, + |_, _| None, None ) .unwrap_err() @@ -922,7 +922,7 @@ pub fn argument_memory_test( &ct_analysis, &contract, None, - |_, _| { false }, + |_, _| None, None ) .unwrap_err() @@ -1019,12 +1019,12 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &ct_analysis, &contract_ok, None, - |_, _| true, + |_, _| Some("abort".to_string()), None ) .unwrap_err() { - ClarityError::AbortedByCallback(..) => true, + ClarityError::AbortedByCallback { .. 
} => true, _ => false, }); }); @@ -1047,7 +1047,7 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &ct_analysis, &contract_err, None, - |_, _| false, + |_, _| None, None ) .unwrap_err() @@ -1135,7 +1135,7 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &ct_analysis, &contract, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); @@ -1161,7 +1161,7 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &ct_analysis, &contract, None, - |_, _| false, + |_, _| None, None ) .unwrap_err() diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 2057720d9e..41a7825e5f 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -123,6 +123,9 @@ const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 180; /// Default percentage of block budget that must be used before attempting a /// time-based tenure extend const DEFAULT_TENURE_EXTEND_COST_THRESHOLD: u64 = 50; +/// Default number of milliseconds that the miner should sleep between mining +/// attempts when the mempool is empty. +const DEFAULT_EMPTY_MEMPOOL_SLEEP_MS: u64 = 2_500; static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock = LazyLock::new(|| ConnectionOptions { @@ -2172,6 +2175,9 @@ pub struct MinerConfig { /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. pub min_time_between_blocks_ms: u64, + /// The amount of time that the miner should sleep in between attempts to + /// mine a block when the mempool is empty + pub empty_mempool_sleep_time: Duration, /// Time in milliseconds to pause after receiving the first threshold rejection, before proposing a new block. pub first_rejection_pause_ms: u64, /// Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block. 
@@ -2224,6 +2230,7 @@ impl Default for MinerConfig { max_reorg_depth: 3, pre_nakamoto_mock_signing: false, // Should only default true if mining key is set min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, + empty_mempool_sleep_time: Duration::from_millis(DEFAULT_EMPTY_MEMPOOL_SLEEP_MS), first_rejection_pause_ms: DEFAULT_FIRST_REJECTION_PAUSE_MS, subsequent_rejection_pause_ms: DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS, block_commit_delay: Duration::from_millis(DEFAULT_BLOCK_COMMIT_DELAY_MS), @@ -2237,9 +2244,9 @@ impl Default for MinerConfig { block_rejection_timeout_steps: { let mut rejections_timeouts_default_map = HashMap::::new(); - rejections_timeouts_default_map.insert(0, Duration::from_secs(600)); - rejections_timeouts_default_map.insert(10, Duration::from_secs(300)); - rejections_timeouts_default_map.insert(20, Duration::from_secs(150)); + rejections_timeouts_default_map.insert(0, Duration::from_secs(180)); + rejections_timeouts_default_map.insert(10, Duration::from_secs(90)); + rejections_timeouts_default_map.insert(20, Duration::from_secs(45)); rejections_timeouts_default_map.insert(30, Duration::from_secs(0)); rejections_timeouts_default_map }, @@ -2639,6 +2646,7 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option, pub pre_nakamoto_mock_signing: Option, pub min_time_between_blocks_ms: Option, + pub empty_mempool_sleep_ms: Option, pub first_rejection_pause_ms: Option, pub subsequent_rejection_pause_ms: Option, pub block_commit_delay_ms: Option, @@ -2795,6 +2803,7 @@ impl MinerConfigFile { } else { ms }).unwrap_or(miner_default_config.min_time_between_blocks_ms), + empty_mempool_sleep_time: self.empty_mempool_sleep_ms.map(Duration::from_millis).unwrap_or(miner_default_config.empty_mempool_sleep_time), first_rejection_pause_ms: self.first_rejection_pause_ms.unwrap_or(miner_default_config.first_rejection_pause_ms), subsequent_rejection_pause_ms: 
self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), block_commit_delay: self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay), @@ -2826,6 +2835,7 @@ impl MinerConfigFile { }) } } + #[derive(Clone, Deserialize, Default, Debug)] #[serde(deny_unknown_fields)] pub struct AtlasConfigFile { diff --git a/stackslib/src/core/test_util.rs b/stackslib/src/core/test_util.rs index 519d660013..f8a829088b 100644 --- a/stackslib/src/core/test_util.rs +++ b/stackslib/src/core/test_util.rs @@ -5,6 +5,7 @@ use clarity::codec::StacksMessageCodec; use clarity::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; +use clarity::vm::costs::ExecutionCost; use clarity::vm::tests::BurnStateDB; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; @@ -269,17 +270,29 @@ pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sk)) } -pub fn make_stacks_transfer( +pub fn make_stacks_transfer_tx( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32, recipient: &PrincipalData, amount: u64, -) -> Vec { +) -> StacksTransaction { let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) +} + +/// Make a stacks transfer transaction, returning the serialized transaction bytes +pub fn make_stacks_transfer_serialized( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + recipient: &PrincipalData, + amount: u64, +) -> Vec { + let tx = make_stacks_transfer_tx(sender, nonce, tx_fee, chain_id, recipient, amount); let mut tx_bytes = vec![]; tx.consensus_serialize(&mut tx_bytes).unwrap(); tx_bytes @@ -512,3 +525,25 @@ 
pub fn insert_tx_in_mempool( .execute(sql, args) .expect("Failed to insert transaction into mempool"); } + +/// Generate source code for a contract that exposes a public function +/// `big-tx`. This function uses `proportion` of read_count when called +pub fn make_big_read_count_contract(limit: ExecutionCost, proportion: u64) -> String { + let read_count = (limit.read_count * proportion) / 100; + + let read_lines = (0..read_count) + .map(|_| format!("(var-get my-var)")) + .collect::>() + .join("\n"); + + format!( + " +(define-data-var my-var uint u0) +(define-public (big-tx) +(begin +{} +(ok true))) + ", + read_lines + ) +} diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 00010874ca..5b25a43826 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -65,7 +65,7 @@ use crate::core::mempool::{ db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, MemPoolWalkTxTypes, TxTag, BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS, }; -use crate::core::test_util::{insert_tx_in_mempool, make_stacks_transfer, to_addr}; +use crate::core::test_util::{insert_tx_in_mempool, make_stacks_transfer_serialized, to_addr}; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::net::Error as NetError; use crate::util_lib::bloom::test::setup_bloom_counter; @@ -2795,7 +2795,7 @@ fn large_mempool() { let sender_addr = to_addr(sender_sk); let fee = thread_rng().gen_range(180..2000); let transfer_tx = - make_stacks_transfer(sender_sk, *nonce, fee, 0x80000000, &recipient, 1); + make_stacks_transfer_serialized(sender_sk, *nonce, fee, 0x80000000, &recipient, 1); insert_tx_in_mempool( &mempool_tx, transfer_tx, diff --git a/stackslib/src/cost_estimates/tests/cost_estimators.rs b/stackslib/src/cost_estimates/tests/cost_estimators.rs index e3b2515e01..39b0cb0a7f 100644 --- a/stackslib/src/cost_estimates/tests/cost_estimators.rs +++ 
b/stackslib/src/cost_estimates/tests/cost_estimators.rs @@ -123,6 +123,7 @@ fn make_dummy_cc_tx( Value::okay(Value::Bool(true)).unwrap(), 0, execution_cost, + None, ) } diff --git a/stackslib/src/cost_estimates/tests/fee_medians.rs b/stackslib/src/cost_estimates/tests/fee_medians.rs index e89af4ca41..2812a4e161 100644 --- a/stackslib/src/cost_estimates/tests/fee_medians.rs +++ b/stackslib/src/cost_estimates/tests/fee_medians.rs @@ -78,6 +78,7 @@ fn make_dummy_cc_tx(fee: u64, execution_cost: &ExecutionCost) -> StacksTransacti Value::okay(Value::Bool(true)).unwrap(), 0, execution_cost.clone(), + None, ) } diff --git a/stackslib/src/cost_estimates/tests/fee_scalar.rs b/stackslib/src/cost_estimates/tests/fee_scalar.rs index c7f39c2921..74f19eeb68 100644 --- a/stackslib/src/cost_estimates/tests/fee_scalar.rs +++ b/stackslib/src/cost_estimates/tests/fee_scalar.rs @@ -116,6 +116,7 @@ fn make_dummy_cc_tx(fee: u64) -> StacksTransactionReceipt { Value::okay(Value::Bool(true)).unwrap(), 0, ExecutionCost::ZERO, + None, ) } diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index c26333c052..45fbdda42f 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -225,6 +225,37 @@ pub mod prefix_hex { } } +/// This module serde encode and decodes structs that +/// implement StacksMessageCodec as a 0x-prefixed hex string. +pub mod prefix_hex_codec { + use clarity::codec::StacksMessageCodec; + use clarity::util::hash::{hex_bytes, to_hex}; + + pub fn serialize( + val: &T, + s: S, + ) -> Result { + let mut bytes = vec![]; + val.consensus_serialize(&mut bytes) + .map_err(serde::ser::Error::custom)?; + s.serialize_str(&format!("0x{}", to_hex(&bytes))) + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: StacksMessageCodec>( + d: D, + ) -> Result { + let inst_str: String = serde::Deserialize::deserialize(d)?; + let Some(hex_str) = inst_str.get(2..) 
else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + let bytes = hex_bytes(hex_str).map_err(serde::de::Error::custom)?; + T::consensus_deserialize(&mut &bytes[..]).map_err(serde::de::Error::custom) + } +} + pub trait HexDeser: Sized { fn try_from(hex: &str) -> Result; } @@ -247,3 +278,4 @@ impl_hex_deser!(ConsensusHash); impl_hex_deser!(BlockHeaderHash); impl_hex_deser!(Hash160); impl_hex_deser!(Sha512Trunc256Sum); +impl_hex_deser!(Txid); diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 0ba13f2f8f..eaec726d8b 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::VecDeque; use std::io::{Read, Write}; #[cfg(any(test, feature = "testing"))] use std::sync::LazyLock; @@ -48,10 +49,14 @@ use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NAKAMOTO_BLOCK_VERSION}; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; -use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; +use crate::chainstate::stacks::miner::{ + BlockBuilder, BlockLimitFunction, TransactionError, TransactionProblematic, TransactionResult, + TransactionSkipped, +}; use crate::chainstate::stacks::{ Error as ChainError, StacksBlock, StacksBlockHeader, StacksTransaction, TransactionPayload, }; +use crate::clarity_vm::clarity::Error as ClarityError; use crate::core::mempool::{MemPoolDB, ProposalCallbackReceiver}; use crate::cost_estimates::FeeRateEstimate; use crate::net::http::{ @@ -76,6 +81,11 @@ pub static TEST_VALIDATE_STALL: LazyLock> = LazyLock::new(TestFla /// 
Artificial delay to add to block validation. pub static TEST_VALIDATE_DELAY_DURATION_SECS: LazyLock> = LazyLock::new(TestFlag::default); +#[cfg(any(test, feature = "testing"))] +/// Mock for the set of transactions that must be replayed +pub static TEST_REPLAY_TRANSACTIONS: LazyLock< + TestFlag>, +> = LazyLock::new(TestFlag::default); // This enum is used to supply a `reason_code` for validation // rejection responses. This is serialized as an enum with string @@ -87,7 +97,11 @@ define_u8_enum![ValidateRejectCode { ChainstateError = 3, UnknownParent = 4, NonCanonicalTenure = 5, - NoSuchTenure = 6 + NoSuchTenure = 6, + InvalidTransactionReplay = 7, + InvalidParentBlock = 8, + InvalidTimestamp = 9, + NetworkChainMismatch = 10 }]; pub static TOO_MANY_REQUESTS_STATUS: u16 = 429; @@ -207,6 +221,8 @@ pub struct NakamotoBlockProposal { pub block: NakamotoBlock, /// Identifies which chain block is for (Mainnet, Testnet, etc.) pub chain_id: u32, + /// Optional transaction replay set + pub replay_txs: Option>, } impl NakamotoBlockProposal { @@ -287,7 +303,7 @@ impl NakamotoBlockProposal { "highest_header.height" => highest_header.anchored_header.height(), ); return Err(BlockValidateRejectReason { - reason_code: ValidateRejectCode::InvalidBlock, + reason_code: ValidateRejectCode::InvalidParentBlock, reason: "Block is not higher than the highest block in its tenure".into(), }); } @@ -372,6 +388,11 @@ impl NakamotoBlockProposal { /// - Miner signature is valid /// - Validation of transactions by executing them agains current chainstate. 
/// This is resource intensive, and therefore done only if previous checks pass + /// + /// During transaction replay, we also check that the block only contains the unmined + /// transactions that need to be replayed, up until either: + /// - The set of transactions that must be replayed is exhausted + /// - A cost limit is hit pub fn validate( &self, sortdb: &SortitionDB, @@ -405,7 +426,7 @@ impl NakamotoBlockProposal { "received_mainnet" => mainnet, ); return Err(BlockValidateRejectReason { - reason_code: ValidateRejectCode::InvalidBlock, + reason_code: ValidateRejectCode::NetworkChainMismatch, reason: "Wrong network/chain_id".into(), }); } @@ -428,8 +449,8 @@ impl NakamotoBlockProposal { &self.block.header.parent_block_id, )? .ok_or_else(|| BlockValidateRejectReason { - reason_code: ValidateRejectCode::InvalidBlock, - reason: "Invalid parent block".into(), + reason_code: ValidateRejectCode::UnknownParent, + reason: "Unknown parent block".into(), })?; let burn_view_consensus_hash = @@ -494,7 +515,7 @@ impl NakamotoBlockProposal { "parent_block_timestamp" => parent_nakamoto_header.timestamp, ); return Err(BlockValidateRejectReason { - reason_code: ValidateRejectCode::InvalidBlock, + reason_code: ValidateRejectCode::InvalidTimestamp, reason: "Block timestamp is not greater than parent block".into(), }); } @@ -507,7 +528,7 @@ impl NakamotoBlockProposal { "current_time" => get_epoch_time_secs(), ); return Err(BlockValidateRejectReason { - reason_code: ValidateRejectCode::InvalidBlock, + reason_code: ValidateRejectCode::InvalidTimestamp, reason: "Block timestamp is too far into the future".into(), }); } @@ -541,8 +562,88 @@ impl NakamotoBlockProposal { builder.load_tenure_info(chainstate, &burn_dbconn, tenure_cause)?; let mut tenure_tx = builder.tenure_begin(&burn_dbconn, &mut miner_tenure_info)?; + let mut replay_txs_maybe: Option> = + self.replay_txs.clone().map(|txs| txs.into()); + for (i, tx) in self.block.txs.iter().enumerate() { let tx_len = tx.tx_len(); + + // 
If a list of replay transactions is set, this transaction must be the next + // mineable transaction from this list. + if let Some(ref mut replay_txs) = replay_txs_maybe { + loop { + let Some(replay_tx) = replay_txs.pop_front() else { + // During transaction replay, we expect that the block only + // contains transactions from the replay set. Thus, if we're here, + // the block contains a transaction that is not in the replay set, + // and we should reject the block. + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidTransactionReplay, + reason: "Transaction is not in the replay set".into(), + }); + }; + if replay_tx.txid() == tx.txid() { + break; + } + + // The included tx doesn't match the next tx in the + // replay set. Check to see if the tx is skipped because + // it was unmineable. + let tx_result = builder.try_mine_tx_with_len( + &mut tenure_tx, + &replay_tx, + replay_tx.tx_len(), + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + None, + ); + match tx_result { + TransactionResult::Skipped(TransactionSkipped { error, .. }) + | TransactionResult::ProcessingError(TransactionError { error, .. }) + | TransactionResult::Problematic(TransactionProblematic { + error, .. + }) => { + // The tx wasn't able to be mined. Check the underlying error, to + // see if we should reject the block or allow the tx to be + // dropped from the replay set. + + match error { + ChainError::CostOverflowError(..) + | ChainError::BlockTooBigError + | ChainError::ClarityError(ClarityError::CostError(..)) => { + // block limit reached; add tx back to replay set. + // BUT we know that the block should have ended at this point, so + // return an error. + let txid = replay_tx.txid(); + replay_txs.push_front(replay_tx); + + warn!("Rejecting block proposal. 
Next replay tx exceeds cost limits, so should have been in the next block."; + "error" => %error, + "txid" => %txid, + ); + + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidTransactionReplay, + reason: "Transaction is not in the replay set".into(), + }); + } + _ => { + // it's ok, drop it + continue; + } + } + } + TransactionResult::Success(_) => { + // Tx should have been included + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidTransactionReplay, + reason: "Transaction is not in the replay set".into(), + }); + } + }; + } + } + let tx_result = builder.try_mine_tx_with_len( &mut tenure_tx, tx, diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index f561567d3c..f44aa54e3f 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -14,48 +14,42 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::cell::RefCell; +use std::collections::VecDeque; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::rc::Rc; use std::sync::{Arc, Condvar, Mutex}; -use clarity::types::chainstate::{StacksPrivateKey, TrieHash}; -use clarity::util::secp256k1::MessageSignature; -use clarity::util::vrf::VRFProof; +use clarity::consts::CHAIN_ID_TESTNET; +use clarity::types::chainstate::StacksPrivateKey; use clarity::vm::ast::ASTRules; -use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; -use clarity::vm::{ClarityName, ContractName, Value}; +use clarity::vm::types::StandardPrincipalData; use mempool::{MemPoolDB, MemPoolEventDispatcher, ProposalCallbackReceiver}; use postblock_proposal::{NakamotoBlockProposal, ValidateRejectCode}; -use stacks_common::bitvec::BitVec; -use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; -use stacks_common::types::net::PeerHost; -use stacks_common::types::{Address, StacksEpochId}; -use stacks_common::util::hash::{hex_bytes, Hash160, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::StacksEpochId; use super::TestRPC; use crate::chainstate::burn::db::sortdb::SortitionDB; -use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction}; -use crate::chainstate::stacks::test::{make_codec_test_block, make_codec_test_nakamoto_block}; -use crate::chainstate::stacks::{ - CoinbasePayload, StacksBlockHeader, StacksTransactionSigner, TenureChangeCause, - TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, - TransactionPayload, TransactionPostConditionMode, TransactionVersion, +use 
crate::chainstate::stacks::test::make_codec_test_nakamoto_block; +use crate::core::test_util::{ + make_big_read_count_contract, make_contract_call, make_contract_publish, + make_stacks_transfer_tx, to_addr, }; use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::postblock_proposal::{ + BlockValidateOk, BlockValidateReject, TEST_REPLAY_TRANSACTIONS, +}; use crate::net::api::*; use crate::net::connection::ConnectionOptions; -use crate::net::httpcore::{ - HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, -}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpRequest}; use crate::net::relay::Relayer; use crate::net::test::TestEventObserver; -use crate::net::{ProtocolFamily, TipRequest}; +use crate::net::ProtocolFamily; +#[warn(unused)] #[test] fn test_try_parse_request() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); @@ -65,6 +59,7 @@ fn test_try_parse_request() { let proposal = NakamotoBlockProposal { block: block.clone(), chain_id: 0x80000000, + replay_txs: None, }; let mut request = StacksHttpRequest::new_for_peer( addr.into(), @@ -109,7 +104,8 @@ fn test_try_parse_request() { handler.block_proposal, Some(NakamotoBlockProposal { block, - chain_id: 0x80000000 + chain_id: 0x80000000, + replay_txs: None, }) ); @@ -117,7 +113,7 @@ fn test_try_parse_request() { parsed_request.clear_headers(); // but the authorization header should still be there parsed_request.add_header("authorization".into(), "password".into()); - let (preamble, contents) = parsed_request.destruct(); + let (preamble, _contents) = parsed_request.destruct(); assert_eq!(&preamble, request.preamble()); @@ -252,32 +248,15 @@ fn test_try_make_response() { .unwrap() .unwrap(); - let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = 
VRFProof::from_bytes(&proof_bytes[..]).unwrap(); - - let privk = StacksPrivateKey::from_hex( - "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", - ) - .unwrap(); - - let stx_address = StacksAddress::new(1, Hash160([0xff; 20])).unwrap(); - let payload = TransactionPayload::TokenTransfer( - stx_address.into(), + let tx = make_stacks_transfer_tx( + miner_privk, + 36, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), 123, - TokenTransferMemo([0u8; 34]), ); - let auth = TransactionAuth::from_p2pkh(miner_privk).unwrap(); - let addr = auth.origin().address_testnet(); - let mut tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); - tx.chain_id = 0x80000000; - tx.auth.set_origin_nonce(36); - tx.set_post_condition_mode(TransactionPostConditionMode::Allow); - tx.set_tx_fee(300); - let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(miner_privk).unwrap(); - let tx = tx_signer.get_tx().unwrap(); - let mut builder = NakamotoBlockBuilder::new( &parent_stacks_header, &parent_stacks_header.consensus_hash, @@ -326,6 +305,7 @@ fn test_try_make_response() { let proposal = NakamotoBlockProposal { block: good_block.clone(), chain_id: 0x80000000, + replay_txs: None, }; let mut request = StacksHttpRequest::new_for_peer( @@ -350,6 +330,7 @@ fn test_try_make_response() { let proposal = NakamotoBlockProposal { block: early_time_block, chain_id: 0x80000000, + replay_txs: None, }; let mut request = StacksHttpRequest::new_for_peer( @@ -374,6 +355,7 @@ fn test_try_make_response() { let proposal = NakamotoBlockProposal { block: late_time_block, chain_id: 0x80000000, + replay_txs: None, }; let mut request = StacksHttpRequest::new_for_peer( @@ -395,6 +377,7 @@ fn test_try_make_response() { let proposal = NakamotoBlockProposal { block: stale_block, chain_id: 0x80000000, + replay_txs: None, }; let mut request = StacksHttpRequest::new_for_peer( @@ -473,7 +456,7 @@ fn test_try_make_response() { reason, .. 
}) => { - assert_eq!(reason_code, ValidateRejectCode::InvalidBlock); + assert_eq!(reason_code, ValidateRejectCode::InvalidTimestamp); assert_eq!(reason, "Block timestamp is not greater than parent block"); } } @@ -486,8 +469,483 @@ fn test_try_make_response() { reason, .. }) => { - assert_eq!(reason_code, ValidateRejectCode::InvalidBlock); + assert_eq!(reason_code, ValidateRejectCode::InvalidTimestamp); assert_eq!(reason, "Block timestamp is too far into the future"); } } } + +#[warn(unused)] +fn replay_validation_test( + setup_fn: impl FnOnce(&mut TestRPC) -> (VecDeque, Vec), +) -> Result { + let test_observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + + let (expected_replay_txs, block_txs) = setup_fn(&mut rpc_test); + let mut requests = vec![]; + + let (stacks_tip_ch, stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash( + rpc_test.peer_1.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh); + + let mut proposed_block = { + let chainstate = rpc_test.peer_1.chainstate(); + let parent_stacks_header = + NakamotoChainState::get_block_header(chainstate.db(), &stacks_tip) + .unwrap() + .unwrap(); + + let mut builder = NakamotoBlockBuilder::new( + &parent_stacks_header, + &parent_stacks_header.consensus_hash, + 26000, + None, + None, + 8, + None, + ) + .unwrap(); + + rpc_test + .peer_1 + .with_db_state( + |sort_db: &mut SortitionDB, + chainstate: &mut StacksChainState, + _: &mut Relayer, + _: &mut MemPoolDB| { + let burn_dbconn = sort_db.index_handle_at_tip(); + let mut miner_tenure_info = builder + .load_tenure_info(chainstate, &burn_dbconn, None) + .unwrap(); + let mut tenure_tx = builder + .tenure_begin(&burn_dbconn, &mut miner_tenure_info) + .unwrap(); + for tx in block_txs { + builder.try_mine_tx_with_len( + &mut tenure_tx, + &tx, + tx.tx_len(), + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + None, + 
); + } + let block = builder.mine_nakamoto_block(&mut tenure_tx); + Ok(block) + }, + ) + .unwrap() + }; + + // Increment the timestamp by 1 to ensure it is different from the previous block + proposed_block.header.timestamp += 1; + rpc_test + .peer_1 + .miner + .sign_nakamoto_block(&mut proposed_block); + + let proposal = NakamotoBlockProposal { + block: proposed_block.clone(), + chain_id: 0x80000000, + replay_txs: Some(expected_replay_txs.into()), + }; + + let mut request = StacksHttpRequest::new_for_peer( + rpc_test.peer_1.to_peer_host(), + "POST".into(), + "/v3/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + request.add_header("authorization".into(), "password".into()); + requests.push(request); + + // Execute the request + let observer = ProposalTestObserver::new(); + let proposal_observer = Arc::clone(&observer.proposal_observer); + + info!("Run request with observer for replay mismatch test"); + let responses = rpc_test.run_with_observer(requests, Some(&observer)); + + // Expect 202 Accepted initially + assert_eq!(responses[0].preamble().status_code, 202); + + // Wait for the asynchronous validation result + let start = std::time::Instant::now(); + loop { + info!("Wait for replay mismatch result to be non-empty"); + if proposal_observer + .lock() + .unwrap() + .results + .lock() + .unwrap() + .len() + >= 1 + // Expecting one result + { + break; + } + std::thread::sleep(std::time::Duration::from_secs(1)); + assert!( + start.elapsed().as_secs() < 60, + "Timed out waiting for replay mismatch result" + ); + } + + let observer_locked = proposal_observer.lock().unwrap(); + let mut results = observer_locked.results.lock().unwrap(); + let result = results.pop().unwrap(); + + TEST_REPLAY_TRANSACTIONS.set(Default::default()); + + result +} + +#[test] +#[ignore] +/// Tx replay test with mismatching mineable transactions. 
+fn replay_validation_test_transaction_mismatch() { + let result = replay_validation_test(|rpc_test| { + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + // Transaction expected in the replay set (different amount) + let tx_for_replay = make_stacks_transfer_tx( + miner_privk, + 36, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 1234, + ); + + let tx = make_stacks_transfer_tx( + miner_privk, + 36, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 123, + ); + + (vec![tx_for_replay].into(), vec![tx]) + }); + + match result { + Ok(_) => panic!("Expected error due to replay transaction mismatch, but got Ok"), + Err(postblock_proposal::BlockValidateReject { reason_code, .. }) => { + assert_eq!( + reason_code, + ValidateRejectCode::InvalidTransactionReplay, + "Expected InvalidTransactionReplay reason code" + ); + } + } +} + +#[test] +#[ignore] +/// Replay set has one unmineable tx, and one mineable tx. +/// The block has the one mineable tx. 
+fn replay_validation_test_transaction_unmineable_match() { + let result = replay_validation_test(|rpc_test| { + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + // Transaction expected in the replay set (different amount) + let unmineable_tx = make_stacks_transfer_tx( + miner_privk, + 37, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 1234, + ); + + let mineable_tx = make_stacks_transfer_tx( + miner_privk, + 36, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 123, + ); + + ( + vec![unmineable_tx, mineable_tx.clone()].into(), + vec![mineable_tx], + ) + }); + + match result { + Ok(_) => {} + Err(rejection) => { + panic!("Expected validation to be OK, but got {:?}", rejection); + } + } +} + +#[test] +#[ignore] +/// Replay set has [mineable, unmineable, mineable] +/// The block has [mineable, mineable] +fn replay_validation_test_transaction_unmineable_match_2() { + let result = replay_validation_test(|rpc_test| { + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + // Unmineable tx + let unmineable_tx = make_stacks_transfer_tx( + miner_privk, + 38, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 123, + ); + + let mineable_tx = make_stacks_transfer_tx( + miner_privk, + 36, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 123, + ); + + let mineable_tx_2 = make_stacks_transfer_tx( + miner_privk, + 37, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 123, + ); + + ( + vec![unmineable_tx, mineable_tx.clone(), mineable_tx_2.clone()].into(), + vec![mineable_tx, mineable_tx_2], + ) + }); + + match result { + Ok(_) => { + // pass + } + Err(rejection) => { + panic!("Expected validation to be OK, but got {:?}", rejection); + } + } +} + +#[test] +#[ignore] +/// Replay set has [mineable, mineable, tx_a, mineable] +/// The block has [mineable, mineable, tx_b, mineable] +fn 
replay_validation_test_transaction_mineable_mismatch_series() { + let result = replay_validation_test(|rpc_test| { + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + // Mineable tx + let mineable_tx_1 = make_stacks_transfer_tx( + miner_privk, + 36, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 123, + ); + + let mineable_tx_2 = make_stacks_transfer_tx( + miner_privk, + 37, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 123, + ); + + let tx_a = make_stacks_transfer_tx( + miner_privk, + 38, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 123, + ); + + let tx_b = make_stacks_transfer_tx( + miner_privk, + 38, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 1234, // different amount + ); + + let mineable_tx_3 = make_stacks_transfer_tx( + miner_privk, + 39, + 300, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 123, + ); + + ( + vec![ + mineable_tx_1.clone(), + mineable_tx_2.clone(), + tx_a.clone(), + mineable_tx_3.clone(), + ] + .into(), + vec![mineable_tx_1, mineable_tx_2, tx_b, mineable_tx_3], + ) + }); + + match result { + Ok(_) => { + panic!("Expected validation to be rejected, but got Ok"); + } + Err(rejection) => { + assert_eq!( + rejection.reason_code, + ValidateRejectCode::InvalidTransactionReplay + ); + } + } +} + +#[test] +#[ignore] +/// Replay set has [mineable, tx_b, tx_a] +/// The block has [mineable, tx_a, tx_b] +fn replay_validation_test_transaction_mineable_mismatch_series_2() { + let result = replay_validation_test(|rpc_test| { + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = to_addr(&recipient_sk); + let miner_addr = to_addr(miner_privk); + + let mineable_tx_1 = make_stacks_transfer_tx( + miner_privk, + 36, + 300, + CHAIN_ID_TESTNET, + &recipient_addr.into(), + 1000000, + ); + + let tx_b = make_stacks_transfer_tx( + 
&recipient_sk, + 0, + 300, + CHAIN_ID_TESTNET, + &miner_addr.into(), + 123, + ); + + let tx_a = make_stacks_transfer_tx( + miner_privk, + 37, + 300, + CHAIN_ID_TESTNET, + &recipient_addr.into(), + 123, + ); + + ( + vec![mineable_tx_1.clone(), tx_b.clone(), tx_a.clone()].into(), + vec![mineable_tx_1, tx_a, tx_b], + ) + }); + + match result { + Ok(_) => { + panic!("Expected validation to be rejected, but got Ok"); + } + Err(rejection) => { + assert_eq!( + rejection.reason_code, + ValidateRejectCode::InvalidTransactionReplay + ); + } + } +} + +#[test] +#[ignore] +/// Replay set has [deploy, big_a, big_b, c] +/// The block has [deploy, big_a, c] +/// +/// The block should have ended at big_a, because big_b would +/// have cost too much to include. +fn replay_validation_test_budget_exceeded() { + let result = replay_validation_test(|rpc_test| { + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_addr = to_addr(miner_privk); + + let contract_code = make_big_read_count_contract(BLOCK_LIMIT_MAINNET_21, 50); + + let deploy_tx_bytes = make_contract_publish( + miner_privk, + 36, + 1000, + CHAIN_ID_TESTNET, + &"big-contract", + &contract_code, + ); + + let big_a_bytes = make_contract_call( + miner_privk, + 37, + 1000, + CHAIN_ID_TESTNET, + &miner_addr, + &"big-contract", + "big-tx", + &vec![], + ); + + let big_b_bytes = make_contract_call( + miner_privk, + 38, + 1000, + CHAIN_ID_TESTNET, + &miner_addr, + &"big-contract", + "big-tx", + &vec![], + ); + + let deploy_tx = + StacksTransaction::consensus_deserialize(&mut deploy_tx_bytes.as_slice()).unwrap(); + let big_a = StacksTransaction::consensus_deserialize(&mut big_a_bytes.as_slice()).unwrap(); + let big_b = StacksTransaction::consensus_deserialize(&mut big_b_bytes.as_slice()).unwrap(); + + let transfer_tx = make_stacks_transfer_tx( + miner_privk, + 38, + 1000, + CHAIN_ID_TESTNET, + &StandardPrincipalData::transient().into(), + 100, + ); + + ( + vec![deploy_tx.clone(), big_a.clone(), 
big_b.clone()].into(), + vec![deploy_tx, big_a, transfer_tx], + ) + }); + + match result { + Ok(_) => { + panic!("Expected validation to be rejected, but got Ok"); + } + Err(rejection) => { + assert_eq!( + rejection.reason_code, + ValidateRejectCode::InvalidTransactionReplay + ); + } + } +} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 3aea021ec9..0a7f957fe5 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2576,6 +2576,7 @@ pub mod test { _burns: u64, _reward_recipients: Vec, _consensus_hash: &ConsensusHash, + _parent_burn_block_hash: &BurnchainHeaderHash, ) { // pass } diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index 4e1f3a064c..482b913c8e 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -52,7 +52,7 @@ fn stacker_db_id(i: usize) -> QualifiedContractIdentifier { fn make_stacker_db_ids(i: usize) -> Vec { let mut dbs = vec![]; - for j in 0..i { + for j in 0..i + 1 { dbs.push(stacker_db_id(j)); } dbs @@ -1053,6 +1053,45 @@ fn run_topology_test_ex( (100.0 * (peer_counts as f64)) / ((peer_count * peer_count) as f64), ); + // wait for stacker DBs to converge + for (i, peer) in peers.iter().enumerate() { + if i % 2 != 0 { + continue; + } + for (j, other_peer) in peers.iter().enumerate() { + if i == j { + continue; + } + + let all_neighbors = + PeerDB::get_all_peers(other_peer.network.peerdb.conn()).unwrap(); + + if (all_neighbors.len() as u64) < ((peer_count - 1) as u64) { + // this is a simulated-NAT'ed node -- it won't learn about other NAT'ed nodes' + // DBs + continue; + } + + if j % 2 != 0 { + continue; // this peer doesn't support Stacker DBs + } + let dbs = peer + .network + .peerdb + .get_peer_stacker_dbs(&other_peer.config.to_neighbor()) + .unwrap(); + if dbs.is_empty() { + test_debug!( + "waiting for peer {i} {} to learn about peer {j} {}'s stacker DBs", + &peer.config.to_neighbor(), + 
&other_peer.config.to_neighbor() + ); + finished = false; + break; + } + } + } + if finished { break; } diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 77329832d0..39f07c3ea9 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -613,13 +613,13 @@ impl<'a, C, T: MarfTrieId> IndexDBConn<'a, C, T> { ancestor_block_hash: &T, tip_block_hash: &T, ) -> Result, Error> { - get_ancestor_block_height(self.index, ancestor_block_hash, tip_block_hash) + get_ancestor_block_height(&self.index, ancestor_block_hash, tip_block_hash) } /// Get a value from the fork index pub fn get_indexed(&self, header_hash: &T, key: &str) -> Result, Error> { - let mut ro_index = self.index.reopen_readonly()?; - get_indexed(&mut ro_index, header_hash, key) + let mut connection = self.index.reopen_connection()?; + get_indexed(&mut connection, header_hash, key) } pub fn conn(&self) -> &DBConn { @@ -727,7 +727,7 @@ pub fn get_ancestor_block_hash( tip_block_hash: &T, ) -> Result, Error> { assert!(block_height <= u32::MAX as u64); - let mut read_only = index.reopen_readonly()?; + let mut read_only = index.reopen_connection()?; let bh = read_only.get_block_at_height(block_height as u32, tip_block_hash)?; Ok(bh) } @@ -738,7 +738,7 @@ pub fn get_ancestor_block_height( ancestor_block_hash: &T, tip_block_hash: &T, ) -> Result, Error> { - let mut read_only = index.reopen_readonly()?; + let mut read_only = index.reopen_connection()?; let height_opt = read_only .get_block_height(ancestor_block_hash, tip_block_hash)? 
.map(|height| height as u64); diff --git a/stackslib/src/util_lib/signed_structured_data.rs b/stackslib/src/util_lib/signed_structured_data.rs index 81f1c6b36a..a3d8e1310f 100644 --- a/stackslib/src/util_lib/signed_structured_data.rs +++ b/stackslib/src/util_lib/signed_structured_data.rs @@ -253,7 +253,7 @@ pub mod pox4 { &analysis, body, None, - |_, _| false, + |_, _| None, None, ) .unwrap(); diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index ed7b0b3114..5577dee3bd 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -51,6 +51,9 @@ http-types = "2.12" tempfile = "3.3" mockito = "1.5" serial_test = "3.2.0" +pinny = { git = "https://github.com/BitcoinL2-Labs/pinny-rs.git", rev = "54ba9d533a7b84525a5e65a3eae1a3ae76b9ea49" } #v0.0.2 +madhouse = { git = "https://github.com/stacks-network/madhouse-rs.git", rev = "fc651ddcbaf85e888b06d4a87aa788c4b7ba9309" } +proptest = { git = "https://github.com/proptest-rs/proptest.git", rev = "c9bdf18c232665b2b740c667c81866b598d06dc7" } [dependencies.rusqlite] workspace = true @@ -71,3 +74,6 @@ prod-genesis-chainstate = [] default = [] testing = [] wasm = ["stacks-common/wasm"] + +[package.metadata.pinny] +allowed = ["bitcoind", "flaky", "slow"] diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 84e251206d..8a61dfbd29 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -25,7 +25,9 @@ use std::thread::sleep; use std::time::Duration; use clarity::rusqlite::{params, Connection}; -use clarity::vm::analysis::contract_interface_builder::build_contract_interface; +use clarity::vm::analysis::contract_interface_builder::{ + build_contract_interface, ContractInterface, +}; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; @@ -34,7 
+36,10 @@ use lazy_static::lazy_static; use rand::Rng; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; -use stacks::chainstate::burn::operations::BlockstackOperationType; +use stacks::chainstate::burn::operations::{ + blockstack_op_extended_deserialize, blockstack_op_extended_serialize_opt, + BlockstackOperationType, +}; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::BlockEventDispatcher; use stacks::chainstate::nakamoto::NakamotoBlock; @@ -59,6 +64,7 @@ use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; +use stacks::net::api::{prefix_hex, prefix_hex_codec, prefix_opt_hex}; use stacks::net::atlas::{Attachment, AttachmentInstance}; use stacks::net::http::HttpRequestContents; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; @@ -95,15 +101,6 @@ pub struct EventObserver { pub disable_retries: bool, } -struct ReceiptPayloadInfo<'a> { - txid: String, - success: &'a str, - raw_result: String, - raw_tx: String, - contract_interface_json: serde_json::Value, - burnchain_op_json: serde_json::Value, -} - const STATUS_RESP_TRUE: &str = "success"; const STATUS_RESP_NOT_COMMITTED: &str = "abort_by_response"; const STATUS_RESP_POST_CONDITION: &str = "abort_by_post_condition"; @@ -334,6 +331,51 @@ impl RewardSetEventPayload { } } +pub fn hex_prefix_string( + hex_string: &String, + s: S, +) -> Result { + let prefixed = format!("0x{hex_string}"); + s.serialize_str(&prefixed) +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct TransactionEventPayload<'a> { + #[serde(with = "prefix_hex")] + /// The transaction id + pub txid: Txid, + /// The transaction index + pub tx_index: u32, + /// The transaction status + pub status: &'a str, + #[serde(with = "prefix_hex_codec")] + /// The raw transaction result + pub raw_result: Value, + /// The hex encoded raw transaction + #[serde(serialize_with = 
"hex_prefix_string")] + pub raw_tx: String, + /// The contract interface + pub contract_interface: Option, + /// The burnchain op + #[serde( + serialize_with = "blockstack_op_extended_serialize_opt", + deserialize_with = "blockstack_op_extended_deserialize" + )] + pub burnchain_op: Option, + /// The transaction execution cost + pub execution_cost: ExecutionCost, + /// The microblock sequence + pub microblock_sequence: Option, + #[serde(with = "prefix_opt_hex")] + /// The microblock hash + pub microblock_hash: Option, + #[serde(with = "prefix_opt_hex")] + /// The microblock parent hash + pub microblock_parent_hash: Option, + /// Error information if one occurred in the Clarity VM + pub vm_error: Option, +} + #[cfg(test)] static TEST_EVENT_OBSERVER_SKIP_RETRY: LazyLock> = LazyLock::new(TestFlag::default); @@ -547,6 +589,7 @@ impl EventObserver { burns: u64, slot_holders: Vec, consensus_hash: &ConsensusHash, + parent_burn_block_hash: &BurnchainHeaderHash, ) -> serde_json::Value { let reward_recipients = rewards .into_iter() @@ -570,14 +613,18 @@ impl EventObserver { "reward_slot_holders": serde_json::Value::Array(reward_slot_holders), "burn_amount": burns, "consensus_hash": format!("0x{consensus_hash}"), + "parent_burn_block_hash": format!("0x{parent_burn_block_hash}"), }) } - /// Returns tuple of (txid, success, raw_result, raw_tx, contract_interface_json) - fn generate_payload_info_for_receipt(receipt: &StacksTransactionReceipt) -> ReceiptPayloadInfo { + /// Returns transaction event payload to send for new block or microblock event + fn make_new_block_txs_payload( + receipt: &StacksTransactionReceipt, + tx_index: u32, + ) -> TransactionEventPayload { let tx = &receipt.transaction; - let success = match (receipt.post_condition_aborted, &receipt.result) { + let status = match (receipt.post_condition_aborted, &receipt.result) { (false, Value::Response(response_data)) => { if response_data.committed { STATUS_RESP_TRUE @@ -587,77 +634,47 @@ impl EventObserver { } (true, 
Value::Response(_)) => STATUS_RESP_POST_CONDITION, _ => { - if let TransactionOrigin::Stacks(inner_tx) = &tx { - if let TransactionPayload::PoisonMicroblock(..) = &inner_tx.payload { - STATUS_RESP_TRUE - } else { - unreachable!() // Transaction results should otherwise always be a Value::Response type - } - } else { - unreachable!() // Transaction results should always be a Value::Response type + if !matches!( + tx, + TransactionOrigin::Stacks(StacksTransaction { + payload: TransactionPayload::PoisonMicroblock(_, _), + .. + }) + ) { + unreachable!("Unexpected transaction result type"); } + STATUS_RESP_TRUE } }; - let (txid, raw_tx, burnchain_op_json) = match tx { - TransactionOrigin::Burn(op) => ( - op.txid().to_string(), - "00".to_string(), - BlockstackOperationType::blockstack_op_to_json(op), - ), + let (txid, raw_tx, burnchain_op) = match tx { + TransactionOrigin::Burn(op) => (op.txid(), "00".to_string(), Some(op.clone())), TransactionOrigin::Stacks(ref tx) => { - let txid = tx.txid().to_string(); - let bytes = tx.serialize_to_vec(); - (txid, bytes_to_hex(&bytes), json!(null)) + let txid = tx.txid(); + let bytes = bytes_to_hex(&tx.serialize_to_vec()); + (txid, bytes, None) } }; - let raw_result = { - let bytes = receipt - .result - .serialize_to_vec() - .expect("FATAL: failed to serialize transaction receipt"); - bytes_to_hex(&bytes) - }; - let contract_interface_json = { - match &receipt.contract_analysis { - Some(analysis) => json!(build_contract_interface(analysis) - .expect("FATAL: failed to serialize contract publish receipt")), - None => json!(null), - } - }; - ReceiptPayloadInfo { + TransactionEventPayload { txid, - success, - raw_result, + tx_index, + status, + raw_result: receipt.result.clone(), raw_tx, - contract_interface_json, - burnchain_op_json, + contract_interface: receipt.contract_analysis.as_ref().map(|analysis| { + build_contract_interface(analysis) + .expect("FATAL: failed to serialize contract publish receipt") + }), + burnchain_op, + 
execution_cost: receipt.execution_cost.clone(), + microblock_sequence: receipt.microblock_header.as_ref().map(|x| x.sequence), + microblock_hash: receipt.microblock_header.as_ref().map(|x| x.block_hash()), + microblock_parent_hash: receipt.microblock_header.as_ref().map(|x| x.prev_block), + vm_error: receipt.vm_error.clone(), } } - /// Returns json payload to send for new block or microblock event - fn make_new_block_txs_payload( - receipt: &StacksTransactionReceipt, - tx_index: u32, - ) -> serde_json::Value { - let receipt_payload_info = EventObserver::generate_payload_info_for_receipt(receipt); - - json!({ - "txid": format!("0x{}", &receipt_payload_info.txid), - "tx_index": tx_index, - "status": receipt_payload_info.success, - "raw_result": format!("0x{}", &receipt_payload_info.raw_result), - "raw_tx": format!("0x{}", &receipt_payload_info.raw_tx), - "contract_abi": receipt_payload_info.contract_interface_json, - "burnchain_op": receipt_payload_info.burnchain_op_json, - "execution_cost": receipt.execution_cost, - "microblock_sequence": receipt.microblock_header.as_ref().map(|x| x.sequence), - "microblock_hash": receipt.microblock_header.as_ref().map(|x| format!("0x{}", x.block_hash())), - "microblock_parent_hash": receipt.microblock_header.as_ref().map(|x| format!("0x{}", x.prev_block)), - }) - } - fn make_new_attachment_payload( attachment: &(AttachmentInstance, Attachment), ) -> serde_json::Value { @@ -686,7 +703,7 @@ impl EventObserver { &self, parent_index_block_hash: StacksBlockId, filtered_events: Vec<(usize, &(bool, Txid, &StacksTransactionEvent))>, - serialized_txs: &Vec, + serialized_txs: &Vec, burn_block_hash: BurnchainHeaderHash, burn_block_height: u32, burn_block_timestamp: u64, @@ -1062,6 +1079,7 @@ impl BlockEventDispatcher for EventDispatcher { burns: u64, recipient_info: Vec, consensus_hash: &ConsensusHash, + parent_burn_block_hash: &BurnchainHeaderHash, ) { self.process_burn_block( burn_block, @@ -1070,6 +1088,7 @@ impl BlockEventDispatcher for 
EventDispatcher { burns, recipient_info, consensus_hash, + parent_burn_block_hash, ) } } @@ -1114,6 +1133,7 @@ impl EventDispatcher { burns: u64, recipient_info: Vec, consensus_hash: &ConsensusHash, + parent_burn_block_hash: &BurnchainHeaderHash, ) { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.burn_block_observers_lookup, true); @@ -1128,6 +1148,7 @@ impl EventDispatcher { burns, recipient_info, consensus_hash, + parent_burn_block_hash, ); for observer in interested_observers.iter() { @@ -1841,14 +1862,27 @@ mod test { use std::thread; use std::time::Instant; + use clarity::boot_util::boot_code_id; use clarity::vm::costs::ExecutionCost; + use clarity::vm::events::SmartContractEventData; + use clarity::vm::types::StacksAddressExtensions; use serial_test::serial; + use stacks::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks::burnchains::{PoxConstants, Txid}; + use stacks::chainstate::burn::operations::PreStxOp; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; use stacks::chainstate::stacks::events::StacksBlockEventData; - use stacks::chainstate::stacks::StacksBlock; - use stacks::types::chainstate::BlockHeaderHash; + use stacks::chainstate::stacks::{ + SinglesigHashMode, SinglesigSpendingCondition, StacksBlock, TenureChangeCause, + TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, + TransactionVersion, + }; + use stacks::types::chainstate::{ + BlockHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, + }; + use stacks::util::hash::Hash160; use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; @@ -2659,4 +2693,172 @@ mod test { 
assert_eq!(event_dispatcher.registered_observers.len(), 1); } + + #[test] + /// This test checks that tx payloads properly convert the stacks transaction receipt regardless of the presence of the vm_error + fn make_new_block_txs_payload_vm_error() { + let privkey = StacksPrivateKey::random(); + let pubkey = StacksPublicKey::from_private(&privkey); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![pubkey], + ) + .unwrap(); + + let tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&privkey).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + + let mut receipt = StacksTransactionReceipt { + transaction: TransactionOrigin::Burn(BlockstackOperationType::PreStx(PreStxOp { + output: StacksAddress::new(0, Hash160([1; 20])).unwrap(), + txid: tx.txid(), + vtxindex: 0, + block_height: 1, + burn_header_hash: BurnchainHeaderHash([5u8; 32]), + })), + events: vec![], + post_condition_aborted: true, + result: Value::okay_true(), + contract_analysis: None, + execution_cost: ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + }, + microblock_header: None, + vm_error: None, + stx_burned: 0u128, + tx_index: 0, + }; + + let payload_no_error = EventObserver::make_new_block_txs_payload(&receipt, 0); + assert_eq!(payload_no_error.vm_error, receipt.vm_error); + + receipt.vm_error = Some("Inconceivable!".into()); + + let payload_with_error = EventObserver::make_new_block_txs_payload(&receipt, 0); + assert_eq!(payload_with_error.vm_error, receipt.vm_error); + } + + fn make_tenure_change_payload() -> TenureChangePayload { + TenureChangePayload { + tenure_consensus_hash: 
ConsensusHash([0; 20]), + prev_tenure_consensus_hash: ConsensusHash([0; 20]), + burn_view_consensus_hash: ConsensusHash([0; 20]), + previous_tenure_end: StacksBlockId([0; 32]), + previous_tenure_blocks: 1, + cause: TenureChangeCause::Extended, + pubkey_hash: Hash160([0; 20]), + } + } + + fn make_tenure_change_tx(payload: TenureChangePayload) -> StacksTransaction { + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange(payload), + } + } + + #[test] + fn backwards_compatibility_transaction_event_payload() { + let tx = make_tenure_change_tx(make_tenure_change_payload()); + let receipt = StacksTransactionReceipt { + transaction: TransactionOrigin::Burn(BlockstackOperationType::PreStx(PreStxOp { + output: StacksAddress::new(0, Hash160([1; 20])).unwrap(), + txid: tx.txid(), + vtxindex: 0, + block_height: 1, + burn_header_hash: BurnchainHeaderHash([5u8; 32]), + })), + events: vec![StacksTransactionEvent::SmartContractEvent( + SmartContractEventData { + key: (boot_code_id("some-contract", false), "some string".into()), + value: Value::Bool(false), + }, + )], + post_condition_aborted: false, + result: Value::okay_true(), + stx_burned: 100, + contract_analysis: None, + execution_cost: ExecutionCost { + write_length: 1, + write_count: 2, + read_length: 3, + read_count: 4, + runtime: 5, + }, + microblock_header: None, + tx_index: 1, + vm_error: None, + }; + let payload = EventObserver::make_new_block_txs_payload(&receipt, 0); + let new_serialized_data = 
serde_json::to_string_pretty(&payload).expect("Failed"); + let old_serialized_data = r#" + { + "burnchain_op": { + "pre_stx": { + "burn_block_height": 1, + "burn_header_hash": "0505050505050505050505050505050505050505050505050505050505050505", + "burn_txid": "ace70e63009a2c2d22c0f948b146d8a28df13a2900f3b5f3cc78b56459ffef05", + "output": { + "address": "S0G2081040G2081040G2081040G2081054GYN98", + "address_hash_bytes": "0x0101010101010101010101010101010101010101", + "address_version": 0 + }, + "vtxindex": 0 + } + }, + "contract_abi": null, + "execution_cost": { + "read_count": 4, + "read_length": 3, + "runtime": 5, + "write_count": 2, + "write_length": 1 + }, + "microblock_hash": null, + "microblock_parent_hash": null, + "microblock_sequence": null, + "raw_result": "0x0703", + "raw_tx": "0x00", + "status": "success", + "tx_index": 0, + "txid": "0xace70e63009a2c2d22c0f948b146d8a28df13a2900f3b5f3cc78b56459ffef05" + } + "#; + let new_value: TransactionEventPayload = serde_json::from_str(&new_serialized_data) + .expect("Failed to deserialize new data as TransactionEventPayload"); + let old_value: TransactionEventPayload = serde_json::from_str(&old_serialized_data) + .expect("Failed to deserialize old data as TransactionEventPayload"); + assert_eq!(new_value, old_value); + } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 82fff1fb5c..2f8aa36110 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,6 +45,7 @@ use stacks::net::api::poststackerdbchunk::StackerDBErrorCodes; use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; +use stacks::types::chainstate::BlockHeaderHash; use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; #[cfg(test)] @@ -187,8 +188,8 @@ pub struct BlockMinerThread { keychain: Keychain, /// burnchain 
configuration burnchain: Burnchain, - /// Last block mined - last_block_mined: Option, + /// Consensus hash and header hash of the last block mined + last_block_mined: Option<(ConsensusHash, BlockHeaderHash)>, /// Number of blocks mined since a tenure change/extend was attempted mined_blocks: u64, /// Cost consumed by the current tenure @@ -444,7 +445,7 @@ impl BlockMinerThread { // now, actually run this tenure loop { - if let Err(e) = self.miner_main_loop( + if let Err(e) = self.attempt_mine_and_propose_block( &mut coordinator, &sortdb, &mut stackerdbs, @@ -481,9 +482,14 @@ impl BlockMinerThread { *last_block_rejected = true; } - /// The main loop for the miner thread. This is where the miner will mine - /// blocks and then attempt to sign and broadcast them. - fn miner_main_loop( + /// Attempts to mine a block, propose it, and broadcast it if successful. + /// + /// Note: `Ok(())` does not guarantee that a block was mined, only that the + /// mining attempt completed and a subsequent attempt should be tried + /// + /// Returns `Ok(())` if mining completes successfully or should be retried. + /// Returns `Err` if the mining thread should exit (e.g., due to tenure changes or shutdown). + fn attempt_mine_and_propose_block( &mut self, coordinator: &mut SignerCoordinator, sortdb: &SortitionDB, @@ -507,52 +513,112 @@ impl BlockMinerThread { info!("Miner: finished mining a late tenure"); return Err(NakamotoNodeError::StacksTipChanged); } + // If we're mock mining, we may not have processed the block that the + // actual tenure winner committed to yet. So, before attempting to + // mock mine, check if the parent is processed. + if self.config.get_node_config(false).mock_mining + && !self.is_parent_processed(&mut chain_state)? 
+ { + info!("Mock miner has not processed parent block yet, sleeping and trying again"); + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); + return Ok(()); + } + + if self.reset_nonce_cache { + let mut mem_pool = self + .config + .connect_mempool_db() + .expect("Database failure opening mempool"); + mem_pool.reset_mempool_caches()?; + } + + let Some(new_block) = self.mine_block_and_handle_result(coordinator)? else { + // We should reattempt to mine + return Ok(()); + }; + + if !self.propose_new_block_and_broadcast( + coordinator, + sortdb, + stackerdbs, + last_block_rejected, + reward_set, + new_block, + )? { + // We should reattempt to mine + return Ok(()); + } - let new_block = loop { - if self.reset_nonce_cache { - let mut mem_pool = self - .config - .connect_mempool_db() - .expect("Database failure opening mempool"); - mem_pool.reset_mempool_caches()?; + // Wait until the last block has been mined and processed + self.wait_for_last_block_mined_and_processed(&mut chain_state)?; + + Ok(()) + } + + /// Check if the parent block has been processed + fn is_parent_processed( + &mut self, + chain_state: &mut StacksChainState, + ) -> Result { + let burn_db_path = self.config.get_burn_db_file_path(); + let mut burn_db = + SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + self.check_burn_tip_changed(&burn_db)?; + match self.load_block_parent_info(&mut burn_db, chain_state) { + Ok(..) => Ok(true), + Err(NakamotoNodeError::ParentNotFound) => Ok(false), + Err(e) => { + warn!("Failed to load parent info: {e:?}"); + Err(e) } + } + } - // If we're mock mining, we may not have processed the block that the - // actual tenure winner committed to yet. So, before attempting to - // mock mine, check if the parent is processed. 
- if self.config.get_node_config(false).mock_mining { - let burn_db_path = self.config.get_burn_db_file_path(); - let mut burn_db = - SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) - .expect("FATAL: could not open sortition DB"); - let burn_tip_changed = self.check_burn_tip_changed(&burn_db); - match burn_tip_changed - .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) - { - Ok(..) => {} - Err(NakamotoNodeError::ParentNotFound) => { - info!("Mock miner has not processed parent block yet, sleeping and trying again"); - thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); - continue; - } - Err(e) => { - warn!("Mock miner failed to load parent info: {e:?}"); - return Err(e); - } + /// Attempts to mine a block and handle the result. + /// + /// - Returns `Ok(Some(NakamotoBlock))` if a block is successfully mined and passes timestamp validation. + /// - Returns `Ok(None)` if mining should be retried (e.g. due to early block timestamp or no transactions). + /// - Returns `Err(NakamotoNodeError)` if mining should be aborted (e.g. shutdown signal or unexpected error). + fn mine_block_and_handle_result( + &mut self, + coordinator: &mut SignerCoordinator, + ) -> Result, NakamotoNodeError> { + match self.mine_block(coordinator) { + Ok(x) => { + if !self.validate_timestamp(&x)? { + info!("Block mined too quickly. Will try again."; + "block_timestamp" => x.header.timestamp, + ); + return Ok(None); } + Ok(Some(x)) } - - match self.mine_block(coordinator) { - Ok(x) => { - if !self.validate_timestamp(&x)? { - info!("Block mined too quickly. 
Will try again."; - "block_timestamp" => x.header.timestamp, - ); - continue; - } - break Some(x); + Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { + if self.abort_flag.load(Ordering::SeqCst) { + info!("Miner interrupted while mining in order to shut down"); + self.globals + .raise_initiative(format!("MiningFailure: aborted by node")); + return Err(ChainstateError::MinerAborted.into()); } - Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { + + info!("Miner interrupted while mining, will try again"); + + // sleep, and try again. if the miner was interrupted because the burnchain + // view changed, the next `mine_block()` invocation will error + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); + Ok(None) + } + Err(NakamotoNodeError::MiningFailure(ChainstateError::NoTransactionsToMine)) => { + debug!( + "Miner did not find any transactions to mine, sleeping for {:?}", + self.config.miner.empty_mempool_sleep_time + ); + self.reset_nonce_cache = false; + + // Pause the miner to wait for transactions to arrive + let now = Instant::now(); + while now.elapsed() < self.config.miner.empty_mempool_sleep_time { if self.abort_flag.load(Ordering::SeqCst) { info!("Miner interrupted while mining in order to shut down"); self.globals @@ -560,142 +626,195 @@ impl BlockMinerThread { return Err(ChainstateError::MinerAborted.into()); } - info!("Miner interrupted while mining, will try again"); + // Check if the burnchain tip has changed + let Ok(sort_db) = SortitionDB::open( + &self.config.get_burn_db_file_path(), + false, + self.burnchain.pox_constants.clone(), + ) else { + error!("Failed to open sortition DB. Will try mining again."); + return Ok(None); + }; + if self.check_burn_tip_changed(&sort_db).is_err() { + return Err(NakamotoNodeError::BurnchainTipChanged); + } - // sleep, and try again. 
if the miner was interrupted because the burnchain - // view changed, the next `mine_block()` invocation will error thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); - continue; - } - Err(NakamotoNodeError::MiningFailure(ChainstateError::NoTransactionsToMine)) => { - debug!("Miner did not find any transactions to mine"); - self.reset_nonce_cache = false; - break None; } - Err(e) => { - warn!("Failed to mine block: {e:?}"); + Ok(None) + } + Err(e) => { + warn!("Failed to mine block: {e:?}"); - // try again, in case a new sortition is pending - self.globals - .raise_initiative(format!("MiningFailure: {e:?}")); - return Err(ChainstateError::MinerAborted.into()); - } + // try again, in case a new sortition is pending + self.globals + .raise_initiative(format!("MiningFailure: {e:?}")); + Err(ChainstateError::MinerAborted.into()) } - }; + } + } - if let Some(mut new_block) = new_block { - Self::fault_injection_block_proposal_stall(&new_block); + /// Attempts to propose a new block and broadcast it upon success. + /// + /// - Returns `Ok(true)` if the block was successfully proposed and broadcasted. + /// - Returns `Ok(false)` if the proposal failed but the miner should retry (e.g. due to tip change or recoverable upload error). + /// - Returns `Err(NakamotoNodeError)` if the operation should be aborted (e.g. unrecoverable error during proposal or broadcasting). 
+ fn propose_new_block_and_broadcast( + &mut self, + coordinator: &mut SignerCoordinator, + sortdb: &SortitionDB, + stackerdbs: &mut StackerDBs, + last_block_rejected: &mut bool, + reward_set: &RewardSet, + mut new_block: NakamotoBlock, + ) -> Result { + Self::fault_injection_block_proposal_stall(&new_block); - let signer_signature = match self.propose_block( - coordinator, - &mut new_block, - sortdb, - stackerdbs, - ) { - Ok(x) => x, - Err(e) => match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"; - "signer_signature_hash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Ok(()); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"; + let signer_signature = match self.propose_block( + coordinator, + &mut new_block, + sortdb, + stackerdbs, + ) { + Ok(x) => x, + Err(e) => match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"; + "signer_signature_hash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Ok(false); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"; + "signer_signature_hash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + NakamotoNodeError::StackerDBUploadError(ref ack) => { + if ack.code == Some(StackerDBErrorCodes::BadSigner.code()) { + error!("Error while gathering signatures: failed to upload miner StackerDB data: {ack:?}. 
Giving up."; "signer_signature_hash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); return Err(e); } - NakamotoNodeError::StackerDBUploadError(ref ack) => { - if ack.code == Some(StackerDBErrorCodes::BadSigner.code()) { - error!("Error while gathering signatures: failed to upload miner StackerDB data: {ack:?}. Giving up."; - "signer_signature_hash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - self.pause_and_retry(&new_block, last_block_rejected, e); - return Ok(()); - } - _ => { - self.pause_and_retry(&new_block, last_block_rejected, e); - return Ok(()); - } - }, - }; - *last_block_rejected = false; + self.pause_and_retry(&new_block, last_block_rejected, e); + return Ok(false); + } + _ => { + self.pause_and_retry(&new_block, last_block_rejected, e); + return Ok(false); + } + }, + }; + *last_block_rejected = false; - new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), reward_set, stackerdbs) { - warn!("Error accepting own block: {e:?}. Will try mining again."); - return Ok(()); - } else { - info!( - "Miner: Block signed by signer set and broadcasted"; - "signer_signature_hash" => %new_block.header.signer_signature_hash(), - "stacks_block_hash" => %new_block.header.block_hash(), - "stacks_block_id" => %new_block.header.block_id(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); + new_block.header.signer_signature = signer_signature; + if let Err(e) = self.broadcast(new_block.clone(), reward_set, stackerdbs) { + warn!("Error accepting own block: {e:?}. 
Will try mining again."); + return Ok(false); + } else { + info!( + "Miner: Block signed by signer set and broadcasted"; + "signer_signature_hash" => %new_block.header.signer_signature_hash(), + "stacks_block_hash" => %new_block.header.block_hash(), + "stacks_block_id" => %new_block.header.block_id(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); - // We successfully mined, so the mempool caches are valid. - self.reset_nonce_cache = false; - } + // We successfully mined, so the mempool caches are valid. + self.reset_nonce_cache = false; + } - // update mined-block counters and mined-tenure counters - self.globals.counters.bump_naka_mined_blocks(); - if self.last_block_mined.is_none() { - // this is the first block of the tenure, bump tenure counter - self.globals.counters.bump_naka_mined_tenures(); - } + // update mined-block counters and mined-tenure counters + self.globals.counters.bump_naka_mined_blocks(); + if self.last_block_mined.is_none() { + // this is the first block of the tenure, bump tenure counter + self.globals.counters.bump_naka_mined_tenures(); + } + + // wake up chains coordinator + Self::fault_injection_block_announce_stall(&new_block); + self.globals.coord().announce_new_stacks_block(); + + self.last_block_mined = Some(( + new_block.header.consensus_hash, + new_block.header.block_hash(), + )); + self.mined_blocks += 1; + Ok(true) + } - // wake up chains coordinator - Self::fault_injection_block_announce_stall(&new_block); - self.globals.coord().announce_new_stacks_block(); + /// Blocks until the most recently mined block has been fully processed by the chainstate + /// and the miner is unblocked. + /// + /// - Returns `Ok(())` when the block is processed and the miner is ready to continue. + /// - Returns `Err(NakamotoNodeError)` if mining is aborted or the chainstate is inconsistent. 
+ fn wait_for_last_block_mined_and_processed( + &mut self, + chain_state: &mut StacksChainState, + ) -> Result<(), NakamotoNodeError> { + let Some((last_consensus_hash, last_bhh)) = &self.last_block_mined else { + return Ok(()); + }; - self.last_block_mined = Some(new_block); - self.mined_blocks += 1; + // If mock-mining, we don't need to wait for the last block to be + // processed (because it will never be). Instead just wait + // `min_time_between_blocks_ms`, then resume mining. + if self.config.node.mock_mining { + thread::sleep(Duration::from_millis( + self.config.miner.min_time_between_blocks_ms, + )); + return Ok(()); } - if let Some(last_block_mined) = &self.last_block_mined { - // Wait until the last block mined has been processed - loop { - let (_, processed, _, _) = chain_state - .nakamoto_blocks_db() - .get_block_processed_and_signed_weight( - &last_block_mined.header.consensus_hash, - &last_block_mined.header.block_hash(), - )? - .ok_or_else(|| NakamotoNodeError::UnexpectedChainState)?; - - if processed { - break; - } + loop { + let (_, processed, _, _) = chain_state + .nakamoto_blocks_db() + .get_block_processed_and_signed_weight(last_consensus_hash, &last_bhh)? + .ok_or_else(|| NakamotoNodeError::UnexpectedChainState)?; + + // Once the block has been processed and the miner is no longer + // blocked, we can continue mining. + if processed + && !(*self + .globals + .get_miner_status() + .lock() + .expect("FATAL: mutex poisoned")) + .is_blocked() + { + return Ok(()); + } - thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); - // Check if the burnchain tip has changed - let Ok(sort_db) = SortitionDB::open( - &self.config.get_burn_db_file_path(), - false, - self.burnchain.pox_constants.clone(), - ) else { - error!("Failed to open sortition DB. 
Will try mining again."); - return Ok(()); - }; - if self.check_burn_tip_changed(&sort_db).is_err() { - return Err(NakamotoNodeError::BurnchainTipChanged); - } + if self.abort_flag.load(Ordering::SeqCst) { + info!("Miner interrupted while mining in order to shut down"); + self.globals + .raise_initiative(format!("MiningFailure: aborted by node")); + return Err(ChainstateError::MinerAborted.into()); } - } - Ok(()) + // Check if the burnchain tip has changed + let Ok(sort_db) = SortitionDB::open( + &self.config.get_burn_db_file_path(), + false, + self.burnchain.pox_constants.clone(), + ) else { + error!("Failed to open sortition DB. Will try mining again."); + return Ok(()); + }; + if self.check_burn_tip_changed(&sort_db).is_err() { + return Err(NakamotoNodeError::BurnchainTipChanged); + } + } } fn propose_block( @@ -1273,6 +1392,30 @@ impl BlockMinerThread { return Err(NakamotoNodeError::ParentNotFound); }; + // If we're mock mining, we need to manipulate the `last_block_mined` + // to match what it should be based on the actual chainstate. + if self.config.node.mock_mining { + if let Some((last_block_consensus_hash, _)) = &self.last_block_mined { + // If the parent block is in the same tenure, then we should + // pretend that we mined it. + if last_block_consensus_hash + == &parent_block_info.stacks_parent_header.consensus_hash + { + self.last_block_mined = Some(( + parent_block_info.stacks_parent_header.consensus_hash, + parent_block_info + .stacks_parent_header + .anchored_header + .block_hash(), + )); + } else { + // If the parent block is not in the same tenure, then we + // should act as though we haven't mined anything yet. 
+ self.last_block_mined = None; + } + } + } + // create our coinbase if this is the first block we've mined this tenure let tenure_start_info = self.make_tenure_start_info( &chain_state, diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index ef2d6bdec9..b0a317c9e1 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -235,7 +235,12 @@ impl StackerDBListener { }) else { continue; }; - let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { + let SignerEvent::SignerMessages { + signer_set, + messages, + .. + } = signer_event + else { debug!("StackerDBListener: Received signer event other than a signer message. Ignoring."); continue; }; @@ -255,7 +260,7 @@ impl StackerDBListener { "slot_ids" => ?slot_ids, ); - for (message, slot_id) in messages.into_iter().zip(slot_ids) { + for ((_, message), slot_id) in messages.into_iter().zip(slot_ids) { let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { return Err(NakamotoNodeError::SignerSignatureError( "Signer entry not found".into(), diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 209bd5a913..5fdee316cb 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -166,6 +166,11 @@ impl RunLoop { /// If there's a network error, then assume that we're not a miner. fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool { if self.config.node.miner { + // If we are mock mining, then we don't need to check for UTXOs and + // we can just return true. 
+ if self.config.get_node_config(false).mock_mining { + return true; + } let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); if let Err(e) = burnchain.create_wallet_if_dne() { @@ -206,10 +211,6 @@ impl RunLoop { return true; } } - if self.config.get_node_config(false).mock_mining { - info!("No UTXOs found, but configured to mock mine"); - return true; - } thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL)); } panic!("No UTXOs found, exiting"); diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 9119f9c336..810ea2a63c 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -414,6 +414,11 @@ impl RunLoop { /// If there's a network error, then assume that we're not a miner. fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool { if self.config.node.miner { + // If we are mock mining, then we don't need to check for UTXOs and + // we can just return true. 
+ if self.config.get_node_config(false).mock_mining { + return true; + } let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); if let Err(e) = burnchain.create_wallet_if_dne() { @@ -454,10 +459,6 @@ impl RunLoop { return true; } } - if self.config.get_node_config(false).mock_mining { - info!("No UTXOs found, but configured to mock mine"); - return true; - } thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL)); } panic!("No UTXOs found, exiting"); diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index e9bdeb7022..3a9df900b9 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -9,7 +9,7 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; use stacks::config::{EventKeyType, EventObserverConfig, InitialBalance}; -use stacks::core::test_util::{make_contract_call, make_stacks_transfer}; +use stacks::core::test_util::{make_contract_call, make_stacks_transfer_serialized}; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -961,7 +961,7 @@ fn pox_2_unlock_all() { ); // perform a transfer - let tx = make_stacks_transfer( + let tx = make_stacks_transfer_serialized( &spender_sk, 5, tx_fee, diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 8a4059722a..4943c6cfde 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -13,6 +13,7 @@ use clarity::vm::types::{ }; use clarity::vm::{ClarityVersion, Value}; use lazy_static::lazy_static; +use pinny::tag; use reqwest; use serde_json::json; use stacks::burnchains::Address; @@ -28,7 
+29,7 @@ use stacks::config::InitialBalance; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::test_util::{ make_contract_call, make_contract_publish, make_sponsored_stacks_transfer_on_testnet, - make_stacks_transfer, to_addr, + make_stacks_transfer_serialized, to_addr, }; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, @@ -41,7 +42,7 @@ use stacks::net::api::getistraitimplemented::GetIsTraitImplementedResponse; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum}; -use super::{ADDR_4, SK_1, SK_2, SK_3}; +use super::{new_test_conf, ADDR_4, SK_1, SK_2, SK_3}; use crate::helium::RunLoop; const OTHER_CONTRACT: &str = " @@ -162,10 +163,11 @@ lazy_static! { static ref HTTP_BINDING: Mutex> = Mutex::new(None); } +#[tag(slow)] #[test] #[ignore] fn integration_test_get_info() { - let mut conf = super::new_test_conf(); + let mut conf = new_test_conf(); let spender_addr = to_addr(&StacksPrivateKey::from_hex(SK_3).unwrap()).into(); let principal_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); @@ -336,7 +338,7 @@ fn integration_test_get_info() { } if round >= 1 { - let tx_xfer = make_stacks_transfer( + let tx_xfer = make_stacks_transfer_serialized( &spender_sk, round - 1, 10, @@ -807,7 +809,7 @@ fn integration_test_get_info() { eprintln!("Test: POST {path} (valid)"); // tx_xfer is 180 bytes long - let tx_xfer = make_stacks_transfer( + let tx_xfer = make_stacks_transfer_serialized( &spender_sk, round, 200, @@ -845,7 +847,7 @@ fn integration_test_get_info() { // tx_xfer_invalid is 180 bytes long // bad nonce - let tx_xfer_invalid = make_stacks_transfer(&spender_sk, round + 30, 200, CHAIN_ID_TESTNET, + let tx_xfer_invalid = make_stacks_transfer_serialized(&spender_sk, round + 30, 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), 
456); let tx_xfer_invalid_tx = StacksTransaction::consensus_deserialize(&mut &tx_xfer_invalid[..]).unwrap(); @@ -1086,9 +1088,10 @@ const FAUCET_CONTRACT: &str = " (print (as-contract (stx-transfer? u1 .faucet recipient))))) "; +#[tag(slow)] #[test] fn contract_stx_transfer() { - let mut conf = super::new_test_conf(); + let mut conf = new_test_conf(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let sk_3 = StacksPrivateKey::from_hex(SK_3).unwrap(); @@ -1126,7 +1129,7 @@ fn contract_stx_transfer() { if round == 1 { // block-height = 2 - let xfer_to_contract = make_stacks_transfer( + let xfer_to_contract = make_stacks_transfer_serialized( &sk_3, 0, 10, @@ -1221,7 +1224,7 @@ fn contract_stx_transfer() { } else if round == 4 { // let's testing "chaining": submit MAXIMUM_MEMPOOL_TX_CHAINING - 1 txs, which should succeed for i in 0..MAXIMUM_MEMPOOL_TX_CHAINING { - let xfer_to_contract = make_stacks_transfer( + let xfer_to_contract = make_stacks_transfer_serialized( &sk_3, 1 + i, 200, @@ -1247,7 +1250,7 @@ fn contract_stx_transfer() { .unwrap(); } // this one should fail because the nonce is already in the mempool - let xfer_to_contract = make_stacks_transfer( + let xfer_to_contract = make_stacks_transfer_serialized( &sk_3, 3, 190, @@ -1452,7 +1455,7 @@ fn contract_stx_transfer() { #[test] fn mine_transactions_out_of_order() { - let mut conf = super::new_test_conf(); + let mut conf = new_test_conf(); let sk = StacksPrivateKey::from_hex(SK_3).unwrap(); let addr = to_addr(&sk); @@ -1480,7 +1483,7 @@ fn mine_transactions_out_of_order() { if round == 1 { // block-height = 2 - let xfer_to_contract = make_stacks_transfer( + let xfer_to_contract = make_stacks_transfer_serialized( &sk, 1, 10, @@ -1517,7 +1520,7 @@ fn mine_transactions_out_of_order() { ) .unwrap(); } else if round == 3 { - let xfer_to_contract = make_stacks_transfer( + let xfer_to_contract = make_stacks_transfer_serialized( &sk, 3, 10, @@ -1538,7 +1541,7 @@ fn mine_transactions_out_of_order() { ) 
.unwrap(); } else if round == 4 { - let xfer_to_contract = make_stacks_transfer( + let xfer_to_contract = make_stacks_transfer_serialized( &sk, 0, 10, @@ -1624,7 +1627,7 @@ fn mine_transactions_out_of_order() { /// in the block it was processed for. Tests issue #1540 #[test] fn mine_contract_twice() { - let mut conf = super::new_test_conf(); + let mut conf = new_test_conf(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); conf.burnchain.commit_anchor_block_within = 1000; @@ -1710,7 +1713,7 @@ fn mine_contract_twice() { #[test] fn bad_contract_tx_rollback() { - let mut conf = super::new_test_conf(); + let mut conf = new_test_conf(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -1745,7 +1748,7 @@ fn bad_contract_tx_rollback() { if round == 1 { // block-height = 2 - let xfer_to_contract = make_stacks_transfer( + let xfer_to_contract = make_stacks_transfer_serialized( &sk_3, 0, 10, @@ -1771,8 +1774,14 @@ fn bad_contract_tx_rollback() { .unwrap(); } else if round == 2 { // block-height = 3 - let xfer_to_contract = - make_stacks_transfer(&sk_3, 1, 10, CHAIN_ID_TESTNET, &addr_2.into(), 1000); + let xfer_to_contract = make_stacks_transfer_serialized( + &sk_3, + 1, + 10, + CHAIN_ID_TESTNET, + &addr_2.into(), + 1000, + ); let (consensus_hash, block_hash) = ( &tenure.parent_block.metadata.consensus_hash, &tenure.parent_block.metadata.anchored_header.block_hash(), @@ -1791,8 +1800,14 @@ fn bad_contract_tx_rollback() { .unwrap(); // doesn't consistently get mined by the StacksBlockBuilder, because order matters! 
- let xfer_to_contract = - make_stacks_transfer(&sk_3, 2, 10, CHAIN_ID_TESTNET, &addr_2.into(), 3000); + let xfer_to_contract = make_stacks_transfer_serialized( + &sk_3, + 2, + 10, + CHAIN_ID_TESTNET, + &addr_2.into(), + 3000, + ); tenure .mem_pool .submit_raw( @@ -1982,7 +1997,7 @@ fn make_keys(seed: &str, count: u64) -> Vec { #[test] fn block_limit_runtime_test() { - let mut conf = super::new_test_conf(); + let mut conf = new_test_conf(); conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { @@ -2167,7 +2182,7 @@ fn block_limit_runtime_test() { #[test] fn mempool_errors() { - let mut conf = super::new_test_conf(); + let mut conf = new_test_conf(); conf.burnchain.commit_anchor_block_within = 5000; @@ -2246,7 +2261,7 @@ fn mempool_errors() { if round == 1 { // let's submit an invalid transaction! eprintln!("Test: POST {path} (invalid)"); - let tx_xfer_invalid = make_stacks_transfer( + let tx_xfer_invalid = make_stacks_transfer_serialized( &spender_sk, 30, // bad nonce -- too much chaining 200, @@ -2288,7 +2303,7 @@ fn mempool_errors() { assert_eq!(data.get("expected").unwrap().as_i64().unwrap(), 26); assert_eq!(data.get("actual").unwrap().as_i64().unwrap(), 30); - let tx_xfer_invalid = make_stacks_transfer( + let tx_xfer_invalid = make_stacks_transfer_serialized( &spender_sk, 0, 1, // bad fee @@ -2322,7 +2337,7 @@ fn mempool_errors() { assert_eq!(data.get("expected").unwrap().as_u64().unwrap(), 180); assert_eq!(data.get("actual").unwrap().as_u64().unwrap(), 1); - let tx_xfer_invalid = make_stacks_transfer( + let tx_xfer_invalid = make_stacks_transfer_serialized( &contract_sk, 1, 2000, // not enough funds! 
diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index 4268daab08..aefaae13b1 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -16,8 +16,8 @@ use stacks::chainstate::stacks::{ use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; use stacks::core::test_util::{ - make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, - sign_standard_single_sig_tx_anchor_mode_version, to_addr, + make_coinbase, make_contract_call, make_contract_publish, make_poison, + make_stacks_transfer_serialized, sign_standard_single_sig_tx_anchor_mode_version, to_addr, }; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::cost_estimates::metrics::UnitMetric; @@ -292,8 +292,14 @@ fn mempool_setup_chainstate() { ) .unwrap(); - let tx_bytes = - make_stacks_transfer(&contract_sk, 5, 200, CHAIN_ID_TESTNET, &other_addr, 1000); + let tx_bytes = make_stacks_transfer_serialized( + &contract_sk, + 5, + 200, + CHAIN_ID_TESTNET, + &other_addr, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state @@ -370,8 +376,14 @@ fn mempool_setup_chainstate() { .unwrap() .into(); - let tx_bytes = - make_stacks_transfer(&contract_sk, 5, 200, CHAIN_ID_TESTNET, &bad_addr, 1000); + let tx_bytes = make_stacks_transfer_serialized( + &contract_sk, + 5, + 200, + CHAIN_ID_TESTNET, + &bad_addr, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -386,8 +398,14 @@ fn mempool_setup_chainstate() { assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // bad fees - let tx_bytes = - make_stacks_transfer(&contract_sk, 5, 0, CHAIN_ID_TESTNET, &other_addr, 1000); + let tx_bytes = make_stacks_transfer_serialized( + &contract_sk, + 5, + 0, + CHAIN_ID_TESTNET, + &other_addr, + 1000, + ); let tx = 
StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -403,8 +421,14 @@ fn mempool_setup_chainstate() { assert!(matches!(e, MemPoolRejection::FeeTooLow(0, _))); // bad nonce - let tx_bytes = - make_stacks_transfer(&contract_sk, 0, 200, CHAIN_ID_TESTNET, &other_addr, 1000); + let tx_bytes = make_stacks_transfer_serialized( + &contract_sk, + 0, + 200, + CHAIN_ID_TESTNET, + &other_addr, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -420,7 +444,7 @@ fn mempool_setup_chainstate() { assert!(matches!(e, MemPoolRejection::BadNonces(_))); // not enough funds - let tx_bytes = make_stacks_transfer( + let tx_bytes = make_stacks_transfer_serialized( &contract_sk, 5, 110000, @@ -444,7 +468,7 @@ fn mempool_setup_chainstate() { // sender == recipient let contract_princ = PrincipalData::from(contract_addr); - let tx_bytes = make_stacks_transfer( + let tx_bytes = make_stacks_transfer_serialized( &contract_sk, 5, 300, @@ -478,7 +502,7 @@ fn mempool_setup_chainstate() { ) .unwrap(); let mainnet_princ = mainnet_recipient.into(); - let tx_bytes = make_stacks_transfer( + let tx_bytes = make_stacks_transfer_serialized( &contract_sk, 5, 300, @@ -531,8 +555,14 @@ fn mempool_setup_chainstate() { assert!(matches!(e, MemPoolRejection::BadTransactionVersion)); // send amount must be positive - let tx_bytes = - make_stacks_transfer(&contract_sk, 5, 300, CHAIN_ID_TESTNET, &other_addr, 0); + let tx_bytes = make_stacks_transfer_serialized( + &contract_sk, + 5, + 300, + CHAIN_ID_TESTNET, + &other_addr, + 0, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -548,7 +578,7 @@ fn mempool_setup_chainstate() { assert!(matches!(e, MemPoolRejection::TransferAmountMustBePositive)); // not enough funds - let tx_bytes = make_stacks_transfer( + let tx_bytes = make_stacks_transfer_serialized( &contract_sk, 5, 110000, @@ -570,7 
+600,7 @@ fn mempool_setup_chainstate() { eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500))); - let tx_bytes = make_stacks_transfer( + let tx_bytes = make_stacks_transfer_serialized( &contract_sk, 5, 99700, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index adcf2d2fd3..086fa0ef9c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -64,7 +64,8 @@ use stacks::chainstate::stacks::{ use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::mempool::{MemPoolWalkStrategy, MAXIMUM_MEMPOOL_TX_CHAINING}; use stacks::core::test_util::{ - insert_tx_in_mempool, make_contract_call, make_contract_publish_versioned, make_stacks_transfer, + insert_tx_in_mempool, make_contract_call, make_contract_publish_versioned, + make_stacks_transfer_serialized, }; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, @@ -1578,7 +1579,7 @@ fn simple_neon_integration() { assert_eq!(prior_commits + 15, post_commits, "There should have been exactly {tenures_count} submitted commits during the {tenures_count} tenures"); // Submit a TX - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 0, send_fee, @@ -2039,7 +2040,7 @@ fn flash_blocks_on_epoch_3_FLAKY() { } // Submit a TX - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 0, send_fee, @@ -2264,7 +2265,7 @@ fn mine_multiple_per_tenure_integration() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -2516,7 +2517,7 @@ fn 
multiple_miners() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3060,7 +3061,7 @@ fn block_proposal_api_endpoint() { .tenure_begin(&burn_dbconn, &mut miner_tenure_info) .unwrap(); - let tx = make_stacks_transfer( + let tx = make_stacks_transfer_serialized( &account_keys[0], 0, 100, @@ -3091,6 +3092,7 @@ fn block_proposal_api_endpoint() { let proposal = NakamotoBlockProposal { block, chain_id: chainstate.chain_id, + replay_txs: None, }; const HTTP_ACCEPTED: u16 = 202; @@ -3133,7 +3135,7 @@ fn block_proposal_api_endpoint() { sign(&p) }, HTTP_ACCEPTED, - Some(Err(ValidateRejectCode::InvalidBlock)), + Some(Err(ValidateRejectCode::NetworkChainMismatch)), ), ( "Invalid `miner_signature`", @@ -3780,7 +3782,7 @@ fn follower_bootup_simple() { let sender_nonce = account .nonce .max(last_nonce.as_ref().map(|ln| *ln + 1).unwrap_or(0)); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -4322,7 +4324,7 @@ fn follower_bootup_custom_chain_id() { let sender_nonce = account .nonce .max(last_nonce.as_ref().map(|ln| *ln + 1).unwrap_or(0)); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -4887,7 +4889,7 @@ fn burn_ops_integration_test() { .expect("Mutex poisoned") .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, 200, @@ -5306,7 +5308,7 @@ fn forked_tenure_is_ignored() { // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = 
make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -5722,7 +5724,7 @@ fn check_block_heights() { .expect("Mutex poisoned") .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6018,7 +6020,7 @@ fn nakamoto_attempt_time() { let mut sender_nonce = account.nonce; for _ in 0..txs_per_block { - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, tx_fee, @@ -6119,7 +6121,7 @@ fn nakamoto_attempt_time() { 'submit_txs: loop { let acct = &mut account[acct_idx]; for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &acct.privk, acct.nonce, tx_fee, @@ -6577,6 +6579,7 @@ fn signer_chainstate() { // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { + proposal_wait_for_parent_time: Duration::from_secs(0), first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), @@ -6669,7 +6672,7 @@ fn signer_chainstate() { // submit a tx to trigger an intermediate block let sender_nonce = i; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6703,6 +6706,7 @@ fn signer_chainstate() { // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { + proposal_wait_for_parent_time: Duration::from_secs(0), first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), @@ -6780,6 +6784,7 @@ fn signer_chainstate() { // this config disallows any reorg due to poorly 
timed block commits let proposal_conf = ProposalEvalConfig { + proposal_wait_for_parent_time: Duration::from_secs(0), first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), @@ -7138,7 +7143,7 @@ fn continue_tenure_extend() { assert!(sortition.was_sortition); // Submit a TX - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, transfer_nonce, send_fee, @@ -7204,7 +7209,7 @@ fn continue_tenure_extend() { for i in 0..3 { info!("Triggering Nakamoto blocks after extend ({})", i + 1); transfer_nonce += 1; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, transfer_nonce, send_fee, @@ -7651,7 +7656,7 @@ fn check_block_times() { info!("Mining Nakamoto block"); // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -7702,7 +7707,7 @@ fn check_block_times() { info!("Mining Nakamoto block"); // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -8195,7 +8200,7 @@ fn check_block_info() { // Now we want to test the behavior of a new nakamoto block within the same tenure // We'll force a nakamoto block by submitting a transfer, then waiting for the nonce to bump info!("Mining an interim nakamoto block"); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -8294,7 +8299,7 @@ fn check_block_info() { // Now we'll mine one more interim block so that we can test that the stacks-block-info outputs update // again. 
info!("Mining a second interim nakamoto block"); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -8655,7 +8660,7 @@ fn check_block_info_rewards() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -8688,7 +8693,7 @@ fn check_block_info_rewards() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -9002,7 +9007,7 @@ fn mock_mining() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -9035,14 +9040,17 @@ fn mock_mining() { let follower_node_info = get_chain_info(&follower_conf); info!("Node heights"; "miner" => miner_node_info.stacks_tip_height, "follower" => follower_node_info.stacks_tip_height); + // Wait for at least 2 blocks to be mined by the mock-miner + // This is to ensure that the mock miner has mined the tenure change + // block and at least one interim block. 
wait_for(60, || { Ok(follower_naka_mined_blocks.load(Ordering::SeqCst) - > follower_naka_mined_blocks_before) + > follower_naka_mined_blocks_before + 1) }) .unwrap_or_else(|_| { panic!( "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 + follower_naka_mined_blocks_before + 2 ) }); @@ -9381,7 +9389,7 @@ fn v3_signer_api_endpoint() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra stacks block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -9644,7 +9652,7 @@ fn nakamoto_lockup_events() { "height" => %height_before, "nonce" => %sender_nonce, ); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -9819,7 +9827,11 @@ fn skip_mining_long_tx() { // Sleep for longer than the miner's attempt time, so that the miner will // mark this tx as long-running and skip it in the next attempt - sleep_ms(naka_conf.miner.nakamoto_attempt_time_ms + 1000); + sleep_ms( + naka_conf.miner.nakamoto_attempt_time_ms + + naka_conf.miner.empty_mempool_sleep_time.as_millis() as u64 + + 1000, + ); TEST_TX_STALL.set(false); @@ -9830,7 +9842,7 @@ fn skip_mining_long_tx() { TEST_P2P_BROADCAST_SKIP.set(false); } else { - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_1_sk, i - 1, send_fee, @@ -10619,12 +10631,28 @@ fn consensus_hash_event_dispatcher() { let expected_consensus_hash = format!("0x{}", tip.consensus_hash); let burn_blocks = test_observer::get_burn_blocks(); + let parent_burn_block = burn_blocks.get(burn_blocks.len() - 2).unwrap(); let burn_block = burn_blocks.last().unwrap(); assert_eq!( burn_block.get("consensus_hash").unwrap().as_str().unwrap(), expected_consensus_hash ); + let parent_burn_block_hash = parent_burn_block + .get("burn_block_hash") + .unwrap() + .as_str() + 
.unwrap(); + + assert_eq!( + burn_block + .get("parent_burn_block_hash") + .unwrap() + .as_str() + .unwrap(), + parent_burn_block_hash + ); + let stacks_blocks = test_observer::get_blocks(); for block in stacks_blocks.iter() { if block.get("block_height").unwrap().as_u64().unwrap() == tip.stacks_block_height { @@ -11503,7 +11531,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) let recipient_sk = StacksPrivateKey::random(); let recipient_addr = tests::to_addr(&recipient_sk); let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( sender_sk, *nonce, transfer_fee, @@ -11559,7 +11587,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) let sender_addr = tests::to_addr(sender_sk); let recipient_sk = StacksPrivateKey::random(); let recipient_addr = tests::to_addr(&recipient_sk); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( sender_sk, *nonce, transfer_fee, @@ -11620,7 +11648,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) let sender_addr = tests::to_addr(sender_sk); let fee = set_fee(); assert!(fee >= 180 && fee <= 2000); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( sender_sk, *nonce, fee, @@ -11840,7 +11868,7 @@ fn larger_mempool() { let recipient_sk = StacksPrivateKey::random(); let recipient_addr = tests::to_addr(&recipient_sk); let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( sender_sk, *nonce, transfer_fee, @@ -11896,7 +11924,7 @@ fn larger_mempool() { let sender_addr = tests::to_addr(sender_sk); let recipient_sk = StacksPrivateKey::random(); let recipient_addr = tests::to_addr(&recipient_sk); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( 
sender_sk, *nonce, transfer_fee, @@ -11957,7 +11985,7 @@ fn larger_mempool() { for (sender_sk, nonce) in senders.iter_mut() { let sender_addr = tests::to_addr(sender_sk); let fee = thread_rng().gen_range(180..2000); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( sender_sk, *nonce, fee, @@ -12265,7 +12293,7 @@ fn handle_considered_txs_foreign_key_failure() { next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); - let good_transfer_tx = make_stacks_transfer( + let good_transfer_tx = make_stacks_transfer_serialized( &good_sender_sk, 0, send_fee, @@ -12287,7 +12315,7 @@ fn handle_considered_txs_foreign_key_failure() { TEST_MINE_STALL.set(true); TEST_TX_STALL.set(true); - let bad_transfer_tx = make_stacks_transfer( + let bad_transfer_tx = make_stacks_transfer_serialized( &bad_sender_sk, 0, send_fee, @@ -12339,3 +12367,114 @@ fn handle_considered_txs_foreign_key_failure() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +fn empty_mempool_sleep_ms() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + conf.connection_options.auth_token = Some(password.clone()); + let stacker_sk = setup_stacker(&mut conf); + let signer_sk = Secp256k1PrivateKey::random(); + let signer_addr = tests::to_addr(&signer_sk); + let sender_sk = Secp256k1PrivateKey::random(); + // setup sender + recipient for a stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + send_amt + send_fee, + ); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); + + // Set the empty mempool sleep time to something long enough that we can + // see the effect in the test. 
+ conf.miner.empty_mempool_sleep_time = Duration::from_secs(30); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks, + .. + } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![signer_sk]); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &conf, + &blocks_processed, + &[stacker_sk], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + blind_signer(&conf, &signers, &counters); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Sleep for 5 seconds to ensure that the miner tries to mine and sees an + // empty mempool. + thread::sleep(Duration::from_secs(5)); + + info!("------------------------- Submit a transaction -------------------------"); + let proposals_before = naka_proposed_blocks.load(Ordering::SeqCst); + + let transfer_tx = make_stacks_transfer_serialized( + &sender_sk, + 0, + send_fee, + conf.burnchain.chain_id, + &signer_addr.into(), + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + // The miner should have slept for 30 seconds after seeing an empty mempool + // before trying to mine again. 
Let's check that there was at least 10s + // before the next block proposal. + wait_for(10, || { + let proposals_after = naka_proposed_blocks.load(Ordering::SeqCst); + Ok(proposals_after > proposals_before) + }) + .expect_err("Expected to wait for 30 seconds before mining a block"); + + // Wait for the transaction to be mined + wait_for(60, || { + let account = get_account(&http_origin, &sender_addr); + Ok(account.nonce == 1) + }) + .expect("Timed out waiting for transaction to be mined after delay"); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 528c311032..315ce2830d 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -42,7 +42,7 @@ use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, Initia use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::test_util::{ make_contract_call, make_contract_publish, make_contract_publish_microblock_only, - make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr, + make_microblock, make_stacks_transfer_mblock_only, make_stacks_transfer_serialized, to_addr, }; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, @@ -3338,7 +3338,7 @@ fn filter_low_fee_tx_integration_test() { if ix < 5 { // low-fee - make_stacks_transfer( + make_stacks_transfer_serialized( spender_sk, 0, 1000 + (ix as u64), @@ -3348,7 +3348,7 @@ fn filter_low_fee_tx_integration_test() { ) } else { // high-fee - make_stacks_transfer( + make_stacks_transfer_serialized( spender_sk, 0, 2000 + (ix as u64), @@ -3438,7 +3438,7 @@ fn filter_long_runtime_tx_integration_test() { .enumerate() .map(|(ix, spender_sk)| { let recipient = 
StacksAddress::from_string(ADDR_4).unwrap(); - make_stacks_transfer( + make_stacks_transfer_serialized( spender_sk, 0, 1000 + (ix as u64), @@ -3833,7 +3833,7 @@ fn block_replay_integration_test() { assert_eq!(account.nonce, 0); let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let tx = make_stacks_transfer( + let tx = make_stacks_transfer_serialized( &spender_sk, 0, 1000, @@ -4557,7 +4557,7 @@ fn block_limit_hit_integration_test() { &max_contract_src, ); // included in first block - let tx_4 = make_stacks_transfer( + let tx_4 = make_stacks_transfer_serialized( &third_spender_sk, 0, 180, diff --git a/testnet/stacks-node/src/tests/signer/commands/bitcoin_mining.rs b/testnet/stacks-node/src/tests/signer/commands/bitcoin_mining.rs new file mode 100644 index 0000000000..e2b1fbcbaa --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/bitcoin_mining.rs @@ -0,0 +1,244 @@ +use std::sync::atomic::Ordering; +use std::sync::{Arc, Mutex}; + +use madhouse::{Command, CommandWrapper}; +use proptest::prelude::{Just, Strategy}; +use stacks::chainstate::stacks::TenureChangeCause; +use tracing::info; + +use super::context::{SignerTestContext, SignerTestState}; +use crate::tests::neon_integrations::get_chain_info; +use crate::tests::signer::v0::{wait_for_block_pushed_by_miner_key, MultipleMinerTest}; + +pub struct MineBitcoinBlockTenureChangeMiner1 { + miners: Arc>, +} + +impl MineBitcoinBlockTenureChangeMiner1 { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for MineBitcoinBlockTenureChangeMiner1 { + fn check(&self, state: &SignerTestState) -> bool { + let (conf_1, _) = self.miners.lock().unwrap().get_node_configs(); + let burn_height = get_chain_info(&conf_1).burn_block_height; + let miner_1_submitted_commit_last_burn_height = self + .miners + .lock() + .unwrap() + .get_primary_submitted_commit_last_burn_height() + .0 + .load(Ordering::SeqCst); + let miner_2_submitted_commit_last_burn_height = self + .miners + .lock() + 
.unwrap() + .get_secondary_submitted_commit_last_burn_height() + .0 + .load(Ordering::SeqCst); + + info!( + "Checking: Miner 1 mining Bitcoin block and tenure change tx. Result: {:?} && {:?} && {:?}", + state.is_booted_to_nakamoto, burn_height == miner_1_submitted_commit_last_burn_height, burn_height > miner_2_submitted_commit_last_burn_height + ); + state.is_booted_to_nakamoto + && burn_height == miner_1_submitted_commit_last_burn_height + && burn_height > miner_2_submitted_commit_last_burn_height + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Miner 1 mining Bitcoin block and tenure change tx"); + + let (stacks_height_before, conf_1, miner_pk_1) = { + let mut miners = self.miners.lock().unwrap(); + let stacks_height_before = miners.get_peer_stacks_tip_height(); + let (conf_1, _) = miners.get_node_configs(); + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + miners + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .expect("Failed to mine BTC block"); + + let (miner_pk_1, _) = miners.get_miner_public_keys(); + + (stacks_height_before, conf_1, miner_pk_1) + }; + + info!( + "Waiting for Nakamoto block {} pushed by miner 1", + stacks_height_before + 1 + ); + + let miner_1_block = + wait_for_block_pushed_by_miner_key(30, stacks_height_before + 1, &miner_pk_1) + .expect("Failed to get block"); + + let mined_block_height = miner_1_block.header.chain_length; + info!( + "Miner 1 mined Nakamoto block height: {}", + mined_block_height + ); + + let info_after = get_chain_info(&conf_1); + assert_eq!(info_after.stacks_tip, miner_1_block.header.block_hash()); + assert_eq!(info_after.stacks_tip_height, mined_block_height); + assert_eq!(mined_block_height, stacks_height_before + 1); + } + + fn label(&self) -> String { + "MINE_BITCOIN_BLOCK_AND_TENURE_CHANGE_MINER_1".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new( + 
MineBitcoinBlockTenureChangeMiner1::new(ctx.miners.clone()), + )) + } +} + +pub struct MineBitcoinBlockTenureChangeMiner2 { + miners: Arc>, +} + +impl MineBitcoinBlockTenureChangeMiner2 { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for MineBitcoinBlockTenureChangeMiner2 { + fn check(&self, state: &SignerTestState) -> bool { + let (conf_1, _) = self.miners.lock().unwrap().get_node_configs(); + let burn_height = get_chain_info(&conf_1).burn_block_height; + let miner_1_submitted_commit_last_burn_height = self + .miners + .lock() + .unwrap() + .get_primary_submitted_commit_last_burn_height() + .0 + .load(Ordering::SeqCst); + let miner_2_submitted_commit_last_burn_height = self + .miners + .lock() + .unwrap() + .get_secondary_submitted_commit_last_burn_height() + .0 + .load(Ordering::SeqCst); + + info!( + "Checking: Miner 2 mining Bitcoin block and tenure change tx. Result: {:?} && {:?} && {:?}", + state.is_booted_to_nakamoto, burn_height == miner_1_submitted_commit_last_burn_height, burn_height > miner_2_submitted_commit_last_burn_height + ); + state.is_booted_to_nakamoto + && burn_height == miner_2_submitted_commit_last_burn_height + && burn_height > miner_1_submitted_commit_last_burn_height + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Miner 2 mining Bitcoin block and tenure change tx"); + + let stacks_height_before = self.miners.lock().unwrap().get_peer_stacks_tip_height(); + + let (conf_1, conf_2) = self.miners.lock().unwrap().get_node_configs(); + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + self.miners + .lock() + .unwrap() + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .expect("Failed to mine BTC block"); + + let (_, miner_pk_2) = self.miners.lock().unwrap().get_miner_public_keys(); + + info!( + "Waiting for Nakamoto block {} pushed by miner 2", + stacks_height_before + 1 + ); + + let 
secondary_miner_block = + wait_for_block_pushed_by_miner_key(30, stacks_height_before + 1, &miner_pk_2) + .expect("Failed to get block N"); + + let mined_block_height = secondary_miner_block.header.chain_length; + + let info_after = get_chain_info(&conf_2); + assert_eq!( + info_after.stacks_tip, + secondary_miner_block.header.block_hash() + ); + assert_eq!(info_after.stacks_tip_height, mined_block_height); + assert_eq!(mined_block_height, stacks_height_before + 1); + } + + fn label(&self) -> String { + "MINE_BITCOIN_BLOCK_AND_TENURE_CHANGE_MINER_2".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new( + MineBitcoinBlockTenureChangeMiner2::new(ctx.miners.clone()), + )) + } +} + +pub struct MineBitcoinBlock { + miners: Arc>, + timeout_secs: u64, +} + +impl MineBitcoinBlock { + pub fn new(miners: Arc>, timeout_secs: u64) -> Self { + Self { + miners, + timeout_secs, + } + } +} + +impl Command for MineBitcoinBlock { + fn check(&self, _state: &SignerTestState) -> bool { + info!("Checking: Mining tenure. 
Result: {:?}", true); + true + } + + fn apply(&self, _state: &mut SignerTestState) { + info!( + "Applying: Mining tenure and waiting for it for {:?} seconds", + self.timeout_secs + ); + + let sortdb = { + let miners = self.miners.lock().unwrap(); + let (conf_1, _) = miners.get_node_configs(); + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + sortdb + }; + + { + let mut miners = self.miners.lock().unwrap(); + miners + .mine_bitcoin_blocks_and_confirm(&sortdb, 1, self.timeout_secs) + .expect("Failed to mine BTC block"); + } + } + + fn label(&self) -> String { + "MINE_BITCOIN_BLOCK".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + (60u64..90u64).prop_map(move |timeout_secs| { + CommandWrapper::new(MineBitcoinBlock::new(ctx.miners.clone(), timeout_secs)) + }) + } +} diff --git a/testnet/stacks-node/src/tests/signer/commands/block_commit.rs b/testnet/stacks-node/src/tests/signer/commands/block_commit.rs new file mode 100644 index 0000000000..d2f91ced22 --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/block_commit.rs @@ -0,0 +1,95 @@ +use std::sync::{Arc, Mutex}; + +use madhouse::{Command, CommandWrapper}; +use proptest::prelude::{Just, Strategy}; + +use super::context::{SignerTestContext, SignerTestState}; +use crate::tests::signer::v0::MultipleMinerTest; + +pub struct SubmitBlockCommitMiner2 { + miners: Arc>, +} + +impl SubmitBlockCommitMiner2 { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for SubmitBlockCommitMiner2 { + fn check(&self, state: &SignerTestState) -> bool { + info!( + "Checking: Submitting block commit miner 2. Result: {:?}", + state.is_secondary_miner_skip_commit_op + ); + // Ensure Miner 2's automatic commit ops are paused. If not, this may + // result in no commit being submitted. 
+ state.is_secondary_miner_skip_commit_op + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Submitting block commit miner 2"); + + let (conf_1, _) = self.miners.lock().unwrap().get_node_configs(); + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + self.miners.lock().unwrap().submit_commit_miner_2(&sortdb); + } + + fn label(&self) -> String { + "SUBMIT_BLOCK_COMMIT_MINER_2".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(SubmitBlockCommitMiner2::new( + ctx.miners.clone(), + ))) + } +} + +pub struct SubmitBlockCommitMiner1 { + miners: Arc>, +} + +impl SubmitBlockCommitMiner1 { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for SubmitBlockCommitMiner1 { + fn check(&self, state: &SignerTestState) -> bool { + info!( + "Checking: Submitting block commit miner 1. Result: {:?}", + state.is_primary_miner_skip_commit_op + ); + // Ensure Miner 1's automatic commit ops are paused. If not, this may + // result in no commit being submitted. 
+ state.is_primary_miner_skip_commit_op + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Submitting block commit miner 1"); + + let (conf_1, _) = self.miners.lock().unwrap().get_node_configs(); + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + self.miners.lock().unwrap().submit_commit_miner_1(&sortdb); + } + + fn label(&self) -> String { + "SUBMIT_BLOCK_COMMIT_MINER_1".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(SubmitBlockCommitMiner1::new( + ctx.miners.clone(), + ))) + } +} diff --git a/testnet/stacks-node/src/tests/signer/commands/block_wait.rs b/testnet/stacks-node/src/tests/signer/commands/block_wait.rs new file mode 100644 index 0000000000..747c686a12 --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/block_wait.rs @@ -0,0 +1,125 @@ +use std::sync::atomic::Ordering; +use std::sync::{Arc, Mutex}; + +use madhouse::{Command, CommandWrapper}; +use proptest::prelude::{Just, Strategy}; + +use super::context::{SignerTestContext, SignerTestState}; +use crate::tests::signer::v0::{wait_for_block_pushed_by_miner_key, MultipleMinerTest}; + +pub struct WaitForTenureChangeBlockFromMiner1 { + miners: Arc>, +} + +impl WaitForTenureChangeBlockFromMiner1 { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for WaitForTenureChangeBlockFromMiner1 { + fn check(&self, state: &SignerTestState) -> bool { + info!( + "Checking: Waiting for Nakamoto block from miner 1. 
Result: {:?}", + !state.mining_stalled + ); + !state.mining_stalled + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Waiting for Nakamoto block from miner 1"); + + let miners_arc = self.miners.clone(); + + let (miner_pk_1, last_confirmed_nakamoto_height_counter) = { + let miners = miners_arc.lock().unwrap(); + let (miner_pk_1, _) = miners.get_miner_public_keys(); + let last_confirmed_nakamoto_height = miners.get_primary_last_stacks_tip_counter(); + (miner_pk_1, last_confirmed_nakamoto_height) + }; + + let last_confirmed_height = last_confirmed_nakamoto_height_counter + .0 + .load(Ordering::SeqCst); + let expected_height = last_confirmed_height + 1; + + info!( + "Waiting for Nakamoto block {} pushed by miner 1", + expected_height + ); + + let _miner_1_block = wait_for_block_pushed_by_miner_key(30, expected_height, &miner_pk_1) + .expect(&format!("Failed to get block {}", expected_height)); + } + + fn label(&self) -> String { + "WAIT_FOR_TENURE_CHANGE_BLOCK_FROM_MINER_1".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new( + WaitForTenureChangeBlockFromMiner1::new(ctx.miners.clone()), + )) + } +} + +pub struct WaitForTenureChangeBlockFromMiner2 { + miners: Arc>, +} + +impl WaitForTenureChangeBlockFromMiner2 { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for WaitForTenureChangeBlockFromMiner2 { + fn check(&self, state: &SignerTestState) -> bool { + info!( + "Checking: Waiting for Nakamoto block from miner 2. 
Result: {:?}", + !state.mining_stalled + ); + !state.mining_stalled + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Waiting for Nakamoto block from miner 2"); + + let miners_arc = self.miners.clone(); + + let (miner_pk_2, last_confirmed_nakamoto_height_counter) = { + let miners = miners_arc.lock().unwrap(); + let (_, miner_pk_2) = miners.get_miner_public_keys(); + let last_confirmed_nakamoto_height = miners.get_secondary_last_stacks_tip_counter(); + (miner_pk_2, last_confirmed_nakamoto_height) + }; + + let last_confirmed_height = last_confirmed_nakamoto_height_counter + .0 + .load(Ordering::SeqCst); + let expected_stacks_height = last_confirmed_height + 1; + + info!( + "Waiting for Nakamoto block {} pushed by miner 2", + expected_stacks_height + ); + + let _miner_2_block_n_1 = + wait_for_block_pushed_by_miner_key(30, expected_stacks_height, &miner_pk_2) + .expect(&format!("Failed to get block {:?}", expected_stacks_height)); + } + + fn label(&self) -> String { + "WAIT_FOR_TENURE_CHANGE_BLOCK_FROM_MINER_2".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new( + WaitForTenureChangeBlockFromMiner2::new(ctx.miners.clone()), + )) + } +} diff --git a/testnet/stacks-node/src/tests/signer/commands/boot.rs b/testnet/stacks-node/src/tests/signer/commands/boot.rs new file mode 100644 index 0000000000..9194b738e1 --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/boot.rs @@ -0,0 +1,51 @@ +use std::sync::{Arc, Mutex}; + +use madhouse::{Command, CommandWrapper}; +use proptest::prelude::{Just, Strategy}; + +use super::context::{SignerTestContext, SignerTestState}; +use crate::tests::neon_integrations::get_chain_info; +use crate::tests::signer::v0::MultipleMinerTest; + +pub struct BootToEpoch3 { + miners: Arc>, +} + +impl BootToEpoch3 { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for BootToEpoch3 { + fn check(&self, state: &SignerTestState) -> bool { + info!( + 
"Checking: Booting miners to Nakamoto. Result: {:?}", + !state.is_booted_to_nakamoto + ); + !state.is_booted_to_nakamoto + } + + fn apply(&self, state: &mut SignerTestState) { + info!("Applying: Booting miners to Nakamoto"); + + self.miners.lock().unwrap().boot_to_epoch_3(); + + let (conf_1, _) = self.miners.lock().unwrap().get_node_configs(); + let burn_block_height = get_chain_info(&conf_1).burn_block_height; + + assert_eq!(burn_block_height, 231); + + state.is_booted_to_nakamoto = true; + } + + fn label(&self) -> String { + "BOOT_TO_EPOCH_3".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(BootToEpoch3::new(ctx.miners.clone()))) + } +} diff --git a/testnet/stacks-node/src/tests/signer/commands/commit_ops.rs b/testnet/stacks-node/src/tests/signer/commands/commit_ops.rs new file mode 100644 index 0000000000..6786fa15c7 --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/commit_ops.rs @@ -0,0 +1,91 @@ +use std::sync::Arc; + +use madhouse::{Command, CommandWrapper}; +use proptest::prelude::{Just, Strategy}; +use stacks::util::tests::TestFlag; + +use super::context::{SignerTestContext, SignerTestState}; + +pub struct SkipCommitOpMiner1 { + miner_1_skip_commit_flag: TestFlag, +} + +impl SkipCommitOpMiner1 { + pub fn new(miner_1_skip_commit_flag: TestFlag) -> Self { + Self { + miner_1_skip_commit_flag, + } + } +} + +impl Command for SkipCommitOpMiner1 { + fn check(&self, state: &SignerTestState) -> bool { + info!( + "Checking: Skipping commit operations for miner 1. 
Result: {:?}", + !state.is_primary_miner_skip_commit_op + ); + !state.is_primary_miner_skip_commit_op + } + + fn apply(&self, state: &mut SignerTestState) { + info!("Applying: Skipping commit operations for miner 1"); + + self.miner_1_skip_commit_flag.set(true); + + state.is_primary_miner_skip_commit_op = true; + } + + fn label(&self) -> String { + "SKIP_COMMIT_OP_MINER_1".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(SkipCommitOpMiner1::new( + ctx.miners.lock().unwrap().get_primary_skip_commit_flag(), + ))) + } +} + +pub struct SkipCommitOpMiner2 { + miner_2_skip_commit_flag: TestFlag, +} + +impl SkipCommitOpMiner2 { + pub fn new(miner_2_skip_commit_flag: TestFlag) -> Self { + Self { + miner_2_skip_commit_flag, + } + } +} + +impl Command for SkipCommitOpMiner2 { + fn check(&self, state: &SignerTestState) -> bool { + info!( + "Checking: Skipping commit operations for miner 2. Result: {:?}", + !state.is_secondary_miner_skip_commit_op + ); + !state.is_secondary_miner_skip_commit_op + } + + fn apply(&self, state: &mut SignerTestState) { + info!("Applying: Skipping commit operations for miner 2"); + + self.miner_2_skip_commit_flag.set(true); + + state.is_secondary_miner_skip_commit_op = true; + } + + fn label(&self) -> String { + "SKIP_COMMIT_OP_MINER_2".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(SkipCommitOpMiner2::new( + ctx.miners.lock().unwrap().get_secondary_skip_commit_flag(), + ))) + } +} diff --git a/testnet/stacks-node/src/tests/signer/commands/context.rs b/testnet/stacks-node/src/tests/signer/commands/context.rs new file mode 100644 index 0000000000..3feb11a29d --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/context.rs @@ -0,0 +1,58 @@ +use std::fmt::Debug; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use madhouse::{State, TestContext}; + +use crate::tests::signer::v0::MultipleMinerTest; + +#[derive(Clone)] +pub struct 
SignerTestContext { + pub miners: Arc>, +} + +impl Debug for SignerTestContext { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SignerTestContext").finish() + } +} + +impl TestContext for SignerTestContext {} + +impl SignerTestContext { + pub fn new(num_signers: usize, num_transfer_txs: u64) -> Self { + let miners = MultipleMinerTest::new_with_config_modifications( + num_signers, + num_transfer_txs, + |signer_config| { + signer_config.block_proposal_validation_timeout = Duration::from_secs(1800); + signer_config.tenure_last_block_proposal_timeout = Duration::from_secs(1800); + signer_config.first_proposal_burn_block_timing = Duration::from_secs(1800); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + ); + + Self { + miners: Arc::new(Mutex::new(miners)), + } + } +} + +type StacksHeightBefore = u64; +type TxId = String; + +#[derive(Debug, Default)] +pub struct SignerTestState { + pub is_booted_to_nakamoto: bool, + pub is_primary_miner_skip_commit_op: bool, + pub is_secondary_miner_skip_commit_op: bool, + pub mining_stalled: bool, + pub transfer_txs_submitted: Vec<(StacksHeightBefore, TxId)>, +} + +impl State for SignerTestState {} diff --git a/testnet/stacks-node/src/tests/signer/commands/mod.rs b/testnet/stacks-node/src/tests/signer/commands/mod.rs new file mode 100644 index 0000000000..4da0c1ec9a --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/mod.rs @@ -0,0 +1,23 @@ +mod context; + +mod bitcoin_mining; +mod block_commit; +mod block_wait; +mod boot; +mod commit_ops; +mod shutdown; +mod sortition; +mod stacks_mining; +mod transfer; + +pub use bitcoin_mining::MineBitcoinBlock; +pub use block_commit::SubmitBlockCommitMiner2; +pub use block_wait::{WaitForTenureChangeBlockFromMiner1, WaitForTenureChangeBlockFromMiner2}; +pub use boot::BootToEpoch3; +pub use commit_ops::{SkipCommitOpMiner1, 
SkipCommitOpMiner2}; +pub use context::SignerTestContext; +pub use shutdown::ShutdownMiners; +pub use sortition::{ + VerifyLastSortitionWinnerReorged, VerifyMiner1WonSortition, VerifyMiner2WonSortition, +}; +pub use stacks_mining::{PauseStacksMining, ResumeStacksMining}; diff --git a/testnet/stacks-node/src/tests/signer/commands/shutdown.rs b/testnet/stacks-node/src/tests/signer/commands/shutdown.rs new file mode 100644 index 0000000000..28cee7163e --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/shutdown.rs @@ -0,0 +1,44 @@ +use std::sync::{Arc, Mutex}; + +use madhouse::{Command, CommandWrapper}; +use proptest::prelude::{Just, Strategy}; + +use super::context::{SignerTestContext, SignerTestState}; +use crate::tests::signer::v0::MultipleMinerTest; + +pub struct ShutdownMiners { + miners: Arc>, +} + +impl ShutdownMiners { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for ShutdownMiners { + fn check(&self, _state: &SignerTestState) -> bool { + info!("Checking: Shutting down miners. 
Result: {:?}", true); + true + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Shutting down miners"); + + if let Ok(miners_arc) = Arc::try_unwrap(self.miners.clone()) { + if let Ok(miners) = miners_arc.into_inner() { + miners.shutdown(); + } + } + } + + fn label(&self) -> String { + "SHUTDOWN_MINERS".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(ShutdownMiners::new(ctx.miners.clone()))) + } +} diff --git a/testnet/stacks-node/src/tests/signer/commands/sortition.rs b/testnet/stacks-node/src/tests/signer/commands/sortition.rs new file mode 100644 index 0000000000..ded82db07b --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/sortition.rs @@ -0,0 +1,130 @@ +use std::sync::{Arc, Mutex}; + +use madhouse::{Command, CommandWrapper}; +use proptest::prelude::{Just, Strategy}; + +use super::context::SignerTestState; +use super::SignerTestContext; +use crate::tests::signer::v0::{verify_sortition_winner, MultipleMinerTest}; + +pub struct VerifyMiner1WonSortition { + miners: Arc>, +} + +impl VerifyMiner1WonSortition { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for VerifyMiner1WonSortition { + fn check(&self, _state: &SignerTestState) -> bool { + info!( + "Checking: Verifying miner 1 won sortition. 
Result: {:?}", + true + ); + true + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Verifying miner 1 won sortition"); + + let (conf_1, _) = self.miners.lock().unwrap().get_node_configs(); + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (miner_pkh_1, _) = self.miners.lock().unwrap().get_miner_public_key_hashes(); + + verify_sortition_winner(&sortdb, &miner_pkh_1); + } + fn label(&self) -> String { + "VERIFY_MINER_1_WON_SORTITION".to_string() + } + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(VerifyMiner1WonSortition::new( + ctx.miners.clone(), + ))) + } +} + +pub struct VerifyMiner2WonSortition { + miners: Arc>, +} + +impl VerifyMiner2WonSortition { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for VerifyMiner2WonSortition { + fn check(&self, _state: &SignerTestState) -> bool { + info!( + "Checking: Verifying miner 2 won sortition. Result: {:?}", + true + ); + true + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Verifying miner 2 won sortition"); + + let (conf_1, _) = self.miners.lock().unwrap().get_node_configs(); + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (_, miner_pkh_2) = self.miners.lock().unwrap().get_miner_public_key_hashes(); + + verify_sortition_winner(&sortdb, &miner_pkh_2); + } + fn label(&self) -> String { + "VERIFY_MINER_2_WON_SORTITION".to_string() + } + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(VerifyMiner2WonSortition::new( + ctx.miners.clone(), + ))) + } +} + +pub struct VerifyLastSortitionWinnerReorged { + miners: Arc>, +} + +impl VerifyLastSortitionWinnerReorged { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for VerifyLastSortitionWinnerReorged { + fn check(&self, _state: &SignerTestState) -> bool { + info!( + "Checking: Verifying last sortition winner 
reorged. Result: {:?}", + true + ); + true + } + + fn apply(&self, _state: &mut SignerTestState) { + info!("Applying: Verifying last sortition winner reorged"); + self.miners + .lock() + .unwrap() + .assert_last_sortition_winner_reorged(); + } + + fn label(&self) -> String { + "VERIFY_LAST_SORTITION_WINNER_REORGED".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(VerifyLastSortitionWinnerReorged::new( + ctx.miners.clone(), + ))) + } +} diff --git a/testnet/stacks-node/src/tests/signer/commands/stacks_mining.rs b/testnet/stacks-node/src/tests/signer/commands/stacks_mining.rs new file mode 100644 index 0000000000..2a640f974d --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/stacks_mining.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use madhouse::{Command, CommandWrapper}; +use proptest::prelude::{Just, Strategy}; + +use super::context::{SignerTestContext, SignerTestState}; + +pub struct PauseStacksMining; + +impl Command for PauseStacksMining { + fn check(&self, state: &SignerTestState) -> bool { + info!( + "Checking: Stalling mining. Result: {:?}", + !state.mining_stalled + ); + !state.mining_stalled + } + + fn apply(&self, state: &mut SignerTestState) { + info!("Applying: Stalling mining"); + crate::tests::signer::v0::test_mine_stall_set(true); + state.mining_stalled = true; + } + + fn label(&self) -> String { + "PAUSE_STACKS_MINING".to_string() + } + + fn build( + _ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(PauseStacksMining)) + } +} + +pub struct ResumeStacksMining; + +impl Command for ResumeStacksMining { + fn check(&self, state: &SignerTestState) -> bool { + info!( + "Checking: Recovering from mining stall. 
Result: {:?}", + state.mining_stalled + ); + state.mining_stalled + } + + fn apply(&self, state: &mut SignerTestState) { + info!("Applying: Recovering from mining stall"); + crate::tests::signer::v0::test_mine_stall_set(false); + state.mining_stalled = false; + } + + fn label(&self) -> String { + "RESUME_STACKS_MINING".to_string() + } + + fn build( + _ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(ResumeStacksMining)) + } +} diff --git a/testnet/stacks-node/src/tests/signer/commands/transfer.rs b/testnet/stacks-node/src/tests/signer/commands/transfer.rs new file mode 100644 index 0000000000..749bbe2dfc --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/commands/transfer.rs @@ -0,0 +1,47 @@ +use std::sync::{Arc, Mutex}; + +use madhouse::{Command, CommandWrapper}; +use proptest::prelude::{Just, Strategy}; + +use super::context::{SignerTestContext, SignerTestState}; +use crate::tests::neon_integrations::get_chain_info; +use crate::tests::signer::v0::MultipleMinerTest; + +pub struct SendTransferTx { + miners: Arc>, +} + +impl SendTransferTx { + pub fn new(miners: Arc>) -> Self { + Self { miners } + } +} + +impl Command for SendTransferTx { + fn check(&self, _state: &SignerTestState) -> bool { + info!("Checking: Sending transfer tx. 
Result: {:?}", true); + true + } + + fn apply(&self, state: &mut SignerTestState) { + info!("Applying: Sending transfer tx"); + + let (conf_1, _) = self.miners.lock().unwrap().get_node_configs(); + let stacks_height_before = get_chain_info(&conf_1).stacks_tip_height; + let (txid, _) = self.miners.lock().unwrap().send_transfer_tx(); + + state + .transfer_txs_submitted + .push((stacks_height_before, txid)); + } + + fn label(&self) -> String { + "SEND_TRANSFER_TX".to_string() + } + + fn build( + ctx: Arc, + ) -> impl Strategy> { + Just(CommandWrapper::new(SendTransferTx::new(ctx.miners.clone()))) + } +} diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 8d10be077a..83091b78de 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -12,6 +12,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +mod commands; mod v0; use std::collections::HashSet; @@ -32,7 +33,9 @@ use stacks::chainstate::nakamoto::NakamotoBlock; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::StacksPrivateKey; use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use stacks::core::test_util::{make_contract_call, make_contract_publish, make_stacks_transfer}; +use stacks::core::test_util::{ + make_contract_call, make_contract_publish, make_stacks_transfer_serialized, +}; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; @@ -437,7 +440,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { /// Run the test until the first epoch 2.5 reward cycle. /// Will activate pox-4 and register signers for the first full Epoch 2.5 reward cycle. 
@@ -616,6 +627,46 @@ impl MultipleMinerTest { } } + pub fn get_primary_skip_commit_flag(&self) -> stacks::util::tests::TestFlag { + self.signer_test + .running_nodes + .counters + .naka_skip_commit_op + .clone() + } + + pub fn get_secondary_skip_commit_flag(&self) -> stacks::util::tests::TestFlag { + self.rl2_counters.naka_skip_commit_op.clone() + } + + pub fn get_primary_last_stacks_tip_counter(&self) -> RunLoopCounter { + self.signer_test + .running_nodes + .counters + .naka_submitted_commit_last_stacks_tip + .clone() + } + + pub fn get_secondary_last_stacks_tip_counter(&self) -> RunLoopCounter { + self.rl2_counters + .naka_submitted_commit_last_stacks_tip + .clone() + } + + pub fn get_primary_submitted_commit_last_burn_height(&self) -> RunLoopCounter { + self.signer_test + .running_nodes + .counters + .naka_submitted_commit_last_burn_height + .clone() + } + + pub fn get_secondary_submitted_commit_last_burn_height(&self) -> RunLoopCounter { + self.rl2_counters + .naka_submitted_commit_last_burn_height + .clone() + } + /// Boot node 1 to epoch 3.0 and wait for node 2 to catch up. 
pub fn boot_to_epoch_3(&mut self) { info!( @@ -1018,6 +1069,12 @@ impl MultipleMinerTest { }) .expect("Timed out waiting for boostrapped node to catch up to the miner"); } + + pub fn assert_last_sortition_winner_reorged(&self) { + let (conf_1, _) = self.get_node_configs(); + let latest_sortition = get_sortition_info(&conf_1); + assert!(latest_sortition.stacks_parent_ch != latest_sortition.last_sortition_ch); + } } /// Returns whether the last block in the test observer contains a tenure change @@ -1061,7 +1118,7 @@ fn verify_last_block_contains_tenure_change_tx(cause: TenureChangeCause) { } /// Verifies that the tip of the sortition database was won by the provided miner public key hash -fn verify_sortition_winner(sortdb: &SortitionDB, miner_pkh: &Hash160) { +pub fn verify_sortition_winner(sortdb: &SortitionDB, miner_pkh: &Hash160) { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); assert!(tip.sortition); assert_eq!(&tip.miner_pk_hash.unwrap(), miner_pkh); @@ -1160,7 +1217,7 @@ fn wait_for_block_pushed( } /// Waits for a block with the provided expected height to be proposed and pushed by the miner with the provided public key. 
-fn wait_for_block_pushed_by_miner_key( +pub fn wait_for_block_pushed_by_miner_key( timeout_secs: u64, expected_height: u64, miner_key: &StacksPublicKey, @@ -1374,14 +1431,22 @@ pub fn wait_for_block_rejections_from_signers( Ok(result) } -/// Waits for all of the provided signers to send an update for a block with the specificed burn block height and parent tenure stacks block height +/// Waits for all of the provided signers to send an update for a block with the specificed burn block height and parent tenure stacks block height and message version pub fn wait_for_state_machine_update( timeout_secs: u64, expected_burn_block: &ConsensusHash, expected_burn_block_height: u64, expected_miner_info: Option<(Hash160, u64)>, + signer_keys: &[StacksPublicKey], + version: u64, ) -> Result<(), String> { + let addresses: Vec<_> = signer_keys + .iter() + .map(|key| StacksAddress::p2pkh(false, &key)) + .collect(); + wait_for(timeout_secs, || { + let mut found_updates = HashSet::new(); let stackerdb_events = test_observer::get_stackerdb_chunks(); for chunk in stackerdb_events .into_iter() @@ -1392,26 +1457,42 @@ pub fn wait_for_state_machine_update( let SignerMessage::StateMachineUpdate(update) = message else { continue; }; - let StateMachineUpdateContent::V0 { - burn_block, - burn_block_height, - current_miner, - } = &update.content; + let Some(address) = addresses.iter().find(|addr| chunk.verify(addr).unwrap()) else { + continue; + }; + let (burn_block, burn_block_height, current_miner) = match (version, &update.content) { + ( + 0, + StateMachineUpdateContent::V0 { + burn_block, + burn_block_height, + current_miner, + }, + ) + | ( + 1, + StateMachineUpdateContent::V1 { + burn_block, + burn_block_height, + current_miner, + .. 
+ }, + ) => (burn_block, burn_block_height, current_miner), + (_, _) => continue, + }; if *burn_block_height != expected_burn_block_height || burn_block != expected_burn_block { continue; } - match current_miner { - StateMachineUpdateMinerState::ActiveMiner { - current_miner_pkh, - parent_tenure_last_block_height, - .. - } => { - if let Some(( - expected_miner_pkh, - expected_miner_parent_tenure_last_block_height, - )) = expected_miner_info - { + if let Some((expected_miner_pkh, expected_miner_parent_tenure_last_block_height)) = + expected_miner_info + { + match current_miner { + StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh, + parent_tenure_last_block_height, + .. + } => { if expected_miner_pkh != *current_miner_pkh || expected_miner_parent_tenure_last_block_height != *parent_tenure_last_block_height @@ -1419,20 +1500,19 @@ pub fn wait_for_state_machine_update( continue; } } - } - StateMachineUpdateMinerState::NoValidMiner => { - if expected_miner_info.is_some() { + StateMachineUpdateMinerState::NoValidMiner => { continue; - }; + } } - } + }; // We only need one update to match our conditions - return Ok(true); + found_updates.insert(address); } - Ok(false) + Ok(found_updates.len() == signer_keys.len()) }) } +#[tag(bitcoind)] #[test] #[ignore] /// Test that a signer can respond to an invalid block proposal @@ -1467,6 +1547,7 @@ fn block_proposal_rejection() { info!("------------------------- Send Block Proposal To Signers -------------------------"); let proposal_conf = ProposalEvalConfig { + proposal_wait_for_parent_time: Duration::from_secs(0), first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), @@ -1511,7 +1592,7 @@ fn block_proposal_rejection() { signer_test.wait_for_validate_reject_response(short_timeout, block_signer_signature_hash_2); assert!(matches!( reject.reason_code, - ValidateRejectCode::InvalidBlock + 
ValidateRejectCode::UnknownParent )); let start_polling = Instant::now(); @@ -1541,7 +1622,7 @@ fn block_proposal_rejection() { found_signer_signature_hash_2 = true; assert!(matches!( reason_code, - RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock) + RejectCode::ValidationFailed(ValidateRejectCode::UnknownParent) )); } else { continue; @@ -1667,136 +1748,431 @@ fn mine_2_nakamoto_reward_cycles() { #[test] #[ignore] -/// This test is a regression test for issue #5858 in which the signer runloop -/// used the signature from the stackerdb to determine the miner public key. -/// This does not work in cases where events get coalesced. The fix was to use -/// the signature in the proposal's block header instead. -/// -/// This test covers the regression by adding a thread that interposes on the -/// stackerdb events sent to the test signers and mutating the signatures -/// so that the stackerdb chunks are signed by the wrong signer. After the -/// fix to #5848, signers are resilient to this behavior because they check -/// the signature on the block proposal (not the chunk). 
-fn regr_use_block_header_pk() { +fn revalidate_unknown_parent() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let signer_listeners: Mutex> = Mutex::default(); + let max_nakamoto_tenures = 30; + let inter_blocks_per_tenure = 5; + + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + + // All signers are listening to node 1 let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![], - |_| {}, - |node_config| { - node_config.events_observers = node_config - .events_observers - .clone() - .into_iter() - .map(|mut event_observer| { - if event_observer - .endpoint - .ends_with(&test_observer::EVENT_OBSERVER_PORT.to_string()) - { - event_observer - } else if event_observer - .events_keys - .contains(&EventKeyType::StackerDBChunks) - { - event_observer - .events_keys - .retain(|key| *key != EventKeyType::StackerDBChunks); - let mut listeners_lock = signer_listeners.lock().unwrap(); - listeners_lock.push(event_observer.endpoint.clone()); - event_observer - } else { - event_observer - } - }) - .collect(); + vec![( + sender_addr, + (send_amt + send_fee) * max_nakamoto_tenures * 
inter_blocks_per_tenure, + )], + |signer_config| { + signer_config.node_host = node_1_rpc_bind.clone(); + signer_config.first_proposal_burn_block_timing = Duration::from_secs(0); + // rely on actually checking that the block is processed + signer_config.proposal_wait_for_parent_time = Duration::from_secs(600); }, - None, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.node.pox_sync_sample_secs = 30; + config.miner.block_commit_delay = Duration::from_secs(0); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + // Increase the reward cycle length to avoid missing a prepare phase + // while we are intentionally forking. + config.burnchain.pox_reward_length = Some(40); + config.burnchain.pox_prepare_length = Some(10); + + // Move epoch 2.5 and 3.0 earlier, so we have more time for the + // test before re-stacking is required. + if let Some(epochs) = config.burnchain.epochs.as_mut() { + epochs[StacksEpochId::Epoch24].end_height = 131; + epochs[StacksEpochId::Epoch25].start_height = 131; + epochs[StacksEpochId::Epoch25].end_height = 166; + epochs[StacksEpochId::Epoch30].start_height = 166; + } else { + panic!("Expected epochs to be set"); + } + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); - let signer_listeners: Vec<_> = signer_listeners - .lock() - .unwrap() - .drain(..) 
- .map(|endpoint| EventObserver { - endpoint, - db_path: None, - timeout: Duration::from_secs(120), - disable_retries: false, - }) - .collect(); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed; + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); - let bad_signer = Secp256k1PrivateKey::from_seed(&[0xde, 0xad, 0xbe, 0xef]); - let bad_signer_pk = Secp256k1PublicKey::from_private(&bad_signer); + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - let broadcast_thread_stopper = Arc::new(AtomicBool::new(true)); - let broadcast_thread_flag = broadcast_thread_stopper.clone(); - let broadcast_thread = thread::Builder::new() - .name("rebroadcast-thread".into()) - .spawn(move || { - let mut last_sent = 0; - while broadcast_thread_flag.load(Ordering::SeqCst) { - thread::sleep(Duration::from_secs(1)); - let mut signerdb_chunks = test_observer::get_stackerdb_chunks(); - if last_sent >= signerdb_chunks.len() { - continue; - } - let mut to_send = signerdb_chunks.split_off(last_sent); - last_sent = signerdb_chunks.len(); - for event in to_send.iter_mut() { - // mutilate the signature - event.modified_slots.iter_mut().for_each(|chunk_data| { - let pk = chunk_data.recover_pk().unwrap(); - assert_ne!(pk, bad_signer_pk); - chunk_data.sign(&bad_signer).unwrap(); - }); + conf_node_2.node.working_dir = 
format!("{}-1", conf_node_2.node.working_dir); - let payload = serde_json::to_value(event).unwrap(); - for signer_listener in signer_listeners.iter() { - signer_listener.send_stackerdb_chunks(&payload); - } - } - } - }) - .unwrap(); + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); - let timeout = Duration::from_secs(200); - signer_test.boot_to_epoch_3(); + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); - let prior_stacks_height = signer_test.get_peer_info().stacks_tip_height; + let http_origin = format!("http://{}", &conf.node.rpc_bind); - let tenures_to_mine = 2; - for _i in 0..tenures_to_mine { - signer_test.mine_nakamoto_block(timeout, false); - } + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let Counters { + naka_skip_commit_op: rl2_skip_commit_op, + .. 
+ } = run_loop_2.counters(); + let rl2_counters = run_loop_2.counters(); + let rl1_counters = signer_test.running_nodes.counters.clone(); - let current_stacks_height = signer_test.get_peer_info().stacks_tip_height; + signer_test.boot_to_epoch_3(); - assert!(current_stacks_height >= prior_stacks_height + tenures_to_mine); + // Pause block commits from miner 2 to make sure + // miner 1 wins the first block + rl2_skip_commit_op.set(true); - broadcast_thread_stopper.store(false, Ordering::SeqCst); - broadcast_thread.join().unwrap(); - signer_test.shutdown(); -} + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); -#[test] -#[ignore] -fn forked_tenure_invalid() { + wait_for(200, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let rl1_skip_commit_op = signer_test + .running_nodes + .counters + .naka_skip_commit_op + .clone(); + + let sortdb = SortitionDB::open( + &conf.get_burn_db_file_path(), + false, + conf.get_burnchain().pox_constants, + ) + .unwrap(); + + info!("-------- Waiting miner 2 to catch up to miner 1 --------"); + + // Wait for miner 2 to catch up to miner 1 + // (note: use a high timeout to avoid potential failing on github workflow) + wait_for(600, || { + let info_1 = get_chain_info(&conf); + let info_2 = get_chain_info(&conf_node_2); + Ok(info_1.stacks_tip_height == info_2.stacks_tip_height) + }) + .expect("Timed out waiting for miner 2 to catch up to miner 1"); + + info!("-------- Miner 2 caught up to miner 1 --------"); + + let info_before = get_chain_info(&conf); + + info!("-------- Miner 1 starting next tenure 
--------"); + + wait_for(60, || { + Ok(rl1_counters.naka_submitted_commit_last_burn_height.get() + >= info_before.burn_block_height) + }) + .unwrap(); + info!("-------- Blocking Miner 1 so that Miner 2 will win the next next tenure --------"); + rl1_skip_commit_op.set(true); + + // Mine the first block + signer_test.mine_bitcoin_block(); + signer_test.check_signer_states_normal(); + + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_1)); + + info!("------- Unblocking Miner 2 ------"); + rl2_skip_commit_op.set(false); + wait_for(60, || { + Ok(rl2_counters.naka_submitted_commit_last_burn_height.get() + > info_before.burn_block_height + && rl2_counters.naka_submitted_commit_last_stacks_tip.get() + > info_before.stacks_tip_height) + }) + .unwrap(); + let peer_info_before = signer_test.get_peer_info(); + info!("------- Miner 2 wins first tenure ------"); + signer_test.mine_bitcoin_block(); + signer_test.check_signer_states_normal(); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_2)); + + // Setup miner 1 to ignore a block in this tenure + let ignore_block = peer_info_before.stacks_tip_height + 2; + set_ignore_block(ignore_block, &conf.node.working_dir); + + // wait for the tenure to start (i.e., the tenure change block to be produced, + // which should be mined and not ignored) + wait_for(60, || { + Ok(signer_test.get_peer_info().stacks_tip_height == ignore_block - 1) + }) + .unwrap(); + + info!( + "Mining 1st interim block in Miner 2's first tenure"; + ); + + let (_, sender_nonce) = signer_test + .submit_transfer_tx(&sender_sk, send_fee, send_amt) + .unwrap(); + + wait_for(60, || { + let http_origin = &conf_node_2.node.data_url; + Ok(get_account(http_origin, &sender_addr).nonce > sender_nonce) + }) + .unwrap(); + + // should not have updated yet in node 1 + assert_eq!(get_account(&http_origin, 
&sender_addr).nonce, sender_nonce); + + info!( + "Mining 2nd interim block in Miner 2's first tenure"; + ); + + let sender_nonce = get_account(&conf_node_2.node.data_url, &sender_addr).nonce; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let transfer_tx = make_stacks_transfer_serialized( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); + + // should be no pending proposals yet. + signer_test + .get_all_states() + .iter() + .for_each(|state| assert_eq!(state.pending_proposals_count, 0)); + + submit_tx_fallible(&http_origin, &transfer_tx).unwrap(); + + wait_for(60, || { + Ok(signer_test.get_all_states().iter().all(|state| { + info!( + "State: pending_proposal_count = {}", + state.pending_proposals_count + ); + state.pending_proposals_count == 1 + })) + }) + .unwrap(); + + // sleep to make sure that the pending proposal isn't just temporarily pending + thread::sleep(Duration::from_secs(5)); + + signer_test + .get_all_states() + .iter() + .for_each(|state| assert_eq!(state.pending_proposals_count, 1)); + assert_eq!( + get_account(&http_origin, &sender_addr).nonce, + sender_nonce - 1 + ); + + // clear the block ignore and make sure that the proposal gets processed by miner 1 + clear_ignore_block(); + + wait_for(60, || { + Ok(get_account(&http_origin, &sender_addr).nonce > sender_nonce) + }) + .unwrap(); + + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test is a regression test for issue #5858 in which the signer runloop +/// used the signature from the stackerdb to determine the miner public key. +/// This does not work in cases where events get coalesced. The fix was to use +/// the signature in the proposal's block header instead. 
+/// +/// This test covers the regression by adding a thread that interposes on the +/// stackerdb events sent to the test signers and mutating the signatures +/// so that the stackerdb chunks are signed by the wrong signer. After the +/// fix to #5848, signers are resilient to this behavior because they check +/// the signature on the block proposal (not the chunk). +fn regr_use_block_header_pk() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let signer_listeners: Mutex> = Mutex::default(); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |_| {}, + |node_config| { + node_config.events_observers = node_config + .events_observers + .clone() + .into_iter() + .map(|mut event_observer| { + if event_observer + .endpoint + .ends_with(&test_observer::EVENT_OBSERVER_PORT.to_string()) + { + event_observer + } else if event_observer + .events_keys + .contains(&EventKeyType::StackerDBChunks) + { + event_observer + .events_keys + .retain(|key| *key != EventKeyType::StackerDBChunks); + let mut listeners_lock = signer_listeners.lock().unwrap(); + listeners_lock.push(event_observer.endpoint.clone()); + event_observer + } else { + event_observer + } + }) + .collect(); + }, + None, + None, + ); + + let signer_listeners: Vec<_> = signer_listeners + .lock() + .unwrap() + .drain(..) 
+ .map(|endpoint| EventObserver { + endpoint, + db_path: None, + timeout: Duration::from_secs(120), + disable_retries: false, + }) + .collect(); + + let bad_signer = Secp256k1PrivateKey::from_seed(&[0xde, 0xad, 0xbe, 0xef]); + let bad_signer_pk = Secp256k1PublicKey::from_private(&bad_signer); + + let broadcast_thread_stopper = Arc::new(AtomicBool::new(true)); + let broadcast_thread_flag = broadcast_thread_stopper.clone(); + let broadcast_thread = thread::Builder::new() + .name("rebroadcast-thread".into()) + .spawn(move || { + let mut last_sent = 0; + while broadcast_thread_flag.load(Ordering::SeqCst) { + thread::sleep(Duration::from_secs(1)); + let mut signerdb_chunks = test_observer::get_stackerdb_chunks(); + if last_sent >= signerdb_chunks.len() { + continue; + } + let mut to_send = signerdb_chunks.split_off(last_sent); + last_sent = signerdb_chunks.len(); + for event in to_send.iter_mut() { + // mutilate the signature + event.modified_slots.iter_mut().for_each(|chunk_data| { + let pk = chunk_data.recover_pk().unwrap(); + assert_ne!(pk, bad_signer_pk); + chunk_data.sign(&bad_signer).unwrap(); + }); + + let payload = serde_json::to_value(event).unwrap(); + for signer_listener in signer_listeners.iter() { + signer_listener.send_stackerdb_chunks(&payload); + } + } + } + }) + .unwrap(); + + let timeout = Duration::from_secs(200); + signer_test.boot_to_epoch_3(); + + let prior_stacks_height = signer_test.get_peer_info().stacks_tip_height; + + let tenures_to_mine = 2; + for _i in 0..tenures_to_mine { + signer_test.mine_nakamoto_block(timeout, false); + } + + let current_stacks_height = signer_test.get_peer_info().stacks_tip_height; + + assert!(current_stacks_height >= prior_stacks_height + tenures_to_mine); + + broadcast_thread_stopper.store(false, Ordering::SeqCst); + broadcast_thread.join().unwrap(); + signer_test.shutdown(); +} + +#[test] +#[ignore] +fn forked_tenure_invalid() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let result = 
forked_tenure_testing(Duration::from_secs(5), Duration::from_secs(7), false); + let result = forked_tenure_testing(Duration::from_secs(5), None, Duration::from_secs(7), false); assert_ne!( result.tip_b.index_block_hash(), @@ -1863,7 +2239,8 @@ fn forked_tenure_okay() { return; } - let result = forked_tenure_testing(Duration::from_secs(360), Duration::from_secs(0), true); + let result = + forked_tenure_testing(Duration::from_secs(360), None, Duration::from_secs(0), true); assert_ne!(result.tip_b, result.tip_a); assert_ne!(result.tip_b, result.tip_c); @@ -2049,6 +2426,7 @@ fn reloads_signer_set_in() { /// * tenure C ignores b_0, and correctly builds off of block a_x. fn forked_tenure_testing( proposal_limit: Duration, + odd_proposal_limit: Option<Duration>, post_btc_block_pause: Duration, expect_tenure_c: bool, ) -> TenureForkingResult { @@ -2068,7 +2446,17 @@ fn forked_tenure_testing( vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the reorg attempt will definitely be accepted - config.first_proposal_burn_block_timing = proposal_limit; + config.first_proposal_burn_block_timing = odd_proposal_limit + .map(|limit| { + if config.endpoint.port() % 2 == 1 { + // 2/5 or 40% of signers will have this separate limit + limit + } else { + // 3/5 or 60% of signers will have this original limit + proposal_limit + } + }) + .unwrap_or(proposal_limit); // don't allow signers to post signed blocks (limits the amount of fault injection we // need) TEST_SKIP_BLOCK_BROADCAST.set(true); @@ -2329,7 +2717,7 @@ fn forked_tenure_testing( // Now let's produce a second block for tenure C and ensure it builds off of block C. 
// submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3103,7 +3491,7 @@ fn end_of_tenure() { // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3218,7 +3606,7 @@ fn retry_on_rejection() { let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3252,7 +3640,7 @@ fn retry_on_rejection() { let blocks_before = mined_blocks.load(Ordering::SeqCst); // submit a tx so that the miner will mine a block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3345,7 +3733,7 @@ fn signers_broadcast_signed_blocks() { // submit a tx so that the miner will mine a blockn let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3476,7 +3864,7 @@ fn tenure_extend_with_other_transactions() { let stacks_tip_height = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; // Submit a transaction to force a response from signers that indicate that the tenure extend timeout is exceeded let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3496,7 +3884,7 @@ fn tenure_extend_with_other_transactions() { TEST_MINE_STALL.set(true); TEST_BROADCAST_PROPOSAL_STALL.set(vec![]); // Submit a transaction to be included with the tenure extend - let transfer_tx = make_stacks_transfer( + let 
transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3768,7 +4156,7 @@ fn stx_transfers_dont_effect_idle_timeout() { for i in 0..num_txs { info!("---- Mining interim block {} ----", i + 1); signer_test.wait_for_nakamoto_block(30, || { - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3962,7 +4350,7 @@ fn idle_tenure_extend_active_mining() { // Throw in a STX transfer to test mixed blocks let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -3997,6 +4385,7 @@ fn idle_tenure_extend_active_mining() { } TEST_MINE_STALL.set(false); }); + let latest_response = signer_test.get_latest_block_response(slot_id); let naka_blocks = test_observer::get_mined_nakamoto_blocks(); info!( @@ -4059,7 +4448,7 @@ fn idle_tenure_extend_active_mining() { signer_test.wait_for_nakamoto_block(30, || { // Throw in a STX transfer to test mixed blocks let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -4165,7 +4554,7 @@ fn empty_tenure_delayed() { // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -4362,7 +4751,7 @@ fn empty_sortition_before_approval() { // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -4516,7 +4905,7 @@ fn empty_sortition_before_proposal() { // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let 
transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -4926,7 +5315,7 @@ fn signer_set_rollover() { info!("---- Mining a block to trigger the signer set -----"); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -5068,7 +5457,7 @@ fn signer_set_rollover() { info!("---- Mining a block to verify new signer set -----"); let sender_nonce = 1; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -5146,7 +5535,7 @@ fn min_gap_between_blocks() { for interim_block_ix in 0..interim_blocks { let blocks_processed_before = mined_blocks.load(Ordering::SeqCst); // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, interim_block_ix, // same as the sender nonce send_fee, @@ -5804,11 +6193,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], ); - let all_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); + let all_signers = signer_test.signer_test_pks(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); @@ -5819,7 +6204,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let info_before = signer_test.get_peer_info(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -5852,7 +6237,7 @@ fn 
locally_accepted_blocks_overriden_by_global_rejection() { let info_before = signer_test.get_peer_info(); // Make a new stacks transaction to create a different block signature, but make sure to propose it // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -5880,7 +6265,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); test_observer::clear(); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -5966,7 +6351,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -5999,7 +6384,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { // submit a tx so that the miner will mine a stacks block N+1 let info_before = signer_test.get_peer_info(); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6036,7 +6421,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6115,7 +6500,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let info_before = signer_test.get_peer_info(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = 
make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6158,7 +6543,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let info_before = signer_test.get_peer_info(); // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6287,7 +6672,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6328,7 +6713,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { let info_before = signer_test.get_peer_info(); // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6446,7 +6831,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let info_before = signer_test.get_peer_info(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6479,7 +6864,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { test_observer::clear(); let info_before = signer_test.get_peer_info(); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -6548,7 +6933,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .expect("Timed out waiting for block N+1' to be rejected"); // Induce block N+2 to get mined - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ 
-6903,7 +7288,7 @@ fn continue_after_tenure_extend() { for sender_nonce in 0..5 { let stacks_height_before = signer_test.get_peer_info().stacks_tip_height; // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -7303,7 +7688,7 @@ fn block_validation_response_timeout() { // submit a tx so that the miner will attempt to mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -7326,6 +7711,7 @@ fn block_validation_response_timeout() { info!("------------------------- Propose Another Block Before Hitting the Timeout -------------------------"); let proposal_conf = ProposalEvalConfig { + proposal_wait_for_parent_time: Duration::from_secs(0), first_proposal_burn_block_timing: Duration::from_secs(0), tenure_last_block_proposal_timeout: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(100), @@ -7594,7 +7980,7 @@ fn block_validation_pending_table() { // submit a tx so that the miner will attempt to mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -7614,6 +8000,7 @@ fn block_validation_pending_table() { info!("----- Proposing a concurrent block -----"); let proposal_conf = ProposalEvalConfig { + proposal_wait_for_parent_time: Duration::from_secs(0), first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), @@ -7759,7 +8146,7 @@ fn new_tenure_while_validating_previous_scenario() { // submit a tx so that the miner will attempt to mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( 
&sender_sk, sender_nonce, send_fee, @@ -8433,7 +8820,7 @@ fn global_acceptance_depends_on_block_announcement() { test_observer::clear(); // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -8477,7 +8864,7 @@ fn global_acceptance_depends_on_block_announcement() { // submit a tx so that the miner will mine a stacks block N+1 let info_before = signer_test.get_peer_info(); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -8850,7 +9237,7 @@ fn incoming_signers_ignore_block_proposals() { info!("------------------------- Test Mine A Valid Block -------------------------"); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -8902,6 +9289,7 @@ fn incoming_signers_ignore_block_proposals() { no_next_signer_messages(); let proposal_conf = ProposalEvalConfig { + proposal_wait_for_parent_time: Duration::from_secs(0), first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), @@ -9026,7 +9414,7 @@ fn outgoing_signers_ignore_block_proposals() { info!("------------------------- Test Mine A Valid Block -------------------------"); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -9082,6 +9470,7 @@ fn outgoing_signers_ignore_block_proposals() { old_signers_ignore_block_proposals(new_signature_hash); let proposal_conf = ProposalEvalConfig { + proposal_wait_for_parent_time: Duration::from_secs(0), 
first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), @@ -9360,10 +9749,7 @@ fn injected_signatures_are_ignored_across_boundaries() { } info!("---- Manually mine a single burn block to force the signers to update ----"); - next_block_and_wait( - &mut signer_test.running_nodes.btc_regtest_controller, - &signer_test.running_nodes.counters.blocks_processed, - ); + signer_test.mine_nakamoto_block(Duration::from_secs(60), true); signer_test.wait_for_registered_both_reward_cycles(); @@ -9409,7 +9795,7 @@ fn injected_signatures_are_ignored_across_boundaries() { let info_before = signer_test.get_peer_info(); // submit a tx so that the miner will ATTEMPT to mine a stacks block N - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 0, send_fee, @@ -9596,7 +9982,7 @@ fn reorg_attempts_count_towards_miner_validity() { test_observer::clear(); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -9750,7 +10136,7 @@ fn reorg_attempts_activity_timeout_exceeded() { test_observer::clear(); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -9873,7 +10259,7 @@ fn fast_sortition() { signer_test.boot_to_epoch_3(); info!("------------------------- Mine a Block -------------------------"); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -9903,7 +10289,7 @@ fn fast_sortition() { .expect("Failed to mine a block"); info!("------------------------- Mine a Block -------------------------"); - let transfer_tx = 
make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, sender_nonce, send_fee, @@ -10056,6 +10442,7 @@ fn multiple_miners_empty_sortition() { miners.shutdown(); } +#[tag(bitcoind, flaky, slow)] #[test] #[ignore] /// This test spins up a single nakamoto node configured to mine. @@ -10398,6 +10785,37 @@ fn allow_reorg_within_first_proposal_burn_block_timing_secs() { miners.shutdown(); } +#[test] +#[ignore] +fn allow_reorg_within_first_proposal_burn_block_timing_secs_scenario() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let num_transfer_txs = 3; + + let test_context = Arc::new(SignerTestContext::new(num_signers, num_transfer_txs)); + + scenario![ + test_context, + SkipCommitOpMiner2, + BootToEpoch3, + SkipCommitOpMiner1, + PauseStacksMining, + MineBitcoinBlock, + VerifyMiner1WonSortition, + SubmitBlockCommitMiner2, + ResumeStacksMining, + WaitForTenureChangeBlockFromMiner1, + MineBitcoinBlock, + VerifyMiner2WonSortition, + VerifyLastSortitionWinnerReorged, + WaitForTenureChangeBlockFromMiner2, + ShutdownMiners + ] +} + /// Test a scenario where: /// Two miners boot to Nakamoto. /// Sortition occurs. Miner 1 wins. 
@@ -12044,10 +12462,10 @@ fn mark_miner_as_invalid_if_reorg_is_rejected() { .expect("Failed to get block proposal N+1'"); // Stall the miner from proposing again until we're ready TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk_1]); - + // Due to reorging signers capitulating to the majority rejection of the reorg...all signers will update their state to reject miners .signer_test - .check_signer_states_reorg(&approving_signers, &rejecting_signers); + .check_signer_states_reorg(&[], &all_signers); info!("------------------------- Wait for 3 acceptances and 2 rejections -------------------------"); let signer_signature_hash = block_n_1_prime.header.signer_signature_hash(); @@ -12105,8 +12523,20 @@ fn repeated_rejection() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = - SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 3)]); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * 3)], + |_| {}, + |config| { + config.miner.block_rejection_timeout_steps.clear(); + config + .miner + .block_rejection_timeout_steps + .insert(0, Duration::from_secs(120)); + }, + None, + None, + ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -12131,7 +12561,7 @@ fn repeated_rejection() { let proposals_before = proposed_blocks.load(Ordering::SeqCst); // submit a tx so that the miner will mine a block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 0, send_fee, @@ -12257,7 +12687,7 @@ fn retry_proposal() { let proposals_before = proposed_blocks.load(Ordering::SeqCst); // submit a tx so that the miner will mine a block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 0, send_fee, @@ -12279,7 
+12709,7 @@ fn retry_proposal() { info!( "Block proposed, submitting another transaction that should not get included in the block" ); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 1, send_fee, @@ -12384,7 +12814,7 @@ fn signer_can_accept_rejected_block() { TEST_VALIDATE_STALL.set(true); // submit a tx so that the miner will mine a block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 0, send_fee, @@ -12413,7 +12843,7 @@ fn signer_can_accept_rejected_block() { info!( "Block proposed, submitting another transaction that should not get included in the block" ); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 1, send_fee, @@ -12777,7 +13207,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) let recipient_sk = StacksPrivateKey::random(); let recipient_addr = tests::to_addr(&recipient_sk); let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( sender_sk, *nonce, transfer_fee, @@ -12833,7 +13263,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) let sender_addr = tests::to_addr(sender_sk); let recipient_sk = StacksPrivateKey::random(); let recipient_addr = tests::to_addr(&recipient_sk); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( sender_sk, *nonce, transfer_fee, @@ -12894,7 +13324,8 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) let sender_addr = tests::to_addr(sender_sk); let fee = set_fee(); assert!(fee >= 180 && fee <= 2000); - let transfer_tx = make_stacks_transfer(sender_sk, *nonce, fee, chain_id, &recipient, 1); + let transfer_tx = + make_stacks_transfer_serialized(sender_sk, *nonce, fee, chain_id, &recipient, 1); insert_tx_in_mempool( &db_tx, 
transfer_tx, @@ -12938,7 +13369,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) } // Wait for the first block to be accepted. - wait_for(20, || { + wait_for(30, || { let blocks = test_observer::get_blocks().len(); Ok(blocks > blocks_before) }) @@ -13064,7 +13495,7 @@ fn larger_mempool() { let recipient_sk = StacksPrivateKey::random(); let recipient_addr = tests::to_addr(&recipient_sk); let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( sender_sk, *nonce, transfer_fee, @@ -13120,7 +13551,7 @@ fn larger_mempool() { let sender_addr = tests::to_addr(sender_sk); let recipient_sk = StacksPrivateKey::random(); let recipient_addr = tests::to_addr(&recipient_sk); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( sender_sk, *nonce, transfer_fee, @@ -13180,8 +13611,14 @@ fn larger_mempool() { for _ in 0..25 { for (sender_sk, nonce) in senders.iter_mut() { let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = - make_stacks_transfer(sender_sk, *nonce, transfer_fee, chain_id, &recipient, 1); + let transfer_tx = make_stacks_transfer_serialized( + sender_sk, + *nonce, + transfer_fee, + chain_id, + &recipient, + 1, + ); insert_tx_in_mempool( &db_tx, transfer_tx, @@ -13270,6 +13707,13 @@ fn signers_send_state_message_updates() { }, ); + let all_signers: Vec<_> = miners + .signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let rl1_skip_commit_op = miners .signer_test .running_nodes @@ -13320,11 +13764,14 @@ fn signers_send_state_message_updates() { info!("------------------------- Confirm Miner 1 is the Active Miner in Update -------------------------"); // Verify that signers first sent a bitcoin block update + wait_for_state_machine_update( 60, &get_burn_consensus_hash(), starting_burn_height + 1, Some((miner_pkh_1, starting_peer_height)), + 
&all_signers, + SUPPORTED_SIGNER_PROTOCOL_VERSION, ) .expect("Timed out waiting for signers to send a state update"); @@ -13353,6 +13800,8 @@ fn signers_send_state_message_updates() { &get_burn_consensus_hash(), starting_burn_height + 2, Some((miner_pkh_2, starting_peer_height + 1)), + &all_signers, + SUPPORTED_SIGNER_PROTOCOL_VERSION, ) .expect("Timed out waiting for signers to send their state update"); @@ -13371,6 +13820,8 @@ fn signers_send_state_message_updates() { &get_burn_consensus_hash(), starting_burn_height + 2, Some((miner_pkh_1, starting_peer_height)), + &all_signers, + SUPPORTED_SIGNER_PROTOCOL_VERSION, ) .expect("Timed out waiting for signers to send their state update"); @@ -13439,7 +13890,7 @@ fn verify_mempool_caches() { TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers); // submit a tx so that the miner will mine a block - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 0, send_fee, @@ -13513,7 +13964,7 @@ fn verify_mempool_caches() { info!("Nonce cache has the expected nonce after successfully mining block"); - let transfer_tx = make_stacks_transfer( + let transfer_tx = make_stacks_transfer_serialized( &sender_sk, 1, send_fee, @@ -13763,3 +14214,389 @@ fn burn_block_height_behavior() { signer_test.shutdown(); } + +#[test] +#[ignore] +fn reorging_signers_capitulate_to_nonreorging_signers_during_tenure_fork() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let num_txs = 5; + + let disallow_reorg_proposal_timeout = Duration::from_secs(10); + let allow_reorg_proposal_timeout = Duration::from_secs(360); + let post_btc_block_pause = + disallow_reorg_proposal_timeout.saturating_add(Duration::from_secs(1)); + let mut miners = MultipleMinerTest::new_with_config_modifications( + num_signers, + num_txs, + |config| { + config.first_proposal_burn_block_timing = if config.endpoint.port() % 2 == 1 { + // 2/5 or 40% of signers will allow the reorg + 
allow_reorg_proposal_timeout + } else { + // 3/5 or 60% of signers will reject the reorg + disallow_reorg_proposal_timeout + }; + // don't allow signers to post signed blocks (limits the amount of fault injection we + // need) + TEST_SKIP_BLOCK_BROADCAST.set(true); + }, + |config| { + config.burnchain.pox_reward_length = Some(30); + config.miner.tenure_cost_limit_per_block_percentage = None; + // this test relies on the miner submitting these timed out commits. + // the test still passes without this override, but the default timeout + // makes the test take longer than strictly necessary + config.miner.block_commit_delay = Duration::from_secs(10); + }, + |_| {}, + ); + let rl1_skip_commit_op = miners + .signer_test + .running_nodes + .counters + .naka_skip_commit_op + .clone(); + + let rl2_skip_commit_op = miners.rl2_counters.naka_skip_commit_op.clone(); + + let (conf_1, _) = miners.get_node_configs(); + let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes(); + let (miner_pk_1, miner_pk_2) = miners.get_miner_public_keys(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + miners.boot_to_epoch_3(); + + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf_1.is_mainnet(), + conf_1.burnchain.chain_id, + &conf_1.get_chainstate_path_str(), + None, + ) + .unwrap(); + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + rl1_skip_commit_op.set(true); + + info!("------------------------- Miner 1 Wins Normal Tenure A -------------------------"); + miners + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 30) + .expect("Failed to mine BTC block followed by tenure change tx"); + verify_sortition_winner(&sortdb, &miner_pkh_1); + + info!("------------------------- Miner 1 Mines Another Block -------------------------"); + miners + .send_and_mine_transfer_tx(30) + .expect("Failed to mine tx"); + + let tip_a = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + info!("------------------------- Pause Block Proposals -------------------------"); + // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted + TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk_1]); + TEST_BLOCK_ANNOUNCE_STALL.set(true); + + miners.submit_commit_miner_1(&sortdb); + + info!("------------------------- Miner 1 Wins Tenure B -------------------------"); + miners + .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 30) + .expect("Failed to mine BTC block"); + // assure we have a successful sortition that miner 1 won + verify_sortition_winner(&sortdb, &miner_pkh_1); + + info!("----------------- Miner 2 Submits Block Commit for Tenure C Before Any Tenure B Blocks Produced ------------------"); + miners.submit_commit_miner_2(&sortdb); + + info!("----------------------------- 
Resume Block Production for Tenure B -----------------------------"); + + let stacks_height_before = miners.get_peer_stacks_tip_height(); + TEST_BROADCAST_PROPOSAL_STALL.set(vec![]); + + let tenure_b_block_proposal = + wait_for_block_proposal(30, stacks_height_before + 1, &miner_pk_1) + .expect("Timed out waiting for Tenure B block to be proposed"); + info!("Tenure B broadcasted a block. Wait {post_btc_block_pause:?}, issue the next bitcoin block, and un-stall block commits."); + thread::sleep(post_btc_block_pause); + + // the block will be stored, not processed, so load it out of staging + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("Failed to get sortition tip"); + + let tenure_b_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) + .unwrap() + .first() + .cloned() + .unwrap(); + + // synthesize a StacksHeaderInfo from this unprocessed block + let tip_b = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Nakamoto(tenure_b_block.header.clone()), + microblock_tail: None, + stacks_block_height: tenure_b_block.header.chain_length, + index_root: TrieHash([0x00; 32]), // we can't know this yet since the block hasn't been processed + consensus_hash: tenure_b_block.header.consensus_hash, + burn_header_hash: tip_sn.burn_header_hash, + burn_header_height: tip_sn.block_height as u32, + burn_header_timestamp: tip_sn.burn_header_timestamp, + anchored_block_size: tenure_b_block.serialize_to_vec().len() as u64, + burn_view: Some(tenure_b_block.header.consensus_hash), + }; + + // Block B was built atop block A + assert_ne!(tip_b.index_block_hash(), tip_a.index_block_hash()); + assert_eq!(tip_b.stacks_block_height, tip_a.stacks_block_height + 1); + assert_eq!( + tenure_b_block.header.parent_block_id, + tip_a.index_block_hash() + ); + assert_ne!(tip_b, tip_a); + + let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let burn_height_before = 
chain_tip.block_height;
+
+    // allow B to process, so it'll be distinct from C
+    TEST_BLOCK_ANNOUNCE_STALL.set(false);
+    sleep_ms(1000);
+
+    info!("--------------- Miner 2 Wins Tenure C With Old Block Commit ----------------");
+    info!("Prevent Miner 1 from extending at first");
+    TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk_1]);
+
+    test_observer::clear();
+
+    miners
+        .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 60)
+        .expect("Failed to mine bitcoin block");
+    // assure we have a successful sortition that miner 2 won
+    verify_sortition_winner(&sortdb, &miner_pkh_2);
+
+    // Note tenure C block will attempt to reorg the prior miner so its expected height should be the same as prior to block B processing.
+    let tenure_c_block_proposal =
+        wait_for_block_proposal(30, tip_b.stacks_block_height, &miner_pk_2)
+            .expect("Timed out waiting for miner 2's Tenure C block");
+
+    assert_ne!(tenure_c_block_proposal, tenure_b_block_proposal);
+
+    let tip_c = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+
+    assert_eq!(
+        tip_b.index_block_hash(),
+        tip_c.get_canonical_stacks_block_id()
+    );
+    assert_ne!(tip_c.consensus_hash, tip_a.consensus_hash);
+    assert_ne!(tip_c.burn_header_hash, tip_a.burn_header_hash);
+    assert_eq!(tip_c.block_height, burn_height_before + 1);
+
+    wait_for(30, || {
+        let mut nmb_matches = 0;
+        let stackerdb_events = test_observer::get_stackerdb_chunks();
+        for chunk in stackerdb_events
+            .into_iter()
+            .flat_map(|chunk| chunk.modified_slots)
+        {
+            let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                .expect("Failed to deserialize SignerMessage");
+            let SignerMessage::StateMachineUpdate(update) = message else {
+                continue;
+            };
+            let (burn_block, burn_block_height, current_miner_pkh) =
+                match (SUPPORTED_SIGNER_PROTOCOL_VERSION, update.content) {
+                    (
+                        0,
+                        StateMachineUpdateContent::V0 {
+                            burn_block,
+                            burn_block_height,
+                            current_miner:
+                                StateMachineUpdateMinerState::ActiveMiner {
+                                    current_miner_pkh, ..
+ }, + .. + }, + ) + | ( + 1, + StateMachineUpdateContent::V1 { + burn_block, + burn_block_height, + current_miner: + StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh, .. + }, + .. + }, + ) => (burn_block, burn_block_height, current_miner_pkh), + _ => continue, + }; + if burn_block == tenure_c_block_proposal.header.consensus_hash + && burn_block_height == burn_height_before + 1 + && current_miner_pkh == miner_pkh_1 + { + nmb_matches += 1; + } + } + Ok(nmb_matches == 5) + }) + .unwrap(); + + info!("--------------- Miner 1 Extends Tenure B over Tenure C ---------------"); + TEST_BROADCAST_PROPOSAL_STALL.set(vec![]); + let tenure_extend_block = + wait_for_block_proposal(30, tip_b.stacks_block_height + 1, &miner_pk_1) + .expect("Timed out waiting for miner 1's tenure extend block"); + wait_for_block_acceptance_from_signers( + 30, + &tenure_extend_block.header.signer_signature_hash(), + &miners.signer_test.signer_test_pks(), + ) + .expect("Expected all signers to accept the extend"); + + info!("------------------------- Miner 1 Mines Another Block -------------------------"); + miners + .send_and_mine_transfer_tx(30) + .expect("Failed to mine tx"); + + info!("------------------------- Miner 2 Mines the Next Tenure -------------------------"); + miners.submit_commit_miner_2(&sortdb); + + miners + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 30) + .expect("Failed to mine BTC block followed by tenure change tx"); + + // assure we have a successful sortition that miner 2 won and it had a block found tenure change + verify_sortition_winner(&sortdb, &miner_pkh_2); + + miners.shutdown(); + + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A + assert_eq!( + tenure_c_block_proposal.header.parent_block_id, + tip_a.index_block_hash() + ); +} + +/// Tests that signers are able to upgrade or downgrade their active protocol version numbers based on +/// the majority of 
other signers' current local supported version numbers
+#[test]
+#[ignore]
+fn rollover_signer_protocol_version() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+    let num_signers = 5;
+
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(num_signers, vec![]);
+    signer_test.boot_to_epoch_3();
+
+    let conf = signer_test.running_nodes.conf.clone();
+
+    let burnchain = conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+    let all_signers = signer_test.signer_test_pks();
+    info!(
+        "------------------------- Miner Tenure Starts and Mines Block N-------------------------"
+    );
+    test_observer::clear();
+    signer_test.mine_and_verify_confirmed_naka_block(Duration::from_secs(30), num_signers, true);
+
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    let burn_consensus_hash = tip.consensus_hash;
+    let burn_height = tip.block_height;
+
+    info!("------------------------- Confirm Miner is the Active Miner in Update and All Signers Are Using Protocol Number {SUPPORTED_SIGNER_PROTOCOL_VERSION} -------------------------");
+    // Verify that signers first sent a bitcoin block update
+    wait_for_state_machine_update(
+        60,
+        &burn_consensus_hash,
+        burn_height,
+        None,
+        &all_signers,
+        SUPPORTED_SIGNER_PROTOCOL_VERSION,
+    )
+    .expect("Timed out waiting for signers to send a state update for block N");
+
+    test_observer::clear();
+    let downgraded_version = SUPPORTED_SIGNER_PROTOCOL_VERSION.saturating_sub(1);
+    info!("------------------------- Downgrading Signer Versions to {downgraded_version} for 20 Percent of Signers -------------------------");
+    // Take a non blocking minority of signers (20%) and downgrade their version number
+    let pinned_signers: Vec<_> = all_signers
+        .iter()
+        .take(num_signers * 2 / 10)
+        .cloned()
+        .collect();
+    let pinned_signers_versions: HashMap<_, _> = pinned_signers
+        .iter()
+        .map(|signer| (*signer, downgraded_version))
+        .collect();
+
TEST_PIN_SUPPORTED_SIGNER_PROTOCOL_VERSION.set(pinned_signers_versions);
+
+    info!("------------------------- Confirm Signers Still Manage to Sign a Stacks Block With Misaligned Version Numbers -------------------------");
+    signer_test.mine_and_verify_confirmed_naka_block(Duration::from_secs(30), num_signers, true);
+
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    let burn_consensus_hash = tip.consensus_hash;
+    let burn_height = tip.block_height;
+    // Only one signer is downgraded so the active protocol version remains the same.
+    wait_for_state_machine_update(
+        60,
+        &burn_consensus_hash,
+        burn_height,
+        None,
+        &all_signers,
+        SUPPORTED_SIGNER_PROTOCOL_VERSION,
+    )
+    .expect("Timed out waiting for signers to send their downgraded state update for block N+1");
+
+    test_observer::clear();
+    info!("------------------------- Confirm Signer Version Downgrades Fully Once 70 percent of Signers Downgrade -------------------------");
+    let pinned_signers: Vec<_> = all_signers
+        .iter()
+        .take(num_signers * 7 / 10)
+        .cloned()
+        .collect();
+    let pinned_signers_versions: HashMap<_, _> = pinned_signers
+        .iter()
+        .map(|signer| (*signer, downgraded_version))
+        .collect();
+    TEST_PIN_SUPPORTED_SIGNER_PROTOCOL_VERSION.set(pinned_signers_versions);
+
+    info!("------------------------- Confirm Signers Sign The Block After Complete Downgraded Version Number -------------------------");
+    signer_test.mine_and_verify_confirmed_naka_block(Duration::from_secs(30), num_signers, true);
+
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    let burn_consensus_hash = tip.consensus_hash;
+    let burn_height = tip.block_height;
+    // Confirm ALL signers downgrade their supported version and then send a corresponding message in that version message
+    wait_for_state_machine_update(
+        60,
+        &burn_consensus_hash,
+        burn_height,
+        None,
+        &all_signers,
+        downgraded_version,
+    )
+    .expect("Timed out waiting for signers to send their state 
update for block N+2"); + + info!("------------------------- Reset All Signers to {SUPPORTED_SIGNER_PROTOCOL_VERSION} -------------------------"); + TEST_PIN_SUPPORTED_SIGNER_PROTOCOL_VERSION.set(HashMap::new()); + test_observer::clear(); + info!("------------------------- Confirm Signers Sign The Block After Upgraded Version Number -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(Duration::from_secs(30), num_signers, true); + + signer_test.shutdown(); +} diff --git a/versions.toml b/versions.toml index a75a85190e..fc1401db85 100644 --- a/versions.toml +++ b/versions.toml @@ -1,4 +1,4 @@ # Update these values when a new release is created. # `stacks-common/build.rs` will automatically update `versions.rs` with these values. -stacks_node_version = "3.1.0.0.8-rc1" -stacks_signer_version = "3.1.0.0.8.0-rc1" +stacks_node_version = "3.1.0.0.8" +stacks_signer_version = "3.1.0.0.8.1"