From 6501fd49ba77311b7b26e1f628b88e22d0358138 Mon Sep 17 00:00:00 2001 From: ian Date: Tue, 13 Jun 2023 14:45:30 +0800 Subject: [PATCH 001/267] chore: bump to v0.112.0-pre --- Cargo.lock | 136 +++++++++---------- Cargo.toml | 8 +- README.md | 2 +- benches/Cargo.toml | 36 ++--- block-filter/Cargo.toml | 14 +- chain/Cargo.toml | 52 +++---- ckb-bin/Cargo.toml | 46 +++---- db-migration/Cargo.toml | 12 +- db-schema/Cargo.toml | 2 +- db/Cargo.toml | 10 +- error/Cargo.toml | 4 +- freezer/Cargo.toml | 12 +- miner/Cargo.toml | 20 +-- network/Cargo.toml | 22 +-- notify/Cargo.toml | 12 +- pow/Cargo.toml | 6 +- resource/Cargo.toml | 6 +- rpc/Cargo.toml | 60 ++++---- script/Cargo.toml | 24 ++-- script/fuzz/Cargo.toml | 10 +- shared/Cargo.toml | 38 +++--- spec/Cargo.toml | 26 ++-- store/Cargo.toml | 20 +-- sync/Cargo.toml | 56 ++++---- test/Cargo.toml | 40 +++--- traits/Cargo.toml | 4 +- tx-pool/Cargo.toml | 44 +++--- util/Cargo.toml | 4 +- util/app-config/Cargo.toml | 24 ++-- util/build-info/Cargo.toml | 2 +- util/chain-iter/Cargo.toml | 6 +- util/channel/Cargo.toml | 2 +- util/constant/Cargo.toml | 2 +- util/crypto/Cargo.toml | 4 +- util/dao/Cargo.toml | 16 +-- util/dao/utils/Cargo.toml | 6 +- util/fixed-hash/Cargo.toml | 6 +- util/fixed-hash/core/Cargo.toml | 2 +- util/fixed-hash/macros/Cargo.toml | 4 +- util/hash/Cargo.toml | 2 +- util/indexer/Cargo.toml | 20 +-- util/instrument/Cargo.toml | 12 +- util/jsonrpc-types/Cargo.toml | 4 +- util/launcher/Cargo.toml | 68 +++++----- util/launcher/migration-template/Cargo.toml | 2 +- util/light-client-protocol-server/Cargo.toml | 32 ++--- util/logger-config/Cargo.toml | 2 +- util/logger-service/Cargo.toml | 10 +- util/logger/Cargo.toml | 2 +- util/memory-tracker/Cargo.toml | 8 +- util/metrics-config/Cargo.toml | 2 +- util/metrics-service/Cargo.toml | 12 +- util/metrics/Cargo.toml | 2 +- util/multisig/Cargo.toml | 8 +- util/network-alert/Cargo.toml | 30 ++-- util/occupied-capacity/Cargo.toml | 6 +- util/occupied-capacity/core/Cargo.toml | 2 +- util/occupied-capacity/macros/Cargo.toml | 4 +- util/proposal-table/Cargo.toml | 8 +- util/rational/Cargo.toml | 2 +- util/reward-calculator/Cargo.toml | 20 +-- util/runtime/Cargo.toml | 8 +- util/rust-unstable-port/Cargo.toml | 2 +- util/snapshot/Cargo.toml | 18 +-- util/spawn/Cargo.toml | 2 +- util/stop-handler/Cargo.toml | 6 +- util/systemtime/Cargo.toml | 2 +- util/test-chain-utils/Cargo.toml | 24 ++-- util/types/Cargo.toml | 16 +-- verification/Cargo.toml | 28 ++-- verification/contextual/Cargo.toml | 38 +++--- verification/traits/Cargo.toml | 4 +- wasm-build-test/Cargo.toml | 6 +- 73 files changed, 606 insertions(+), 606 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3ba4263737..8644af07c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -395,7 +395,7 @@ dependencies = [ [[package]] name = "ckb" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-bin", "ckb-build-info", @@ -404,7 +404,7 @@ dependencies = [ [[package]] name = "ckb-app-config" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-build-info", "ckb-chain-spec", @@ -433,7 +433,7 @@ dependencies = [ [[package]] name = "ckb-async-runtime" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-logger", "ckb-spawn", @@ -443,7 +443,7 @@ dependencies = [ [[package]] name = "ckb-benches" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-chain", @@ -471,7 +471,7 @@ dependencies = [ [[package]] name = "ckb-bin" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "base64", "ckb-app-config", @@ -511,7 +511,7 @@ dependencies = [ [[package]] name = "ckb-block-filter" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-async-runtime", "ckb-logger", @@ -523,11 +523,11 @@ dependencies = [ [[package]] name = "ckb-build-info" -version = "0.111.0-pre" +version = "0.112.0-pre" [[package]] name = "ckb-chain" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-chain-spec", @@ -561,7 +561,7 @@ dependencies = [ [[package]] name = "ckb-chain-iter" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-store", "ckb-types", @@ -569,7 +569,7 @@ dependencies = [ [[package]] name = "ckb-chain-spec" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-constant", "ckb-crypto", @@ -589,18 +589,18 @@ dependencies = [ [[package]] name = "ckb-channel" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "crossbeam-channel", ] [[package]] name = "ckb-constant" -version = "0.111.0-pre" +version = "0.112.0-pre" [[package]] name = "ckb-crypto" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-fixed-hash", "faster-hex", @@ -612,7 +612,7 @@ dependencies = [ [[package]] name = "ckb-dao" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "byteorder", "ckb-chain-spec", @@ -627,7 +627,7 @@ dependencies = [ [[package]] name = "ckb-dao-utils" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "byteorder", "ckb-error", @@ -636,7 +636,7 @@ dependencies = [ [[package]] name = "ckb-db" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-db-schema", @@ -649,7 +649,7 @@ dependencies = [ [[package]] name = "ckb-db-migration" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-db", @@ -663,11 +663,11 @@ dependencies = [ [[package]] name = "ckb-db-schema" -version = "0.111.0-pre" +version = "0.112.0-pre" [[package]] name = "ckb-error" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "anyhow", "ckb-occupied-capacity", @@ -677,7 +677,7 @@ dependencies = [ [[package]] name = "ckb-fixed-hash" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-fixed-hash-core", "ckb-fixed-hash-macros", @@ -685,7 +685,7 @@ dependencies = [ [[package]] name = "ckb-fixed-hash-core" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "faster-hex", "serde", @@ -695,7 +695,7 @@ dependencies = [ [[package]] name = "ckb-fixed-hash-macros" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-fixed-hash-core", "proc-macro2", @@ -705,7 +705,7 @@ dependencies = [ [[package]] name = "ckb-freezer" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-error", "ckb-logger", @@ -721,7 +721,7 @@ dependencies = [ [[package]] name = "ckb-hash" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "blake2b-ref", "blake2b-rs", @@ -729,7 +729,7 @@ dependencies = [ [[package]] name = "ckb-indexer" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -751,7 +751,7 @@ dependencies = [ [[package]] name = "ckb-instrument" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-chain", "ckb-chain-iter", @@ -764,7 +764,7 @@ dependencies = [ [[package]] name = "ckb-jsonrpc-types" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-types", "faster-hex", @@ -777,7 +777,7 @@ dependencies = [ [[package]] name = "ckb-launcher" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -833,7 +833,7 @@ dependencies = [ [[package]] name = "ckb-light-client-protocol-server" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-chain", @@ -856,14 +856,14 @@ dependencies = [ [[package]] name = "ckb-logger" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "log", ] [[package]] name = "ckb-logger-config" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "serde", "toml", @@ -871,7 +871,7 @@ dependencies = [ [[package]] name = "ckb-logger-service" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "backtrace", "ckb-channel", @@ -890,7 +890,7 @@ dependencies = [ [[package]] name = "ckb-memory-tracker" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-db", "ckb-logger", @@ -912,7 +912,7 @@ dependencies = [ [[package]] name = "ckb-metrics" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "once_cell", "prometheus", @@ -921,14 +921,14 @@ dependencies = [ [[package]] name = "ckb-metrics-config" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "serde", ] [[package]] name = "ckb-metrics-service" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-async-runtime", "ckb-logger", @@ -941,7 +941,7 @@ dependencies = [ [[package]] name = "ckb-migration-template" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "quote", "syn", @@ -949,7 +949,7 @@ dependencies = [ [[package]] name = "ckb-miner" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "base64", "ckb-app-config", @@ -978,7 +978,7 @@ dependencies = [ [[package]] name = "ckb-multisig" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-crypto", "ckb-error", @@ -988,7 +988,7 @@ dependencies = [ [[package]] name = "ckb-network" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "bitflags", "bloom-filters", @@ -1026,7 +1026,7 @@ dependencies = [ [[package]] name = "ckb-network-alert" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -1050,7 +1050,7 @@ dependencies = [ [[package]] name = "ckb-notify" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -1062,7 +1062,7 @@ dependencies = [ [[package]] name = "ckb-occupied-capacity" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-occupied-capacity-core", "ckb-occupied-capacity-macros", @@ -1070,14 +1070,14 @@ dependencies = [ [[package]] name = "ckb-occupied-capacity-core" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "serde", ] [[package]] name = "ckb-occupied-capacity-macros" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-occupied-capacity-core", "quote", @@ -1086,7 +1086,7 @@ dependencies = [ [[package]] name = "ckb-pow" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "byteorder", "ckb-hash", @@ -1098,7 +1098,7 @@ dependencies = [ [[package]] name = "ckb-proposal-table" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-chain-spec", "ckb-logger", @@ -1107,7 +1107,7 @@ dependencies = [ [[package]] name = "ckb-rational" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "numext-fixed-uint", "proptest", @@ -1116,7 +1116,7 @@ dependencies = [ [[package]] name = "ckb-resource" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-system-scripts", "ckb-types", @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "ckb-reward-calculator" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-chain-spec", "ckb-dao", @@ -1157,7 +1157,7 @@ dependencies = [ [[package]] name = "ckb-rpc" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-chain", @@ -1206,14 +1206,14 @@ dependencies = [ [[package]] name = "ckb-rust-unstable-port" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "is_sorted", ] [[package]] name = "ckb-script" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "byteorder", "ckb-chain-spec", @@ -1238,7 +1238,7 @@ dependencies = [ [[package]] name = "ckb-shared" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "arc-swap", "ckb-async-runtime", @@ -1262,7 +1262,7 @@ dependencies = [ [[package]] name = "ckb-snapshot" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "arc-swap", "ckb-chain-spec", @@ -1278,11 +1278,11 @@ dependencies = [ [[package]] name = "ckb-spawn" -version = "0.111.0-pre" +version = "0.112.0-pre" [[package]] name = "ckb-stop-handler" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-channel", "ckb-logger", @@ -1292,7 +1292,7 @@ dependencies = [ [[package]] name = "ckb-store" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-chain-spec", @@ -1310,7 +1310,7 @@ dependencies = [ [[package]] name = "ckb-sync" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "bitflags", "ckb-app-config", @@ -1367,11 +1367,11 @@ dependencies = [ [[package]] name = "ckb-systemtime" -version = "0.111.0-pre" +version = "0.112.0-pre" [[package]] name = "ckb-test-chain-utils" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-chain-spec", "ckb-dao-utils", @@ -1389,14 +1389,14 @@ dependencies = [ [[package]] name = "ckb-traits" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-types", ] [[package]] name = "ckb-tx-pool" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-app-config", "ckb-async-runtime", @@ -1432,7 +1432,7 @@ dependencies = [ [[package]] name = "ckb-types" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "bit-vec", "bytes 1.4.0", @@ -1456,7 +1456,7 @@ dependencies = [ [[package]] name = "ckb-util" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-fixed-hash", "linked-hash-map", @@ -1467,7 +1467,7 @@ dependencies = [ [[package]] name = "ckb-verification" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-chain-spec", "ckb-dao", @@ -1487,7 +1487,7 @@ dependencies = [ [[package]] name = "ckb-verification-contextual" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "ckb-async-runtime", "ckb-chain", @@ -1514,7 +1514,7 @@ dependencies = [ [[package]] name = "ckb-verification-traits" -version = "0.111.0-pre" +version = "0.112.0-pre" dependencies = [ "bitflags", "ckb-error", diff --git a/Cargo.toml b/Cargo.toml index 584f9cc985..4852aeae94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,11 +11,11 @@ repository = "https://github.com/nervosnetwork/ckb" rust-version = "1.67.1" [build-dependencies] -ckb-build-info = { path = "util/build-info", version = "= 0.111.0-pre" } +ckb-build-info = { path = "util/build-info", version = "= 0.112.0-pre" } [dependencies] -ckb-build-info = { path = "util/build-info", version = "= 0.111.0-pre" } -ckb-bin = { path = "ckb-bin", version = "= 0.111.0-pre" } +ckb-build-info = { path = "util/build-info", version = "= 0.112.0-pre" } +ckb-bin = { path = "ckb-bin", version = "= 0.112.0-pre" } [dev-dependencies] diff --git a/README.md b/README.md index ae9c4a5133..d18a547c55 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # [Nervos CKB](https://www.nervos.org/) - The Common Knowledge Base -[![Version](https://img.shields.io/badge/version-0.111.0--pre-orange.svg)](https://github.com/nervosnetwork/ckb/releases) +[![Version](https://img.shields.io/badge/version-0.112.0--pre-orange.svg)](https://github.com/nervosnetwork/ckb/releases) [![Nervos Talk](https://img.shields.io/badge/discuss-on%20Nervos%20Talk-3CC68A.svg)](https://talk.nervos.org/t/where-to-discuss-ckb-and-how-to-ask-for-support/6024) master | develop diff --git a/benches/Cargo.toml b/benches/Cargo.toml index f0d85d1b76..7e36e114f7 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-benches" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -13,26 +13,26 @@ repository = "https://github.com/nervosnetwork/ckb" [dev-dependencies] criterion = "0.4" -ckb-chain = { path = "../chain", version = "= 0.111.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-shared = { path = "../shared", version = "= 0.111.0-pre" } -ckb-store = { path = "../store", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } +ckb-chain = { path = "../chain", version = "= 0.112.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.112.0-pre" } +ckb-store = { path = "../store", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } rand = "0.7" -ckb-hash = {path = "../util/hash", version = "= 0.111.0-pre"} -ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.111.0-pre" } -ckb-dao = { path = "../util/dao", version = "= 0.111.0-pre" } +ckb-hash = {path = "../util/hash", version = "= 0.112.0-pre"} +ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.112.0-pre" } +ckb-dao = { path = "../util/dao", version = "= 0.112.0-pre" } ckb-system-scripts = { version = "= 0.5.4" } lazy_static = "1.3.0" -ckb-crypto = { path = "../util/crypto", version = "= 0.111.0-pre" } -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.111.0-pre" } -ckb-verification = { path = "../verification", version = "= 0.111.0-pre" } -ckb-verification-traits = { path = "../verification/traits", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-resource = { path = "../resource", version = "= 0.111.0-pre" } -ckb-network = { path = "../network", version = "= 0.111.0-pre" } -ckb-launcher = { path = "../util/launcher", version = "= 0.111.0-pre" } +ckb-crypto = { path = "../util/crypto", version = "= 0.112.0-pre" } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.112.0-pre" } +ckb-verification = { path = "../verification", version = "= 0.112.0-pre" } +ckb-verification-traits = { path = "../verification/traits", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-resource = { path = "../resource", version = "= 0.112.0-pre" } +ckb-network = { path = "../network", version = "= 0.112.0-pre" } +ckb-launcher = { path = "../util/launcher", version = "= 0.112.0-pre" } tempfile.workspace = true [[bench]] diff --git a/block-filter/Cargo.toml b/block-filter/Cargo.toml index 1ff663848f..4b202db1d9 100644 --- a/block-filter/Cargo.toml +++ b/block-filter/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-block-filter" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,9 +11,9 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ckb-store = { path = "../store", version = "= 0.111.0-pre" } -ckb-shared = { path = "../shared", version = "= 0.111.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.111.0-pre" } +ckb-store = { path = "../store", version = "= 0.112.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.112.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.112.0-pre" } diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 18623e6437..9279f5b5ee 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-chain" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,37 +9,37 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-metrics = { path = "../util/metrics", version = "= 0.111.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-shared = { path = "../shared", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } -ckb-store = { path = "../store", version = "= 0.111.0-pre" } -ckb-verification = { path = "../verification", version = "= 0.111.0-pre" } -ckb-verification-contextual = { path = "../verification/contextual", version = "= 0.111.0-pre" } -ckb-verification-traits = { path = "../verification/traits", version = "= 0.111.0-pre" } -ckb-systemtime = { path = "../util/systemtime", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.111.0-pre" } -ckb-dao = { path = "../util/dao", version = "= 0.111.0-pre" } -ckb-proposal-table = { path = "../util/proposal-table", version = "= 0.111.0-pre" } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-rust-unstable-port = { path = "../util/rust-unstable-port", version = "= 0.111.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.111.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-metrics = { path = "../util/metrics", version = "= 0.112.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } +ckb-store = { path = "../store", version = "= 0.112.0-pre" } +ckb-verification = { path = "../verification", version = "= 0.112.0-pre" } +ckb-verification-contextual = { path = "../verification/contextual", version = "= 0.112.0-pre" } +ckb-verification-traits = { path = "../verification/traits", version = "= 0.112.0-pre" } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } +ckb-dao = { path = "../util/dao", version = "= 0.112.0-pre" } +ckb-proposal-table = { path = "../util/proposal-table", version = "= 0.112.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-rust-unstable-port = { path = "../util/rust-unstable-port", version = "= 0.112.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.112.0-pre" } faux = { version = "^0.1", optional = true } ckb-merkle-mountain-range = "0.5.2" [dev-dependencies] -ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.111.0-pre" } -ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.111.0-pre" } -ckb-tx-pool = { path = "../tx-pool", version = "= 0.111.0-pre", features = ["internal"] } -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.111.0-pre" } -ckb-network = { path = "../network", version = "= 0.111.0-pre" } -ckb-launcher = { path = "../util/launcher", version = "= 0.111.0-pre" } +ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.112.0-pre" } +ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.112.0-pre" } +ckb-tx-pool = { path = "../tx-pool", version = "= 0.112.0-pre", features = ["internal"] } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.112.0-pre" } +ckb-network = { path = "../network", version = "= 0.112.0-pre" } +ckb-launcher = { path = "../util/launcher", version = "= 0.112.0-pre" } lazy_static = "1.4" tempfile.workspace = true -ckb-systemtime = { path = "../util/systemtime", version = "= 0.111.0-pre" ,features = ["enable_faketime"]} +ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre" ,features = ["enable_faketime"]} [features] default = [] diff --git a/ckb-bin/Cargo.toml b/ckb-bin/Cargo.toml index 3e77ace54c..bdd04dd5be 100644 --- a/ckb-bin/Cargo.toml +++ b/ckb-bin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-bin" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -14,29 +14,29 @@ serde = { version = "1.0", features = ["derive"] } serde_json = { version = "1.0" } serde_plain = "0.3.0" toml = "0.5" -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-logger-service = { path = "../util/logger-service", version = "= 0.111.0-pre" } -ckb-metrics-service = { path = "../util/metrics-service", version = "= 0.111.0-pre" } -ckb-util = { path = "../util", version = "= 0.111.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.111.0-pre" } -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.111.0-pre" } -ckb-chain = { path = "../chain", version = "= 0.111.0-pre" } -ckb-shared = { path = "../shared", version = "= 0.111.0-pre" } -ckb-store = { path = "../store", version = "= 0.111.0-pre" } -ckb-chain-spec = {path = "../spec", version = "= 0.111.0-pre"} -ckb-miner = { path = "../miner", version = "= 0.111.0-pre" } -ckb-network = { path = "../network", version = "= 0.111.0-pre"} -ckb-resource = { path = "../resource", version = "= 0.111.0-pre"} +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-logger-service = { path = "../util/logger-service", version = "= 0.112.0-pre" } +ckb-metrics-service = { path = "../util/metrics-service", version = "= 0.112.0-pre" } +ckb-util = { path = "../util", version = "= 0.112.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.112.0-pre" } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.112.0-pre" } +ckb-chain = { path = "../chain", version = "= 0.112.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.112.0-pre" } +ckb-store = { path = "../store", version = "= 0.112.0-pre" } +ckb-chain-spec = {path = "../spec", version = "= 0.112.0-pre"} +ckb-miner = { path = "../miner", version = "= 0.112.0-pre" } +ckb-network = { path = "../network", version = "= 0.112.0-pre"} +ckb-resource = { path = "../resource", version = "= 0.112.0-pre"} ctrlc = { version = "3.1", features = ["termination"] } -ckb-instrument = { path = "../util/instrument", version = "= 0.111.0-pre", features = ["progress_bar"] } -ckb-build-info = { path = "../util/build-info", version = "= 0.111.0-pre" } -ckb-memory-tracker = { path = "../util/memory-tracker", version = "= 0.111.0-pre" } -ckb-chain-iter = { path = "../util/chain-iter", version = "= 0.111.0-pre" } -ckb-verification-traits = { path = "../verification/traits", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.111.0-pre" } -ckb-launcher = { path = "../util/launcher", version = "= 0.111.0-pre" } +ckb-instrument = { path = "../util/instrument", version = "= 0.112.0-pre", features = ["progress_bar"] } +ckb-build-info = { path = "../util/build-info", version = "= 0.112.0-pre" } +ckb-memory-tracker = { path = "../util/memory-tracker", version = "= 0.112.0-pre" } +ckb-chain-iter = { path = "../util/chain-iter", version = "= 0.112.0-pre" } +ckb-verification-traits = { path = "../verification/traits", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.112.0-pre" } +ckb-launcher = { path = "../util/launcher", version = "= 0.112.0-pre" } base64 = "0.21.0" tempfile.workspace = true rayon = "1.0" diff --git a/db-migration/Cargo.toml b/db-migration/Cargo.toml index c686fc81e0..b675d90e46 100644 --- a/db-migration/Cargo.toml +++ b/db-migration/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-db-migration" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,16 +11,16 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ckb-db = { path = "../db", version = "= 0.111.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } -ckb-db-schema = { path = "../db-schema", version = "= 0.111.0-pre" } +ckb-db = { path = "../db", version = "= 0.112.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.112.0-pre" } indicatif = "0.16" console = ">=0.9.1, <1.0.0" [dev-dependencies] tempfile.workspace = true -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } [features] portable = ["ckb-db/portable"] diff --git a/db-schema/Cargo.toml b/db-schema/Cargo.toml index eac2706dff..cea290e2cd 100644 --- a/db-schema/Cargo.toml +++ b/db-schema/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-db-schema" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" diff --git a/db/Cargo.toml b/db/Cargo.toml index fcd6b90ba3..d711b115fa 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-db" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,12 +9,12 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } libc = "0.2" rocksdb = { package = "ckb-rocksdb", version ="=0.20.0", features = ["snappy"], default-features = false } -ckb-db-schema = { path = "../db-schema", version = "= 0.111.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.112.0-pre" } [dev-dependencies] tempfile.workspace = true diff --git a/error/Cargo.toml b/error/Cargo.toml index b8b662f464..fd55ac5b82 100644 --- a/error/Cargo.toml +++ b/error/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-error" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,5 +11,5 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] thiserror = "1.0.22" anyhow = "1.0.34" -ckb-occupied-capacity = { path = "../util/occupied-capacity", version = "= 0.111.0-pre" } +ckb-occupied-capacity = { path = "../util/occupied-capacity", version = "= 0.112.0-pre" } derive_more = { version = "0.99.0", default-features = false, features = ["display"] } diff --git a/freezer/Cargo.toml b/freezer/Cargo.toml index 151f731671..8c9d5c2f4b 100644 --- a/freezer/Cargo.toml +++ b/freezer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-freezer" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -10,11 +10,11 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-util = { path = "../util", version = "= 0.111.0-pre" } -ckb-metrics = { path = "../util/metrics", version = "= 0.111.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-util = { path = "../util", version = "= 0.112.0-pre" } +ckb-metrics = { path = "../util/metrics", version = "= 0.112.0-pre" } fs2 = "0.4.3" fail = "0.4" snap = "1" diff --git a/miner/Cargo.toml b/miner/Cargo.toml index ee1d121767..09848c1db3 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-miner" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,23 +9,23 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.111.0-pre" } -ckb-hash = { path = "../util/hash", version = "= 0.111.0-pre" } -ckb-pow = { path = "../pow", version = "= 0.111.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.112.0-pre" } +ckb-hash = { path = "../util/hash", version = "= 0.112.0-pre" } +ckb-pow = { path = "../pow", version = "= 0.112.0-pre" } rand = "0.7" rand_distr = "0.3" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.111.0-pre" } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.112.0-pre" } hyper = { version = "0.14", features = ["client", "http2", "http1", "server"] } hyper-tls = "0.5" futures = "0.3" lru = "0.7.1" -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.111.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.112.0-pre" } indicatif = "0.16" console = ">=0.9.1, <1.0.0" eaglesong = "0.1" diff --git a/network/Cargo.toml b/network/Cargo.toml index d4e5ff8ce2..881efb687b 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-network" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,28 +11,28 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] rand = "0.7" serde = { version = "1.0", features = ["derive"] } -ckb-util = { path = "../util", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.111.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-metrics = {path = "../util/metrics", version = "= 0.111.0-pre"} +ckb-util = { path = "../util", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-metrics = {path = "../util/metrics", version = "= 0.112.0-pre"} tokio = { version = "1", features = ["sync", "macros"] } tokio-util = { version = "0.7", features = ["codec"] } futures = "0.3" -ckb-systemtime = {path = "../util/systemtime", version = "= 0.111.0-pre"} +ckb-systemtime = {path = "../util/systemtime", version = "= 0.112.0-pre"} lazy_static = { version = "1.3.0", optional = true } bs58 = { version = "0.4.0", optional = true } sentry = { version = "0.26.0", optional = true } faster-hex = { version = "0.6", optional = true } -ckb-hash = {path = "../util/hash", version = "= 0.111.0-pre"} +ckb-hash = {path = "../util/hash", version = "= 0.112.0-pre"} secp256k1 = {version = "0.24", features = ["recovery"], optional = true } trust-dns-resolver = { version = "0.20", optional = true } snap = "1" -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } ipnetwork = "0.18" serde_json = "1.0" bloom-filters = "0.1" -ckb-spawn = { path = "../util/spawn", version = "= 0.111.0-pre" } +ckb-spawn = { path = "../util/spawn", version = "= 0.112.0-pre" } socket2 = "0.4" bitflags = "1.0" @@ -48,7 +48,7 @@ criterion = "0.4" proptest = "1.0" num_cpus = "1.10" once_cell = "1.8.0" -ckb-systemtime = {path = "../util/systemtime", version = "= 0.111.0-pre", features = ["enable_faketime"]} +ckb-systemtime = {path = "../util/systemtime", version = "= 0.112.0-pre", features = ["enable_faketime"]} [[bench]] name = "peer_store" diff --git a/notify/Cargo.toml b/notify/Cargo.toml index 75cf5b129d..40c429e255 100644 --- a/notify/Cargo.toml +++ b/notify/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-notify" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -9,11 +9,11 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.111.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.112.0-pre" } tokio = { version = "1", features = ["sync"] } [dev-dependencies] diff --git a/pow/Cargo.toml b/pow/Cargo.toml index fe51fd47f8..38c9d16f7d 100644 --- a/pow/Cargo.toml +++ b/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-pow" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -10,8 +10,8 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] byteorder = "1.3.1" -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-hash = { path = "../util/hash", version = "= 0.111.0-pre"} +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-hash = { path = "../util/hash", version = "= 0.112.0-pre"} serde = { version = "1.0", features = ["derive"] } eaglesong = "0.1" log = "0.4" diff --git a/resource/Cargo.toml b/resource/Cargo.toml index a4541c7c9f..561d2179de 100644 --- a/resource/Cargo.toml +++ b/resource/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-resource" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -13,13 +13,13 @@ repository = "https://github.com/nervosnetwork/ckb" phf = "0.8.0" includedir = "0.6.0" serde = { version = "1.0", features = ["derive"] } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } ckb-system-scripts = { version = "= 0.5.4" } [build-dependencies] includedir_codegen = "0.6.0" walkdir = "2.1.4" -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } ckb-system-scripts = { version = "= 0.5.4" } [dev-dependencies] diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 090546b567..80a8c51fb9 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-rpc" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,19 +9,19 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-network = { path = "../network", version = "= 0.111.0-pre" } -ckb-notify = { path = "../notify", version = "= 0.111.0-pre" } -ckb-shared = { path = "../shared", version = "= 0.111.0-pre" } -ckb-store = { path = "../store", version = "= 0.111.0-pre" } -ckb-sync = { path = "../sync", version = "= 0.111.0-pre" } -ckb-chain = { path = "../chain", version = "= 0.111.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre"} -ckb-logger-service = { path = "../util/logger-service", version = "= 0.111.0-pre"} -ckb-network-alert = { path = "../util/network-alert", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-constant = { path = "../util/constant", version = "= 0.111.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-network = { path = "../network", version = "= 0.112.0-pre" } +ckb-notify = { path = "../notify", version = "= 0.112.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.112.0-pre" } +ckb-store = { path = "../store", version = "= 0.112.0-pre" } +ckb-sync = { path = "../sync", version = "= 0.112.0-pre" } +ckb-chain = { path = "../chain", version = "= 0.112.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre"} +ckb-logger-service = { path = "../util/logger-service", version = "= 0.112.0-pre"} +ckb-network-alert = { path = "../util/network-alert", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-constant = { path = "../util/constant", version = "= 0.112.0-pre" } jsonrpc-core = "18.0" jsonrpc-derive = "18.0" jsonrpc-http-server = "18.0" @@ -30,27 +30,27 @@ jsonrpc-ws-server = "18.0" jsonrpc-server-utils = "18.0" jsonrpc-pubsub = "18.0" serde_json = "1.0" -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.111.0-pre" } -ckb-verification = { path = "../verification", version = "= 0.111.0-pre" } -ckb-verification-traits = { path = "../verification/traits", version = "= 0.111.0-pre" } -ckb-traits = { path = "../traits", version = "= 0.111.0-pre" } -ckb-util = { path = "../util", version = "= 0.111.0-pre" } -ckb-systemtime = { path = "../util/systemtime", version = "= 0.111.0-pre" } -ckb-dao = { path = "../util/dao", version = "= 0.111.0-pre" } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } -ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.111.0-pre" } -ckb-tx-pool = { path = "../tx-pool", version = "= 0.111.0-pre" } -ckb-memory-tracker = { path = "../util/memory-tracker", version = "= 0.111.0-pre" } -ckb-pow = { path = "../pow", version = "= 0.111.0-pre" } -ckb-indexer = { path = "../util/indexer", version = "= 0.111.0-pre" } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.112.0-pre" } +ckb-verification = { path = "../verification", version = "= 0.112.0-pre" } +ckb-verification-traits = { path = "../verification/traits", version = "= 0.112.0-pre" } +ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } +ckb-util = { path = "../util", version = "= 0.112.0-pre" } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre" } +ckb-dao = { path = "../util/dao", version = "= 0.112.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } +ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.112.0-pre" } +ckb-tx-pool = { path = "../tx-pool", version = "= 0.112.0-pre" } +ckb-memory-tracker = { path = "../util/memory-tracker", version = "= 0.112.0-pre" } +ckb-pow = { path = "../pow", version = "= 0.112.0-pre" } +ckb-indexer = { path = "../util/indexer", version = "= 0.112.0-pre" } itertools = "0.10.5" tokio = "1" [dev-dependencies] reqwest = { version = "0.11.4", features = ["blocking", "json"] } serde = { version = "1.0", features = ["derive"] } -ckb-launcher = { path = "../util/launcher", version = "= 0.111.0-pre" } -ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.111.0-pre" } +ckb-launcher = { path = "../util/launcher", version = "= 0.112.0-pre" } +ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.112.0-pre" } tempfile.workspace = true pretty_assertions = "1.3.0" -ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.111.0-pre" } +ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.112.0-pre" } diff --git a/script/Cargo.toml b/script/Cargo.toml index 12b04a23ea..cf90b09038 100644 --- a/script/Cargo.toml +++ b/script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-script" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -18,24 +18,24 @@ logging = ["ckb-logger"] flatmemory = [] [dependencies] -ckb-traits = { path = "../traits", version = "= 0.111.0-pre" } +ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } byteorder = "1.3.1" -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-hash = { path = "../util/hash", version = "= 0.111.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-hash = { path = "../util/hash", version = "= 0.112.0-pre" } ckb-vm = { version = "=0.24.0", default-features = false } faster-hex = "0.6" -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre", optional = true } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre", optional = true } serde = { version = "1.0", features = ["derive"] } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } [dev-dependencies] proptest = "1.0" -ckb-db = { path = "../db", version = "= 0.111.0-pre" } -ckb-store = { path = "../store", version = "= 0.111.0-pre" } -ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.111.0-pre" } +ckb-db = { path = "../db", version = "= 0.112.0-pre" } +ckb-store = { path = "../store", version = "= 0.112.0-pre" } +ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.112.0-pre" } tiny-keccak = { version = "2.0", features = ["sha3"] } -ckb-crypto = { path = "../util/crypto", version = "= 0.111.0-pre" } -ckb-db-schema = { path = "../db-schema", version = "= 0.111.0-pre" } +ckb-crypto = { path = "../util/crypto", version = "= 0.112.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.112.0-pre" } tempfile.workspace = true rand = "0.8.4" diff --git a/script/fuzz/Cargo.toml b/script/fuzz/Cargo.toml index c3638bf632..88b5401626 100644 --- a/script/fuzz/Cargo.toml +++ b/script/fuzz/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-script-fuzz" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -15,10 +15,10 @@ cargo-fuzz = true [dependencies] arbitrary = { version = "1", features = ["derive"] } libfuzzer-sys = { version="0.4.0", features=["arbitrary-derive"] } -ckb-traits = { path = "../../traits", version = "= 0.111.0-pre" } -ckb-types = { path = "../../util/types", version = "= 0.111.0-pre" } -ckb-script = { path = "../../script", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../../spec", version = "= 0.111.0-pre" } +ckb-traits = { path = "../../traits", version = "= 0.112.0-pre" } +ckb-types = { path = "../../util/types", version = "= 0.112.0-pre" } +ckb-script = { path = "../../script", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../../spec", version = "= 0.112.0-pre" } # Prevent this from interfering with workspaces [workspace] diff --git a/shared/Cargo.toml b/shared/Cargo.toml index c71b704888..84a780771b 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-shared" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -9,27 +9,27 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } -ckb-store = { path = "../store", version = "= 0.111.0-pre" } -ckb-db = { path = "../db", version = "= 0.111.0-pre" } -ckb-proposal-table = { path = "../util/proposal-table", version = "= 0.111.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } +ckb-store = { path = "../store", version = "= 0.112.0-pre" } +ckb-db = { path = "../db", version = "= 0.112.0-pre" } +ckb-proposal-table = { path = "../util/proposal-table", version = "= 0.112.0-pre" } arc-swap = "1.3" -ckb-error = { path = "../error", version = "= 0.111.0-pre" } -ckb-snapshot = { path = "../util/snapshot", version = "= 0.111.0-pre" } -ckb-tx-pool = { path = "../tx-pool", version = "= 0.111.0-pre" } -ckb-verification = { path = "../verification", version = "= 0.111.0-pre" } -ckb-notify = { path = "../notify", version = "= 0.111.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-db-schema = { path = "../db-schema", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.111.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.111.0-pre" } -ckb-constant = { path = "../util/constant", version = "= 0.111.0-pre" } -ckb-systemtime = { path = "../util/systemtime", version = "= 0.111.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } +ckb-snapshot = { path = "../util/snapshot", version = "= 0.112.0-pre" } +ckb-tx-pool = { path = "../tx-pool", version = "= 0.112.0-pre" } +ckb-verification = { path = "../verification", version = "= 0.112.0-pre" } +ckb-notify = { path = "../notify", version = "= 0.112.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.112.0-pre" } +ckb-constant = { path = "../util/constant", version = "= 0.112.0-pre" } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre" } [dev-dependencies] -ckb-systemtime = { path = "../util/systemtime", version = "= 0.111.0-pre", features = ["enable_faketime"] } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre", features = ["enable_faketime"] } [features] portable = ["ckb-db/portable", "ckb-store/portable", "ckb-tx-pool/portable"] diff --git a/spec/Cargo.toml b/spec/Cargo.toml index bd9a7a7f15..2f2c4a1b93 100644 --- a/spec/Cargo.toml +++ b/spec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-chain-spec" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,15 +11,15 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] serde = { version = "1.0", features = ["derive"] } toml = "0.5" -ckb-constant = { path = "../util/constant", version = "= 0.111.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-pow = { path = "../pow", version = "= 0.111.0-pre" } -ckb-resource = { path = "../resource", version = "= 0.111.0-pre" } -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.111.0-pre" } -ckb-rational = { path = "../util/rational", version = "= 0.111.0-pre" } -ckb-crypto = { path = "../util/crypto", version = "= 0.111.0-pre"} -ckb-hash = { path = "../util/hash", version = "= 0.111.0-pre"} -ckb-error = { path = "../error", version = "= 0.111.0-pre" } -ckb-traits = { path = "../traits", version = "= 0.111.0-pre" } -ckb-util = { path = "../util", version = "= 0.111.0-pre" } +ckb-constant = { path = "../util/constant", version = "= 0.112.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-pow = { path = "../pow", version = "= 0.112.0-pre" } +ckb-resource = { path = "../resource", version = "= 0.112.0-pre" } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.112.0-pre" } +ckb-rational = { path = "../util/rational", version = "= 0.112.0-pre" } +ckb-crypto = { path = "../util/crypto", version = "= 0.112.0-pre"} +ckb-hash = { path = "../util/hash", version = "= 0.112.0-pre"} +ckb-error = { path = "../error", version = "= 0.112.0-pre" } +ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } +ckb-util = { path = "../util", version = "= 0.112.0-pre" } diff --git a/store/Cargo.toml b/store/Cargo.toml index 43f86fba35..c97e4c1369 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-store" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -9,16 +9,16 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-db = { path = "../db", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-db = { path = "../db", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } lru = "0.7.1" -ckb-traits = { path = "../traits", version = "= 0.111.0-pre" } -ckb-util = { path = "../util", version = "= 0.111.0-pre" } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-db-schema = { path = "../db-schema", version = "= 0.111.0-pre" } -ckb-freezer = { path = "../freezer", version = "= 0.111.0-pre" } +ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } +ckb-util = { path = "../util", version = "= 0.112.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-db-schema = { path = "../db-schema", version = "= 0.112.0-pre" } +ckb-freezer = { path = "../freezer", version = "= 0.112.0-pre" } ckb-merkle-mountain-range = "0.5.2" [dev-dependencies] diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 34ae04f800..2906f518d4 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-sync" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,48 +9,48 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-chain = { path = "../chain", version = "= 0.111.0-pre" } -ckb-shared = { path = "../shared", version = "= 0.111.0-pre" } -ckb-store = { path = "../store", version = "= 0.111.0-pre" } -ckb-app-config = {path = "../util/app-config", version = "= 0.111.0-pre"} -ckb-types = {path = "../util/types", version = "= 0.111.0-pre"} -ckb-network = { path = "../network", version = "= 0.111.0-pre" } -ckb-logger = {path = "../util/logger", version = "= 0.111.0-pre"} -ckb-metrics = {path = "../util/metrics", version = "= 0.111.0-pre"} -ckb-util = { path = "../util", version = "= 0.111.0-pre" } -ckb-verification = { path = "../verification", version = "= 0.111.0-pre" } -ckb-verification-traits = { path = "../verification/traits", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.111.0-pre" } -ckb-traits = { path = "../traits", version = "= 0.111.0-pre" } -ckb-error = {path = "../error", version = "= 0.111.0-pre"} -ckb-tx-pool = { path = "../tx-pool", version = "= 0.111.0-pre" } +ckb-chain = { path = "../chain", version = "= 0.112.0-pre" } +ckb-shared = { path = "../shared", version = "= 0.112.0-pre" } +ckb-store = { path = "../store", version = "= 0.112.0-pre" } +ckb-app-config = {path = "../util/app-config", version = "= 0.112.0-pre"} +ckb-types = {path = "../util/types", version = "= 0.112.0-pre"} +ckb-network = { path = "../network", version = "= 0.112.0-pre" } +ckb-logger = {path = "../util/logger", version = "= 0.112.0-pre"} +ckb-metrics = {path = "../util/metrics", version = "= 0.112.0-pre"} +ckb-util = { path = "../util", version = "= 0.112.0-pre" } +ckb-verification = { path = "../verification", version = "= 0.112.0-pre" } +ckb-verification-traits = { path = "../verification/traits", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.112.0-pre" } +ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } +ckb-error = {path = "../error", version = "= 0.112.0-pre"} +ckb-tx-pool = { path = "../tx-pool", version = "= 0.112.0-pre" } sentry = { version = "0.26.0", optional = true } -ckb-constant = { path = "../util/constant", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.111.0-pre" } +ckb-constant = { path = "../util/constant", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } tokio = { version = "1", features = ["sync"] } lru = "0.7.1" futures = "0.3" governor = "0.3.1" tempfile.workspace = true -ckb-systemtime = { path = "../util/systemtime", version = "= 0.111.0-pre" } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre" } bitflags = "1.0" dashmap = "4.0" keyed_priority_queue = "0.3" sled = "0.34.7" [dev-dependencies] -ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.111.0-pre" } +ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.112.0-pre" } rand = "0.7" -ckb-dao = { path = "../util/dao", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.111.0-pre" } -ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.111.0-pre" } -ckb-chain = { path = "../chain", version = "= 0.111.0-pre", features = ["mock"] } -ckb-launcher = { path = "../util/launcher", version = "= 0.111.0-pre" } +ckb-dao = { path = "../util/dao", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.112.0-pre" } +ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.112.0-pre" } +ckb-chain = { path = "../chain", version = "= 0.112.0-pre", features = ["mock"] } +ckb-launcher = { path = "../util/launcher", version = "= 0.112.0-pre" } faux = "^0.1" once_cell = "1.8.0" -ckb-systemtime = { path = "../util/systemtime", version = "= 0.111.0-pre" , features = ["enable_faketime"]} +ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre" , features = ["enable_faketime"]} [features] default = [] diff --git a/test/Cargo.toml b/test/Cargo.toml index d40e665dd5..856c11332f 100644 --- a/test/Cargo.toml +++ b/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-test" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,28 +11,28 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] clap = { version = "=3.2.21" } toml = "0.5.0" -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-network = { path = "../network", version = "= 0.111.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.111.0-pre" } -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-hash = { path = "../util/hash", version = "= 0.111.0-pre" } -ckb-util = { path = "../util", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } -ckb-crypto = { path = "../util/crypto", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.111.0-pre" } -ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.111.0-pre" } -ckb-resource = { path = "../resource", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.111.0-pre" } -ckb-logger = { path = "../util/logger", version = "= 0.111.0-pre" } -ckb-logger-config = { path = "../util/logger-config", version = "= 0.111.0-pre" } -ckb-logger-service = { path = "../util/logger-service", version = "= 0.111.0-pre" } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } -ckb-constant = { path = "../util/constant", version = "= 0.111.0-pre" } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-network = { path = "../network", version = "= 0.112.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.112.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-hash = { path = "../util/hash", version = "= 0.112.0-pre" } +ckb-util = { path = "../util", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } +ckb-crypto = { path = "../util/crypto", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.112.0-pre" } +ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.112.0-pre" } +ckb-resource = { path = "../resource", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.112.0-pre" } +ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } +ckb-logger-config = { path = "../util/logger-config", version = "= 0.112.0-pre" } +ckb-logger-service = { path = "../util/logger-service", version = "= 0.112.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } +ckb-constant = { path = "../util/constant", version = "= 0.112.0-pre" } tempfile = "3" reqwest = { version = "0.11.4", features = ["blocking", "json"] } rand = "0.7" -ckb-systemtime = { path = "../util/systemtime", version = "= 0.111.0-pre" } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre" } serde_json = "1.0" lazy_static = "1.4.0" byteorder = "1.3.1" diff --git a/traits/Cargo.toml b/traits/Cargo.toml index 018a23aef2..a16a3326b1 100644 --- a/traits/Cargo.toml +++ b/traits/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-traits" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -9,4 +9,4 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index 32b4f48cf8..5fd3bf0503 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-tx-pool" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,27 +11,27 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-logger = {path = "../util/logger", version = "= 0.111.0-pre"} -ckb-verification = { path = "../verification", version = "= 0.111.0-pre" } -ckb-systemtime = {path = "../util/systemtime", version = "= 0.111.0-pre"} +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-logger = {path = "../util/logger", version = "= 0.112.0-pre"} +ckb-verification = { path = "../verification", version = "= 0.112.0-pre" } +ckb-systemtime = {path = "../util/systemtime", version = "= 0.112.0-pre"} lru = "0.7.1" -ckb-dao = { path = "../util/dao", version = "= 0.111.0-pre" } -ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.111.0-pre" } -ckb-store = { path = "../store", version = "= 0.111.0-pre" } -ckb-util = { path = "../util", version = "= 0.111.0-pre" } -ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } -ckb-snapshot = { path = "../util/snapshot", version = "= 0.111.0-pre" } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } +ckb-dao = { path = "../util/dao", version = "= 0.112.0-pre" } +ckb-reward-calculator = { path = "../util/reward-calculator", version = "= 0.112.0-pre" } +ckb-store = { path = "../store", version = "= 0.112.0-pre" } +ckb-util = { path = "../util", version = "= 0.112.0-pre" } +ckb-jsonrpc-types = { path = "../util/jsonrpc-types", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } +ckb-snapshot = { path = "../util/snapshot", version = "= 0.112.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } tokio = { version = "1", features = ["sync", "process"] } -ckb-async-runtime = { path = "../util/runtime", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../util/app-config", version = "= 0.111.0-pre" } -ckb-network = { path = "../network", version = "= 0.111.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.111.0-pre" } -ckb-traits = { path = "../traits", version = "= 0.111.0-pre" } -ckb-db = { path = "../db", version = "= 0.111.0-pre" } +ckb-async-runtime = { path = "../util/runtime", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../util/app-config", version = "= 0.112.0-pre" } +ckb-network = { path = "../network", version = "= 0.112.0-pre" } +ckb-channel = { path = "../util/channel", version = "= 0.112.0-pre" } +ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } +ckb-db = { path = "../db", version = "= 0.112.0-pre" } sentry = { version = "0.26.0", optional = true } serde_json = "1.0" rand = "0.8.4" @@ -42,8 +42,8 @@ rustc-hash = "1.1" [dev-dependencies] tempfile.workspace = true -ckb-hash = { path = "../util/hash", version = "= 0.111.0-pre" } -ckb-systemtime = {path = "../util/systemtime", version = "= 0.111.0-pre", features = ["enable_faketime"]} +ckb-hash = { path = "../util/hash", version = "= 0.112.0-pre" } +ckb-systemtime = {path = "../util/systemtime", version = "= 0.112.0-pre", features = ["enable_faketime"]} [features] default = [] diff --git a/util/Cargo.toml b/util/Cargo.toml index 42ab50184b..b85d39ec8a 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-util" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -15,7 +15,7 @@ regex = "1.1.6" once_cell = "1.8.0" [dev-dependencies] -ckb-fixed-hash = { path = "fixed-hash", version = "= 0.111.0-pre" } +ckb-fixed-hash = { path = "fixed-hash", version = "= 0.112.0-pre" } [features] deadlock_detection = ["parking_lot/deadlock_detection"] diff --git a/util/app-config/Cargo.toml b/util/app-config/Cargo.toml index b02c1d84e9..d0804265b8 100644 --- a/util/app-config/Cargo.toml +++ b/util/app-config/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-app-config" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -15,20 +15,20 @@ serde_plain = "0.3.0" serde_json = "1.0" toml = "0.5" path-clean = "0.1.0" -ckb-logger = { path = "../../util/logger", version = "= 0.111.0-pre" } -ckb-logger-config = { path = "../../util/logger-config", version = "= 0.111.0-pre" } -ckb-metrics-config = { path = "../../util/metrics-config", version = "= 0.111.0-pre" } -ckb-chain-spec = {path = "../../spec", version = "= 0.111.0-pre"} -ckb-jsonrpc-types = {path = "../jsonrpc-types", version = "= 0.111.0-pre"} -ckb-pow = { path = "../../pow", version = "= 0.111.0-pre" } -ckb-resource = { path = "../../resource", version = "= 0.111.0-pre"} -ckb-build-info = { path = "../build-info", version = "= 0.111.0-pre" } -ckb-types = { path = "../types", version = "= 0.111.0-pre" } +ckb-logger = { path = "../../util/logger", version = "= 0.112.0-pre" } +ckb-logger-config = { path = "../../util/logger-config", version = "= 0.112.0-pre" } +ckb-metrics-config = { path = "../../util/metrics-config", version = "= 0.112.0-pre" } +ckb-chain-spec = {path = "../../spec", version = "= 0.112.0-pre"} +ckb-jsonrpc-types = {path = "../jsonrpc-types", version = "= 0.112.0-pre"} +ckb-pow = { path = "../../pow", version = "= 0.112.0-pre" } +ckb-resource = { path = "../../resource", version = "= 0.112.0-pre"} +ckb-build-info = { path = "../build-info", version = "= 0.112.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } secio = { version="0.5.7", package="tentacle-secio" } multiaddr = { version="0.3.0", package="tentacle-multiaddr" } rand = "0.7" sentry = { version = "0.26.0", optional = true } -ckb-systemtime = { path = "../systemtime", version = "= 0.111.0-pre" } +ckb-systemtime = { path = "../systemtime", version = "= 0.112.0-pre" } url = { version = "2.2.2", features = ["serde"] } ubyte = { version = "0.10", features = ["serde"] } @@ -37,4 +37,4 @@ with_sentry = ["sentry"] [dev-dependencies] tempfile.workspace = true -ckb-systemtime = { path = "../systemtime", version = "= 0.111.0-pre" ,features = ["enable_faketime"]} +ckb-systemtime = { path = "../systemtime", version = "= 0.112.0-pre" ,features = ["enable_faketime"]} diff --git a/util/build-info/Cargo.toml b/util/build-info/Cargo.toml index 51d71c1e3d..083cd5e18f 100644 --- a/util/build-info/Cargo.toml +++ b/util/build-info/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-build-info" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" diff --git a/util/chain-iter/Cargo.toml b/util/chain-iter/Cargo.toml index e317cc2dab..5925d95eba 100644 --- a/util/chain-iter/Cargo.toml +++ b/util/chain-iter/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-chain-iter" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,5 +11,5 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ckb-types = { path = "../types", version = "= 0.111.0-pre" } -ckb-store = { path = "../../store", version = "= 0.111.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } +ckb-store = { path = "../../store", version = "= 0.112.0-pre" } diff --git a/util/channel/Cargo.toml b/util/channel/Cargo.toml index b255eed33e..79653b6dd8 100644 --- a/util/channel/Cargo.toml +++ b/util/channel/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-channel" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" diff --git a/util/constant/Cargo.toml b/util/constant/Cargo.toml index e47cf05087..5031752283 100644 --- a/util/constant/Cargo.toml +++ b/util/constant/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-constant" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" diff --git a/util/crypto/Cargo.toml b/util/crypto/Cargo.toml index 7172e29590..1395ca265a 100644 --- a/util/crypto/Cargo.toml +++ b/util/crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-crypto" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,7 +9,7 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-fixed-hash = { path = "../fixed-hash", version = "= 0.111.0-pre" } +ckb-fixed-hash = { path = "../fixed-hash", version = "= 0.112.0-pre" } lazy_static = "1.3" secp256k1 = { version = "0.24", features = ["recovery"], optional = true } thiserror = "1.0.22" diff --git a/util/dao/Cargo.toml b/util/dao/Cargo.toml index 08556417c7..2436f2e691 100644 --- a/util/dao/Cargo.toml +++ b/util/dao/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-dao" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -10,13 +10,13 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] byteorder = "1.3.1" -ckb-types = { path = "../types", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../../spec", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "./utils", version = "= 0.111.0-pre" } -ckb-traits = { path = "../../traits", version = "= 0.111.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../../spec", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "./utils", version = "= 0.112.0-pre" } +ckb-traits = { path = "../../traits", version = "= 0.112.0-pre" } [dev-dependencies] -ckb-db = { path = "../../db", version = "= 0.111.0-pre" } -ckb-db-schema = { path = "../../db-schema", version = "= 0.111.0-pre" } -ckb-store = { path = "../../store", version = "= 0.111.0-pre" } +ckb-db = { path = "../../db", version = "= 0.112.0-pre" } +ckb-db-schema = { path = "../../db-schema", version = "= 0.112.0-pre" } +ckb-store = { path = "../../store", version = "= 0.112.0-pre" } tempfile.workspace = true diff --git a/util/dao/utils/Cargo.toml b/util/dao/utils/Cargo.toml index 363afddffb..47ba4dee97 100644 --- a/util/dao/utils/Cargo.toml +++ b/util/dao/utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-dao-utils" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -10,5 +10,5 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] byteorder = "1.3.1" -ckb-types = { path = "../../types", version = "= 0.111.0-pre" } -ckb-error = { path = "../../../error", version = "= 0.111.0-pre" } +ckb-types = { path = "../../types", version = "= 0.112.0-pre" } +ckb-error = { path = "../../../error", version = "= 0.112.0-pre" } diff --git a/util/fixed-hash/Cargo.toml b/util/fixed-hash/Cargo.toml index 136ea77db9..17c1fe169b 100644 --- a/util/fixed-hash/Cargo.toml +++ b/util/fixed-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-fixed-hash" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" @@ -9,5 +9,5 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-fixed-hash-core = { path = "core", version = "= 0.111.0-pre" } -ckb-fixed-hash-macros = { path = "macros", version = "= 0.111.0-pre" } +ckb-fixed-hash-core = { path = "core", version = "= 0.112.0-pre" } +ckb-fixed-hash-macros = { path = "macros", version = "= 0.112.0-pre" } diff --git a/util/fixed-hash/core/Cargo.toml b/util/fixed-hash/core/Cargo.toml index 3e791b62a7..5f35705213 100644 --- a/util/fixed-hash/core/Cargo.toml +++ b/util/fixed-hash/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-fixed-hash-core" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" diff --git a/util/fixed-hash/macros/Cargo.toml b/util/fixed-hash/macros/Cargo.toml index 3fe103b9f3..6dcc2a1cbd 100644 --- a/util/fixed-hash/macros/Cargo.toml +++ b/util/fixed-hash/macros/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-fixed-hash-macros" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" @@ -12,7 +12,7 @@ repository = "https://github.com/nervosnetwork/ckb" proc-macro = true [dependencies] -ckb-fixed-hash-core = { path = "../core", version = "= 0.111.0-pre" } +ckb-fixed-hash-core = { path = "../core", version = "= 0.112.0-pre" } quote = "1.0" syn = "1.0" proc-macro2 = "1.0" diff --git a/util/hash/Cargo.toml b/util/hash/Cargo.toml index 247800e185..d1af80c423 100644 --- a/util/hash/Cargo.toml +++ b/util/hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-hash" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" diff --git a/util/indexer/Cargo.toml b/util/indexer/Cargo.toml index c069d88d4f..50e080fa2f 100644 --- a/util/indexer/Cargo.toml +++ b/util/indexer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-indexer" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -13,15 +13,15 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] thiserror = "1.0" rocksdb = { package = "ckb-rocksdb", version ="=0.20.0", features = ["snappy"], default-features = false } -ckb-db-schema = { path = "../../db-schema", version = "= 0.111.0-pre" } -ckb-types = { path = "../types", version = "= 0.111.0-pre" } -ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.111.0-pre" } -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../app-config", version = "= 0.111.0-pre" } -ckb-notify = { path = "../../notify", version = "= 0.111.0-pre" } -ckb-store = { path = "../../store", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../stop-handler", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../runtime", version = "= 0.111.0-pre" } +ckb-db-schema = { path = "../../db-schema", version = "= 0.112.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } +ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.112.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../app-config", version = "= 0.112.0-pre" } +ckb-notify = { path = "../../notify", version = "= 0.112.0-pre" } +ckb-store = { path = "../../store", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../stop-handler", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../runtime", version = "= 0.112.0-pre" } rhai = { version = "1.10.0", features = ["no_function", "no_float", "no_module", "sync"]} serde_json = "1.0" numext-fixed-uint = "0.1" diff --git a/util/instrument/Cargo.toml b/util/instrument/Cargo.toml index aad5ef1844..445561e7ab 100644 --- a/util/instrument/Cargo.toml +++ b/util/instrument/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-instrument" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,11 +9,11 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = { path = "../types", version = "= 0.111.0-pre" } -ckb-chain = { path = "../../chain", version = "= 0.111.0-pre" } -ckb-chain-iter = { path = "../chain-iter", version = "= 0.111.0-pre" } -ckb-shared = { path = "../../shared", version = "= 0.111.0-pre" } -ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.111.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } +ckb-chain = { path = "../../chain", version = "= 0.112.0-pre" } +ckb-chain-iter = { path = "../chain-iter", version = "= 0.112.0-pre" } +ckb-shared = { path = "../../shared", version = "= 0.112.0-pre" } +ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.112.0-pre" } serde_json = "1.0" indicatif = { version = "0.16", optional = true } diff --git a/util/jsonrpc-types/Cargo.toml b/util/jsonrpc-types/Cargo.toml index 8610a5386a..216eb95da6 100644 --- a/util/jsonrpc-types/Cargo.toml +++ b/util/jsonrpc-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-jsonrpc-types" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,7 +9,7 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = { path = "../types", version = "= 0.111.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" faster-hex = "0.6" diff --git a/util/launcher/Cargo.toml b/util/launcher/Cargo.toml index 8484e4ede7..35aa2a90e3 100644 --- a/util/launcher/Cargo.toml +++ b/util/launcher/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-launcher" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,44 +11,44 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ckb-types = { path = "../types", version = "= 0.111.0-pre" } -ckb-store = { path = "../../store", version = "= 0.111.0-pre" } -ckb-db = { path = "../../db", version = "= 0.111.0-pre" } -ckb-migration-template = { path = "migration-template", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../app-config", version = "= 0.111.0-pre" } -ckb-db-migration = { path = "../../db-migration", version = "= 0.111.0-pre" } -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } -ckb-db-schema = { path = "../../db-schema", version = "= 0.111.0-pre" } -ckb-error = { path = "../../error", version = "= 0.111.0-pre" } -ckb-build-info = { path = "../build-info", version = "= 0.111.0-pre" } -ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.111.0-pre" } -ckb-chain = { path = "../../chain", version = "= 0.111.0-pre" } -ckb-shared = { path = "../../shared", version = "= 0.111.0-pre" } -ckb-network = { path = "../../network", version = "= 0.111.0-pre"} -ckb-rpc = { path = "../../rpc", version = "= 0.111.0-pre"} -ckb-resource = { path = "../../resource", version = "= 0.111.0-pre"} -ckb-network-alert = { path = "../network-alert", version = "= 0.111.0-pre" } -ckb-sync = { path = "../../sync", version = "= 0.111.0-pre"} -ckb-verification = { path = "../../verification", version = "= 0.111.0-pre" } -ckb-verification-traits = { path = "../../verification/traits", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../runtime", version = "= 0.111.0-pre" } -ckb-proposal-table = { path = "../proposal-table", version = "= 0.111.0-pre" } -ckb-channel = { path = "../channel", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../../spec", version = "= 0.111.0-pre" } -ckb-freezer = { path = "../../freezer", version = "= 0.111.0-pre" } -ckb-notify = { path = "../../notify", version = "= 0.111.0-pre" } -ckb-snapshot = { path = "../snapshot", version = "= 0.111.0-pre" } -ckb-tx-pool = { path = "../../tx-pool", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../stop-handler", version = "= 0.111.0-pre" } -ckb-light-client-protocol-server = { path = "../light-client-protocol-server", version = "= 0.111.0-pre" } -ckb-block-filter = { path = "../../block-filter", version = "= 0.111.0-pre" } -ckb-hash = { path = "../hash", version = "= 0.111.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } +ckb-store = { path = "../../store", version = "= 0.112.0-pre" } +ckb-db = { path = "../../db", version = "= 0.112.0-pre" } +ckb-migration-template = { path = "migration-template", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../app-config", version = "= 0.112.0-pre" } +ckb-db-migration = { path = "../../db-migration", version = "= 0.112.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } +ckb-db-schema = { path = "../../db-schema", version = "= 0.112.0-pre" } +ckb-error = { path = "../../error", version = "= 0.112.0-pre" } +ckb-build-info = { path = "../build-info", version = "= 0.112.0-pre" } +ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.112.0-pre" } +ckb-chain = { path = "../../chain", version = "= 0.112.0-pre" } +ckb-shared = { path = "../../shared", version = "= 0.112.0-pre" } +ckb-network = { path = "../../network", version = "= 0.112.0-pre"} +ckb-rpc = { path = "../../rpc", version = "= 0.112.0-pre"} +ckb-resource = { path = "../../resource", version = "= 0.112.0-pre"} +ckb-network-alert = { path = "../network-alert", version = "= 0.112.0-pre" } +ckb-sync = { path = "../../sync", version = "= 0.112.0-pre"} +ckb-verification = { path = "../../verification", version = "= 0.112.0-pre" } +ckb-verification-traits = { path = "../../verification/traits", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../runtime", version = "= 0.112.0-pre" } +ckb-proposal-table = { path = "../proposal-table", version = "= 0.112.0-pre" } +ckb-channel = { path = "../channel", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../../spec", version = "= 0.112.0-pre" } +ckb-freezer = { path = "../../freezer", version = "= 0.112.0-pre" } +ckb-notify = { path = "../../notify", version = "= 0.112.0-pre" } +ckb-snapshot = { path = "../snapshot", version = "= 0.112.0-pre" } +ckb-tx-pool = { path = "../../tx-pool", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../stop-handler", version = "= 0.112.0-pre" } +ckb-light-client-protocol-server = { path = "../light-client-protocol-server", version = "= 0.112.0-pre" } +ckb-block-filter = { path = "../../block-filter", version = "= 0.112.0-pre" } +ckb-hash = { path = "../hash", version = "= 0.112.0-pre" } num_cpus = "1.10" once_cell = "1.8.0" tempfile.workspace = true [dev-dependencies] -ckb-systemtime = {path = "../systemtime", version = "= 0.111.0-pre", features = ["enable_faketime"] } +ckb-systemtime = {path = "../systemtime", version = "= 0.112.0-pre", features = ["enable_faketime"] } [features] with_sentry = [ "ckb-sync/with_sentry", "ckb-network/with_sentry", "ckb-app-config/with_sentry" ] diff --git a/util/launcher/migration-template/Cargo.toml b/util/launcher/migration-template/Cargo.toml index 1c500acce6..e187197f04 100644 --- a/util/launcher/migration-template/Cargo.toml +++ b/util/launcher/migration-template/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-migration-template" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" diff --git a/util/light-client-protocol-server/Cargo.toml b/util/light-client-protocol-server/Cargo.toml index b9b6550fd0..4fc16f00d5 100644 --- a/util/light-client-protocol-server/Cargo.toml +++ b/util/light-client-protocol-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-light-client-protocol-server" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,23 +9,23 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-network = { path = "../../network", version = "= 0.111.0-pre" } -ckb-shared = { path = "../../shared", version = "= 0.111.0-pre" } -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } -ckb-types = { path = "../types", version = "= 0.111.0-pre" } -ckb-store = { path = "../../store", version = "= 0.111.0-pre" } +ckb-network = { path = "../../network", version = "= 0.112.0-pre" } +ckb-shared = { path = "../../shared", version = "= 0.112.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } +ckb-store = { path = "../../store", version = "= 0.112.0-pre" } ckb-merkle-mountain-range = "0.5.2" -ckb-systemtime = {path = "../systemtime", version = "= 0.111.0-pre"} +ckb-systemtime = {path = "../systemtime", version = "= 0.112.0-pre"} [dev-dependencies] -ckb-chain = { path = "../../chain", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../../spec", version = "= 0.111.0-pre" } -ckb-tx-pool = { path = "../../tx-pool", version = "= 0.111.0-pre" } -ckb-launcher = { path = "../launcher", version = "= 0.111.0-pre" } -ckb-app-config = { path = "../app-config", version = "= 0.111.0-pre" } -ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../dao/utils", version = "= 0.111.0-pre" } -ckb-test-chain-utils = { path = "../test-chain-utils", version = "= 0.111.0-pre" } +ckb-chain = { path = "../../chain", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../../spec", version = "= 0.112.0-pre" } +ckb-tx-pool = { path = "../../tx-pool", version = "= 0.112.0-pre" } +ckb-launcher = { path = "../launcher", version = "= 0.112.0-pre" } +ckb-app-config = { path = "../app-config", version = "= 0.112.0-pre" } +ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../dao/utils", version = "= 0.112.0-pre" } +ckb-test-chain-utils = { path = "../test-chain-utils", version = "= 0.112.0-pre" } tempfile.workspace = true -ckb-systemtime = {path = "../systemtime", version = "= 0.111.0-pre", features = ["enable_faketime"]} +ckb-systemtime = {path = "../systemtime", version = "= 0.112.0-pre", features = ["enable_faketime"]} tokio = "1.20" diff --git a/util/logger-config/Cargo.toml b/util/logger-config/Cargo.toml index f029e002ec..8308bcf351 100644 --- a/util/logger-config/Cargo.toml +++ b/util/logger-config/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-logger-config" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" diff --git a/util/logger-service/Cargo.toml b/util/logger-service/Cargo.toml index e0a865da97..7adde82cef 100644 --- a/util/logger-service/Cargo.toml +++ b/util/logger-service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-logger-service" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" @@ -9,9 +9,9 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-util = { path = "..", version = "= 0.111.0-pre" } -ckb-logger-config = { path = "../logger-config", version = "= 0.111.0-pre" } -ckb-channel = { path = "../channel", version = "= 0.111.0-pre" } +ckb-util = { path = "..", version = "= 0.112.0-pre" } +ckb-logger-config = { path = "../logger-config", version = "= 0.112.0-pre" } +ckb-channel = { path = "../channel", version = "= 0.112.0-pre" } yansi = "0.5" log = "0.4" env_logger = "0.6" @@ -22,7 +22,7 @@ sentry = { version = "0.26.0", optional = true, features = ["log"] } time = { version = "0.3.11", features = ["formatting"] } [dev-dependencies] -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } tempfile.workspace = true [features] diff --git a/util/logger/Cargo.toml b/util/logger/Cargo.toml index e507880194..f93bcfeb88 100644 --- a/util/logger/Cargo.toml +++ b/util/logger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-logger" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" diff --git a/util/memory-tracker/Cargo.toml b/util/memory-tracker/Cargo.toml index c92d4f3e28..041452ece7 100644 --- a/util/memory-tracker/Cargo.toml +++ b/util/memory-tracker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-memory-tracker" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -9,9 +9,9 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } -ckb-metrics = { path = "../metrics", version = "= 0.111.0-pre" } -ckb-db = { path = "../../db", version = "= 0.111.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } +ckb-metrics = { path = "../metrics", version = "= 0.112.0-pre" } +ckb-db = { path = "../../db", version = "= 0.112.0-pre" } [target.'cfg(all(not(target_env = "msvc"), not(target_os="macos")))'.dependencies] jemalloc-ctl = { package = "tikv-jemalloc-ctl", version = "0.5.0" } diff --git a/util/metrics-config/Cargo.toml b/util/metrics-config/Cargo.toml index 96bfc41046..1ac79386f1 100644 --- a/util/metrics-config/Cargo.toml +++ b/util/metrics-config/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-metrics-config" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" diff --git a/util/metrics-service/Cargo.toml b/util/metrics-service/Cargo.toml index c13c9f835f..0bd9c7d9bd 100644 --- a/util/metrics-service/Cargo.toml +++ b/util/metrics-service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-metrics-service" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" @@ -9,10 +9,10 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-metrics-config = { path = "../metrics-config", version = "= 0.111.0-pre" } -ckb-metrics = { path = "../metrics", version = "= 0.111.0-pre" } -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../runtime", version = "= 0.111.0-pre" } -ckb-util = { path = "..", version = "= 0.111.0-pre" } +ckb-metrics-config = { path = "../metrics-config", version = "= 0.112.0-pre" } +ckb-metrics = { path = "../metrics", version = "= 0.112.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../runtime", version = "= 0.112.0-pre" } +ckb-util = { path = "..", version = "= 0.112.0-pre" } prometheus = "0.13.3" hyper = { version = "0.14", features = ["http1", "tcp", "server"] } diff --git a/util/metrics/Cargo.toml b/util/metrics/Cargo.toml index c2b7fccdde..9206b728e0 100644 --- a/util/metrics/Cargo.toml +++ b/util/metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-metrics" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" diff --git a/util/multisig/Cargo.toml b/util/multisig/Cargo.toml index 104950d3b7..92a1e6ca7b 100644 --- a/util/multisig/Cargo.toml +++ b/util/multisig/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-multisig" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,9 +9,9 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-error = { path = "../../error", version = "= 0.111.0-pre" } -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } -ckb-crypto = { path = "../crypto", version = "= 0.111.0-pre" } +ckb-error = { path = "../../error", version = "= 0.112.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } +ckb-crypto = { path = "../crypto", version = "= 0.112.0-pre" } [dev-dependencies] rand = "0.7" diff --git a/util/network-alert/Cargo.toml b/util/network-alert/Cargo.toml index 6468389bb8..035d01fc3b 100644 --- a/util/network-alert/Cargo.toml +++ b/util/network-alert/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-network-alert" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,24 +9,24 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-multisig = { path = "../multisig", version = "= 0.111.0-pre" } -ckb-types = { path = "../types", version = "= 0.111.0-pre" } -ckb-util = { path = "..", version = "= 0.111.0-pre" } -ckb-network = { path = "../../network", version = "= 0.111.0-pre" } -ckb-notify = { path = "../../notify", version = "= 0.111.0-pre"} -ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.111.0-pre" } -ckb-logger = { path = "../logger", version = "= 0.111.0-pre"} -ckb-app-config = { path = "../app-config", version = "= 0.111.0-pre" } -ckb-error = { path = "../../error", version = "= 0.111.0-pre" } -ckb-systemtime = { path = "../systemtime", version = "= 0.111.0-pre" } +ckb-multisig = { path = "../multisig", version = "= 0.112.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } +ckb-util = { path = "..", version = "= 0.112.0-pre" } +ckb-network = { path = "../../network", version = "= 0.112.0-pre" } +ckb-notify = { path = "../../notify", version = "= 0.112.0-pre"} +ckb-jsonrpc-types = { path = "../jsonrpc-types", version = "= 0.112.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre"} +ckb-app-config = { path = "../app-config", version = "= 0.112.0-pre" } +ckb-error = { path = "../../error", version = "= 0.112.0-pre" } +ckb-systemtime = { path = "../systemtime", version = "= 0.112.0-pre" } lru = "0.7.1" semver = "1.0" [dev-dependencies] -ckb-crypto = { path = "../crypto", version = "= 0.111.0-pre" } -ckb-async-runtime = { path = "../runtime", version = "= 0.111.0-pre" } -ckb-stop-handler = { path = "../stop-handler", version = "= 0.111.0-pre" } +ckb-crypto = { path = "../crypto", version = "= 0.112.0-pre" } +ckb-async-runtime = { path = "../runtime", version = "= 0.112.0-pre" } +ckb-stop-handler = { path = "../stop-handler", version = "= 0.112.0-pre" } once_cell = "1.8.0" -ckb-systemtime = {path = "../systemtime", version = "= 0.111.0-pre", features = ["enable_faketime"]} +ckb-systemtime = {path = "../systemtime", version = "= 0.112.0-pre", features = ["enable_faketime"]} faster-hex = "0.6" serde_json = "1.0" diff --git a/util/occupied-capacity/Cargo.toml b/util/occupied-capacity/Cargo.toml index f8ba7fe7ee..a992292356 100644 --- a/util/occupied-capacity/Cargo.toml +++ b/util/occupied-capacity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-occupied-capacity" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,5 +9,5 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-occupied-capacity-macros = { path = "macros", version = "= 0.111.0-pre" } -ckb-occupied-capacity-core = { path = "core", version = "= 0.111.0-pre" } +ckb-occupied-capacity-macros = { path = "macros", version = "= 0.112.0-pre" } +ckb-occupied-capacity-core = { path = "core", version = "= 0.112.0-pre" } diff --git a/util/occupied-capacity/core/Cargo.toml b/util/occupied-capacity/core/Cargo.toml index 667f441370..934b999633 100644 --- a/util/occupied-capacity/core/Cargo.toml +++ b/util/occupied-capacity/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-occupied-capacity-core" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" diff --git a/util/occupied-capacity/macros/Cargo.toml b/util/occupied-capacity/macros/Cargo.toml index 5cc550b71a..9f714a9223 100644 --- a/util/occupied-capacity/macros/Cargo.toml +++ b/util/occupied-capacity/macros/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-occupied-capacity-macros" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -14,4 +14,4 @@ proc-macro = true [dependencies] quote = "1.0" syn = "1.0" -ckb-occupied-capacity-core = { path = "../core", version = "= 0.111.0-pre" } +ckb-occupied-capacity-core = { path = "../core", version = "= 0.112.0-pre" } diff --git a/util/proposal-table/Cargo.toml b/util/proposal-table/Cargo.toml index cc07051509..be173b7111 100644 --- a/util/proposal-table/Cargo.toml +++ b/util/proposal-table/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-proposal-table" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -10,6 +10,6 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../../spec", version = "= 0.111.0-pre" } -ckb-types = { path = "../types", version = "= 0.111.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../../spec", version = "= 0.112.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } diff --git a/util/rational/Cargo.toml b/util/rational/Cargo.toml index f786fa1428..d1f124ea57 100644 --- a/util/rational/Cargo.toml +++ b/util/rational/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-rational" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" diff --git a/util/reward-calculator/Cargo.toml b/util/reward-calculator/Cargo.toml index c5aba87fa2..242823cd55 100644 --- a/util/reward-calculator/Cargo.toml +++ b/util/reward-calculator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-reward-calculator" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,15 +9,15 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = { path = "../types", version = "= 0.111.0-pre" } -ckb-store = { path = "../../store", version = "= 0.111.0-pre" } -ckb-dao = { path = "../dao", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../dao/utils", version = "= 0.111.0-pre" } -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } -ckb-chain-spec = {path = "../../spec", version = "= 0.111.0-pre"} +ckb-types = { path = "../types", version = "= 0.112.0-pre" } +ckb-store = { path = "../../store", version = "= 0.112.0-pre" } +ckb-dao = { path = "../dao", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../dao/utils", version = "= 0.112.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } +ckb-chain-spec = {path = "../../spec", version = "= 0.112.0-pre"} [dev-dependencies] -ckb-db = { path = "../../db", version = "= 0.111.0-pre" } -ckb-occupied-capacity = { path = "../occupied-capacity", version = "= 0.111.0-pre" } -ckb-db-schema = { path = "../../db-schema", version = "= 0.111.0-pre" } +ckb-db = { path = "../../db", version = "= 0.112.0-pre" } +ckb-occupied-capacity = { path = "../occupied-capacity", version = "= 0.112.0-pre" } +ckb-db-schema = { path = "../../db-schema", version = "= 0.112.0-pre" } tempfile.workspace = true diff --git a/util/runtime/Cargo.toml b/util/runtime/Cargo.toml index 5d8d463738..0dd064eb1b 100644 --- a/util/runtime/Cargo.toml +++ b/util/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-async-runtime" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -10,6 +10,6 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] tokio = { version = "1", features = ["full"] } -ckb-stop-handler = { path = "../stop-handler", version = "= 0.111.0-pre" } -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } -ckb-spawn = { path = "../spawn", version = "= 0.111.0-pre" } +ckb-stop-handler = { path = "../stop-handler", version = "= 0.112.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } +ckb-spawn = { path = "../spawn", version = "= 0.112.0-pre" } diff --git a/util/rust-unstable-port/Cargo.toml b/util/rust-unstable-port/Cargo.toml index ec7c8ea530..56b7730be8 100644 --- a/util/rust-unstable-port/Cargo.toml +++ b/util/rust-unstable-port/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-rust-unstable-port" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" diff --git a/util/snapshot/Cargo.toml b/util/snapshot/Cargo.toml index f3f12fe28f..214a649040 100644 --- a/util/snapshot/Cargo.toml +++ b/util/snapshot/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-snapshot" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -11,15 +11,15 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ckb-types = { path = "../types", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../../spec", version = "= 0.111.0-pre" } -ckb-store = { path = "../../store", version = "= 0.111.0-pre" } -ckb-db = { path = "../../db", version = "= 0.111.0-pre" } -ckb-traits = { path = "../../traits", version = "= 0.111.0-pre" } -ckb-proposal-table = { path = "../proposal-table", version = "= 0.111.0-pre" } +ckb-types = { path = "../types", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../../spec", version = "= 0.112.0-pre" } +ckb-store = { path = "../../store", version = "= 0.112.0-pre" } +ckb-db = { path = "../../db", version = "= 0.112.0-pre" } +ckb-traits = { path = "../../traits", version = "= 0.112.0-pre" } +ckb-proposal-table = { path = "../proposal-table", version = "= 0.112.0-pre" } arc-swap = "1.3" -ckb-db-schema = { path = "../../db-schema", version = "= 0.111.0-pre" } -ckb-freezer = { path = "../../freezer", version = "= 0.111.0-pre" } +ckb-db-schema = { path = "../../db-schema", version = "= 0.112.0-pre" } +ckb-freezer = { path = "../../freezer", version = "= 0.112.0-pre" } ckb-merkle-mountain-range = "0.5.2" [features] diff --git a/util/spawn/Cargo.toml b/util/spawn/Cargo.toml index 3fc666c580..9e1875b48a 100644 --- a/util/spawn/Cargo.toml +++ b/util/spawn/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-spawn" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" diff --git a/util/stop-handler/Cargo.toml b/util/stop-handler/Cargo.toml index 9cf5b0df1c..996648b6ba 100644 --- a/util/stop-handler/Cargo.toml +++ b/util/stop-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-stop-handler" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -10,6 +10,6 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] parking_lot = "0.12" -ckb-logger = { path = "../logger", version = "= 0.111.0-pre" } +ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } tokio = { version = "1", features = ["sync", "rt-multi-thread"] } -ckb-channel = { path = "../channel", version = "= 0.111.0-pre" } +ckb-channel = { path = "../channel", version = "= 0.112.0-pre" } diff --git a/util/systemtime/Cargo.toml b/util/systemtime/Cargo.toml index d2df774233..84ad4d134b 100644 --- a/util/systemtime/Cargo.toml +++ b/util/systemtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-systemtime" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos "] edition = "2021" diff --git a/util/test-chain-utils/Cargo.toml b/util/test-chain-utils/Cargo.toml index 1cc5e547c6..8707987f36 100644 --- a/util/test-chain-utils/Cargo.toml +++ b/util/test-chain-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-test-chain-utils" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -9,18 +9,18 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = {path = "../types", version = "= 0.111.0-pre"} -ckb-db = { path = "../../db", version = "= 0.111.0-pre" } -ckb-store = { path = "../../store", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../../spec", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../dao/utils", version = "= 0.111.0-pre" } -ckb-traits = { path = "../../traits", version = "= 0.111.0-pre" } +ckb-types = {path = "../types", version = "= 0.112.0-pre"} +ckb-db = { path = "../../db", version = "= 0.112.0-pre" } +ckb-store = { path = "../../store", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../../spec", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../dao/utils", version = "= 0.112.0-pre" } +ckb-traits = { path = "../../traits", version = "= 0.112.0-pre" } lazy_static = "1.3.0" -ckb-systemtime = { path = "../systemtime", version = "= 0.111.0-pre" } -ckb-resource = { path = "../../resource", version = "= 0.111.0-pre" } -ckb-db-schema = { path = "../../db-schema", version = "= 0.111.0-pre" } -ckb-util = { path = "..", version = "= 0.111.0-pre" } +ckb-systemtime = { path = "../systemtime", version = "= 0.112.0-pre" } +ckb-resource = { path = "../../resource", version = "= 0.112.0-pre" } +ckb-db-schema = { path = "../../db-schema", version = "= 0.112.0-pre" } +ckb-util = { path = "..", version = "= 0.112.0-pre" } tempfile.workspace = true [dev-dependencies] -ckb-systemtime = { path = "../systemtime", version = "= 0.111.0-pre", features = ["enable_faketime"] } +ckb-systemtime = { path = "../systemtime", version = "= 0.112.0-pre", features = ["enable_faketime"] } diff --git a/util/types/Cargo.toml b/util/types/Cargo.toml index 26676e851b..2b974f1e83 100644 --- a/util/types/Cargo.toml +++ b/util/types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-types" -version = "0.111.0-pre" +version = "0.112.0-pre" authors = ["Nervos Core Dev "] edition = "2021" license = "MIT" @@ -10,17 +10,17 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] molecule = "=0.7.5" -ckb-fixed-hash = { path = "../fixed-hash", version = "= 0.111.0-pre" } +ckb-fixed-hash = { path = "../fixed-hash", version = "= 0.112.0-pre" } numext-fixed-uint = { version = "0.1", features = ["support_rand", "support_heapsize", "support_serde"] } bytes = { version="1", features = ["serde"] } merkle-cbt = "0.3" -ckb-occupied-capacity = { path = "../occupied-capacity", version = "= 0.111.0-pre" } -ckb-hash = { path = "../hash", version = "= 0.111.0-pre" } -ckb-channel = { path = "../channel", version = "= 0.111.0-pre" } -ckb-constant = { path = "../constant", version = "= 0.111.0-pre" } +ckb-occupied-capacity = { path = "../occupied-capacity", version = "= 0.112.0-pre" } +ckb-hash = { path = "../hash", version = "= 0.112.0-pre" } +ckb-channel = { path = "../channel", version = "= 0.112.0-pre" } +ckb-constant = { path = "../constant", version = "= 0.112.0-pre" } bit-vec = "0.6.3" -ckb-error = { path = "../../error", version = "= 0.111.0-pre" } -ckb-rational = { path = "../rational", version = "= 0.111.0-pre" } +ckb-error = { path = "../../error", version = "= 0.112.0-pre" } +ckb-rational = { path = "../rational", version = "= 0.112.0-pre" } once_cell = "1.8.0" derive_more = { version = "0.99.0", default-features=false, features = ["display"] } ckb-merkle-mountain-range = "0.5.2" diff --git a/verification/Cargo.toml b/verification/Cargo.toml index 01b5315d4d..d7b590124d 100644 --- a/verification/Cargo.toml +++ b/verification/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-verification" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,20 +9,20 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-script = { path = "../script", version = "= 0.111.0-pre" } -ckb-pow = { path = "../pow", version = "= 0.111.0-pre" } -ckb-systemtime = { path = "../util/systemtime", version = "= 0.111.0-pre" } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-script = { path = "../script", version = "= 0.112.0-pre" } +ckb-pow = { path = "../pow", version = "= 0.112.0-pre" } +ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre" } lru = "0.7.1" -ckb-traits = { path = "../traits", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../spec", version = "= 0.111.0-pre" } -ckb-dao = { path = "../util/dao", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.111.0-pre" } -ckb-error = { path = "../error", version = "= 0.111.0-pre" } +ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../spec", version = "= 0.112.0-pre" } +ckb-dao = { path = "../util/dao", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../util/dao/utils", version = "= 0.112.0-pre" } +ckb-error = { path = "../error", version = "= 0.112.0-pre" } derive_more = { version = "0.99.0", default-features=false, features = ["display"] } -ckb-verification-traits = { path = "./traits", version = "= 0.111.0-pre" } +ckb-verification-traits = { path = "./traits", version = "= 0.112.0-pre" } [dev-dependencies] -ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.111.0-pre" } -ckb-resource = { path = "../resource", version = "= 0.111.0-pre" } -ckb-systemtime = {path = "../util/systemtime", version = "= 0.111.0-pre", features=["enable_faketime"]} +ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.112.0-pre" } +ckb-resource = { path = "../resource", version = "= 0.112.0-pre" } +ckb-systemtime = {path = "../util/systemtime", version = "= 0.112.0-pre", features=["enable_faketime"]} diff --git a/verification/contextual/Cargo.toml b/verification/contextual/Cargo.toml index 6c75f25da1..099bfbacbd 100644 --- a/verification/contextual/Cargo.toml +++ b/verification/contextual/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-verification-contextual" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,27 +9,27 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = { path = "../../util/types", version = "= 0.111.0-pre" } -ckb-store = { path = "../../store", version = "= 0.111.0-pre" } -ckb-systemtime = { path = "../../util/systemtime", version = "= 0.111.0-pre" } +ckb-types = { path = "../../util/types", version = "= 0.112.0-pre" } +ckb-store = { path = "../../store", version = "= 0.112.0-pre" } +ckb-systemtime = { path = "../../util/systemtime", version = "= 0.112.0-pre" } rayon = "1.0" -ckb-traits = { path = "../../traits", version = "= 0.111.0-pre" } -ckb-chain-spec = { path = "../../spec", version = "= 0.111.0-pre" } -ckb-dao = { path = "../../util/dao", version = "= 0.111.0-pre" } -ckb-dao-utils = { path = "../../util/dao/utils", version = "= 0.111.0-pre" } -ckb-logger = {path = "../../util/logger", version = "= 0.111.0-pre"} -ckb-reward-calculator= { path = "../../util/reward-calculator", version = "= 0.111.0-pre" } -ckb-error = { path = "../../error", version = "= 0.111.0-pre" } +ckb-traits = { path = "../../traits", version = "= 0.112.0-pre" } +ckb-chain-spec = { path = "../../spec", version = "= 0.112.0-pre" } +ckb-dao = { path = "../../util/dao", version = "= 0.112.0-pre" } +ckb-dao-utils = { path = "../../util/dao/utils", version = "= 0.112.0-pre" } +ckb-logger = {path = "../../util/logger", version = "= 0.112.0-pre"} +ckb-reward-calculator= { path = "../../util/reward-calculator", version = "= 0.112.0-pre" } +ckb-error = { path = "../../error", version = "= 0.112.0-pre" } tokio = { version = "1", features = ["sync", "rt-multi-thread"] } -ckb-async-runtime = { path = "../../util/runtime", version = "= 0.111.0-pre" } -ckb-verification-traits = { path = "../traits", version = "= 0.111.0-pre" } -ckb-verification = { path = "..", version = "= 0.111.0-pre" } +ckb-async-runtime = { path = "../../util/runtime", version = "= 0.112.0-pre" } +ckb-verification-traits = { path = "../traits", version = "= 0.112.0-pre" } +ckb-verification = { path = "..", version = "= 0.112.0-pre" } ckb-merkle-mountain-range = "0.5.2" [dev-dependencies] -ckb-chain = { path = "../../chain", version = "= 0.111.0-pre" } -ckb-shared = { path = "../../shared", version = "= 0.111.0-pre" } -ckb-launcher = { path = "../../util/launcher", version = "= 0.111.0-pre" } -ckb-test-chain-utils = { path = "../../util/test-chain-utils", version = "= 0.111.0-pre" } -ckb-systemtime = { path = "../../util/systemtime", version = "= 0.111.0-pre", features = ["enable_faketime"]} +ckb-chain = { path = "../../chain", version = "= 0.112.0-pre" } +ckb-shared = { path = "../../shared", version = "= 0.112.0-pre" } +ckb-launcher = { path = "../../util/launcher", version = "= 0.112.0-pre" } +ckb-test-chain-utils = { path = "../../util/test-chain-utils", version = "= 0.112.0-pre" } +ckb-systemtime = { path = "../../util/systemtime", version = "= 0.112.0-pre", features = ["enable_faketime"]} rand = "0.7" diff --git a/verification/traits/Cargo.toml b/verification/traits/Cargo.toml index 69e1c233a5..28a71132e3 100644 --- a/verification/traits/Cargo.toml +++ b/verification/traits/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-verification-traits" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -10,4 +10,4 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] bitflags = "1.0" -ckb-error = { path = "../../error", version = "= 0.111.0-pre" } +ckb-error = { path = "../../error", version = "= 0.112.0-pre" } diff --git a/wasm-build-test/Cargo.toml b/wasm-build-test/Cargo.toml index 55abc32d0a..ffde0ed82a 100644 --- a/wasm-build-test/Cargo.toml +++ b/wasm-build-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ckb-wasm-test" -version = "0.111.0-pre" +version = "0.112.0-pre" license = "MIT" authors = ["Nervos Core Dev "] edition = "2021" @@ -9,8 +9,8 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -ckb-types = { path = "../util/types", version = "= 0.111.0-pre" } -ckb-script = { path = "../script", version = "= 0.111.0-pre", default-features = false, features = [] } +ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } +ckb-script = { path = "../script", version = "= 0.112.0-pre", default-features = false, features = [] } # Prevent this from interfering with workspaces [workspace] From c359cf235eb384617f036e1c52251f0096428918 Mon Sep 17 00:00:00 2001 From: mohanson Date: Wed, 14 Jun 2023 16:18:01 +0800 Subject: [PATCH 002/267] fix(scripts): set_debug_printer should updates generator's debug_printer --- script/src/verify.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/script/src/verify.rs b/script/src/verify.rs index 3f4ebc81a8..0ff5aca316 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -336,8 +336,6 @@ impl { data_loader: DL, - debug_printer: DebugPrinter, - rtx: Arc, binaries_by_data_hash: HashMap, @@ -464,7 +462,6 @@ impl(&mut self, func: F) { - self.debug_printer = Arc::new(func); + self.generator.debug_printer = Arc::new(func); } #[cfg(test)] From e78621bcde92ae61191f70db55f994f8576de035 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 14 Jun 2023 16:39:32 +0800 Subject: [PATCH 003/267] Fix `ckb init -c unsupported_spec` should not create config files --- util/app-config/src/cli.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/util/app-config/src/cli.rs b/util/app-config/src/cli.rs index a63a974b86..522d3faee8 100644 --- a/util/app-config/src/cli.rs +++ b/util/app-config/src/cli.rs @@ -1,6 +1,6 @@ //! CKB command line arguments parser. use ckb_build_info::Version; -use ckb_resource::{DEFAULT_P2P_PORT, DEFAULT_RPC_PORT, DEFAULT_SPEC}; +use ckb_resource::{AVAILABLE_SPECS, DEFAULT_P2P_PORT, DEFAULT_RPC_PORT, DEFAULT_SPEC}; use clap::{Arg, ArgGroup, ArgMatches, Command}; /// binary file name(ckb) @@ -414,6 +414,12 @@ fn init() -> Command { Arg::new(ARG_CHAIN) .short('c') .long(ARG_CHAIN) + .value_parser( + AVAILABLE_SPECS + .iter() + .map(|v| v.to_string()) + .collect::>(), + ) .default_value(DEFAULT_SPEC) .help("Initializes CKB directory for "), ) From 6c21fbccafedea7e792777bd0a51d67d001daf3d Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Fri, 16 Jun 2023 11:15:24 +0800 Subject: [PATCH 004/267] fix: permanent difficulty mode reward --- spec/src/consensus.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index a7d053610c..800225209c 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -797,9 +797,18 @@ impl Consensus { epoch_duration_in_milliseconds, } => { if self.permanent_difficulty() { + let primary_epoch_reward = + self.primary_epoch_reward_of_next_epoch(&epoch).as_u64(); + let block_reward = + Capacity::shannons(primary_epoch_reward / epoch.length()); + let remainder_reward = + Capacity::shannons(primary_epoch_reward % epoch.length()); + let dummy_epoch_ext = epoch .clone() .into_builder() + .base_block_reward(block_reward) + .remainder_reward(remainder_reward) .number(epoch.number() + 1) .last_block_hash_in_previous_epoch(header.hash()) .start_number(header.number() + 1) From 1e5ce9bc87e5c8e6ccca7113c8bc8e0acc588061 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Fri, 16 Jun 2023 15:11:25 +0800 Subject: [PATCH 005/267] fix: get_consensus 2023 info --- rpc/README.md | 5 ++++- rpc/src/module/chain.rs | 5 ++++- util/jsonrpc-types/src/blockchain.rs | 5 ++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/rpc/README.md b/rpc/README.md index 68cda868d2..843f8c56a2 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -1665,7 +1665,10 @@ Response { "rfc": "0036", "epoch_number": "0x0" }, { "rfc": "0038", "epoch_number": "0x0" } ], - "ckb2023": [] + "ckb2023": [ + { "rfc": "0146", "epoch_number": null }, + { "rfc": "0148", "epoch_number": null } + ] }, "id": "main", "initial_primary_epoch_reward": "0x71afd498d000", diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 803ce1f30b..d238faad09 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -1347,7 +1347,10 @@ pub trait ChainRpc { /// { "rfc": "0036", "epoch_number": "0x0" }, /// { "rfc": "0038", "epoch_number": "0x0" } /// ], - /// "ckb2023": [] + /// "ckb2023": [ + /// { "rfc": "0146", "epoch_number": null }, + /// { "rfc": "0148", "epoch_number": null } + /// ] /// }, /// "id": "main", /// "initial_primary_epoch_reward": "0x71afd498d000", diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index 87f19c2a7f..519d3adeea 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -1402,7 +1402,10 @@ impl HardForks { HardForkFeature::new("0036", convert(hardforks.ckb2021.rfc_0036())), HardForkFeature::new("0038", convert(hardforks.ckb2021.rfc_0038())), ], - ckb2023: vec![], + ckb2023: vec![ + HardForkFeature::new("0146", convert(hardforks.ckb2023.rfc_0146())), + HardForkFeature::new("0148", convert(hardforks.ckb2023.rfc_0148())), + ], } } } From 2be50dbd8bb43198d2a10a38a9e3e1257bb04a60 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Fri, 16 Jun 2023 17:08:50 +0800 Subject: [PATCH 006/267] chore: update 2023 rfc number --- chain/src/chain.rs | 4 +-- rpc/README.md | 4 +-- rpc/src/module/chain.rs | 4 +-- .../transaction_scripts_verifier_data2.rs | 2 +- script/src/verify/tests/utils.rs | 2 +- spec/src/consensus.rs | 8 ++--- spec/src/hardfork.rs | 2 +- tx-pool/src/util.rs | 6 ++-- util/jsonrpc-types/src/blockchain.rs | 4 +-- util/types/src/core/hardfork/ckb2023.rs | 36 +++++++++---------- verification/src/tests/header_verifier.rs | 2 +- 11 files changed, 37 insertions(+), 37 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 004782a5c2..b6291211d5 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -193,14 +193,14 @@ impl ForkChanges { false } else { // This method assumes that the input blocks are sorted and unique. - let rfc_0148 = hardfork.ckb2023.rfc_0148(); + let rfc_0049 = hardfork.ckb2023.rfc_0049(); let epoch_first = blocks.front().unwrap().epoch().number(); let epoch_next = blocks .back() .unwrap() .epoch() .minimum_epoch_number_after_n_blocks(1); - epoch_first < rfc_0148 && rfc_0148 <= epoch_next + epoch_first < rfc_0049 && rfc_0049 <= epoch_next } } } diff --git a/rpc/README.md b/rpc/README.md index 843f8c56a2..cf33106e12 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -1666,8 +1666,8 @@ Response { "rfc": "0038", "epoch_number": "0x0" } ], "ckb2023": [ - { "rfc": "0146", "epoch_number": null }, - { "rfc": "0148", "epoch_number": null } + { "rfc": "0048", "epoch_number": null }, + { "rfc": "0049", "epoch_number": null } ] }, "id": "main", diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index d238faad09..27f05e5bd8 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -1348,8 +1348,8 @@ pub trait ChainRpc { /// { "rfc": "0038", "epoch_number": "0x0" } /// ], /// "ckb2023": [ - /// { "rfc": "0146", "epoch_number": null }, - /// { "rfc": "0148", "epoch_number": null } + /// { "rfc": "0048", "epoch_number": null }, + /// { "rfc": "0049", "epoch_number": null } /// ] /// }, /// "id": "main", diff --git a/script/fuzz/fuzz_targets/transaction_scripts_verifier_data2.rs b/script/fuzz/fuzz_targets/transaction_scripts_verifier_data2.rs index 934b84f2ab..81f030f105 100644 --- a/script/fuzz/fuzz_targets/transaction_scripts_verifier_data2.rs +++ b/script/fuzz/fuzz_targets/transaction_scripts_verifier_data2.rs @@ -101,7 +101,7 @@ fn run(data: &[u8]) { ckb2021: CKB2021::new_mirana().as_builder().build().unwrap(), ckb2023: CKB2023::new_mirana() .as_builder() - .rfc_0148(0) + .rfc_0049(0) .build() .unwrap(), }; diff --git a/script/src/verify/tests/utils.rs b/script/src/verify/tests/utils.rs index 2239d8ab57..a92ed6cd11 100644 --- a/script/src/verify/tests/utils.rs +++ b/script/src/verify/tests/utils.rs @@ -141,7 +141,7 @@ impl TransactionScriptsVerifierWithEnv { ckb2021: CKB2021::new_mirana(), ckb2023: CKB2023::new_mirana() .as_builder() - .rfc_0148(version_2_enabled_at) + .rfc_0049(version_2_enabled_at) .build() .unwrap(), }; diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index 800225209c..40dcec7449 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -1028,13 +1028,13 @@ impl Consensus { let epoch_number = epoch.number(); - let rfc_0148 = self.hardfork_switch.ckb2023.rfc_0148(); + let rfc_0049 = self.hardfork_switch.ckb2023.rfc_0049(); // dev default is 0 - if rfc_0148 != 0 && rfc_0148 != EpochNumber::MAX { - return (epoch_number + 1 == rfc_0148 + if rfc_0049 != 0 && rfc_0049 != EpochNumber::MAX { + return (epoch_number + 1 == rfc_0049 && (proposal_window.farthest() + index) >= epoch_length) - || (epoch_number == rfc_0148 && index <= proposal_window.farthest()); + || (epoch_number == rfc_0049 && index <= proposal_window.farthest()); } false } diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index 4e0251e801..edce4ccbc7 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -69,7 +69,7 @@ impl HardForkConfig { builder: CKB2023Builder, ckb2023: EpochNumber, ) -> Result { - let builder = builder.rfc_0146(ckb2023).rfc_0148(ckb2023); + let builder = builder.rfc_0048(ckb2023).rfc_0049(ckb2023); Ok(builder) } diff --git a/tx-pool/src/util.rs b/tx-pool/src/util.rs index 7efc62fcca..b4d6c3b5d5 100644 --- a/tx-pool/src/util.rs +++ b/tx-pool/src/util.rs @@ -150,11 +150,11 @@ pub(crate) fn after_delay_window(snapshot: &Snapshot) -> bool { let index = epoch.index(); let epoch_number = epoch.number(); - let rfc_0148 = snapshot.consensus().hardfork_switch.ckb2023.rfc_0148(); + let rfc_0049 = snapshot.consensus().hardfork_switch.ckb2023.rfc_0049(); - if rfc_0148 == 0 && rfc_0148 == EpochNumber::MAX { + if rfc_0049 == 0 && rfc_0049 == EpochNumber::MAX { return true; } - epoch_number > rfc_0148 || (epoch_number == rfc_0148 && index > proposal_window.farthest()) + epoch_number > rfc_0049 || (epoch_number == rfc_0049 && index > proposal_window.farthest()) } diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index 519d3adeea..44f81994a8 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -1403,8 +1403,8 @@ impl HardForks { HardForkFeature::new("0038", convert(hardforks.ckb2021.rfc_0038())), ], ckb2023: vec![ - HardForkFeature::new("0146", convert(hardforks.ckb2023.rfc_0146())), - HardForkFeature::new("0148", convert(hardforks.ckb2023.rfc_0148())), + HardForkFeature::new("0048", convert(hardforks.ckb2023.rfc_0048())), + HardForkFeature::new("0049", convert(hardforks.ckb2023.rfc_0049())), ], } } diff --git a/util/types/src/core/hardfork/ckb2023.rs b/util/types/src/core/hardfork/ckb2023.rs index 22b539d268..ba0ad73c07 100644 --- a/util/types/src/core/hardfork/ckb2023.rs +++ b/util/types/src/core/hardfork/ckb2023.rs @@ -10,8 +10,8 @@ use paste::paste; /// [`CKB2023Builder`]: struct.CKB2023Builder.html #[derive(Debug, Clone)] pub struct CKB2023 { - rfc_0146: EpochNumber, - rfc_0148: EpochNumber, + rfc_0048: EpochNumber, + rfc_0049: EpochNumber, } /// Builder for [`CKB2023`]. @@ -19,8 +19,8 @@ pub struct CKB2023 { /// [`CKB2023`]: struct.CKB2023.html #[derive(Debug, Clone, Default)] pub struct CKB2023Builder { - rfc_0146: Option, - rfc_0148: Option, + rfc_0048: Option, + rfc_0049: Option, } impl CKB2023 { @@ -32,16 +32,16 @@ impl CKB2023 { /// Creates a new builder based on the current instance. pub fn as_builder(&self) -> CKB2023Builder { Self::new_builder() - .rfc_0146(self.rfc_0146()) - .rfc_0148(self.rfc_0148()) + .rfc_0048(self.rfc_0048()) + .rfc_0049(self.rfc_0049()) } /// Creates a new mirana instance. pub fn new_mirana() -> Self { // Use a builder to ensure all features are set manually. Self::new_builder() - .rfc_0146(hardfork::mainnet::CKB2023_START_EPOCH) - .rfc_0148(hardfork::mainnet::CKB2023_START_EPOCH) + .rfc_0048(hardfork::mainnet::CKB2023_START_EPOCH) + .rfc_0049(hardfork::mainnet::CKB2023_START_EPOCH) .build() .unwrap() } @@ -49,25 +49,25 @@ impl CKB2023 { /// Creates a new dev instance. pub fn new_dev() -> Self { // Use a builder to ensure all features are set manually. - Self::new_builder().rfc_0146(0).rfc_0148(0).build().unwrap() + Self::new_builder().rfc_0048(0).rfc_0049(0).build().unwrap() } } define_methods!( CKB2023, - rfc_0146, + rfc_0048, remove_header_version_reservation_rule, is_remove_header_version_reservation_rule_enabled, - disable_rfc_0146, - "RFC PR 0146" + disable_rfc_0048, + "RFC PR 0048" ); define_methods!( CKB2023, - rfc_0148, + rfc_0049, vm_version_2_and_syscalls_3, is_vm_version_2_and_syscalls_3_enabled, - disable_rfc_0148, - "RFC PR 0148" + disable_rfc_0049, + "RFC PR 0049" ); impl CKB2023Builder { @@ -78,9 +78,9 @@ impl CKB2023Builder { /// /// [`CKB2023`]: struct.CKB2023.html pub fn build(self) -> Result { - let rfc_0146 = try_find!(self, rfc_0146); - let rfc_0148 = try_find!(self, rfc_0148); + let rfc_0048 = try_find!(self, rfc_0048); + let rfc_0049 = try_find!(self, rfc_0049); - Ok(CKB2023 { rfc_0146, rfc_0148 }) + Ok(CKB2023 { rfc_0048, rfc_0049 }) } } diff --git a/verification/src/tests/header_verifier.rs b/verification/src/tests/header_verifier.rs index 82556b833b..2e9219c525 100644 --- a/verification/src/tests/header_verifier.rs +++ b/verification/src/tests/header_verifier.rs @@ -32,7 +32,7 @@ pub fn test_version() { ckb2021: CKB2021::new_mirana(), ckb2023: CKB2023::new_mirana() .as_builder() - .rfc_0146(10) + .rfc_0048(10) .build() .unwrap(), }; From 1bdb66e60e86f4fc31ebf17d33c5818c91e66815 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Mon, 19 Jun 2023 11:55:01 +0800 Subject: [PATCH 007/267] feat: dev configurable hardfork --- docs/hashes.toml | 2 +- resource/specs/dev.toml | 1 + spec/src/hardfork.rs | 22 ++++++++++++++++++++-- util/types/src/core/hardfork/ckb2021.rs | 2 +- util/types/src/core/hardfork/ckb2023.rs | 12 +++++++++++- util/types/src/core/hardfork/mod.rs | 4 ++-- 6 files changed, 36 insertions(+), 7 deletions(-) diff --git a/docs/hashes.toml b/docs/hashes.toml index 365e8a545b..73c4e670b2 100644 --- a/docs/hashes.toml +++ b/docs/hashes.toml @@ -127,7 +127,7 @@ tx_hash = "0xd5780747735fd22c9ba7363bde8afe59061658caa836962867253b03cbda264e" index = 1 [ckb_dev] -spec_hash = "0xdd75ca95ff59da78879506a46f509adf4e8d131f1826827be708408e60ebde37" +spec_hash = "0x6cb679a15a7ff16596cad85f4680ab4335c87bcbf43956c591c23d11841719e4" genesis = "0x823b2ff5785b12da8b1363cac9a5cbe566d8b715a4311441b119c39a0367488c" cellbase = "0xa563884b3686078ec7e7677a5f86449b15cf2693f3c1241766c6996f206cc541" diff --git a/resource/specs/dev.toml b/resource/specs/dev.toml index 0f5b32a817..8ae8149df2 100644 --- a/resource/specs/dev.toml +++ b/resource/specs/dev.toml @@ -96,6 +96,7 @@ genesis_epoch_length = 1000 permanent_difficulty_in_dummy = true [params.hardfork] +ckb2023 = 0 [pow] diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index edce4ccbc7..77ceeef70b 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -10,7 +10,9 @@ use serde::{Deserialize, Serialize}; /// Hard forks parameters for spec. #[derive(Default, Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] -pub struct HardForkConfig {} +pub struct HardForkConfig { + ckb2023: Option, +} impl HardForkConfig { /// If all parameters which have been set are correct for mainnet, then @@ -77,6 +79,22 @@ impl HardForkConfig { /// /// Enable features which are set to `None` at the dev default config. pub fn complete_with_dev_default(&self) -> Result { - Ok(HardForks::new_dev()) + let mut ckb2021 = CKB2021::new_builder(); + ckb2021 = self.update_2021( + ckb2021, + testnet::CKB2021_START_EPOCH, + testnet::RFC0028_START_EPOCH, + )?; + + let ckb2023 = if let Some(epoch) = self.ckb2023 { + CKB2023::new_with_specified(epoch) + } else { + CKB2023::new_dev_default() + }; + + Ok(HardForks { + ckb2021: ckb2021.build()?, + ckb2023, + }) } } diff --git a/util/types/src/core/hardfork/ckb2021.rs b/util/types/src/core/hardfork/ckb2021.rs index 82cc6040a8..ffe10d3e95 100644 --- a/util/types/src/core/hardfork/ckb2021.rs +++ b/util/types/src/core/hardfork/ckb2021.rs @@ -99,7 +99,7 @@ impl CKB2021 { } /// Creates a new dev instance. - pub fn new_dev() -> Self { + pub fn new_dev_default() -> Self { // Use a builder to ensure all features are set manually. Self::new_builder() .rfc_0028(0) diff --git a/util/types/src/core/hardfork/ckb2023.rs b/util/types/src/core/hardfork/ckb2023.rs index ba0ad73c07..bff61241f0 100644 --- a/util/types/src/core/hardfork/ckb2023.rs +++ b/util/types/src/core/hardfork/ckb2023.rs @@ -47,10 +47,20 @@ impl CKB2023 { } /// Creates a new dev instance. - pub fn new_dev() -> Self { + pub fn new_dev_default() -> Self { // Use a builder to ensure all features are set manually. Self::new_builder().rfc_0048(0).rfc_0049(0).build().unwrap() } + + /// Creates a new instance with specified. + pub fn new_with_specified(epoch: EpochNumber) -> Self { + // Use a builder to ensure all features are set manually. + Self::new_builder() + .rfc_0048(epoch) + .rfc_0049(epoch) + .build() + .unwrap() + } } define_methods!( diff --git a/util/types/src/core/hardfork/mod.rs b/util/types/src/core/hardfork/mod.rs index 37b42fb5c0..b2229c83ec 100644 --- a/util/types/src/core/hardfork/mod.rs +++ b/util/types/src/core/hardfork/mod.rs @@ -29,8 +29,8 @@ impl HardForks { /// construct dev configuration pub fn new_dev() -> HardForks { HardForks { - ckb2021: CKB2021::new_dev(), - ckb2023: CKB2023::new_dev(), + ckb2021: CKB2021::new_dev_default(), + ckb2023: CKB2023::new_dev_default(), } } } From adbed9e24e95139e6883482321d491514eeecc11 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Mon, 19 Jun 2023 19:16:53 +0800 Subject: [PATCH 008/267] fix: 2023 vm version check --- script/src/verify.rs | 8 +- spec/src/hardfork.rs | 3 +- test/src/main.rs | 3 +- test/src/specs/hardfork/mod.rs | 2 + test/src/specs/hardfork/v2021/mod.rs | 4 +- .../v2021/{vm_version.rs => vm_version1.rs} | 4 +- test/src/specs/hardfork/v2023/mod.rs | 3 + test/src/specs/hardfork/v2023/vm_version2.rs | 408 ++++++++++++++++++ verification/src/transaction_verifier.rs | 79 ++++ 9 files changed, 507 insertions(+), 7 deletions(-) rename test/src/specs/hardfork/v2021/{vm_version.rs => vm_version1.rs} (99%) create mode 100644 test/src/specs/hardfork/v2023/mod.rs create mode 100644 test/src/specs/hardfork/v2023/vm_version2.rs diff --git a/script/src/verify.rs b/script/src/verify.rs index 0ff5aca316..ee1e524e32 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -540,7 +540,13 @@ impl Ok(ScriptVersion::V0), ScriptHashType::Data1 => Ok(ScriptVersion::V1), - ScriptHashType::Data2 => Ok(ScriptVersion::V2), + ScriptHashType::Data2 => { + if is_vm_version_2_and_syscalls_3_enabled { + Ok(ScriptVersion::V2) + } else { + Err(ScriptError::InvalidVmVersion(2)) + } + } ScriptHashType::Type => { if is_vm_version_2_and_syscalls_3_enabled { Ok(ScriptVersion::V2) diff --git a/spec/src/hardfork.rs b/spec/src/hardfork.rs index 77ceeef70b..2c1f411a2f 100644 --- a/spec/src/hardfork.rs +++ b/spec/src/hardfork.rs @@ -11,7 +11,8 @@ use serde::{Deserialize, Serialize}; #[derive(Default, Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct HardForkConfig { - ckb2023: Option, + /// ckb 2023 epoch + pub ckb2023: Option, } impl HardForkConfig { diff --git a/test/src/main.rs b/test/src/main.rs index 6b8034fd34..0ef0feafff 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -530,7 +530,8 @@ fn all_specs() -> Vec> { Box::new(CheckCellDeps), Box::new(CheckAbsoluteEpochSince), Box::new(CheckRelativeEpochSince), - Box::new(CheckVmVersion), + Box::new(CheckVmVersion1), + Box::new(CheckVmVersion2), Box::new(CheckVmBExtension), ]; specs.shuffle(&mut thread_rng()); diff --git a/test/src/specs/hardfork/mod.rs b/test/src/specs/hardfork/mod.rs index 422849ec4a..14092f87b4 100644 --- a/test/src/specs/hardfork/mod.rs +++ b/test/src/specs/hardfork/mod.rs @@ -1,3 +1,5 @@ mod v2021; +mod v2023; pub use v2021::*; +pub use v2023::*; diff --git a/test/src/specs/hardfork/v2021/mod.rs b/test/src/specs/hardfork/v2021/mod.rs index e80e4adc43..56a1422b9f 100644 --- a/test/src/specs/hardfork/v2021/mod.rs +++ b/test/src/specs/hardfork/v2021/mod.rs @@ -1,9 +1,9 @@ mod cell_deps; mod since; mod vm_b_extension; -mod vm_version; +mod vm_version1; pub use cell_deps::CheckCellDeps; pub use since::{CheckAbsoluteEpochSince, CheckRelativeEpochSince}; pub use vm_b_extension::CheckVmBExtension; -pub use vm_version::CheckVmVersion; +pub use vm_version1::CheckVmVersion1; diff --git a/test/src/specs/hardfork/v2021/vm_version.rs b/test/src/specs/hardfork/v2021/vm_version1.rs similarity index 99% rename from test/src/specs/hardfork/v2021/vm_version.rs rename to test/src/specs/hardfork/v2021/vm_version1.rs index 46a4ff393c..f49d955275 100644 --- a/test/src/specs/hardfork/v2021/vm_version.rs +++ b/test/src/specs/hardfork/v2021/vm_version1.rs @@ -21,7 +21,7 @@ const GENESIS_EPOCH_LENGTH: u64 = 10; const TEST_CASES_COUNT: usize = (RPC_MAX_VM_VERSION as usize + 1 + 1) * 2; const INITIAL_INPUTS_COUNT: usize = 1 + TEST_CASES_COUNT * 2; -pub struct CheckVmVersion; +pub struct CheckVmVersion1; struct NewScript { cell_dep: packed::CellDep, @@ -42,7 +42,7 @@ struct CheckVmVersionTestRunner<'a> { node: &'a Node, } -impl Spec for CheckVmVersion { +impl Spec for CheckVmVersion1 { crate::setup!(num_nodes: 2); fn run(&self, nodes: &mut Vec) { diff --git a/test/src/specs/hardfork/v2023/mod.rs b/test/src/specs/hardfork/v2023/mod.rs new file mode 100644 index 0000000000..a9ce71a763 --- /dev/null +++ b/test/src/specs/hardfork/v2023/mod.rs @@ -0,0 +1,3 @@ +mod vm_version2; + +pub use vm_version2::CheckVmVersion2; diff --git a/test/src/specs/hardfork/v2023/vm_version2.rs b/test/src/specs/hardfork/v2023/vm_version2.rs new file mode 100644 index 0000000000..adbc3258c5 --- /dev/null +++ b/test/src/specs/hardfork/v2023/vm_version2.rs @@ -0,0 +1,408 @@ +use crate::{ + util::{ + cell::gen_spendable, + check::{assert_epoch_should_less_than, is_transaction_committed}, + }, + utils::{assert_send_transaction_fail, wait_until}, + Node, Spec, +}; +use ckb_jsonrpc_types as rpc; +use ckb_jsonrpc_types::Either; +use ckb_logger::{debug, info}; +use ckb_types::{ + core::{Capacity, DepType, ScriptHashType, TransactionView}, + packed, + prelude::*, +}; +use std::fmt; + +const RPC_MAX_VM_VERSION: u8 = 2; +const MAX_VM_VERSION: u8 = 2; + +const GENESIS_EPOCH_LENGTH: u64 = 10; +const CKB2023_START_EPOCH: u64 = 10; + +const TEST_CASES_COUNT: usize = (RPC_MAX_VM_VERSION as usize + 1 + 1) * 2; +const INITIAL_INPUTS_COUNT: usize = 1 + TEST_CASES_COUNT * 2; + +pub struct CheckVmVersion2; + +struct NewScript { + cell_dep: packed::CellDep, + data_hash: packed::Byte32, + type_hash: packed::Byte32, +} + +#[derive(Debug, Clone, Copy)] +enum ExpectedResult { + ShouldBePassed, + IncompatibleVmV2, + RpcInvalidVmVersion, + LockInvalidVmVersion, + TypeInvalidVmVersion, +} + +struct CheckVmVersionTestRunner<'a> { + node: &'a Node, +} + +impl Spec for CheckVmVersion2 { + crate::setup!(num_nodes: 2); + + fn run(&self, nodes: &mut Vec) { + let epoch_length = GENESIS_EPOCH_LENGTH; + let ckb2021_last_epoch = CKB2023_START_EPOCH - 1; + + let node = &nodes[0]; + let node1 = &nodes[1]; + + node.mine(1); + node1.connect(node); + + { + let mut inputs = gen_spendable(node, INITIAL_INPUTS_COUNT) + .into_iter() + .map(|input| packed::CellInput::new(input.out_point, 0)); + let script = NewScript::new_with_id(node, 0, &mut inputs); + let runner = CheckVmVersionTestRunner::new(node); + + info!("CKB v2021:"); + runner.run_all_tests(&mut inputs, &script, 1); + + assert_epoch_should_less_than(node, ckb2021_last_epoch, epoch_length - 4, epoch_length); + node.mine_until_epoch(ckb2021_last_epoch, epoch_length - 4, epoch_length); + + info!("CKB v2023:"); + runner.run_all_tests(&mut inputs, &script, 2); + } + + { + info!("Test Sync:"); + let (rpc_client0, rpc_client1) = (node.rpc_client(), node1.rpc_client()); + + // The GetHeaders will be sent every 15s. + // When reach tip, the GetHeaders will be paused 28s. + let ret = wait_until(60, || { + let header0 = rpc_client0.get_tip_header(); + let header1 = rpc_client1.get_tip_header(); + header0 == header1 + }); + assert!( + ret, + "Nodes should sync with each other until same tip chain", + ); + } + } + + fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { + spec.params.permanent_difficulty_in_dummy = Some(true); + spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + if spec.params.hardfork.is_none() { + spec.params.hardfork = Some(Default::default()); + } + if let Some(mut switch) = spec.params.hardfork.as_mut() { + switch.ckb2023 = Some(CKB2023_START_EPOCH); + } + } +} + +impl NewScript { + fn new_with_id( + node: &Node, + id: u8, + inputs: &mut impl Iterator, + ) -> Self { + let original_data = node.always_success_raw_data(); + let data = packed::Bytes::new_builder() + .extend(original_data.as_ref().iter().map(|x| (*x).into())) + .push(id.into()) + .build(); + let tx = Self::deploy(node, &data, inputs); + let cell_dep = packed::CellDep::new_builder() + .out_point(packed::OutPoint::new(tx.hash(), 0)) + .dep_type(DepType::Code.into()) + .build(); + let data_hash = packed::CellOutput::calc_data_hash(&data.raw_data()); + let type_hash = tx + .output(0) + .unwrap() + .type_() + .to_opt() + .unwrap() + .calc_script_hash(); + Self { + cell_dep, + data_hash, + type_hash, + } + } + + fn deploy( + node: &Node, + data: &packed::Bytes, + inputs: &mut impl Iterator, + ) -> TransactionView { + let type_script = node.always_success_script(); + let tx_template = TransactionView::new_advanced_builder(); + let cell_input = inputs.next().unwrap(); + let cell_output = packed::CellOutput::new_builder() + .type_(Some(type_script).pack()) + .build_exact_capacity(Capacity::bytes(data.len()).unwrap()) + .unwrap(); + let tx = tx_template + .cell_dep(node.always_success_cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(data.clone()) + .build(); + node.submit_transaction(&tx); + node.mine_until_bool(|| is_transaction_committed(node, &tx)); + tx + } + + fn cell_dep(&self) -> packed::CellDep { + self.cell_dep.clone() + } + + fn as_data_script(&self, vm_version: u8) -> packed::Script { + let hash_type = match vm_version { + 0 => ScriptHashType::Data, + 1 => ScriptHashType::Data1, + 2 => ScriptHashType::Data2, + _ => panic!("unknown vm_version [{}]", vm_version), + }; + packed::Script::new_builder() + .code_hash(self.data_hash.clone()) + .hash_type(hash_type.into()) + .build() + } + + fn as_type_script(&self) -> packed::Script { + packed::Script::new_builder() + .code_hash(self.type_hash.clone()) + .hash_type(ScriptHashType::Type.into()) + .build() + } +} + +impl fmt::Display for ExpectedResult { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::ShouldBePassed => write!(f, " allowed"), + _ => write!(f, "not allowed"), + } + } +} + +impl ExpectedResult { + fn error_message(self) -> Option<&'static str> { + match self { + Self::ShouldBePassed => None, + Self::IncompatibleVmV2 => Some( + "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ + Verification failed Transaction(Compatible: \ + the feature \\\"VM Version 2\\\"", + ), + Self::RpcInvalidVmVersion => Some( + "{\"code\":-32602,\"message\":\"\ + Invalid params: the maximum vm version currently supported is", + ), + Self::LockInvalidVmVersion => Some( + "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ + Verification failed Script(TransactionScriptError \ + { source: Inputs[0].Lock, cause: Invalid VM Version:", + ), + Self::TypeInvalidVmVersion => Some( + "{\"code\":-302,\"message\":\"TransactionFailedToVerify: \ + Verification failed Script(TransactionScriptError { \ + source: Outputs[0].Type, cause: Invalid VM Version: ", + ), + } + } +} + +impl<'a> CheckVmVersionTestRunner<'a> { + fn new(node: &'a Node) -> Self { + Self { node } + } + + fn test_create( + &self, + inputs: &mut impl Iterator, + cell_dep_opt: Option, + script: packed::Script, + expected: ExpectedResult, + ) -> Option { + let (tx_builder, co_builder) = if let Some(cell_dep) = cell_dep_opt { + ( + TransactionView::new_advanced_builder().cell_dep(cell_dep), + packed::CellOutput::new_builder() + .lock(self.node.always_success_script()) + .type_(Some(script).pack()), + ) + } else { + ( + TransactionView::new_advanced_builder(), + packed::CellOutput::new_builder().lock(script), + ) + }; + let cell_input = inputs.next().unwrap(); + let input_cell = self.get_previous_output(&cell_input); + let cell_output = co_builder + .capacity((input_cell.capacity.value() - 1).pack()) + .build(); + let tx = tx_builder + .cell_dep(self.node.always_success_cell_dep()) + .input(cell_input) + .output(cell_output) + .output_data(Default::default()) + .build(); + if let Some(errmsg) = expected.error_message() { + assert_send_transaction_fail(self.node, &tx, errmsg); + None + } else { + self.submit_transaction_until_committed(&tx); + Some(tx) + } + } + + fn test_spend( + &self, + tx: TransactionView, + cell_dep: packed::CellDep, + has_always_success: bool, + expected: ExpectedResult, + ) { + let out_point = packed::OutPoint::new(tx.hash(), 0); + let input = packed::CellInput::new(out_point, 0); + let output = packed::CellOutput::new_builder() + .build_exact_capacity(Capacity::shannons(0)) + .unwrap(); + let tx = if has_always_success { + TransactionView::new_advanced_builder().cell_dep(self.node.always_success_cell_dep()) + } else { + TransactionView::new_advanced_builder() + } + .cell_dep(cell_dep) + .input(input) + .output(output) + .output_data(Default::default()) + .build(); + if let Some(errmsg) = expected.error_message() { + assert_send_transaction_fail(self.node, &tx, errmsg); + } else { + self.submit_transaction_until_committed(&tx); + } + } + + fn get_previous_output(&self, cell_input: &packed::CellInput) -> rpc::CellOutput { + let previous_output = cell_input.previous_output(); + let previous_output_index: usize = previous_output.index().unpack(); + + if let Either::Left(tx) = self + .node + .rpc_client() + .get_transaction(previous_output.tx_hash()) + .transaction + .unwrap() + .inner + { + tx.inner.outputs[previous_output_index].clone() + } else { + panic!("get_previous_output failed"); + } + } + + fn submit_transaction_until_committed(&self, tx: &TransactionView) { + debug!(">>> >>> submit: transaction {:#x}.", tx.hash()); + self.node.submit_transaction(tx); + self.node + .mine_until_bool(|| is_transaction_committed(self.node, tx)); + } + + fn run_all_tests( + &self, + inputs: &mut impl Iterator, + script: &NewScript, + max_vm_version: u8, + ) { + for vm_version in 0..=RPC_MAX_VM_VERSION { + let res = if vm_version <= max_vm_version { + ExpectedResult::ShouldBePassed + } else if vm_version <= MAX_VM_VERSION { + ExpectedResult::IncompatibleVmV2 + } else { + ExpectedResult::RpcInvalidVmVersion + }; + info!( + ">>> Create a cell with Data({:2}) lock script is {}", + vm_version, res + ); + let s = script.as_data_script(vm_version); + if let Some(tx) = self.test_create(inputs, None, s, res) { + let res = if vm_version <= max_vm_version { + ExpectedResult::ShouldBePassed + } else { + ExpectedResult::LockInvalidVmVersion + }; + info!( + ">>> Spend the cell with Data({:2}) lock script is {}", + vm_version, res + ); + let dep = script.cell_dep(); + self.test_spend(tx, dep, false, res); + } + } + { + let res = ExpectedResult::ShouldBePassed; + info!(">>> Create a cell with Type lock script is {}", res); + let s = script.as_type_script(); + if let Some(tx) = self.test_create(inputs, None, s, res) { + let res = ExpectedResult::ShouldBePassed; + info!(">>> Spend the cell with Type lock script is {}", res); + let dep = script.cell_dep(); + self.test_spend(tx, dep, false, res); + } + } + for vm_version in 0..=RPC_MAX_VM_VERSION { + let res = if vm_version <= max_vm_version { + ExpectedResult::ShouldBePassed + } else if vm_version <= MAX_VM_VERSION { + ExpectedResult::TypeInvalidVmVersion + } else { + ExpectedResult::RpcInvalidVmVersion + }; + info!( + ">>> Create a cell with Data({:2}) type script is {}", + vm_version, res + ); + let dep = Some(script.cell_dep()); + let s = script.as_data_script(vm_version); + if let Some(tx) = self.test_create(inputs, dep, s, res) { + let res = if vm_version <= max_vm_version { + ExpectedResult::ShouldBePassed + } else { + ExpectedResult::TypeInvalidVmVersion + }; + info!( + ">>> Spend the cell with Data({:2}) type script is {}", + vm_version, res + ); + let dep = script.cell_dep(); + self.test_spend(tx, dep, true, res); + } + } + { + let res = ExpectedResult::ShouldBePassed; + info!(">>> Create a cell with Type type script is {}", res); + let dep = Some(script.cell_dep()); + let s = script.as_type_script(); + if let Some(tx) = self.test_create(inputs, dep, s, res) { + let res = ExpectedResult::ShouldBePassed; + info!(">>> Spend the cell with Type type script is {}", res); + let dep = script.cell_dep(); + self.test_spend(tx, dep, true, res); + } + } + } +} diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 7b78086642..8c4e11e734 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -99,11 +99,13 @@ impl<'a> NonContextualTransactionVerifier<'a> { /// Context-dependent verification checks for transaction /// /// Contains: +/// [`CompatibleVerifier`](./struct.CompatibleVerifier.html) /// [`TimeRelativeTransactionVerifier`](./struct.TimeRelativeTransactionVerifier.html) /// [`CapacityVerifier`](./struct.CapacityVerifier.html) /// [`ScriptVerifier`](./struct.ScriptVerifier.html) /// [`FeeCalculator`](./struct.FeeCalculator.html) pub struct ContextualTransactionVerifier
{ + pub(crate) compatible: CompatibleVerifier, pub(crate) time_relative: TimeRelativeTransactionVerifier
, pub(crate) capacity: CapacityVerifier, pub(crate) script: ScriptVerifier
, @@ -130,6 +132,11 @@ where tx_env: Arc, ) -> Self { ContextualTransactionVerifier { + compatible: CompatibleVerifier::new( + Arc::clone(&rtx), + Arc::clone(&consensus), + Arc::clone(&tx_env), + ), time_relative: TimeRelativeTransactionVerifier::new( Arc::clone(&rtx), Arc::clone(&consensus), @@ -149,6 +156,7 @@ where /// Perform resumable context-dependent verification, return a `Result` to `CacheEntry` pub fn resumable_verify(&self, limit_cycles: Cycle) -> Result<(VerifyResult, Capacity), Error> { + self.compatible.verify()?; self.time_relative.verify()?; self.capacity.verify()?; let fee = self.fee_calculator.transaction_fee()?; @@ -160,6 +168,7 @@ where /// /// skip script verify will result in the return value cycle always is zero pub fn verify(&self, max_cycles: Cycle, skip_script_verify: bool) -> Result { + self.compatible.verify()?; self.time_relative.verify()?; self.capacity.verify()?; let cycles = if skip_script_verify { @@ -180,6 +189,7 @@ where skip_script_verify: bool, snapshot: &TransactionSnapshot, ) -> Result { + self.compatible.verify()?; self.time_relative.verify()?; self.capacity.verify()?; let cycles = if skip_script_verify { @@ -832,6 +842,68 @@ impl<'a> OutputsDataVerifier<'a> { } } +/// Check compatible between different versions CKB clients. +/// +/// When a new client with hardfork features released, before the hardfork started, the old CKB +/// clients will still be able to work. +/// So, the new CKB client have to add several necessary checks to avoid fork attacks. +/// +/// After hardfork, the old clients will be no longer available. Then we can delete all code in +/// this verifier until next hardfork. +pub struct CompatibleVerifier { + rtx: Arc, + consensus: Arc, + tx_env: Arc, +} + +impl CompatibleVerifier { + pub fn new( + rtx: Arc, + consensus: Arc, + tx_env: Arc, + ) -> Self { + Self { + rtx, + consensus, + tx_env, + } + } + + pub fn verify(&self) -> Result<(), Error> { + let proposal_window = self.consensus.tx_proposal_window(); + let epoch_number = self.tx_env.epoch_number(proposal_window); + if !self + .consensus + .hardfork_switch() + .ckb2023 + .is_vm_version_2_and_syscalls_3_enabled(epoch_number) + { + for ht in self + .rtx + .transaction + .outputs() + .into_iter() + .map(|output| output.lock().hash_type()) + { + let hash_type: ScriptHashType = ht.try_into().map_err(|_| { + let val: u8 = ht.into(); + // This couldn't happen, because we already check it. + TransactionError::Internal { + description: format!("unknown hash type {:02x}", val), + } + })?; + if hash_type == ScriptHashType::Data2 { + return Err(TransactionError::Compatible { + feature: "VM Version 2", + } + .into()); + } + } + } + Ok(()) + } +} + /// Context-dependent checks exclude script /// /// Contains: @@ -839,6 +911,7 @@ impl<'a> OutputsDataVerifier<'a> { /// [`CapacityVerifier`](./struct.CapacityVerifier.html) /// [`FeeCalculator`](./struct.FeeCalculator.html) pub struct ContextualWithoutScriptTransactionVerifier
{ + pub(crate) compatible: CompatibleVerifier, pub(crate) time_relative: TimeRelativeTransactionVerifier
, pub(crate) capacity: CapacityVerifier, pub(crate) fee_calculator: FeeCalculator
, @@ -864,6 +937,11 @@ where tx_env: Arc, ) -> Self { ContextualWithoutScriptTransactionVerifier { + compatible: CompatibleVerifier::new( + Arc::clone(&rtx), + Arc::clone(&consensus), + Arc::clone(&tx_env), + ), time_relative: TimeRelativeTransactionVerifier::new( Arc::clone(&rtx), Arc::clone(&consensus), @@ -877,6 +955,7 @@ where /// Perform verification pub fn verify(&self) -> Result { + self.compatible.verify()?; self.time_relative.verify()?; self.capacity.verify()?; let fee = self.fee_calculator.transaction_fee()?; From 5c8cceab381a1ea60843d1946f83de8579d59b11 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Tue, 20 Jun 2023 15:02:55 +0800 Subject: [PATCH 009/267] chore: adjust 2023 activation time --- util/constant/src/hardfork/testnet.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/constant/src/hardfork/testnet.rs b/util/constant/src/hardfork/testnet.rs index 4f476b5493..6de8bc7bbe 100644 --- a/util/constant/src/hardfork/testnet.rs +++ b/util/constant/src/hardfork/testnet.rs @@ -7,5 +7,5 @@ pub const RFC0028_START_EPOCH: u64 = 3113; // pub const CKB2021_START_EPOCH: u64 = 3113; pub const CKB2021_START_EPOCH: u64 = 0; -/// hardcode ckb2023 epoch, about 2023/06/25 6:50 UTC -pub const CKB2023_START_EPOCH: u64 = 6765; +/// hardcode ckb2023 epoch, about 2023/06/30 6:50 UTC +pub const CKB2023_START_EPOCH: u64 = 6795; From 30b60bb0fadede07888fd29d9a0c2dd8b6a12d82 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 20 Jun 2023 18:29:56 +0800 Subject: [PATCH 010/267] test: make sure `2_in_2_out_rtx` will suspend on first run --- script/src/verify/tests/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/src/verify/tests/utils.rs b/script/src/verify/tests/utils.rs index a92ed6cd11..d206de1f37 100644 --- a/script/src/verify/tests/utils.rs +++ b/script/src/verify/tests/utils.rs @@ -35,7 +35,7 @@ use tempfile::TempDir; use crate::verify::*; pub(crate) const ALWAYS_SUCCESS_SCRIPT_CYCLE: u64 = 537; -pub(crate) const CYCLE_BOUND: Cycle = 200_000; +pub(crate) const CYCLE_BOUND: Cycle = 250_000; pub(crate) const V2_CYCLE_BOUND: Cycle = 300_000; fn sha3_256>(s: T) -> [u8; 32] { From 6a36abb0d6fdd86739e52a89cdf3892be8d3b898 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 21 Jun 2023 11:01:19 +0800 Subject: [PATCH 011/267] Fix RUSTSEC-2023-0044 warning Signed-off-by: Eval EXEC --- Cargo.lock | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8644af07c2..2e88372dd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3306,9 +3306,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.48" +version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" +checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -3338,11 +3338,10 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.83" +version = "0.9.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" +checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" dependencies = [ - "autocfg", "cc", "libc", "pkg-config", From e5b74d01b654a1a4b6cd74d28e19b0b92d630e0e Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Wed, 21 Jun 2023 18:32:26 +0800 Subject: [PATCH 012/267] fix: get_consensus compatibility --- rpc/README.md | 47 +++++++--------------------- rpc/src/module/chain.rs | 26 +++++++-------- util/jsonrpc-types/src/blockchain.rs | 10 ++---- 3 files changed, 25 insertions(+), 58 deletions(-) diff --git a/rpc/README.md b/rpc/README.md index cf33106e12..166d922b30 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -138,7 +138,6 @@ The crate `ckb-rpc`'s minimum supported rustc version is 1.67.1. * [Type `EstimateCycles`](#type-estimatecycles) * [Type `FeeRateStatistics`](#type-feeratestatistics) * [Type `H256`](#type-h256) - * [Type `HardForkFeature`](#type-hardforkfeature) * [Type `HardForks`](#type-hardforks) * [Type `Header`](#type-header) * [Type `HeaderView`](#type-headerview) @@ -1655,21 +1654,17 @@ Response "dao_type_hash": null, "epoch_duration_target": "0x3840", "genesis_hash": "0x7978ec7ce5b507cfb52e149e36b1a23f6062ed150503c85bbf825da3599095ed", - "hardfork_features": { - "ckb2021": [ - { "rfc": "0028", "epoch_number": "0x1526" }, - { "rfc": "0029", "epoch_number": "0x0" }, - { "rfc": "0030", "epoch_number": "0x0" }, - { "rfc": "0031", "epoch_number": "0x0" }, - { "rfc": "0032", "epoch_number": "0x0" }, - { "rfc": "0036", "epoch_number": "0x0" }, - { "rfc": "0038", "epoch_number": "0x0" } - ], - "ckb2023": [ - { "rfc": "0048", "epoch_number": null }, - { "rfc": "0049", "epoch_number": null } - ] - }, + "hardfork_features": [ + { "rfc": "0028", "epoch_number": "0x1526" }, + { "rfc": "0029", "epoch_number": "0x0" }, + { "rfc": "0030", "epoch_number": "0x0" }, + { "rfc": "0031", "epoch_number": "0x0" }, + { "rfc": "0032", "epoch_number": "0x0" }, + { "rfc": "0036", "epoch_number": "0x0" }, + { "rfc": "0038", "epoch_number": "0x0" }, + { "rfc": "0048", "epoch_number": null }, + { "rfc": "0049", "epoch_number": null } + ], "id": "main", "initial_primary_epoch_reward": "0x71afd498d000", "max_block_bytes": "0x91c08", @@ -5902,30 +5897,10 @@ The fee_rate statistics information, includes mean and median, unit: shannons pe The 256-bit binary data encoded as a 0x-prefixed hex string in JSON. -### Type `HardForkFeature` - -The information about one hardfork feature. - -#### Fields - -`HardForkFeature` is a JSON object with the following fields. - -* `rfc`: `string` - The related RFC ID. - -* `epoch_number`: [`EpochNumber`](#type-epochnumber) `|` `null` - The first epoch when the feature is enabled, `null` indicates that the RFC has never been enabled. - - ### Type `HardForks` Hardfork information -#### Fields - -`HardForks` is a JSON object with the following fields. - -* `ckb2021`: `Array<` [`HardForkFeature`](#type-hardforkfeature) `>` - ckb2021 information - -* `ckb2023`: `Array<` [`HardForkFeature`](#type-hardforkfeature) `>` - ckb2023 information ### Type `Header` diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 27f05e5bd8..dce4c322f8 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -1337,21 +1337,17 @@ pub trait ChainRpc { /// "dao_type_hash": null, /// "epoch_duration_target": "0x3840", /// "genesis_hash": "0x7978ec7ce5b507cfb52e149e36b1a23f6062ed150503c85bbf825da3599095ed", - /// "hardfork_features": { - /// "ckb2021": [ - /// { "rfc": "0028", "epoch_number": "0x1526" }, - /// { "rfc": "0029", "epoch_number": "0x0" }, - /// { "rfc": "0030", "epoch_number": "0x0" }, - /// { "rfc": "0031", "epoch_number": "0x0" }, - /// { "rfc": "0032", "epoch_number": "0x0" }, - /// { "rfc": "0036", "epoch_number": "0x0" }, - /// { "rfc": "0038", "epoch_number": "0x0" } - /// ], - /// "ckb2023": [ - /// { "rfc": "0048", "epoch_number": null }, - /// { "rfc": "0049", "epoch_number": null } - /// ] - /// }, + /// "hardfork_features": [ + /// { "rfc": "0028", "epoch_number": "0x1526" }, + /// { "rfc": "0029", "epoch_number": "0x0" }, + /// { "rfc": "0030", "epoch_number": "0x0" }, + /// { "rfc": "0031", "epoch_number": "0x0" }, + /// { "rfc": "0032", "epoch_number": "0x0" }, + /// { "rfc": "0036", "epoch_number": "0x0" }, + /// { "rfc": "0038", "epoch_number": "0x0" }, + /// { "rfc": "0048", "epoch_number": null }, + /// { "rfc": "0049", "epoch_number": null } + /// ], /// "id": "main", /// "initial_primary_epoch_reward": "0x71afd498d000", /// "max_block_bytes": "0x91c08", diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index 44f81994a8..8acaf95ce7 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -1382,18 +1382,16 @@ pub struct Consensus { /// Hardfork information #[derive(Clone, Serialize, Deserialize, Debug)] +#[serde(transparent)] pub struct HardForks { - /// ckb2021 information - pub ckb2021: Vec, - /// ckb2023 information - pub ckb2023: Vec, + inner: Vec, } impl HardForks { /// Returns a list of hardfork features from a hardfork switch. pub fn new(hardforks: &core::hardfork::HardForks) -> Self { HardForks { - ckb2021: vec![ + inner: vec![ HardForkFeature::new("0028", convert(hardforks.ckb2021.rfc_0028())), HardForkFeature::new("0029", convert(hardforks.ckb2021.rfc_0029())), HardForkFeature::new("0030", convert(hardforks.ckb2021.rfc_0030())), @@ -1401,8 +1399,6 @@ impl HardForks { HardForkFeature::new("0032", convert(hardforks.ckb2021.rfc_0032())), HardForkFeature::new("0036", convert(hardforks.ckb2021.rfc_0036())), HardForkFeature::new("0038", convert(hardforks.ckb2021.rfc_0038())), - ], - ckb2023: vec![ HardForkFeature::new("0048", convert(hardforks.ckb2023.rfc_0048())), HardForkFeature::new("0049", convert(hardforks.ckb2023.rfc_0049())), ], From 742d1330d72b8480a7326302fd7de511b652b257 Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Fri, 23 Jun 2023 13:26:44 +0800 Subject: [PATCH 013/267] chore: delay 2023 testnet launch --- util/constant/src/hardfork/testnet.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/constant/src/hardfork/testnet.rs b/util/constant/src/hardfork/testnet.rs index 6de8bc7bbe..362df34df3 100644 --- a/util/constant/src/hardfork/testnet.rs +++ b/util/constant/src/hardfork/testnet.rs @@ -7,5 +7,5 @@ pub const RFC0028_START_EPOCH: u64 = 3113; // pub const CKB2021_START_EPOCH: u64 = 3113; pub const CKB2021_START_EPOCH: u64 = 0; -/// hardcode ckb2023 epoch, about 2023/06/30 6:50 UTC -pub const CKB2023_START_EPOCH: u64 = 6795; +/// hardcode ckb2023 epoch +pub const CKB2023_START_EPOCH: u64 = u64::MAX; From f8aa2f52f62989b39751637462e85503ad81569f Mon Sep 17 00:00:00 2001 From: ian Date: Sun, 25 Jun 2023 21:43:05 +0800 Subject: [PATCH 014/267] chore: add `make docker-publish-rc` Push the latest version as the image tag only. Skip the `latest` image tag. This will be used to push the rc version, which should not become the latest image in Docker. --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index e264edde70..7d7adb98fb 100644 --- a/Makefile +++ b/Makefile @@ -168,6 +168,10 @@ docker-publish: docker tag nervos/ckb:$$(git describe) nervos/ckb:latest docker push nervos/ckb:latest +.PHONY: docker-publish-rc +docker-publish-rc: + docker push nervos/ckb:$$(git describe) + ##@ Code Quality .PHONY: fmt fmt: setup-ckb-test ## Check Rust source code format to keep to the same style. From c1ce9c8d3705377d10303fe43abfa8ce43f64d58 Mon Sep 17 00:00:00 2001 From: mohanson Date: Tue, 27 Jun 2023 17:54:08 +0800 Subject: [PATCH 015/267] chore(script): make ResumableMachine type public --- script/src/lib.rs | 4 ++-- script/src/types.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/script/src/lib.rs b/script/src/lib.rs index 5adfd1fc9c..8b2d128039 100644 --- a/script/src/lib.rs +++ b/script/src/lib.rs @@ -9,8 +9,8 @@ mod verify_env; pub use crate::error::{ScriptError, TransactionScriptError}; pub use crate::types::{ - CoreMachine, ScriptGroup, ScriptGroupType, ScriptVersion, TransactionSnapshot, - TransactionState, VerifyResult, VmIsa, VmVersion, + CoreMachine, MachineContext, ResumableMachine, ScriptGroup, ScriptGroupType, ScriptVersion, + TransactionSnapshot, TransactionState, VerifyResult, VmIsa, VmVersion, }; pub use crate::verify::{TransactionScriptsSyscallsGenerator, TransactionScriptsVerifier}; pub use crate::verify_env::TxVerifyEnv; diff --git a/script/src/types.rs b/script/src/types.rs index 055c3c0f7e..45c36ea337 100644 --- a/script/src/types.rs +++ b/script/src/types.rs @@ -117,7 +117,7 @@ pub(crate) type Machine = TraceMachine; /// a chain of spawned machines. #[derive(Default)] pub struct MachineContext { - pub(crate) suspended_machines: Vec, + pub suspended_machines: Vec, } /// Data structure captured all environment data for a suspended machine From 626a23d0751d793fd75962ed9f6c7e779ae2a8d3 Mon Sep 17 00:00:00 2001 From: mohanson Date: Wed, 28 Jun 2023 10:14:48 +0800 Subject: [PATCH 016/267] Fix ci --- script/src/types.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/script/src/types.rs b/script/src/types.rs index 45c36ea337..1fdab303c1 100644 --- a/script/src/types.rs +++ b/script/src/types.rs @@ -117,6 +117,7 @@ pub(crate) type Machine = TraceMachine; /// a chain of spawned machines. #[derive(Default)] pub struct MachineContext { + /// A stack of ResumableMachines. pub suspended_machines: Vec, } @@ -175,8 +176,11 @@ impl TryFrom<&SpawnData> for ResumePoint { } } +/// An enumerated type indicating the type of the Machine. pub enum ResumableMachine { + /// Root machine instance. Initial(Machine), + /// A machine which created by spawn syscall. Spawn(Machine, SpawnData), } @@ -211,10 +215,12 @@ impl ResumableMachine { set_vm_max_cycles(self.machine_mut(), cycles) } + /// Add cycles to current machine. pub fn add_cycles(&mut self, cycles: Cycle) -> Result<(), VMInternalError> { self.machine_mut().machine.add_cycles(cycles) } + /// Run machine. pub fn run(&mut self) -> Result { self.machine_mut().run() } From f30eba94fb86efa10959da9982380d1c0535bd81 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 29 Jun 2023 08:56:23 +0800 Subject: [PATCH 017/267] Fix bats tests failed for rc version like `ckb 0.111.0-rc6` --- util/app-config/src/tests/cli.bats | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/app-config/src/tests/cli.bats b/util/app-config/src/tests/cli.bats index 08368f7de6..6212a0025b 100644 --- a/util/app-config/src/tests/cli.bats +++ b/util/app-config/src/tests/cli.bats @@ -31,7 +31,7 @@ _full_help() { function short_version { #@test run _short assert_success - assert_output --regexp "^ckb [0-9.]+[-]?[a-z]*$" + assert_output --regexp "^ckb [0-9.]+[-]?[a-z0-9]*$" } #@test "ckb --version" { From 189278d1302c19969803b555fce951ef5488aae1 Mon Sep 17 00:00:00 2001 From: mohanson Date: Fri, 30 Jun 2023 15:31:36 +0800 Subject: [PATCH 018/267] chore(script): make update_caller_machine public --- script/src/lib.rs | 1 + script/src/syscalls/spawn.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/script/src/lib.rs b/script/src/lib.rs index 8b2d128039..7f73ca7ee6 100644 --- a/script/src/lib.rs +++ b/script/src/lib.rs @@ -8,6 +8,7 @@ mod verify; mod verify_env; pub use crate::error::{ScriptError, TransactionScriptError}; +pub use crate::syscalls::spawn::update_caller_machine; pub use crate::types::{ CoreMachine, MachineContext, ResumableMachine, ScriptGroup, ScriptGroupType, ScriptVersion, TransactionSnapshot, TransactionState, VerifyResult, VmIsa, VmVersion, diff --git a/script/src/syscalls/spawn.rs b/script/src/syscalls/spawn.rs index 1f8e369d56..78f2c9715d 100644 --- a/script/src/syscalls/spawn.rs +++ b/script/src/syscalls/spawn.rs @@ -292,6 +292,7 @@ pub fn build_child_machine< Ok(machine_child) } +/// Write the data generated by callee back to the caller memory space. pub fn update_caller_machine( caller: &mut Mac, callee_exit_code: i8, From 68463088c6cc343e4ca0153b61f3eb9d9048316b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 3 Jul 2023 15:40:45 +0800 Subject: [PATCH 019/267] Fix bats test `ckb_run` should wait ckb process exit --- util/app-config/src/tests/ckb_run_replay.bats | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/util/app-config/src/tests/ckb_run_replay.bats b/util/app-config/src/tests/ckb_run_replay.bats index be7eba5a3b..6ca4dd5405 100644 --- a/util/app-config/src/tests/ckb_run_replay.bats +++ b/util/app-config/src/tests/ckb_run_replay.bats @@ -3,12 +3,18 @@ bats_load_library 'bats-assert' bats_load_library 'bats-support' _ckb_run() { - ckb run -C ${CKB_DIRNAME} 1>${TMP_DIR}/ckb_run.log 2>&1 & - echo $! >${TMP_DIR}/ckb_run.pid + ckb run -C ${CKB_DIRNAME} &> ${TMP_DIR}/ckb_run.log & + PID=$! sleep 5 - kill "$(<"${TMP_DIR}/ckb_run.pid")" + kill ${PID} + + while kill -0 ${PID}; do + echo "waiting for ckb to exit" + sleep 1 + done tail -n 50 ${TMP_DIR}/ckb_run.log } + _ckb_replay() { # from 1 to 2500 enough to trigger profile action CKB_LOG=err ckb replay -C ${CKB_DIRNAME} --tmp-target ${TMP_DIR} --profile 1 2500 From cda77089466d761b4141fd3f5edffe4e617e1f9a Mon Sep 17 00:00:00 2001 From: driftluo Date: Tue, 4 Jul 2023 14:01:41 +0800 Subject: [PATCH 020/267] chore: add comment to header sync with empty --- sync/src/synchronizer/block_fetcher.rs | 12 ++++++++++-- sync/src/synchronizer/headers_process.rs | 5 +++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index ec0688f238..14d882933e 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -97,8 +97,16 @@ impl<'a> BlockFetcher<'a> { } } - // This peer has nothing interesting. - let best_known = self.peer_best_known_header()?; + let best_known = match self.peer_best_known_header() { + Some(t) => t, + None => { + debug!( + "peer {} doesn't have best known header, ignore it", + self.peer + ); + return None; + } + }; if !best_known.is_better_than(self.active_chain.total_difficulty()) { // Advancing this peer's last_common_header is unnecessary for block-sync mechanism. // However, RPC `get_peers`, returns peers information which includes diff --git a/sync/src/synchronizer/headers_process.rs b/sync/src/synchronizer/headers_process.rs index ec0b732335..5a875f79ba 100644 --- a/sync/src/synchronizer/headers_process.rs +++ b/sync/src/synchronizer/headers_process.rs @@ -108,6 +108,11 @@ impl<'a> HeadersProcess<'a> { } if headers.is_empty() { + // Empty means that the other peer's tip may be consistent with our own best known, + // but empty cannot 100% confirm this, so it does not set the other peer's best header + // to the shared best known. + // This action means that if the newly connected node has not been sync with headers, + // it cannot be used as a synchronization node. debug!("HeadersProcess is_empty (synchronized)"); if let Some(mut state) = self.synchronizer.peers().state.get_mut(&self.peer) { self.synchronizer From 7e0d2799cef410acbf22ac60a1cb0bb1037453c2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 5 Jul 2023 11:25:23 +0800 Subject: [PATCH 021/267] Fix relayer::request_proposal_txs dedup proposals Signed-off-by: Eval EXEC --- Cargo.lock | 16 +++++++++++++--- Cargo.toml | 1 + sync/Cargo.toml | 1 + sync/src/relayer/mod.rs | 9 +++++---- 4 files changed, 20 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e88372dd0..4ed5639ac5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1188,7 +1188,7 @@ dependencies = [ "ckb-util", "ckb-verification", "ckb-verification-traits", - "itertools", + "itertools 0.10.5", "jsonrpc-core", "jsonrpc-derive", "jsonrpc-http-server", @@ -1342,6 +1342,7 @@ dependencies = [ "faux", "futures", "governor", + "itertools 0.11.0", "keyed_priority_queue", "lru", "once_cell", @@ -1700,7 +1701,7 @@ dependencies = [ "ciborium", "clap 3.2.23", "criterion-plot", - "itertools", + "itertools 0.10.5", "lazy_static", "num-traits", "oorandom", @@ -1721,7 +1722,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -2740,6 +2741,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.1" diff --git a/Cargo.toml b/Cargo.toml index 4852aeae94..79cb81e4c3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,6 +93,7 @@ members = [ [workspace.dependencies] tempfile = "3" +itertools = "0.11.0" [profile.release] overflow-checks = true diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 2906f518d4..ed636def98 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -39,6 +39,7 @@ bitflags = "1.0" dashmap = "4.0" keyed_priority_queue = "0.3" sled = "0.34.7" +itertools.workspace = true [dev-dependencies] ckb-test-chain-utils = { path = "../util/test-chain-utils", version = "= 0.112.0-pre" } diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 92a03a77ea..38a31a19af 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -39,6 +39,7 @@ use ckb_types::{ prelude::*, }; use ckb_util::Mutex; +use itertools::Itertools; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -238,11 +239,11 @@ impl Relayer { nc: &dyn CKBProtocolContext, peer: PeerIndex, block_hash_and_number: BlockNumberAndHash, - mut proposals: Vec, + proposals: Vec, ) { - proposals.dedup(); let tx_pool = self.shared.shared().tx_pool_controller(); - let fresh_proposals = match tx_pool.fresh_proposals_filter(proposals) { + let fresh_proposals: Vec = match tx_pool.fresh_proposals_filter(proposals) + { Err(err) => { debug_target!( crate::LOG_TARGET_RELAY, @@ -251,7 +252,7 @@ impl Relayer { ); return; } - Ok(fresh_proposals) => fresh_proposals, + Ok(fresh_proposals) => fresh_proposals.into_iter().unique().collect(), }; let to_ask_proposals: Vec = self From aa9071b5bd119cd80903eb9c099b8fefd091dc5b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 5 Jul 2023 17:40:27 +0800 Subject: [PATCH 022/267] Fix `hermit-abi` yanked --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e88372dd0..b78ea3f119 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2466,9 +2466,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -2719,7 +2719,7 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi 0.3.2", "io-lifetimes", "rustix 0.37.7", "windows-sys 0.48.0", From 77a327180e1d28b1f8504c9e9234627197a641cb Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 5 Jul 2023 17:47:05 +0800 Subject: [PATCH 023/267] deps: let `ckb-rpc` use `workspace.itertools` --- Cargo.lock | 2 +- rpc/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4ed5639ac5..b96ef4e12f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1188,7 +1188,7 @@ dependencies = [ "ckb-util", "ckb-verification", "ckb-verification-traits", - "itertools 0.10.5", + "itertools 0.11.0", "jsonrpc-core", "jsonrpc-derive", "jsonrpc-http-server", diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 80a8c51fb9..212b5b2d06 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -43,7 +43,7 @@ ckb-tx-pool = { path = "../tx-pool", version = "= 0.112.0-pre" } ckb-memory-tracker = { path = "../util/memory-tracker", version = "= 0.112.0-pre" } ckb-pow = { path = "../pow", version = "= 0.112.0-pre" } ckb-indexer = { path = "../util/indexer", version = "= 0.112.0-pre" } -itertools = "0.10.5" +itertools.workspace = true tokio = "1" [dev-dependencies] From 331a0444bcb7d580064c1a9c38a0adb0bf428702 Mon Sep 17 00:00:00 2001 From: mohanson Date: Thu, 6 Jul 2023 14:40:50 +0800 Subject: [PATCH 024/267] feat(script): add peak_memory syscall --- script/src/syscalls/mod.rs | 3 ++ script/src/syscalls/peak_memory.rs | 30 ++++++++++++++++ script/src/syscalls/spawn.rs | 3 ++ script/src/verify.rs | 10 ++++-- .../tests/ckb_latest/features_since_v2023.rs | 32 ++++++++++++++++++ script/testdata/Makefile | 4 ++- script/testdata/spawn_peak_memory | Bin 0 -> 4240 bytes script/testdata/spawn_peak_memory.c | 28 +++++++++++++++ 8 files changed, 107 insertions(+), 3 deletions(-) create mode 100644 script/src/syscalls/peak_memory.rs create mode 100755 script/testdata/spawn_peak_memory create mode 100644 script/testdata/spawn_peak_memory.c diff --git a/script/src/syscalls/mod.rs b/script/src/syscalls/mod.rs index 5b5f84fc52..258c30b3ec 100644 --- a/script/src/syscalls/mod.rs +++ b/script/src/syscalls/mod.rs @@ -11,6 +11,7 @@ mod load_script; mod load_script_hash; mod load_tx; mod load_witness; +mod peak_memory; mod set_content; pub(crate) mod spawn; mod utils; @@ -35,6 +36,7 @@ pub use self::load_script::LoadScript; pub use self::load_script_hash::LoadScriptHash; pub use self::load_tx::LoadTx; pub use self::load_witness::LoadWitness; +pub use self::peak_memory::PeakMemory; pub use self::set_content::SetContent; pub use self::spawn::Spawn; pub use self::vm_version::VMVersion; @@ -77,6 +79,7 @@ pub const SPAWN: u64 = 2101; pub const GET_MEMORY_LIMIT: u64 = 2102; pub const SET_CONTENT: u64 = 2103; pub const LOAD_EXTENSION: u64 = 2104; +pub const PEAK_MEMORY: u64 = 2105; pub const DEBUG_PRINT_SYSCALL_NUMBER: u64 = 2177; #[cfg(test)] pub const DEBUG_PAUSE: u64 = 2178; diff --git a/script/src/syscalls/peak_memory.rs b/script/src/syscalls/peak_memory.rs new file mode 100644 index 0000000000..e86e9d034f --- /dev/null +++ b/script/src/syscalls/peak_memory.rs @@ -0,0 +1,30 @@ +use crate::syscalls::PEAK_MEMORY; +use ckb_vm::{ + registers::{A0, A7}, + Error as VMError, Register, SupportMachine, Syscalls, +}; + +#[derive(Debug, Default)] +pub struct PeakMemory { + value: u64, +} + +impl PeakMemory { + pub fn new(value: u64) -> Self { + Self { value } + } +} + +impl Syscalls for PeakMemory { + fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> { + Ok(()) + } + + fn ecall(&mut self, machine: &mut Mac) -> Result { + if machine.registers()[A7].to_u64() != PEAK_MEMORY { + return Ok(false); + } + machine.set_register(A0, Mac::REG::from_u64(self.value)); + Ok(true) + } +} diff --git a/script/src/syscalls/spawn.rs b/script/src/syscalls/spawn.rs index 78f2c9715d..3ff98bcec2 100644 --- a/script/src/syscalls/spawn.rs +++ b/script/src/syscalls/spawn.rs @@ -287,6 +287,9 @@ pub fn build_child_machine< *callee_peak_memory, Arc::clone(context), ))); + let machine_builder = machine_builder.syscall(Box::new( + syscalls_generator.build_peak_memory(*callee_peak_memory), + )); let mut machine_child = Machine::new(machine_builder.build()); set_vm_max_cycles(&mut machine_child, cycles_limit); Ok(machine_child) diff --git a/script/src/verify.rs b/script/src/verify.rs index ee1e524e32..0e62f8497c 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -6,8 +6,8 @@ use crate::{ syscalls::{ spawn::{build_child_machine, update_caller_machine}, CurrentCycles, Debugger, Exec, GetMemoryLimit, LoadCell, LoadCellData, LoadExtension, - LoadHeader, LoadInput, LoadScript, LoadScriptHash, LoadTx, LoadWitness, SetContent, Spawn, - VMVersion, + LoadHeader, LoadInput, LoadScript, LoadScriptHash, LoadTx, LoadWitness, PeakMemory, + SetContent, Spawn, VMVersion, }, type_id::TypeIdSystemScript, types::{ @@ -257,6 +257,11 @@ impl PeakMemory { + PeakMemory::new(peak_memory) + } + /// Generate same syscalls. The result does not contain spawn syscalls. pub fn generate_same_syscalls( &self, @@ -323,6 +328,7 @@ impl 1); } + +#[test] +fn check_spawn_peak_memory() { + let script_version = SCRIPT_VERSION; + + let (spawn_caller_cell, spawn_caller_data_hash) = + load_cell_from_path("testdata/spawn_peak_memory"); + + let spawn_caller_script = Script::new_builder() + .hash_type(script_version.data_hash_type().into()) + .code_hash(spawn_caller_data_hash) + .build(); + let output = CellOutputBuilder::default() + .capacity(capacity_bytes!(100).pack()) + .lock(spawn_caller_script) + .build(); + let input = CellInput::new(OutPoint::null(), 0); + + let transaction = TransactionBuilder::default().input(input).build(); + let dummy_cell = create_dummy_cell(output); + + let rtx = ResolvedTransaction { + transaction, + resolved_cell_deps: vec![spawn_caller_cell], + resolved_inputs: vec![dummy_cell], + resolved_dep_groups: vec![], + }; + let verifier = TransactionScriptsVerifierWithEnv::new(); + let result = verifier.verify_without_limit(script_version, &rtx); + assert_eq!(result.is_ok(), script_version >= ScriptVersion::V2); +} diff --git a/script/testdata/Makefile b/script/testdata/Makefile index 0584b38224..f78b9a4fe2 100644 --- a/script/testdata/Makefile +++ b/script/testdata/Makefile @@ -68,7 +68,8 @@ ALL_BINS := jalr_zero \ spawn_caller_strcat_wrap \ spawn_caller_strcat \ spawn_caller_strcat_data_hash \ - spawn_recursive + spawn_recursive \ + spawn_peak_memory ALL_LIBS := is_even.lib \ add1.lib sub1.lib mul2.lib div2.lib @@ -154,3 +155,4 @@ spawn_caller_strcat_data_hash: spawn_caller_strcat_data_hash.c spawn_peak_memory_2m_to_32m: spawn_peak_memory_2m_to_32m.c spawn_peak_memory_4m_to_32m: spawn_peak_memory_4m_to_32m.c spawn_recursive: spawn_recursive.c +spawn_peak_memory: spawn_peak_memory.c diff --git a/script/testdata/spawn_peak_memory b/script/testdata/spawn_peak_memory new file mode 100755 index 0000000000000000000000000000000000000000..417eaca59965076e7f47f8639ee4e36979cce6c9 GIT binary patch literal 4240 zcmeHLeN0nV6hH6vwS|ZRD;qdbkqT_Yb$m^6D52t`%xpr2x?r+Nc~oFpg|_0yV(bT{ z7R8U$v@C83(##pR7&BwaT+}hbG+9ixKZr|AHawFRlXl*>76jj?3YD@! zCh${=?plvaS?1_cGRMs)54L~cxTOTyEf>hTT+BzuDjUD;zZIH($FH1#3%bG)58~yK zE2B5Y3rCwCnI3-=WXIVM!?*ECNy$;3sEWC4Oj`T^!yEK=tDLu68B`C>#C=C=P|4-m z%Fpdq-zor)ZQx6l(>AGUVg8IhW#e%UixY9hRPs+UmiCIZUiBDQ(^2ItzXq6+vGJcL ze!HoSPwjWyu}vz1XU9m-w(oe-bA=CQJB;eosZUOXI;+?kEhFYWiwuvG`Gc?$al31g zg(Hhk9qFy*Yqs=zT)GOCa&v`ZG(DK>9xf8vhCx-izDUJwuCQ%Ru5xm#IJ<9XboC5h z(~GO-+NM%AR}dy)2`7-vHAgT9t2Whi*0A+*T!)RtnJ%HRhxHtsg6^T41D+hWKpxt^+(Nol_@UZ}1Z>{wrsFeZmD*~WK^~(`a{Tw2yUxtY4XAx2T z3?ix@5K(d7lasL?uM_&(JYG37Te5v^l>v`81H>FH_F-%KUtVi>;_}ml-l4{xM;`~? zZG3xNcEc9h@~9;!Ti?2}vp@au*3gd8og?oBtNVrMXg2f2l@rb_)0K|ZZdleFni0x4 zaB|mzt5*pvi&#g1OA+h`Wk>S{5{8oUW-eAKY6dNv4tE}Aj|{%QX;0@KcCQ{+#F62K z_NwW1LbHogU*$mmz>yK1{gt;BWNX|odD@oY9%FqnWI5Kgt`rhn%#bVN?|Tml-THtd z`buNyuJ{;cZWPJm-)i|01w>4Ow2+)0<-6xLQzWii*P%-M+mRDrEt4}CAms~ZS zpii*-et9hJ5!s>3PW;uBFnn}}EMN;c*11+*=xNs-EfvZ}xaHl~>ZVfEU3cSTmz3G^ zl7bf=LT=q%P@|>p`Z2{_BE~k7C{ifi~yi9*O@IQi@q8( z7Q-P6XiWQS^cFp640XG9oAgElXzKUXTXyM9CjDX1>_&suMV?uNYO@(M!hWOCu#a>? zMySzSET;PE0~Q1Mka=by?_lz7CU+zvBeNDkQJ&xze7N8T^E`=1i^Nm^)9)maRyvOj z5EWrBMVrLaH1u9Z?`!0)7%q{v8XK3O()fJo9OO&)LFpPe*9FZ_@l|Yz4T~?8;!CA? z%A*}nn_>YrgpZ$|zj7(Q6z@#oLgT1Ui +#include + +#include "ckb_syscalls.h" + +int main(int argc, char *argv[]) { + int peak_memory = ckb_peak_memory(); + if (peak_memory != (argc + 1) * 8) { + return 1; + } + if (peak_memory < 56) { + int spawn_argc = argc + 1; + const char *spawn_argv[] = {"", "", "", "", "", "", "", ""}; + int8_t spawn_exit_code = 255; + spawn_args_t spgs = { + .memory_limit = 8, + .exit_code = &spawn_exit_code, + .content = NULL, + .content_length = NULL, + }; + uint64_t success = ckb_spawn(0, 3, 0, spawn_argc, spawn_argv, &spgs); + if (success != 0) { + return success; + } + } else { + return 0; + } +} From afcf0f020c553ded0268a309cc7f3c49db296d97 Mon Sep 17 00:00:00 2001 From: mohanson Date: Thu, 6 Jul 2023 17:55:25 +0800 Subject: [PATCH 025/267] Rename peak_memory to current_memory --- .../syscalls/{peak_memory.rs => current_memory.rs} | 10 +++++----- script/src/syscalls/mod.rs | 6 +++--- script/src/syscalls/spawn.rs | 2 +- script/src/verify.rs | 12 ++++++------ .../verify/tests/ckb_latest/features_since_v2023.rs | 6 +++--- script/testdata/Makefile | 4 ++-- .../{spawn_peak_memory => spawn_current_memory} | Bin .../{spawn_peak_memory.c => spawn_current_memory.c} | 2 +- 8 files changed, 21 insertions(+), 21 deletions(-) rename script/src/syscalls/{peak_memory.rs => current_memory.rs} (71%) rename script/testdata/{spawn_peak_memory => spawn_current_memory} (100%) rename script/testdata/{spawn_peak_memory.c => spawn_current_memory.c} (93%) diff --git a/script/src/syscalls/peak_memory.rs b/script/src/syscalls/current_memory.rs similarity index 71% rename from script/src/syscalls/peak_memory.rs rename to script/src/syscalls/current_memory.rs index e86e9d034f..401d778191 100644 --- a/script/src/syscalls/peak_memory.rs +++ b/script/src/syscalls/current_memory.rs @@ -1,27 +1,27 @@ -use crate::syscalls::PEAK_MEMORY; +use crate::syscalls::CURRENT_MEMORY; use ckb_vm::{ registers::{A0, A7}, Error as VMError, Register, SupportMachine, Syscalls, }; #[derive(Debug, Default)] -pub struct PeakMemory { +pub struct CurrentMemory { value: u64, } -impl PeakMemory { +impl CurrentMemory { pub fn new(value: u64) -> Self { Self { value } } } -impl Syscalls for PeakMemory { +impl Syscalls for CurrentMemory { fn initialize(&mut self, _machine: &mut Mac) -> Result<(), VMError> { Ok(()) } fn ecall(&mut self, machine: &mut Mac) -> Result { - if machine.registers()[A7].to_u64() != PEAK_MEMORY { + if machine.registers()[A7].to_u64() != CURRENT_MEMORY { return Ok(false); } machine.set_register(A0, Mac::REG::from_u64(self.value)); diff --git a/script/src/syscalls/mod.rs b/script/src/syscalls/mod.rs index 258c30b3ec..17012469a5 100644 --- a/script/src/syscalls/mod.rs +++ b/script/src/syscalls/mod.rs @@ -1,4 +1,5 @@ mod current_cycles; +mod current_memory; mod debugger; mod exec; mod get_memory_limit; @@ -11,7 +12,6 @@ mod load_script; mod load_script_hash; mod load_tx; mod load_witness; -mod peak_memory; mod set_content; pub(crate) mod spawn; mod utils; @@ -24,6 +24,7 @@ mod pause; mod tests; pub use self::current_cycles::CurrentCycles; +pub use self::current_memory::CurrentMemory; pub use self::debugger::Debugger; pub use self::exec::Exec; pub use self::get_memory_limit::GetMemoryLimit; @@ -36,7 +37,6 @@ pub use self::load_script::LoadScript; pub use self::load_script_hash::LoadScriptHash; pub use self::load_tx::LoadTx; pub use self::load_witness::LoadWitness; -pub use self::peak_memory::PeakMemory; pub use self::set_content::SetContent; pub use self::spawn::Spawn; pub use self::vm_version::VMVersion; @@ -79,7 +79,7 @@ pub const SPAWN: u64 = 2101; pub const GET_MEMORY_LIMIT: u64 = 2102; pub const SET_CONTENT: u64 = 2103; pub const LOAD_EXTENSION: u64 = 2104; -pub const PEAK_MEMORY: u64 = 2105; +pub const CURRENT_MEMORY: u64 = 2105; pub const DEBUG_PRINT_SYSCALL_NUMBER: u64 = 2177; #[cfg(test)] pub const DEBUG_PAUSE: u64 = 2178; diff --git a/script/src/syscalls/spawn.rs b/script/src/syscalls/spawn.rs index 3ff98bcec2..15d48b6908 100644 --- a/script/src/syscalls/spawn.rs +++ b/script/src/syscalls/spawn.rs @@ -288,7 +288,7 @@ pub fn build_child_machine< Arc::clone(context), ))); let machine_builder = machine_builder.syscall(Box::new( - syscalls_generator.build_peak_memory(*callee_peak_memory), + syscalls_generator.build_current_memory(*callee_peak_memory), )); let mut machine_child = Machine::new(machine_builder.build()); set_vm_max_cycles(&mut machine_child, cycles_limit); diff --git a/script/src/verify.rs b/script/src/verify.rs index 0e62f8497c..0d1356aeda 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -5,8 +5,8 @@ use crate::{ error::{ScriptError, TransactionScriptError}, syscalls::{ spawn::{build_child_machine, update_caller_machine}, - CurrentCycles, Debugger, Exec, GetMemoryLimit, LoadCell, LoadCellData, LoadExtension, - LoadHeader, LoadInput, LoadScript, LoadScriptHash, LoadTx, LoadWitness, PeakMemory, + CurrentCycles, CurrentMemory, Debugger, Exec, GetMemoryLimit, LoadCell, LoadCellData, + LoadExtension, LoadHeader, LoadInput, LoadScript, LoadScriptHash, LoadTx, LoadWitness, SetContent, Spawn, VMVersion, }, type_id::TypeIdSystemScript, @@ -257,9 +257,9 @@ impl PeakMemory { - PeakMemory::new(peak_memory) + /// Build syscall: current_memory + pub fn build_current_memory(&self, current_memory: u64) -> CurrentMemory { + CurrentMemory::new(current_memory) } /// Generate same syscalls. The result does not contain spawn syscalls. @@ -328,7 +328,7 @@ impl Date: Wed, 5 Jul 2023 16:47:34 +0800 Subject: [PATCH 026/267] refactor(scripts): current_cycles returns the sum of cycles of all living machine --- script/src/syscalls/current_cycles.rs | 14 ++++-- script/src/syscalls/spawn.rs | 5 +++ .../syscalls/tests/vm_latest/syscalls_2.rs | 2 +- script/src/types.rs | 4 ++ script/src/verify.rs | 10 +++-- .../tests/ckb_latest/features_since_v2023.rs | 34 +++++++++++++++ script/testdata/Makefile | 6 ++- script/testdata/spawn_callee_current_cycles | Bin 0 -> 4744 bytes script/testdata/spawn_callee_current_cycles.c | 15 +++++++ script/testdata/spawn_caller_current_cycles | Bin 0 -> 7008 bytes script/testdata/spawn_caller_current_cycles.c | 41 ++++++++++++++++++ 11 files changed, 122 insertions(+), 9 deletions(-) create mode 100755 script/testdata/spawn_callee_current_cycles create mode 100644 script/testdata/spawn_callee_current_cycles.c create mode 100755 script/testdata/spawn_caller_current_cycles create mode 100644 script/testdata/spawn_caller_current_cycles.c diff --git a/script/src/syscalls/current_cycles.rs b/script/src/syscalls/current_cycles.rs index 15528ce111..dc87cd281c 100644 --- a/script/src/syscalls/current_cycles.rs +++ b/script/src/syscalls/current_cycles.rs @@ -5,11 +5,13 @@ use ckb_vm::{ }; #[derive(Debug, Default)] -pub struct CurrentCycles {} +pub struct CurrentCycles { + base: u64, +} impl CurrentCycles { - pub fn new() -> Self { - Self {} + pub fn new(base: u64) -> Self { + Self { base } } } @@ -22,7 +24,11 @@ impl Syscalls for CurrentCycles { if machine.registers()[A7].to_u64() != CURRENT_CYCLES { return Ok(false); } - machine.set_register(A0, Mac::REG::from_u64(machine.cycles())); + let cycles = self + .base + .checked_add(machine.cycles()) + .ok_or(VMError::CyclesOverflow)?; + machine.set_register(A0, Mac::REG::from_u64(cycles)); Ok(true) } } diff --git a/script/src/syscalls/spawn.rs b/script/src/syscalls/spawn.rs index 15d48b6908..7c96b2b682 100644 --- a/script/src/syscalls/spawn.rs +++ b/script/src/syscalls/spawn.rs @@ -155,6 +155,7 @@ where caller_exit_code_addr: exit_code_addr.to_u64(), caller_content_addr: content_addr.to_u64(), caller_content_length_addr: content_length_addr.to_u64(), + cycles_base: machine.cycles(), }; let mut machine_child = build_child_machine( &self.script_group, @@ -258,6 +259,7 @@ pub fn build_child_machine< callee_memory_limit, content, content_length, + cycles_base, .. } = spawn_data; @@ -275,6 +277,9 @@ pub fn build_child_machine< let machine_builder = machine_syscalls .into_iter() .fold(machine_builder, |builder, syscall| builder.syscall(syscall)); + let machine_builder = machine_builder.syscall(Box::new( + syscalls_generator.build_current_cycles(*cycles_base), + )); let machine_builder = machine_builder.syscall(Box::new( syscalls_generator.build_get_memory_limit(*callee_memory_limit), )); diff --git a/script/src/syscalls/tests/vm_latest/syscalls_2.rs b/script/src/syscalls/tests/vm_latest/syscalls_2.rs index 85c44d7fd3..b0ad504a91 100644 --- a/script/src/syscalls/tests/vm_latest/syscalls_2.rs +++ b/script/src/syscalls/tests/vm_latest/syscalls_2.rs @@ -53,7 +53,7 @@ fn test_current_cycles() { machine.set_cycles(cycles); - let result = CurrentCycles::new().ecall(&mut machine); + let result = CurrentCycles::new(0).ecall(&mut machine); assert!(result.unwrap()); assert_eq!(machine.registers()[A0], cycles); diff --git a/script/src/types.rs b/script/src/types.rs index 1fdab303c1..19434b7a9c 100644 --- a/script/src/types.rs +++ b/script/src/types.rs @@ -133,6 +133,7 @@ pub enum ResumePoint { caller_exit_code_addr: u64, caller_content_addr: u64, caller_content_length_addr: u64, + cycles_base: u64, }, } @@ -146,6 +147,7 @@ pub struct SpawnData { pub(crate) caller_exit_code_addr: u64, pub(crate) caller_content_addr: u64, pub(crate) caller_content_length_addr: u64, + pub(crate) cycles_base: u64, } impl TryFrom<&SpawnData> for ResumePoint { @@ -160,6 +162,7 @@ impl TryFrom<&SpawnData> for ResumePoint { caller_exit_code_addr, caller_content_addr, caller_content_length_addr, + cycles_base, } = value; Ok(ResumePoint::Spawn { callee_peak_memory: *callee_peak_memory, @@ -172,6 +175,7 @@ impl TryFrom<&SpawnData> for ResumePoint { caller_exit_code_addr: *caller_exit_code_addr, caller_content_addr: *caller_content_addr, caller_content_length_addr: *caller_content_length_addr, + cycles_base: *cycles_base, }) } } diff --git a/script/src/verify.rs b/script/src/verify.rs index 0d1356aeda..af1182af12 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -137,8 +137,8 @@ impl { /// Build syscall: current_cycles - pub fn build_current_cycles(&self) -> CurrentCycles { - CurrentCycles::new() + pub fn build_current_cycles(&self, base: u64) -> CurrentCycles { + CurrentCycles::new(base) } /// Build syscall: vm_version @@ -299,7 +299,6 @@ impl= ScriptVersion::V1 { syscalls.append(&mut vec![ Box::new(self.build_vm_version()), - Box::new(self.build_current_cycles()), Box::new(self.build_exec( Arc::clone(&script_group_input_indices), Arc::clone(&script_group_output_indices), @@ -323,6 +322,9 @@ impl>, ) -> Vec)>> { let mut syscalls = self.generate_same_syscalls(script_version, script_group); + if script_version >= ScriptVersion::V1 { + syscalls.push(Box::new(self.build_current_cycles(0))); + } if script_version >= ScriptVersion::V2 { syscalls.append(&mut vec![ Box::new(self.build_get_memory_limit(8)), @@ -1044,6 +1046,7 @@ impl { let spawn_data = SpawnData { callee_peak_memory: *callee_peak_memory, @@ -1053,6 +1056,7 @@ impl= ScriptVersion::V2); } + +#[test] +fn check_spawn_current_cycles() { + let script_version = SCRIPT_VERSION; + + let (spawn_caller_cell, spawn_caller_data_hash) = + load_cell_from_path("testdata/spawn_caller_current_cycles"); + let (spawn_callee_cell, _spawn_callee_data_hash) = + load_cell_from_path("testdata/spawn_callee_current_cycles"); + + let spawn_caller_script = Script::new_builder() + .hash_type(script_version.data_hash_type().into()) + .code_hash(spawn_caller_data_hash) + .build(); + let output = CellOutputBuilder::default() + .capacity(capacity_bytes!(100).pack()) + .lock(spawn_caller_script) + .build(); + let input = CellInput::new(OutPoint::null(), 0); + + let transaction = TransactionBuilder::default().input(input).build(); + let dummy_cell = create_dummy_cell(output); + + let rtx = ResolvedTransaction { + transaction, + resolved_cell_deps: vec![spawn_caller_cell, spawn_callee_cell], + resolved_inputs: vec![dummy_cell], + resolved_dep_groups: vec![], + }; + let verifier = TransactionScriptsVerifierWithEnv::new(); + let result = verifier.verify_without_limit(script_version, &rtx); + assert_eq!(result.is_ok(), script_version >= ScriptVersion::V2); +} diff --git a/script/testdata/Makefile b/script/testdata/Makefile index 79df20d70a..e57b995194 100644 --- a/script/testdata/Makefile +++ b/script/testdata/Makefile @@ -69,7 +69,9 @@ ALL_BINS := jalr_zero \ spawn_caller_strcat \ spawn_caller_strcat_data_hash \ spawn_recursive \ - spawn_current_memory + spawn_current_memory \ + spawn_caller_current_cycles \ + spawn_callee_current_cycles ALL_LIBS := is_even.lib \ add1.lib sub1.lib mul2.lib div2.lib @@ -138,12 +140,14 @@ get_memory_limit: get_memory_limit.c set_content: set_content.c spawn_big_content_length: spawn_big_content_length.c spawn_big_memory_size: spawn_big_memory_size.c +spawn_callee_current_cycles: spawn_callee_current_cycles.c spawn_callee_exec_callee: spawn_callee_exec_callee.c spawn_callee_exec_caller: spawn_callee_exec_caller.c spawn_callee_get_memory_limit: spawn_callee_get_memory_limit.c spawn_callee_out_of_cycles: spawn_callee_out_of_cycles.c spawn_callee_set_content: spawn_callee_out_of_cycles.c spawn_callee_strcat: spawn_callee_strcat.c +spawn_caller_current_cycles: spawn_caller_current_cycles.c spawn_caller_exec: spawn_caller_exec.c spawn_caller_get_memory_limit: spawn_caller_get_memory_limit.c spawn_caller_out_of_cycles: spawn_caller_out_of_cycles.c diff --git a/script/testdata/spawn_callee_current_cycles b/script/testdata/spawn_callee_current_cycles new file mode 100755 index 0000000000000000000000000000000000000000..b726e5699fe594cf5ed4a793efc4c3e0ca1d810d GIT binary patch literal 4744 zcmeHLe{56L9{--(+j~2ybhFEwW}B21H`s%^(JAnN*>1KSsDl{s1(FBr?&>zR3T*)~ zS?rH>T{l2UNedyy{Rl+R5OtX~yu7#9w~UdzkoaDL)0pU3E1hwnGwU44=H5BG_kip3 z{pBCVWZ&eR&-s48pYQp8?>V>k_S`p2Pn#KpfC>XsKs2*tGf0khl{g?i18f8t?v=0% zBkX6f?do!gm;pO8bef5ve39?x9)LBx=%fhQqE1*GZfhm~+B`9iqD!*BFCmRH=6Fm+ zqgkTK)rh-XT zPaS$h|A-tJk=GrNbHnrTe>vXr*9a#ww@~j+H`#sa@Hgr4dxW|NNQ{~}HRLjp+ zJv1bahAMG(V+#JkF{ZVE%&EzDZoM5f<`j+khCS0<;zWwf?D>}o&phOa;LJ(f{1)xe z@6$bXOask*0y9E>^aEk#;Vo#yEU(*`v&QT(k?U}ml4=PXkA?!gQ_ZtZ zZlXA$4<-%5u_UN>Z!)M^*6De&pw7>(WWAC2#Hx8y!%!n;y9k>w2}|h2?Ehho)m1h0 zG%(F_tJl4?x-=jh7-T|+XCM~;+x5^PZhmXWrLZ!tOd9#PedMQ{F|WIi8#Huq5e6U} zZgigmEgywmE(%)QdgT~#>t!+G)+@t^TQ7qVw_b!1w_d=A8{UWMX;mofhik_|VL4jp znQO;(M?>Kf5OcJ7lkVal!|qsK;hUA=_<=#Z-YAEgC>k$L;5Fe5jPUJ<(r*#keoz{l zF1D)`r|MOzf0pY6=Mp$+ewd@tf zRY>SmEtA*k6|XUsr+hfGUqNC?O#IHx*?fNLju0PHoC!o`q)GL$ z>iEfInq0YB;g+ip&)gp`;HP}U4??UusEC=@Me}7-x8v9AZ{bRQ+&U0{DxE$b4e=VL zAXeON{7=Y#YreQ$IrjDd$w6|FEb?vF`stkA)nwLgb3B_**`oh!y&b&}koBYKkJ@x? zfA{zI|K-55ld?;m^!8hA3E8HO6+NTHv$g5xC-#i*P1KJHSy@bJ=fzI{_PO1@RYAya zP0tHESn#ubiB&6wwq=Yj%4!7fVOdMX_1yS_74zrnxP~!T)sdbf%R=w2o67#YZ zuY<272&d}iHVCZ&R{H@9)-Qb}S>=BXcYthd5T@Vslmt^uM21Dzg$%4AMTx+g3^3ANcZ)A5a`0*3Rj7(WwvZyER+YngMJ$TB}QX}jbXA5I}O*0zp zCs(p%@ABnx^X5O?0DaRH&|*#d$>*A&Sx{YrGh7~=9M(6^Xpl+y9eGCa8ts;uhwTHt zbCz;s`uB0|g^{v*EW&Bog?Wlk<>VPBRLcQ%k!Mk9E0@^Jx(%txC#E2Vy08;KLuEA) zs@%i&?8U;~70GT~5cCTo5~>!2`2`_<%|a2sefKyL-!~sg*eW5d%v~V*%L+vtm5};> z?EUm!O3jDeisFBWd6fM=&f6}aOTyL{I zW~q%ZLqqmptCjYd%5mGiZ0e>+H;M&^T%)I<1}B2o{n}!O|qc-99t^zxI}Vr2qf` literal 0 HcmV?d00001 diff --git a/script/testdata/spawn_callee_current_cycles.c b/script/testdata/spawn_callee_current_cycles.c new file mode 100644 index 0000000000..b84fb5353f --- /dev/null +++ b/script/testdata/spawn_callee_current_cycles.c @@ -0,0 +1,15 @@ +#include +#include +#include + +#include "ckb_syscalls.h" + +int main(int argc, char *argv[]) { + int caller_cycles = atoi(argv[0]); + // Callee's current cycles must >= caller's current cycles. + int callee_cycles = ckb_current_cycles(); + if (callee_cycles < caller_cycles) { + return 1; + } + return 0; +} diff --git a/script/testdata/spawn_caller_current_cycles b/script/testdata/spawn_caller_current_cycles new file mode 100755 index 0000000000000000000000000000000000000000..efa2fcec4dc2521451a7944a9c92c52bcb89966a GIT binary patch literal 7008 zcmeHMeNYqE7C*cBB7&%m6-%^6qEMMOkB=xyFcXj@N-gc{tM+52m9T_QQ6UjsMl@fW zHG$~((t>SlOW{3UwNs{}l0WqQDfyv|!#{cehn$`d9zz z40q=4@7#NS=bp1?@9s_RJ)qxgz%dN?g2Ru%v|@3#Psb?=7PA7=tAn*5K-U1saVo>yu?m-QJUdk$Yg!ycnVTl=$Yr}lbj>EkYmiG~dJTH5JU7ye+ z?n`!!fTB!r+CH9Dd8E_Plt7Ju?!cC+mTUPF&yQ`r^~&|HH@W*0r}M(nr}cUEJh@8> zgs)5>mwp!U@3|2n)f#D7XU}EqnjbxWj8rF5q4*U@7Z1nZ4X+D&U&P2v#wyfQHeILo zlxT!6V1D{?8!>krMEdrE*rFTOJn{rNr}jp9UPYP7VZ-!xJ!Y^QFr(e*k7_<(LANju zM&~J@g<566R*rxzclt${*e0~=--lMg7$giidPV>?`XwOJ4!>vAj`}54pdCL3+PhR~ zR+M&1r1zmQDZ}EZCG-6UUvlzeVk=iq3q)0rzPcISIEU=P)2%nlM3l?-p~Kd+0Kjiw zZjw><37H!UOAp!g$a{7idQ>uz+5A`+D^VttwW1LyP~-Y10W*w@E*mC4$zG7q4I@l<`D8arqy~a>+!53jVt|ljtt2zZfM(7u=GoDK=;=s6JQs*L&nyqG3BMJj zujoFeu-tOG(5>|qkGIhQ@6QoA6%B;#gJtAu^37nw37yJ$@*uWZ>;9a&L1!L(%{Snd zdwz=KQSw7`fUy4%PikfRZ%2YL|H-yLU3@qo3`YX7X!b;3e^h?xc7)LdoM$5%`m^kS z<7(vbLz%<&{Iky9l0d_Fgm8{8^hbIlL3%1%+jr@3HJ$IZ8{0hbLoKd^A~}Wbfo^z2 zW*LkGperI5_D3{Sfi8Iz+mJwI-dc3My1(|?Kd0Wgdv84c?%B~h!zYJ6?Q8EbOpT0g zyP0Dl)xCQ)0yE&dZ?Its!s_6m|6<`ZQXA@gd;A?b==ugD*9GgXSg8A{ zF^!bO!kL;F4;K3L)e)8FM*Ulq)|W${qz&$%*NcTU8kZMB=j(2^?923}HK^#bu3)qk zBi9FA0SJBe;h@I%H;l~6bZMkdVWEHd`UmQR5)ZPY4C{{(7i*-!`g+*i%zAvl+ZV46 zZaVi4%|>^+qi%JgD~Gt~3TIvN9DjGe_sxz6V+m&(+?p5)`=le&*Rqd|f{pBrb8I9J zMe#r1BtMfvOS7&F)RS3Ebic+G`mSa&Kt}^J3q$W#Tn^ATBN}M~1nZNqlLe0y-L~eF z?PP52?ehGJ*zGs=<<*_2dyQ(Z_%rkNFGv{bD$y6^_4sKc$RxQS9!~5__YCR?-yq0d zUZ;}@bHSdwq<|7G7do0k!z&{CqOKA@O=B{N3m7KDBI*%a=$gUoEyyWqDZe}frwwCz*R0{Xb?YcB%<#Cwz7r!p3QA3NTAraGxVb9o?r46uCz^nM%1jML|q+9 zn+TY<7t=HE@A?hNG*Q)w`}a)3`OqiV{59gpdhf@LF`<~jY)Po{^1TJU4%;blr_L*G z!T}l^OKfeRlypF=xC4}^wu%s<+A2hdYO4Sts;xLeR9i8GsI~$^RM9tMVq%`Zk%Di1 z{zegY|B8R>d%45km;|g3w`0(j_*0|peB9E*+Qv{-=iRqEm|RKzdh{=XQxb5Vp+Rs) zg1x@e*=@whs?2Yc;xRnF&oGoTyk?Zo4beL&R?+Ql`^Mm&E?gFi2`I5#AOgcd>WLuT zEp>L&)0!<^cql<1qLv@Jq&<$_Km znO+p??7pX>^yq0Y*~IjW^lH`fqk@m^<9T=8eS&q~Wi364Q~BdthhG_bt*@|$kV^67 z`u2J%Z~A5D3J=I#=xgz;5GdiPz7@*}cQo$o5Gn}A9zmV@+QQIcb)>C8T-0mLAzMhi zwzoW|qNM`gZbE*XNgg;_Fr7iTXrb~WA(*Z>lccG^MlT4S^uWYndy;1aZxSGuGsC-# zSV&_bI_c*xuV_7{4rkJ)ROe~oe1`Sr;42i~bnToHMYH>y3dv&~{4BmdD}_RQX+vV4 zc-a4fT4%ptTBv<}Nx}3&(?Um6{}ekTnfZ8vLTjD@(@UEL9rkf&2EFXP&ZEYAyatJaIYYR~Eym+k$veUx}C%k=THqK{NASB)i0Y8lg>xRy%<6 zShL_XHsL^GN@`lVdi9!&wd+iUM3K3;q_nKOVteI|ox7^4E!N#Nd-jsGXrS3Ln}OL3 z%w}LV1G5?ULuTL}w+B$m{qNt*A%8P3J<9&ij#|Xx84h{@_9 zMQC?pw#;T=HUqO6n9aa!27X@#6x{wakH!rfG8Iphq?ygD&Fb`&l+?AW6sc*dG*t?) zDr5eZUwX2iMb4|sEJO|3F+ielzNorpjk+vtSIV|Z=3-(l1aqmatlYgll@SU}EEKXp zuwX#|pt6)&tkqUiA*ih8y;e|F?<_J|O`tNDZY!=fRhmInwxi6t%~V}&B0*J*8obQ% zEG!gSETAHGR#uvKFoQ4=s!UdEby?wVtC{(O{ksdZ1~Y5({XKt#L-$(@vSm#aCvbo^ znjR`>L;Jo(8rQ3kvMarUJ6~A&5F3CWj{iOI7-x?*r`ht$c>~*I&ORP2Guuj}j762V zkK*j3xbqAC7=s^!_s`>>kYe5g_KP|D#hf9}`z?D7QZVY k$oH-pv|rAvo|~cI?fGt|&BxC7F*w1+U;1Dtl4sig9|w`;asU7T literal 0 HcmV?d00001 diff --git a/script/testdata/spawn_caller_current_cycles.c b/script/testdata/spawn_caller_current_cycles.c new file mode 100644 index 0000000000..f508cd3966 --- /dev/null +++ b/script/testdata/spawn_caller_current_cycles.c @@ -0,0 +1,41 @@ +#include +#include +#include + +#include "ckb_syscalls.h" + +int fib(int n) { + if (n < 2) { + return n; + } + return fib(n - 1) + fib(n - 2); +} + +int main() { + // Use invalid calculations to make the current cycles a larger value. + if (fib(20) != 6765) { + return 1; + } + + int cycles = ckb_current_cycles(); + char buffer[8]; + itoa(cycles, buffer, 10); + const char *argv[] = { &buffer[0] }; + int8_t spawn_exit_code = 255; + uint8_t spawn_content[80] = {}; + uint64_t spawn_content_length = 80; + spawn_args_t spgs = { + .memory_limit = 8, + .exit_code = &spawn_exit_code, + .content = &spawn_content[0], + .content_length = &spawn_content_length, + }; + int success = ckb_spawn(1, 3, 0, 1, argv, &spgs); + if (success != 0) { + return 1; + } + if (spawn_exit_code != 0) { + return 1; + } + return 0; +} From 6f3ca122e25a8132a75104a1b2251fa298cdc02c Mon Sep 17 00:00:00 2001 From: mohanson Date: Thu, 6 Jul 2023 10:20:40 +0800 Subject: [PATCH 027/267] User more strict checking in spawn_callee_current_cycles --- script/testdata/spawn_callee_current_cycles | Bin 4744 -> 4760 bytes script/testdata/spawn_callee_current_cycles.c | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/script/testdata/spawn_callee_current_cycles b/script/testdata/spawn_callee_current_cycles index b726e5699fe594cf5ed4a793efc4c3e0ca1d810d..1c2dd40aa077b6b63675e0cae5d1acad0e851907 100755 GIT binary patch delta 456 zcmeBBouN8GLNJVxkpTo8z$631jES1^On3PvZ)6jjm|($a$qo^>oVZY2&<4t}5r^op znRrr1+KPE=;}@IUiHDn}E4997UMBRhZQ^Y_b!hz4Z5)`yB<99yA|s{L1!Z;(L&M<7ro* z{L$jca~M?_izgpq)OIcrW@jm3d!zQmxbwL~a_@5n;YdpdVHTE>Mi-fcRu)B;rs6ylqL4ZOH}-Q3 delta 440 zcmbQC+Mzl@LePzokpTo8z$61h$3#tern7vL53-3(Ot9e8WQT}rPFyH1r~~Ebh(q+~ zOgyP0ZOOc~@rzCF#KTR~m0I63Fa&fYItsEq0kU`|3ouGaEJ|QhHCAP1bYyI5m<%+a z^SEQGFsp0myI_aOPk)2=}IqeYX~Fsd>ZO}@ma?OZI( z&Qi?wM(v4l=W~bT-scR$k(Lg^EG)&1E;0$NEQ%~m(;vT4`R|%?G<|YAdkIK2NF_*B zayL*lP~~I?rpJs)n^l;-nOIANSz1aqm$05;G&FZJ@kIgIj{23W-CjaEu=hR?>sL_~gC}7SB52MM2K%N0qgTdsD0_Kxf2nbC6 zAz*+oX0oB6Ip+j9i2eza3kA(5X9x;R-XUngxM1=}L3_rtlMRLJ8D%FI3aJAChGA;r diff --git a/script/testdata/spawn_callee_current_cycles.c b/script/testdata/spawn_callee_current_cycles.c index b84fb5353f..c2005c9da4 100644 --- a/script/testdata/spawn_callee_current_cycles.c +++ b/script/testdata/spawn_callee_current_cycles.c @@ -6,9 +6,9 @@ int main(int argc, char *argv[]) { int caller_cycles = atoi(argv[0]); - // Callee's current cycles must >= caller's current cycles. + // Callee's current cycles must > caller's current cycles. int callee_cycles = ckb_current_cycles(); - if (callee_cycles < caller_cycles) { + if (callee_cycles < caller_cycles + 100000) { return 1; } return 0; From 518437977e5a76a20fe88d151be9db081a30ff02 Mon Sep 17 00:00:00 2001 From: mohanson Date: Tue, 11 Jul 2023 15:16:48 +0800 Subject: [PATCH 028/267] chore(script): test case spawn_peak_memory_512k_to_32m --- .../tests/ckb_latest/features_since_v2023.rs | 32 ++++++++++++++++ script/testdata/Makefile | 6 ++- script/testdata/spawn_peak_memory_512k_to_32m | Bin 0 -> 7008 bytes .../testdata/spawn_peak_memory_512k_to_32m.c | 36 ++++++++++++++++++ 4 files changed, 73 insertions(+), 1 deletion(-) create mode 100755 script/testdata/spawn_peak_memory_512k_to_32m create mode 100644 script/testdata/spawn_peak_memory_512k_to_32m.c diff --git a/script/src/verify/tests/ckb_latest/features_since_v2023.rs b/script/src/verify/tests/ckb_latest/features_since_v2023.rs index 05990ee14d..82d7e96964 100644 --- a/script/src/verify/tests/ckb_latest/features_since_v2023.rs +++ b/script/src/verify/tests/ckb_latest/features_since_v2023.rs @@ -22,6 +22,7 @@ use crate::verify::{tests::utils::*, *}; // check_spawn_big_content_length: fails when content_length > 256K. // check_peak_memory_4m_to_32m: spawn should success when peak memory <= 32M // check_peak_memory_2m_to_32m: spawn should success when peak memory <= 32M +// check_peak_memory_512k_to_32m: spawn should success when peak memory <= 32M // check_spawn_snapshot: A spawn B, then B gets suspended to snapshot and resume again. // check_spawn_state: Like check_spawn_snapshot but invoking verifier.resume_from_state instead. // check_spawn_current_memory: Use current_memory() to terminate infinite recursion. @@ -572,6 +573,37 @@ fn check_peak_memory_2m_to_32m() { assert_eq!(result.is_ok(), script_version >= ScriptVersion::V2); } +#[test] +fn check_peak_memory_512k_to_32m() { + let script_version = SCRIPT_VERSION; + + let (spawn_caller_cell, spawn_caller_data_hash) = + load_cell_from_path("testdata/spawn_peak_memory_512k_to_32m"); + + let spawn_caller_script = Script::new_builder() + .hash_type(script_version.data_hash_type().into()) + .code_hash(spawn_caller_data_hash) + .build(); + let output = CellOutputBuilder::default() + .capacity(capacity_bytes!(100).pack()) + .lock(spawn_caller_script) + .build(); + let input = CellInput::new(OutPoint::null(), 0); + + let transaction = TransactionBuilder::default().input(input).build(); + let dummy_cell = create_dummy_cell(output); + + let rtx = ResolvedTransaction { + transaction, + resolved_cell_deps: vec![spawn_caller_cell], + resolved_inputs: vec![dummy_cell], + resolved_dep_groups: vec![], + }; + let verifier = TransactionScriptsVerifierWithEnv::new(); + let result = verifier.verify_without_limit(script_version, &rtx); + assert_eq!(result.is_ok(), script_version >= ScriptVersion::V2); +} + #[test] fn check_spawn_snapshot() { let script_version = SCRIPT_VERSION; diff --git a/script/testdata/Makefile b/script/testdata/Makefile index e57b995194..1550a931de 100644 --- a/script/testdata/Makefile +++ b/script/testdata/Makefile @@ -71,7 +71,10 @@ ALL_BINS := jalr_zero \ spawn_recursive \ spawn_current_memory \ spawn_caller_current_cycles \ - spawn_callee_current_cycles + spawn_callee_current_cycles \ + spawn_peak_memory_512k_to_32m \ + spawn_peak_memory_2m_to_32m \ + spawn_peak_memory_4m_to_32m ALL_LIBS := is_even.lib \ add1.lib sub1.lib mul2.lib div2.lib @@ -156,6 +159,7 @@ spawn_caller_set_content: spawn_caller_set_content.c spawn_caller_strcat_wrap: spawn_caller_strcat_wrap.c spawn_caller_strcat: spawn_caller_strcat.c spawn_caller_strcat_data_hash: spawn_caller_strcat_data_hash.c +spawn_peak_memory_512k_to_32m: spawn_peak_memory_512k_to_32m.c spawn_peak_memory_2m_to_32m: spawn_peak_memory_2m_to_32m.c spawn_peak_memory_4m_to_32m: spawn_peak_memory_4m_to_32m.c spawn_recursive: spawn_recursive.c diff --git a/script/testdata/spawn_peak_memory_512k_to_32m b/script/testdata/spawn_peak_memory_512k_to_32m new file mode 100755 index 0000000000000000000000000000000000000000..118c5ab1d3667660fd139ae7df1657e77c51cb44 GIT binary patch literal 7008 zcmeHMeN0nX7C-m3?}LF6n4&NXu9XU?JJSJS{1``;f^F5ELHEN^C)u&(Q7NNUfvRXi zZ2NdE_%X)RfD?BTh3Jk>V%#Ncjb@i+31YG_aX(ylGs!H_j_qWL#aP4-Dtj;QJwKY6 zzvi#a_9XZH?z!i8?m6$g+xOCY4;tSyQ3!z|6x;`*waH@@vcZxgb%?D3mV*jk(;yWi z>{jEkv)M8+0X8J)N+g2xOSTs+0B%k)DhL^pPFM`-$~t7oelj+*67Q0X3eS+~Hs%jA zp~bCHaJ{GMb;sUy93Sv>=8I7Per`j2Q#X4LM05E=KgRMY?^svFGwccd%9?Vc9CKW= z{OoQO|M=H{jCU-i+5)S`MaJP(E!QP3GSLN5&}NDD{0K50H6oK}%Hi^I{B4_sCY(?@ zjHjal>x5vw2P1!@jV*nCoL~Ep!KR^&_Ss)}T%DN!ez&_`Bf2LvUi2vC6Spx6!V8^z}ihclP}#ST;oAj|^pe1CS{?elRvWV93lL7KYptOz3!&%3Qts9FQ^xCrAoX81+)fUvUhL#)FrkIRs`$mBLMAspkaE(WT`-|s_Rh4Q)l&LaJ`$$RoB7lSfC{=ut&6%W`J!|W z!AtIqYO#-R7@++7CZRw0`w{j}FGA9uL=v*#Ww?+7*wJp3$x zGMTabNMQ(9n604rZ*HfFX5RZC&0xRSrr~m$RYM$@1d;1uBOna1aOQA)Qp4S(PO&WT zh}CG=OBCN8e#M_P{-bynT03}G!-9%G63#eZb>37Ob5dukbA4%W>@d-Y;JC)1cf5Hbx zxl_K*so=HSQ}Ina^OWP&DNxFR0bS$sX1$U&syx+7kv!KZoSsic{$n4fAbH6`FBF_(>pPeeScN(L^ww7T*J9&&A!Q3gL&+IVJHXstJ=AK)0=@@ z)%{1ok(!4%lgF$2f-gENjsvZ`E%RW$DX3>dV!jPHT-|)`g zb<92L2O(3Wa%}0ivSRtsFr)Wf^t#tv-8V>SW+9cx>ewTTO_`i4@3Azysu#zCt4 z*n_9Rg9B3+PwHG8@M@0g@zSW$s~TWHSi?fgQXT$1>RfZsRrY!X&Bo^h2Zo;NMI){n zS>wn^dcSP=*|_S8JGSnjH>6r|XkOQF;Z$|(gRu`IABAd$`IHoD@!^Yy#dYy5!h&|l zZH-Oy>(wBtdqNB5^WIcS2&lEZXP>I2Y$Pj~Q#O6h!qkpBEBANprw)vMT)Dk#JGH}# z;|N9VaMBWA#<%*^x=U)Xekl~Clw9pN1gbaMVd9v(s69$`sxV7fc4!`-C%)vk)@_>b>HKRS(#P+YR4 zwCwGr%a*UO*6_8qZFTngkGJn|?A*1xp|Q!iXYan|{RapSw2CYwqA1U?E#qZTKNES} zFeB(&L?Tqq2=y&O?AnWV|M$Qj5*AB|A4q z!pkx$_WyO1cM)J#>5VVSUUS?T`-;Z98%ar4AX$NA1(Fp=R^a8&dPlpk zZi&tIwymtRxVU8bB5lbM{StjKNG5aj#%Cjj^*DHAeG|VI@BQGeI)iT9yR@u+$?oE< z4sx-Q3r{Ww9F9FZN=T>1DnX3|yaY`c0Q61vCTFA5S_68gt=S3s#$B~mrxo-z`_^rZ zR)-Dr^*igGTdj?a*8QO0h83kR@tP#4X=(yJzsupU?Ie?s4E0v0v$4KrkJCmrNxrv` zZ!q~b&+PeQjLc{RCE1}EWF2qhz9e{Cea1r>-^@y1`ptL`oX8^O!!M~IA3Mkz!0Wh2 zD0qupl4s`)l1k1#UX(;#hmEsvQ~WvEpOeoolw%BK4kf;VZ(&3F3;sE>e~#>^@V?7` zB{sa6zq08#Q5*y zaT2jOfv5PtnJ_M8C;1T52e{$byAx>7&Z{;g7!-eHnCMFG${4{3Ie+_$gSemQ|6g9z B{SN>D literal 0 HcmV?d00001 diff --git a/script/testdata/spawn_peak_memory_512k_to_32m.c b/script/testdata/spawn_peak_memory_512k_to_32m.c new file mode 100644 index 0000000000..21680926ce --- /dev/null +++ b/script/testdata/spawn_peak_memory_512k_to_32m.c @@ -0,0 +1,36 @@ +#include +#include +#include + +#include "ckb_syscalls.h" + +int main(int argc, char *argv[]) { + int8_t spawn_exit_code = 255; + spawn_args_t spgs = { + .memory_limit = 1, + .exit_code = &spawn_exit_code, + .content = NULL, + .content_length = NULL, + }; + int8_t can_i_spawn = 0; + if (argc == 0) { + can_i_spawn = 1; + } + uint64_t depth = (uint64_t)atoi(argv[0]); + if (depth < 56) { + can_i_spawn = 1; + } + if (can_i_spawn) { + char buffer[20]; + itoa(depth + 1, buffer, 10); + const char *argv[] = {buffer}; + uint64_t success = ckb_spawn(0, 3, 0, 1, argv, &spgs); + if (success != 0) { + return success; + } + if (spawn_exit_code != 0) { + return 1; + } + } + return 0; +} From bb216ff8ce6105e6b3e2c762f821f175ab0c7f5e Mon Sep 17 00:00:00 2001 From: mohanson Date: Tue, 11 Jul 2023 17:48:25 +0800 Subject: [PATCH 029/267] Update ckb to v0.24.2 --- script/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/Cargo.toml b/script/Cargo.toml index cf90b09038..0cad2ad920 100644 --- a/script/Cargo.toml +++ b/script/Cargo.toml @@ -22,7 +22,7 @@ ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } byteorder = "1.3.1" ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } ckb-hash = { path = "../util/hash", version = "= 0.112.0-pre" } -ckb-vm = { version = "=0.24.0", default-features = false } +ckb-vm = { version = "=0.24.2", default-features = false } faster-hex = "0.6" ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre", optional = true } serde = { version = "1.0", features = ["derive"] } From 83250a37c37dcdd866904471644edcd7a9ff8a87 Mon Sep 17 00:00:00 2001 From: mohanson Date: Tue, 11 Jul 2023 18:54:16 +0800 Subject: [PATCH 030/267] Add Cargo.lock --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bcda9c4aea..b089ecb5dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1523,9 +1523,9 @@ dependencies = [ [[package]] name = "ckb-vm" -version = "0.24.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77a1ce8d1ed8b18041d194ffe0d647c3a707af9347f49b5f543317c73943634" +checksum = "cb89eb30f9e82e63c637a0824e25b12d99de9be9c36675ba6f79479094a8b42c" dependencies = [ "byteorder", "bytes 1.4.0", @@ -1541,9 +1541,9 @@ dependencies = [ [[package]] name = "ckb-vm-definitions" -version = "0.24.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "328c7a6b5664483d5c02fae7b0ada0c407660e7064b2f1505ee6f5d07a4dbf2b" +checksum = "9d51f3b5550ebe2a37a6921127273afc00257b11f39517664dd0b35455d7e396" [[package]] name = "clang-sys" From c00841e82812b2acd4c1feac9191d7adcbe51c32 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 20 Jun 2023 17:02:17 +0800 Subject: [PATCH 031/267] feat: add `register_thread` and `new_tokio_exit_rx` --- util/stop-handler/src/lib.rs | 151 ++----------------------- util/stop-handler/src/stop_register.rs | 74 ++++++++++++ 2 files changed, 82 insertions(+), 143 deletions(-) create mode 100644 util/stop-handler/src/stop_register.rs diff --git a/util/stop-handler/src/lib.rs b/util/stop-handler/src/lib.rs index d9ceddf268..fe80839dd3 100644 --- a/util/stop-handler/src/lib.rs +++ b/util/stop-handler/src/lib.rs @@ -1,147 +1,12 @@ //! TODO(doc): @keroro520 -use ckb_logger::error; -use parking_lot::Mutex; -use std::fmt::Debug; -use std::sync::mpsc; -use std::sync::{Arc, Weak}; -use std::thread::JoinHandle; -use tokio::sync::oneshot as tokio_oneshot; -use tokio::sync::watch as tokio_watch; -/// init flags -pub const WATCH_INIT: u8 = 0; -/// stop flags -pub const WATCH_STOP: u8 = 1; +pub use stop_register::{ + broadcast_exit_signals, new_crossbeam_exit_rx, new_tokio_exit_rx, register_thread, + wait_all_ckb_services_exit, +}; -/// TODO(doc): @keroro520 -#[derive(Debug)] -pub enum SignalSender { - /// TODO(doc): @keroro520 - Crossbeam(ckb_channel::Sender), - /// TODO(doc): @keroro520 - Std(mpsc::Sender), - /// TODO(doc): @keroro520 - Tokio(tokio_oneshot::Sender), - /// A single-producer, multi-consumer channel that only retains the last sent value. - Watch(tokio_watch::Sender), - /// Do nothing, for tests - Dummy, -} +pub use tokio_util::sync::CancellationToken; -impl SignalSender { - /// TODO(doc): @keroro520 - pub fn send(self, cmd: T) { - match self { - SignalSender::Crossbeam(tx) => { - if let Err(e) = tx.try_send(cmd) { - error!("handler signal send error {:?}", e); - }; - } - SignalSender::Std(tx) => { - if let Err(e) = tx.send(cmd) { - error!("handler signal send error {:?}", e); - }; - } - SignalSender::Tokio(tx) => { - if let Err(e) = tx.send(cmd) { - error!("handler signal send error {:?}", e); - }; - } - SignalSender::Watch(tx) => { - if let Err(e) = tx.send(WATCH_STOP) { - error!("handler signal send error {:?}", e); - }; - } - SignalSender::Dummy => {} - } - } -} - -#[derive(Debug)] -struct Handler { - signal: SignalSender, - thread: Option>, -} - -/// Weak is a version of Arc that holds a non-owning reference to the managed allocation. -/// Since a Weak reference does not count towards ownership, -/// it will not prevent the value stored in the allocation from being dropped, -/// and Weak itself makes no guarantees about the value still being present. -#[derive(Debug)] -enum Ref { - Arc(Arc), - Weak(Weak), -} - -impl Clone for Ref { - #[inline] - fn clone(&self) -> Ref { - match self { - Self::Arc(arc) => Self::Arc(Arc::clone(arc)), - Self::Weak(weak) => Self::Weak(Weak::clone(weak)), - } - } -} - -impl Ref { - fn downgrade(&self) -> Ref { - match self { - Self::Arc(arc) => Self::Weak(Arc::downgrade(arc)), - Self::Weak(weak) => Self::Weak(Weak::clone(weak)), - } - } -} - -/// TODO(doc): @keroro520 -//the outer Option take ownership for `Arc::try_unwrap` -//the inner Option take ownership for `JoinHandle` or `oneshot::Sender` -#[derive(Clone, Debug)] -pub struct StopHandler { - inner: Option>>>>, - name: String, -} - -impl StopHandler { - /// TODO(doc): @keroro520 - pub fn new( - signal: SignalSender, - thread: Option>, - name: String, - ) -> StopHandler { - let handler = Handler { signal, thread }; - StopHandler { - inner: Some(Ref::Arc(Arc::new(Mutex::new(Some(handler))))), - name, - } - } - - /// Creates a new Weak pointer. - pub fn downgrade_clone(&self) -> StopHandler { - StopHandler { - inner: self.inner.as_ref().map(|inner| inner.downgrade()), - name: self.name.clone(), - } - } - - /// TODO(doc): @keroro520 - pub fn try_send(&mut self, cmd: T) { - let inner = self - .inner - .take() - .expect("Stop signal can only be sent once"); - - if let Ref::Arc(inner) = inner { - if let Ok(lock) = Arc::try_unwrap(inner) { - ckb_logger::info!("StopHandler({}) send signal", self.name); - let handler = lock.lock().take().expect("Handler can only be taken once"); - let Handler { signal, thread } = handler; - signal.send(cmd); - if let Some(thread) = thread { - if let Err(e) = thread.join() { - error!("handler thread join error {:?}", e); - }; - } - }; - } - } -} +mod stop_register; +#[cfg(test)] +mod tests; diff --git a/util/stop-handler/src/stop_register.rs b/util/stop-handler/src/stop_register.rs new file mode 100644 index 0000000000..e329565ae8 --- /dev/null +++ b/util/stop-handler/src/stop_register.rs @@ -0,0 +1,74 @@ +use ckb_logger::{info, trace, warn}; +use ckb_util::Mutex; +use tokio_util::sync::CancellationToken; + +struct CkbServiceHandles { + thread_handles: Vec<(String, std::thread::JoinHandle<()>)>, +} + +/// Wait all ckb services exit +pub fn wait_all_ckb_services_exit() { + info!("waiting exit signal..."); + let exit_signal = new_crossbeam_exit_rx(); + let _ = exit_signal.recv(); + info!("received exit signal, broadcasting exit signal to all threads"); + let mut handles = CKB_HANDLES.lock(); + for (name, join_handle) in handles.thread_handles.drain(..) { + match join_handle.join() { + Ok(_) => { + info!("wait thread {} done", name); + } + Err(e) => { + warn!("wait thread {}: ERROR: {:?}", name, e) + } + } + } + info!("all ckb threads have been stopped"); +} + +static CKB_HANDLES: once_cell::sync::Lazy> = + once_cell::sync::Lazy::new(|| { + Mutex::new(CkbServiceHandles { + thread_handles: vec![], + }) + }); + +static TOKIO_EXIT: once_cell::sync::Lazy = + once_cell::sync::Lazy::new(CancellationToken::new); + +static CROSSBEAM_EXIT_SENDERS: once_cell::sync::Lazy>>> = + once_cell::sync::Lazy::new(|| Mutex::new(vec![])); + +/// Create a new CancellationToken for exit signal +pub fn new_tokio_exit_rx() -> CancellationToken { + TOKIO_EXIT.clone() +} + +/// Create a new crossbeam Receiver for exit signal +pub fn new_crossbeam_exit_rx() -> ckb_channel::Receiver<()> { + let (tx, rx) = ckb_channel::bounded(1); + CROSSBEAM_EXIT_SENDERS.lock().push(tx); + rx +} + +/// Broadcast exit signals to all threads and all tokio tasks +pub fn broadcast_exit_signals() { + TOKIO_EXIT.cancel(); + CROSSBEAM_EXIT_SENDERS.lock().iter().for_each(|tx| { + if let Err(e) = tx.try_send(()) { + println!("broadcast thread: ERROR: {:?}", e) + } else { + println!("send a crossbeam exit signal"); + } + }); +} + +/// Register a thread `JoinHandle` to `CKB_HANDLES` +pub fn register_thread(name: &str, thread_handle: std::thread::JoinHandle<()>) { + trace!("register thread {}", name); + CKB_HANDLES + .lock() + .thread_handles + .push((name.into(), thread_handle)); + trace!("register thread done {}", name); +} From 7ee2a807f5758db481e91ff90e6e971b2fb6828c Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 20 Jun 2023 17:02:38 +0800 Subject: [PATCH 032/267] test: add basic unit test for ckb shutdown --- util/stop-handler/src/tests.rs | 142 +++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 util/stop-handler/src/tests.rs diff --git a/util/stop-handler/src/tests.rs b/util/stop-handler/src/tests.rs new file mode 100644 index 0000000000..3141512bb7 --- /dev/null +++ b/util/stop-handler/src/tests.rs @@ -0,0 +1,142 @@ +use crate::{ + broadcast_exit_signals, new_crossbeam_exit_rx, new_tokio_exit_rx, register_thread, + wait_all_ckb_services_exit, +}; +use ckb_async_runtime::{new_global_runtime, Handle}; +use ckb_channel::select; +use rand::Rng; +use std::sync::atomic::{AtomicI64, Ordering}; +use std::sync::Arc; +use std::time::Duration; +use tokio_util::sync::CancellationToken; + +fn send_ctrlc_later(duration: Duration) { + std::thread::spawn(move || { + std::thread::sleep(duration); + // send SIGINT to myself + unsafe { + libc::raise(libc::SIGINT); + println!("[ $$ sent SIGINT to myself $$ ]"); + } + }); +} + +#[derive(Default)] +struct TestStopMemo { + spawned_threads_count: Arc, + stopped_threads_count: Arc, + + spawned_tokio_task_count: Arc, + stopped_tokio_task_count: Arc, +} + +impl TestStopMemo { + fn start_many_threads(&self) { + for i in 0..rand::thread_rng().gen_range(3..7) { + let join = std::thread::spawn({ + let stopped_threads_count = Arc::clone(&self.stopped_threads_count); + move || { + let ticker = ckb_channel::tick(Duration::from_millis(500)); + let deadline = ckb_channel::after(Duration::from_millis( + (rand::thread_rng().gen_range(1.0..5.0) * 1000.0) as u64, + )); + + let stop = new_crossbeam_exit_rx(); + + loop { + select! { + recv(ticker) -> _ => { + println!("thread {} received tick signal", i); + }, + recv(stop) -> _ => { + println!("thread {} received crossbeam exit signal", i); + stopped_threads_count.fetch_add(1, Ordering::SeqCst); + return; + }, + recv(deadline) -> _ =>{ + println!("thread {} finish its job", i); + stopped_threads_count.fetch_add(1, Ordering::SeqCst); + return + } + } + } + } + }); + + self.spawned_threads_count.fetch_add(1, Ordering::SeqCst); + register_thread(&format!("test thread {}", i), join); + } + } + + fn start_many_tokio_tasks(&self, handle: &Handle) { + for i in 0..rand::thread_rng().gen_range(3..7) { + let stop: CancellationToken = new_tokio_exit_rx(); + + handle.spawn({ + let spawned_tokio_task_count = Arc::clone(&self.spawned_tokio_task_count); + let stopped_tokio_task_count = Arc::clone(&self.stopped_tokio_task_count); + async move { + spawned_tokio_task_count.fetch_add(1, Ordering::SeqCst); + + let mut interval = tokio::time::interval(Duration::from_millis(500)); + + let duration = Duration::from_millis( + (rand::thread_rng().gen_range(1.0..5.0) * 1000.0) as u64, + ); + let deadline = tokio::time::sleep(duration); + tokio::pin!(deadline); + + loop { + tokio::select! { + _ = &mut deadline =>{ + println!("tokio task {} finish its job", i); + stopped_tokio_task_count.fetch_add(1, Ordering::SeqCst); + break; + } + _ = interval.tick()=> { + println!("tokio task {} received tick signal", i); + }, + _ = stop.cancelled() => { + println!("tokio task {} receive exit signal", i); + stopped_tokio_task_count.fetch_add(1, Ordering::SeqCst); + break + }, + else => break, + } + } + } + }); + } + } +} +#[test] +fn basic() { + let (mut handle, mut stop_recv, _runtime) = new_global_runtime(); + + ctrlc::set_handler(move || { + broadcast_exit_signals(); + }) + .expect("Error setting Ctrl-C handler"); + + send_ctrlc_later(Duration::from_secs(3)); + + let test_memo = TestStopMemo::default(); + + test_memo.start_many_threads(); + test_memo.start_many_tokio_tasks(&handle); + + handle.drop_guard(); + wait_all_ckb_services_exit(); + handle.block_on(async move { + stop_recv.recv().await; + }); + + assert_eq!( + test_memo.spawned_threads_count.load(Ordering::SeqCst), + test_memo.stopped_threads_count.load(Ordering::SeqCst), + ); + assert_eq!( + test_memo.spawned_tokio_task_count.load(Ordering::SeqCst), + test_memo.stopped_tokio_task_count.load(Ordering::SeqCst), + ); +} From d120f573f42b4c5c84312bf83983e162926f2a4b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 20 Jun 2023 17:04:09 +0800 Subject: [PATCH 033/267] wait all thread and tokio tasks exit before process exit --- Cargo.lock | 26 +++++++--- benches/benches/benchmarks/overall.rs | 3 +- block-filter/src/filter.rs | 16 +++--- chain/src/chain.rs | 34 +++---------- chain/src/tests/util.rs | 3 +- ckb-bin/Cargo.toml | 1 + ckb-bin/src/helper.rs | 40 ++++++++++----- ckb-bin/src/lib.rs | 46 +++++++++++++---- ckb-bin/src/subcommand/miner.rs | 25 ++++------ ckb-bin/src/subcommand/run.rs | 28 +++-------- miner/src/client.rs | 47 +++++++++--------- miner/src/miner.rs | 18 +++---- network/src/network.rs | 43 +++++----------- network/src/protocols/tests/mod.rs | 4 +- notify/src/lib.rs | 21 ++------ rpc/src/module/subscription.rs | 1 + rpc/src/tests/examples.rs | 3 +- rpc/src/tests/mod.rs | 5 +- shared/Cargo.toml | 1 - shared/src/shared.rs | 19 +++---- sync/src/relayer/tests/helper.rs | 7 ++- sync/src/synchronizer/mod.rs | 7 ++- sync/src/tests/net_time_checker.rs | 9 ++-- sync/src/types/header_map/mod.rs | 24 ++++----- test/src/net.rs | 7 ++- tx-pool/Cargo.toml | 1 + tx-pool/src/chunk_process.rs | 13 +++-- tx-pool/src/process.rs | 2 +- tx-pool/src/service.rs | 49 +++++++++---------- util/channel/src/lib.rs | 4 +- util/indexer/src/service.rs | 46 +++++------------ util/launcher/Cargo.toml | 1 - util/launcher/src/lib.rs | 14 ++---- util/launcher/src/shared_builder.rs | 10 ++-- .../src/tests/utils/chain.rs | 3 +- util/metrics-service/Cargo.toml | 1 + util/metrics-service/src/lib.rs | 10 +++- util/network-alert/Cargo.toml | 1 - util/network-alert/src/tests/test_notifier.rs | 10 ++-- util/runtime/Cargo.toml | 1 - util/runtime/src/lib.rs | 48 ++++++++++++------ util/stop-handler/Cargo.toml | 11 ++++- 42 files changed, 314 insertions(+), 349 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bcda9c4aea..eb5351f95d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -437,7 +437,6 @@ version = "0.112.0-pre" dependencies = [ "ckb-logger", "ckb-spawn", - "ckb-stop-handler", "tokio", ] @@ -492,6 +491,7 @@ dependencies = [ "ckb-network", "ckb-resource", "ckb-shared", + "ckb-stop-handler", "ckb-store", "ckb-types", "ckb-util", @@ -934,6 +934,7 @@ dependencies = [ "ckb-logger", "ckb-metrics", "ckb-metrics-config", + "ckb-stop-handler", "ckb-util", "hyper", "prometheus", @@ -1020,7 +1021,7 @@ dependencies = [ "tempfile", "tentacle", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "trust-dns-resolver", ] @@ -1178,6 +1179,7 @@ dependencies = [ "ckb-pow", "ckb-reward-calculator", "ckb-shared", + "ckb-stop-handler", "ckb-store", "ckb-sync", "ckb-systemtime", @@ -1284,10 +1286,17 @@ version = "0.112.0-pre" name = "ckb-stop-handler" version = "0.112.0-pre" dependencies = [ + "ckb-async-runtime", "ckb-channel", "ckb-logger", + "ckb-util", + "ctrlc", + "libc", + "once_cell", "parking_lot 0.12.1", + "rand 0.8.5", "tokio", + "tokio-util 0.7.8", ] [[package]] @@ -1429,6 +1438,7 @@ dependencies = [ "slab", "tempfile", "tokio", + "tokio-util 0.7.8", ] [[package]] @@ -2403,7 +2413,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tracing", ] @@ -4479,7 +4489,7 @@ dependencies = [ "tentacle-secio", "thiserror", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tokio-yamux", "wasm-bindgen", "wasm-bindgen-futures", @@ -4521,7 +4531,7 @@ dependencies = [ "secp256k1", "sha2", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "unsigned-varint", "x25519-dalek", ] @@ -4733,9 +4743,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes 1.4.0", "futures-core", @@ -4756,7 +4766,7 @@ dependencies = [ "log", "nohash-hasher", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", ] [[package]] diff --git a/benches/benches/benchmarks/overall.rs b/benches/benches/benchmarks/overall.rs index af98fe6bae..15a76de599 100644 --- a/benches/benches/benchmarks/overall.rs +++ b/benches/benches/benchmarks/overall.rs @@ -6,7 +6,7 @@ use ckb_chain_spec::consensus::{ConsensusBuilder, ProposalWindow}; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::JsonBytes; use ckb_launcher::SharedBuilder; -use ckb_network::{DefaultExitHandler, Flags, NetworkController, NetworkService, NetworkState}; +use ckb_network::{Flags, NetworkController, NetworkService, NetworkState}; use ckb_shared::Shared; use ckb_store::ChainStore; use ckb_types::{ @@ -77,7 +77,6 @@ fn dummy_network(shared: &Shared) -> NetworkController { "test".to_string(), Flags::COMPATIBILITY, ), - DefaultExitHandler::default(), ) .start(shared.async_handle()) .expect("Start network service failed") diff --git a/block-filter/src/filter.rs b/block-filter/src/filter.rs index 71ffca1c80..56fcc92f6b 100644 --- a/block-filter/src/filter.rs +++ b/block-filter/src/filter.rs @@ -1,7 +1,7 @@ -use ckb_async_runtime::tokio::{self, sync::oneshot, task::block_in_place}; -use ckb_logger::{debug, warn}; +use ckb_async_runtime::tokio::{self, task::block_in_place}; +use ckb_logger::{debug, info, warn}; use ckb_shared::Shared; -use ckb_stop_handler::{SignalSender, StopHandler}; +use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_store::{ChainDB, ChainStore}; use ckb_types::{ core::HeaderView, @@ -43,10 +43,10 @@ impl BlockFilter { } /// start background single-threaded service to create block filter data - pub fn start(self) -> StopHandler<()> { + pub fn start(self) { let notify_controller = self.shared.notify_controller().clone(); let async_handle = self.shared.async_handle().clone(); - let (stop, mut stop_rx) = oneshot::channel::<()>(); + let stop_rx: CancellationToken = new_tokio_exit_rx(); let filter_data_builder = self.clone(); let build_filter_data = @@ -62,12 +62,14 @@ impl BlockFilter { block_in_place(|| self.build_filter_data()); new_block_watcher.borrow_and_update(); } - _ = &mut stop_rx => break, + _ = stop_rx.cancelled() => { + info!("BlockFilter received exit signal, exit now"); + break + }, else => break, } } }); - StopHandler::new(SignalSender::Tokio(stop), None, NAME.to_string()) } /// build block filter data to the latest block diff --git a/chain/src/chain.rs b/chain/src/chain.rs index b6291211d5..8f7e5d8e3c 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -12,7 +12,7 @@ use ckb_proposal_table::ProposalTable; #[cfg(debug_assertions)] use ckb_rust_unstable_port::IsSorted; use ckb_shared::shared::Shared; -use ckb_stop_handler::{SignalSender, StopHandler}; +use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_store::{attach_block_cell, detach_block_cell, ChainStore, StoreTransaction}; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ @@ -22,7 +22,7 @@ use ckb_types::{ ResolvedTransaction, }, hardfork::HardForks, - service::{Request, DEFAULT_CHANNEL_SIZE, SIGNAL_CHANNEL_SIZE}, + service::{Request, DEFAULT_CHANNEL_SIZE}, BlockExt, BlockNumber, BlockView, Cycle, HeaderView, }, packed::{Byte32, ProposalShortId}, @@ -50,13 +50,6 @@ type TruncateRequest = Request>; pub struct ChainController { process_block_sender: Sender, truncate_sender: Sender, // Used for testing only - stop: Option>, -} - -impl Drop for ChainController { - fn drop(&mut self) { - self.try_stop(); - } } #[cfg_attr(feature = "mock", faux::methods)] @@ -64,12 +57,10 @@ impl ChainController { pub fn new( process_block_sender: Sender, truncate_sender: Sender, - stop: StopHandler<()>, ) -> Self { ChainController { process_block_sender, truncate_sender, - stop: Some(stop), } } /// Inserts the block into database. @@ -109,17 +100,10 @@ impl ChainController { }) } - pub fn try_stop(&mut self) { - if let Some(ref mut stop) = self.stop { - stop.try_send(()); - } - } - /// Since a non-owning reference does not count towards ownership, /// it will not prevent the value stored in the allocation from being dropped pub fn non_owning_clone(&self) -> Self { ChainController { - stop: None, truncate_sender: self.truncate_sender.clone(), process_block_sender: self.process_block_sender.clone(), } @@ -245,7 +229,7 @@ impl ChainService { /// start background single-threaded service with specified thread_name. pub fn start(mut self, thread_name: Option) -> ChainController { - let (signal_sender, signal_receiver) = channel::bounded::<()>(SIGNAL_CHANNEL_SIZE); + let signal_receiver = new_crossbeam_exit_rx(); let (process_block_sender, process_block_receiver) = channel::bounded(DEFAULT_CHANNEL_SIZE); let (truncate_sender, truncate_receiver) = channel::bounded(1); @@ -256,10 +240,11 @@ impl ChainService { } let tx_control = self.shared.tx_pool_controller().clone(); - let thread = thread_builder + let chain_jh = thread_builder .spawn(move || loop { select! { recv(signal_receiver) -> _ => { + info!("ChainService received exit signal, stopped"); break; }, recv(process_block_receiver) -> msg => match msg { @@ -287,13 +272,10 @@ impl ChainService { } }) .expect("Start ChainService failed"); - let stop = StopHandler::new( - SignalSender::Crossbeam(signal_sender), - Some(thread), - "chain".to_string(), - ); - ChainController::new(process_block_sender, truncate_sender, stop) + register_thread("ChainService", chain_jh); + + ChainController::new(process_block_sender, truncate_sender) } fn make_fork_for_truncate(&self, target: &HeaderView, current_tip: &HeaderView) -> ForkChanges { diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index 3051dd775f..7ade19bc53 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -6,7 +6,7 @@ use ckb_dao::DaoCalculator; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::ScriptHashType; use ckb_launcher::SharedBuilder; -use ckb_network::{DefaultExitHandler, Flags, NetworkController, NetworkService, NetworkState}; +use ckb_network::{Flags, NetworkController, NetworkService, NetworkState}; use ckb_shared::shared::Shared; use ckb_store::ChainStore; pub use ckb_test_chain_utils::MockStore; @@ -314,7 +314,6 @@ pub(crate) fn dummy_network(shared: &Shared) -> NetworkController { "test".to_string(), Flags::COMPATIBILITY, ), - DefaultExitHandler::default(), ) .start(shared.async_handle()) .expect("Start network service failed") diff --git a/ckb-bin/Cargo.toml b/ckb-bin/Cargo.toml index bdd04dd5be..b0edf465ba 100644 --- a/ckb-bin/Cargo.toml +++ b/ckb-bin/Cargo.toml @@ -43,6 +43,7 @@ rayon = "1.0" sentry = { version = "0.26.0", optional = true } is-terminal = "0.4.7" fdlimit = "0.2.1" +ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } [features] deadlock_detection = ["ckb-util/deadlock_detection"] diff --git a/ckb-bin/src/helper.rs b/ckb-bin/src/helper.rs index 142d06226c..21c93732b8 100644 --- a/ckb-bin/src/helper.rs +++ b/ckb-bin/src/helper.rs @@ -1,4 +1,5 @@ use ckb_logger::info; + use std::io::{stdin, stdout, Write}; #[cfg(not(feature = "deadlock_detection"))] @@ -6,27 +7,42 @@ pub fn deadlock_detection() {} #[cfg(feature = "deadlock_detection")] pub fn deadlock_detection() { + use ckb_channel::select; use ckb_logger::warn; + use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_util::parking_lot::deadlock; use std::{thread, time::Duration}; info!("deadlock_detection enable"); - thread::spawn(move || loop { - thread::sleep(Duration::from_secs(10)); - let deadlocks = deadlock::check_deadlock(); - if deadlocks.is_empty() { - continue; - } + let dead_lock_jh = thread::spawn({ + let ticker = ckb_channel::tick(Duration::from_secs(10)); + let stop_rx = new_crossbeam_exit_rx(); + move || loop { + select! { + recv(ticker) -> _ => { + let deadlocks = deadlock::check_deadlock(); + if deadlocks.is_empty() { + continue; + } + + warn!("{} deadlocks detected", deadlocks.len()); + for (i, threads) in deadlocks.iter().enumerate() { + warn!("Deadlock #{}", i); + for t in threads { + warn!("Thread Id {:#?}", t.thread_id()); + warn!("{:#?}", t.backtrace()); + } + } - warn!("{} deadlocks detected", deadlocks.len()); - for (i, threads) in deadlocks.iter().enumerate() { - warn!("Deadlock #{}", i); - for t in threads { - warn!("Thread Id {:#?}", t.thread_id()); - warn!("{:#?}", t.backtrace()); + }, + recv(stop_rx) -> _ =>{ + info!("deadlock_detection received exit signal, stopped"); + return; + } } } }); + register_thread("dead_lock_detect", dead_lock_jh); } pub fn prompt(msg: &str) -> String { diff --git a/ckb-bin/src/lib.rs b/ckb-bin/src/lib.rs index b047a14086..91c61b813d 100644 --- a/ckb-bin/src/lib.rs +++ b/ckb-bin/src/lib.rs @@ -8,13 +8,15 @@ mod subcommand; use ckb_app_config::{cli, ExitCode, Setup}; use ckb_async_runtime::new_global_runtime; use ckb_build_info::Version; +use ckb_logger::info; +use ckb_network::tokio; +use ckb_stop_handler::broadcast_exit_signals; use helper::raise_fd_limit; use setup_guard::SetupGuard; -use std::time::Duration; +use std::sync::Arc; #[cfg(feature = "with_sentry")] pub(crate) const LOG_TARGET_SENTRY: &str = "sentry"; -const RUNTIME_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(1); /// The executable main entry. /// @@ -58,25 +60,49 @@ pub fn run_app(version: Version) -> Result<(), ExitCode> { .expect("SubcommandRequiredElseHelp"); let is_silent_logging = is_silent_logging(cmd); - let (handle, runtime) = new_global_runtime(); + let (mut handle, mut handle_stop_rx, _runtime) = new_global_runtime(); let setup = Setup::from_matches(bin_name, cmd, matches)?; let _guard = SetupGuard::from_setup(&setup, &version, handle.clone(), is_silent_logging)?; raise_fd_limit(); + // indicate whether the process is terminated by an exit signal + let caught_exit_signal = Arc::new(std::sync::atomic::AtomicBool::new(false)); + + ctrlc::set_handler({ + let caught_exit_signal = Arc::clone(&caught_exit_signal); + move || { + broadcast_exit_signals(); + caught_exit_signal.store(true, std::sync::atomic::Ordering::SeqCst); + } + }) + .expect("Error setting Ctrl-C handler"); + let ret = match cmd { - cli::CMD_RUN => subcommand::run(setup.run(matches)?, version, handle), - cli::CMD_MINER => subcommand::miner(setup.miner(matches)?, handle), - cli::CMD_REPLAY => subcommand::replay(setup.replay(matches)?, handle), - cli::CMD_EXPORT => subcommand::export(setup.export(matches)?, handle), - cli::CMD_IMPORT => subcommand::import(setup.import(matches)?, handle), - cli::CMD_STATS => subcommand::stats(setup.stats(matches)?, handle), + cli::CMD_RUN => subcommand::run(setup.run(matches)?, version, handle.clone()), + cli::CMD_MINER => subcommand::miner(setup.miner(matches)?, handle.clone()), + cli::CMD_REPLAY => subcommand::replay(setup.replay(matches)?, handle.clone()), + cli::CMD_EXPORT => subcommand::export(setup.export(matches)?, handle.clone()), + cli::CMD_IMPORT => subcommand::import(setup.import(matches)?, handle.clone()), + cli::CMD_STATS => subcommand::stats(setup.stats(matches)?, handle.clone()), cli::CMD_RESET_DATA => subcommand::reset_data(setup.reset_data(matches)?), cli::CMD_MIGRATE => subcommand::migrate(setup.migrate(matches)?), _ => unreachable!(), }; - runtime.shutdown_timeout(RUNTIME_SHUTDOWN_TIMEOUT); + if !caught_exit_signal.load(std::sync::atomic::Ordering::SeqCst) { + // if `subcommand` finish normally, and we didn't catch exit signal, broadcast exit signals + broadcast_exit_signals(); + } + + handle.drop_guard(); + + tokio::task::block_in_place(|| { + info!("waiting all tokio tasks done"); + handle_stop_rx.blocking_recv(); + info!("all tokio tasks have been stopped"); + }); + ret } diff --git a/ckb-bin/src/subcommand/miner.rs b/ckb-bin/src/subcommand/miner.rs index dace1cc9a7..fd9a892abc 100644 --- a/ckb-bin/src/subcommand/miner.rs +++ b/ckb-bin/src/subcommand/miner.rs @@ -2,16 +2,15 @@ use ckb_app_config::{ExitCode, MinerArgs, MinerConfig}; use ckb_async_runtime::Handle; use ckb_channel::unbounded; use ckb_miner::{Client, Miner}; -use ckb_network::{DefaultExitHandler, ExitHandler}; +use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread, wait_all_ckb_services_exit}; use std::thread; pub fn miner(args: MinerArgs, async_handle: Handle) -> Result<(), ExitCode> { let (new_work_tx, new_work_rx) = unbounded(); let MinerConfig { client, workers } = args.config; - let exit_handler = DefaultExitHandler::default(); let client = Client::new(new_work_tx, client, async_handle); - let (mut miner, miner_stop) = Miner::new( + let mut miner = Miner::new( args.pow_engine, client.clone(), new_work_rx, @@ -21,21 +20,17 @@ pub fn miner(args: MinerArgs, async_handle: Handle) -> Result<(), ExitCode> { ckb_memory_tracker::track_current_process_simple(args.memory_tracker.interval); - let client_stop = client.spawn_background(); + client.spawn_background(); - thread::Builder::new() - .name("client".to_string()) - .spawn(move || miner.run()) + let stop_rx = new_crossbeam_exit_rx(); + const THREAD_NAME: &str = "client"; + let miner_jh = thread::Builder::new() + .name(THREAD_NAME.into()) + .spawn(move || miner.run(stop_rx)) .expect("Start client failed!"); + register_thread(THREAD_NAME, miner_jh); - let exit_handler_clone = exit_handler.clone(); - ctrlc::set_handler(move || { - exit_handler_clone.notify_exit(); - }) - .expect("Error setting Ctrl-C handler"); - exit_handler.wait_for_exit(); + wait_all_ckb_services_exit(); - drop(client_stop); - drop(miner_stop); Ok(()) } diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs index 7878b7b6de..cd7d8c6282 100644 --- a/ckb-bin/src/subcommand/run.rs +++ b/ckb-bin/src/subcommand/run.rs @@ -4,7 +4,8 @@ use ckb_async_runtime::Handle; use ckb_build_info::Version; use ckb_launcher::Launcher; use ckb_logger::info; -use ckb_network::{DefaultExitHandler, ExitHandler}; +use ckb_stop_handler::wait_all_ckb_services_exit; + use ckb_types::core::cell::setup_system_cell_cache; pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), ExitCode> { @@ -16,7 +17,6 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), let block_assembler_config = launcher.sanitize_block_assembler_config()?; let miner_enable = block_assembler_config.is_some(); - let exit_handler = DefaultExitHandler::default(); let (shared, mut pack) = launcher.build_shared(block_assembler_config)?; @@ -43,12 +43,11 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), let chain_controller = launcher.start_chain_service(&shared, pack.take_proposal_table()); - let block_filter = launcher.start_block_filter(&shared); + launcher.start_block_filter(&shared); - let (network_controller, rpc_server) = launcher.start_network_and_rpc( + let (network_controller, _rpc_server) = launcher.start_network_and_rpc( &shared, chain_controller.non_owning_clone(), - &exit_handler, miner_enable, pack.take_relay_tx_receiver(), ); @@ -56,22 +55,7 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), let tx_pool_builder = pack.take_tx_pool_builder(); tx_pool_builder.start(network_controller.non_owning_clone()); - let exit_handler_clone = exit_handler.clone(); - ctrlc::set_handler(move || { - exit_handler_clone.notify_exit(); - }) - .expect("Error setting Ctrl-C handler"); - exit_handler.wait_for_exit(); - - info!("Finishing work, please wait..."); - shared.tx_pool_controller().save_pool().map_err(|err| { - eprintln!("TxPool Error: {err}"); - ExitCode::Failure - })?; - - drop(rpc_server); - drop(block_filter); - drop(network_controller); - drop(chain_controller); + wait_all_ckb_services_exit(); + Ok(()) } diff --git a/miner/src/client.rs b/miner/src/client.rs index 57d8e87cb0..dda47570a7 100644 --- a/miner/src/client.rs +++ b/miner/src/client.rs @@ -4,8 +4,8 @@ use ckb_app_config::MinerClientConfig; use ckb_async_runtime::Handle; use ckb_channel::Sender; use ckb_jsonrpc_types::{Block as JsonBlock, BlockTemplate}; -use ckb_logger::{debug, error}; -use ckb_stop_handler::{SignalSender, StopHandler}; +use ckb_logger::{debug, error, info}; +use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::{ packed::{Block, Byte32}, H256, @@ -45,13 +45,12 @@ pub enum RpcError { #[derive(Debug, Clone)] pub struct Rpc { sender: mpsc::Sender, - stop: StopHandler<()>, } impl Rpc { pub fn new(url: Uri, handle: Handle) -> Rpc { let (sender, mut receiver) = mpsc::channel(65_535); - let (stop, mut stop_rx) = oneshot::channel::<()>(); + let stop_rx: CancellationToken = new_tokio_exit_rx(); let https = hyper_tls::HttpsConnector::new(); let client = HttpClient::builder().build(https); @@ -87,16 +86,16 @@ impl Rpc { } }); }, - _ = &mut stop_rx => break, + _ = stop_rx.cancelled() => { + info!("Rpc server received exit signal, exit now"); + break + }, else => break } } }); - Rpc { - sender, - stop: StopHandler::new(SignalSender::Tokio(stop), None, "miner-rpc".to_string()), - } + Rpc { sender } } pub fn request( @@ -128,12 +127,6 @@ impl Rpc { } } -impl Drop for Rpc { - fn drop(&mut self) { - self.stop.try_send(()); - } -} - pub enum Works { New(Work), FailSubmit(Byte32), @@ -200,8 +193,7 @@ impl Client { } /// spawn background update process - pub fn spawn_background(self) -> StopHandler<()> { - let (stop, stop_rx) = oneshot::channel::<()>(); + pub fn spawn_background(self) { let client = self.clone(); if let Some(addr) = self.config.listen { ckb_logger::info!("listen notify mode : {}", addr); @@ -220,19 +212,18 @@ Otherwise ckb-miner does not work properly and will behave as it stopped committ addr ); self.handle.spawn(async move { - client.listen_block_template_notify(addr, stop_rx).await; + client.listen_block_template_notify(addr).await; }); - self.blocking_fetch_block_template() + self.blocking_fetch_block_template(); } else { ckb_logger::info!("loop poll mode: interval {}ms", self.config.poll_interval); self.handle.spawn(async move { - client.poll_block_template(stop_rx).await; + client.poll_block_template().await; }); } - StopHandler::new(SignalSender::Tokio(stop), None, "miner-updater".to_string()) } - async fn listen_block_template_notify(&self, addr: SocketAddr, stop_rx: oneshot::Receiver<()>) { + async fn listen_block_template_notify(&self, addr: SocketAddr) { let client = self.clone(); let make_service = make_service_fn(move |_conn| { let client = client.clone(); @@ -241,8 +232,10 @@ Otherwise ckb-miner does not work properly and will behave as it stopped committ }); let server = Server::bind(&addr).serve(make_service); + let stop_rx: CancellationToken = new_tokio_exit_rx(); let graceful = server.with_graceful_shutdown(async move { - stop_rx.await.ok(); + stop_rx.cancelled().await; + info!("Miner client received exit signal, exit now"); }); if let Err(e) = graceful.await { @@ -250,17 +243,21 @@ Otherwise ckb-miner does not work properly and will behave as it stopped committ } } - async fn poll_block_template(&self, mut stop_rx: oneshot::Receiver<()>) { + async fn poll_block_template(&self) { let poll_interval = time::Duration::from_millis(self.config.poll_interval); let mut interval = tokio::time::interval(poll_interval); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + let stop_rx: CancellationToken = new_tokio_exit_rx(); loop { tokio::select! { _ = interval.tick() => { debug!("poll block template..."); self.fetch_block_template().await; } - _ = &mut stop_rx => break, + _ = stop_rx.cancelled() => { + info!("Miner client pool_block_template received exit signal, exit now"); + break + }, else => break, } } diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 8119a7bea9..e14ffef72f 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -5,7 +5,6 @@ use ckb_app_config::MinerWorkerConfig; use ckb_channel::{select, unbounded, Receiver}; use ckb_logger::{debug, error, info}; use ckb_pow::PowEngine; -use ckb_stop_handler::{SignalSender, StopHandler}; use ckb_types::{ packed::{Byte32, Header}, prelude::*, @@ -27,7 +26,6 @@ pub struct Miner { pub(crate) worker_controllers: Vec, pub(crate) work_rx: Receiver, pub(crate) nonce_rx: Receiver<(Byte32, Work, u128)>, - pub(crate) stop_rx: Receiver<()>, pub(crate) pb: ProgressBar, pub(crate) nonces_found: u128, pub(crate) stderr_is_tty: bool, @@ -42,9 +40,8 @@ impl Miner { work_rx: Receiver, workers: &[MinerWorkerConfig], limit: u128, - ) -> (Miner, StopHandler<()>) { + ) -> Miner { let (nonce_tx, nonce_rx) = unbounded(); - let (stop, stop_rx) = unbounded(); let mp = MultiProgress::new(); let worker_controllers = workers @@ -61,9 +58,7 @@ impl Miner { mp.join().expect("MultiProgress join failed"); }); - let stop = StopHandler::new(SignalSender::Crossbeam(stop), None, "miner".to_string()); - - let miner = Miner { + Miner { legacy_work: LruCache::new(WORK_CACHE_SIZE), nonces_found: 0, _pow: pow, @@ -71,16 +66,14 @@ impl Miner { worker_controllers, work_rx, nonce_rx, - stop_rx, pb, stderr_is_tty, limit, - }; - (miner, stop) + } } /// TODO(doc): @quake - pub fn run(&mut self) { + pub fn run(&mut self, stop_rx: Receiver<()>) { loop { select! { recv(self.work_rx) -> msg => match msg { @@ -109,7 +102,8 @@ impl Miner { break; }, }, - recv(self.stop_rx) -> _msg => { + recv(stop_rx) -> _msg => { + info!("miner received exit signal, stopped"); break; } }; diff --git a/network/src/network.rs b/network/src/network.rs index cbe84e2cf2..c9436aa28d 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -21,7 +21,7 @@ use crate::{Behaviour, CKBProtocol, Peer, PeerIndex, ProtocolId, ServiceControl} use ckb_app_config::{default_support_all_protocols, NetworkConfig, SupportProtocol}; use ckb_logger::{debug, error, info, trace, warn}; use ckb_spawn::Spawn; -use ckb_stop_handler::{SignalSender, StopHandler}; +use ckb_stop_handler::{broadcast_exit_signals, new_tokio_exit_rx, CancellationToken}; use ckb_util::{Condvar, Mutex, RwLock}; use futures::{channel::mpsc::Sender, Future}; use ipnetwork::IpNetwork; @@ -490,18 +490,14 @@ impl NetworkState { } /// Used to handle global events of tentacle, such as session open/close -pub struct EventHandler { +pub struct EventHandler { pub(crate) network_state: Arc, - pub(crate) exit_handler: T, } -impl EventHandler { +impl EventHandler { /// init an event handler - pub fn new(network_state: Arc, exit_handler: T) -> Self { - Self { - network_state, - exit_handler, - } + pub fn new(network_state: Arc) -> Self { + Self { network_state } } } @@ -531,7 +527,7 @@ impl ExitHandler for DefaultExitHandler { } } -impl EventHandler { +impl EventHandler { fn inbound_eviction(&self) -> Vec { if self.network_state.config.bootnode_mode { let status = self.network_state.connection_status(); @@ -560,7 +556,7 @@ impl EventHandler { } #[async_trait] -impl ServiceHandle for EventHandler { +impl ServiceHandle for EventHandler { async fn handle_error(&mut self, context: &mut ServiceContext, error: ServiceError) { match error { ServiceError::DialerError { address, error } => { @@ -763,8 +759,8 @@ impl ServiceHandle for EventHandler { } /// Ckb network service, use to start p2p network -pub struct NetworkService { - p2p_service: Service>, +pub struct NetworkService { + p2p_service: Service, network_state: Arc, ping_controller: Option>, // Background services @@ -772,7 +768,7 @@ pub struct NetworkService { version: String, } -impl NetworkService { +impl NetworkService { /// init with all config pub fn new( network_state: Arc, @@ -780,7 +776,6 @@ impl NetworkService { required_protocol_ids: Vec, // name, version, flags identify_announce: (String, String, Flags), - exit_handler: T, ) -> Self { let config = &network_state.config; @@ -891,7 +886,6 @@ impl NetworkService { } let event_handler = EventHandler { network_state: Arc::clone(&network_state), - exit_handler, }; service_builder = service_builder .key_pair(network_state.local_private_key.clone()) @@ -1098,7 +1092,7 @@ impl NetworkService { }) .unzip(); - let (sender, mut receiver) = oneshot::channel(); + let receiver: CancellationToken = new_tokio_exit_rx(); let (start_sender, start_receiver) = mpsc::channel(); { let network_state = Arc::clone(&network_state); @@ -1130,7 +1124,8 @@ impl NetworkService { tokio::spawn(async move { p2p_service.run().await }); loop { tokio::select! { - _ = &mut receiver => { + _ = receiver.cancelled() => { + info!("NetworkService receive exit signal, start shutdown..."); let _ = p2p_control.shutdown().await; // Drop senders to stop all corresponding background task drop(bg_signals); @@ -1163,13 +1158,11 @@ impl NetworkService { return Err(e); } - let stop = StopHandler::new(SignalSender::Tokio(sender), None, "network".to_string()); Ok(NetworkController { version, network_state, p2p_control, ping_controller, - stop: Some(stop), }) } } @@ -1181,7 +1174,6 @@ pub struct NetworkController { network_state: Arc, p2p_control: ServiceControl, ping_controller: Option>, - stop: Option>, } impl NetworkController { @@ -1397,7 +1389,6 @@ impl NetworkController { /// it will not prevent the value stored in the allocation from being dropped pub fn non_owning_clone(&self) -> Self { NetworkController { - stop: None, version: self.version.clone(), network_state: Arc::clone(&self.network_state), p2p_control: self.p2p_control.clone(), @@ -1406,14 +1397,6 @@ impl NetworkController { } } -impl Drop for NetworkController { - fn drop(&mut self) { - if let Some(ref mut stop) = self.stop { - stop.try_send(()); - } - } -} - // Send an optional message before disconnect a peer pub(crate) fn disconnect_with_message( control: &ServiceControl, diff --git a/network/src/protocols/tests/mod.rs b/network/src/protocols/tests/mod.rs index db72949dda..140e9625d8 100644 --- a/network/src/protocols/tests/mod.rs +++ b/network/src/protocols/tests/mod.rs @@ -7,8 +7,7 @@ use super::{ }; use crate::{ - network::{DefaultExitHandler, EventHandler}, - services::protocol_type_checker::ProtocolTypeCheckerService, + network::EventHandler, services::protocol_type_checker::ProtocolTypeCheckerService, NetworkState, PeerIdentifyInfo, SupportProtocols, }; @@ -224,7 +223,6 @@ fn net_service_start( .forever(true) .build(EventHandler { network_state: Arc::clone(&network_state), - exit_handler: DefaultExitHandler::default(), }); let peer_id = network_state.local_peer_id().clone(); diff --git a/notify/src/lib.rs b/notify/src/lib.rs index 9a95336a3a..2b9969ba96 100644 --- a/notify/src/lib.rs +++ b/notify/src/lib.rs @@ -1,8 +1,8 @@ //! TODO(doc): @quake use ckb_app_config::NotifyConfig; use ckb_async_runtime::Handle; -use ckb_logger::{debug, error, trace}; -use ckb_stop_handler::{SignalSender, StopHandler}; +use ckb_logger::{debug, error, info, trace}; +use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::packed::Byte32; use ckb_types::{ core::{tx_pool::Reject, BlockView}, @@ -88,7 +88,6 @@ impl NotifyTimeout { /// TODO(doc): @quake #[derive(Clone)] pub struct NotifyController { - stop: StopHandler<()>, new_block_register: NotifyRegister, new_block_watcher: NotifyWatcher, new_block_notifier: Sender, @@ -103,12 +102,6 @@ pub struct NotifyController { handle: Handle, } -impl Drop for NotifyController { - fn drop(&mut self) { - self.stop.try_send(()); - } -} - /// TODO(doc): @quake pub struct NotifyService { config: NotifyConfig, @@ -142,7 +135,7 @@ impl NotifyService { /// start background tokio spawned task. pub fn start(mut self) -> NotifyController { - let (signal_sender, mut signal_receiver) = oneshot::channel(); + let signal_receiver: CancellationToken = new_tokio_exit_rx(); let handle = self.handle.clone(); let (new_block_register, mut new_block_register_receiver) = @@ -173,7 +166,8 @@ impl NotifyService { handle.spawn(async move { loop { tokio::select! { - _ = &mut signal_receiver => { + _ = signal_receiver.cancelled() => { + info!("NotifyService received exit signal, exit now"); break; } Some(msg) = new_block_register_receiver.recv() => { self.handle_register_new_block(msg) }, @@ -204,11 +198,6 @@ impl NotifyService { reject_transaction_notifier: reject_transaction_sender, network_alert_register, network_alert_notifier: network_alert_sender, - stop: StopHandler::new( - SignalSender::Tokio(signal_sender), - None, - "notify".to_string(), - ), handle, } } diff --git a/rpc/src/module/subscription.rs b/rpc/src/module/subscription.rs index 69d7cc0c88..c5ca5e5160 100644 --- a/rpc/src/module/subscription.rs +++ b/rpc/src/module/subscription.rs @@ -1,5 +1,6 @@ use ckb_jsonrpc_types::Topic; use ckb_notify::NotifyController; + use jsonrpc_core::{Metadata, Result}; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{ diff --git a/rpc/src/tests/examples.rs b/rpc/src/tests/examples.rs index 1164278acd..f134f22e16 100644 --- a/rpc/src/tests/examples.rs +++ b/rpc/src/tests/examples.rs @@ -10,7 +10,7 @@ use ckb_chain_spec::consensus::{Consensus, ConsensusBuilder}; use ckb_chain_spec::versionbits::{ActiveMode, Deployment, DeploymentPos}; use ckb_dao_utils::genesis_dao_data; use ckb_launcher::SharedBuilder; -use ckb_network::{DefaultExitHandler, Flags, NetworkService, NetworkState}; +use ckb_network::{Flags, NetworkService, NetworkState}; use ckb_network_alert::alert_relayer::AlertRelayer; use ckb_notify::NotifyService; use ckb_sync::SyncShared; @@ -151,7 +151,6 @@ fn setup_rpc_test_suite(height: u64) -> RpcTestSuite { "0.1.0".to_string(), Flags::COMPATIBILITY, ), - DefaultExitHandler::default(), ) .start(shared.async_handle()) .expect("Start network service failed") diff --git a/rpc/src/tests/mod.rs b/rpc/src/tests/mod.rs index b35c80844b..70ba66762c 100644 --- a/rpc/src/tests/mod.rs +++ b/rpc/src/tests/mod.rs @@ -4,7 +4,7 @@ use ckb_chain::chain::{ChainController, ChainService}; use ckb_dao::DaoCalculator; use ckb_jsonrpc_types::ScriptHashType; use ckb_launcher::SharedBuilder; -use ckb_network::{DefaultExitHandler, Flags, NetworkService, NetworkState}; +use ckb_network::{Flags, NetworkService, NetworkState}; use ckb_reward_calculator::RewardCalculator; use ckb_shared::{Shared, Snapshot}; use ckb_store::ChainStore; @@ -80,7 +80,7 @@ impl RpcTestResponse { #[allow(dead_code)] struct RpcTestSuite { - rpc_client: reqwest::blocking::Client, + rpc_client: Client, rpc_uri: String, shared: Shared, chain_controller: ChainController, @@ -249,7 +249,6 @@ fn setup() -> RpcTestSuite { "0.1.0".to_string(), Flags::COMPATIBILITY, ), - DefaultExitHandler::default(), ) .start(shared.async_handle()) .expect("Start network service failed") diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 84a780771b..66ebf6a756 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -24,7 +24,6 @@ ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre" } ckb-db-schema = { path = "../db-schema", version = "= 0.112.0-pre" } ckb-async-runtime = { path = "../util/runtime", version = "= 0.112.0-pre" } ckb-stop-handler = { path = "../util/stop-handler", version = "= 0.112.0-pre" } -ckb-channel = { path = "../util/channel", version = "= 0.112.0-pre" } ckb-constant = { path = "../util/constant", version = "= 0.112.0-pre" } ckb-systemtime = { path = "../util/systemtime", version = "= 0.112.0-pre" } diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 0ec83eb888..377b941df7 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -10,12 +10,12 @@ use ckb_db_schema::{COLUMN_BLOCK_BODY, COLUMN_NUMBER_HASH}; use ckb_error::{AnyError, Error}; use ckb_notify::NotifyController; use ckb_proposal_table::ProposalView; -use ckb_stop_handler::{SignalSender, StopHandler}; +use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_store::{ChainDB, ChainStore}; use ckb_systemtime::unix_time_as_millis; use ckb_tx_pool::{BlockTemplate, TokioRwLock, TxPoolController}; use ckb_types::{ - core::{service, BlockNumber, EpochExt, EpochNumber, HeaderView, Version}, + core::{BlockNumber, EpochExt, EpochNumber, HeaderView, Version}, packed::{self, Byte32}, prelude::*, U256, @@ -35,13 +35,11 @@ const MAX_FREEZE_LIMIT: BlockNumber = 30_000; /// An owned permission to close on a freezer thread pub struct FreezerClose { stopped: Arc, - stop: StopHandler<()>, } impl Drop for FreezerClose { fn drop(&mut self) { self.stopped.store(true, Ordering::SeqCst); - self.stop.try_send(()); } } @@ -86,10 +84,9 @@ impl Shared { pub fn spawn_freeze(&self) -> Option { if let Some(freezer) = self.store.freezer() { ckb_logger::info!("Freezer enable"); - let (signal_sender, signal_receiver) = - ckb_channel::bounded::<()>(service::SIGNAL_CHANNEL_SIZE); + let signal_receiver = new_crossbeam_exit_rx(); let shared = self.clone(); - let thread = thread::Builder::new() + let freeze_jh = thread::Builder::new() .spawn(move || loop { match signal_receiver.recv_timeout(FREEZER_INTERVAL) { Err(_) => { @@ -106,14 +103,10 @@ impl Shared { }) .expect("Start FreezerService failed"); - let stop = StopHandler::new( - SignalSender::Crossbeam(signal_sender), - Some(thread), - "freezer".to_string(), - ); + register_thread("freeze", freeze_jh); + return Some(FreezerClose { stopped: Arc::clone(&freezer.stopped), - stop, }); } None diff --git a/sync/src/relayer/tests/helper.rs b/sync/src/relayer/tests/helper.rs index 2f738f66f2..eb6b3c0a08 100644 --- a/sync/src/relayer/tests/helper.rs +++ b/sync/src/relayer/tests/helper.rs @@ -4,9 +4,9 @@ use ckb_chain::chain::ChainService; use ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_launcher::SharedBuilder; use ckb_network::{ - async_trait, bytes::Bytes as P2pBytes, Behaviour, CKBProtocolContext, DefaultExitHandler, - Error, Flags, NetworkController, NetworkService, NetworkState, Peer, PeerIndex, ProtocolId, - SupportProtocols, TargetSession, + async_trait, bytes::Bytes as P2pBytes, Behaviour, CKBProtocolContext, Error, Flags, + NetworkController, NetworkService, NetworkState, Peer, PeerIndex, ProtocolId, SupportProtocols, + TargetSession, }; use ckb_shared::Shared; use ckb_store::ChainStore; @@ -122,7 +122,6 @@ pub(crate) fn dummy_network(shared: &Shared) -> NetworkController { "test".to_string(), Flags::COMPATIBILITY, ), - DefaultExitHandler::default(), ) .start(shared.async_handle()) .expect("Start network service failed") diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 55e8edf394..a2ab6cf46b 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -37,6 +37,7 @@ use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, ServiceControl, SupportProtocols, }; +use ckb_stop_handler::register_thread; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ core::{self, BlockNumber}, @@ -625,8 +626,9 @@ impl Synchronizer { self.fetch_channel = Some(sender); let thread = ::std::thread::Builder::new(); let number = self.shared.state().shared_best_header_ref().number(); - thread - .name("BlockDownload".to_string()) + const THREAD_NAME: &str = "BlockDownload"; + let blockdownload_jh = thread + .name(THREAD_NAME.into()) .spawn(move || { BlockFetchCMD { sync, @@ -638,6 +640,7 @@ impl Synchronizer { .run(); }) .expect("download thread can't start"); + register_thread(THREAD_NAME, blockdownload_jh); } }, None => { diff --git a/sync/src/tests/net_time_checker.rs b/sync/src/tests/net_time_checker.rs index 890e0e218b..d849458754 100644 --- a/sync/src/tests/net_time_checker.rs +++ b/sync/src/tests/net_time_checker.rs @@ -2,8 +2,8 @@ use crate::net_time_checker::{NetTimeChecker, NetTimeProtocol, TOLERANT_OFFSET}; use ckb_app_config::NetworkConfig; use ckb_network::{ multiaddr::{Multiaddr, Protocol}, - CKBProtocol, DefaultExitHandler, EventHandler, NetworkState, ServiceBuilder, ServiceControl, - SessionId, SupportProtocols, TargetProtocol, + CKBProtocol, EventHandler, NetworkState, ServiceBuilder, ServiceControl, SessionId, + SupportProtocols, TargetProtocol, }; use std::{ borrow::Cow, @@ -102,10 +102,7 @@ fn net_service_start() -> Node { .key_pair(network_state.local_private_key().clone()) .upnp(config.upnp) .forever(true) - .build(EventHandler::new( - Arc::clone(&network_state), - DefaultExitHandler::default(), - )); + .build(EventHandler::new(Arc::clone(&network_state))); let peer_id = network_state.local_peer_id().clone(); diff --git a/sync/src/types/header_map/mod.rs b/sync/src/types/header_map/mod.rs index 5210e482f6..78939164b6 100644 --- a/sync/src/types/header_map/mod.rs +++ b/sync/src/types/header_map/mod.rs @@ -1,10 +1,11 @@ use ckb_async_runtime::Handle; -use ckb_stop_handler::{SignalSender, StopHandler}; +use ckb_logger::info; +use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::packed::Byte32; use std::sync::Arc; use std::time::Duration; use std::{mem::size_of, path}; -use tokio::sync::oneshot; + use tokio::time::MissedTickBehavior; mod backend; @@ -21,13 +22,6 @@ use super::HeaderIndexView; pub struct HeaderMap { inner: Arc>, - stop: StopHandler<()>, -} - -impl Drop for HeaderMap { - fn drop(&mut self) { - self.stop.try_send(()); - } } const INTERVAL: Duration = Duration::from_millis(500); @@ -51,7 +45,7 @@ impl HeaderMap { let size_limit = memory_limit / ITEM_BYTES_SIZE; let inner = Arc::new(HeaderMapKernel::new(tmpdir, size_limit)); let map = Arc::clone(&inner); - let (stop, mut stop_rx) = oneshot::channel::<()>(); + let stop_rx: CancellationToken = new_tokio_exit_rx(); async_handle.spawn(async move { let mut interval = tokio::time::interval(INTERVAL); @@ -61,15 +55,15 @@ impl HeaderMap { _ = interval.tick() => { map.limit_memory(); } - _ = &mut stop_rx => break, + _ = stop_rx.cancelled() => { + info!("HeaderMap limit_memory received exit signal, exit now"); + break + }, } } }); - Self { - inner, - stop: StopHandler::new(SignalSender::Tokio(stop), None, "HeaderMap".to_string()), - } + Self { inner } } pub(crate) fn contains_key(&self, hash: &Byte32) -> bool { diff --git a/test/src/net.rs b/test/src/net.rs index 5cf87930ad..56c4f5676e 100644 --- a/test/src/net.rs +++ b/test/src/net.rs @@ -7,8 +7,8 @@ use ckb_channel::{self as channel, unbounded, Receiver, RecvTimeoutError, Sender use ckb_logger::info; use ckb_network::{ async_trait, bytes::Bytes, extract_peer_id, CKBProtocol, CKBProtocolContext, - CKBProtocolHandler, DefaultExitHandler, Flags, NetworkController, NetworkService, NetworkState, - PeerIndex, ProtocolId, SupportProtocols, + CKBProtocolHandler, Flags, NetworkController, NetworkService, NetworkState, PeerIndex, + ProtocolId, SupportProtocols, }; use ckb_util::Mutex; use std::collections::HashMap; @@ -63,7 +63,7 @@ impl Net { ) }) .collect(); - let (async_handle, async_runtime) = new_global_runtime(); + let (async_handle, _handle_recv, async_runtime) = new_global_runtime(); let controller = NetworkService::new( Arc::clone(&network_state), ckb_protocols, @@ -73,7 +73,6 @@ impl Net { "0.1.0".to_string(), Flags::COMPATIBILITY, ), - DefaultExitHandler::default(), ) .start(&async_handle) .unwrap(); diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index 5fd3bf0503..bc425d696c 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -39,6 +39,7 @@ hyper = { version = "0.14", features = ["http1", "client", "tcp"] } multi_index_map = "0.5.0" slab = "0.4" rustc-hash = "1.1" +tokio-util = "0.7.8" [dev-dependencies] tempfile.workspace = true diff --git a/tx-pool/src/chunk_process.rs b/tx-pool/src/chunk_process.rs index c86a2966cb..b35e547a21 100644 --- a/tx-pool/src/chunk_process.rs +++ b/tx-pool/src/chunk_process.rs @@ -4,6 +4,7 @@ use crate::try_or_return_with_snapshot; use crate::{error::Reject, service::TxPoolService}; use ckb_chain_spec::consensus::Consensus; use ckb_error::Error; +use ckb_logger::info; use ckb_snapshot::Snapshot; use ckb_store::data_loader_wrapper::AsDataLoader; use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; @@ -21,6 +22,7 @@ use std::sync::Arc; use tokio::sync::watch; use tokio::sync::RwLock; use tokio::task::block_in_place; +use tokio_util::sync::CancellationToken; const MIN_STEP_CYCLE: Cycle = 10_000_000; @@ -41,15 +43,15 @@ enum State { pub(crate) struct ChunkProcess { service: TxPoolService, recv: watch::Receiver, - signal: watch::Receiver, current_state: ChunkCommand, + signal: CancellationToken, } impl ChunkProcess { pub fn new( service: TxPoolService, recv: watch::Receiver, - signal: watch::Receiver, + signal: CancellationToken, ) -> Self { ChunkProcess { service, @@ -73,7 +75,10 @@ impl ChunkProcess { } } }, - _ = self.signal.changed() => break, + _ = self.signal.cancelled() => { + info!("TxPool received exit signal, exit now"); + break + }, _ = interval.tick() => { if matches!(self.current_state, ChunkCommand::Resume) { let stop = self.try_process().await; @@ -136,7 +141,7 @@ impl ChunkProcess { let mut tmp_state: Option = None; let completed: Cycle = loop { - if self.signal.has_changed().unwrap_or(false) { + if self.signal.is_cancelled() { return Ok(State::Stopped); } if self.recv.has_changed().unwrap_or(false) { diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 678a9fbad5..0013d5cf33 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -916,7 +916,7 @@ impl TxPoolService { } } - pub(crate) async fn save_pool(&mut self) { + pub(crate) async fn save_pool(&self) { let mut tx_pool = self.tx_pool.write().await; if let Err(err) = tx_pool.save_into_file() { error!("failed to save pool, error: {:?}", err) diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index a4184809a1..f405129201 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -17,7 +17,7 @@ use ckb_logger::error; use ckb_logger::info; use ckb_network::{NetworkController, PeerIndex}; use ckb_snapshot::Snapshot; -use ckb_stop_handler::{SignalSender, StopHandler, WATCH_INIT}; +use ckb_stop_handler::new_tokio_exit_rx; use ckb_types::core::tx_pool::{TransactionWithStatus, TxStatus}; use ckb_types::{ core::{ @@ -37,6 +37,7 @@ use std::time::Duration; use tokio::sync::watch; use tokio::sync::{mpsc, RwLock}; use tokio::task::block_in_place; +use tokio_util::sync::CancellationToken; #[cfg(feature = "internal")] use crate::{component::entry::TxEntry, process::PlugTarget}; @@ -128,18 +129,9 @@ pub struct TxPoolController { reorg_sender: mpsc::Sender>, chunk_tx: Arc>, handle: Handle, - stop: StopHandler<()>, started: Arc, } -impl Drop for TxPoolController { - fn drop(&mut self) { - if self.service_started() { - self.stop.try_send(()); - } - } -} - macro_rules! send_message { ($self:ident, $msg_type:ident, $args:expr) => {{ let (responder, response) = oneshot::channel(); @@ -378,7 +370,7 @@ pub struct TxPoolServiceBuilder { pub(crate) callbacks: Callbacks, pub(crate) receiver: mpsc::Receiver, pub(crate) reorg_receiver: mpsc::Receiver>, - pub(crate) signal_receiver: watch::Receiver, + pub(crate) signal_receiver: CancellationToken, pub(crate) handle: Handle, pub(crate) tx_relay_sender: ckb_channel::Sender, pub(crate) chunk_rx: watch::Receiver, @@ -403,22 +395,16 @@ impl TxPoolServiceBuilder { let (sender, receiver) = mpsc::channel(DEFAULT_CHANNEL_SIZE); let block_assembler_channel = mpsc::channel(BLOCK_ASSEMBLER_CHANNEL_SIZE); let (reorg_sender, reorg_receiver) = mpsc::channel(DEFAULT_CHANNEL_SIZE); - let (signal_sender, signal_receiver) = watch::channel(WATCH_INIT); + let signal_receiver: CancellationToken = new_tokio_exit_rx(); let (chunk_tx, chunk_rx) = watch::channel(ChunkCommand::Resume); let chunk = Arc::new(RwLock::new(ChunkQueue::new())); let started = Arc::new(AtomicBool::new(false)); - let stop = StopHandler::new( - SignalSender::Watch(signal_sender), - None, - "tx-pool".to_string(), - ); let controller = TxPoolController { sender, reorg_sender, handle: handle.clone(), chunk_tx: Arc::new(chunk_tx), - stop, started: Arc::clone(&started), }; @@ -515,7 +501,7 @@ impl TxPoolServiceBuilder { let handle_clone = self.handle.clone(); let process_service = service.clone(); - let mut signal_receiver = self.signal_receiver.clone(); + let signal_receiver = self.signal_receiver.clone(); self.handle.spawn(async move { loop { tokio::select! { @@ -523,7 +509,11 @@ impl TxPoolServiceBuilder { let service_clone = process_service.clone(); handle_clone.spawn(process(service_clone, message)); }, - _ = signal_receiver.changed() => break, + _ = signal_receiver.cancelled() => { + info!("TxPool is saving, please wait..."); + process_service.save_pool().await; + break + }, else => break, } } @@ -531,7 +521,7 @@ impl TxPoolServiceBuilder { let process_service = service.clone(); if let Some(ref block_assembler) = service.block_assembler { - let mut signal_receiver = self.signal_receiver.clone(); + let signal_receiver = self.signal_receiver.clone(); let interval = Duration::from_millis(block_assembler.config.update_interval_millis); if interval.is_zero() { // block_assembler.update_interval_millis set zero interval should only be used for tests, @@ -547,7 +537,10 @@ impl TxPoolServiceBuilder { let service_clone = process_service.clone(); block_assembler::process(service_clone, &message).await; }, - _ = signal_receiver.changed() => break, + _ = signal_receiver.cancelled() => { + info!("TxPool received exit signal, exit now"); + break + }, else => break, } } @@ -579,7 +572,10 @@ impl TxPoolServiceBuilder { } queue.clear(); } - _ = signal_receiver.changed() => break, + _ = signal_receiver.cancelled() => { + info!("TxPool received exit signal, exit now"); + break + }, else => break, } } @@ -587,7 +583,7 @@ impl TxPoolServiceBuilder { } } - let mut signal_receiver = self.signal_receiver; + let signal_receiver = self.signal_receiver; self.handle.spawn(async move { loop { tokio::select! { @@ -614,7 +610,10 @@ impl TxPoolServiceBuilder { service.update_block_assembler_after_tx_pool_reorg().await; }, - _ = signal_receiver.changed() => break, + _ = signal_receiver.cancelled() => { + info!("TxPool received exit signal, exit now"); + break + }, else => break, } } diff --git a/util/channel/src/lib.rs b/util/channel/src/lib.rs index 90755a11f3..a250f5f104 100644 --- a/util/channel/src/lib.rs +++ b/util/channel/src/lib.rs @@ -1,7 +1,7 @@ //! Reexports `crossbeam_channel` to uniform the dependency version. pub use crossbeam_channel::{ - bounded, select, unbounded, Receiver, RecvError, RecvTimeoutError, Select, SendError, Sender, - TrySendError, + after, bounded, select, tick, unbounded, Receiver, RecvError, RecvTimeoutError, Select, + SendError, Sender, TrySendError, }; pub mod oneshot { diff --git a/util/indexer/src/service.rs b/util/indexer/src/service.rs index d104eb3646..ef8f876aee 100644 --- a/util/indexer/src/service.rs +++ b/util/indexer/src/service.rs @@ -7,7 +7,7 @@ use crate::store::{IteratorDirection, RocksdbStore, SecondaryDB, Store}; use crate::error::Error; use ckb_app_config::{DBConfig, IndexerConfig}; use ckb_async_runtime::{ - tokio::{self, sync::watch, time}, + tokio::{self, time}, Handle, }; use ckb_db_schema::{COLUMN_BLOCK_BODY, COLUMN_BLOCK_HEADER, COLUMN_INDEX, COLUMN_META}; @@ -18,7 +18,7 @@ use ckb_jsonrpc_types::{ }; use ckb_logger::{error, info}; use ckb_notify::NotifyController; -use ckb_stop_handler::{SignalSender, StopHandler, WATCH_INIT}; +use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_store::ChainStore; use ckb_types::{core, packed, prelude::*, H256}; use rocksdb::{prelude::*, Direction, IteratorMode}; @@ -39,8 +39,6 @@ pub struct IndexerService { pool: Option>>, poll_interval: Duration, async_handle: Handle, - stop_handler: StopHandler<()>, - stop: watch::Receiver, block_filter: Option, cell_filter: Option, } @@ -48,13 +46,6 @@ pub struct IndexerService { impl IndexerService { /// Construct new Indexer service instance from DBConfig and IndexerConfig pub fn new(ckb_db_config: &DBConfig, config: &IndexerConfig, async_handle: Handle) -> Self { - let (stop_sender, stop) = watch::channel(WATCH_INIT); - let stop_handler = StopHandler::new( - SignalSender::Watch(stop_sender), - None, - "indexer".to_string(), - ); - let store_opts = Self::indexer_store_options(config); let store = RocksdbStore::new(&store_opts, &config.store); let pool = if config.index_tx_pool { @@ -82,8 +73,6 @@ impl IndexerService { secondary_db, pool, async_handle, - stop_handler, - stop, poll_interval: Duration::from_secs(config.poll_interval), block_filter: config.block_filter.clone(), cell_filter: config.cell_filter.clone(), @@ -98,14 +87,13 @@ impl IndexerService { IndexerHandle { store: self.store.clone(), pool: self.pool.clone(), - stop_handler: self.stop_handler.clone(), } } /// Processes that handle index pool transaction and expect to be spawned to run in tokio runtime pub fn index_tx_pool(&self, notify_controller: NotifyController) { let service = self.clone(); - let mut stop = self.stop.clone(); + let stop: CancellationToken = new_tokio_exit_rx(); self.async_handle.spawn(async move { let mut new_transaction_receiver = notify_controller @@ -129,7 +117,10 @@ impl IndexerService { .transaction_rejected(&tx_entry.transaction); } } - _ = stop.changed() => break, + _ = stop.cancelled() => { + info!("Indexer received exit signal, exit now"); + break + }, else => break, } } @@ -183,7 +174,7 @@ impl IndexerService { let initial_syncing = self .async_handle .spawn_blocking(move || initial_service.try_loop_sync()); - let mut stop = self.stop.clone(); + let stop: CancellationToken = new_tokio_exit_rx(); let async_handle = self.async_handle.clone(); let poll_service = self.clone(); self.async_handle.spawn(async move { @@ -212,7 +203,10 @@ impl IndexerService { error!("ckb indexer syncing join error {:?}", e); } } - _ = stop.changed() => break, + _ = stop.cancelled() => { + info!("Indexer received exit signal, exit now"); + break + }, } } }); @@ -262,13 +256,6 @@ impl IndexerService { pub struct IndexerHandle { pub(crate) store: RocksdbStore, pub(crate) pool: Option>>, - stop_handler: StopHandler<()>, -} - -impl Drop for IndexerHandle { - fn drop(&mut self) { - self.stop_handler.try_send(()); - } } impl IndexerHandle { @@ -984,11 +971,9 @@ mod tests { let store = new_store("rpc"); let pool = Arc::new(RwLock::new(Pool::default())); let indexer = Indexer::new(store.clone(), 10, 100, None, CustomFilters::new(None, None)); - let stop_handler = StopHandler::new(SignalSender::Dummy, None, "indexer-test".to_string()); let rpc = IndexerHandle { store, pool: Some(Arc::clone(&pool)), - stop_handler, }; // setup test data @@ -1573,12 +1558,7 @@ mod tests { fn script_search_mode_rpc() { let store = new_store("script_search_mode_rpc"); let indexer = Indexer::new(store.clone(), 10, 100, None, CustomFilters::new(None, None)); - let stop_handler = StopHandler::new(SignalSender::Dummy, None, "indexer-test".to_string()); - let rpc = IndexerHandle { - store, - pool: None, - stop_handler, - }; + let rpc = IndexerHandle { store, pool: None }; // setup test data let lock_script1 = ScriptBuilder::default() diff --git a/util/launcher/Cargo.toml b/util/launcher/Cargo.toml index 35aa2a90e3..a1ec81525d 100644 --- a/util/launcher/Cargo.toml +++ b/util/launcher/Cargo.toml @@ -39,7 +39,6 @@ ckb-freezer = { path = "../../freezer", version = "= 0.112.0-pre" } ckb-notify = { path = "../../notify", version = "= 0.112.0-pre" } ckb-snapshot = { path = "../snapshot", version = "= 0.112.0-pre" } ckb-tx-pool = { path = "../../tx-pool", version = "= 0.112.0-pre" } -ckb-stop-handler = { path = "../stop-handler", version = "= 0.112.0-pre" } ckb-light-client-protocol-server = { path = "../light-client-protocol-server", version = "= 0.112.0-pre" } ckb-block-filter = { path = "../../block-filter", version = "= 0.112.0-pre" } ckb-hash = { path = "../hash", version = "= 0.112.0-pre" } diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 96259acb5b..ad56947f35 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -23,15 +23,15 @@ use ckb_jsonrpc_types::ScriptHashType; use ckb_light_client_protocol_server::LightClientProtocol; use ckb_logger::info; use ckb_network::{ - observe_listen_port_occupancy, CKBProtocol, DefaultExitHandler, Flags, NetworkController, - NetworkService, NetworkState, SupportProtocols, + observe_listen_port_occupancy, CKBProtocol, Flags, NetworkController, NetworkService, + NetworkState, SupportProtocols, }; use ckb_network_alert::alert_relayer::AlertRelayer; use ckb_proposal_table::ProposalTable; use ckb_resource::Resource; use ckb_rpc::{RpcServer, ServiceBuilder}; use ckb_shared::Shared; -use ckb_stop_handler::StopHandler; + use ckb_store::{ChainDB, ChainStore}; use ckb_sync::{BlockFilter, NetTimeProtocol, Relayer, SyncShared, Synchronizer}; use ckb_tx_pool::service::TxVerificationResult; @@ -250,7 +250,7 @@ impl Launcher { } /// start block filter service - pub fn start_block_filter(&self, shared: &Shared) -> Option> { + pub fn start_block_filter(&self, shared: &Shared) { if self .args .config @@ -258,9 +258,7 @@ impl Launcher { .support_protocols .contains(&SupportProtocol::Filter) { - Some(BlockFilterService::new(shared.clone()).start()) - } else { - None + BlockFilterService::new(shared.clone()).start(); } } @@ -269,7 +267,6 @@ impl Launcher { &self, shared: &Shared, chain_controller: ChainController, - exit_handler: &DefaultExitHandler, miner_enable: bool, relay_tx_receiver: Receiver, ) -> (NetworkController, RpcServer) { @@ -383,7 +380,6 @@ impl Launcher { self.version.to_string(), flags, ), - exit_handler.clone(), ) .start(shared.async_handle()) .expect("Start network service failed"); diff --git a/util/launcher/src/shared_builder.rs b/util/launcher/src/shared_builder.rs index 7c1a096953..09f9fd862c 100644 --- a/util/launcher/src/shared_builder.rs +++ b/util/launcher/src/shared_builder.rs @@ -19,7 +19,7 @@ use ckb_proposal_table::ProposalTable; use ckb_proposal_table::ProposalView; use ckb_shared::Shared; use ckb_snapshot::{Snapshot, SnapshotMgr}; -use ckb_stop_handler::StopHandler; + use ckb_store::ChainDB; use ckb_store::ChainStore; use ckb_tx_pool::{ @@ -151,7 +151,7 @@ impl SharedBuilder { thread_local! { // NOTICE:we can't put the runtime directly into thread_local here, // on windows the runtime in thread_local will get stuck when dropping - static RUNTIME_HANDLE: unsync::OnceCell<(Handle, StopHandler<()>)> = unsync::OnceCell::new(); + static RUNTIME_HANDLE: unsync::OnceCell = unsync::OnceCell::new(); } static DB_COUNT: AtomicUsize = AtomicUsize::new(0); @@ -177,11 +177,7 @@ impl SharedBuilder { notify_config: None, store_config: None, block_assembler_config: None, - async_handle: runtime - .borrow() - .get_or_init(new_background_runtime) - .0 - .clone(), + async_handle: runtime.borrow().get_or_init(new_background_runtime).clone(), }) } } diff --git a/util/light-client-protocol-server/src/tests/utils/chain.rs b/util/light-client-protocol-server/src/tests/utils/chain.rs index a87805c486..fcfd483a86 100644 --- a/util/light-client-protocol-server/src/tests/utils/chain.rs +++ b/util/light-client-protocol-server/src/tests/utils/chain.rs @@ -9,7 +9,7 @@ use ckb_chain_spec::consensus::{build_genesis_epoch_ext, ConsensusBuilder}; use ckb_dao_utils::genesis_dao_data; use ckb_jsonrpc_types::ScriptHashType; use ckb_launcher::SharedBuilder; -use ckb_network::{DefaultExitHandler, Flags, NetworkController, NetworkService, NetworkState}; +use ckb_network::{Flags, NetworkController, NetworkService, NetworkState}; use ckb_shared::Shared; use ckb_systemtime::unix_time_as_millis; use ckb_test_chain_utils::always_success_cell; @@ -242,7 +242,6 @@ fn dummy_network(shared: &Shared) -> NetworkController { "test".to_string(), Flags::all(), ), - DefaultExitHandler::default(), ) .start(shared.async_handle()) .expect("Start network service failed") diff --git a/util/metrics-service/Cargo.toml b/util/metrics-service/Cargo.toml index 0bd9c7d9bd..20c6ecf941 100644 --- a/util/metrics-service/Cargo.toml +++ b/util/metrics-service/Cargo.toml @@ -16,3 +16,4 @@ ckb-async-runtime = { path = "../runtime", version = "= 0.112.0-pre" } ckb-util = { path = "..", version = "= 0.112.0-pre" } prometheus = "0.13.3" hyper = { version = "0.14", features = ["http1", "tcp", "server"] } +ckb-stop-handler = { path = "../stop-handler", version = "= 0.112.0-pre" } diff --git a/util/metrics-service/src/lib.rs b/util/metrics-service/src/lib.rs index 6d7fb3ebfc..1b88171de0 100644 --- a/util/metrics-service/src/lib.rs +++ b/util/metrics-service/src/lib.rs @@ -10,7 +10,9 @@ use hyper::{ use prometheus::Encoder as _; use ckb_async_runtime::Handle; +use ckb_logger::info; use ckb_metrics_config::{Config, Exporter, Target}; +use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_util::strings; /// Ensures the metrics service can shutdown gracefully. @@ -59,7 +61,13 @@ fn run_exporter(exporter: Exporter, handle: &Handle) -> Result<(), String> { }); ckb_logger::info!("start prometheus exporter at {}", addr); handle.spawn(async move { - let server = Server::bind(&addr).serve(make_svc); + let server = Server::bind(&addr) + .serve(make_svc) + .with_graceful_shutdown(async { + let exit_rx: CancellationToken = new_tokio_exit_rx(); + exit_rx.cancelled().await; + info!("prometheus server received exit signal, exit now"); + }); if let Err(err) = server.await { ckb_logger::error!("prometheus server error: {}", err); } diff --git a/util/network-alert/Cargo.toml b/util/network-alert/Cargo.toml index 035d01fc3b..c42f154094 100644 --- a/util/network-alert/Cargo.toml +++ b/util/network-alert/Cargo.toml @@ -25,7 +25,6 @@ semver = "1.0" [dev-dependencies] ckb-crypto = { path = "../crypto", version = "= 0.112.0-pre" } ckb-async-runtime = { path = "../runtime", version = "= 0.112.0-pre" } -ckb-stop-handler = { path = "../stop-handler", version = "= 0.112.0-pre" } once_cell = "1.8.0" ckb-systemtime = {path = "../systemtime", version = "= 0.112.0-pre", features = ["enable_faketime"]} faster-hex = "0.6" diff --git a/util/network-alert/src/tests/test_notifier.rs b/util/network-alert/src/tests/test_notifier.rs index 6b6a1f9bd8..fc793dc5e8 100644 --- a/util/network-alert/src/tests/test_notifier.rs +++ b/util/network-alert/src/tests/test_notifier.rs @@ -1,7 +1,7 @@ use crate::notifier::Notifier; use ckb_async_runtime::{new_background_runtime, Handle}; use ckb_notify::NotifyService; -use ckb_stop_handler::StopHandler; + use ckb_types::{packed, prelude::*}; use once_cell::unsync; use std::borrow::Borrow; @@ -27,17 +27,13 @@ fn new_notifier(version: &str) -> Notifier { thread_local! { // NOTICE:we can't put the runtime directly into thread_local here, // on windows the runtime in thread_local will get stuck when dropping - static RUNTIME_HANDLE: unsync::OnceCell<(Handle, StopHandler<()>)> = unsync::OnceCell::new(); + static RUNTIME_HANDLE: unsync::OnceCell = unsync::OnceCell::new(); } let notify_controller = RUNTIME_HANDLE.with(|runtime| { NotifyService::new( Default::default(), - runtime - .borrow() - .get_or_init(new_background_runtime) - .0 - .clone(), + runtime.borrow().get_or_init(new_background_runtime).clone(), ) .start() }); diff --git a/util/runtime/Cargo.toml b/util/runtime/Cargo.toml index 0dd064eb1b..6e01a931dd 100644 --- a/util/runtime/Cargo.toml +++ b/util/runtime/Cargo.toml @@ -10,6 +10,5 @@ repository = "https://github.com/nervosnetwork/ckb" [dependencies] tokio = { version = "1", features = ["full"] } -ckb-stop-handler = { path = "../stop-handler", version = "= 0.112.0-pre" } ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } ckb-spawn = { path = "../spawn", version = "= 0.112.0-pre" } diff --git a/util/runtime/src/lib.rs b/util/runtime/src/lib.rs index 6984ded8e4..17a60a9d53 100644 --- a/util/runtime/src/lib.rs +++ b/util/runtime/src/lib.rs @@ -1,17 +1,17 @@ //! Utilities for tokio runtime. use ckb_spawn::Spawn; -use ckb_stop_handler::{SignalSender, StopHandler}; use core::future::Future; use std::sync::atomic::{AtomicU32, Ordering}; -use std::thread; + use tokio::runtime::Builder; use tokio::runtime::Handle as TokioHandle; -use tokio::sync::oneshot; + use tokio::task::JoinHandle; pub use tokio; pub use tokio::runtime::Runtime; +use tokio::sync::mpsc::{Receiver, Sender}; // Handle is a newtype wrap and unwrap tokio::Handle, it is workaround with Rust Orphan Rules. // We need `Handle` impl ckb spawn trait decouple tokio dependence @@ -20,6 +20,19 @@ pub use tokio::runtime::Runtime; #[derive(Debug, Clone)] pub struct Handle { pub(crate) inner: TokioHandle, + guard: Option>, +} + +impl Handle { + /// Create a new Handle + pub fn new(inner: TokioHandle, guard: Option>) -> Self { + Self { inner, guard } + } + + /// Drop the guard + pub fn drop_guard(&mut self) { + let _ = self.guard.take(); + } } impl Handle { @@ -42,7 +55,15 @@ impl Handle { F: Future + Send + 'static, F::Output: Send + 'static, { - self.inner.spawn(future) + let tokio_task_guard = self.guard.clone(); + + self.inner.spawn(async move { + // move tokio_task_guard into the spawned future + // so that it will be dropped when the future is finished + let _guard = tokio_task_guard; + + future.await + }) } /// Run a future to completion on the Tokio runtime from a synchronous context. @@ -101,32 +122,31 @@ fn new_runtime() -> Runtime { } /// Create new threaded_scheduler tokio Runtime, return `Runtime` -pub fn new_global_runtime() -> (Handle, Runtime) { +pub fn new_global_runtime() -> (Handle, Receiver<()>, Runtime) { let runtime = new_runtime(); let handle = runtime.handle().clone(); + let (guard, handle_stop_rx): (Sender<()>, Receiver<()>) = tokio::sync::mpsc::channel::<()>(1); - (Handle { inner: handle }, runtime) + (Handle::new(handle, Some(guard)), handle_stop_rx, runtime) } /// Create new threaded_scheduler tokio Runtime, return `Handle` and background thread join handle, /// NOTICE: This is only used in testing -pub fn new_background_runtime() -> (Handle, StopHandler<()>) { +pub fn new_background_runtime() -> Handle { let runtime = new_runtime(); let handle = runtime.handle().clone(); - let (tx, rx) = oneshot::channel(); - let thread = thread::Builder::new() + let (guard, mut handle_stop_rx): (Sender<()>, Receiver<()>) = + tokio::sync::mpsc::channel::<()>(1); + let _thread = std::thread::Builder::new() .name("GlobalRtBuilder".to_string()) .spawn(move || { - let ret = runtime.block_on(rx); + let ret = runtime.block_on(async move { handle_stop_rx.recv().await }); ckb_logger::debug!("global runtime finish {:?}", ret); }) .expect("tokio runtime started"); - ( - Handle { inner: handle }, - StopHandler::new(SignalSender::Tokio(tx), Some(thread), "GT".to_string()), - ) + Handle::new(handle, Some(guard)) } impl Spawn for Handle { diff --git a/util/stop-handler/Cargo.toml b/util/stop-handler/Cargo.toml index 996648b6ba..4ff8f9d529 100644 --- a/util/stop-handler/Cargo.toml +++ b/util/stop-handler/Cargo.toml @@ -9,7 +9,16 @@ homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] -parking_lot = "0.12" ckb-logger = { path = "../logger", version = "= 0.112.0-pre" } tokio = { version = "1", features = ["sync", "rt-multi-thread"] } ckb-channel = { path = "../channel", version = "= 0.112.0-pre" } +ckb-util = { path = "..", version = "= 0.112.0-pre" } +once_cell = "1.8.0" +ckb-async-runtime = { path = "../runtime", version = "= 0.112.0-pre" } +tokio-util = "0.7.8" + + +[dev-dependencies] +ctrlc = { version = "3.1", features = ["termination"] } +libc = "0.2" +rand = "0.8.5" From 97f7766518676547cf9e8bbe76e2efab49d69ab3 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 20 Jun 2023 17:04:55 +0800 Subject: [PATCH 034/267] exit when `ProtocolHandleErrorKind::AbnormallyClosed` received Signed-off-by: Eval EXEC --- network/src/network.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/network/src/network.rs b/network/src/network.rs index c9436aa28d..eaf3f7576b 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -654,7 +654,9 @@ impl ServiceHandle for EventHandler { ) }, ); - self.exit_handler.notify_exit(); + error!("ProtocolHandleError: AbnormallyClosed, proto_id: {opt_session_id:?}, session id: {opt_session_id:?}"); + + broadcast_exit_signals(); } } } From c13a089f66eda200123ea22ab6b2a262d521cf73 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 20 Jun 2023 17:55:56 +0800 Subject: [PATCH 035/267] Fix BlockDownload thread exit Signed-off-by: Eval EXEC --- Cargo.lock | 5 -- sync/src/synchronizer/mod.rs | 104 ++++++++++++++----------- util/stop-handler/src/stop_register.rs | 20 +++-- 3 files changed, 71 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eb5351f95d..afdd099c92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -804,7 +804,6 @@ dependencies = [ "ckb-rpc", "ckb-shared", "ckb-snapshot", - "ckb-stop-handler", "ckb-store", "ckb-sync", "ckb-systemtime", @@ -1038,7 +1037,6 @@ dependencies = [ "ckb-multisig", "ckb-network", "ckb-notify", - "ckb-stop-handler", "ckb-systemtime", "ckb-types", "ckb-util", @@ -1179,7 +1177,6 @@ dependencies = [ "ckb-pow", "ckb-reward-calculator", "ckb-shared", - "ckb-stop-handler", "ckb-store", "ckb-sync", "ckb-systemtime", @@ -1245,7 +1242,6 @@ dependencies = [ "arc-swap", "ckb-async-runtime", "ckb-chain-spec", - "ckb-channel", "ckb-constant", "ckb-db", "ckb-db-schema", @@ -1293,7 +1289,6 @@ dependencies = [ "ctrlc", "libc", "once_cell", - "parking_lot 0.12.1", "rand 0.8.5", "tokio", "tokio-util 0.7.8", diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index a2ab6cf46b..7341ce4bc8 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -27,6 +27,7 @@ use crate::{Status, StatusCode}; use ckb_chain::chain::ChainController; use ckb_channel as channel; +use ckb_channel::{select, Receiver}; use ckb_constant::sync::{ BAD_MESSAGE_BAN_TIME, CHAIN_SYNC_TIMEOUT, EVICTION_HEADERS_RESPONSE_TIME, INIT_BLOCKS_IN_TRANSIT_PER_PEER, MAX_TIP_AGE, @@ -37,7 +38,7 @@ use ckb_network::{ async_trait, bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, ServiceControl, SupportProtocols, }; -use ckb_stop_handler::register_thread; +use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ core::{self, BlockNumber}, @@ -87,58 +88,70 @@ struct BlockFetchCMD { } impl BlockFetchCMD { - fn run(&mut self) { - while let Ok(cmd) = self.recv.recv() { - match cmd { - FetchCMD::Fetch((peers, state)) => match self.can_start() { - CanStart::Ready => { - for peer in peers { - if let Some(fetch) = BlockFetcher::new(&self.sync, peer, state).fetch() - { - for item in fetch { - BlockFetchCMD::send_getblocks(item, &self.p2p_control, peer); - } + fn process_fetch_cmd(&mut self, cmd: FetchCMD) { + match cmd { + FetchCMD::Fetch((peers, state)) => match self.can_start() { + CanStart::Ready => { + for peer in peers { + if let Some(fetch) = BlockFetcher::new(&self.sync, peer, state).fetch() { + for item in fetch { + BlockFetchCMD::send_getblocks(item, &self.p2p_control, peer); } } } - CanStart::MinWorkNotReach => { - let best_known = self.sync.shared.state().shared_best_header_ref(); - let number = best_known.number(); - if number != self.number && (number - self.number) % 10000 == 0 { - self.number = number; - info!( - "best known header number: {}, total difficulty: {:#x}, \ + } + CanStart::MinWorkNotReach => { + let best_known = self.sync.shared.state().shared_best_header_ref(); + let number = best_known.number(); + if number != self.number && (number - self.number) % 10000 == 0 { + self.number = number; + info!( + "best known header number: {}, total difficulty: {:#x}, \ require min header number on 500_000, min total difficulty: {:#x}, \ then start to download block", - number, - best_known.total_difficulty(), - self.sync.shared.state().min_chain_work() - ); - } + number, + best_known.total_difficulty(), + self.sync.shared.state().min_chain_work() + ); } - CanStart::AssumeValidNotFound => { - let state = self.sync.shared.state(); - let best_known = state.shared_best_header_ref(); - let number = best_known.number(); - let assume_valid_target: Byte32 = state - .assume_valid_target() - .as_ref() - .map(Pack::pack) - .expect("assume valid target must exist"); - - if number != self.number && (number - self.number) % 10000 == 0 { - self.number = number; - info!( - "best known header number: {}, hash: {:#?}, \ + } + CanStart::AssumeValidNotFound => { + let state = self.sync.shared.state(); + let best_known = state.shared_best_header_ref(); + let number = best_known.number(); + let assume_valid_target: Byte32 = state + .assume_valid_target() + .as_ref() + .map(Pack::pack) + .expect("assume valid target must exist"); + + if number != self.number && (number - self.number) % 10000 == 0 { + self.number = number; + info!( + "best known header number: {}, hash: {:#?}, \ can't find assume valid target temporarily, hash: {:#?} \ please wait", - number, - best_known.hash(), - assume_valid_target - ); - } + number, + best_known.hash(), + assume_valid_target + ); } - }, + } + }, + } + } + fn run(&mut self, stop_signal: Receiver<()>) { + loop { + select! { + recv(self.recv) -> msg => { + if let Ok(cmd) = msg { + self.process_fetch_cmd(cmd) + } + } + recv(stop_signal) -> _ => { + info!("thread BlockDownload received exit signal, exit now"); + return; + } } } } @@ -630,6 +643,7 @@ impl Synchronizer { let blockdownload_jh = thread .name(THREAD_NAME.into()) .spawn(move || { + let stop_signal = new_crossbeam_exit_rx(); BlockFetchCMD { sync, p2p_control, @@ -637,7 +651,7 @@ impl Synchronizer { number, can_start: CanStart::MinWorkNotReach, } - .run(); + .run(stop_signal); }) .expect("download thread can't start"); register_thread(THREAD_NAME, blockdownload_jh); diff --git a/util/stop-handler/src/stop_register.rs b/util/stop-handler/src/stop_register.rs index e329565ae8..e496866383 100644 --- a/util/stop-handler/src/stop_register.rs +++ b/util/stop-handler/src/stop_register.rs @@ -1,4 +1,5 @@ -use ckb_logger::{info, trace, warn}; +use ckb_channel::TrySendError; +use ckb_logger::{error, info, trace, warn}; use ckb_util::Mutex; use tokio_util::sync::CancellationToken; @@ -54,13 +55,16 @@ pub fn new_crossbeam_exit_rx() -> ckb_channel::Receiver<()> { /// Broadcast exit signals to all threads and all tokio tasks pub fn broadcast_exit_signals() { TOKIO_EXIT.cancel(); - CROSSBEAM_EXIT_SENDERS.lock().iter().for_each(|tx| { - if let Err(e) = tx.try_send(()) { - println!("broadcast thread: ERROR: {:?}", e) - } else { - println!("send a crossbeam exit signal"); - } - }); + CROSSBEAM_EXIT_SENDERS + .lock() + .iter() + .for_each(|tx| match tx.try_send(()) { + Ok(_) => {} + Err(TrySendError::Full(_)) => error!("send exit signal to channel failed since the channel is full, this should not happen"), + Err(TrySendError::Disconnected(_)) => { + info!("broadcast thread: channel is disconnected") + } + }); } /// Register a thread `JoinHandle` to `CKB_HANDLES` From 7ad68101ef53a1c3369e75b3e1970ba9a11ba62b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 26 Jun 2023 10:48:38 +0800 Subject: [PATCH 036/267] Re-order ckb workspace members --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 79cb81e4c3..4d0c0cd5fe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,8 +40,8 @@ members = [ "util/occupied-capacity/macros", "util/fixed-hash/macros", "util/logger-service", - "util/stop-handler", "util/runtime", + "util/stop-handler", "util/metrics", "util/metrics-service", "util/fixed-hash", From 4fcf72b177b6479d3d4cb66296c0595853a39c30 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 3 Jul 2023 15:34:48 +0800 Subject: [PATCH 037/267] Fix bats test for ckb run Signed-off-by: Eval EXEC --- util/app-config/src/tests/ckb_run_replay.bats | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/app-config/src/tests/ckb_run_replay.bats b/util/app-config/src/tests/ckb_run_replay.bats index 6ca4dd5405..dc9f87ef18 100644 --- a/util/app-config/src/tests/ckb_run_replay.bats +++ b/util/app-config/src/tests/ckb_run_replay.bats @@ -24,7 +24,7 @@ function ckb_run { #@test run _ckb_run [ "$status" -eq 0 ] # assert_output --regexp "ckb_chain::chain.*block number:.*, hash:.*, size:.*, cycles:.*" - assert_output --regexp "ckb_bin::subcommand::run Finishing work, please wait" + assert_output --regexp "ckb_bin all tokio tasks have been stopped" } function ckb_replay { #@test From 0bb4ff57737a741211559a6446ee01c6acca6ae4 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 12 Jul 2023 09:31:05 +0800 Subject: [PATCH 038/267] Only catch exit signal for `ckb run` --- ckb-bin/src/lib.rs | 33 ++++++++------------------------- ckb-bin/src/subcommand/run.rs | 7 ++++++- notify/src/lib.rs | 8 ++++---- 3 files changed, 18 insertions(+), 30 deletions(-) diff --git a/ckb-bin/src/lib.rs b/ckb-bin/src/lib.rs index 91c61b813d..c8373224d7 100644 --- a/ckb-bin/src/lib.rs +++ b/ckb-bin/src/lib.rs @@ -10,10 +10,8 @@ use ckb_async_runtime::new_global_runtime; use ckb_build_info::Version; use ckb_logger::info; use ckb_network::tokio; -use ckb_stop_handler::broadcast_exit_signals; use helper::raise_fd_limit; use setup_guard::SetupGuard; -use std::sync::Arc; #[cfg(feature = "with_sentry")] pub(crate) const LOG_TARGET_SENTRY: &str = "sentry"; @@ -66,18 +64,6 @@ pub fn run_app(version: Version) -> Result<(), ExitCode> { raise_fd_limit(); - // indicate whether the process is terminated by an exit signal - let caught_exit_signal = Arc::new(std::sync::atomic::AtomicBool::new(false)); - - ctrlc::set_handler({ - let caught_exit_signal = Arc::clone(&caught_exit_signal); - move || { - broadcast_exit_signals(); - caught_exit_signal.store(true, std::sync::atomic::Ordering::SeqCst); - } - }) - .expect("Error setting Ctrl-C handler"); - let ret = match cmd { cli::CMD_RUN => subcommand::run(setup.run(matches)?, version, handle.clone()), cli::CMD_MINER => subcommand::miner(setup.miner(matches)?, handle.clone()), @@ -90,18 +76,15 @@ pub fn run_app(version: Version) -> Result<(), ExitCode> { _ => unreachable!(), }; - if !caught_exit_signal.load(std::sync::atomic::Ordering::SeqCst) { - // if `subcommand` finish normally, and we didn't catch exit signal, broadcast exit signals - broadcast_exit_signals(); - } - - handle.drop_guard(); + if matches!(cmd, cli::CMD_RUN) { + handle.drop_guard(); - tokio::task::block_in_place(|| { - info!("waiting all tokio tasks done"); - handle_stop_rx.blocking_recv(); - info!("all tokio tasks have been stopped"); - }); + tokio::task::block_in_place(|| { + info!("waiting all tokio tasks done"); + handle_stop_rx.blocking_recv(); + info!("all tokio tasks have been stopped"); + }); + } ret } diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs index cd7d8c6282..2ce70792b9 100644 --- a/ckb-bin/src/subcommand/run.rs +++ b/ckb-bin/src/subcommand/run.rs @@ -4,7 +4,7 @@ use ckb_async_runtime::Handle; use ckb_build_info::Version; use ckb_launcher::Launcher; use ckb_logger::info; -use ckb_stop_handler::wait_all_ckb_services_exit; +use ckb_stop_handler::{broadcast_exit_signals, wait_all_ckb_services_exit}; use ckb_types::core::cell::setup_system_cell_cache; @@ -55,6 +55,11 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), let tx_pool_builder = pack.take_tx_pool_builder(); tx_pool_builder.start(network_controller.non_owning_clone()); + ctrlc::set_handler(|| { + broadcast_exit_signals(); + }) + .expect("Error setting Ctrl-C handler"); + wait_all_ckb_services_exit(); Ok(()) diff --git a/notify/src/lib.rs b/notify/src/lib.rs index 2b9969ba96..b6a0172a8a 100644 --- a/notify/src/lib.rs +++ b/notify/src/lib.rs @@ -166,10 +166,6 @@ impl NotifyService { handle.spawn(async move { loop { tokio::select! { - _ = signal_receiver.cancelled() => { - info!("NotifyService received exit signal, exit now"); - break; - } Some(msg) = new_block_register_receiver.recv() => { self.handle_register_new_block(msg) }, Some(msg) = new_block_watcher_receiver.recv() => { self.handle_watch_new_block(msg) }, Some(msg) = new_block_receiver.recv() => { self.handle_notify_new_block(msg) }, @@ -181,6 +177,10 @@ impl NotifyService { Some(msg) = reject_transaction_receiver.recv() => { self.handle_notify_reject_transaction(msg) }, Some(msg) = network_alert_register_receiver.recv() => { self.handle_register_network_alert(msg) }, Some(msg) = network_alert_receiver.recv() => { self.handle_notify_network_alert(msg) }, + _ = signal_receiver.cancelled() => { + info!("NotifyService received exit signal, exit now"); + break; + } else => break, } } From 6d00f61b765188679678a56c2eddaa48a5b3ba04 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 12 Jul 2023 09:54:28 +0800 Subject: [PATCH 039/267] Use debug level to print exit signal log --- block-filter/src/filter.rs | 4 ++-- chain/src/chain.rs | 8 ++++---- ckb-bin/src/helper.rs | 4 ++-- ckb-bin/src/lib.rs | 6 +++--- ckb-bin/src/subcommand/run.rs | 1 + miner/src/client.rs | 8 ++++---- miner/src/miner.rs | 2 +- network/src/network.rs | 2 +- notify/src/lib.rs | 4 ++-- sync/src/synchronizer/mod.rs | 2 +- sync/src/tests/synchronizer/functions.rs | 2 -- sync/src/types/header_map/mod.rs | 4 ++-- tx-pool/src/chunk_process.rs | 10 +++++----- tx-pool/src/process.rs | 2 ++ tx-pool/src/service.rs | 8 ++++---- util/indexer/src/service.rs | 6 +++--- util/metrics-service/src/lib.rs | 4 ++-- util/stop-handler/src/stop_register.rs | 8 ++++---- 18 files changed, 43 insertions(+), 42 deletions(-) diff --git a/block-filter/src/filter.rs b/block-filter/src/filter.rs index 56fcc92f6b..04e8a6566b 100644 --- a/block-filter/src/filter.rs +++ b/block-filter/src/filter.rs @@ -1,5 +1,5 @@ use ckb_async_runtime::tokio::{self, task::block_in_place}; -use ckb_logger::{debug, info, warn}; +use ckb_logger::{debug, warn}; use ckb_shared::Shared; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_store::{ChainDB, ChainStore}; @@ -63,7 +63,7 @@ impl BlockFilter { new_block_watcher.borrow_and_update(); } _ = stop_rx.cancelled() => { - info!("BlockFilter received exit signal, exit now"); + debug!("BlockFilter received exit signal, exit now"); break }, else => break, diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 8f7e5d8e3c..3323492032 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -243,10 +243,6 @@ impl ChainService { let chain_jh = thread_builder .spawn(move || loop { select! { - recv(signal_receiver) -> _ => { - info!("ChainService received exit signal, stopped"); - break; - }, recv(process_block_receiver) -> msg => match msg { Ok(Request { responder, arguments: (block, verify) }) => { let _ = tx_control.suspend_chunk_process(); @@ -268,6 +264,10 @@ impl ChainService { error!("truncate_receiver closed"); break; }, + }, + recv(signal_receiver) -> _ => { + debug!("ChainService received exit signal, exit now"); + break; } } }) diff --git a/ckb-bin/src/helper.rs b/ckb-bin/src/helper.rs index 21c93732b8..7dee9de15d 100644 --- a/ckb-bin/src/helper.rs +++ b/ckb-bin/src/helper.rs @@ -8,7 +8,7 @@ pub fn deadlock_detection() {} #[cfg(feature = "deadlock_detection")] pub fn deadlock_detection() { use ckb_channel::select; - use ckb_logger::warn; + use ckb_logger::{debug, warn}; use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread}; use ckb_util::parking_lot::deadlock; use std::{thread, time::Duration}; @@ -36,7 +36,7 @@ pub fn deadlock_detection() { }, recv(stop_rx) -> _ =>{ - info!("deadlock_detection received exit signal, stopped"); + debug!("deadlock_detection received exit signal, stopped"); return; } } diff --git a/ckb-bin/src/lib.rs b/ckb-bin/src/lib.rs index c8373224d7..8596f504d0 100644 --- a/ckb-bin/src/lib.rs +++ b/ckb-bin/src/lib.rs @@ -8,7 +8,7 @@ mod subcommand; use ckb_app_config::{cli, ExitCode, Setup}; use ckb_async_runtime::new_global_runtime; use ckb_build_info::Version; -use ckb_logger::info; +use ckb_logger::{debug, info}; use ckb_network::tokio; use helper::raise_fd_limit; use setup_guard::SetupGuard; @@ -80,9 +80,9 @@ pub fn run_app(version: Version) -> Result<(), ExitCode> { handle.drop_guard(); tokio::task::block_in_place(|| { - info!("waiting all tokio tasks done"); + debug!("waiting all tokio tasks done"); handle_stop_rx.blocking_recv(); - info!("all tokio tasks have been stopped"); + info!("ckb shutdown"); }); } diff --git a/ckb-bin/src/subcommand/run.rs b/ckb-bin/src/subcommand/run.rs index 2ce70792b9..726a415944 100644 --- a/ckb-bin/src/subcommand/run.rs +++ b/ckb-bin/src/subcommand/run.rs @@ -56,6 +56,7 @@ pub fn run(args: RunArgs, version: Version, async_handle: Handle) -> Result<(), tx_pool_builder.start(network_controller.non_owning_clone()); ctrlc::set_handler(|| { + info!("Trapped exit signal, exiting..."); broadcast_exit_signals(); }) .expect("Error setting Ctrl-C handler"); diff --git a/miner/src/client.rs b/miner/src/client.rs index dda47570a7..c598f5af9a 100644 --- a/miner/src/client.rs +++ b/miner/src/client.rs @@ -4,7 +4,7 @@ use ckb_app_config::MinerClientConfig; use ckb_async_runtime::Handle; use ckb_channel::Sender; use ckb_jsonrpc_types::{Block as JsonBlock, BlockTemplate}; -use ckb_logger::{debug, error, info}; +use ckb_logger::{debug, error}; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::{ packed::{Block, Byte32}, @@ -87,7 +87,7 @@ impl Rpc { }); }, _ = stop_rx.cancelled() => { - info!("Rpc server received exit signal, exit now"); + debug!("Rpc server received exit signal, exit now"); break }, else => break @@ -235,7 +235,7 @@ Otherwise ckb-miner does not work properly and will behave as it stopped committ let stop_rx: CancellationToken = new_tokio_exit_rx(); let graceful = server.with_graceful_shutdown(async move { stop_rx.cancelled().await; - info!("Miner client received exit signal, exit now"); + debug!("Miner client received exit signal, exit now"); }); if let Err(e) = graceful.await { @@ -255,7 +255,7 @@ Otherwise ckb-miner does not work properly and will behave as it stopped committ self.fetch_block_template().await; } _ = stop_rx.cancelled() => { - info!("Miner client pool_block_template received exit signal, exit now"); + debug!("Miner client pool_block_template received exit signal, exit now"); break }, else => break, diff --git a/miner/src/miner.rs b/miner/src/miner.rs index e14ffef72f..110fad5514 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -103,7 +103,7 @@ impl Miner { }, }, recv(stop_rx) -> _msg => { - info!("miner received exit signal, stopped"); + debug!("miner received exit signal, stopped"); break; } }; diff --git a/network/src/network.rs b/network/src/network.rs index eaf3f7576b..93bccc1a9f 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -1127,7 +1127,7 @@ impl NetworkService { loop { tokio::select! { _ = receiver.cancelled() => { - info!("NetworkService receive exit signal, start shutdown..."); + debug!("NetworkService receive exit signal, start shutdown..."); let _ = p2p_control.shutdown().await; // Drop senders to stop all corresponding background task drop(bg_signals); diff --git a/notify/src/lib.rs b/notify/src/lib.rs index b6a0172a8a..41d1451e04 100644 --- a/notify/src/lib.rs +++ b/notify/src/lib.rs @@ -1,7 +1,7 @@ //! TODO(doc): @quake use ckb_app_config::NotifyConfig; use ckb_async_runtime::Handle; -use ckb_logger::{debug, error, info, trace}; +use ckb_logger::{debug, error, trace}; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::packed::Byte32; use ckb_types::{ @@ -178,7 +178,7 @@ impl NotifyService { Some(msg) = network_alert_register_receiver.recv() => { self.handle_register_network_alert(msg) }, Some(msg) = network_alert_receiver.recv() => { self.handle_notify_network_alert(msg) }, _ = signal_receiver.cancelled() => { - info!("NotifyService received exit signal, exit now"); + debug!("NotifyService received exit signal, exit now"); break; } else => break, diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 7341ce4bc8..b348226560 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -149,7 +149,7 @@ impl BlockFetchCMD { } } recv(stop_signal) -> _ => { - info!("thread BlockDownload received exit signal, exit now"); + debug!("thread BlockDownload received exit signal, exit now"); return; } } diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index bd0a55740c..bbdd902a3d 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -1226,8 +1226,6 @@ fn test_internal_db_error() { InternalErrorKind::Database.other("mocked db error").into(), )); - faux::when!(chain_controller.try_stop()).then_return(()); - let synchronizer = Synchronizer::new(chain_controller, sync_shared); let status = synchronizer diff --git a/sync/src/types/header_map/mod.rs b/sync/src/types/header_map/mod.rs index 78939164b6..975c7b9075 100644 --- a/sync/src/types/header_map/mod.rs +++ b/sync/src/types/header_map/mod.rs @@ -1,5 +1,5 @@ use ckb_async_runtime::Handle; -use ckb_logger::info; +use ckb_logger::debug; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_types::packed::Byte32; use std::sync::Arc; @@ -56,7 +56,7 @@ impl HeaderMap { map.limit_memory(); } _ = stop_rx.cancelled() => { - info!("HeaderMap limit_memory received exit signal, exit now"); + debug!("HeaderMap limit_memory received exit signal, exit now"); break }, } diff --git a/tx-pool/src/chunk_process.rs b/tx-pool/src/chunk_process.rs index b35e547a21..73e4f246eb 100644 --- a/tx-pool/src/chunk_process.rs +++ b/tx-pool/src/chunk_process.rs @@ -4,7 +4,7 @@ use crate::try_or_return_with_snapshot; use crate::{error::Reject, service::TxPoolService}; use ckb_chain_spec::consensus::Consensus; use ckb_error::Error; -use ckb_logger::info; +use ckb_logger::debug; use ckb_snapshot::Snapshot; use ckb_store::data_loader_wrapper::AsDataLoader; use ckb_traits::{CellDataProvider, ExtensionProvider, HeaderProvider}; @@ -75,10 +75,6 @@ impl ChunkProcess { } } }, - _ = self.signal.cancelled() => { - info!("TxPool received exit signal, exit now"); - break - }, _ = interval.tick() => { if matches!(self.current_state, ChunkCommand::Resume) { let stop = self.try_process().await; @@ -87,6 +83,10 @@ impl ChunkProcess { } } }, + _ = self.signal.cancelled() => { + debug!("TxPool received exit signal, exit now"); + break + }, else => break, } } diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 0013d5cf33..b5062b3b35 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -920,6 +920,8 @@ impl TxPoolService { let mut tx_pool = self.tx_pool.write().await; if let Err(err) = tx_pool.save_into_file() { error!("failed to save pool, error: {:?}", err) + } else { + info!("TxPool save successfully") } } diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index f405129201..b11564ea11 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -13,8 +13,8 @@ use ckb_chain_spec::consensus::Consensus; use ckb_channel::oneshot; use ckb_error::AnyError; use ckb_jsonrpc_types::BlockTemplate; -use ckb_logger::error; use ckb_logger::info; +use ckb_logger::{debug, error}; use ckb_network::{NetworkController, PeerIndex}; use ckb_snapshot::Snapshot; use ckb_stop_handler::new_tokio_exit_rx; @@ -538,7 +538,7 @@ impl TxPoolServiceBuilder { block_assembler::process(service_clone, &message).await; }, _ = signal_receiver.cancelled() => { - info!("TxPool received exit signal, exit now"); + debug!("TxPool received exit signal, exit now"); break }, else => break, @@ -573,7 +573,7 @@ impl TxPoolServiceBuilder { queue.clear(); } _ = signal_receiver.cancelled() => { - info!("TxPool received exit signal, exit now"); + debug!("TxPool received exit signal, exit now"); break }, else => break, @@ -611,7 +611,7 @@ impl TxPoolServiceBuilder { service.update_block_assembler_after_tx_pool_reorg().await; }, _ = signal_receiver.cancelled() => { - info!("TxPool received exit signal, exit now"); + debug!("TxPool received exit signal, exit now"); break }, else => break, diff --git a/util/indexer/src/service.rs b/util/indexer/src/service.rs index ef8f876aee..bcdc89c7ce 100644 --- a/util/indexer/src/service.rs +++ b/util/indexer/src/service.rs @@ -16,7 +16,7 @@ use ckb_jsonrpc_types::{ IndexerScriptSearchMode, IndexerScriptType, IndexerSearchKey, IndexerTip, IndexerTx, IndexerTxWithCell, IndexerTxWithCells, JsonBytes, Uint32, }; -use ckb_logger::{error, info}; +use ckb_logger::{debug, error, info}; use ckb_notify::NotifyController; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_store::ChainStore; @@ -118,7 +118,7 @@ impl IndexerService { } } _ = stop.cancelled() => { - info!("Indexer received exit signal, exit now"); + debug!("Indexer received exit signal, exit now"); break }, else => break, @@ -204,7 +204,7 @@ impl IndexerService { } } _ = stop.cancelled() => { - info!("Indexer received exit signal, exit now"); + debug!("Indexer received exit signal, exit now"); break }, } diff --git a/util/metrics-service/src/lib.rs b/util/metrics-service/src/lib.rs index 1b88171de0..4206a3d0aa 100644 --- a/util/metrics-service/src/lib.rs +++ b/util/metrics-service/src/lib.rs @@ -10,7 +10,7 @@ use hyper::{ use prometheus::Encoder as _; use ckb_async_runtime::Handle; -use ckb_logger::info; +use ckb_logger::debug; use ckb_metrics_config::{Config, Exporter, Target}; use ckb_stop_handler::{new_tokio_exit_rx, CancellationToken}; use ckb_util::strings; @@ -66,7 +66,7 @@ fn run_exporter(exporter: Exporter, handle: &Handle) -> Result<(), String> { .with_graceful_shutdown(async { let exit_rx: CancellationToken = new_tokio_exit_rx(); exit_rx.cancelled().await; - info!("prometheus server received exit signal, exit now"); + debug!("prometheus server received exit signal, exit now"); }); if let Err(err) = server.await { ckb_logger::error!("prometheus server error: {}", err); diff --git a/util/stop-handler/src/stop_register.rs b/util/stop-handler/src/stop_register.rs index e496866383..8948217c19 100644 --- a/util/stop-handler/src/stop_register.rs +++ b/util/stop-handler/src/stop_register.rs @@ -1,5 +1,5 @@ use ckb_channel::TrySendError; -use ckb_logger::{error, info, trace, warn}; +use ckb_logger::{debug, error, info, trace, warn}; use ckb_util::Mutex; use tokio_util::sync::CancellationToken; @@ -12,19 +12,19 @@ pub fn wait_all_ckb_services_exit() { info!("waiting exit signal..."); let exit_signal = new_crossbeam_exit_rx(); let _ = exit_signal.recv(); - info!("received exit signal, broadcasting exit signal to all threads"); + debug!("received exit signal, broadcasting exit signal to all threads"); let mut handles = CKB_HANDLES.lock(); for (name, join_handle) in handles.thread_handles.drain(..) { match join_handle.join() { Ok(_) => { - info!("wait thread {} done", name); + debug!("wait thread {} done", name); } Err(e) => { warn!("wait thread {}: ERROR: {:?}", name, e) } } } - info!("all ckb threads have been stopped"); + debug!("all ckb threads have been stopped"); } static CKB_HANDLES: once_cell::sync::Lazy> = From 71e4297d158826045350b34c80f8ed802970a415 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 12 Jul 2023 09:59:50 +0800 Subject: [PATCH 040/267] Add exit handler for `ckb miner` --- ckb-bin/src/subcommand/miner.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ckb-bin/src/subcommand/miner.rs b/ckb-bin/src/subcommand/miner.rs index fd9a892abc..0bc7312e42 100644 --- a/ckb-bin/src/subcommand/miner.rs +++ b/ckb-bin/src/subcommand/miner.rs @@ -1,8 +1,11 @@ use ckb_app_config::{ExitCode, MinerArgs, MinerConfig}; use ckb_async_runtime::Handle; use ckb_channel::unbounded; +use ckb_logger::info; use ckb_miner::{Client, Miner}; -use ckb_stop_handler::{new_crossbeam_exit_rx, register_thread, wait_all_ckb_services_exit}; +use ckb_stop_handler::{ + broadcast_exit_signals, new_crossbeam_exit_rx, register_thread, wait_all_ckb_services_exit, +}; use std::thread; pub fn miner(args: MinerArgs, async_handle: Handle) -> Result<(), ExitCode> { @@ -30,6 +33,12 @@ pub fn miner(args: MinerArgs, async_handle: Handle) -> Result<(), ExitCode> { .expect("Start client failed!"); register_thread(THREAD_NAME, miner_jh); + ctrlc::set_handler(|| { + info!("Trapped exit signal, exiting..."); + broadcast_exit_signals(); + }) + .expect("Error setting Ctrl-C handler"); + wait_all_ckb_services_exit(); Ok(()) From fc9d3e6f17547c0d183c8d1cdfcca56a031c7d08 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Wed, 12 Jul 2023 10:45:47 +0800 Subject: [PATCH 041/267] Add bats test for graceful shutdown Signed-off-by: Eval EXEC --- util/app-config/src/tests/ckb_run_replay.bats | 2 +- util/app-config/src/tests/cli_test.sh | 4 +- .../src/tests/graceful_shutdown.bats | 45 +++++++++++++++++++ 3 files changed, 48 insertions(+), 3 deletions(-) create mode 100644 util/app-config/src/tests/graceful_shutdown.bats diff --git a/util/app-config/src/tests/ckb_run_replay.bats b/util/app-config/src/tests/ckb_run_replay.bats index dc9f87ef18..c6943eeb5b 100644 --- a/util/app-config/src/tests/ckb_run_replay.bats +++ b/util/app-config/src/tests/ckb_run_replay.bats @@ -24,7 +24,7 @@ function ckb_run { #@test run _ckb_run [ "$status" -eq 0 ] # assert_output --regexp "ckb_chain::chain.*block number:.*, hash:.*, size:.*, cycles:.*" - assert_output --regexp "ckb_bin all tokio tasks have been stopped" + assert_output --regexp "ckb_bin ckb shutdown" } function ckb_replay { #@test diff --git a/util/app-config/src/tests/cli_test.sh b/util/app-config/src/tests/cli_test.sh index 56e30dbab8..707b2b1dea 100755 --- a/util/app-config/src/tests/cli_test.sh +++ b/util/app-config/src/tests/cli_test.sh @@ -38,7 +38,7 @@ bash ${CKB_BATS_CORE_DIR}/bats-assert/load.bash cd ${CKB_BATS_TESTBED} -./ckb init --force && ./ckb import ckb_mainnet_4000.json +./ckb init --force && sed -i 's/filter = "info"/filter = "debug"/g' ckb.toml && ./ckb import ckb_mainnet_4000.json export PATH=${CKB_BATS_TESTBED}:/tmp/ckb_bats_bin/tmp_install/bin:${PATH} export BATS_LIB_PATH=${CKB_BATS_CORE_DIR} @@ -47,7 +47,7 @@ export TMP_DIR=${CKB_BATS_TESTBED}/tmp_dir mkdir ${TMP_DIR} for bats_cases in *.bats; do - bats --trace "$bats_cases" + bats "$bats_cases" ret=$? if [ "$ret" -ne "0" ]; then exit "$ret" diff --git a/util/app-config/src/tests/graceful_shutdown.bats b/util/app-config/src/tests/graceful_shutdown.bats new file mode 100644 index 0000000000..eb906c3949 --- /dev/null +++ b/util/app-config/src/tests/graceful_shutdown.bats @@ -0,0 +1,45 @@ +#!/usr/bin/env bats +bats_load_library 'bats-assert' +bats_load_library 'bats-support' + +_ckb_graceful_shutdown() { + ckb run -C ${CKB_DIRNAME} &> ${TMP_DIR}/ckb_run.log & + PID=$! + sleep 10 + kill ${PID} + + while kill -0 ${PID}; do + echo "waiting for ckb to exit" + sleep 1 + done + + tail -n 500 ${TMP_DIR}/ckb_run.log +} + +function ckb_graceful_shutdown { #@test + run _ckb_graceful_shutdown + + [ "$status" -eq 0 ] + assert_output --regexp "INFO ckb_bin::subcommand::run Trapped exit signal, exiting..." + assert_output --regexp "DEBUG ckb_stop_handler::stop_register received exit signal, broadcasting exit signal to all threads" + assert_output --regexp "DEBUG ckb_tx_pool::chunk_process TxPool received exit signal, exit now" + assert_output --regexp "DEBUG ckb_sync::types::header_map HeaderMap limit_memory received exit signal, exit now" + assert_output --regexp "DEBUG ckb_chain::chain ChainService received exit signal, exit now" + assert_output --regexp "DEBUG ckb_sync::synchronizer thread BlockDownload received exit signal, exit now" + assert_output --regexp "DEBUG ckb_network::network NetworkService receive exit signal, start shutdown..." + assert_output --regexp "INFO ckb_tx_pool::service TxPool is saving, please wait..." + assert_output --regexp "DEBUG ckb_tx_pool::service TxPool received exit signal, exit now" + assert_output --regexp "DEBUG ckb_block_filter::filter BlockFilter received exit signal, exit now" + assert_output --regexp "DEBUG ckb_network::services::dump_peer_store dump peer store before exit" + assert_output --regexp "DEBUG ckb_notify NotifyService received exit signal, exit now" + assert_output --regexp "DEBUG ckb_stop_handler::stop_register wait thread ChainService done" + assert_output --regexp "DEBUG ckb_stop_handler::stop_register wait thread BlockDownload done" + assert_output --regexp "DEBUG ckb_stop_handler::stop_register all ckb threads have been stopped" + assert_output --regexp "DEBUG ckb_bin waiting all tokio tasks done" + assert_output --regexp "INFO ckb_tx_pool::process TxPool save successfully" + assert_output --regexp "INFO ckb_bin ckb shutdown" +} + +teardown_file() { + rm -f ${TMP_DIR}/ckb_run.log +} From d0a256680e6e8ed9a0478a57a9ff7d2adc0db76f Mon Sep 17 00:00:00 2001 From: mohanson Date: Mon, 17 Jul 2023 10:52:27 +0800 Subject: [PATCH 042/267] deps(script): update ckb-vm to v0.24.3 --- Cargo.lock | 8 ++++---- script/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b089ecb5dc..c27e960219 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1523,9 +1523,9 @@ dependencies = [ [[package]] name = "ckb-vm" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb89eb30f9e82e63c637a0824e25b12d99de9be9c36675ba6f79479094a8b42c" +checksum = "1f5747a877a71ff164fa0f17daf6e9abca036c2381b8576679fb3ac07ae77bbc" dependencies = [ "byteorder", "bytes 1.4.0", @@ -1541,9 +1541,9 @@ dependencies = [ [[package]] name = "ckb-vm-definitions" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d51f3b5550ebe2a37a6921127273afc00257b11f39517664dd0b35455d7e396" +checksum = "83869c9d322de1ddbfde5b54b7376f9a1ac32273c50e21cdd5e8a1bd1a1cf632" [[package]] name = "clang-sys" diff --git a/script/Cargo.toml b/script/Cargo.toml index 0cad2ad920..0a55306a5a 100644 --- a/script/Cargo.toml +++ b/script/Cargo.toml @@ -22,7 +22,7 @@ ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } byteorder = "1.3.1" ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } ckb-hash = { path = "../util/hash", version = "= 0.112.0-pre" } -ckb-vm = { version = "=0.24.2", default-features = false } +ckb-vm = { version = "=0.24.3", default-features = false } faster-hex = "0.6" ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre", optional = true } serde = { version = "1.0", features = ["derive"] } From 3432bf73309e32d4363249419de0ac0df1b86e29 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 17 Jul 2023 11:17:52 +0800 Subject: [PATCH 043/267] docs: Fix `PendingCompactBlockMap` comment --- sync/src/types/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index d00fcf137a..73ce7935d0 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -1269,7 +1269,7 @@ fn get_skip_height(height: BlockNumber) -> BlockNumber { } } -// , timestamp)> +// , Vec)>, timestamp)> pub(crate) type PendingCompactBlockMap = HashMap< Byte32, ( From 603a0a9269717bbc18d0cff965db57413b2c09df Mon Sep 17 00:00:00 2001 From: mohanson Date: Mon, 17 Jul 2023 11:41:09 +0800 Subject: [PATCH 044/267] test(script): add check_spawn_times_bug test case --- .../tests/ckb_latest/features_since_v2023.rs | 97 ++++++++++++++++++ script/testdata/spawn_times | Bin 0 -> 15288 bytes script/testdata/spawn_times.md | 81 +++++++++++++++ 3 files changed, 178 insertions(+) create mode 100755 script/testdata/spawn_times create mode 100644 script/testdata/spawn_times.md diff --git a/script/src/verify/tests/ckb_latest/features_since_v2023.rs b/script/src/verify/tests/ckb_latest/features_since_v2023.rs index 82d7e96964..09ed1b2ce4 100644 --- a/script/src/verify/tests/ckb_latest/features_since_v2023.rs +++ b/script/src/verify/tests/ckb_latest/features_since_v2023.rs @@ -27,6 +27,8 @@ use crate::verify::{tests::utils::*, *}; // check_spawn_state: Like check_spawn_snapshot but invoking verifier.resume_from_state instead. // check_spawn_current_memory: Use current_memory() to terminate infinite recursion. // check_spawn_current_cycles: callee's current_cycles should inherit caller's current_cycles. +// check_spawn_times_bug_1: BUG: execution results may be inconsistent +// check_spawn_times_bug_2: BUG: execution results may be inconsistent #[test] fn check_vm_version() { @@ -777,3 +779,98 @@ fn check_spawn_current_cycles() { let result = verifier.verify_without_limit(script_version, &rtx); assert_eq!(result.is_ok(), script_version >= ScriptVersion::V2); } + +#[test] +fn check_spawn_times_bug_1() { + let script_version = SCRIPT_VERSION; + + let (spawn_caller_cell, spawn_caller_data_hash) = load_cell_from_path("testdata/spawn_times"); + + let spawn_caller_script = Script::new_builder() + .hash_type(script_version.data_hash_type().into()) + .code_hash(spawn_caller_data_hash) + .build(); + let output = CellOutputBuilder::default() + .capacity(capacity_bytes!(100).pack()) + .lock(spawn_caller_script) + .build(); + let input = CellInput::new(OutPoint::null(), 0); + + let transaction = TransactionBuilder::default().input(input).build(); + let dummy_cell = create_dummy_cell(output); + + let rtx = ResolvedTransaction { + transaction, + resolved_cell_deps: vec![spawn_caller_cell], + resolved_inputs: vec![dummy_cell], + resolved_dep_groups: vec![], + }; + let verifier = TransactionScriptsVerifierWithEnv::new(); + let result = verifier.verify_without_limit(script_version, &rtx); + assert_eq!(result.is_ok(), script_version >= ScriptVersion::V2); +} + +#[test] +fn check_spawn_times_bug_2() { + let script_version = SCRIPT_VERSION; + if script_version <= ScriptVersion::V1 { + return; + } + let (spawn_caller_cell, spawn_caller_data_hash) = load_cell_from_path("testdata/spawn_times"); + + let spawn_caller_script = Script::new_builder() + .hash_type(script_version.data_hash_type().into()) + .code_hash(spawn_caller_data_hash) + .build(); + let output = CellOutputBuilder::default() + .capacity(capacity_bytes!(100).pack()) + .lock(spawn_caller_script) + .build(); + let input = CellInput::new(OutPoint::null(), 0); + + let transaction = TransactionBuilder::default().input(input).build(); + let dummy_cell = create_dummy_cell(output); + + let rtx = ResolvedTransaction { + transaction, + resolved_cell_deps: vec![spawn_caller_cell], + resolved_inputs: vec![dummy_cell], + resolved_dep_groups: vec![], + }; + let verifier = TransactionScriptsVerifierWithEnv::new(); + let result = verifier.verify_without_pause(script_version, &rtx, Cycle::MAX); + let cycles_once = result.unwrap(); + + let (cycles, _) = verifier + .verify_map(script_version, &rtx, |verifier| { + let max_cycles = Cycle::MAX; + let cycles; + let mut times = 0usize; + times += 1; + let mut init_state = match verifier.resumable_verify(max_cycles).unwrap() { + VerifyResult::Suspended(state) => Some(state), + VerifyResult::Completed(cycle) => { + cycles = cycle; + return Ok((cycles, times)); + } + }; + + loop { + times += 1; + let state = init_state.take().unwrap(); + match verifier.resume_from_state(state, max_cycles).unwrap() { + VerifyResult::Suspended(state) => { + init_state = Some(state); + } + VerifyResult::Completed(cycle) => { + cycles = cycle; + break; + } + } + } + + Ok::<(u64, usize), Error>((cycles, times)) + }) + .unwrap(); + assert_eq!(cycles, cycles_once); +} diff --git a/script/testdata/spawn_times b/script/testdata/spawn_times new file mode 100755 index 0000000000000000000000000000000000000000..c7dcd39e58e0206107a29fda1098222a07eacb1c GIT binary patch literal 15288 zcmb_j4O|r0wZAj7GrPc|kYT~kybUV^_vs3|EFUIXKzB97_)BfX)TV}ISQG{1V?jyc zE3nKW1dL`xV$}p85VJM@`Xp$Qend22G>OSCtt8qsMfs4l*w7S|Bna=^`A~v1eeHXf z-*D&t@44rmbMCo!&b>4HGutXVNf1a$B>FqTrF6Z7CCC*cancP&6HpwY;lCg9MGTZx zA|}-*h^;Y=)P9DPAtl#HWMVCDcPGh&a=Y{ZsYc0>T*g#1TnetpspK8=q;^U^u^pB* z(kj(Qu9f;zay=#C_Zegf<^F!y>q^J%{@rw0FZHA7t>jP1IO4YExbhvGt6)($A-vGEMXI8$` zkzTA%+nS%hb(=GN&U|IO#pd#;Ba|8|;hV_s*8l5zq!p&87cT^{w{oD1)f6SFBBD=< zC{|;l{R%c^X1MfOPWX4186_d%{{(wy7iPIKbF+%4q&_0cpCY$pA;*3@84m%>Po}}y z1pWnuX`*NRulQp=zHm*&f2w|G1Fd=rdW=u>_lfd|_Q}uOxaoTgG=Y9ff1E6z4f@0W zF&Tdkuo-Y=IUO1Lg3PC!`s~62mp;v@cVuO5%)$O16{Alpb~#}XA=RrSe=&%qyZOBj z_*DIL0)8J{ll#Z`oGI{2fX$i${|qq9-wCt1c#8UNz@nzWzW~gC3jC{oKtJ*i=owHS z^T~JS6=vo;3-sAr^TB4YnB*7hVP)Z**kqYGnXXJQAeb`=Pme@AJD?RpAJLNiEMn+}C5ch1(FCtaKH8eibZa8A%bBgy|H^o3I>TzK5jB-{v4BwSgU zY55L#N{K79xMJr!wmn(o9uHuOgxT7*3tP4)Ni`6|x< zd5(xhc)V@R!G0sHEcy&bW|lJ@^C5jCJ+N=#6e5?g&2j3N`^9SmPS}rd-DLbF8PCi~ zcW&3qUYfSGFeklWvEH@G3D2J#eP)3^NqkN|Qs}b5^D3L zm+z5^NU4{LXpQ__Rmze!2t6j_*$x-C$yspUk|j$V1qIH0(GZgQi}k4y;!{yCuKn{< zef8o~67bXoz7mM<7wdsJ>nm}dDR;C$E-P{^mCIVWK3Oiy;TKAk@qd-iZkdmwmnaPI znrw$pWjb&RF)4n*!E!0OTSm5PQBGmD7#v^X^D)E zv_{56#zw|P#z#d)MMasTEK$)>)~J}M*r>RucypvV%4{}U%+Y46ImR4ojx)zwA}vuC zv&CYGwpcAOmRL)iB|bVbIx5;6ZHbPKwnoQ9$419R$6F(is7{_z-U6@EpER5+e9u3zUQ{bxrKQaZ5 zC*})N;LiYFKVRa9lBOjjl@cQVy@3B3@M(|(9-!byp?G`>{2joL0Pd0KA64i-gK{t| zSSpOm3ClkO7YVrHTeu42*faycNAYouLgWT z!n+8l!1|5?9{QgMt&;E(1rGwlqyR4S7sq8D&@2Ev7w~Fm2lA8or@%)6*8nbWA;lJ$J_s*zfKT?n z1%TgxhhGAw1icjdE(iQxpvN(Wk}#hq06z*wtioW@wB3q*U6>vMxQRHPYt?}720TNu zZ-QOY6So%iI|}uk(J;m$BK^btyaRX-;G(>89mYQc{4(H^=kE~U=K-InH*QAS79hf2 z2Y1&>68r?sM|1()6M@V?Xn%N*%3&%u@pXE!c68>saK3@Rnwpf}kWM>73M6tBZO$Z=mHaf%(_`!j=^Y0TdWe}sl*5GBdcwKSv;a3+HZ>4#ji$#($? z(mbdkZ03qOP~cigJ%m(+c}#EUmFCg&BOx*HY-2om+N+vAS(62aZ5-Rm5X(cXP)D~f z)i?w?Aiyx=zfmJaAY;#bn{j}p^N?{U0vSglG{#ZdW*qc!&h{D3Z78p?9k$O2AZ&!4 zSRu=0Jagi^>v_u;OSd$t}sDO4F8r~bH=2MgvnzpmvrGDZ{Q^>pt;_1 z0>-4&7V44uNWC}w_V|>Qgus|@M24^B{2M7>_N~+_wL;;Y++{4E+s3Yahl>A#wd#-% z6ph%KRb1qcwlgy;S2Iu+SuSE)Rq`HG)otRzk6sKq{63L{YpV^O+- zSSk8vy`ZW8*+i7EVZ5wlZk}hLp})E3eAnvHH6tlQ>jzoC5XAZTX%?6Nw(>-|x$0Pz zv68o~m$lLlnktMvrvCK<^eL8}hXmbnwAiylH@|&vs-X6uHeH)4=;~6>tzp`EK(p4} z$*=J=crH{BE0cNLqp>_f9_{+CLixT_dwO#^lX8XaN$fv`=9+EFN_pj$K%j0$m z4u~Yrw|y?v+I#lR8=x40(qTLIO@~Gp9-e-nj)#`@3I5N%_R`E<2Wr*CYcCn=@_s(&ObdM$ z8IBE)al;+V6KCd}HF$@|3`d8@u3-KucKZtS#MwFTJ@MY04;>oe1vM$`gX>cbX=nht zm*HxL>&Wn!g<%qJtw^ft*vAc?C5d2R8#5+UsL8kLg1UpQ1^p%HOIEEWyJy`TD=Uf5 z3mpv~Ngi4~xMm=we|^t}F5#vXaTV%>%+ubAne`mS7~LH-;%OFeL&+y-M=-C>zA4;@ zMN2C_`16vL+>Uouf`%FAw$C6e{U;H-eLLb*nn>Z(m56_{jIM|2gZ z)gbo~8c|1V=&<@QZ8#b-ZaAhM4?O!q;vV%L+HmaJ7`KPH*QbLzOM|4>P@3zsE3~+* zIBoOk&7oV$w%iAvuww;ReAd@zrTbkPy^zT5Waj&?Y}v z_DeiE@S+FJetai!;37{P==2b?@2x-U z+ce$#)_o(WXQJoT*5DR?2w9k-goZj{J40Yw2t{_VhiHp(jmG^R*y24Ma-R=F)Ojm% zpR*zA+*;&*$BC$S3X%H^kKCtgk^93YM19zb+!xvrb>SkS&h#SpyWo{2+pt!Ip>!S5 z2$R}S7E3gi-A@?Yb*Rx@fehu_P-FQzWZ-WSjr=vj;Q54T^qeIO6&q1wMF`PGx2a*A zRqoR?^Z}OpFbw*zLLVT<8Ia>$kmKD#=w}^jEV&8&T!Vf-fqu?HKVVIF2+`O-fgGr$FcUTlAN2lh1R1?-M(4z180dXKuBQeVZsNS)Q8O10s+PsSRbyEaxy$G5UK zV^Q~9^+0?p9!F{XLSX!09W7(U@tZhqu`q7iVBAjPaYNKAH}SX;Fm42l8v)~nVB8Rl z8-j5o{l>7-It`9`H zhT6#;I?LKtMAH)`Ykxs%)p2MdFK4}k_5qrp$a@)~nR%YkhLPr>^Me-#KJM@Ax!kp3 zG;JhfX!Bssz;4H0r$yU|xQzGB-m0CMLeVf1P6Q)MY(4sV7gx;v0(F_%bO!&+2438?i*0N6iqM$dSoc zT*lD_#;VdsEcB{G(2yuy1-)!)MKv~~hu6}j-Pha4*qQ~1+j8hX9kv$hYb5s)ZM4v@ zku54O;;t@VNZ78}TKHd+Y$dN^RXoAbl`4+%H8odS{Jh8zc>sm;+(AD)!U3yq_pGm> ztbRyyrA?*j4duIsqHoFL^PD6rbGY5hY5XFv9>cMO!YTE$xwP;Y=8{QZ zF6)6yxXdNNeR(f;Eqx~D68>y=ZS`ni{pIrT+jj2yoZE7Ap0U#KOVIH3D#GxFFEac( znSe57c>OJko9;i0^}96=5ze&DJ!|^uM+Cob$2agfRmwQfp2h1{-Hnr9sWOZx7Ntqa65^c)Oy3SAJ=rh=r_o1S>SJ>7bF_q@S?t4 zyQ|s76!&G?H#cu)wp@KW>6wOS$Q@U+eQ*SJ5$`hiTxq;L&ndAMIs$Q+Sl6B-)!E$_ zPfAErwuCgh&%G_T&kE_pyAsdVHkroxq=fXjUOghw*fvXuy)aL16}C=7nhWR3H1sA3 zVTzh$n%?zNUm4}Fc1ZI4SeD0w=X<40=X6OeGhDiQxxWIrzaph&SiaOUyrNc)DD{G( zW$(s1;4bjaO$v4UQxal(S{{X8Aar*Gydg;9$N2lw^kA`W9==>AIM|-*mCBUg?TT5o8GMDcG33)ZKrm50;Y?xg;#aT>4}#cX&{X%h77S^LB5i zjF|4@yXZ8jT1R!)&K|9!yjOMQzMzzv2i6m-((c*=T-}nC)sQ9b1jMnWS4v~% zc}N+BUb{T5o53?-PjRgz6UIR7p-A-a-QT~RqM6$fz{%Zcmt#BvV#P7{2QD)d>d zrwXxtNag-jHA($iiySr!{W7__R#5RI{m4YVB|5Ik=N@;fY7DOyEuMBH6c!??f`KP; zi|zyx+K0!ppA!mo5Qp`lJ#9Nu+$U6H=2}c0vyZYf5Q`QfOG+oi?b}J z9qep$g+y8{m&W_yh++5D0B4Y6ci)~iW2O5yDnzR$N}>11+Wl1-Sy-#Ex8;fJ(-l?s zjH;nE<@aF?%Y8J8E*i&F>tb^*ZlCm4JowwJXc2*v* z$M7fy%Cp>gzl8QZri^>n24&ovX3FEf3aE}q=&Jm?ULm{H5l zPvAp{5$)bgVQmKDx-X&={K4)@`@w5VJ=|X|YHsO-Pb(QM8u7VVPrzY9YD+u9p zh#3D=ftGrH>oHfb45Q+<_Av(3iP%p(R3p^~IYSx5+(R~!Vwp6Exrb;Z#4=%k{BT(# zv@)n}phk5tVNj=_Mh13mOc83NVJAa(5{;@pVlTJ5@0MER z?#x^q10E|kmUEdWR1vwES0Y@u)-D*F+q*K?+p{vu+qH6&aWn!gm?6#igg)99F%;^I z7)f$QjHWqPc9DfEdstefZdPKL;RU%oyzediYqedvFCwzM{SlkIJrU{N zu835bIdg5etuc1>ANh|T#IY7MaripryjbaJ)EWFae;KuvGVWN`&8#tdPi(u@QkJP zWthi0)YAGo%C32+ZDo4qIheC0uOqgTryAYwKu%KzG3Uv~GO+!`-#h4V zX(O}})xPah5Rpt)-_!p=xGZ$2a0{FVEZ(cs&p8s@ZA zQ*qy&(ddGcZoIQ%M&2}w+5f}bCEYcLnR|@=Ca{VD9VTIaDQbxtCQ@h2QK&mYC29jAZqp{bBw55b zdaMid{EmIwL2q&-0v(sPz@?BhPc))MM_C z8DI;Cr!OA#Img?$xveUxZEf&;IVRNwV=mQOj3aOMMg(|=NMmpm0XsoNV*R zUdQ60rGo*PGn^VnwbMMXxZl_liDG2xE_5{)pA+7_e?MMH+k=D6BmQ=SwvB`s8T|v!gLJxE%g6WTlLOf4< zYj-<2RR%%W@2jtNR6A(98D;`dLHw>7|6aO5(~vgndD>JP0Ge0v)| zPIb@ly-|7u^dO_offiUlQIqGHJ)$NcJz{6sM2!Mc2}yRVBzydo-KiAlfqD90PgEt& zYEyOJ>pKeEU^n|JT3Am!B;Mz}N`h8|pnC{THd_(aUz-+?f%Tu+M%y5Ew@uzf;=SYd z#niv8TwEFRthvfq{bn1LuBF(*Aav3<5Y228e1A!{XsrZWxE$#|U};2hxxY{mHrOYr zF2frHHv`cwBQ!IkLPs#VrHiS(HN3E%ny00N>X}IJ{fNXZ9m7O2Wz`bI_5Jm0@Y@5J z_t#3glD!8p>VJ+}QcZ9-jzYiXD6!XVtET+zw2gDOL%bmrT48U0DDg@gZRhs1Yl%lL z)l<=0LAs-WhrqCj5?OJnMC zFOhmQ7>|#kV~vTAsuyS- zqD$F7>X7?E;J5_#h|rG!Ep7!>zDu3(Cvj|#TksEuy&X#jBL3lS&C6e9z&Xl9O;c%~ zTkaDzM24tI*}lNx(&e1*A@DeVk{9;}-T2m$W+}d{nyfm4aQXz=`?7M zl9zt7nQwWZiTJt|KOsT~!v~W4SN9xHGbmOQh!(?JCR4e$lB-k`LAk5><9yjZFHhU{ zTrYipPS$GBKiSieOS-P6CuKHd+8kvSgzYS-_QtVYaAtSB(p1SEVuE2cp3xdWS*yrWTMH9UuxcHOr2>(l8T4m0 zXnL}TfY>1z?WytB8a*7DK5a)x*ja8`ry_2MD z;ppcoSn;WI)!arEMcHD|jgZ#_3A5E$(uWbJP1mrr9sTqqO9Qpq0+B%G+bkV}j>G*B zDn7~ibuzdY?ngA{H65bqX=0(zN4@T6Aa3tKgL9yFzQj|Rg73mi4InK;d zU)seM-JsU)`e98h6Nosbo#flNTUNqCzlkWC#&PN>x2_!`^T6Kt`tC#4@nn8x+p$Vx zC09l=RCE;~&cC5o@%-E6gD!ymr<#A<(>&?6RZ%r|Snu2yk;x+z#=snUIpMH0hg7A> z`|FeW3-UZny{9{qA9Y)+5M}j62eZYwk;~NFxyqMG_lakT^|B}WUx6KIHS`G7 zv`V4}f2D`U1M5?G%i^`f8oqfd8A>P{P37oz>?srPHj8(+-$IM;iK$C>q8}V#;FV8` zJ0M;V&)J(+c>A@Q3Gu?4Q^X6>J|YX^G!LAeZsW&WG-ngI0Ucr4sY_(7JCShm1tgpe zK|iet{D83SKR4A_l$8jD+fnHXZu(hxU)h&n=G_bpXX46=NvluP?YP-O2E(&x{|9X0 z&hZvc1EP2a-f^&D^eEl~+|n8M-5O@s#Uc6Wy^i_;E#J4NxZ`LPt3v_z&@`Pyvl<?a%1oSeHc34t9nd7`#Er1RT1-{yQN68FvZv><^(+EN>P z!-A$U?i16{;b{tgshuPEM6L5iV1230a$A$*w>2p%!Cz6Bc9z=Qb{bZSQuJ!ll4bLc zSUhha_U4W0LdpuX}@OJ8}LCsejg+iocSO>OAG3gGPD+o%&Szp5xz^g!&#yI`FA& z_|GOvg50tsVqM?UcO=n&Bt>kYBjH2lokEHYR@z)rPzT&!cL8>{yDmJ%{rv{4TagU+ zMYHdGJGZTUn!wzI9s=QXYBmk~MAVG_cm%#13C7=aG+W(oFsL%)fSbwVGiL8Pu|gmd z7#7y$lYPVV?B{Mw_f}Y}I%;9*156?Lfah$25X?~Lo^SDVP{N;riMBF!sPAXL@;piE zUbF`%0=^4bRNe5_lR<7zFe=*TR>9h7XOMyXJ=sVM!I{clI8S+p%nRw}#=l|J+eB%* zzil33gK6ra=lP5So)ar9Oebk86PN_%S8%60WY;C`DGn*Bbu+L&*}xanf#|KeMtJ9L zD0v0m`^>j@XSWYBw7^^&FRG>Kf5mWI(TgX9U7*y$E?O*`mIf5F%RF^KcSh zYM1s&C0nlMCb=41ILgJnz#6OLDWQ{JCN#mhUb_x)zh_`3ev}}633BF|#$odCHmTIz zhoi?iIb{_=jsT|#-UrIxi`2k!1&NT;2k%SCE~k$^lv0VDCYKeuwQ_=QKgBd%P72@u zYyFSbAE;mS#&V>{_s{r|UvAk_{MhkZ@6H-|H9G5+%JK12Tfg!c(5(xO^1yBH|6<*C z_P-Z7o8n79sD1RzBT0Ui(BJ=rv@9~beewElE{yg4;SbR#J}LWr_Odg>5l=jKdN|_Y zAA90D+q0t!X2$gYZRmyCkAIo`hoxcDi+(5P@b!Apx1Yroj*m%6v6rI%Kf)C{CBIFN z#>GEH@5woQr7I>y-eucg&wFS6GrkL#&H2mKm{~tSC0((_XA+l9E-QK{cHA$enLn3v zMPJzs-k(e-)9;t*_y4nTnB2eQZU6rNM<16z8Vr!W(o2w8$>_h#_|Z*pSb-CKafOre z?W^*8FA0TH86(`~XBKcph|t5WDuB)}iiyrN=SDu6jsH3DKMwxE_o10NTb%ie^{ZAb z*GE}Rk)~L*V%f6A`iS}ZD6`3I0uodH)^vx-|}8-A4o1e@UZZ!Sj~NcDeD*pB$RM`o2O{~j(?Ht-0X zB27%P*=|GFnwXT7k;k=C!^gD}pj8Yk*H8S`5<@IpxP|gtE#>!*%I_j?|NapZLz=`6 z;qUWtdkD+Kr2Gz2`F-RjMKHO3a{qe3C$@iKoh1JYu~LIK>0+E1pRT!znTao@RjI7)X)d(3}gn%sUFRALN#^gHQk fS$@NJP>B{p3ZGSQujoFJ^hLRS%0zHNI|cq9v;@p) literal 0 HcmV?d00001 diff --git a/script/testdata/spawn_times.md b/script/testdata/spawn_times.md new file mode 100644 index 0000000000..351817947d --- /dev/null +++ b/script/testdata/spawn_times.md @@ -0,0 +1,81 @@ +This binary comes from: . Since I couldn't build a binary in C that would cause the same bug, I just added the binary to the project. + +```rs +#![no_std] +#![cfg_attr(not(test), no_main)] + +#[cfg(test)] +extern crate alloc; + +#[cfg(not(test))] +use ckb_std::default_alloc; +#[cfg(not(test))] +ckb_std::entry!(program_entry); +#[cfg(not(test))] +default_alloc!(); + +use core::result::Result; + +use alloc::{vec}; +use core::ffi::{CStr}; + +use ckb_std::{debug, syscalls}; +use ckb_std::ckb_constants::Source; +use ckb_std::env::argv; +use ckb_std::syscalls::{current_cycles, get_memory_limit, set_content, spawn}; + + +/// +/// test case : +/// invoke int ckb_spawn( uint64_t memory_limit, +/// size_t index, +/// size_t source, +/// size_t bounds, +/// int argc, char* argv[], +/// int8_t* exit_code, +/// uint8_t* content, +/// uint64_t* content_length); +/// +/// for { +/// spawn(xxx) +/// } +/// case1 : for { +/// spawn(xxx) +/// } +/// +/// result: +/// return ERROR : ExceededMaximumCycles +/// +pub fn program_entry() -> i8 { + // let argvs = argv(); + // debug!("argvs length:{:?}:{:?}",argvs.len(),argvs); + + if get_memory_limit() != 8 { + return 0; + } + let mut exit_code: i8 = 0; + let mut content: [u8; 10] = [1; 10]; + + let content_length: u64 = content.len() as u64; + let mut spawn_args = syscalls::SpawnArgs { + memory_limit: 8, + exit_code: &mut exit_code as *mut i8, + content: content.as_mut_ptr(), + content_length: &content_length as *const u64 as *mut u64, + }; + // let cstr1 = CStr::from_bytes_with_nul(b"arg0\0").unwrap(); + //argv is empty + let cstrs = vec![]; + + spawn_args.memory_limit = 1; + for i in 0..10000 { + debug!("current idx:{:?}",i); + let result = spawn(0, Source::CellDep, 0, cstrs.as_slice(), &spawn_args); + assert_eq!(exit_code, 0); + // debug!("result:{:?}",result); + let cycles = current_cycles(); + debug!("cycle:{:?}",cycles); + } + return 0; +} +``` From fb978574ba960e114d8340a2940f93c3addfe4b2 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 18 Jul 2023 16:12:23 +0800 Subject: [PATCH 045/267] fix: set `TxPoolService.started` to `true` before `tx_pool_controller.load_persisted_data` --- tx-pool/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index b11564ea11..5dc54018b4 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -618,10 +618,10 @@ impl TxPoolServiceBuilder { } } }); + self.started.store(true, Ordering::Relaxed); if let Err(err) = self.tx_pool_controller.load_persisted_data(txs) { error!("Failed to import persisted txs, cause: {}", err); } - self.started.store(true, Ordering::Relaxed); } } From 824347ecbf891a1989c06cff8aec811495103739 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 18 Jul 2023 17:42:34 +0800 Subject: [PATCH 046/267] Add `winapi` dep to `dev-dependencies` --- Cargo.lock | 1 + util/stop-handler/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index ddd51deff0..4b074b818b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1292,6 +1292,7 @@ dependencies = [ "rand 0.8.5", "tokio", "tokio-util 0.7.8", + "winapi 0.3.9", ] [[package]] diff --git a/util/stop-handler/Cargo.toml b/util/stop-handler/Cargo.toml index 4ff8f9d529..1314346be4 100644 --- a/util/stop-handler/Cargo.toml +++ b/util/stop-handler/Cargo.toml @@ -21,4 +21,5 @@ tokio-util = "0.7.8" [dev-dependencies] ctrlc = { version = "3.1", features = ["termination"] } libc = "0.2" +winapi = "0.3.9" rand = "0.8.5" From d2640bffb736c347890cd619e8168326909f92ca Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 18 Jul 2023 17:43:04 +0800 Subject: [PATCH 047/267] Fix unit test: handle windows platform for signal-handle Signed-off-by: Eval EXEC --- util/stop-handler/src/tests.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/util/stop-handler/src/tests.rs b/util/stop-handler/src/tests.rs index 3141512bb7..099aa32b90 100644 --- a/util/stop-handler/src/tests.rs +++ b/util/stop-handler/src/tests.rs @@ -13,7 +13,18 @@ use tokio_util::sync::CancellationToken; fn send_ctrlc_later(duration: Duration) { std::thread::spawn(move || { std::thread::sleep(duration); - // send SIGINT to myself + + // send CTRL_C event to myself on windows platform + #[cfg(windows)] + { + let pid = std::process::id(); + unsafe { + winapi::um::wincon::GenerateConsoleCtrlEvent(winapi::um::wincon::CTRL_C_EVENT, pid); + } + } + + // send SIGINT to myself on Linux and MacOS platform + #[cfg(not(windows))] unsafe { libc::raise(libc::SIGINT); println!("[ $$ sent SIGINT to myself $$ ]"); From cb5d406ad6c2124c7d52a0a7f56a99d634b285f5 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 18 Jul 2023 22:40:11 +0800 Subject: [PATCH 048/267] Remove stop-handler unit test for Windows platform --- Cargo.lock | 1 - util/stop-handler/Cargo.toml | 1 - util/stop-handler/src/lib.rs | 3 ++- util/stop-handler/src/tests.rs | 11 +---------- 4 files changed, 3 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b074b818b..ddd51deff0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1292,7 +1292,6 @@ dependencies = [ "rand 0.8.5", "tokio", "tokio-util 0.7.8", - "winapi 0.3.9", ] [[package]] diff --git a/util/stop-handler/Cargo.toml b/util/stop-handler/Cargo.toml index 1314346be4..4ff8f9d529 100644 --- a/util/stop-handler/Cargo.toml +++ b/util/stop-handler/Cargo.toml @@ -21,5 +21,4 @@ tokio-util = "0.7.8" [dev-dependencies] ctrlc = { version = "3.1", features = ["termination"] } libc = "0.2" -winapi = "0.3.9" rand = "0.8.5" diff --git a/util/stop-handler/src/lib.rs b/util/stop-handler/src/lib.rs index fe80839dd3..48309b03e5 100644 --- a/util/stop-handler/src/lib.rs +++ b/util/stop-handler/src/lib.rs @@ -8,5 +8,6 @@ pub use stop_register::{ pub use tokio_util::sync::CancellationToken; mod stop_register; -#[cfg(test)] + +#[cfg(all(test, unix))] mod tests; diff --git a/util/stop-handler/src/tests.rs b/util/stop-handler/src/tests.rs index 099aa32b90..f45c200a83 100644 --- a/util/stop-handler/src/tests.rs +++ b/util/stop-handler/src/tests.rs @@ -14,17 +14,7 @@ fn send_ctrlc_later(duration: Duration) { std::thread::spawn(move || { std::thread::sleep(duration); - // send CTRL_C event to myself on windows platform - #[cfg(windows)] - { - let pid = std::process::id(); - unsafe { - winapi::um::wincon::GenerateConsoleCtrlEvent(winapi::um::wincon::CTRL_C_EVENT, pid); - } - } - // send SIGINT to myself on Linux and MacOS platform - #[cfg(not(windows))] unsafe { libc::raise(libc::SIGINT); println!("[ $$ sent SIGINT to myself $$ ]"); @@ -120,6 +110,7 @@ impl TestStopMemo { } } } + #[test] fn basic() { let (mut handle, mut stop_recv, _runtime) = new_global_runtime(); From f1e42ea21d3fd91a37e429104430c9e718488387 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 18 Jul 2023 18:54:56 +0800 Subject: [PATCH 049/267] docs: note the `send_transaction` is asynchronous --- rpc/src/module/pool.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/rpc/src/module/pool.rs b/rpc/src/module/pool.rs index d2832c734b..dfd4d9531d 100644 --- a/rpc/src/module/pool.rs +++ b/rpc/src/module/pool.rs @@ -16,6 +16,10 @@ pub trait PoolRpc { /// Submits a new transaction into the transaction pool. If the transaction is already in the /// pool, rebroadcast it to peers. /// + /// Please note that `send_transaction` is an asynchronous process. + /// The return of `send_transaction` does NOT indicate that the transaction have been fully verified. + /// If you want to track the status of the transaction, please use the `get_transaction`rpc. + /// /// ## Params /// /// * `transaction` - The transaction. From 4c60a5b2c7786b582e867c8f32c5c99cda76f5d1 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 18 Jul 2023 18:55:28 +0800 Subject: [PATCH 050/267] docs: re-execute `make gen-rpc-doc` Signed-off-by: Eval EXEC --- rpc/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rpc/README.md b/rpc/README.md index 166d922b30..6db0bf3d40 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -4343,6 +4343,8 @@ RPC Module Pool for transaction memory pool. Submits a new transaction into the transaction pool. If the transaction is already in the pool, rebroadcast it to peers. +Please note that `send_transaction` is an asynchronous process. The return of `send_transaction` does NOT indicate that the transaction have been fully verified. If you want to track the status of the transaction, please use the `get_transaction`rpc. + ###### Params * `transaction` - The transaction. From 9f14ab1be3c605874294290f0ac0e6a0b907c4fe Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 20 Jul 2023 15:37:46 +0800 Subject: [PATCH 051/267] Fix `FeeOfMultipleMaxBlockProposalsLimit` failed --- test/src/specs/mining/fee.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/src/specs/mining/fee.rs b/test/src/specs/mining/fee.rs index ecf6a10676..12a77cab7a 100644 --- a/test/src/specs/mining/fee.rs +++ b/test/src/specs/mining/fee.rs @@ -146,7 +146,7 @@ impl Spec for FeeOfMultipleMaxBlockProposalsLimit { }); (0..multiple).for_each(|_| { - let block = node.new_block(None, None, None); + let block = node.new_block_with_blocking(|template| template.proposals.is_empty()); node.submit_block(&block); assert_eq!( max_block_proposals_limit as usize, From e65581991fd1b92b6456f7a8f9b5da4614f7c0cc Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 15 May 2023 18:19:55 +0800 Subject: [PATCH 052/267] begin refactor tx_pool --- tx-pool/src/component/commit_txs_scanner.rs | 7 +- tx-pool/src/pool.rs | 634 +++++++++++++++++++- 2 files changed, 615 insertions(+), 26 deletions(-) diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index 8f97432ae8..af277a1b0a 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -2,6 +2,7 @@ use crate::component::{container::AncestorsScoreSortKey, entry::TxEntry, propose use ckb_types::{core::Cycle, packed::ProposalShortId}; use ckb_util::LinkedHashMap; use std::collections::{BTreeSet, HashMap, HashSet}; +use crate::pool::MultiIndexPoolEntryMap; // A template data struct used to store modified entries when package txs #[derive(Default)] @@ -49,6 +50,7 @@ const MAX_CONSECUTIVE_FAILURES: usize = 500; /// find txs to package into commitment pub struct CommitTxsScanner<'a> { proposed_pool: &'a ProposedPool, + pool_entries: &'a MultiIndexPoolEntryMap, entries: Vec, // modified_entries will store sorted packages after they are modified // because some of their txs are already in the block @@ -60,10 +62,11 @@ pub struct CommitTxsScanner<'a> { } impl<'a> CommitTxsScanner<'a> { - pub fn new(proposed_pool: &'a ProposedPool) -> CommitTxsScanner<'a> { + pub fn new(proposed_pool: &'a ProposedPool, pool_entries: &'a MultiIndexPoolEntryMap) -> CommitTxsScanner<'a> { CommitTxsScanner { proposed_pool, entries: Vec::new(), + pool_entries: pool_entries, modified_entries: TxModifiedEntries::default(), fetched_txs: HashSet::default(), failed_txs: HashSet::default(), @@ -80,7 +83,7 @@ impl<'a> CommitTxsScanner<'a> { let mut cycles: Cycle = 0; let mut consecutive_failed = 0; - let mut iter = self.proposed_pool.score_sorted_iter().peekable(); + let mut iter = self.pool_entries.score_sorted_iter().peekable(); loop { let mut using_modified = false; diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 8fb2da50c6..5ee73cfbb8 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -1,6 +1,10 @@ //! Top-level Pool type, methods, and tests +extern crate rustc_hash; +extern crate slab; use super::component::{commit_txs_scanner::CommitTxsScanner, TxEntry}; use crate::callback::Callbacks; +use crate::component::container::AncestorsScoreSortKey; +use crate::component::entry::EvictKey; use crate::component::pending::PendingQueue; use crate::component::proposed::ProposedPool; use crate::component::recent_reject::RecentReject; @@ -10,9 +14,18 @@ use ckb_app_config::TxPoolConfig; use ckb_logger::{debug, error, trace, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; +use ckb_types::core::error::OutPointError; +use ckb_types::packed::OutPoint; +use ckb_types::{ + core::cell::{CellMetaBuilder, CellProvider, CellStatus}, + prelude::*, +}; use ckb_types::{ core::{ - cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, + cell::{ + resolve_transaction, CellChecker, OverlayCellChecker, OverlayCellProvider, + ResolvedTransaction, + }, tx_pool::{TxPoolEntryInfo, TxPoolIds}, Cycle, TransactionView, UncleBlockView, }, @@ -20,7 +33,10 @@ use ckb_types::{ }; use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; -use std::collections::HashSet; +use multi_index_map::MultiIndexMap; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::collections::{HashSet, VecDeque}; use std::sync::Arc; const COMMITTED_HASH_CACHE_SIZE: usize = 100_000; @@ -51,6 +67,40 @@ macro_rules! evict_for_trim_size { }; } +type ConflictEntry = (TxEntry, Reject); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum Status { + Pending, + Gap, + Proposed, +} + +#[derive(MultiIndexMap, Clone)] +pub struct PoolEntry { + #[multi_index(hashed_unique)] + pub id: ProposalShortId, + #[multi_index(ordered_non_unique)] + pub score: AncestorsScoreSortKey, + #[multi_index(ordered_non_unique)] + pub status: Status, + #[multi_index(ordered_non_unique)] + pub evict_key: EvictKey, + + pub inner: TxEntry, + // other sort key +} + +impl MultiIndexPoolEntryMap { + /// sorted by ancestor score from higher to lower + pub fn score_sorted_iter(&self) -> impl Iterator { + // Note: multi_index don't support reverse order iteration now + // so we need to collect and reverse + let entries = self.iter_by_score().collect::>(); + entries.into_iter().rev().map(move |entry| &entry.inner) + } +} + /// Tx-pool implementation pub struct TxPool { pub(crate) config: TxPoolConfig, @@ -60,6 +110,18 @@ pub struct TxPool { pub(crate) gap: PendingQueue, /// Tx pool that finely for commit pub(crate) proposed: ProposedPool, + + /// The pool entries with different kinds of sort strategies + pub(crate) entries: MultiIndexPoolEntryMap, + /// dep-set map represent in-pool tx's header deps + pub(crate) header_deps: HashMap>, + /// dep-set map represent in-pool tx's deps + pub(crate) deps: HashMap>, + /// input-set map represent in-pool tx's inputs + pub(crate) inputs: HashMap>, + pub(crate) outputs: HashMap>, + pub(crate) max_ancestors_count: usize, + /// cache for committed transactions hash pub(crate) committed_txs_hash_cache: LruCache, // sum of all tx_pool tx's virtual sizes. @@ -77,12 +139,18 @@ pub struct TxPool { impl TxPool { /// Create new TxPool pub fn new(config: TxPoolConfig, snapshot: Arc) -> TxPool { - let recent_reject = build_recent_reject(&config); + let recent_reject = Self::build_recent_reject(&config); let expiry = config.expiry_hours as u64 * 60 * 60 * 1000; TxPool { pending: PendingQueue::new(), gap: PendingQueue::new(), proposed: ProposedPool::new(config.max_ancestors_count), + entries: MultiIndexPoolEntryMap::default(), + header_deps: HashMap::default(), + deps: Default::default(), + inputs: Default::default(), + outputs: Default::default(), + max_ancestors_count: config.max_ancestors_count, committed_txs_hash_cache: LruCache::new(COMMITTED_HASH_CACHE_SIZE), total_tx_size: 0, total_tx_cycles: 0, @@ -135,6 +203,24 @@ impl TxPool { self.total_tx_cycles = total_tx_cycles; } + fn add_poolentry(&mut self, entry: TxEntry, status: Status) -> bool { + let short_id = entry.proposal_short_id(); + if self.entries.get_by_id(&short_id).is_some() { + return false; + } + trace!("add_{:?} {}", status, entry.transaction().hash()); + let score = entry.as_score_key(); + let evict_key = entry.as_evict_key(); + self.entries.insert(PoolEntry { + id: short_id, + score, + status, + inner: entry, + evict_key, + }); + true + } + /// Add tx to pending pool /// If did have this value present, false is returned. pub fn add_pending(&mut self, entry: TxEntry) -> bool { @@ -145,6 +231,10 @@ impl TxPool { self.pending.add_entry(entry) } + pub fn add_pending_v2(&mut self, entry: TxEntry) -> bool { + self.add_poolentry(entry, Status::Pending) + } + /// Add tx which proposed but still uncommittable to gap pool pub fn add_gap(&mut self, entry: TxEntry) -> bool { if self.proposed.contains_key(&entry.proposal_short_id()) { @@ -154,17 +244,29 @@ impl TxPool { self.gap.add_entry(entry) } + pub fn add_gap_v2(&mut self, entry: TxEntry) -> bool { + self.add_poolentry(entry, Status::Gap) + } + /// Add tx to proposed pool pub fn add_proposed(&mut self, entry: TxEntry) -> Result { trace!("add_proposed {}", entry.transaction().hash()); self.proposed.add_entry(entry) } + pub fn add_proposed_v2(&mut self, entry: TxEntry) -> bool { + self.add_poolentry(entry, Status::Proposed) + } + /// Returns true if the tx-pool contains a tx with specified id. pub fn contains_proposal_id(&self, id: &ProposalShortId) -> bool { self.pending.contains_key(id) || self.gap.contains_key(id) || self.proposed.contains_key(id) } + pub fn contains_proposal_id_v2(&self, id: &ProposalShortId) -> bool { + self.entries.get_by_id(id).is_some() + } + /// Returns tx with cycles corresponding to the id. pub fn get_tx_with_cycles(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { self.pending @@ -182,6 +284,12 @@ impl TxPool { }) } + pub fn get_tx_with_cycles_v2(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { + self.entries + .get_by_id(id) + .map(|entry| (entry.inner.transaction().clone(), entry.inner.cycles)) + } + /// Returns tx corresponding to the id. pub fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { self.pending @@ -190,11 +298,28 @@ impl TxPool { .or_else(|| self.proposed.get_tx(id)) } + pub fn get_tx_v2(&self, id: &ProposalShortId) -> Option<&TransactionView> { + self.entries + .get_by_id(id) + .map(|entry| entry.inner.transaction()) + } + /// Returns tx from pending and gap corresponding to the id. RPC pub fn get_entry_from_pending_or_gap(&self, id: &ProposalShortId) -> Option<&TxEntry> { self.pending.get(id).or_else(|| self.gap.get(id)) } + pub fn get_entry_from_pending_or_gap_v2(&self, id: &ProposalShortId) -> Option<&TxEntry> { + if let Some(entry) = self.entries.get_by_id(id) { + match entry.status { + Status::Pending | Status::Gap => return Some(&entry.inner), + _ => return None, + } + } else { + return None; + } + } + pub(crate) fn proposed(&self) -> &ProposedPool { &self.proposed } @@ -209,6 +334,15 @@ impl TxPool { .or_else(|| self.pending.get_tx(id)) } + pub(crate) fn get_tx_from_proposed_and_others_v2( + &self, + id: &ProposalShortId, + ) -> Option<&TransactionView> { + self.entries + .get_by_id(id) + .map(|entry| entry.inner.transaction()) + } + pub(crate) fn remove_committed_txs<'a>( &mut self, txs: impl Iterator, @@ -245,6 +379,129 @@ impl TxPool { } } + pub(crate) fn resolve_conflict_header_dep_v2( + &mut self, + detached_headers: &HashSet, + callbacks: &Callbacks, + ) { + for (entry, reject) in self.__resolve_conflict_header_dep_v2(detached_headers) { + callbacks.call_reject(self, &entry, reject); + } + } + + pub(crate) fn get_descendants(&self, entry: &TxEntry) -> HashSet { + let mut entries: VecDeque<&TxEntry> = VecDeque::new(); + entries.push_back(entry); + + let mut descendants = HashSet::new(); + while let Some(entry) = entries.pop_front() { + let outputs = entry.transaction().output_pts(); + + for output in outputs { + if let Some(ids) = self.outputs.get(&output) { + for id in ids { + if descendants.insert(id.clone()) { + if let Some(entry) = self.entries.get_by_id(id) { + entries.push_back(&entry.inner); + } + } + } + } + } + } + descendants + } + + pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { + let inputs = entry.transaction().input_pts_iter(); + let tx_short_id = entry.proposal_short_id(); + let outputs = entry.transaction().output_pts(); + + for i in inputs { + if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { + let empty = { + let ids = occupied.get_mut(); + ids.remove(&tx_short_id); + ids.is_empty() + }; + if empty { + occupied.remove(); + } + } + } + + // remove dep + for d in entry.related_dep_out_points().cloned() { + if let Entry::Occupied(mut occupied) = self.deps.entry(d) { + let empty = { + let ids = occupied.get_mut(); + ids.remove(&tx_short_id); + ids.is_empty() + }; + if empty { + occupied.remove(); + } + } + } + + for o in outputs { + self.outputs.remove(&o); + } + + self.header_deps.remove(&tx_short_id); + } + + fn remove_entry(&mut self, id: &ProposalShortId) -> Option { + let removed = self.entries.remove_by_id(id); + + if let Some(ref entry) = removed { + self.remove_entry_relation(&entry.inner); + } + removed.map(|e| e.inner) + } + + fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { + let mut removed = Vec::new(); + if let Some(entry) = self.entries.remove_by_id(id) { + let descendants = self.get_descendants(&entry.inner); + self.remove_entry_relation(&entry.inner); + removed.push(entry.inner); + for id in descendants { + if let Some(entry) = self.remove_entry(&id) { + removed.push(entry); + } + } + } + removed + } + + fn __resolve_conflict_header_dep_v2( + &mut self, + headers: &HashSet, + ) -> Vec { + let mut conflicts = Vec::new(); + + // invalid header deps + let mut ids = Vec::new(); + for (tx_id, deps) in self.header_deps.iter() { + for hash in deps { + if headers.contains(hash) { + ids.push((hash.clone(), tx_id.clone())); + break; + } + } + } + + for (blk_hash, id) in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash.to_owned())); + conflicts.push((entry, reject)); + } + } + conflicts + } + pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { let hash = tx.hash(); let short_id = tx.proposal_short_id(); @@ -287,6 +544,50 @@ impl TxPool { } } + fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { + let inputs = tx.input_pts_iter(); + let mut conflicts = Vec::new(); + + for i in inputs { + if let Some(ids) = self.inputs.remove(&i) { + for id in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + conflicts.push((entry, reject)); + } + } + } + + // deps consumed + if let Some(ids) = self.deps.remove(&i) { + for id in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + conflicts.push((entry, reject)); + } + } + } + } + conflicts + } + + pub(crate) fn remove_committed_tx_v2(&mut self, tx: &TransactionView, callbacks: &Callbacks) { + let hash = tx.hash(); + let short_id = tx.proposal_short_id(); + if let Some(entry) = self.remove_entry(&short_id) { + debug!("remove_committed_tx from gap {}", hash); + callbacks.call_committed(self, &entry) + } + { + let conflicts = self.resolve_conflict(tx); + for (entry, reject) in conflicts { + callbacks.call_reject(self, &entry, reject); + } + } + } + // Expire all transaction (and their dependencies) in the pool. pub(crate) fn remove_expired(&mut self, callbacks: &Callbacks) { let now_ms = ckb_systemtime::unix_time_as_millis(); @@ -318,6 +619,42 @@ impl TxPool { } } + fn remove_entries_by_filter bool>( + &mut self, + mut predicate: P, + ) -> Vec { + let mut removed = Vec::new(); + for (_, entry) in self.entries.iter() { + if predicate(&entry.id, &entry.inner) { + removed.push(entry.inner.clone()); + } + } + for entry in &removed { + self.remove_entry(&entry.proposal_short_id()); + } + + removed + } + + // Expire all transaction (and their dependencies) in the pool. + pub(crate) fn remove_expired_v2(&mut self, callbacks: &Callbacks) { + let now_ms = ckb_systemtime::unix_time_as_millis(); + let removed: Vec<_> = self + .entries + .iter() + .filter(|&(_, entry)| self.expiry + entry.inner.timestamp < now_ms) + .map(|(_, entry)| entry.inner.clone()) + .collect(); + + for entry in removed { + self.remove_entry(&entry.proposal_short_id()); + let tx_hash = entry.transaction().hash(); + debug!("remove_expired {} timestamp({})", tx_hash, entry.timestamp); + let reject = Reject::Expiry(entry.timestamp); + callbacks.call_reject(self, &entry, reject); + } + } + // Remove transactions from the pool until total size < size_limit. pub(crate) fn limit_size(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { @@ -331,6 +668,31 @@ impl TxPool { } } + pub(crate) fn limit_size_v2(&mut self, callbacks: &Callbacks) { + while self.total_tx_size > self.config.max_tx_pool_size { + if let Some(id) = self + .entries + .iter_by_evict_key() + .next() + .map(|entry| entry.id.clone()) + { + let removed = self.remove_entry_and_descendants(&id); + for entry in removed { + let tx_hash = entry.transaction().hash(); + debug!( + "removed by size limit {} timestamp({})", + tx_hash, entry.timestamp + ); + let reject = Reject::Full(format!( + "the fee_rate for this transaction is: {}", + entry.fee_rate() + )); + callbacks.call_reject(self, &entry, reject); + } + } + } + } + // remove transaction with detached proposal from gap and proposed // try re-put to pending pub(crate) fn remove_by_detached_proposal<'a>( @@ -360,6 +722,34 @@ impl TxPool { } } + // remove transaction with detached proposal from gap and proposed + // try re-put to pending + pub(crate) fn remove_by_detached_proposal_v2<'a>( + &mut self, + ids: impl Iterator, + ) { + for id in ids { + if let Some(e) = self.entries.get_by_id(id) { + let status = e.status; + // TODO: double check this + if status == Status::Pending { + continue; + } + let mut entries = self.remove_entry_and_descendants(id); + entries.sort_unstable_by_key(|entry| entry.ancestors_count); + for mut entry in entries { + let tx_hash = entry.transaction().hash(); + entry.reset_ancestors_state(); + let ret = self.add_pending(entry); + debug!( + "remove_by_detached_proposal from {:?} {} add_pending {}", status, + tx_hash, ret + ); + } + } + } + } + pub(crate) fn remove_tx(&mut self, id: &ProposalShortId) -> bool { let entries = self.proposed.remove_entry_and_descendants(id); if !entries.is_empty() { @@ -382,6 +772,14 @@ impl TxPool { false } + pub(crate) fn remove_tx_v2(&mut self, id: &ProposalShortId) -> bool { + if let Some(entry) = self.remove_entry(id) { + self.update_statics_for_remove_tx(entry.size, entry.cycles); + return true; + } + false + } + pub(crate) fn resolve_tx_from_pending_and_proposed( &self, tx: TransactionView, @@ -402,6 +800,18 @@ impl TxPool { .map_err(Reject::Resolve) } + pub(crate) fn resolve_tx_from_pending_and_proposed_v2( + &self, + tx: TransactionView, + ) -> Result, Reject> { + let snapshot = self.snapshot(); + let provider = OverlayCellProvider::new(&self.entries, snapshot); + let mut seen_inputs = HashSet::new(); + resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) + .map(Arc::new) + .map_err(Reject::Resolve) + } + pub(crate) fn check_rtx_from_pending_and_proposed( &self, rtx: &ResolvedTransaction, @@ -416,6 +826,17 @@ impl TxPool { .map_err(Reject::Resolve) } + pub(crate) fn check_rtx_from_pending_and_proposed_v2( + &self, + rtx: &ResolvedTransaction, + ) -> Result<(), Reject> { + let snapshot = self.snapshot(); + let checker = OverlayCellChecker::new(&self.entries, snapshot); + let mut seen_inputs = HashSet::new(); + rtx.check(&mut seen_inputs, &checker, snapshot) + .map_err(Reject::Resolve) + } + pub(crate) fn resolve_tx_from_proposed( &self, tx: TransactionView, @@ -428,6 +849,17 @@ impl TxPool { .map_err(Reject::Resolve) } + pub(crate) fn resolve_tx_from_proposed_v2( + &self, + rtx: &ResolvedTransaction, + ) -> Result<(), Reject> { + let snapshot = self.snapshot(); + let checker = OverlayCellChecker::new(&self.entries, snapshot); + let mut seen_inputs = HashSet::new(); + rtx.check(&mut seen_inputs, &checker, snapshot) + .map_err(Reject::Resolve) + } + pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { let snapshot = self.snapshot(); let cell_checker = OverlayCellChecker::new(&self.proposed, snapshot); @@ -436,6 +868,17 @@ impl TxPool { .map_err(Reject::Resolve) } + pub(crate) fn check_rtx_from_proposed_v2( + &self, + rtx: &ResolvedTransaction, + ) -> Result<(), Reject> { + let snapshot = self.snapshot(); + let cell_checker = OverlayCellChecker::new(&self.entries, snapshot); + let mut seen_inputs = HashSet::new(); + rtx.check(&mut seen_inputs, &cell_checker, snapshot) + .map_err(Reject::Resolve) + } + pub(crate) fn gap_rtx( &mut self, cache_entry: CacheEntry, @@ -498,6 +941,24 @@ impl TxPool { } } + // fill proposal txs + pub fn fill_proposals( + &self, + limit: usize, + exclusion: &HashSet, + proposals: &mut HashSet, + status: &Status, + ) { + for entry in self.entries.get_by_status(status) { + if proposals.len() == limit { + break; + } + if !exclusion.contains(&entry.id) { + proposals.insert(entry.id.clone()); + } + } + } + /// Get to-be-proposal transactions that may be included in the next block. pub fn get_proposals( &self, @@ -511,6 +972,18 @@ impl TxPool { proposals } + /// Get to-be-proposal transactions that may be included in the next block. + pub fn get_proposals_v2( + &self, + limit: usize, + exclusion: &HashSet, + ) -> HashSet { + let mut proposals = HashSet::with_capacity(limit); + self.fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); + self.fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); + proposals + } + /// Returns tx from tx-pool or storage corresponding to the id. pub fn get_tx_from_pool_or_store( &self, @@ -542,6 +1015,25 @@ impl TxPool { TxPoolIds { pending, proposed } } + // This is for RPC request, performance is not critical + pub(crate) fn get_ids_v2(&self) -> TxPoolIds { + let pending: Vec = self + .entries + .get_by_status(&Status::Pending) + .iter() + .chain(self.entries.get_by_status(&Status::Gap).iter()) + .map(|entry| entry.inner.transaction().hash()) + .collect(); + + let proposed: Vec = self + .proposed + .iter() + .map(|(_, entry)| entry.transaction().hash()) + .collect(); + + TxPoolIds { pending, proposed } + } + pub(crate) fn get_all_entry_info(&self) -> TxPoolEntryInfo { let pending = self .pending @@ -563,8 +1055,27 @@ impl TxPool { TxPoolEntryInfo { pending, proposed } } + pub(crate) fn get_all_entry_info_v2(&self) -> TxPoolEntryInfo { + let pending = self + .entries + .get_by_status(&Status::Pending) + .iter() + .chain(self.entries.get_by_status(&Status::Gap).iter()) + .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) + .collect(); + + let proposed = self + .entries + .get_by_status(&Status::Proposed) + .iter() + .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) + .collect(); + + TxPoolEntryInfo { pending, proposed } + } + pub(crate) fn drain_all_transactions(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed) + let mut txs = CommitTxsScanner::new(&self.proposed, &self.entries) .txs_to_commit(self.total_tx_size, self.total_tx_cycles) .0 .into_iter() @@ -579,10 +1090,47 @@ impl TxPool { txs } + pub(crate) fn drain_all_transactions_v2(&mut self) -> Vec { + let mut txs = CommitTxsScanner::new(&self.proposed, &self.entries) + .txs_to_commit(self.total_tx_size, self.total_tx_cycles) + .0 + .into_iter() + .map(|tx_entry| tx_entry.into_transaction()) + .collect::>(); + self.proposed.clear(); + let mut pending = self + .entries + .remove_by_status(&Status::Pending) + .into_iter() + .map(|e| e.inner.into_transaction()) + .collect::>(); + txs.append(&mut pending); + let mut gap = self + .entries + .remove_by_status(&Status::Gap) + .into_iter() + .map(|e| e.inner.into_transaction()) + .collect::>(); + txs.append(&mut gap); + self.total_tx_size = 0; + self.total_tx_cycles = 0; + self.deps.clear(); + self.inputs.clear(); + self.header_deps.clear(); + self.outputs.clear(); + // self.touch_last_txs_updated_at(); + txs + } + pub(crate) fn clear(&mut self, snapshot: Arc) { self.pending = PendingQueue::new(); self.gap = PendingQueue::new(); self.proposed = ProposedPool::new(self.config.max_ancestors_count); + self.entries = MultiIndexPoolEntryMap::default(); + self.header_deps = HashMap::default(); + self.deps = HashMap::default(); + self.inputs = HashMap::default(); + self.outputs = HashMap::default(); self.snapshot = snapshot; self.committed_txs_hash_cache = LruCache::new(COMMITTED_HASH_CACHE_SIZE); self.total_tx_size = 0; @@ -606,8 +1154,8 @@ impl TxPool { max_block_cycles: Cycle, txs_size_limit: usize, ) -> (Vec, usize, Cycle) { - let (entries, size, cycles) = - CommitTxsScanner::new(self.proposed()).txs_to_commit(txs_size_limit, max_block_cycles); + let (entries, size, cycles) = CommitTxsScanner::new(self.proposed(), &self.entries) + .txs_to_commit(txs_size_limit, max_block_cycles); if !entries.is_empty() { ckb_logger::info!( @@ -621,27 +1169,65 @@ impl TxPool { } (entries, size, cycles) } + + fn build_recent_reject(config: &TxPoolConfig) -> Option { + if !config.recent_reject.as_os_str().is_empty() { + let recent_reject_ttl = config.keep_rejected_tx_hashes_days as i32 * 24 * 60 * 60; + match RecentReject::new( + &config.recent_reject, + config.keep_rejected_tx_hashes_count, + recent_reject_ttl, + ) { + Ok(recent_reject) => Some(recent_reject), + Err(err) => { + error!( + "Failed to open recent reject database {:?} {}", + config.recent_reject, err + ); + None + } + } + } else { + warn!("Recent reject database is disabled!"); + None + } + } } -fn build_recent_reject(config: &TxPoolConfig) -> Option { - if !config.recent_reject.as_os_str().is_empty() { - let recent_reject_ttl = config.keep_rejected_tx_hashes_days as i32 * 24 * 60 * 60; - match RecentReject::new( - &config.recent_reject, - config.keep_rejected_tx_hashes_count, - recent_reject_ttl, - ) { - Ok(recent_reject) => Some(recent_reject), - Err(err) => { - error!( - "Failed to open recent reject database {:?} {}", - config.recent_reject, err - ); - None +impl CellProvider for MultiIndexPoolEntryMap { + fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { + let tx_hash = out_point.tx_hash(); + if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { + match entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + { + Some((output, data)) => { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } + None => CellStatus::Unknown, } + } else { + CellStatus::Unknown + } + } +} + +impl CellChecker for MultiIndexPoolEntryMap { + fn is_live(&self, out_point: &OutPoint) -> Option { + let tx_hash = out_point.tx_hash(); + if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { + entry + .inner + .transaction() + .output(out_point.index().unpack()) + .map(|_| true) + } else { + None } - } else { - warn!("Recent reject database is disabled!"); - None } } From 7349d0226bd3a404e7f7e12a9f73b6f082655a3d Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 17 May 2023 01:41:37 +0800 Subject: [PATCH 053/267] add pool_map --- tx-pool/src/component/commit_txs_scanner.rs | 2 +- tx-pool/src/component/mod.rs | 1 + tx-pool/src/component/pool_map.rs | 430 ++++++++++++++++++++ tx-pool/src/component/tests/mod.rs | 1 + tx-pool/src/component/tests/pool_map.rs | 236 +++++++++++ tx-pool/src/pool.rs | 396 +++--------------- 6 files changed, 719 insertions(+), 347 deletions(-) create mode 100644 tx-pool/src/component/pool_map.rs create mode 100644 tx-pool/src/component/tests/pool_map.rs diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index af277a1b0a..a9b4287140 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -2,7 +2,7 @@ use crate::component::{container::AncestorsScoreSortKey, entry::TxEntry, propose use ckb_types::{core::Cycle, packed::ProposalShortId}; use ckb_util::LinkedHashMap; use std::collections::{BTreeSet, HashMap, HashSet}; -use crate::pool::MultiIndexPoolEntryMap; +use crate::component::pool_map::MultiIndexPoolEntryMap; // A template data struct used to store modified entries when package txs #[derive(Default)] diff --git a/tx-pool/src/component/mod.rs b/tx-pool/src/component/mod.rs index 3df4d620de..60b4e78ae7 100644 --- a/tx-pool/src/component/mod.rs +++ b/tx-pool/src/component/mod.rs @@ -7,6 +7,7 @@ pub(crate) mod orphan; pub(crate) mod pending; pub(crate) mod proposed; pub(crate) mod recent_reject; +pub(crate) mod pool_map; #[cfg(test)] mod tests; diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs new file mode 100644 index 0000000000..2a66e760f2 --- /dev/null +++ b/tx-pool/src/component/pool_map.rs @@ -0,0 +1,430 @@ +//! Top-level Pool type, methods, and tests +extern crate rustc_hash; +extern crate slab; +use crate::component::container::AncestorsScoreSortKey; +use crate::component::entry::EvictKey; +use crate::error::Reject; +use crate::TxEntry; +use ckb_logger::{debug, error, trace, warn}; +use ckb_types::core::error::OutPointError; +use ckb_types::packed::OutPoint; +use ckb_types::{ + core::cell::{CellMetaBuilder, CellProvider, CellStatus}, + prelude::*, +}; +use ckb_types::{ + core::{cell::CellChecker, TransactionView}, + packed::{Byte32, ProposalShortId}, +}; +use multi_index_map::MultiIndexMap; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::collections::{HashSet, VecDeque}; + +type ConflictEntry = (TxEntry, Reject); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum Status { + Pending, + Gap, + Proposed, +} + +#[derive(MultiIndexMap, Clone)] +pub struct PoolEntry { + #[multi_index(hashed_unique)] + pub id: ProposalShortId, + #[multi_index(ordered_non_unique)] + pub score: AncestorsScoreSortKey, + #[multi_index(ordered_non_unique)] + pub status: Status, + #[multi_index(ordered_non_unique)] + pub evict_key: EvictKey, + + pub inner: TxEntry, + // other sort key +} + +impl MultiIndexPoolEntryMap { + /// sorted by ancestor score from higher to lower + pub fn score_sorted_iter(&self) -> impl Iterator { + // Note: multi_index don't support reverse order iteration now + // so we need to collect and reverse + let entries = self.iter_by_score().collect::>(); + entries.into_iter().rev().map(move |entry| &entry.inner) + } +} + +pub struct PoolMap { + /// The pool entries with different kinds of sort strategies + pub(crate) entries: MultiIndexPoolEntryMap, + /// dep-set map represent in-pool tx's header deps + pub(crate) header_deps: HashMap>, + /// dep-set map represent in-pool tx's deps + pub(crate) deps: HashMap>, + /// input-set map represent in-pool tx's inputs + pub(crate) inputs: HashMap>, + pub(crate) outputs: HashMap>, + pub(crate) max_ancestors_count: usize, +} + +impl PoolMap { + pub fn new(max_ancestors_count: usize) -> Self { + PoolMap { + entries: MultiIndexPoolEntryMap::default(), + header_deps: HashMap::default(), + deps: HashMap::default(), + inputs: HashMap::default(), + outputs: HashMap::default(), + max_ancestors_count, + } + } + + #[cfg(test)] + pub(crate) fn outputs_len(&self) -> usize { + self.outputs.len() + } + + #[cfg(test)] + pub(crate) fn header_deps_len(&self) -> usize { + self.header_deps.len() + } + + #[cfg(test)] + pub(crate) fn deps_len(&self) -> usize { + self.deps.len() + } + + #[cfg(test)] + pub(crate) fn inputs_len(&self) -> usize { + self.inputs.len() + } + + #[cfg(test)] + pub fn size(&self) -> usize { + self.entries.len() + } + + #[cfg(test)] + pub fn contains_key(&self, id: &ProposalShortId) -> bool { + self.entries.get_by_id(id).is_some() + } + + pub(crate) fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { + self.entries + .get_by_id(id) + .map(|entry| entry.inner.transaction()) + } + + pub fn add_entry(&mut self, entry: TxEntry, status: Status) -> bool { + let tx_short_id = entry.proposal_short_id(); + if self.entries.get_by_id(&tx_short_id).is_some() { + return false; + } + trace!("add_{:?} {}", status, entry.transaction().hash()); + let inputs = entry.transaction().input_pts_iter(); + let outputs = entry.transaction().output_pts(); + + for i in inputs { + self.inputs + .entry(i.to_owned()) + .or_default() + .insert(tx_short_id.clone()); + + if let Some(outputs) = self.outputs.get_mut(&i) { + outputs.insert(tx_short_id.clone()); + } + } + + // record dep-txid + for d in entry.related_dep_out_points() { + self.deps + .entry(d.to_owned()) + .or_default() + .insert(tx_short_id.clone()); + + if let Some(outputs) = self.outputs.get_mut(d) { + outputs.insert(tx_short_id.clone()); + } + } + + // record tx unconsumed output + for o in outputs { + self.outputs.insert(o, HashSet::new()); + } + + // record header_deps + let header_deps = entry.transaction().header_deps(); + if !header_deps.is_empty() { + self.header_deps + .insert(tx_short_id.clone(), header_deps.into_iter().collect()); + } + + let score = entry.as_score_key(); + let evict_key = entry.as_evict_key(); + self.entries.insert(PoolEntry { + id: tx_short_id, + score, + status, + inner: entry, + evict_key, + }); + true + } + + pub fn get_by_id(&self, id: &ProposalShortId) -> Option<&PoolEntry> { + self.entries.get_by_id(id).map(|entry| entry) + } + + fn get_descendants(&self, entry: &TxEntry) -> HashSet { + let mut entries: VecDeque<&TxEntry> = VecDeque::new(); + entries.push_back(entry); + + let mut descendants = HashSet::new(); + while let Some(entry) = entries.pop_front() { + let outputs = entry.transaction().output_pts(); + + for output in outputs { + if let Some(ids) = self.outputs.get(&output) { + for id in ids { + if descendants.insert(id.clone()) { + if let Some(entry) = self.entries.get_by_id(id) { + entries.push_back(&entry.inner); + } + } + } + } + } + } + descendants + } + + pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { + let inputs = entry.transaction().input_pts_iter(); + let tx_short_id = entry.proposal_short_id(); + let outputs = entry.transaction().output_pts(); + + for i in inputs { + if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { + let empty = { + let ids = occupied.get_mut(); + ids.remove(&tx_short_id); + ids.is_empty() + }; + if empty { + occupied.remove(); + } + } + } + + // remove dep + for d in entry.related_dep_out_points().cloned() { + if let Entry::Occupied(mut occupied) = self.deps.entry(d) { + let empty = { + let ids = occupied.get_mut(); + ids.remove(&tx_short_id); + ids.is_empty() + }; + if empty { + occupied.remove(); + } + } + } + + for o in outputs { + self.outputs.remove(&o); + } + + self.header_deps.remove(&tx_short_id); + } + + pub fn remove_entry(&mut self, id: &ProposalShortId) -> Option { + let removed = self.entries.remove_by_id(id); + + if let Some(ref entry) = removed { + self.remove_entry_relation(&entry.inner); + } + removed.map(|e| e.inner) + } + + pub fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { + let mut removed = Vec::new(); + if let Some(entry) = self.entries.remove_by_id(id) { + let descendants = self.get_descendants(&entry.inner); + self.remove_entry_relation(&entry.inner); + removed.push(entry.inner); + for id in descendants { + if let Some(entry) = self.remove_entry(&id) { + removed.push(entry); + } + } + } + removed + } + + pub fn resolve_conflict_header_dep( + &mut self, + headers: &HashSet, + ) -> Vec { + let mut conflicts = Vec::new(); + + // invalid header deps + let mut ids = Vec::new(); + for (tx_id, deps) in self.header_deps.iter() { + for hash in deps { + if headers.contains(hash) { + ids.push((hash.clone(), tx_id.clone())); + break; + } + } + } + + for (blk_hash, id) in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash.to_owned())); + conflicts.push((entry, reject)); + } + } + conflicts + } + + pub fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { + let inputs = tx.input_pts_iter(); + let mut conflicts = Vec::new(); + + for i in inputs { + if let Some(ids) = self.inputs.remove(&i) { + for id in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + conflicts.push((entry, reject)); + } + } + } + + // deps consumed + if let Some(ids) = self.deps.remove(&i) { + for id in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + conflicts.push((entry, reject)); + } + } + } + } + conflicts + } + + // fill proposal txs + pub fn fill_proposals( + &self, + limit: usize, + exclusion: &HashSet, + proposals: &mut HashSet, + status: &Status, + ) { + for entry in self.entries.get_by_status(status) { + if proposals.len() == limit { + break; + } + if !exclusion.contains(&entry.id) { + proposals.insert(entry.id.clone()); + } + } + } + + pub fn remove_entries_by_filter bool>( + &mut self, + mut predicate: P, + ) -> Vec { + let mut removed = Vec::new(); + for (_, entry) in self.entries.iter() { + if predicate(&entry.id, &entry.inner) { + removed.push(entry.inner.clone()); + } + } + for entry in &removed { + self.remove_entry(&entry.proposal_short_id()); + } + + removed + } + + pub fn iter(&self) -> impl Iterator { + self.entries.iter().map(|(_, entry)| entry) + } + + pub fn iter_by_evict_key(&self) -> impl Iterator { + self.entries.iter_by_evict_key() + } + + pub fn next_evict_entry(&self) -> Option { + self.iter_by_evict_key() + .into_iter() + .next() + .map(|entry| entry.id.clone()) + } + + pub fn clear(&mut self) { + self.entries = MultiIndexPoolEntryMap::default(); + self.deps.clear(); + self.inputs.clear(); + self.header_deps.clear(); + self.outputs.clear(); + } + + pub(crate) fn drain(&mut self) -> Vec { + let txs = self + .entries + .iter() + .map(|(_k, entry)| entry.inner.clone().into_transaction()) + .collect::>(); + self.entries.clear(); + self.deps.clear(); + self.inputs.clear(); + self.header_deps.clear(); + self.outputs.clear(); + txs + } +} + +impl CellProvider for MultiIndexPoolEntryMap { + fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { + let tx_hash = out_point.tx_hash(); + if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { + match entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + { + Some((output, data)) => { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } + None => CellStatus::Unknown, + } + } else { + CellStatus::Unknown + } + } +} + +impl CellChecker for MultiIndexPoolEntryMap { + fn is_live(&self, out_point: &OutPoint) -> Option { + let tx_hash = out_point.tx_hash(); + if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { + entry + .inner + .transaction() + .output(out_point.index().unpack()) + .map(|_| true) + } else { + None + } + } +} diff --git a/tx-pool/src/component/tests/mod.rs b/tx-pool/src/component/tests/mod.rs index 5bde917729..0f8bfcd719 100644 --- a/tx-pool/src/component/tests/mod.rs +++ b/tx-pool/src/component/tests/mod.rs @@ -4,3 +4,4 @@ mod pending; mod proposed; mod recent_reject; mod util; +mod pool_map; \ No newline at end of file diff --git a/tx-pool/src/component/tests/pool_map.rs b/tx-pool/src/component/tests/pool_map.rs new file mode 100644 index 0000000000..7fcbd9e1a6 --- /dev/null +++ b/tx-pool/src/component/tests/pool_map.rs @@ -0,0 +1,236 @@ +use crate::component::tests::util::{ + build_tx, build_tx_with_dep, build_tx_with_header_dep, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, +}; +use crate::component::{ + entry::TxEntry, + pool_map::{PoolEntry, PoolMap, Status}, +}; +use ckb_types::{h256, packed::Byte32, prelude::*}; +use std::collections::HashSet; + +#[test] +fn test_basic() { + let mut pool = PoolMap::new(100); + assert_eq!(pool.size(), 0); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&Byte32::zero(), 2)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 2)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1.clone(), Status::Pending)); + assert!(pool.add_entry(entry2, Status::Pending)); + assert!(pool.size() == 2); + assert!(pool.contains_key(&tx1.proposal_short_id())); + assert!(pool.contains_key(&tx2.proposal_short_id())); + + assert_eq!(pool.inputs_len(), 4); + assert_eq!(pool.outputs_len(), 4); + + assert_eq!(pool.entries.get_by_id(&tx1.proposal_short_id()).unwrap().inner, entry1); + assert_eq!(pool.get_tx(&tx2.proposal_short_id()).unwrap(), &tx2); + + let txs = pool.drain(); + assert!(pool.entries.is_empty()); + assert!(pool.deps.is_empty()); + assert!(pool.inputs.is_empty()); + assert!(pool.header_deps.is_empty()); + assert!(pool.outputs.is_empty()); + assert_eq!(txs, vec![tx1, tx2]); +} + +#[test] +fn test_resolve_conflict() { + let mut pool = PoolMap::new(100); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let tx4 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x1").pack(), 1)], + 3, + ); + let tx5 = build_tx(vec![(&h256!("0x5").pack(), 1)], 3); + + let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1.clone(), Status::Pending)); + assert!(pool.add_entry(entry2.clone(), Status::Pending)); + assert!(pool.add_entry(entry3.clone(), Status::Pending)); + + let conflicts = pool.resolve_conflict(&tx4); + assert_eq!( + conflicts.into_iter().map(|i| i.0).collect::>(), + HashSet::from_iter(vec![entry1, entry2]) + ); + + let conflicts = pool.resolve_conflict(&tx5); + assert_eq!( + conflicts.into_iter().map(|i| i.0).collect::>(), + HashSet::from_iter(vec![entry3]) + ); +} + +#[test] +fn test_resolve_conflict_descendants() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1)], 1); + let tx3 = build_tx(vec![(&tx1.hash(), 0)], 2); + let tx4 = build_tx(vec![(&tx3.hash(), 0)], 1); + + let tx2 = build_tx(vec![(&tx1.hash(), 0)], 1); + + let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry4 = TxEntry::dummy_resolve(tx4, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1, Status::Pending)); + assert!(pool.add_entry(entry3.clone(), Status::Pending)); + assert!(pool.add_entry(entry4.clone(), Status::Pending)); + + let conflicts = pool.resolve_conflict(&tx2); + assert_eq!( + conflicts.into_iter().map(|i| i.0).collect::>(), + HashSet::from_iter(vec![entry3, entry4]) + ); +} + +#[test] +fn test_resolve_conflict_header_dep() { + let mut pool = PoolMap::new(1000); + + let header: Byte32 = h256!("0x1").pack(); + let tx = build_tx_with_header_dep( + vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], + vec![header.clone()], + 1, + ); + let tx1 = build_tx(vec![(&tx.hash(), 0)], 1); + + let entry = TxEntry::dummy_resolve(tx, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry.clone(), Status::Pending)); + assert!(pool.add_entry(entry1.clone(), Status::Pending)); + + assert_eq!(pool.inputs_len(), 3); + assert_eq!(pool.header_deps_len(), 1); + assert_eq!(pool.outputs_len(), 2); + + let mut headers = HashSet::new(); + headers.insert(header); + + let conflicts = pool.resolve_conflict_header_dep(&headers); + assert_eq!( + conflicts.into_iter().map(|i| i.0).collect::>(), + HashSet::from_iter(vec![entry, entry1]) + ); +} + + +#[test] +fn test_remove_entry() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let header: Byte32 = h256!("0x1").pack(); + let tx2 = build_tx_with_header_dep(vec![(&h256!("0x2").pack(), 1)], vec![header], 1); + + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1.clone(), Status::Pending)); + assert!(pool.add_entry(entry2.clone(), Status::Pending)); + + let removed = pool.remove_entry(&tx1.proposal_short_id()); + assert_eq!(removed, Some(entry1)); + let removed = pool.remove_entry(&tx2.proposal_short_id()); + assert_eq!(removed, Some(entry2)); + assert!(pool.entries.is_empty()); + assert!(pool.deps.is_empty()); + assert!(pool.inputs.is_empty()); + assert!(pool.header_deps.is_empty()); +} + + +#[test] +fn test_remove_entries_by_filter() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1, Status::Pending)); + assert!(pool.add_entry(entry2, Status::Pending)); + assert!(pool.add_entry(entry3, Status::Pending)); + + pool.remove_entries_by_filter(|id, _tx_entry| id == &tx1.proposal_short_id()); + + assert!(!pool.contains_key(&tx1.proposal_short_id())); + assert!(pool.contains_key(&tx2.proposal_short_id())); + assert!(pool.contains_key(&tx3.proposal_short_id())); +} + + +#[test] +fn test_fill_proposals() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1, Status::Pending)); + assert!(pool.add_entry(entry2, Status::Pending)); + assert!(pool.add_entry(entry3, Status::Pending)); + + assert_eq!(pool.inputs_len(), 5); + assert_eq!(pool.deps_len(), 1); + assert_eq!(pool.outputs_len(), 7); + + let id1 = tx1.proposal_short_id(); + let id2 = tx2.proposal_short_id(); + let id3 = tx3.proposal_short_id(); + + let mut ret = HashSet::new(); + pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!( + ret, + HashSet::from_iter(vec![id1.clone(), id2.clone(), id3.clone()]) + ); + + let mut ret = HashSet::new(); + pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id1.clone()])); + + let mut ret = HashSet::new(); + pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id1.clone(), id2.clone()])); + + let mut ret = HashSet::new(); + let mut exclusion = HashSet::new(); + exclusion.insert(id2); + pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); +} diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 5ee73cfbb8..c9dcc3b7fd 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -3,29 +3,19 @@ extern crate rustc_hash; extern crate slab; use super::component::{commit_txs_scanner::CommitTxsScanner, TxEntry}; use crate::callback::Callbacks; -use crate::component::container::AncestorsScoreSortKey; -use crate::component::entry::EvictKey; use crate::component::pending::PendingQueue; use crate::component::proposed::ProposedPool; use crate::component::recent_reject::RecentReject; use crate::error::Reject; +use crate::component::pool_map::{PoolMap, Status}; use crate::util::verify_rtx; use ckb_app_config::TxPoolConfig; use ckb_logger::{debug, error, trace, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; -use ckb_types::core::error::OutPointError; -use ckb_types::packed::OutPoint; -use ckb_types::{ - core::cell::{CellMetaBuilder, CellProvider, CellStatus}, - prelude::*, -}; use ckb_types::{ core::{ - cell::{ - resolve_transaction, CellChecker, OverlayCellChecker, OverlayCellProvider, - ResolvedTransaction, - }, + cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, tx_pool::{TxPoolEntryInfo, TxPoolIds}, Cycle, TransactionView, UncleBlockView, }, @@ -33,10 +23,7 @@ use ckb_types::{ }; use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; -use multi_index_map::MultiIndexMap; -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::collections::{HashSet, VecDeque}; +use std::collections::HashSet; use std::sync::Arc; const COMMITTED_HASH_CACHE_SIZE: usize = 100_000; @@ -69,38 +56,6 @@ macro_rules! evict_for_trim_size { type ConflictEntry = (TxEntry, Reject); -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum Status { - Pending, - Gap, - Proposed, -} - -#[derive(MultiIndexMap, Clone)] -pub struct PoolEntry { - #[multi_index(hashed_unique)] - pub id: ProposalShortId, - #[multi_index(ordered_non_unique)] - pub score: AncestorsScoreSortKey, - #[multi_index(ordered_non_unique)] - pub status: Status, - #[multi_index(ordered_non_unique)] - pub evict_key: EvictKey, - - pub inner: TxEntry, - // other sort key -} - -impl MultiIndexPoolEntryMap { - /// sorted by ancestor score from higher to lower - pub fn score_sorted_iter(&self) -> impl Iterator { - // Note: multi_index don't support reverse order iteration now - // so we need to collect and reverse - let entries = self.iter_by_score().collect::>(); - entries.into_iter().rev().map(move |entry| &entry.inner) - } -} - /// Tx-pool implementation pub struct TxPool { pub(crate) config: TxPoolConfig, @@ -111,17 +66,7 @@ pub struct TxPool { /// Tx pool that finely for commit pub(crate) proposed: ProposedPool, - /// The pool entries with different kinds of sort strategies - pub(crate) entries: MultiIndexPoolEntryMap, - /// dep-set map represent in-pool tx's header deps - pub(crate) header_deps: HashMap>, - /// dep-set map represent in-pool tx's deps - pub(crate) deps: HashMap>, - /// input-set map represent in-pool tx's inputs - pub(crate) inputs: HashMap>, - pub(crate) outputs: HashMap>, - pub(crate) max_ancestors_count: usize, - + pub(crate) pool_map: PoolMap, /// cache for committed transactions hash pub(crate) committed_txs_hash_cache: LruCache, // sum of all tx_pool tx's virtual sizes. @@ -145,12 +90,7 @@ impl TxPool { pending: PendingQueue::new(), gap: PendingQueue::new(), proposed: ProposedPool::new(config.max_ancestors_count), - entries: MultiIndexPoolEntryMap::default(), - header_deps: HashMap::default(), - deps: Default::default(), - inputs: Default::default(), - outputs: Default::default(), - max_ancestors_count: config.max_ancestors_count, + pool_map: PoolMap::new(config.max_ancestors_count), committed_txs_hash_cache: LruCache::new(COMMITTED_HASH_CACHE_SIZE), total_tx_size: 0, total_tx_cycles: 0, @@ -203,24 +143,6 @@ impl TxPool { self.total_tx_cycles = total_tx_cycles; } - fn add_poolentry(&mut self, entry: TxEntry, status: Status) -> bool { - let short_id = entry.proposal_short_id(); - if self.entries.get_by_id(&short_id).is_some() { - return false; - } - trace!("add_{:?} {}", status, entry.transaction().hash()); - let score = entry.as_score_key(); - let evict_key = entry.as_evict_key(); - self.entries.insert(PoolEntry { - id: short_id, - score, - status, - inner: entry, - evict_key, - }); - true - } - /// Add tx to pending pool /// If did have this value present, false is returned. pub fn add_pending(&mut self, entry: TxEntry) -> bool { @@ -232,7 +154,7 @@ impl TxPool { } pub fn add_pending_v2(&mut self, entry: TxEntry) -> bool { - self.add_poolentry(entry, Status::Pending) + self.pool_map.add_entry(entry, Status::Pending) } /// Add tx which proposed but still uncommittable to gap pool @@ -245,7 +167,7 @@ impl TxPool { } pub fn add_gap_v2(&mut self, entry: TxEntry) -> bool { - self.add_poolentry(entry, Status::Gap) + self.pool_map.add_entry(entry, Status::Gap) } /// Add tx to proposed pool @@ -255,7 +177,7 @@ impl TxPool { } pub fn add_proposed_v2(&mut self, entry: TxEntry) -> bool { - self.add_poolentry(entry, Status::Proposed) + self.pool_map.add_entry(entry, Status::Proposed) } /// Returns true if the tx-pool contains a tx with specified id. @@ -264,7 +186,7 @@ impl TxPool { } pub fn contains_proposal_id_v2(&self, id: &ProposalShortId) -> bool { - self.entries.get_by_id(id).is_some() + self.pool_map.get_by_id(id).is_some() } /// Returns tx with cycles corresponding to the id. @@ -285,7 +207,7 @@ impl TxPool { } pub fn get_tx_with_cycles_v2(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { - self.entries + self.pool_map .get_by_id(id) .map(|entry| (entry.inner.transaction().clone(), entry.inner.cycles)) } @@ -299,7 +221,7 @@ impl TxPool { } pub fn get_tx_v2(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.entries + self.pool_map .get_by_id(id) .map(|entry| entry.inner.transaction()) } @@ -310,7 +232,7 @@ impl TxPool { } pub fn get_entry_from_pending_or_gap_v2(&self, id: &ProposalShortId) -> Option<&TxEntry> { - if let Some(entry) = self.entries.get_by_id(id) { + if let Some(entry) = self.pool_map.get_by_id(id) { match entry.status { Status::Pending | Status::Gap => return Some(&entry.inner), _ => return None, @@ -338,7 +260,7 @@ impl TxPool { &self, id: &ProposalShortId, ) -> Option<&TransactionView> { - self.entries + self.pool_map .get_by_id(id) .map(|entry| entry.inner.transaction()) } @@ -384,124 +306,14 @@ impl TxPool { detached_headers: &HashSet, callbacks: &Callbacks, ) { - for (entry, reject) in self.__resolve_conflict_header_dep_v2(detached_headers) { + for (entry, reject) in self + .pool_map + .resolve_conflict_header_dep(detached_headers) + { callbacks.call_reject(self, &entry, reject); } } - pub(crate) fn get_descendants(&self, entry: &TxEntry) -> HashSet { - let mut entries: VecDeque<&TxEntry> = VecDeque::new(); - entries.push_back(entry); - - let mut descendants = HashSet::new(); - while let Some(entry) = entries.pop_front() { - let outputs = entry.transaction().output_pts(); - - for output in outputs { - if let Some(ids) = self.outputs.get(&output) { - for id in ids { - if descendants.insert(id.clone()) { - if let Some(entry) = self.entries.get_by_id(id) { - entries.push_back(&entry.inner); - } - } - } - } - } - } - descendants - } - - pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { - let inputs = entry.transaction().input_pts_iter(); - let tx_short_id = entry.proposal_short_id(); - let outputs = entry.transaction().output_pts(); - - for i in inputs { - if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - // remove dep - for d in entry.related_dep_out_points().cloned() { - if let Entry::Occupied(mut occupied) = self.deps.entry(d) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - for o in outputs { - self.outputs.remove(&o); - } - - self.header_deps.remove(&tx_short_id); - } - - fn remove_entry(&mut self, id: &ProposalShortId) -> Option { - let removed = self.entries.remove_by_id(id); - - if let Some(ref entry) = removed { - self.remove_entry_relation(&entry.inner); - } - removed.map(|e| e.inner) - } - - fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let mut removed = Vec::new(); - if let Some(entry) = self.entries.remove_by_id(id) { - let descendants = self.get_descendants(&entry.inner); - self.remove_entry_relation(&entry.inner); - removed.push(entry.inner); - for id in descendants { - if let Some(entry) = self.remove_entry(&id) { - removed.push(entry); - } - } - } - removed - } - - fn __resolve_conflict_header_dep_v2( - &mut self, - headers: &HashSet, - ) -> Vec { - let mut conflicts = Vec::new(); - - // invalid header deps - let mut ids = Vec::new(); - for (tx_id, deps) in self.header_deps.iter() { - for hash in deps { - if headers.contains(hash) { - ids.push((hash.clone(), tx_id.clone())); - break; - } - } - } - - for (blk_hash, id) in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash.to_owned())); - conflicts.push((entry, reject)); - } - } - conflicts - } - pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { let hash = tx.hash(); let short_id = tx.proposal_short_id(); @@ -544,44 +356,15 @@ impl TxPool { } } - fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let inputs = tx.input_pts_iter(); - let mut conflicts = Vec::new(); - - for i in inputs { - if let Some(ids) = self.inputs.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } - } - } - - // deps consumed - if let Some(ids) = self.deps.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } - } - } - } - conflicts - } - pub(crate) fn remove_committed_tx_v2(&mut self, tx: &TransactionView, callbacks: &Callbacks) { let hash = tx.hash(); let short_id = tx.proposal_short_id(); - if let Some(entry) = self.remove_entry(&short_id) { + if let Some(entry) = self.pool_map.remove_entry(&short_id) { debug!("remove_committed_tx from gap {}", hash); callbacks.call_committed(self, &entry) } { - let conflicts = self.resolve_conflict(tx); + let conflicts = self.pool_map.resolve_conflict(tx); for (entry, reject) in conflicts { callbacks.call_reject(self, &entry, reject); } @@ -619,35 +402,18 @@ impl TxPool { } } - fn remove_entries_by_filter bool>( - &mut self, - mut predicate: P, - ) -> Vec { - let mut removed = Vec::new(); - for (_, entry) in self.entries.iter() { - if predicate(&entry.id, &entry.inner) { - removed.push(entry.inner.clone()); - } - } - for entry in &removed { - self.remove_entry(&entry.proposal_short_id()); - } - - removed - } - // Expire all transaction (and their dependencies) in the pool. pub(crate) fn remove_expired_v2(&mut self, callbacks: &Callbacks) { let now_ms = ckb_systemtime::unix_time_as_millis(); let removed: Vec<_> = self - .entries + .pool_map .iter() - .filter(|&(_, entry)| self.expiry + entry.inner.timestamp < now_ms) - .map(|(_, entry)| entry.inner.clone()) + .filter(|&entry| self.expiry + entry.inner.timestamp < now_ms) + .map(|entry| entry.inner.clone()) .collect(); for entry in removed { - self.remove_entry(&entry.proposal_short_id()); + self.pool_map.remove_entry(&entry.proposal_short_id()); let tx_hash = entry.transaction().hash(); debug!("remove_expired {} timestamp({})", tx_hash, entry.timestamp); let reject = Reject::Expiry(entry.timestamp); @@ -670,13 +436,8 @@ impl TxPool { pub(crate) fn limit_size_v2(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { - if let Some(id) = self - .entries - .iter_by_evict_key() - .next() - .map(|entry| entry.id.clone()) - { - let removed = self.remove_entry_and_descendants(&id); + if let Some(id) = self.pool_map.next_evict_entry() { + let removed = self.pool_map.remove_entry_and_descendants(&id); for entry in removed { let tx_hash = entry.transaction().hash(); debug!( @@ -729,21 +490,21 @@ impl TxPool { ids: impl Iterator, ) { for id in ids { - if let Some(e) = self.entries.get_by_id(id) { + if let Some(e) = self.pool_map.get_by_id(id) { let status = e.status; // TODO: double check this if status == Status::Pending { continue; } - let mut entries = self.remove_entry_and_descendants(id); + let mut entries = self.pool_map.remove_entry_and_descendants(id); entries.sort_unstable_by_key(|entry| entry.ancestors_count); for mut entry in entries { let tx_hash = entry.transaction().hash(); entry.reset_ancestors_state(); let ret = self.add_pending(entry); debug!( - "remove_by_detached_proposal from {:?} {} add_pending {}", status, - tx_hash, ret + "remove_by_detached_proposal from {:?} {} add_pending {}", + status, tx_hash, ret ); } } @@ -773,7 +534,7 @@ impl TxPool { } pub(crate) fn remove_tx_v2(&mut self, id: &ProposalShortId) -> bool { - if let Some(entry) = self.remove_entry(id) { + if let Some(entry) = self.pool_map.remove_entry(id) { self.update_statics_for_remove_tx(entry.size, entry.cycles); return true; } @@ -805,7 +566,7 @@ impl TxPool { tx: TransactionView, ) -> Result, Reject> { let snapshot = self.snapshot(); - let provider = OverlayCellProvider::new(&self.entries, snapshot); + let provider = OverlayCellProvider::new(&self.pool_map.entries, snapshot); let mut seen_inputs = HashSet::new(); resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) .map(Arc::new) @@ -831,7 +592,7 @@ impl TxPool { rtx: &ResolvedTransaction, ) -> Result<(), Reject> { let snapshot = self.snapshot(); - let checker = OverlayCellChecker::new(&self.entries, snapshot); + let checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); let mut seen_inputs = HashSet::new(); rtx.check(&mut seen_inputs, &checker, snapshot) .map_err(Reject::Resolve) @@ -854,7 +615,7 @@ impl TxPool { rtx: &ResolvedTransaction, ) -> Result<(), Reject> { let snapshot = self.snapshot(); - let checker = OverlayCellChecker::new(&self.entries, snapshot); + let checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); let mut seen_inputs = HashSet::new(); rtx.check(&mut seen_inputs, &checker, snapshot) .map_err(Reject::Resolve) @@ -873,7 +634,7 @@ impl TxPool { rtx: &ResolvedTransaction, ) -> Result<(), Reject> { let snapshot = self.snapshot(); - let cell_checker = OverlayCellChecker::new(&self.entries, snapshot); + let cell_checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); let mut seen_inputs = HashSet::new(); rtx.check(&mut seen_inputs, &cell_checker, snapshot) .map_err(Reject::Resolve) @@ -941,24 +702,6 @@ impl TxPool { } } - // fill proposal txs - pub fn fill_proposals( - &self, - limit: usize, - exclusion: &HashSet, - proposals: &mut HashSet, - status: &Status, - ) { - for entry in self.entries.get_by_status(status) { - if proposals.len() == limit { - break; - } - if !exclusion.contains(&entry.id) { - proposals.insert(entry.id.clone()); - } - } - } - /// Get to-be-proposal transactions that may be included in the next block. pub fn get_proposals( &self, @@ -979,8 +722,8 @@ impl TxPool { exclusion: &HashSet, ) -> HashSet { let mut proposals = HashSet::with_capacity(limit); - self.fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); - self.fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); + self.pool_map.fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); + self.pool_map.fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); proposals } @@ -1018,10 +761,11 @@ impl TxPool { // This is for RPC request, performance is not critical pub(crate) fn get_ids_v2(&self) -> TxPoolIds { let pending: Vec = self + .pool_map .entries .get_by_status(&Status::Pending) .iter() - .chain(self.entries.get_by_status(&Status::Gap).iter()) + .chain(self.pool_map.entries.get_by_status(&Status::Gap).iter()) .map(|entry| entry.inner.transaction().hash()) .collect(); @@ -1057,14 +801,16 @@ impl TxPool { pub(crate) fn get_all_entry_info_v2(&self) -> TxPoolEntryInfo { let pending = self + .pool_map .entries .get_by_status(&Status::Pending) .iter() - .chain(self.entries.get_by_status(&Status::Gap).iter()) + .chain(self.pool_map.entries.get_by_status(&Status::Gap).iter()) .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) .collect(); let proposed = self + .pool_map .entries .get_by_status(&Status::Proposed) .iter() @@ -1075,7 +821,7 @@ impl TxPool { } pub(crate) fn drain_all_transactions(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed, &self.entries) + let mut txs = CommitTxsScanner::new(&self.proposed, &self.pool_map.entries) .txs_to_commit(self.total_tx_size, self.total_tx_cycles) .0 .into_iter() @@ -1091,7 +837,7 @@ impl TxPool { } pub(crate) fn drain_all_transactions_v2(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed, &self.entries) + let mut txs = CommitTxsScanner::new(&self.proposed, &self.pool_map.entries) .txs_to_commit(self.total_tx_size, self.total_tx_cycles) .0 .into_iter() @@ -1099,6 +845,7 @@ impl TxPool { .collect::>(); self.proposed.clear(); let mut pending = self + .pool_map .entries .remove_by_status(&Status::Pending) .into_iter() @@ -1106,6 +853,7 @@ impl TxPool { .collect::>(); txs.append(&mut pending); let mut gap = self + .pool_map .entries .remove_by_status(&Status::Gap) .into_iter() @@ -1114,10 +862,7 @@ impl TxPool { txs.append(&mut gap); self.total_tx_size = 0; self.total_tx_cycles = 0; - self.deps.clear(); - self.inputs.clear(); - self.header_deps.clear(); - self.outputs.clear(); + self.pool_map.clear(); // self.touch_last_txs_updated_at(); txs } @@ -1126,11 +871,7 @@ impl TxPool { self.pending = PendingQueue::new(); self.gap = PendingQueue::new(); self.proposed = ProposedPool::new(self.config.max_ancestors_count); - self.entries = MultiIndexPoolEntryMap::default(); - self.header_deps = HashMap::default(); - self.deps = HashMap::default(); - self.inputs = HashMap::default(); - self.outputs = HashMap::default(); + self.pool_map.clear(); self.snapshot = snapshot; self.committed_txs_hash_cache = LruCache::new(COMMITTED_HASH_CACHE_SIZE); self.total_tx_size = 0; @@ -1154,8 +895,9 @@ impl TxPool { max_block_cycles: Cycle, txs_size_limit: usize, ) -> (Vec, usize, Cycle) { - let (entries, size, cycles) = CommitTxsScanner::new(self.proposed(), &self.entries) - .txs_to_commit(txs_size_limit, max_block_cycles); + let (entries, size, cycles) = + CommitTxsScanner::new(self.proposed(), &self.pool_map.entries) + .txs_to_commit(txs_size_limit, max_block_cycles); if !entries.is_empty() { ckb_logger::info!( @@ -1193,41 +935,3 @@ impl TxPool { } } } - -impl CellProvider for MultiIndexPoolEntryMap { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { - match entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - { - Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - None => CellStatus::Unknown, - } - } else { - CellStatus::Unknown - } - } -} - -impl CellChecker for MultiIndexPoolEntryMap { - fn is_live(&self, out_point: &OutPoint) -> Option { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { - entry - .inner - .transaction() - .output(out_point.index().unpack()) - .map(|_| true) - } else { - None - } - } -} From b0cf66b00dbabd21697b3bd7963514de4b0559f1 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 17 May 2023 02:09:06 +0800 Subject: [PATCH 054/267] remove pending and gap --- chain/src/tests/dep_cell.rs | 1 - chain/src/tests/load_code_with_snapshot.rs | 4 + rpc/src/module/chain.rs | 2 + test/src/main.rs | 9 +- test/src/node.rs | 5 + test/src/specs/mining/fee.rs | 1 - test/src/specs/tx_pool/pool_reconcile.rs | 1 + test/src/specs/tx_pool/reorg_proposals.rs | 6 + test/src/util/mining.rs | 2 +- tx-pool/Cargo.toml | 1 + tx-pool/src/block_assembler/mod.rs | 1 + tx-pool/src/component/chunk.rs | 1 - tx-pool/src/component/commit_txs_scanner.rs | 25 +- tx-pool/src/component/container.rs | 492 -------------- tx-pool/src/component/edges.rs | 114 ++++ tx-pool/src/component/entry.rs | 17 +- tx-pool/src/component/links.rs | 144 +++++ tx-pool/src/component/mod.rs | 8 +- tx-pool/src/component/pending.rs | 363 ----------- tx-pool/src/component/pool_map.rs | 605 +++++++++++------- tx-pool/src/component/proposed.rs | 403 ------------ tx-pool/src/component/score_key.rs | 54 ++ tx-pool/src/component/tests/mod.rs | 3 +- tx-pool/src/component/tests/pending.rs | 166 +++-- tx-pool/src/component/tests/pool_map.rs | 236 ------- tx-pool/src/component/tests/proposed.rs | 152 +++-- .../tests/{container.rs => score_key.rs} | 21 +- tx-pool/src/pool.rs | 585 ++++------------- tx-pool/src/process.rs | 115 ++-- tx-pool/src/service.rs | 43 +- 30 files changed, 1123 insertions(+), 2457 deletions(-) delete mode 100644 tx-pool/src/component/container.rs create mode 100644 tx-pool/src/component/edges.rs create mode 100644 tx-pool/src/component/links.rs delete mode 100644 tx-pool/src/component/pending.rs delete mode 100644 tx-pool/src/component/proposed.rs create mode 100644 tx-pool/src/component/score_key.rs delete mode 100644 tx-pool/src/component/tests/pool_map.rs rename tx-pool/src/component/tests/{container.rs => score_key.rs} (94%) diff --git a/chain/src/tests/dep_cell.rs b/chain/src/tests/dep_cell.rs index 3f1bb3428e..3f557236fd 100644 --- a/chain/src/tests/dep_cell.rs +++ b/chain/src/tests/dep_cell.rs @@ -535,7 +535,6 @@ fn test_package_txs_with_deps_priority() { ); let txs = vec![tx2.clone(), tx1]; - for tx in &txs { let ret = tx_pool.submit_local_tx(tx.clone()).unwrap(); assert!(ret.is_ok(), "submit {} {:?}", tx.proposal_short_id(), ret); diff --git a/chain/src/tests/load_code_with_snapshot.rs b/chain/src/tests/load_code_with_snapshot.rs index 5c7c16583b..fcbe986ed8 100644 --- a/chain/src/tests/load_code_with_snapshot.rs +++ b/chain/src/tests/load_code_with_snapshot.rs @@ -263,6 +263,10 @@ fn _test_load_code_with_snapshot_after_hardfork(script_type: ScriptHashType) { let tx = create_call_load_is_even_tx(&issue_tx, 0); let tx_pool = shared.tx_pool_controller(); + + let tx_status = tx_pool.get_tx_status(tx.hash()); + assert_eq!(tx_status.unwrap().unwrap(), (TxStatus::Unknown, None)); + let ret = tx_pool.submit_local_tx(tx.clone()).unwrap(); assert!(ret.is_ok(), "ret {ret:?}"); diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index dce4c322f8..08574e13bb 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -2117,6 +2117,7 @@ impl ChainRpcImpl { only_committed: bool, ) -> Result { let snapshot = self.shared.snapshot(); + if let Some(tx_info) = snapshot.get_transaction_info(&tx_hash) { let cycles = if tx_info.is_cellbase() { None @@ -2202,6 +2203,7 @@ impl ChainRpcImpl { let transaction_with_status = transaction_with_status.unwrap(); Ok(transaction_with_status) } + fn get_block_by_hash( &self, snapshot: &Snapshot, diff --git a/test/src/main.rs b/test/src/main.rs index 0ef0feafff..2e8a8e8620 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -360,6 +360,12 @@ fn canonicalize_path>(path: P) -> PathBuf { .unwrap_or_else(|_| path.as_ref().to_path_buf()) } +fn _all_specs() -> Vec> { + // This case is not stable right now + //vec![Box::new(PoolResolveConflictAfterReorg)] + vec![Box::new(RemoveConflictFromPending)] +} + fn all_specs() -> Vec> { let mut specs: Vec> = vec![ Box::new(BlockSyncFromOne), @@ -402,7 +408,8 @@ fn all_specs() -> Vec> { Box::new(GetRawTxPool), Box::new(PoolReconcile), Box::new(PoolResurrect), - Box::new(PoolResolveConflictAfterReorg), + //TODO: (yukang) + //Box::new(PoolResolveConflictAfterReorg), Box::new(InvalidHeaderDep), #[cfg(not(target_os = "windows"))] Box::new(PoolPersisted), diff --git a/test/src/node.rs b/test/src/node.rs index 50a8c8f21d..da5d7421fe 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -5,6 +5,7 @@ use crate::{SYSTEM_CELL_ALWAYS_FAILURE_INDEX, SYSTEM_CELL_ALWAYS_SUCCESS_INDEX}; use ckb_app_config::CKBAppConfig; use ckb_chain_spec::consensus::Consensus; use ckb_chain_spec::ChainSpec; +use ckb_jsonrpc_types::TxStatus; use ckb_jsonrpc_types::{BlockFilter, BlockTemplate, TxPoolInfo}; use ckb_logger::{debug, error}; use ckb_resource::Resource; @@ -352,6 +353,10 @@ impl Node { .send_transaction(transaction.data().into()) } + pub fn get_transaction(&self, tx_hash: Byte32) -> TxStatus { + self.rpc_client().get_transaction(tx_hash).tx_status + } + pub fn remove_transaction(&self, tx_hash: Byte32) -> bool { self.rpc_client().remove_transaction(tx_hash) } diff --git a/test/src/specs/mining/fee.rs b/test/src/specs/mining/fee.rs index ecf6a10676..843e97227e 100644 --- a/test/src/specs/mining/fee.rs +++ b/test/src/specs/mining/fee.rs @@ -144,7 +144,6 @@ impl Spec for FeeOfMultipleMaxBlockProposalsLimit { txs.iter().for_each(|tx| { node.submit_transaction(tx); }); - (0..multiple).for_each(|_| { let block = node.new_block(None, None, None); node.submit_block(&block); diff --git a/test/src/specs/tx_pool/pool_reconcile.rs b/test/src/specs/tx_pool/pool_reconcile.rs index 3e1806767d..280c506cc8 100644 --- a/test/src/specs/tx_pool/pool_reconcile.rs +++ b/test/src/specs/tx_pool/pool_reconcile.rs @@ -120,6 +120,7 @@ impl Spec for PoolResolveConflictAfterReorg { node0.mine_with_blocking(|template| template.number.value() != (block.number() + 1)); node0.wait_for_tx_pool(); + for tx in txs[1..].iter() { assert!(is_transaction_proposed(node0, tx)); } diff --git a/test/src/specs/tx_pool/reorg_proposals.rs b/test/src/specs/tx_pool/reorg_proposals.rs index ab224e16e1..d3c0f5aefb 100644 --- a/test/src/specs/tx_pool/reorg_proposals.rs +++ b/test/src/specs/tx_pool/reorg_proposals.rs @@ -1,6 +1,7 @@ use crate::specs::tx_pool::utils::{assert_new_block_committed, prepare_tx_family}; use crate::utils::{blank, propose}; use crate::{Node, Spec}; +use ckb_jsonrpc_types::TxStatus; use ckb_types::core::BlockView; pub struct ReorgHandleProposals; @@ -40,8 +41,13 @@ impl Spec for ReorgHandleProposals { node_a.submit_transaction(family.b()); node_b.submit_transaction(family.a()); node_b.submit_transaction(family.b()); + node_a.submit_block(&propose(node_a, &[family.a()])); node_b.submit_block(&propose(node_b, &[family.b()])); + + assert!(node_a.get_transaction(family.a().hash()) == TxStatus::pending()); + assert!(node_a.get_transaction(family.b().hash()) == TxStatus::pending()); + (0..window.closest()).for_each(|_| { node_a.submit_block(&blank(node_a)); }); diff --git a/test/src/util/mining.rs b/test/src/util/mining.rs index d4880151bb..ebbf686522 100644 --- a/test/src/util/mining.rs +++ b/test/src/util/mining.rs @@ -112,11 +112,11 @@ impl Node { { let mut count = 0; let mut template = self.rpc_client().get_block_template(None, None, None); + while blocking(&mut template) { sleep(Duration::from_millis(100)); template = self.rpc_client().get_block_template(None, None, None); count += 1; - if count > 900 { panic!("mine_with_blocking timeout"); } diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index bc425d696c..0ce764ecb0 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -36,6 +36,7 @@ sentry = { version = "0.26.0", optional = true } serde_json = "1.0" rand = "0.8.4" hyper = { version = "0.14", features = ["http1", "client", "tcp"] } +#multi_index_map = { git = "https://github.com/wyjin/multi_index_map.git", branch = "master" } multi_index_map = "0.5.0" slab = "0.4" rustc-hash = "1.1" diff --git a/tx-pool/src/block_assembler/mod.rs b/tx-pool/src/block_assembler/mod.rs index 4f1f4b5edc..68b63db2de 100644 --- a/tx-pool/src/block_assembler/mod.rs +++ b/tx-pool/src/block_assembler/mod.rs @@ -203,6 +203,7 @@ impl BlockAssembler { current_template.cellbase.clone(), txs, )?; + let txs_size = checked_txs.iter().map(|tx| tx.size).sum(); let total_size = basic_size + txs_size; diff --git a/tx-pool/src/component/chunk.rs b/tx-pool/src/component/chunk.rs index f86b54fb55..225e0a4ea8 100644 --- a/tx-pool/src/component/chunk.rs +++ b/tx-pool/src/component/chunk.rs @@ -93,7 +93,6 @@ impl ChunkQueue { } /// If the queue did not have this tx present, true is returned. - /// /// If the queue did have this tx present, false is returned. pub fn add_tx(&mut self, tx: TransactionView, remote: Option<(Cycle, PeerIndex)>) -> bool { if self.contains_key(&tx.proposal_short_id()) { diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index a9b4287140..4e5d487cdf 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -1,8 +1,8 @@ -use crate::component::{container::AncestorsScoreSortKey, entry::TxEntry, proposed::ProposedPool}; +use crate::component::pool_map::PoolMap; +use crate::component::{entry::TxEntry, score_key::AncestorsScoreSortKey}; use ckb_types::{core::Cycle, packed::ProposalShortId}; use ckb_util::LinkedHashMap; use std::collections::{BTreeSet, HashMap, HashSet}; -use crate::component::pool_map::MultiIndexPoolEntryMap; // A template data struct used to store modified entries when package txs #[derive(Default)] @@ -49,8 +49,7 @@ const MAX_CONSECUTIVE_FAILURES: usize = 500; /// find txs to package into commitment pub struct CommitTxsScanner<'a> { - proposed_pool: &'a ProposedPool, - pool_entries: &'a MultiIndexPoolEntryMap, + pool_map: &'a PoolMap, entries: Vec, // modified_entries will store sorted packages after they are modified // because some of their txs are already in the block @@ -62,11 +61,10 @@ pub struct CommitTxsScanner<'a> { } impl<'a> CommitTxsScanner<'a> { - pub fn new(proposed_pool: &'a ProposedPool, pool_entries: &'a MultiIndexPoolEntryMap) -> CommitTxsScanner<'a> { + pub fn new(pool_map: &'a PoolMap) -> CommitTxsScanner<'a> { CommitTxsScanner { - proposed_pool, entries: Vec::new(), - pool_entries: pool_entries, + pool_map, modified_entries: TxModifiedEntries::default(), fetched_txs: HashSet::default(), failed_txs: HashSet::default(), @@ -83,7 +81,7 @@ impl<'a> CommitTxsScanner<'a> { let mut cycles: Cycle = 0; let mut consecutive_failed = 0; - let mut iter = self.pool_entries.score_sorted_iter().peekable(); + let mut iter = self.pool_map.score_sorted_iter().peekable(); loop { let mut using_modified = false; @@ -146,9 +144,10 @@ impl<'a> CommitTxsScanner<'a> { }; // prepare to package tx with ancestors - let ancestors_ids = self.proposed_pool.calc_ancestors(&short_id); + let ancestors_ids = self.pool_map.calc_ancestors(&short_id); let mut ancestors = ancestors_ids .iter() + .filter(|id| self.pool_map.has_proposed(id)) .filter_map(only_unconfirmed) .cloned() .collect::>(); @@ -181,7 +180,7 @@ impl<'a> CommitTxsScanner<'a> { fn retrieve_entry(&self, short_id: &ProposalShortId) -> Option<&TxEntry> { self.modified_entries .get(short_id) - .or_else(|| self.proposed_pool.get(short_id)) + .or_else(|| self.pool_map.get_proposed(short_id)) } // Skip entries in `proposed` that are already in a block or are present @@ -198,17 +197,17 @@ impl<'a> CommitTxsScanner<'a> { /// state updated assuming given transactions are inBlock. fn update_modified_entries(&mut self, already_added: &LinkedHashMap) { for (id, entry) in already_added { - let descendants = self.proposed_pool.calc_descendants(id); + let descendants = self.pool_map.calc_descendants(id); for desc_id in descendants .iter() - .filter(|id| !already_added.contains_key(id)) + .filter(|id| !already_added.contains_key(id) && self.pool_map.has_proposed(id)) { // Note: since https://github.com/nervosnetwork/ckb/pull/3706 // calc_descendants() may not consistent if let Some(mut desc) = self .modified_entries .remove(desc_id) - .or_else(|| self.proposed_pool.get(desc_id).cloned()) + .or_else(|| self.pool_map.get(desc_id).cloned()) { desc.sub_entry_weight(entry); self.modified_entries.insert(desc); diff --git a/tx-pool/src/component/container.rs b/tx-pool/src/component/container.rs deleted file mode 100644 index 2dc7752d94..0000000000 --- a/tx-pool/src/component/container.rs +++ /dev/null @@ -1,492 +0,0 @@ -//! The primary module containing the implementations of the transaction pool -//! and its top-level members. -extern crate rustc_hash; -extern crate slab; - -use crate::{component::entry::TxEntry, error::Reject}; -use ckb_types::{ - core::Capacity, - packed::{OutPoint, ProposalShortId}, -}; -use multi_index_map::MultiIndexMap; -use std::borrow::Cow; -use std::cmp::Ordering; -use std::collections::hash_map::Entry as HashMapEntry; -use std::collections::{HashMap, HashSet}; - -/// A struct to use as a sorted key -#[derive(Eq, PartialEq, Clone, Debug)] -pub struct AncestorsScoreSortKey { - pub fee: Capacity, - pub weight: u64, - pub id: ProposalShortId, - pub ancestors_fee: Capacity, - pub ancestors_weight: u64, - pub ancestors_size: usize, -} - -impl AncestorsScoreSortKey { - /// compare tx fee rate with ancestors fee rate and return the min one - pub(crate) fn min_fee_and_weight(&self) -> (Capacity, u64) { - // avoid division a_fee/a_weight > b_fee/b_weight - let tx_weight = u128::from(self.fee.as_u64()) * u128::from(self.ancestors_weight); - let ancestors_weight = u128::from(self.ancestors_fee.as_u64()) * u128::from(self.weight); - - if tx_weight < ancestors_weight { - (self.fee, self.weight) - } else { - (self.ancestors_fee, self.ancestors_weight) - } - } -} - -impl PartialOrd for AncestorsScoreSortKey { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for AncestorsScoreSortKey { - fn cmp(&self, other: &Self) -> Ordering { - // avoid division a_fee/a_weight > b_fee/b_weight - let (fee, weight) = self.min_fee_and_weight(); - let (other_fee, other_weight) = other.min_fee_and_weight(); - let self_weight = u128::from(fee.as_u64()) * u128::from(other_weight); - let other_weight = u128::from(other_fee.as_u64()) * u128::from(weight); - if self_weight == other_weight { - // if fee rate weight is same, then compare with ancestor weight - if self.ancestors_weight == other.ancestors_weight { - self.id.raw_data().cmp(&other.id.raw_data()) - } else { - self.ancestors_weight.cmp(&other.ancestors_weight) - } - } else { - self_weight.cmp(&other_weight) - } - } -} - -#[derive(Default, Debug, Clone)] -pub struct TxLinks { - pub parents: HashSet, - pub children: HashSet, -} - -#[derive(Clone, Copy)] -enum Relation { - Parents, - Children, -} - -impl TxLinks { - fn get_direct_ids(&self, relation: Relation) -> &HashSet { - match relation { - Relation::Parents => &self.parents, - Relation::Children => &self.children, - } - } -} - -#[derive(Default, Debug, Clone)] -pub struct TxLinksMap { - pub(crate) inner: HashMap, -} - -impl TxLinksMap { - fn new() -> Self { - TxLinksMap { - inner: Default::default(), - } - } - - fn calc_relative_ids( - &self, - short_id: &ProposalShortId, - relation: Relation, - ) -> HashSet { - let direct = self - .inner - .get(short_id) - .map(|link| link.get_direct_ids(relation)) - .cloned() - .unwrap_or_default(); - - self.calc_relation_ids(Cow::Owned(direct), relation) - } - - fn calc_relation_ids( - &self, - stage: Cow>, - relation: Relation, - ) -> HashSet { - let mut stage = stage.into_owned(); - let mut relation_ids = HashSet::with_capacity(stage.len()); - - while let Some(id) = stage.iter().next().cloned() { - relation_ids.insert(id.clone()); - stage.remove(&id); - - //recursively - for id in self - .inner - .get(&id) - .map(|link| link.get_direct_ids(relation)) - .cloned() - .unwrap_or_default() - { - if !relation_ids.contains(&id) { - stage.insert(id); - } - } - } - relation_ids - } - - fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet { - self.calc_relative_ids(short_id, Relation::Parents) - } - - fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet { - self.calc_relative_ids(short_id, Relation::Children) - } - - pub fn get_children(&self, short_id: &ProposalShortId) -> Option<&HashSet> { - self.inner.get(short_id).map(|link| &link.children) - } - - pub fn get_parents(&self, short_id: &ProposalShortId) -> Option<&HashSet> { - self.inner.get(short_id).map(|link| &link.parents) - } - - pub fn remove(&mut self, short_id: &ProposalShortId) -> Option { - self.inner.remove(short_id) - } - - fn remove_child( - &mut self, - short_id: &ProposalShortId, - child: &ProposalShortId, - ) -> Option { - self.inner - .get_mut(short_id) - .map(|links| links.children.remove(child)) - } - - fn remove_parent( - &mut self, - short_id: &ProposalShortId, - parent: &ProposalShortId, - ) -> Option { - self.inner - .get_mut(short_id) - .map(|links| links.parents.remove(parent)) - } - - fn add_child(&mut self, short_id: &ProposalShortId, child: ProposalShortId) -> Option { - self.inner - .get_mut(short_id) - .map(|links| links.children.insert(child)) - } - - fn add_parent(&mut self, short_id: &ProposalShortId, parent: ProposalShortId) -> Option { - self.inner - .get_mut(short_id) - .map(|links| links.parents.insert(parent)) - } - - fn clear(&mut self) { - self.inner.clear(); - } -} - -/// MultiIndexMap is used for multiple sort strategies, -/// to add any new sort strategy, you need to follow `AncestorsScoreSortKey` -/// and add logic to update the sort column in `insert_index_key` and `update_*_index_key` -#[derive(MultiIndexMap, Clone)] -pub struct IndexKey { - #[multi_index(hashed_unique)] - pub id: ProposalShortId, - #[multi_index(ordered_non_unique)] - pub score: AncestorsScoreSortKey, - // other sort key -} - -#[derive(Copy, Clone)] -enum EntryOp { - Add, - Remove, -} - -#[derive(Clone)] -pub(crate) struct SortedTxMap { - entries: HashMap, - pub(crate) sorted_index: MultiIndexIndexKeyMap, - deps: HashMap>, - /// A map track transaction ancestors and descendants - pub(crate) links: TxLinksMap, - max_ancestors_count: usize, -} - -impl SortedTxMap { - pub fn new(max_ancestors_count: usize) -> Self { - SortedTxMap { - entries: Default::default(), - sorted_index: MultiIndexIndexKeyMap::default(), - links: TxLinksMap::new(), - deps: Default::default(), - max_ancestors_count, - } - } - - pub fn size(&self) -> usize { - self.entries.len() - } - - pub fn iter(&self) -> impl Iterator { - self.entries.iter() - } - - fn insert_index_key(&mut self, entry: &TxEntry) { - self.sorted_index.insert(entry.as_index_key()); - } - - fn remove_sort_key(&mut self, entry: &TxEntry) { - self.sorted_index.remove_by_id(&entry.proposal_short_id()); - } - - fn update_descendants_index_key(&mut self, entry: &TxEntry, op: EntryOp) { - let descendants = self.calc_descendants(&entry.proposal_short_id()); - for desc_id in &descendants { - if let Some(desc_entry) = self.entries.get_mut(desc_id) { - let deleted = self - .sorted_index - .remove_by_id(&desc_entry.proposal_short_id()); - debug_assert!(deleted.is_some(), "pool inconsistent"); - - match op { - EntryOp::Remove => desc_entry.sub_entry_weight(entry), - EntryOp::Add => desc_entry.add_entry_weight(entry), - } - self.sorted_index.insert(desc_entry.as_index_key()); - } - } - } - - // Usually when a new transaction is added to the pool, it has no in-pool - // children (because any such children would be an orphan). So in add_entry(), we: - // - update a new entry's parents set to include all in-pool parents - // - update the new entry's parents to include the new tx as a child - // - update all ancestors of the transaction to include the new tx's size/fee - pub fn add_entry(&mut self, mut entry: TxEntry) -> Result { - let short_id = entry.proposal_short_id(); - - if self.contains_key(&short_id) { - return Ok(false); - }; - - // find in pool parents - let mut parents: HashSet = HashSet::with_capacity( - entry.transaction().inputs().len() + entry.transaction().cell_deps().len(), - ); - - for input in entry.transaction().inputs() { - let input_pt = input.previous_output(); - if let Some(deps) = self.deps.get(&input_pt) { - parents.extend(deps.iter().cloned()); - } - - let parent_hash = &input_pt.tx_hash(); - let id = ProposalShortId::from_tx_hash(parent_hash); - if self.links.inner.contains_key(&id) { - parents.insert(id); - } - } - for cell_dep in entry.transaction().cell_deps() { - let dep_pt = cell_dep.out_point(); - let id = ProposalShortId::from_tx_hash(&dep_pt.tx_hash()); - if self.links.inner.contains_key(&id) { - parents.insert(id); - } - } - - let ancestors = self - .links - .calc_relation_ids(Cow::Borrowed(&parents), Relation::Parents); - - // update parents references - for ancestor_id in &ancestors { - let ancestor = self.entries.get(ancestor_id).expect("pool consistent"); - entry.add_entry_weight(ancestor); - } - - if entry.ancestors_count > self.max_ancestors_count { - return Err(Reject::ExceededMaximumAncestorsCount); - } - - for cell_dep in entry.transaction().cell_deps() { - let dep_pt = cell_dep.out_point(); - // insert dep-ref map - self.deps - .entry(dep_pt) - .or_insert_with(HashSet::new) - .insert(short_id.clone()); - } - - for parent in &parents { - self.links.add_child(parent, short_id.clone()); - } - - // insert links - let links = TxLinks { - parents, - children: Default::default(), - }; - self.links.inner.insert(short_id.clone(), links); - self.insert_index_key(&entry); - self.entries.insert(short_id, entry); - Ok(true) - } - - // update_descendants_from_detached is used to update - // the descendants for a single transaction that has been added to the - // pool but may have child transactions in the pool, eg during a - // chain reorg. - pub fn update_descendants_from_detached( - &mut self, - id: &ProposalShortId, - children: HashSet, - ) { - if let Some(entry) = self.entries.get(id).cloned() { - for child in &children { - self.links.add_parent(child, id.clone()); - } - if let Some(links) = self.links.inner.get_mut(id) { - links.children.extend(children); - } - - self.update_descendants_index_key(&entry, EntryOp::Add); - } - } - - pub fn contains_key(&self, id: &ProposalShortId) -> bool { - self.entries.contains_key(id) - } - - pub fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { - self.entries.get(id) - } - - #[cfg(test)] - pub(crate) fn deps(&self) -> &HashMap> { - &self.deps - } - - fn update_deps_for_remove(&mut self, entry: &TxEntry) { - for cell_dep in entry.transaction().cell_deps() { - let dep_pt = cell_dep.out_point(); - if let HashMapEntry::Occupied(mut o) = self.deps.entry(dep_pt) { - let set = o.get_mut(); - if set.remove(&entry.proposal_short_id()) && set.is_empty() { - o.remove_entry(); - } - } - } - } - - fn update_children_for_remove(&mut self, id: &ProposalShortId) { - if let Some(children) = self.get_children(id).cloned() { - for child in children { - self.links.remove_parent(&child, id); - } - } - } - - fn update_parents_for_remove(&mut self, id: &ProposalShortId) { - if let Some(parents) = self.get_parents(id).cloned() { - for parent in parents { - self.links.remove_child(&parent, id); - } - } - } - - fn remove_unchecked(&mut self, id: &ProposalShortId) -> Option { - self.entries.remove(id).map(|entry| { - self.remove_sort_key(&entry); - self.update_deps_for_remove(&entry); - entry - }) - } - - pub fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let mut removed_ids = vec![id.to_owned()]; - let mut removed = vec![]; - let descendants = self.calc_descendants(id); - removed_ids.extend(descendants); - - // update links state for remove - for id in &removed_ids { - self.update_parents_for_remove(id); - self.update_children_for_remove(id); - } - - for id in removed_ids { - if let Some(entry) = self.remove_unchecked(&id) { - self.links.remove(&id); - removed.push(entry); - } - } - removed - } - - // notice: - // we are sure that all in-pool ancestor have already been processed. - // otherwise `links` will differ from the set of parents we'd calculate by searching - pub fn remove_entry(&mut self, id: &ProposalShortId) -> Option { - self.remove_unchecked(id).map(|entry| { - // We're not recursively removing a tx and all its descendants - // So we need update statistics state - self.update_descendants_index_key(&entry, EntryOp::Remove); - self.update_parents_for_remove(id); - self.update_children_for_remove(id); - self.links.remove(id); - entry - }) - } - - /// calculate all ancestors from pool - pub fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet { - self.links.calc_ancestors(short_id) - } - - /// calculate all descendants from pool - pub fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet { - self.links.calc_descendants(short_id) - } - - /// find children from pool - pub fn get_children(&self, short_id: &ProposalShortId) -> Option<&HashSet> { - self.links.get_children(short_id) - } - - /// find parents from pool - pub fn get_parents(&self, short_id: &ProposalShortId) -> Option<&HashSet> { - self.links.get_parents(short_id) - } - - /// sorted by ancestor score from higher to lower - pub fn score_sorted_iter(&self) -> impl Iterator { - // Note: multi_index don't support reverse order iteration now - // so we need to collect and reverse - let keys = self.sorted_index.iter_by_score().collect::>(); - keys.into_iter() - .rev() - .map(move |key| self.entries.get(&key.id).expect("consistent")) - } - - pub(crate) fn clear(&mut self) { - self.sorted_index.clear(); - self.deps.clear(); - self.links.clear(); - self.entries.clear(); - } -} diff --git a/tx-pool/src/component/edges.rs b/tx-pool/src/component/edges.rs new file mode 100644 index 0000000000..decf98c1a5 --- /dev/null +++ b/tx-pool/src/component/edges.rs @@ -0,0 +1,114 @@ +use ckb_types::packed::{Byte32, OutPoint, ProposalShortId}; +use std::collections::{hash_map::Entry, HashMap, HashSet}; + +#[derive(Debug, PartialEq, Clone)] +pub(crate) enum OutPointStatus { + UnConsumed, + Consumed(ProposalShortId), +} + +#[derive(Default, Debug, Clone)] +pub(crate) struct Edges { + /// input-txid map represent in-pool tx's inputs + pub(crate) inputs: HashMap, + /// output-op map represent in-pool tx's outputs + pub(crate) outputs: HashMap, + /// dep-set map represent in-pool tx's deps + pub(crate) deps: HashMap>, + /// dep-set map represent in-pool tx's header deps + pub(crate) header_deps: HashMap>, +} + +impl Edges { + #[cfg(test)] + pub(crate) fn outputs_len(&self) -> usize { + self.outputs.len() + } + + #[cfg(test)] + pub(crate) fn inputs_len(&self) -> usize { + self.inputs.len() + } + + #[cfg(test)] + pub(crate) fn header_deps_len(&self) -> usize { + self.header_deps.len() + } + + #[cfg(test)] + pub(crate) fn deps_len(&self) -> usize { + self.deps.len() + } + + pub(crate) fn insert_input(&mut self, out_point: OutPoint, txid: ProposalShortId) { + self.inputs.insert(out_point, txid); + } + + pub(crate) fn remove_input(&mut self, out_point: &OutPoint) -> Option { + self.inputs.remove(out_point) + } + + pub(crate) fn remove_output(&mut self, out_point: &OutPoint) -> Option { + match self.outputs.remove(out_point) { + Some(OutPointStatus::Consumed(id)) => Some(id), + _ => None, + } + } + + pub(crate) fn insert_unconsumed_output(&mut self, out_point: OutPoint) { + self.outputs.insert(out_point, OutPointStatus::UnConsumed); + } + + pub(crate) fn insert_consumed_output(&mut self, out_point: OutPoint, id: ProposalShortId) { + self.outputs.insert(out_point, OutPointStatus::Consumed(id)); + } + + pub(crate) fn get_input_ref(&self, out_point: &OutPoint) -> Option<&ProposalShortId> { + self.inputs.get(out_point) + } + + pub(crate) fn get_deps_ref(&self, out_point: &OutPoint) -> Option<&HashSet> { + self.deps.get(out_point) + } + + pub(crate) fn set_output_consumed( + &mut self, + out_point: &OutPoint, + tx_short_id: &ProposalShortId, + ) { + if let Some(status) = self.outputs.get_mut(out_point) { + *status = OutPointStatus::Consumed(tx_short_id.clone()); + } + } + + pub(crate) fn set_output_unconsumed(&mut self, out_point: &OutPoint) { + if let Some(status) = self.outputs.get_mut(out_point) { + *status = OutPointStatus::UnConsumed; + } + } + + pub(crate) fn get_output_ref(&self, out_point: &OutPoint) -> Option<&OutPointStatus> { + self.outputs.get(out_point) + } + + pub(crate) fn insert_deps(&mut self, out_point: OutPoint, txid: ProposalShortId) { + self.deps.entry(out_point).or_default().insert(txid); + } + + pub(crate) fn delete_txid_by_dep(&mut self, out_point: OutPoint, txid: &ProposalShortId) { + if let Entry::Occupied(mut occupied) = self.deps.entry(out_point) { + let ids = occupied.get_mut(); + ids.remove(txid); + if ids.is_empty() { + occupied.remove(); + } + } + } + + pub(crate) fn clear(&mut self) { + self.inputs.clear(); + self.outputs.clear(); + self.deps.clear(); + self.header_deps.clear(); + } +} diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 9dc6a027c7..bcce6a2e16 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -1,5 +1,4 @@ -use crate::component::container::AncestorsScoreSortKey; -use crate::component::container::IndexKey; +use crate::component::score_key::AncestorsScoreSortKey; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ core::{ @@ -100,11 +99,6 @@ impl TxEntry { EvictKey::from(self) } - /// Return a sort index - pub fn as_index_key(&self) -> IndexKey { - IndexKey::from(self) - } - /// Returns fee rate pub fn fee_rate(&self) -> FeeRate { let weight = get_transaction_weight(self.size, self.cycles); @@ -172,15 +166,6 @@ impl From<&TxEntry> for AncestorsScoreSortKey { } } -impl From<&TxEntry> for IndexKey { - fn from(entry: &TxEntry) -> Self { - IndexKey { - id: entry.proposal_short_id(), - score: entry.as_score_key(), - } - } -} - impl Hash for TxEntry { fn hash(&self, state: &mut H) { Hash::hash(self.transaction(), state); diff --git a/tx-pool/src/component/links.rs b/tx-pool/src/component/links.rs new file mode 100644 index 0000000000..520673b59d --- /dev/null +++ b/tx-pool/src/component/links.rs @@ -0,0 +1,144 @@ +use ckb_types::packed::ProposalShortId; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet}; + +#[derive(Default, Debug, Clone)] +pub struct TxLinks { + pub parents: HashSet, + pub children: HashSet, +} + +#[derive(Clone, Copy)] +pub enum Relation { + Parents, + Children, +} + +impl TxLinks { + fn get_direct_ids(&self, relation: Relation) -> &HashSet { + match relation { + Relation::Parents => &self.parents, + Relation::Children => &self.children, + } + } +} + +#[derive(Default, Debug, Clone)] +pub struct TxLinksMap { + pub inner: HashMap, +} + +impl TxLinksMap { + pub fn new() -> Self { + TxLinksMap { + inner: Default::default(), + } + } + + fn calc_relative_ids( + &self, + short_id: &ProposalShortId, + relation: Relation, + ) -> HashSet { + let direct = self + .inner + .get(short_id) + .map(|link| link.get_direct_ids(relation)) + .cloned() + .unwrap_or_default(); + + self.calc_relation_ids(Cow::Owned(direct), relation) + } + + pub fn calc_relation_ids( + &self, + stage: Cow>, + relation: Relation, + ) -> HashSet { + let mut stage = stage.into_owned(); + let mut relation_ids = HashSet::with_capacity(stage.len()); + + while let Some(id) = stage.iter().next().cloned() { + relation_ids.insert(id.clone()); + stage.remove(&id); + + //recursively + for id in self + .inner + .get(&id) + .map(|link| link.get_direct_ids(relation)) + .cloned() + .unwrap_or_default() + { + if !relation_ids.contains(&id) { + stage.insert(id); + } + } + } + relation_ids + } + + pub fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet { + self.calc_relative_ids(short_id, Relation::Parents) + } + + pub fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet { + self.calc_relative_ids(short_id, Relation::Children) + } + + pub fn get_children(&self, short_id: &ProposalShortId) -> Option<&HashSet> { + self.inner.get(short_id).map(|link| &link.children) + } + + pub fn get_parents(&self, short_id: &ProposalShortId) -> Option<&HashSet> { + self.inner.get(short_id).map(|link| &link.parents) + } + + pub fn remove(&mut self, short_id: &ProposalShortId) -> Option { + self.inner.remove(short_id) + } + + pub fn remove_child( + &mut self, + short_id: &ProposalShortId, + child: &ProposalShortId, + ) -> Option { + self.inner + .get_mut(short_id) + .map(|links| links.children.remove(child)) + } + + pub fn remove_parent( + &mut self, + short_id: &ProposalShortId, + parent: &ProposalShortId, + ) -> Option { + self.inner + .get_mut(short_id) + .map(|links| links.parents.remove(parent)) + } + + pub fn add_child( + &mut self, + short_id: &ProposalShortId, + child: ProposalShortId, + ) -> Option { + self.inner + .get_mut(short_id) + .map(|links| links.children.insert(child)) + } + + pub fn add_parent( + &mut self, + short_id: &ProposalShortId, + parent: ProposalShortId, + ) -> Option { + self.inner + .get_mut(short_id) + .map(|links| links.parents.insert(parent)) + } + + pub fn clear(&mut self) { + self.inner.clear(); + } +} diff --git a/tx-pool/src/component/mod.rs b/tx-pool/src/component/mod.rs index 60b4e78ae7..7f325424ba 100644 --- a/tx-pool/src/component/mod.rs +++ b/tx-pool/src/component/mod.rs @@ -2,12 +2,12 @@ pub mod commit_txs_scanner; pub mod entry; pub(crate) mod chunk; -pub(crate) mod container; +pub(crate) mod edges; +pub(crate) mod links; pub(crate) mod orphan; -pub(crate) mod pending; -pub(crate) mod proposed; -pub(crate) mod recent_reject; pub(crate) mod pool_map; +pub(crate) mod recent_reject; +pub(crate) mod score_key; #[cfg(test)] mod tests; diff --git a/tx-pool/src/component/pending.rs b/tx-pool/src/component/pending.rs deleted file mode 100644 index 3d0bd72383..0000000000 --- a/tx-pool/src/component/pending.rs +++ /dev/null @@ -1,363 +0,0 @@ -use crate::component::entry::TxEntry; -use ckb_types::{ - core::{ - cell::{CellChecker, CellMetaBuilder, CellProvider, CellStatus}, - error::OutPointError, - tx_pool::Reject, - TransactionView, - }, - packed::{Byte32, OutPoint, ProposalShortId}, - prelude::*, -}; -use ckb_util::{LinkedHashMap, LinkedHashMapEntries}; -use std::collections::{hash_map::Entry, HashMap, HashSet, VecDeque}; - -type ConflictEntry = (TxEntry, Reject); - -#[derive(Debug, Clone)] -pub(crate) struct PendingQueue { - pub(crate) inner: LinkedHashMap, - /// dep-set map represent in-pool tx's deps - pub(crate) deps: HashMap>, - /// input-set map represent in-pool tx's inputs - pub(crate) inputs: HashMap>, - /// dep-set map represent in-pool tx's header deps - pub(crate) header_deps: HashMap>, - // /// output-op map represent in-pool tx's outputs - pub(crate) outputs: HashMap>, -} - -impl PendingQueue { - pub(crate) fn new() -> Self { - PendingQueue { - inner: Default::default(), - deps: Default::default(), - inputs: Default::default(), - header_deps: Default::default(), - outputs: Default::default(), - } - } - - pub(crate) fn size(&self) -> usize { - self.inner.len() - } - - pub(crate) fn is_empty(&self) -> bool { - self.inner.len() == 0 - } - - #[cfg(test)] - pub(crate) fn outputs_len(&self) -> usize { - self.outputs.len() - } - - #[cfg(test)] - pub(crate) fn header_deps_len(&self) -> usize { - self.header_deps.len() - } - - #[cfg(test)] - pub(crate) fn deps_len(&self) -> usize { - self.deps.len() - } - - #[cfg(test)] - pub(crate) fn inputs_len(&self) -> usize { - self.inputs.len() - } - - pub(crate) fn add_entry(&mut self, entry: TxEntry) -> bool { - let tx_short_id = entry.proposal_short_id(); - if self.inner.contains_key(&tx_short_id) { - return false; - } - - let inputs = entry.transaction().input_pts_iter(); - let outputs = entry.transaction().output_pts(); - - for i in inputs { - self.inputs - .entry(i.to_owned()) - .or_default() - .insert(tx_short_id.clone()); - - if let Some(outputs) = self.outputs.get_mut(&i) { - outputs.insert(tx_short_id.clone()); - } - } - - // record dep-txid - for d in entry.related_dep_out_points() { - self.deps - .entry(d.to_owned()) - .or_default() - .insert(tx_short_id.clone()); - - if let Some(outputs) = self.outputs.get_mut(d) { - outputs.insert(tx_short_id.clone()); - } - } - - // record tx unconsumed output - for o in outputs { - self.outputs.insert(o, HashSet::new()); - } - - // record header_deps - let header_deps = entry.transaction().header_deps(); - if !header_deps.is_empty() { - self.header_deps - .insert(tx_short_id.clone(), header_deps.into_iter().collect()); - } - - self.inner.insert(tx_short_id, entry); - true - } - - pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let inputs = tx.input_pts_iter(); - let mut conflicts = Vec::new(); - - for i in inputs { - if let Some(ids) = self.inputs.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } - } - } - - // deps consumed - if let Some(ids) = self.deps.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } - } - } - } - conflicts - } - - pub(crate) fn resolve_conflict_header_dep( - &mut self, - headers: &HashSet, - ) -> Vec { - let mut conflicts = Vec::new(); - - // invalid header deps - let mut ids = Vec::new(); - for (tx_id, deps) in self.header_deps.iter() { - for hash in deps { - if headers.contains(hash) { - ids.push((hash.clone(), tx_id.clone())); - break; - } - } - } - - for (blk_hash, id) in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash.to_owned())); - conflicts.push((entry, reject)); - } - } - conflicts - } - - pub(crate) fn contains_key(&self, id: &ProposalShortId) -> bool { - self.inner.contains_key(id) - } - - pub fn iter(&self) -> impl Iterator { - self.inner.iter() - } - - pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { - self.inner.get(id) - } - - pub(crate) fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.inner.get(id).map(|entry| entry.transaction()) - } - - pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { - let removed = self.inner.remove(id); - - if let Some(ref entry) = removed { - self.remove_entry_relation(entry); - } - - removed - } - - pub(crate) fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let mut removed = Vec::new(); - if let Some(entry) = self.inner.remove(id) { - let descendants = self.get_descendants(&entry); - self.remove_entry_relation(&entry); - removed.push(entry); - for id in descendants { - if let Some(entry) = self.remove_entry(&id) { - removed.push(entry); - } - } - } - removed - } - - pub(crate) fn get_descendants(&self, entry: &TxEntry) -> HashSet { - let mut entries: VecDeque<&TxEntry> = VecDeque::new(); - entries.push_back(entry); - - let mut descendants = HashSet::new(); - while let Some(entry) = entries.pop_front() { - let outputs = entry.transaction().output_pts(); - - for output in outputs { - if let Some(ids) = self.outputs.get(&output) { - for id in ids { - if descendants.insert(id.clone()) { - if let Some(entry) = self.inner.get(id) { - entries.push_back(entry); - } - } - } - } - } - } - descendants - } - - pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { - let inputs = entry.transaction().input_pts_iter(); - let tx_short_id = entry.proposal_short_id(); - let outputs = entry.transaction().output_pts(); - - for i in inputs { - if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - // remove dep - for d in entry.related_dep_out_points().cloned() { - if let Entry::Occupied(mut occupied) = self.deps.entry(d) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - for o in outputs { - self.outputs.remove(&o); - } - - self.header_deps.remove(&tx_short_id); - } - - pub(crate) fn remove_entries_by_filter bool>( - &mut self, - mut predicate: P, - ) -> Vec { - let entries = self.entries(); - let mut removed = Vec::new(); - for entry in entries { - if predicate(entry.key(), entry.get()) { - removed.push(entry.remove()); - } - } - for entry in &removed { - self.remove_entry_relation(entry); - } - - removed - } - - pub fn entries(&mut self) -> LinkedHashMapEntries { - self.inner.entries() - } - - // fill proposal txs - pub fn fill_proposals( - &self, - limit: usize, - exclusion: &HashSet, - proposals: &mut HashSet, - ) { - for id in self.inner.keys() { - if proposals.len() == limit { - break; - } - if !exclusion.contains(id) { - proposals.insert(id.clone()); - } - } - } - - pub(crate) fn drain(&mut self) -> Vec { - let txs = self - .inner - .drain() - .map(|(_k, entry)| entry.into_transaction()) - .collect::>(); - self.deps.clear(); - self.inputs.clear(); - self.header_deps.clear(); - self.outputs.clear(); - txs - } -} - -impl CellProvider for PendingQueue { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.inner.get(&ProposalShortId::from_tx_hash(&tx_hash)) { - match entry - .transaction() - .output_with_data(out_point.index().unpack()) - { - Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - None => CellStatus::Unknown, - } - } else { - CellStatus::Unknown - } - } -} - -impl CellChecker for PendingQueue { - fn is_live(&self, out_point: &OutPoint) -> Option { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.inner.get(&ProposalShortId::from_tx_hash(&tx_hash)) { - entry - .transaction() - .output(out_point.index().unpack()) - .map(|_| true) - } else { - None - } - } -} diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 2a66e760f2..f9b00b75e3 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -1,25 +1,30 @@ //! Top-level Pool type, methods, and tests extern crate rustc_hash; extern crate slab; -use crate::component::container::AncestorsScoreSortKey; +use crate::component::edges::{Edges, OutPointStatus}; use crate::component::entry::EvictKey; +use crate::component::links::{Relation, TxLinksMap}; +use crate::component::score_key::AncestorsScoreSortKey; use crate::error::Reject; use crate::TxEntry; -use ckb_logger::{debug, error, trace, warn}; + +use ckb_logger::trace; use ckb_types::core::error::OutPointError; use ckb_types::packed::OutPoint; use ckb_types::{ - core::cell::{CellMetaBuilder, CellProvider, CellStatus}, - prelude::*, + bytes::Bytes, + core::{cell::CellChecker, TransactionView}, + packed::{Byte32, CellOutput, ProposalShortId}, }; use ckb_types::{ - core::{cell::CellChecker, TransactionView}, - packed::{Byte32, ProposalShortId}, + core::cell::{CellMetaBuilder, CellProvider, CellStatus}, + prelude::*, }; use multi_index_map::MultiIndexMap; -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::collections::{HashSet, VecDeque}; +use std::borrow::Cow; +use std::collections::HashSet; + +use super::links::TxLinks; type ConflictEntry = (TxEntry, Reject); @@ -30,6 +35,12 @@ pub enum Status { Proposed, } +#[derive(Copy, Clone)] +enum EntryOp { + Add, + Remove, +} + #[derive(MultiIndexMap, Clone)] pub struct PoolEntry { #[multi_index(hashed_unique)] @@ -40,9 +51,8 @@ pub struct PoolEntry { pub status: Status, #[multi_index(ordered_non_unique)] pub evict_key: EvictKey, - - pub inner: TxEntry, // other sort key + pub inner: TxEntry, } impl MultiIndexPoolEntryMap { @@ -50,7 +60,10 @@ impl MultiIndexPoolEntryMap { pub fn score_sorted_iter(&self) -> impl Iterator { // Note: multi_index don't support reverse order iteration now // so we need to collect and reverse - let entries = self.iter_by_score().collect::>(); + let entries = self + .iter_by_score() + .filter(|entry| entry.status == Status::Proposed) + .collect::>(); entries.into_iter().rev().map(move |entry| &entry.inner) } } @@ -58,13 +71,10 @@ impl MultiIndexPoolEntryMap { pub struct PoolMap { /// The pool entries with different kinds of sort strategies pub(crate) entries: MultiIndexPoolEntryMap, - /// dep-set map represent in-pool tx's header deps - pub(crate) header_deps: HashMap>, - /// dep-set map represent in-pool tx's deps - pub(crate) deps: HashMap>, - /// input-set map represent in-pool tx's inputs - pub(crate) inputs: HashMap>, - pub(crate) outputs: HashMap>, + /// All the deps, header_deps, inputs, outputs relationships + pub(crate) edges: Edges, + /// All the parent/children relationships + pub(crate) links: TxLinksMap, pub(crate) max_ancestors_count: usize, } @@ -72,197 +82,140 @@ impl PoolMap { pub fn new(max_ancestors_count: usize) -> Self { PoolMap { entries: MultiIndexPoolEntryMap::default(), - header_deps: HashMap::default(), - deps: HashMap::default(), - inputs: HashMap::default(), - outputs: HashMap::default(), + edges: Edges::default(), + links: TxLinksMap::new(), max_ancestors_count, } } #[cfg(test)] pub(crate) fn outputs_len(&self) -> usize { - self.outputs.len() + self.edges.outputs_len() } #[cfg(test)] pub(crate) fn header_deps_len(&self) -> usize { - self.header_deps.len() + self.edges.header_deps_len() } #[cfg(test)] pub(crate) fn deps_len(&self) -> usize { - self.deps.len() + self.edges.deps_len() } #[cfg(test)] pub(crate) fn inputs_len(&self) -> usize { - self.inputs.len() + self.edges.inputs_len() } #[cfg(test)] - pub fn size(&self) -> usize { + pub(crate) fn size(&self) -> usize { self.entries.len() } #[cfg(test)] - pub fn contains_key(&self, id: &ProposalShortId) -> bool { + pub(crate) fn contains_key(&self, id: &ProposalShortId) -> bool { self.entries.get_by_id(id).is_some() } + #[cfg(test)] pub(crate) fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { self.entries .get_by_id(id) .map(|entry| entry.inner.transaction()) } - pub fn add_entry(&mut self, entry: TxEntry, status: Status) -> bool { - let tx_short_id = entry.proposal_short_id(); - if self.entries.get_by_id(&tx_short_id).is_some() { - return false; - } - trace!("add_{:?} {}", status, entry.transaction().hash()); - let inputs = entry.transaction().input_pts_iter(); - let outputs = entry.transaction().output_pts(); - - for i in inputs { - self.inputs - .entry(i.to_owned()) - .or_default() - .insert(tx_short_id.clone()); - - if let Some(outputs) = self.outputs.get_mut(&i) { - outputs.insert(tx_short_id.clone()); - } - } - - // record dep-txid - for d in entry.related_dep_out_points() { - self.deps - .entry(d.to_owned()) - .or_default() - .insert(tx_short_id.clone()); - - if let Some(outputs) = self.outputs.get_mut(d) { - outputs.insert(tx_short_id.clone()); - } - } + #[cfg(test)] + pub(crate) fn add_proposed(&mut self, entry: TxEntry) -> Result { + self.add_entry(entry, Status::Proposed) + } - // record tx unconsumed output - for o in outputs { - self.outputs.insert(o, HashSet::new()); - } + #[cfg(test)] + pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView) -> Option { + self.remove_entry(&tx.proposal_short_id()) + } - // record header_deps - let header_deps = entry.transaction().header_deps(); - if !header_deps.is_empty() { - self.header_deps - .insert(tx_short_id.clone(), header_deps.into_iter().collect()); - } + pub(crate) fn get_by_id(&self, id: &ProposalShortId) -> Option<&PoolEntry> { + self.entries.get_by_id(id) + } - let score = entry.as_score_key(); - let evict_key = entry.as_evict_key(); - self.entries.insert(PoolEntry { - id: tx_short_id, - score, - status, - inner: entry, - evict_key, - }); - true + pub(crate) fn pending_size(&self) -> usize { + self.entries.get_by_status(&Status::Pending).len() + + self.entries.get_by_status(&Status::Gap).len() } - pub fn get_by_id(&self, id: &ProposalShortId) -> Option<&PoolEntry> { - self.entries.get_by_id(id).map(|entry| entry) + pub(crate) fn proposed_size(&self) -> usize { + self.entries.get_by_status(&Status::Proposed).len() } - fn get_descendants(&self, entry: &TxEntry) -> HashSet { - let mut entries: VecDeque<&TxEntry> = VecDeque::new(); - entries.push_back(entry); + pub(crate) fn score_sorted_iter(&self) -> impl Iterator { + self.entries.score_sorted_iter() + } - let mut descendants = HashSet::new(); - while let Some(entry) = entries.pop_front() { - let outputs = entry.transaction().output_pts(); + pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { + self.get_by_id(id).map(|entry| &entry.inner) + } - for output in outputs { - if let Some(ids) = self.outputs.get(&output) { - for id in ids { - if descendants.insert(id.clone()) { - if let Some(entry) = self.entries.get_by_id(id) { - entries.push_back(&entry.inner); - } - } - } - } - } + pub(crate) fn get_proposed(&self, id: &ProposalShortId) -> Option<&TxEntry> { + match self.get_by_id(id) { + Some(entry) if entry.status == Status::Proposed => Some(&entry.inner), + _ => None, } - descendants } - pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { - let inputs = entry.transaction().input_pts_iter(); - let tx_short_id = entry.proposal_short_id(); - let outputs = entry.transaction().output_pts(); + pub(crate) fn has_proposed(&self, id: &ProposalShortId) -> bool { + self.get_proposed(id).is_some() + } - for i in inputs { - if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } + /// calculate all ancestors from pool + pub(crate) fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet { + self.links.calc_ancestors(short_id) + } - // remove dep - for d in entry.related_dep_out_points().cloned() { - if let Entry::Occupied(mut occupied) = self.deps.entry(d) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } + /// calculate all descendants from pool + pub(crate) fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet { + self.links.calc_descendants(short_id) + } - for o in outputs { - self.outputs.remove(&o); - } + pub(crate) fn get_output_with_data(&self, out_point: &OutPoint) -> Option<(CellOutput, Bytes)> { + self.get(&ProposalShortId::from_tx_hash(&out_point.tx_hash())) + .and_then(|entry| { + entry + .transaction() + .output_with_data(out_point.index().unpack()) + }) + } - self.header_deps.remove(&tx_short_id); + pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { + if let Some(entry) = self.entries.remove_by_id(id) { + self.update_descendants_index_key(&entry.inner, EntryOp::Remove); + self.remove_entry_deps(&entry.inner); + self.remove_entry_edges(&entry.inner); + self.remove_entry_links(id); + return Some(entry.inner); + } + None } - pub fn remove_entry(&mut self, id: &ProposalShortId) -> Option { - let removed = self.entries.remove_by_id(id); + pub(crate) fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { + let mut removed_ids = vec![id.to_owned()]; + let mut removed = vec![]; + removed_ids.extend(self.calc_descendants(id)); - if let Some(ref entry) = removed { - self.remove_entry_relation(&entry.inner); + // update links state for remove + for id in &removed_ids { + self.remove_entry_links(id); } - removed.map(|e| e.inner) - } - pub fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let mut removed = Vec::new(); - if let Some(entry) = self.entries.remove_by_id(id) { - let descendants = self.get_descendants(&entry.inner); - self.remove_entry_relation(&entry.inner); - removed.push(entry.inner); - for id in descendants { - if let Some(entry) = self.remove_entry(&id) { - removed.push(entry); - } + for id in removed_ids { + if let Some(entry) = self.remove_entry(&id) { + removed.push(entry); } } removed } - pub fn resolve_conflict_header_dep( + pub(crate) fn resolve_conflict_header_dep( &mut self, headers: &HashSet, ) -> Vec { @@ -270,7 +223,7 @@ impl PoolMap { // invalid header deps let mut ids = Vec::new(); - for (tx_id, deps) in self.header_deps.iter() { + for (tx_id, deps) in self.edges.header_deps.iter() { for hash in deps { if headers.contains(hash) { ids.push((hash.clone(), tx_id.clone())); @@ -289,37 +242,45 @@ impl PoolMap { conflicts } - pub fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let inputs = tx.input_pts_iter(); + /// pending gap and proposed store the inputs and deps in edges, it's removed in `remove_entry` + /// here we use `input_pts_iter` and `related_dep_out_points` to find the conflict txs + pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { + let mut to_be_removed = Vec::new(); let mut conflicts = Vec::new(); - for i in inputs { - if let Some(ids) = self.inputs.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } + for (_, entry) in self.entries.iter() { + let entry = &entry.inner; + let tx_id = entry.proposal_short_id(); + let tx_inputs = entry.transaction().input_pts_iter(); + let deps = entry.related_dep_out_points(); + + // tx input conflict + for i in tx_inputs { + if tx.input_pts_iter().any(|j| i == j) { + to_be_removed.push((tx_id.to_owned(), i.clone())); } } - // deps consumed - if let Some(ids) = self.deps.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } + // tx deps conflict + for i in deps { + if tx.input_pts_iter().any(|j| *i == j) { + to_be_removed.push((tx_id.to_owned(), i.clone())); } } } + + for (tx_id, input) in to_be_removed.iter() { + let entries = self.remove_entry_and_descendants(tx_id); + let reject = Reject::Resolve(OutPointError::Dead(input.to_owned())); + let rejects = std::iter::repeat(reject).take(entries.len()); + conflicts.extend(entries.into_iter().zip(rejects)); + } + conflicts } // fill proposal txs - pub fn fill_proposals( + pub(crate) fn fill_proposals( &self, limit: usize, exclusion: &HashSet, @@ -336,12 +297,13 @@ impl PoolMap { } } - pub fn remove_entries_by_filter bool>( + pub(crate) fn remove_entries_by_filter bool>( &mut self, + status: &Status, mut predicate: P, ) -> Vec { let mut removed = Vec::new(); - for (_, entry) in self.entries.iter() { + for entry in self.entries.get_by_status(status) { if predicate(&entry.id, &entry.inner) { removed.push(entry.inner.clone()); } @@ -349,82 +311,283 @@ impl PoolMap { for entry in &removed { self.remove_entry(&entry.proposal_short_id()); } - removed } - pub fn iter(&self) -> impl Iterator { + pub(crate) fn iter(&self) -> impl Iterator { self.entries.iter().map(|(_, entry)| entry) } - pub fn iter_by_evict_key(&self) -> impl Iterator { + pub(crate) fn iter_by_evict_key(&self) -> impl Iterator { self.entries.iter_by_evict_key() } - pub fn next_evict_entry(&self) -> Option { + pub(crate) fn next_evict_entry(&self) -> Option { self.iter_by_evict_key() - .into_iter() .next() .map(|entry| entry.id.clone()) } - pub fn clear(&mut self) { + pub(crate) fn clear(&mut self) { self.entries = MultiIndexPoolEntryMap::default(); - self.deps.clear(); - self.inputs.clear(); - self.header_deps.clear(); - self.outputs.clear(); + self.edges.clear(); + self.links.clear(); } - pub(crate) fn drain(&mut self) -> Vec { - let txs = self - .entries - .iter() - .map(|(_k, entry)| entry.inner.clone().into_transaction()) - .collect::>(); - self.entries.clear(); - self.deps.clear(); - self.inputs.clear(); - self.header_deps.clear(); - self.outputs.clear(); - txs + fn remove_entry_links(&mut self, id: &ProposalShortId) { + if let Some(parents) = self.links.get_parents(id).cloned() { + for parent in parents { + self.links.remove_child(&parent, id); + } + } + if let Some(children) = self.links.get_children(id).cloned() { + for child in children { + self.links.remove_parent(&child, id); + } + } + self.links.remove(id); + } + + fn update_descendants_index_key(&mut self, parent: &TxEntry, op: EntryOp) { + let descendants: HashSet = + self.links.calc_descendants(&parent.proposal_short_id()); + for desc_id in &descendants { + // update child score + let entry = self.entries.get_by_id(desc_id).unwrap().clone(); + let mut child = entry.inner.clone(); + match op { + EntryOp::Remove => child.sub_entry_weight(parent), + EntryOp::Add => child.add_entry_weight(parent), + } + let short_id = child.proposal_short_id(); + //TODO: optimize it + self.entries.remove_by_id(&short_id); + self.insert_entry(&child, entry.status) + .expect("pool consistent"); + } + } + + fn record_entry_deps(&mut self, entry: &TxEntry) { + let tx_short_id: ProposalShortId = entry.proposal_short_id(); + let header_deps = entry.transaction().header_deps(); + let related_dep_out_points: Vec<_> = entry.related_dep_out_points().cloned().collect(); + + // record dep-txid + for d in related_dep_out_points { + self.edges.insert_deps(d.to_owned(), tx_short_id.clone()); + } + // record header_deps + if !header_deps.is_empty() { + self.edges + .header_deps + .insert(tx_short_id, header_deps.into_iter().collect()); + } + } + + fn record_entry_edges(&mut self, entry: &TxEntry) { + let tx_short_id: ProposalShortId = entry.proposal_short_id(); + let inputs = entry.transaction().input_pts_iter(); + let outputs = entry.transaction().output_pts(); + + let mut children = HashSet::new(); + // if input reference a in-pool output, connect it + // otherwise, record input for conflict check + for i in inputs { + self.edges.set_output_consumed(&i, &tx_short_id); + self.edges.insert_input(i.to_owned(), tx_short_id.clone()); + } + + // record tx output + for o in outputs { + if let Some(ids) = self.edges.get_deps_ref(&o).cloned() { + children.extend(ids); + } + if let Some(id) = self.edges.get_input_ref(&o).cloned() { + self.edges.insert_consumed_output(o, id.clone()); + children.insert(id); + } else { + self.edges.insert_unconsumed_output(o); + } + } + // update children + if !children.is_empty() { + self.update_descendants_from_detached(&tx_short_id, children); + } + } + + // update_descendants_from_detached is used to update + // the descendants for a single transaction that has been added to the + // pool but may have child transactions in the pool, eg during a + // chain reorg. + fn update_descendants_from_detached( + &mut self, + id: &ProposalShortId, + children: HashSet, + ) { + if let Some(entry) = self.get_by_id(id).cloned() { + for child in &children { + self.links.add_parent(child, id.clone()); + } + if let Some(links) = self.links.inner.get_mut(id) { + links.children.extend(children); + } + + self.update_descendants_index_key(&entry.inner, EntryOp::Add); + } + } + + /// Record the links for entry + fn record_entry_links(&mut self, entry: &mut TxEntry, status: &Status) -> Result { + // find in pool parents + let mut parents: HashSet = HashSet::with_capacity( + entry.transaction().inputs().len() + entry.transaction().cell_deps().len(), + ); + let short_id = entry.proposal_short_id(); + + for input in entry.transaction().inputs() { + let input_pt = input.previous_output(); + if let Some(deps) = self.edges.deps.get(&input_pt) { + parents.extend(deps.iter().cloned()); + } + + let parent_hash = &input_pt.tx_hash(); + let id = ProposalShortId::from_tx_hash(parent_hash); + if self.links.inner.contains_key(&id) { + parents.insert(id); + } + } + for cell_dep in entry.transaction().cell_deps() { + let dep_pt = cell_dep.out_point(); + let id = ProposalShortId::from_tx_hash(&dep_pt.tx_hash()); + if self.links.inner.contains_key(&id) { + parents.insert(id); + } + } + + let ancestors = self + .links + .calc_relation_ids(Cow::Borrowed(&parents), Relation::Parents); + + // update parents references + for ancestor_id in &ancestors { + let ancestor = self + .entries + .get_by_id(ancestor_id) + .expect("pool consistent"); + entry.add_entry_weight(&ancestor.inner); + } + if *status == Status::Proposed && entry.ancestors_count > self.max_ancestors_count { + return Err(Reject::ExceededMaximumAncestorsCount); + } + + for cell_dep in entry.transaction().cell_deps() { + let dep_pt = cell_dep.out_point(); + // insert dep-ref map + self.edges + .deps + .entry(dep_pt) + .or_insert_with(HashSet::new) + .insert(short_id.clone()); + } + + for parent in &parents { + self.links.add_child(parent, short_id.clone()); + } + + // insert links + let links = TxLinks { + parents, + children: Default::default(), + }; + self.links.inner.insert(short_id, links); + + Ok(true) + } + + fn remove_entry_edges(&mut self, entry: &TxEntry) { + let inputs = entry.transaction().input_pts_iter(); + let outputs = entry.transaction().output_pts(); + + for o in outputs { + self.edges.remove_output(&o); + } + + for i in inputs { + // release input record + self.edges.remove_input(&i); + self.edges.set_output_unconsumed(&i); + } + } + + fn remove_entry_deps(&mut self, entry: &TxEntry) { + let id = entry.proposal_short_id(); + for d in entry.related_dep_out_points().cloned() { + self.edges.delete_txid_by_dep(d, &id); + } + + self.edges.header_deps.remove(&id); + } + + pub(crate) fn add_entry(&mut self, mut entry: TxEntry, status: Status) -> Result { + let tx_short_id = entry.proposal_short_id(); + if self.entries.get_by_id(&tx_short_id).is_some() { + return Ok(false); + } + trace!("add_{:?} {}", status, entry.transaction().hash()); + self.record_entry_links(&mut entry, &status)?; + self.insert_entry(&entry, status)?; + self.record_entry_deps(&entry); + self.record_entry_edges(&entry); + Ok(true) + } + + fn insert_entry(&mut self, entry: &TxEntry, status: Status) -> Result { + let tx_short_id = entry.proposal_short_id(); + let score = entry.as_score_key(); + let evict_key = entry.as_evict_key(); + self.entries.insert(PoolEntry { + id: tx_short_id, + score, + status, + inner: entry.clone(), + evict_key, + }); + Ok(true) } } -impl CellProvider for MultiIndexPoolEntryMap { +impl CellProvider for PoolMap { fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { - match entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - { - Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - None => CellStatus::Unknown, + if let Some(id) = self.edges.get_input_ref(out_point) { + if self.has_proposed(id) { + return CellStatus::Dead; + } + } + match self.edges.get_output_ref(out_point) { + Some(OutPointStatus::UnConsumed) => { + let (output, data) = self.get_output_with_data(out_point).expect("output"); + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) } - } else { - CellStatus::Unknown + Some(OutPointStatus::Consumed(id)) if self.has_proposed(id) => CellStatus::Dead, + _ => CellStatus::Unknown, } } } -impl CellChecker for MultiIndexPoolEntryMap { +impl CellChecker for PoolMap { fn is_live(&self, out_point: &OutPoint) -> Option { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { - entry - .inner - .transaction() - .output(out_point.index().unpack()) - .map(|_| true) - } else { - None + if let Some(id) = self.edges.get_input_ref(out_point) { + if self.has_proposed(id) { + return Some(false); + } + } + match self.edges.get_output_ref(out_point) { + Some(OutPointStatus::Consumed(id)) if self.has_proposed(id) => Some(false), + Some(OutPointStatus::UnConsumed) => Some(true), + _ => None, } } } diff --git a/tx-pool/src/component/proposed.rs b/tx-pool/src/component/proposed.rs deleted file mode 100644 index 2ac9114aa8..0000000000 --- a/tx-pool/src/component/proposed.rs +++ /dev/null @@ -1,403 +0,0 @@ -use crate::component::container::SortedTxMap; -use crate::component::entry::TxEntry; -use crate::error::Reject; -use ckb_types::{ - bytes::Bytes, - core::{ - cell::{CellChecker, CellMetaBuilder, CellProvider, CellStatus}, - error::OutPointError, - TransactionView, - }, - packed::{Byte32, CellOutput, OutPoint, ProposalShortId}, - prelude::*, -}; -use std::collections::{hash_map::Entry, HashMap, HashSet}; -use std::iter; - -type ConflictEntry = (TxEntry, Reject); - -#[derive(Default, Debug, Clone)] -pub(crate) struct Edges { - /// output-op map represent in-pool tx's outputs - pub(crate) outputs: HashMap>, - /// input-txid map represent in-pool tx's inputs - pub(crate) inputs: HashMap, - /// dep-set map represent in-pool tx's deps - pub(crate) deps: HashMap>, - /// dep-set map represent in-pool tx's header deps - pub(crate) header_deps: HashMap>, -} - -impl Edges { - #[cfg(test)] - pub(crate) fn outputs_len(&self) -> usize { - self.outputs.len() - } - - #[cfg(test)] - pub(crate) fn inputs_len(&self) -> usize { - self.inputs.len() - } - - pub(crate) fn insert_input(&mut self, out_point: OutPoint, txid: ProposalShortId) { - self.inputs.insert(out_point, txid); - } - - pub(crate) fn remove_input(&mut self, out_point: &OutPoint) -> Option { - self.inputs.remove(out_point) - } - - pub(crate) fn remove_output(&mut self, out_point: &OutPoint) -> Option { - self.outputs.remove(out_point).unwrap_or(None) - } - - pub(crate) fn insert_output(&mut self, out_point: OutPoint) { - self.outputs.insert(out_point, None); - } - - pub(crate) fn insert_consumed_output(&mut self, out_point: OutPoint, id: ProposalShortId) { - self.outputs.insert(out_point, Some(id)); - } - - pub(crate) fn get_output_ref(&self, out_point: &OutPoint) -> Option<&Option> { - self.outputs.get(out_point) - } - - pub(crate) fn get_input_ref(&self, out_point: &OutPoint) -> Option<&ProposalShortId> { - self.inputs.get(out_point) - } - - pub(crate) fn get_deps_ref(&self, out_point: &OutPoint) -> Option<&HashSet> { - self.deps.get(out_point) - } - - pub(crate) fn get_mut_output( - &mut self, - out_point: &OutPoint, - ) -> Option<&mut Option> { - self.outputs.get_mut(out_point) - } - - pub(crate) fn remove_deps(&mut self, out_point: &OutPoint) -> Option> { - self.deps.remove(out_point) - } - - pub(crate) fn insert_deps(&mut self, out_point: OutPoint, txid: ProposalShortId) { - self.deps.entry(out_point).or_default().insert(txid); - } - - pub(crate) fn delete_txid_by_dep(&mut self, out_point: OutPoint, txid: &ProposalShortId) { - if let Entry::Occupied(mut occupied) = self.deps.entry(out_point) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(txid); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - pub(crate) fn clear(&mut self) { - self.outputs.clear(); - self.inputs.clear(); - self.deps.clear(); - self.header_deps.clear(); - } -} - -#[derive(Clone)] -pub struct ProposedPool { - pub(crate) edges: Edges, - inner: SortedTxMap, -} - -impl CellProvider for ProposedPool { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - if self.edges.get_input_ref(out_point).is_some() { - return CellStatus::Dead; - } - if let Some(x) = self.edges.get_output_ref(out_point) { - // output consumed - if x.is_some() { - return CellStatus::Dead; - } else { - let (output, data) = self.get_output_with_data(out_point).expect("output"); - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - return CellStatus::live_cell(cell_meta); - } - } - CellStatus::Unknown - } -} - -impl CellChecker for ProposedPool { - fn is_live(&self, out_point: &OutPoint) -> Option { - if self.edges.get_input_ref(out_point).is_some() { - return Some(false); - } - if let Some(x) = self.edges.get_output_ref(out_point) { - // output consumed - if x.is_some() { - return Some(false); - } else { - return Some(true); - } - } - None - } -} - -impl ProposedPool { - pub(crate) fn new(max_ancestors_count: usize) -> Self { - ProposedPool { - edges: Default::default(), - inner: SortedTxMap::new(max_ancestors_count), - } - } - - pub(crate) fn contains_key(&self, id: &ProposalShortId) -> bool { - self.inner.contains_key(id) - } - - pub fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { - self.inner.get(id) - } - - pub fn iter(&self) -> impl Iterator { - self.inner.iter() - } - - pub(crate) fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.get(id).map(|entry| entry.transaction()) - } - - pub fn size(&self) -> usize { - self.inner.size() - } - - pub(crate) fn get_output_with_data(&self, out_point: &OutPoint) -> Option<(CellOutput, Bytes)> { - self.inner - .get(&ProposalShortId::from_tx_hash(&out_point.tx_hash())) - .and_then(|entry| { - entry - .transaction() - .output_with_data(out_point.index().unpack()) - }) - } - - // remove entry and all it's descendants - pub(crate) fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let removed_entries = self.inner.remove_entry_and_descendants(id); - for entry in &removed_entries { - let tx = entry.transaction(); - let inputs = tx.input_pts_iter(); - let outputs = tx.output_pts(); - for i in inputs { - self.edges.inputs.remove(&i); - if let Some(id) = self.edges.get_mut_output(&i) { - *id = None; - } - } - - for d in entry.related_dep_out_points().cloned() { - self.edges.delete_txid_by_dep(d, id); - } - - for o in outputs { - self.edges.remove_output(&o); - } - - self.edges.header_deps.remove(&entry.proposal_short_id()); - } - removed_entries - } - - pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView) -> Option { - let outputs = tx.output_pts(); - let inputs = tx.input_pts_iter(); - let id = tx.proposal_short_id(); - - if let Some(entry) = self.inner.remove_entry(&id) { - for o in outputs { - self.edges.remove_output(&o); - } - - for i in inputs { - // release input record - self.edges.remove_input(&i); - if let Some(id) = self.edges.get_mut_output(&i) { - *id = None; - } - } - - for d in entry.related_dep_out_points().cloned() { - self.edges.delete_txid_by_dep(d, &id); - } - - self.edges.header_deps.remove(&id); - - return Some(entry); - } - None - } - - // In the event of a reorg, the assumption that a newly added tx has no - // in-pool children is false. In particular, the pool is in an - // inconsistent state while new transactions are being added, because there may - // be descendant transactions of a tx coming from a disconnected block that are - // unreachable from just looking at transactions in the pool (the linking - // transactions may also be in the disconnected block, waiting to be added). - // Because of this, there's not much benefit in trying to search for in-pool - // children in add_entry(). Instead, in the special case of transactions - // being added from a disconnected block, out-of-block descendants for all the - // in-block transactions by calling update_descendants_from_detached(). Note that - // until this is called, the pool state is not consistent, and in particular - // TxLinks may not be correct (and therefore functions like - // calc_ancestors() and calc_descendants() that rely - // on them to walk the pool are not generally safe to use). - pub(crate) fn add_entry(&mut self, entry: TxEntry) -> Result { - let tx_short_id = entry.proposal_short_id(); - - if self.inner.contains_key(&tx_short_id) { - return Ok(false); - } - - let inputs = entry.transaction().input_pts_iter(); - let outputs = entry.transaction().output_pts(); - let related_dep_out_points: Vec<_> = entry.related_dep_out_points().cloned().collect(); - let header_deps = entry.transaction().header_deps(); - - self.inner.add_entry(entry).map(|inserted| { - if inserted { - let mut children = HashSet::new(); - // if input reference a in-pool output, connect it - // otherwise, record input for conflict check - for i in inputs { - if let Some(id) = self.edges.get_mut_output(&i) { - *id = Some(tx_short_id.clone()); - } - self.edges.insert_input(i.to_owned(), tx_short_id.clone()); - } - - // record dep-txid - for d in related_dep_out_points { - self.edges.insert_deps(d.to_owned(), tx_short_id.clone()); - } - - // record tx output - for o in outputs { - if let Some(ids) = self.edges.get_deps_ref(&o).cloned() { - children.extend(ids); - } - if let Some(id) = self.edges.get_input_ref(&o).cloned() { - self.edges.insert_consumed_output(o, id.clone()); - children.insert(id); - } else { - self.edges.insert_output(o); - } - } - - // record header_deps - if !header_deps.is_empty() { - self.edges - .header_deps - .insert(tx_short_id.clone(), header_deps.into_iter().collect()); - } - - if !children.is_empty() { - self.inner - .update_descendants_from_detached(&tx_short_id, children); - } - } - inserted - }) - } - - pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let inputs = tx.input_pts_iter(); - let mut conflicts = Vec::new(); - - for i in inputs { - if let Some(id) = self.edges.remove_input(&i) { - let entries = self.remove_entry_and_descendants(&id); - if !entries.is_empty() { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - let rejects = iter::repeat(reject).take(entries.len()); - conflicts.extend(entries.into_iter().zip(rejects)); - } - } - - // deps consumed - if let Some(x) = self.edges.remove_deps(&i) { - for id in x { - let entries = self.remove_entry_and_descendants(&id); - if !entries.is_empty() { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - let rejects = iter::repeat(reject).take(entries.len()); - conflicts.extend(entries.into_iter().zip(rejects)); - } - } - } - } - - conflicts - } - - pub(crate) fn resolve_conflict_header_dep( - &mut self, - headers: &HashSet, - ) -> Vec { - let mut conflicts = Vec::new(); - - // invalid header deps - let mut invalid_header_ids = Vec::new(); - for (tx_id, deps) in self.edges.header_deps.iter() { - for hash in deps { - if headers.contains(hash) { - invalid_header_ids.push((hash.clone(), tx_id.clone())); - break; - } - } - } - - for (blk_hash, id) in invalid_header_ids { - let entries = self.remove_entry_and_descendants(&id); - if !entries.is_empty() { - let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash)); - let rejects = iter::repeat(reject).take(entries.len()); - conflicts.extend(entries.into_iter().zip(rejects)); - } - } - - conflicts - } - - /// sorted by ancestor score from higher to lower - pub fn score_sorted_iter(&self) -> impl Iterator { - self.inner.score_sorted_iter() - } - - /// find all ancestors from pool - pub fn calc_ancestors(&self, tx_short_id: &ProposalShortId) -> HashSet { - self.inner.calc_ancestors(tx_short_id) - } - - /// find all descendants from pool - pub fn calc_descendants(&self, tx_short_id: &ProposalShortId) -> HashSet { - self.inner.calc_descendants(tx_short_id) - } - - #[cfg(test)] - pub(crate) fn inner(&self) -> &SortedTxMap { - &self.inner - } - - pub(crate) fn clear(&mut self) { - self.edges.clear(); - self.inner.clear(); - } -} diff --git a/tx-pool/src/component/score_key.rs b/tx-pool/src/component/score_key.rs new file mode 100644 index 0000000000..011fb4000b --- /dev/null +++ b/tx-pool/src/component/score_key.rs @@ -0,0 +1,54 @@ +use ckb_types::{core::Capacity, packed::ProposalShortId}; +use std::cmp::Ordering; + +/// A struct to use as a sorted key +#[derive(Eq, PartialEq, Clone, Debug)] +pub struct AncestorsScoreSortKey { + pub fee: Capacity, + pub weight: u64, + pub id: ProposalShortId, + pub ancestors_fee: Capacity, + pub ancestors_weight: u64, + pub ancestors_size: usize, +} + +impl AncestorsScoreSortKey { + /// compare tx fee rate with ancestors fee rate and return the min one + pub(crate) fn min_fee_and_weight(&self) -> (Capacity, u64) { + // avoid division a_fee/a_weight > b_fee/b_weight + let tx_weight = u128::from(self.fee.as_u64()) * u128::from(self.ancestors_weight); + let ancestors_weight = u128::from(self.ancestors_fee.as_u64()) * u128::from(self.weight); + + if tx_weight < ancestors_weight { + (self.fee, self.weight) + } else { + (self.ancestors_fee, self.ancestors_weight) + } + } +} + +impl PartialOrd for AncestorsScoreSortKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for AncestorsScoreSortKey { + fn cmp(&self, other: &Self) -> Ordering { + // avoid division a_fee/a_weight > b_fee/b_weight + let (fee, weight) = self.min_fee_and_weight(); + let (other_fee, other_weight) = other.min_fee_and_weight(); + let self_weight = u128::from(fee.as_u64()) * u128::from(other_weight); + let other_weight = u128::from(other_fee.as_u64()) * u128::from(weight); + if self_weight == other_weight { + // if fee rate weight is same, then compare with ancestor weight + if self.ancestors_weight == other.ancestors_weight { + self.id.raw_data().cmp(&other.id.raw_data()) + } else { + self.ancestors_weight.cmp(&other.ancestors_weight) + } + } else { + self_weight.cmp(&other_weight) + } + } +} diff --git a/tx-pool/src/component/tests/mod.rs b/tx-pool/src/component/tests/mod.rs index 0f8bfcd719..d9a3529707 100644 --- a/tx-pool/src/component/tests/mod.rs +++ b/tx-pool/src/component/tests/mod.rs @@ -1,7 +1,6 @@ mod chunk; -mod container; mod pending; mod proposed; mod recent_reject; +mod score_key; mod util; -mod pool_map; \ No newline at end of file diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index d38ae3c1c0..b07e2e96e6 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -1,13 +1,19 @@ +use crate::component::edges::Edges; use crate::component::tests::util::{ build_tx, build_tx_with_dep, build_tx_with_header_dep, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, }; -use crate::component::{entry::TxEntry, pending::PendingQueue}; +use crate::component::{ + entry::TxEntry, + pool_map::{PoolMap, Status}, +}; +use ckb_types::packed::OutPoint; use ckb_types::{h256, packed::Byte32, prelude::*}; use std::collections::HashSet; #[test] fn test_basic() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(100); + assert_eq!(pool.size(), 0); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&Byte32::zero(), 2)], 1); let tx2 = build_tx( vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 2)], @@ -15,30 +21,36 @@ fn test_basic() { ); let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1.clone())); - assert!(queue.add_entry(entry2)); - assert!(queue.size() == 2); - assert!(queue.contains_key(&tx1.proposal_short_id())); - assert!(queue.contains_key(&tx2.proposal_short_id())); - - assert_eq!(queue.inputs_len(), 4); - assert_eq!(queue.outputs_len(), 4); - - assert_eq!(queue.get(&tx1.proposal_short_id()).unwrap(), &entry1); - assert_eq!(queue.get_tx(&tx2.proposal_short_id()).unwrap(), &tx2); - - let txs = queue.drain(); - assert!(queue.inner.is_empty()); - assert!(queue.deps.is_empty()); - assert!(queue.inputs.is_empty()); - assert!(queue.header_deps.is_empty()); - assert!(queue.outputs.is_empty()); - assert_eq!(txs, vec![tx1, tx2]); + assert!(pool.add_entry(entry1.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.size() == 2); + assert!(pool.contains_key(&tx1.proposal_short_id())); + assert!(pool.contains_key(&tx2.proposal_short_id())); + + assert_eq!(pool.inputs_len(), 4); + assert_eq!(pool.outputs_len(), 4); + + assert_eq!( + pool.entries + .get_by_id(&tx1.proposal_short_id()) + .unwrap() + .inner, + entry1 + ); + assert_eq!(pool.get_tx(&tx2.proposal_short_id()).unwrap(), &tx2); + assert_eq!(pool.edges.deps.len(), 0); + + pool.clear(); + assert!(pool.entries.is_empty()); + assert!(pool.edges.deps.is_empty()); + assert!(pool.edges.inputs.is_empty()); + assert!(pool.edges.header_deps.is_empty()); + assert!(pool.edges.outputs.is_empty()); } #[test] fn test_resolve_conflict() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(100); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); let tx2 = build_tx( vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], @@ -58,17 +70,17 @@ fn test_resolve_conflict() { let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1.clone())); - assert!(queue.add_entry(entry2.clone())); - assert!(queue.add_entry(entry3.clone())); + assert!(pool.add_entry(entry1.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry2.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry3.clone(), Status::Pending).unwrap()); - let conflicts = queue.resolve_conflict(&tx4); + let conflicts = pool.resolve_conflict(&tx4); assert_eq!( conflicts.into_iter().map(|i| i.0).collect::>(), HashSet::from_iter(vec![entry1, entry2]) ); - let conflicts = queue.resolve_conflict(&tx5); + let conflicts = pool.resolve_conflict(&tx5); assert_eq!( conflicts.into_iter().map(|i| i.0).collect::>(), HashSet::from_iter(vec![entry3]) @@ -77,7 +89,7 @@ fn test_resolve_conflict() { #[test] fn test_resolve_conflict_descendants() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let tx1 = build_tx(vec![(&Byte32::zero(), 1)], 1); let tx3 = build_tx(vec![(&tx1.hash(), 0)], 2); let tx4 = build_tx(vec![(&tx3.hash(), 0)], 1); @@ -87,11 +99,11 @@ fn test_resolve_conflict_descendants() { let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry4 = TxEntry::dummy_resolve(tx4, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1)); - assert!(queue.add_entry(entry3.clone())); - assert!(queue.add_entry(entry4.clone())); + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry4.clone(), Status::Pending).unwrap()); - let conflicts = queue.resolve_conflict(&tx2); + let conflicts = pool.resolve_conflict(&tx2); assert_eq!( conflicts.into_iter().map(|i| i.0).collect::>(), HashSet::from_iter(vec![entry3, entry4]) @@ -100,7 +112,7 @@ fn test_resolve_conflict_descendants() { #[test] fn test_resolve_conflict_header_dep() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let header: Byte32 = h256!("0x1").pack(); let tx = build_tx_with_header_dep( @@ -112,17 +124,17 @@ fn test_resolve_conflict_header_dep() { let entry = TxEntry::dummy_resolve(tx, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry.clone())); - assert!(queue.add_entry(entry1.clone())); + assert!(pool.add_entry(entry.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry1.clone(), Status::Pending).unwrap()); - assert_eq!(queue.inputs_len(), 3); - assert_eq!(queue.header_deps_len(), 1); - assert_eq!(queue.outputs_len(), 2); + assert_eq!(pool.inputs_len(), 3); + assert_eq!(pool.header_deps_len(), 1); + assert_eq!(pool.outputs_len(), 2); let mut headers = HashSet::new(); headers.insert(header); - let conflicts = queue.resolve_conflict_header_dep(&headers); + let conflicts = pool.resolve_conflict_header_dep(&headers); assert_eq!( conflicts.into_iter().map(|i| i.0).collect::>(), HashSet::from_iter(vec![entry, entry1]) @@ -131,29 +143,29 @@ fn test_resolve_conflict_header_dep() { #[test] fn test_remove_entry() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); let header: Byte32 = h256!("0x1").pack(); let tx2 = build_tx_with_header_dep(vec![(&h256!("0x2").pack(), 1)], vec![header], 1); let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1.clone())); - assert!(queue.add_entry(entry2.clone())); + assert!(pool.add_entry(entry1.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry2.clone(), Status::Pending).unwrap()); - let removed = queue.remove_entry(&tx1.proposal_short_id()); + let removed = pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(removed, Some(entry1)); - let removed = queue.remove_entry(&tx2.proposal_short_id()); + let removed = pool.remove_entry(&tx2.proposal_short_id()); assert_eq!(removed, Some(entry2)); - assert!(queue.inner.is_empty()); - assert!(queue.deps.is_empty()); - assert!(queue.inputs.is_empty()); - assert!(queue.header_deps.is_empty()); + assert!(pool.entries.is_empty()); + assert!(pool.edges.deps.is_empty()); + assert!(pool.edges.inputs.is_empty()); + assert!(pool.edges.header_deps.is_empty()); } #[test] fn test_remove_entries_by_filter() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); let tx2 = build_tx( vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], @@ -167,20 +179,22 @@ fn test_remove_entries_by_filter() { let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1)); - assert!(queue.add_entry(entry2)); - assert!(queue.add_entry(entry3)); + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); - queue.remove_entries_by_filter(|id, _tx_entry| id == &tx1.proposal_short_id()); + pool.remove_entries_by_filter(&Status::Pending, |id, _tx_entry| { + id == &tx1.proposal_short_id() + }); - assert!(!queue.contains_key(&tx1.proposal_short_id())); - assert!(queue.contains_key(&tx2.proposal_short_id())); - assert!(queue.contains_key(&tx3.proposal_short_id())); + assert!(!pool.contains_key(&tx1.proposal_short_id())); + assert!(pool.contains_key(&tx2.proposal_short_id())); + assert!(pool.contains_key(&tx3.proposal_short_id())); } #[test] fn test_fill_proposals() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); let tx2 = build_tx( vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], @@ -194,36 +208,54 @@ fn test_fill_proposals() { let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1)); - assert!(queue.add_entry(entry2)); - assert!(queue.add_entry(entry3)); + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); - assert_eq!(queue.inputs_len(), 5); - assert_eq!(queue.deps_len(), 1); - assert_eq!(queue.outputs_len(), 7); + assert_eq!(pool.inputs_len(), 5); + assert_eq!(pool.deps_len(), 1); + assert_eq!(pool.outputs_len(), 7); let id1 = tx1.proposal_short_id(); let id2 = tx2.proposal_short_id(); let id3 = tx3.proposal_short_id(); let mut ret = HashSet::new(); - queue.fill_proposals(10, &HashSet::new(), &mut ret); + pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); assert_eq!( ret, HashSet::from_iter(vec![id1.clone(), id2.clone(), id3.clone()]) ); let mut ret = HashSet::new(); - queue.fill_proposals(1, &HashSet::new(), &mut ret); + pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id1.clone()])); let mut ret = HashSet::new(); - queue.fill_proposals(2, &HashSet::new(), &mut ret); + pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id1.clone(), id2.clone()])); let mut ret = HashSet::new(); let mut exclusion = HashSet::new(); exclusion.insert(id2); - queue.fill_proposals(2, &exclusion, &mut ret); + pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); } + +#[test] +fn test_edges() { + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx(vec![(&h256!("0x1").pack(), 1)], 1); + + let short_id1 = tx1.proposal_short_id(); + let short_id2 = tx2.proposal_short_id(); + let mut edges = Edges::default(); + let outpoint = OutPoint::default(); + edges.insert_deps(outpoint.clone(), short_id1.clone()); + edges.insert_deps(outpoint.clone(), short_id2.clone()); + assert!(edges.deps.contains_key(&outpoint)); + edges.delete_txid_by_dep(outpoint.clone(), &short_id1); + assert!(!edges.deps.is_empty()); + edges.delete_txid_by_dep(outpoint, &short_id2); + assert!(edges.deps.is_empty()); +} diff --git a/tx-pool/src/component/tests/pool_map.rs b/tx-pool/src/component/tests/pool_map.rs deleted file mode 100644 index 7fcbd9e1a6..0000000000 --- a/tx-pool/src/component/tests/pool_map.rs +++ /dev/null @@ -1,236 +0,0 @@ -use crate::component::tests::util::{ - build_tx, build_tx_with_dep, build_tx_with_header_dep, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, -}; -use crate::component::{ - entry::TxEntry, - pool_map::{PoolEntry, PoolMap, Status}, -}; -use ckb_types::{h256, packed::Byte32, prelude::*}; -use std::collections::HashSet; - -#[test] -fn test_basic() { - let mut pool = PoolMap::new(100); - assert_eq!(pool.size(), 0); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&Byte32::zero(), 2)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 2)], - 3, - ); - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1.clone(), Status::Pending)); - assert!(pool.add_entry(entry2, Status::Pending)); - assert!(pool.size() == 2); - assert!(pool.contains_key(&tx1.proposal_short_id())); - assert!(pool.contains_key(&tx2.proposal_short_id())); - - assert_eq!(pool.inputs_len(), 4); - assert_eq!(pool.outputs_len(), 4); - - assert_eq!(pool.entries.get_by_id(&tx1.proposal_short_id()).unwrap().inner, entry1); - assert_eq!(pool.get_tx(&tx2.proposal_short_id()).unwrap(), &tx2); - - let txs = pool.drain(); - assert!(pool.entries.is_empty()); - assert!(pool.deps.is_empty()); - assert!(pool.inputs.is_empty()); - assert!(pool.header_deps.is_empty()); - assert!(pool.outputs.is_empty()); - assert_eq!(txs, vec![tx1, tx2]); -} - -#[test] -fn test_resolve_conflict() { - let mut pool = PoolMap::new(100); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], - 3, - ); - let tx3 = build_tx_with_dep( - vec![(&h256!("0x4").pack(), 1)], - vec![(&h256!("0x5").pack(), 1)], - 3, - ); - let tx4 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x1").pack(), 1)], - 3, - ); - let tx5 = build_tx(vec![(&h256!("0x5").pack(), 1)], 3); - - let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1.clone(), Status::Pending)); - assert!(pool.add_entry(entry2.clone(), Status::Pending)); - assert!(pool.add_entry(entry3.clone(), Status::Pending)); - - let conflicts = pool.resolve_conflict(&tx4); - assert_eq!( - conflicts.into_iter().map(|i| i.0).collect::>(), - HashSet::from_iter(vec![entry1, entry2]) - ); - - let conflicts = pool.resolve_conflict(&tx5); - assert_eq!( - conflicts.into_iter().map(|i| i.0).collect::>(), - HashSet::from_iter(vec![entry3]) - ); -} - -#[test] -fn test_resolve_conflict_descendants() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1)], 1); - let tx3 = build_tx(vec![(&tx1.hash(), 0)], 2); - let tx4 = build_tx(vec![(&tx3.hash(), 0)], 1); - - let tx2 = build_tx(vec![(&tx1.hash(), 0)], 1); - - let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry4 = TxEntry::dummy_resolve(tx4, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1, Status::Pending)); - assert!(pool.add_entry(entry3.clone(), Status::Pending)); - assert!(pool.add_entry(entry4.clone(), Status::Pending)); - - let conflicts = pool.resolve_conflict(&tx2); - assert_eq!( - conflicts.into_iter().map(|i| i.0).collect::>(), - HashSet::from_iter(vec![entry3, entry4]) - ); -} - -#[test] -fn test_resolve_conflict_header_dep() { - let mut pool = PoolMap::new(1000); - - let header: Byte32 = h256!("0x1").pack(); - let tx = build_tx_with_header_dep( - vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], - vec![header.clone()], - 1, - ); - let tx1 = build_tx(vec![(&tx.hash(), 0)], 1); - - let entry = TxEntry::dummy_resolve(tx, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry.clone(), Status::Pending)); - assert!(pool.add_entry(entry1.clone(), Status::Pending)); - - assert_eq!(pool.inputs_len(), 3); - assert_eq!(pool.header_deps_len(), 1); - assert_eq!(pool.outputs_len(), 2); - - let mut headers = HashSet::new(); - headers.insert(header); - - let conflicts = pool.resolve_conflict_header_dep(&headers); - assert_eq!( - conflicts.into_iter().map(|i| i.0).collect::>(), - HashSet::from_iter(vec![entry, entry1]) - ); -} - - -#[test] -fn test_remove_entry() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let header: Byte32 = h256!("0x1").pack(); - let tx2 = build_tx_with_header_dep(vec![(&h256!("0x2").pack(), 1)], vec![header], 1); - - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1.clone(), Status::Pending)); - assert!(pool.add_entry(entry2.clone(), Status::Pending)); - - let removed = pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(removed, Some(entry1)); - let removed = pool.remove_entry(&tx2.proposal_short_id()); - assert_eq!(removed, Some(entry2)); - assert!(pool.entries.is_empty()); - assert!(pool.deps.is_empty()); - assert!(pool.inputs.is_empty()); - assert!(pool.header_deps.is_empty()); -} - - -#[test] -fn test_remove_entries_by_filter() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], - 3, - ); - let tx3 = build_tx_with_dep( - vec![(&h256!("0x4").pack(), 1)], - vec![(&h256!("0x5").pack(), 1)], - 3, - ); - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1, Status::Pending)); - assert!(pool.add_entry(entry2, Status::Pending)); - assert!(pool.add_entry(entry3, Status::Pending)); - - pool.remove_entries_by_filter(|id, _tx_entry| id == &tx1.proposal_short_id()); - - assert!(!pool.contains_key(&tx1.proposal_short_id())); - assert!(pool.contains_key(&tx2.proposal_short_id())); - assert!(pool.contains_key(&tx3.proposal_short_id())); -} - - -#[test] -fn test_fill_proposals() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], - 3, - ); - let tx3 = build_tx_with_dep( - vec![(&h256!("0x4").pack(), 1)], - vec![(&h256!("0x5").pack(), 1)], - 3, - ); - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1, Status::Pending)); - assert!(pool.add_entry(entry2, Status::Pending)); - assert!(pool.add_entry(entry3, Status::Pending)); - - assert_eq!(pool.inputs_len(), 5); - assert_eq!(pool.deps_len(), 1); - assert_eq!(pool.outputs_len(), 7); - - let id1 = tx1.proposal_short_id(); - let id2 = tx2.proposal_short_id(); - let id3 = tx3.proposal_short_id(); - - let mut ret = HashSet::new(); - pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!( - ret, - HashSet::from_iter(vec![id1.clone(), id2.clone(), id3.clone()]) - ); - - let mut ret = HashSet::new(); - pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1.clone()])); - - let mut ret = HashSet::new(); - pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1.clone(), id2.clone()])); - - let mut ret = HashSet::new(); - let mut exclusion = HashSet::new(); - exclusion.insert(id2); - pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); -} diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index 7c784b36b4..ff5fa67866 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -2,7 +2,7 @@ use crate::component::tests::util::{ build_tx, build_tx_with_dep, build_tx_with_header_dep, DEFAULT_MAX_ANCESTORS_COUNT, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, }; -use crate::component::{entry::TxEntry, proposed::ProposedPool}; +use crate::component::{entry::TxEntry, pool_map::PoolMap}; use ckb_types::{ bytes::Bytes, core::{ @@ -49,16 +49,16 @@ fn test_add_entry() { let tx1_hash = tx1.hash(); let tx2 = build_tx(vec![(&tx1_hash, 0)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx1.clone(), |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx2, |_| None), MOCK_CYCLES, MOCK_FEE, @@ -91,21 +91,20 @@ fn test_add_entry_from_detached() { let id2 = entry2.proposal_short_id(); let id3 = entry3.proposal_short_id(); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(entry1.clone()).unwrap(); - pool.add_entry(entry2.clone()).unwrap(); - pool.add_entry(entry3).unwrap(); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); + pool.add_proposed(entry1.clone()).unwrap(); + pool.add_proposed(entry2.clone()).unwrap(); + pool.add_proposed(entry3).unwrap(); assert_eq!(pool.size(), 3); assert_eq!(pool.edges.outputs_len(), 3); assert_eq!(pool.edges.inputs_len(), 4); - assert_eq!(pool.inner().sorted_index.len(), 3); + assert_eq!(pool.size(), 3); let expected = vec![(id1.clone(), 1), (id2.clone(), 2), (id3.clone(), 3)]; let got = pool - .inner() - .sorted_index + .entries .iter() .map(|(_, key)| (key.id.clone(), key.score.ancestors_size)) .collect::>(); @@ -114,81 +113,78 @@ fn test_add_entry_from_detached() { // check link { - assert!(pool.inner().links.get_parents(&id1).unwrap().is_empty()); + assert!(pool.links.get_parents(&id1).unwrap().is_empty()); assert_eq!( - pool.inner().links.get_children(&id1).unwrap(), + pool.links.get_children(&id1).unwrap(), &HashSet::from_iter(vec![id2.clone()].into_iter()) ); assert_eq!( - pool.inner().links.get_parents(&id2).unwrap(), + pool.links.get_parents(&id2).unwrap(), &HashSet::from_iter(vec![id1.clone()].into_iter()) ); assert_eq!( - pool.inner() - .links + pool.links .get_children(&entry2.proposal_short_id()) .unwrap(), &HashSet::from_iter(vec![id3.clone()].into_iter()) ); assert_eq!( - pool.inner().links.get_parents(&id3).unwrap(), + pool.links.get_parents(&id3).unwrap(), &HashSet::from_iter(vec![id2.clone()].into_iter()) ); - assert!(pool.inner().links.get_children(&id3).unwrap().is_empty()); + assert!(pool.links.get_children(&id3).unwrap().is_empty()); } pool.remove_committed_tx(&tx1); assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); - assert_eq!(pool.inner().sorted_index.len(), 2); + assert_eq!(pool.entries.len(), 2); let left = vec![(id2.clone(), 1), (id3.clone(), 2)]; let got = pool - .inner() - .sorted_index + .entries .iter() .map(|(_, key)| (key.id.clone(), key.score.ancestors_size)) .collect::>(); assert_eq!(left, got); assert!(pool - .inner() .links .get_parents(&entry2.proposal_short_id()) .unwrap() .is_empty()); - assert!(pool.add_entry(entry1).unwrap()); - for (idx, (_, key)) in pool.inner().sorted_index.iter().enumerate() { - assert_eq!(key.id, expected[idx].0); - assert_eq!(key.score.ancestors_size, expected[idx].1); + assert!(pool.add_proposed(entry1).unwrap()); + + for (idx, (_, entry)) in pool.entries.iter().enumerate() { + assert_eq!(entry.id, expected[idx].0); + assert_eq!(entry.score.ancestors_size, expected[idx].1); } { - assert!(pool.inner().links.get_parents(&id1).unwrap().is_empty()); + assert!(pool.links.get_parents(&id1).unwrap().is_empty()); assert_eq!( - pool.inner().links.get_children(&id1).unwrap(), + pool.links.get_children(&id1).unwrap(), &HashSet::from_iter(vec![id2.clone()].into_iter()) ); assert_eq!( - pool.inner().links.get_parents(&id2).unwrap(), + pool.links.get_parents(&id2).unwrap(), &HashSet::from_iter(vec![id1].into_iter()) ); assert_eq!( - pool.inner() - .links + pool.links .get_children(&entry2.proposal_short_id()) .unwrap(), &HashSet::from_iter(vec![id3.clone()].into_iter()) ); assert_eq!( - pool.inner().links.get_parents(&id3).unwrap(), + pool.links.get_parents(&id3).unwrap(), &HashSet::from_iter(vec![id2].into_iter()) ); - assert!(pool.inner().links.get_children(&id3).unwrap().is_empty()); + assert!(pool.links.get_children(&id3).unwrap().is_empty()); } } @@ -200,16 +196,16 @@ fn test_add_roots() { 3, ); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx1.clone(), |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx2, |_| None), MOCK_CYCLES, MOCK_FEE, @@ -240,37 +236,37 @@ fn test_add_no_roots() { let tx3_hash = tx3.hash(); let tx5 = build_tx(vec![(&tx1_hash, 2), (&tx3_hash, 0)], 2); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx1.clone(), |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx2, |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx3, |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx4, |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx5, |_| None), MOCK_CYCLES, MOCK_FEE, @@ -293,26 +289,26 @@ fn test_sorted_by_tx_fee_rate() { let tx2 = build_tx(vec![(&Byte32::zero(), 2)], 1); let tx3 = build_tx(vec![(&Byte32::zero(), 3)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let cycles = 5_000_000; let size = 200; - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx1.clone(), cycles, Capacity::shannons(100), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx2.clone(), cycles, Capacity::shannons(300), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx3.clone(), cycles, Capacity::shannons(200), @@ -337,33 +333,33 @@ fn test_sorted_by_ancestors_score() { let tx3 = build_tx(vec![(&tx1_hash, 2)], 1); let tx4 = build_tx(vec![(&tx2_hash, 1)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let cycles = 5_000_000; let size = 200; - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx1.clone(), cycles, Capacity::shannons(100), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx2.clone(), cycles, Capacity::shannons(300), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx3.clone(), cycles, Capacity::shannons(200), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx4.clone(), cycles, Capacity::shannons(400), @@ -395,7 +391,7 @@ fn test_sorted_by_ancestors_score_competitive() { let tx2_3_hash = tx2_3.hash(); let tx2_4 = build_tx(vec![(&tx2_3_hash, 0)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); // Choose 5_000_839, so the weight is 853.0001094046, which will not lead to carry when // calculating the weight for a package. @@ -403,7 +399,7 @@ fn test_sorted_by_ancestors_score_competitive() { let size = 200; for &tx in &[&tx1, &tx2, &tx3, &tx2_1, &tx2_2, &tx2_3, &tx2_4] { - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx.clone(), cycles, Capacity::shannons(200), @@ -430,33 +426,33 @@ fn test_get_ancestors() { let tx3 = build_tx(vec![(&tx1_hash, 1)], 1); let tx4 = build_tx(vec![(&tx2_hash, 0)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let cycles = 5_000_000; let size = 200; - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx1.clone(), cycles, Capacity::shannons(100), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx2.clone(), cycles, Capacity::shannons(300), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx3.clone(), cycles, Capacity::shannons(200), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx4.clone(), cycles, Capacity::shannons(400), @@ -561,9 +557,9 @@ fn test_dep_group() { } }; - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); for tx in &[&tx1, &tx2, &tx3] { - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve((*tx).clone(), get_cell_data), MOCK_CYCLES, MOCK_FEE, @@ -572,7 +568,7 @@ fn test_dep_group() { .unwrap(); } - let get_deps_len = |pool: &ProposedPool, out_point: &OutPoint| -> usize { + let get_deps_len = |pool: &PoolMap, out_point: &OutPoint| -> usize { pool.edges .deps .get(out_point) @@ -591,7 +587,7 @@ fn test_dep_group() { #[test] fn test_resolve_conflict_header_dep() { - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let header: Byte32 = h256!("0x1").pack(); let tx = build_tx_with_header_dep( @@ -602,7 +598,7 @@ fn test_resolve_conflict_header_dep() { let entry = TxEntry::dummy_resolve(tx, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry.clone()).is_ok()); + assert!(pool.add_proposed(entry.clone()).is_ok()); let mut headers = HashSet::new(); headers.insert(header); @@ -633,10 +629,10 @@ fn test_disordered_remove_committed_tx() { MOCK_SIZE, ); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(entry1).unwrap(); - pool.add_entry(entry2).unwrap(); + pool.add_proposed(entry1).unwrap(); + pool.add_proposed(entry2).unwrap(); assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); @@ -650,7 +646,7 @@ fn test_disordered_remove_committed_tx() { #[test] fn test_max_ancestors() { - let mut pool = ProposedPool::new(1); + let mut pool = PoolMap::new(1); let tx1 = build_tx(vec![(&Byte32::zero(), 0)], 1); let tx1_id = tx1.proposal_short_id(); let tx1_hash = tx1.hash(); @@ -659,15 +655,15 @@ fn test_max_ancestors() { let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1).is_ok()); - assert!(pool.add_entry(entry2).is_err()); + assert!(pool.add_proposed(entry1).is_ok()); + assert!(pool.add_proposed(entry2).is_err()); assert_eq!( - pool.inner() + pool.links .get_children(&tx1_id) .map(|children| children.is_empty()), Some(true) ); - assert!(pool.inner().calc_descendants(&tx1_id).is_empty()); + assert!(pool.calc_descendants(&tx1_id).is_empty()); assert_eq!(pool.edges.inputs_len(), 1); assert_eq!(pool.edges.outputs_len(), 1); @@ -675,7 +671,7 @@ fn test_max_ancestors() { #[test] fn test_max_ancestors_with_dep() { - let mut pool = ProposedPool::new(1); + let mut pool = PoolMap::new(1); let tx1 = build_tx_with_dep( vec![(&Byte32::zero(), 0)], vec![(&h256!("0x1").pack(), 0)], @@ -687,14 +683,14 @@ fn test_max_ancestors_with_dep() { let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1).is_ok()); - assert!(pool.add_entry(entry2).is_err()); - assert_eq!(pool.inner().deps().len(), 1); + assert!(pool.add_proposed(entry1).is_ok()); + assert!(pool.add_proposed(entry2).is_err()); + assert_eq!(pool.edges.deps.len(), 1); assert!(pool - .inner() - .deps() + .edges + .deps .contains_key(&OutPoint::new(h256!("0x1").pack(), 0))); - assert!(pool.inner().calc_descendants(&tx1_id).is_empty()); + assert!(pool.calc_descendants(&tx1_id).is_empty()); assert_eq!(pool.edges.inputs_len(), 1); assert_eq!(pool.edges.outputs_len(), 1); diff --git a/tx-pool/src/component/tests/container.rs b/tx-pool/src/component/tests/score_key.rs similarity index 94% rename from tx-pool/src/component/tests/container.rs rename to tx-pool/src/component/tests/score_key.rs index 5c06e9601f..22da657f84 100644 --- a/tx-pool/src/component/tests/container.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -6,10 +6,7 @@ use ckb_types::{ }; use std::mem::size_of; -use crate::component::{ - container::{AncestorsScoreSortKey, SortedTxMap}, - entry::TxEntry, -}; +use crate::component::{entry::TxEntry, pool_map::PoolMap, score_key::AncestorsScoreSortKey}; const DEFAULT_MAX_ANCESTORS_COUNT: usize = 125; @@ -98,7 +95,7 @@ fn test_ancestors_sorted_key_order() { #[test] fn test_remove_entry() { - let mut map = SortedTxMap::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut map = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let tx1 = TxEntry::dummy_resolve( TransactionBuilder::default().build(), 100, @@ -144,9 +141,9 @@ fn test_remove_entry() { let tx1_id = tx1.proposal_short_id(); let tx2_id = tx2.proposal_short_id(); let tx3_id = tx3.proposal_short_id(); - map.add_entry(tx1).unwrap(); - map.add_entry(tx2).unwrap(); - map.add_entry(tx3).unwrap(); + map.add_proposed(tx1).unwrap(); + map.add_proposed(tx2).unwrap(); + map.add_proposed(tx3).unwrap(); let descendants_set = map.calc_descendants(&tx1_id); assert!(descendants_set.contains(&tx2_id)); assert!(descendants_set.contains(&tx3_id)); @@ -171,7 +168,7 @@ fn test_remove_entry() { #[test] fn test_remove_entry_and_descendants() { - let mut map = SortedTxMap::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut map = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let tx1 = TxEntry::dummy_resolve( TransactionBuilder::default().build(), 100, @@ -217,9 +214,9 @@ fn test_remove_entry_and_descendants() { let tx1_id = tx1.proposal_short_id(); let tx2_id = tx2.proposal_short_id(); let tx3_id = tx3.proposal_short_id(); - map.add_entry(tx1).unwrap(); - map.add_entry(tx2).unwrap(); - map.add_entry(tx3).unwrap(); + map.add_proposed(tx1).unwrap(); + map.add_proposed(tx2).unwrap(); + map.add_proposed(tx3).unwrap(); let descendants_set = map.calc_descendants(&tx1_id); assert!(descendants_set.contains(&tx2_id)); assert!(descendants_set.contains(&tx3_id)); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index c9dcc3b7fd..c01ad22b64 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -3,23 +3,23 @@ extern crate rustc_hash; extern crate slab; use super::component::{commit_txs_scanner::CommitTxsScanner, TxEntry}; use crate::callback::Callbacks; -use crate::component::pending::PendingQueue; -use crate::component::proposed::ProposedPool; +use crate::component::pool_map::{PoolEntry, PoolMap, Status}; use crate::component::recent_reject::RecentReject; use crate::error::Reject; -use crate::component::pool_map::{PoolMap, Status}; use crate::util::verify_rtx; use ckb_app_config::TxPoolConfig; -use ckb_logger::{debug, error, trace, warn}; +use ckb_logger::{debug, error, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; use ckb_types::{ core::{ cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, + cell::{CellChecker, CellMetaBuilder, CellProvider, CellStatus}, tx_pool::{TxPoolEntryInfo, TxPoolIds}, Cycle, TransactionView, UncleBlockView, }, - packed::{Byte32, ProposalShortId}, + packed::{Byte32, OutPoint, ProposalShortId}, + prelude::*, }; use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; @@ -28,44 +28,9 @@ use std::sync::Arc; const COMMITTED_HASH_CACHE_SIZE: usize = 100_000; -// limit the size of the pool by sorting out tx based on EvictKey. -macro_rules! evict_for_trim_size { - ($self:ident, $pool:expr, $callbacks:expr) => { - if let Some(id) = $pool - .iter() - .min_by_key(|(_id, entry)| entry.as_evict_key()) - .map(|(id, _)| id) - .cloned() - { - let removed = $pool.remove_entry_and_descendants(&id); - for entry in removed { - let tx_hash = entry.transaction().hash(); - debug!( - "removed by size limit {} timestamp({})", - tx_hash, entry.timestamp - ); - let reject = Reject::Full(format!( - "the fee_rate for this transaction is: {}", - entry.fee_rate() - )); - $callbacks.call_reject($self, &entry, reject); - } - } - }; -} - -type ConflictEntry = (TxEntry, Reject); - /// Tx-pool implementation pub struct TxPool { pub(crate) config: TxPoolConfig, - /// The short id that has not been proposed - pub(crate) pending: PendingQueue, - /// The proposal gap - pub(crate) gap: PendingQueue, - /// Tx pool that finely for commit - pub(crate) proposed: ProposedPool, - pub(crate) pool_map: PoolMap, /// cache for committed transactions hash pub(crate) committed_txs_hash_cache: LruCache, @@ -87,9 +52,6 @@ impl TxPool { let recent_reject = Self::build_recent_reject(&config); let expiry = config.expiry_hours as u64 * 60 * 60 * 1000; TxPool { - pending: PendingQueue::new(), - gap: PendingQueue::new(), - proposed: ProposedPool::new(config.max_ancestors_count), pool_map: PoolMap::new(config.max_ancestors_count), committed_txs_hash_cache: LruCache::new(COMMITTED_HASH_CACHE_SIZE), total_tx_size: 0, @@ -102,18 +64,22 @@ impl TxPool { } /// Tx-pool owned snapshot, it may not consistent with chain cause tx-pool update snapshot asynchronously - pub fn snapshot(&self) -> &Snapshot { + pub(crate) fn snapshot(&self) -> &Snapshot { &self.snapshot } /// Makes a clone of the `Arc` - pub fn cloned_snapshot(&self) -> Arc { + pub(crate) fn cloned_snapshot(&self) -> Arc { Arc::clone(&self.snapshot) } - /// Whether Tx-pool reach size limit - pub fn reach_size_limit(&self, tx_size: usize) -> bool { - (self.total_tx_size + tx_size) > self.config.max_tx_pool_size + fn get_by_status(&self, status: &Status) -> Vec<&PoolEntry> { + self.pool_map.entries.get_by_status(status) + } + + /// Get tx-pool size + pub fn status_size(&self, status: &Status) -> usize { + self.get_by_status(status).len() } /// Update size and cycles statics for add tx @@ -143,123 +109,38 @@ impl TxPool { self.total_tx_cycles = total_tx_cycles; } - /// Add tx to pending pool + /// Add tx with pending status /// If did have this value present, false is returned. - pub fn add_pending(&mut self, entry: TxEntry) -> bool { - if self.gap.contains_key(&entry.proposal_short_id()) { - return false; - } - trace!("add_pending {}", entry.transaction().hash()); - self.pending.add_entry(entry) - } - - pub fn add_pending_v2(&mut self, entry: TxEntry) -> bool { + pub(crate) fn add_pending(&mut self, entry: TxEntry) -> Result { self.pool_map.add_entry(entry, Status::Pending) } - /// Add tx which proposed but still uncommittable to gap pool - pub fn add_gap(&mut self, entry: TxEntry) -> bool { - if self.proposed.contains_key(&entry.proposal_short_id()) { - return false; - } - trace!("add_gap {}", entry.transaction().hash()); - self.gap.add_entry(entry) - } - - pub fn add_gap_v2(&mut self, entry: TxEntry) -> bool { + /// Add tx which proposed but still uncommittable to gap + pub(crate) fn add_gap(&mut self, entry: TxEntry) -> Result { self.pool_map.add_entry(entry, Status::Gap) } - /// Add tx to proposed pool - pub fn add_proposed(&mut self, entry: TxEntry) -> Result { - trace!("add_proposed {}", entry.transaction().hash()); - self.proposed.add_entry(entry) - } - - pub fn add_proposed_v2(&mut self, entry: TxEntry) -> bool { + /// Add tx with proposed status + pub(crate) fn add_proposed(&mut self, entry: TxEntry) -> Result { self.pool_map.add_entry(entry, Status::Proposed) } /// Returns true if the tx-pool contains a tx with specified id. - pub fn contains_proposal_id(&self, id: &ProposalShortId) -> bool { - self.pending.contains_key(id) || self.gap.contains_key(id) || self.proposed.contains_key(id) - } - - pub fn contains_proposal_id_v2(&self, id: &ProposalShortId) -> bool { + pub(crate) fn contains_proposal_id(&self, id: &ProposalShortId) -> bool { self.pool_map.get_by_id(id).is_some() } /// Returns tx with cycles corresponding to the id. - pub fn get_tx_with_cycles(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { - self.pending - .get(id) - .map(|entry| (entry.transaction().clone(), entry.cycles)) - .or_else(|| { - self.gap - .get(id) - .map(|entry| (entry.transaction().clone(), entry.cycles)) - }) - .or_else(|| { - self.proposed - .get(id) - .map(|entry| (entry.transaction().clone(), entry.cycles)) - }) - } - - pub fn get_tx_with_cycles_v2(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { + pub(crate) fn get_tx_with_cycles( + &self, + id: &ProposalShortId, + ) -> Option<(TransactionView, Cycle)> { self.pool_map .get_by_id(id) .map(|entry| (entry.inner.transaction().clone(), entry.inner.cycles)) } - /// Returns tx corresponding to the id. - pub fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.pending - .get_tx(id) - .or_else(|| self.gap.get_tx(id)) - .or_else(|| self.proposed.get_tx(id)) - } - - pub fn get_tx_v2(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.pool_map - .get_by_id(id) - .map(|entry| entry.inner.transaction()) - } - - /// Returns tx from pending and gap corresponding to the id. RPC - pub fn get_entry_from_pending_or_gap(&self, id: &ProposalShortId) -> Option<&TxEntry> { - self.pending.get(id).or_else(|| self.gap.get(id)) - } - - pub fn get_entry_from_pending_or_gap_v2(&self, id: &ProposalShortId) -> Option<&TxEntry> { - if let Some(entry) = self.pool_map.get_by_id(id) { - match entry.status { - Status::Pending | Status::Gap => return Some(&entry.inner), - _ => return None, - } - } else { - return None; - } - } - - pub(crate) fn proposed(&self) -> &ProposedPool { - &self.proposed - } - - pub(crate) fn get_tx_from_proposed_and_others( - &self, - id: &ProposalShortId, - ) -> Option<&TransactionView> { - self.proposed - .get_tx(id) - .or_else(|| self.gap.get_tx(id)) - .or_else(|| self.pending.get_tx(id)) - } - - pub(crate) fn get_tx_from_proposed_and_others_v2( - &self, - id: &ProposalShortId, - ) -> Option<&TransactionView> { + pub(crate) fn get_tx_from_pool(&self, id: &ProposalShortId) -> Option<&TransactionView> { self.pool_map .get_by_id(id) .map(|entry| entry.inner.transaction()) @@ -273,7 +154,6 @@ impl TxPool { ) { for tx in txs { let tx_hash = tx.hash(); - debug!("try remove_committed_tx {}", tx_hash); self.remove_committed_tx(tx, callbacks); self.committed_txs_hash_cache @@ -290,77 +170,14 @@ impl TxPool { detached_headers: &HashSet, callbacks: &Callbacks, ) { - for (entry, reject) in self.proposed.resolve_conflict_header_dep(detached_headers) { - callbacks.call_reject(self, &entry, reject); - } - for (entry, reject) in self.gap.resolve_conflict_header_dep(detached_headers) { - callbacks.call_reject(self, &entry, reject); - } - for (entry, reject) in self.pending.resolve_conflict_header_dep(detached_headers) { - callbacks.call_reject(self, &entry, reject); - } - } - - pub(crate) fn resolve_conflict_header_dep_v2( - &mut self, - detached_headers: &HashSet, - callbacks: &Callbacks, - ) { - for (entry, reject) in self - .pool_map - .resolve_conflict_header_dep(detached_headers) - { + for (entry, reject) in self.pool_map.resolve_conflict_header_dep(detached_headers) { callbacks.call_reject(self, &entry, reject); } } pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { - let hash = tx.hash(); - let short_id = tx.proposal_short_id(); - // try remove committed tx from proposed - // proposed tx should not contain conflict, if exists just skip resolve conflict - if let Some(entry) = self.proposed.remove_committed_tx(tx) { - debug!("remove_committed_tx from proposed {}", hash); - callbacks.call_committed(self, &entry) - } else { - let conflicts = self.proposed.resolve_conflict(tx); - - for (entry, reject) in conflicts { - callbacks.call_reject(self, &entry, reject); - } - } - - // pending and gap should resolve conflict no matter exists or not - if let Some(entry) = self.gap.remove_entry(&short_id) { - debug!("remove_committed_tx from gap {}", hash); - callbacks.call_committed(self, &entry) - } - { - let conflicts = self.gap.resolve_conflict(tx); - - for (entry, reject) in conflicts { - callbacks.call_reject(self, &entry, reject); - } - } - - if let Some(entry) = self.pending.remove_entry(&short_id) { - debug!("remove_committed_tx from pending {}", hash); - callbacks.call_committed(self, &entry) - } - { - let conflicts = self.pending.resolve_conflict(tx); - - for (entry, reject) in conflicts { - callbacks.call_reject(self, &entry, reject); - } - } - } - - pub(crate) fn remove_committed_tx_v2(&mut self, tx: &TransactionView, callbacks: &Callbacks) { - let hash = tx.hash(); let short_id = tx.proposal_short_id(); if let Some(entry) = self.pool_map.remove_entry(&short_id) { - debug!("remove_committed_tx from gap {}", hash); callbacks.call_committed(self, &entry) } { @@ -373,37 +190,6 @@ impl TxPool { // Expire all transaction (and their dependencies) in the pool. pub(crate) fn remove_expired(&mut self, callbacks: &Callbacks) { - let now_ms = ckb_systemtime::unix_time_as_millis(); - let expired = - |_id: &ProposalShortId, tx_entry: &TxEntry| self.expiry + tx_entry.timestamp < now_ms; - let mut removed = self.pending.remove_entries_by_filter(expired); - removed.extend(self.gap.remove_entries_by_filter(expired)); - let removed_proposed_ids: Vec<_> = self - .proposed - .iter() - .filter_map(|(id, tx_entry)| { - if self.expiry + tx_entry.timestamp < now_ms { - Some(id) - } else { - None - } - }) - .cloned() - .collect(); - for id in removed_proposed_ids { - removed.extend(self.proposed.remove_entry_and_descendants(&id)) - } - - for entry in removed { - let tx_hash = entry.transaction().hash(); - debug!("remove_expired {} timestamp({})", tx_hash, entry.timestamp); - let reject = Reject::Expiry(entry.timestamp); - callbacks.call_reject(self, &entry, reject); - } - } - - // Expire all transaction (and their dependencies) in the pool. - pub(crate) fn remove_expired_v2(&mut self, callbacks: &Callbacks) { let now_ms = ckb_systemtime::unix_time_as_millis(); let removed: Vec<_> = self .pool_map @@ -414,8 +200,6 @@ impl TxPool { for entry in removed { self.pool_map.remove_entry(&entry.proposal_short_id()); - let tx_hash = entry.transaction().hash(); - debug!("remove_expired {} timestamp({})", tx_hash, entry.timestamp); let reject = Reject::Expiry(entry.timestamp); callbacks.call_reject(self, &entry, reject); } @@ -423,18 +207,6 @@ impl TxPool { // Remove transactions from the pool until total size < size_limit. pub(crate) fn limit_size(&mut self, callbacks: &Callbacks) { - while self.total_tx_size > self.config.max_tx_pool_size { - if !self.pending.is_empty() { - evict_for_trim_size!(self, self.pending, callbacks) - } else if !self.gap.is_empty() { - evict_for_trim_size!(self, self.gap, callbacks) - } else { - evict_for_trim_size!(self, self.proposed, callbacks) - } - } - } - - pub(crate) fn limit_size_v2(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { if let Some(id) = self.pool_map.next_evict_entry() { let removed = self.pool_map.remove_entry_and_descendants(&id); @@ -459,40 +231,10 @@ impl TxPool { pub(crate) fn remove_by_detached_proposal<'a>( &mut self, ids: impl Iterator, - ) { - for id in ids { - if let Some(entry) = self.gap.remove_entry(id) { - let tx_hash = entry.transaction().hash(); - let ret = self.add_pending(entry); - debug!( - "remove_by_detached_proposal from gap {} add_pending {}", - tx_hash, ret - ); - } - let mut entries = self.proposed.remove_entry_and_descendants(id); - entries.sort_unstable_by_key(|entry| entry.ancestors_count); - for mut entry in entries { - let tx_hash = entry.transaction().hash(); - entry.reset_ancestors_state(); - let ret = self.add_pending(entry); - debug!( - "remove_by_detached_proposal from proposed {} add_pending {}", - tx_hash, ret - ); - } - } - } - - // remove transaction with detached proposal from gap and proposed - // try re-put to pending - pub(crate) fn remove_by_detached_proposal_v2<'a>( - &mut self, - ids: impl Iterator, ) { for id in ids { if let Some(e) = self.pool_map.get_by_id(id) { let status = e.status; - // TODO: double check this if status == Status::Pending { continue; } @@ -503,7 +245,7 @@ impl TxPool { entry.reset_ancestors_state(); let ret = self.add_pending(entry); debug!( - "remove_by_detached_proposal from {:?} {} add_pending {}", + "remove_by_detached_proposal from {:?} {} add_pending {:?}", status, tx_hash, ret ); } @@ -512,7 +254,7 @@ impl TxPool { } pub(crate) fn remove_tx(&mut self, id: &ProposalShortId) -> bool { - let entries = self.proposed.remove_entry_and_descendants(id); + let entries = self.pool_map.remove_entry_and_descendants(id); if !entries.is_empty() { for entry in entries { self.update_statics_for_remove_tx(entry.size, entry.cycles); @@ -520,20 +262,6 @@ impl TxPool { return true; } - if let Some(entry) = self.gap.remove_entry(id) { - self.update_statics_for_remove_tx(entry.size, entry.cycles); - return true; - } - - if let Some(entry) = self.pending.remove_entry(id) { - self.update_statics_for_remove_tx(entry.size, entry.cycles); - return true; - } - - false - } - - pub(crate) fn remove_tx_v2(&mut self, id: &ProposalShortId) -> bool { if let Some(entry) = self.pool_map.remove_entry(id) { self.update_statics_for_remove_tx(entry.size, entry.cycles); return true; @@ -541,102 +269,48 @@ impl TxPool { false } - pub(crate) fn resolve_tx_from_pending_and_proposed( - &self, - tx: TransactionView, - ) -> Result, Reject> { - let snapshot = self.snapshot(); - let proposed_provider = OverlayCellProvider::new(&self.proposed, snapshot); - let gap_and_proposed_provider = OverlayCellProvider::new(&self.gap, &proposed_provider); - let pending_and_proposed_provider = - OverlayCellProvider::new(&self.pending, &gap_and_proposed_provider); - let mut seen_inputs = HashSet::new(); - resolve_transaction( - tx, - &mut seen_inputs, - &pending_and_proposed_provider, - snapshot, - ) - .map(Arc::new) - .map_err(Reject::Resolve) - } - - pub(crate) fn resolve_tx_from_pending_and_proposed_v2( - &self, - tx: TransactionView, - ) -> Result, Reject> { - let snapshot = self.snapshot(); - let provider = OverlayCellProvider::new(&self.pool_map.entries, snapshot); - let mut seen_inputs = HashSet::new(); - resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) - .map(Arc::new) - .map_err(Reject::Resolve) - } - pub(crate) fn check_rtx_from_pending_and_proposed( &self, rtx: &ResolvedTransaction, ) -> Result<(), Reject> { let snapshot = self.snapshot(); - let proposed_checker = OverlayCellChecker::new(&self.proposed, snapshot); - let gap_and_proposed_checker = OverlayCellChecker::new(&self.gap, &proposed_checker); - let pending_and_proposed_checker = - OverlayCellChecker::new(&self.pending, &gap_and_proposed_checker); + let proposal_checker = OverlayCellChecker::new(&self.pool_map, snapshot); + let checker = OverlayCellChecker::new(self, &proposal_checker); let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &pending_and_proposed_checker, snapshot) + rtx.check(&mut seen_inputs, &checker, snapshot) .map_err(Reject::Resolve) } - pub(crate) fn check_rtx_from_pending_and_proposed_v2( - &self, - rtx: &ResolvedTransaction, - ) -> Result<(), Reject> { + pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { let snapshot = self.snapshot(); - let checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); + let proposal_checker = OverlayCellChecker::new(&self.pool_map, snapshot); let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &checker, snapshot) + rtx.check(&mut seen_inputs, &proposal_checker, snapshot) .map_err(Reject::Resolve) } - pub(crate) fn resolve_tx_from_proposed( + pub(crate) fn resolve_tx_from_pending_and_proposed( &self, tx: TransactionView, ) -> Result, Reject> { let snapshot = self.snapshot(); - let cell_provider = OverlayCellProvider::new(&self.proposed, snapshot); + let proposed_provider = OverlayCellProvider::new(&self.pool_map, snapshot); + let provider = OverlayCellProvider::new(self, &proposed_provider); let mut seen_inputs = HashSet::new(); - resolve_transaction(tx, &mut seen_inputs, &cell_provider, snapshot) + resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) .map(Arc::new) .map_err(Reject::Resolve) } - pub(crate) fn resolve_tx_from_proposed_v2( - &self, - rtx: &ResolvedTransaction, - ) -> Result<(), Reject> { - let snapshot = self.snapshot(); - let checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); - let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &checker, snapshot) - .map_err(Reject::Resolve) - } - - pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { - let snapshot = self.snapshot(); - let cell_checker = OverlayCellChecker::new(&self.proposed, snapshot); - let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &cell_checker, snapshot) - .map_err(Reject::Resolve) - } - - pub(crate) fn check_rtx_from_proposed_v2( + pub(crate) fn resolve_tx_from_proposed( &self, - rtx: &ResolvedTransaction, - ) -> Result<(), Reject> { + tx: TransactionView, + ) -> Result, Reject> { let snapshot = self.snapshot(); - let cell_checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); + let proposed_provider = OverlayCellProvider::new(&self.pool_map, snapshot); let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &cell_checker, snapshot) + resolve_transaction(tx, &mut seen_inputs, &proposed_provider, snapshot) + .map(Arc::new) .map_err(Reject::Resolve) } @@ -663,8 +337,9 @@ impl TxPool { let entry = TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); + let tx_hash = entry.transaction().hash(); - if self.add_gap(entry) { + if self.add_gap(entry).unwrap_or(false) { Ok(CacheEntry::Completed(verified)) } else { Err(Reject::Duplicated(tx_hash)) @@ -695,6 +370,11 @@ impl TxPool { let entry = TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); let tx_hash = entry.transaction().hash(); + debug!( + "proposed_rtx: {:?} => {:?}", + tx_hash, + entry.proposal_short_id() + ); if self.add_proposed(entry)? { Ok(CacheEntry::Completed(verified)) } else { @@ -703,76 +383,45 @@ impl TxPool { } /// Get to-be-proposal transactions that may be included in the next block. - pub fn get_proposals( - &self, - limit: usize, - exclusion: &HashSet, - ) -> HashSet { - let mut proposals = HashSet::with_capacity(limit); - self.pending - .fill_proposals(limit, exclusion, &mut proposals); - self.gap.fill_proposals(limit, exclusion, &mut proposals); - proposals - } - - /// Get to-be-proposal transactions that may be included in the next block. - pub fn get_proposals_v2( + /// TODO: do we need to consider the something like score, so that we can + /// provide best transactions to be proposed. + pub(crate) fn get_proposals( &self, limit: usize, exclusion: &HashSet, ) -> HashSet { let mut proposals = HashSet::with_capacity(limit); - self.pool_map.fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); - self.pool_map.fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); + self.pool_map + .fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); + self.pool_map + .fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); proposals } /// Returns tx from tx-pool or storage corresponding to the id. - pub fn get_tx_from_pool_or_store( + pub(crate) fn get_tx_from_pool_or_store( &self, proposal_id: &ProposalShortId, ) -> Option { - self.get_tx_from_proposed_and_others(proposal_id) - .cloned() - .or_else(|| { - self.committed_txs_hash_cache - .peek(proposal_id) - .and_then(|tx_hash| self.snapshot().get_transaction(tx_hash).map(|(tx, _)| tx)) - }) + self.get_tx_from_pool(proposal_id).cloned().or_else(|| { + self.committed_txs_hash_cache + .peek(proposal_id) + .and_then(|tx_hash| self.snapshot().get_transaction(tx_hash).map(|(tx, _)| tx)) + }) } pub(crate) fn get_ids(&self) -> TxPoolIds { - let pending = self - .pending - .iter() - .map(|(_, entry)| entry.transaction().hash()) - .chain(self.gap.iter().map(|(_, entry)| entry.transaction().hash())) - .collect(); - - let proposed = self - .proposed - .iter() - .map(|(_, entry)| entry.transaction().hash()) - .collect(); - - TxPoolIds { pending, proposed } - } - - // This is for RPC request, performance is not critical - pub(crate) fn get_ids_v2(&self) -> TxPoolIds { let pending: Vec = self - .pool_map - .entries .get_by_status(&Status::Pending) .iter() - .chain(self.pool_map.entries.get_by_status(&Status::Gap).iter()) + .chain(self.get_by_status(&Status::Gap).iter()) .map(|entry| entry.inner.transaction().hash()) .collect(); let proposed: Vec = self - .proposed + .get_by_status(&Status::Proposed) .iter() - .map(|(_, entry)| entry.transaction().hash()) + .map(|entry| entry.inner.transaction().hash()) .collect(); TxPoolIds { pending, proposed } @@ -780,38 +429,13 @@ impl TxPool { pub(crate) fn get_all_entry_info(&self) -> TxPoolEntryInfo { let pending = self - .pending - .iter() - .map(|(_, entry)| (entry.transaction().hash(), entry.to_info())) - .chain( - self.gap - .iter() - .map(|(_, entry)| (entry.transaction().hash(), entry.to_info())), - ) - .collect(); - - let proposed = self - .proposed - .iter() - .map(|(_, entry)| (entry.transaction().hash(), entry.to_info())) - .collect(); - - TxPoolEntryInfo { pending, proposed } - } - - pub(crate) fn get_all_entry_info_v2(&self) -> TxPoolEntryInfo { - let pending = self - .pool_map - .entries .get_by_status(&Status::Pending) .iter() - .chain(self.pool_map.entries.get_by_status(&Status::Gap).iter()) + .chain(self.get_by_status(&Status::Gap).iter()) .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) .collect(); let proposed = self - .pool_map - .entries .get_by_status(&Status::Proposed) .iter() .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) @@ -821,29 +445,12 @@ impl TxPool { } pub(crate) fn drain_all_transactions(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed, &self.pool_map.entries) - .txs_to_commit(self.total_tx_size, self.total_tx_cycles) - .0 - .into_iter() - .map(|tx_entry| tx_entry.into_transaction()) - .collect::>(); - self.proposed.clear(); - txs.append(&mut self.gap.drain()); - txs.append(&mut self.pending.drain()); - self.total_tx_size = 0; - self.total_tx_cycles = 0; - // self.touch_last_txs_updated_at(); - txs - } - - pub(crate) fn drain_all_transactions_v2(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed, &self.pool_map.entries) + let mut txs = CommitTxsScanner::new(&self.pool_map) .txs_to_commit(self.total_tx_size, self.total_tx_cycles) .0 .into_iter() .map(|tx_entry| tx_entry.into_transaction()) .collect::>(); - self.proposed.clear(); let mut pending = self .pool_map .entries @@ -868,9 +475,6 @@ impl TxPool { } pub(crate) fn clear(&mut self, snapshot: Arc) { - self.pending = PendingQueue::new(); - self.gap = PendingQueue::new(); - self.proposed = ProposedPool::new(self.config.max_ancestors_count); self.pool_map.clear(); self.snapshot = snapshot; self.committed_txs_hash_cache = LruCache::new(COMMITTED_HASH_CACHE_SIZE); @@ -896,8 +500,7 @@ impl TxPool { txs_size_limit: usize, ) -> (Vec, usize, Cycle) { let (entries, size, cycles) = - CommitTxsScanner::new(self.proposed(), &self.pool_map.entries) - .txs_to_commit(txs_size_limit, max_block_cycles); + CommitTxsScanner::new(&self.pool_map).txs_to_commit(txs_size_limit, max_block_cycles); if !entries.is_empty() { ckb_logger::info!( @@ -935,3 +538,49 @@ impl TxPool { } } } + +/// This is a hack right now, we use `CellProvider` to check if a transaction is in `Pending` or `Gap` status. +/// To make sure the behavior is same as before, we need to remove this if we have finished replace-by-fee strategy. +impl CellProvider for TxPool { + fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { + let tx_hash = out_point.tx_hash(); + match self + .pool_map + .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) + { + Some(pool_entry) if pool_entry.status != Status::Proposed => { + match pool_entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + { + Some((output, data)) => { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } + None => CellStatus::Unknown, + } + } + _ => CellStatus::Unknown, + } + } +} + +impl CellChecker for TxPool { + fn is_live(&self, out_point: &OutPoint) -> Option { + let tx_hash = out_point.tx_hash(); + match self + .pool_map + .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) + { + Some(pool_entry) if pool_entry.status != Status::Proposed => pool_entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + .map(|_| true), + _ => None, + } + } +} diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index b5062b3b35..00dde6d264 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -1,6 +1,7 @@ use crate::callback::Callbacks; use crate::component::entry::TxEntry; use crate::component::orphan::Entry as OrphanEntry; +use crate::component::pool_map::Status; use crate::error::Reject; use crate::pool::TxPool; use crate::service::{BlockAssemblerMessage, TxPoolService, TxVerificationResult}; @@ -51,6 +52,7 @@ pub enum TxStatus { Proposed, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum ProcessResult { Suspended, Completed(Completed), @@ -122,7 +124,6 @@ impl TxPoolService { } _submit_entry(tx_pool, status, entry.clone(), &self.callbacks)?; - Ok(()) }) .await; @@ -276,7 +277,6 @@ impl TxPoolService { if let Some((ret, snapshot)) = self._process_tx(tx.clone(), remote.map(|r| r.0)).await { self.after_process(tx, remote, &snapshot, &ret).await; - ret } else { // currently, the returned cycles is not been used, mock 0 if delay @@ -360,7 +360,6 @@ impl TxPoolService { self.process_orphan_tx(&tx).await; } Err(reject) => { - debug!("after_process {} reject: {} ", tx_hash, reject); if is_missing_input(reject) && all_inputs_is_unknown(snapshot, &tx) { self.add_orphan(tx, peer, declared_cycle).await; } else { @@ -551,7 +550,6 @@ impl TxPoolService { let tx_hash = tx.hash(); let (ret, snapshot) = self.pre_check(&tx).await; - let (tip_hash, rtx, status, fee, tx_size) = try_or_return_with_snapshot!(ret, snapshot); if self.is_in_delay_window(&snapshot) { @@ -640,7 +638,6 @@ impl TxPoolService { try_or_return_with_snapshot!(ret, submit_snapshot); self.notify_block_assembler(status).await; - if cached.is_none() { // update cache let txs_verify_cache = Arc::clone(&self.txs_verify_cache); @@ -962,38 +959,36 @@ fn check_rtx( rtx: &ResolvedTransaction, ) -> Result { let short_id = rtx.transaction.proposal_short_id(); - if snapshot.proposals().contains_proposed(&short_id) { - tx_pool - .check_rtx_from_proposed(rtx) - .map(|_| TxStatus::Proposed) + let tx_status = if snapshot.proposals().contains_proposed(&short_id) { + TxStatus::Proposed + } else if snapshot.proposals().contains_gap(&short_id) { + TxStatus::Gap } else { - let tx_status = if snapshot.proposals().contains_gap(&short_id) { - TxStatus::Gap - } else { - TxStatus::Fresh - }; - tx_pool - .check_rtx_from_pending_and_proposed(rtx) - .map(|_| tx_status) + TxStatus::Fresh + }; + if tx_status == TxStatus::Proposed { + tx_pool.check_rtx_from_proposed(rtx) + } else { + tx_pool.check_rtx_from_pending_and_proposed(rtx) } + .map(|_| tx_status) } fn resolve_tx(tx_pool: &TxPool, snapshot: &Snapshot, tx: TransactionView) -> ResolveResult { let short_id = tx.proposal_short_id(); - if snapshot.proposals().contains_proposed(&short_id) { - tx_pool - .resolve_tx_from_proposed(tx) - .map(|rtx| (rtx, TxStatus::Proposed)) + let tx_status = if snapshot.proposals().contains_proposed(&short_id) { + TxStatus::Proposed + } else if snapshot.proposals().contains_gap(&short_id) { + TxStatus::Gap } else { - let tx_status = if snapshot.proposals().contains_gap(&short_id) { - TxStatus::Gap - } else { - TxStatus::Fresh - }; - tx_pool - .resolve_tx_from_pending_and_proposed(tx) - .map(|rtx| (rtx, tx_status)) + TxStatus::Fresh + }; + if tx_status == TxStatus::Proposed { + tx_pool.resolve_tx_from_proposed(tx) + } else { + tx_pool.resolve_tx_from_pending_and_proposed(tx) } + .map(|rtx| (rtx, tx_status)) } fn _submit_entry( @@ -1005,16 +1000,15 @@ fn _submit_entry( let tx_hash = entry.transaction().hash(); match status { TxStatus::Fresh => { - if tx_pool.add_pending(entry.clone()) { - debug!("submit_entry pending {}", tx_hash); + if tx_pool.add_pending(entry.clone()).unwrap_or(false) { callbacks.call_pending(tx_pool, &entry); } else { return Err(Reject::Duplicated(tx_hash)); } } + TxStatus::Gap => { - if tx_pool.add_gap(entry.clone()) { - debug!("submit_entry gap {}", tx_hash); + if tx_pool.add_gap(entry.clone()).unwrap_or(false) { callbacks.call_pending(tx_pool, &entry); } else { return Err(Reject::Duplicated(tx_hash)); @@ -1022,10 +1016,7 @@ fn _submit_entry( } TxStatus::Proposed => { if tx_pool.add_proposed(entry.clone())? { - debug!("submit_entry proposed {}", tx_hash); callbacks.call_proposed(tx_pool, &entry, true); - } else { - return Err(Reject::Duplicated(tx_hash)); } } } @@ -1055,38 +1046,39 @@ fn _update_tx_pool_for_reorg( // pending ---> gap ----> proposed // try move gap to proposed if mine_mode { - let mut entries = Vec::new(); + let mut proposals = Vec::new(); let mut gaps = Vec::new(); - tx_pool.gap.remove_entries_by_filter(|id, tx_entry| { - if snapshot.proposals().contains_proposed(id) { - entries.push(tx_entry.clone()); - true - } else { - false - } - }); - - tx_pool.pending.remove_entries_by_filter(|id, tx_entry| { - if snapshot.proposals().contains_proposed(id) { - entries.push(tx_entry.clone()); - true - } else if snapshot.proposals().contains_gap(id) { - gaps.push(tx_entry.clone()); - true - } else { - false - } - }); + tx_pool + .pool_map + .remove_entries_by_filter(&Status::Gap, |id, tx_entry| { + if snapshot.proposals().contains_proposed(id) { + proposals.push(tx_entry.clone()); + true + } else { + false + } + }); - for entry in entries { - debug!("tx move to proposed {}", entry.transaction().hash()); + tx_pool + .pool_map + .remove_entries_by_filter(&Status::Pending, |id, tx_entry| { + if snapshot.proposals().contains_proposed(id) { + proposals.push(tx_entry.clone()); + true + } else if snapshot.proposals().contains_gap(id) { + gaps.push(tx_entry.clone()); + true + } else { + false + } + }); + + for entry in proposals { let cached = CacheEntry::completed(entry.cycles, entry.fee); - let tx_hash = entry.transaction().hash(); if let Err(e) = tx_pool.proposed_rtx(cached, entry.size, entry.timestamp, Arc::clone(&entry.rtx)) { - debug!("Failed to add proposed tx {}, reason: {}", tx_hash, e); callbacks.call_reject(tx_pool, &entry, e.clone()); } else { callbacks.call_proposed(tx_pool, &entry, false); @@ -1094,7 +1086,6 @@ fn _update_tx_pool_for_reorg( } for entry in gaps { - debug!("tx move to gap {}", entry.transaction().hash()); let tx_hash = entry.transaction().hash(); let cached = CacheEntry::completed(entry.cycles, entry.fee); if let Err(e) = diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index 5dc54018b4..a008502c62 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -3,6 +3,7 @@ use crate::block_assembler::{self, BlockAssembler}; use crate::callback::{Callback, Callbacks, ProposedCallback, RejectCallback}; use crate::chunk_process::ChunkCommand; +use crate::component::pool_map::{PoolEntry, Status}; use crate::component::{chunk::ChunkQueue, orphan::OrphanPool}; use crate::error::{handle_recv_error, handle_send_cmd_error, handle_try_send_error}; use crate::pool::TxPool; @@ -735,10 +736,18 @@ async fn process(mut service: TxPoolService, message: Message) { }) => { let id = ProposalShortId::from_tx_hash(&hash); let tx_pool = service.tx_pool.read().await; - let ret = if let Some(entry) = tx_pool.proposed.get(&id) { - Ok((TxStatus::Proposed, Some(entry.cycles))) - } else if let Some(entry) = tx_pool.get_entry_from_pending_or_gap(&id) { - Ok((TxStatus::Pending, Some(entry.cycles))) + let ret = if let Some(PoolEntry { + status, + inner: entry, + .. + }) = tx_pool.pool_map.get_by_id(&id) + { + let status = if status == &Status::Proposed { + TxStatus::Proposed + } else { + TxStatus::Pending + }; + Ok((status, Some(entry.cycles))) } else if let Some(ref recent_reject_db) = tx_pool.recent_reject { let recent_reject_result = recent_reject_db.get(&hash); if let Ok(recent_reject) = recent_reject_result { @@ -764,14 +773,18 @@ async fn process(mut service: TxPoolService, message: Message) { }) => { let id = ProposalShortId::from_tx_hash(&hash); let tx_pool = service.tx_pool.read().await; - let ret = if let Some(entry) = tx_pool.proposed.get(&id) { - Ok(TransactionWithStatus::with_proposed( - Some(entry.transaction().clone()), - entry.cycles, - entry.timestamp, - )) - } else if let Some(entry) = tx_pool.get_entry_from_pending_or_gap(&id) { - Ok(TransactionWithStatus::with_pending( + let ret = if let Some(PoolEntry { + status, + inner: entry, + .. + }) = tx_pool.pool_map.get_by_id(&id) + { + let trans_status = if status == &Status::Proposed { + TransactionWithStatus::with_proposed + } else { + TransactionWithStatus::with_pending + }; + Ok(trans_status( Some(entry.transaction().clone()), entry.cycles, entry.timestamp, @@ -900,8 +913,8 @@ impl TxPoolService { TxPoolInfo { tip_hash: tip_header.hash(), tip_number: tip_header.number(), - pending_size: tx_pool.pending.size() + tx_pool.gap.size(), - proposed_size: tx_pool.proposed.size(), + pending_size: tx_pool.pool_map.pending_size(), + proposed_size: tx_pool.pool_map.proposed_size(), orphan_size: orphan.len(), total_tx_size: tx_pool.total_tx_size, total_tx_cycles: tx_pool.total_tx_cycles, @@ -968,7 +981,7 @@ impl TxPoolService { match target { PlugTarget::Pending => { for entry in entries { - tx_pool.add_pending(entry); + tx_pool.add_pending(entry).unwrap(); } } PlugTarget::Proposed => { From 5788226589aec10a471a0836b79e47c47fb2a60e Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 1 Jun 2023 18:12:17 +0800 Subject: [PATCH 055/267] move double spend checking and fix tests --- Cargo.lock | 32 +++--- chain/src/chain.rs | 1 - chain/src/tests/dep_cell.rs | 15 ++- rpc/src/tests/module/pool.rs | 2 +- test/src/main.rs | 8 -- test/src/node.rs | 12 ++ test/src/specs/relay/transaction_relay.rs | 14 ++- test/src/specs/tx_pool/collision.rs | 27 ++--- .../tx_pool/different_txs_with_same_input.rs | 38 +++--- test/src/specs/tx_pool/send_tx_chain.rs | 12 +- tx-pool/Cargo.toml | 3 +- tx-pool/src/component/edges.rs | 4 + tx-pool/src/component/pool_map.rs | 88 +++++++------- tx-pool/src/pool.rs | 108 +++--------------- tx-pool/src/process.rs | 55 ++++----- 15 files changed, 180 insertions(+), 239 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ddd51deff0..0072b84283 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1423,9 +1423,9 @@ dependencies = [ "ckb-types", "ckb-util", "ckb-verification", + "ckb_multi_index_map", "hyper", "lru", - "multi_index_map", "rand 0.8.5", "rustc-hash", "sentry", @@ -1550,6 +1550,21 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83869c9d322de1ddbfde5b54b7376f9a1ac32273c50e21cdd5e8a1bd1a1cf632" +[[package]] +name = "ckb_multi_index_map" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2adba00c3dcb84fc4634c948cf3d24c05ce3193810bfa568effe13ad814f662a" +dependencies = [ + "convert_case 0.6.0", + "proc-macro-error", + "proc-macro2", + "quote", + "rustc-hash", + "slab", + "syn", +] + [[package]] name = "clang-sys" version = "1.3.1" @@ -3125,21 +3140,6 @@ dependencies = [ "faster-hex", ] -[[package]] -name = "multi_index_map" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a58eea8dbf91e7420e0e843535f585491046d6017e669d36cb8342cfa4861e2" -dependencies = [ - "convert_case 0.6.0", - "proc-macro-error", - "proc-macro2", - "quote", - "rustc-hash", - "slab", - "syn", -] - [[package]] name = "native-tls" version = "0.2.11" diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 3323492032..bcef1bfc12 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -560,7 +560,6 @@ impl ChainService { self.proposal_table .insert(blk.header().number(), blk.union_proposal_ids()); } - self.reload_proposal_table(fork); } diff --git a/chain/src/tests/dep_cell.rs b/chain/src/tests/dep_cell.rs index 3f557236fd..3e7b24eed1 100644 --- a/chain/src/tests/dep_cell.rs +++ b/chain/src/tests/dep_cell.rs @@ -436,7 +436,6 @@ fn test_package_txs_with_deps2() { .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } - // skip gap { while Into::::into(block_template.number) != 2 { @@ -461,7 +460,7 @@ fn test_package_txs_with_deps2() { let mut tx_pool_info = tx_pool.get_tx_pool_info().unwrap(); while tx_pool_info.proposed_size != txs.len() { - tx_pool_info = tx_pool.get_tx_pool_info().unwrap() + tx_pool_info = tx_pool.get_tx_pool_info().unwrap(); } // get block template with txs @@ -534,11 +533,11 @@ fn test_package_txs_with_deps_priority() { Capacity::shannons(10000), ); - let txs = vec![tx2.clone(), tx1]; - for tx in &txs { - let ret = tx_pool.submit_local_tx(tx.clone()).unwrap(); - assert!(ret.is_ok(), "submit {} {:?}", tx.proposal_short_id(), ret); - } + let ret = tx_pool.submit_local_tx(tx2.clone()).unwrap(); + assert!(ret.is_ok(), "submit {} {:?}", tx2.proposal_short_id(), ret); + + let ret = tx_pool.submit_local_tx(tx1.clone()).unwrap(); + assert!(ret.is_err(), "submit {} {:?}", tx1.proposal_short_id(), ret); let mut block_template = shared .get_block_template(None, None, None) @@ -548,7 +547,7 @@ fn test_package_txs_with_deps_priority() { // proposal txs { while !(Into::::into(block_template.number) == 1 - && block_template.proposals.len() == 2) + && block_template.proposals.len() == 1) { block_template = shared .get_block_template(None, None, None) diff --git a/rpc/src/tests/module/pool.rs b/rpc/src/tests/module/pool.rs index 7288b91f0f..5349465f4f 100644 --- a/rpc/src/tests/module/pool.rs +++ b/rpc/src/tests/module/pool.rs @@ -172,7 +172,7 @@ fn test_send_transaction_exceeded_maximum_ancestors_count() { parent_tx_hash = tx.hash(); } - suite.wait_block_template_array_ge("proposals", 130); + suite.wait_block_template_array_ge("proposals", 125); // 130 txs will be added to proposal list while store.get_tip_header().unwrap().number() != (tip.number() + 2) { diff --git a/test/src/main.rs b/test/src/main.rs index 2e8a8e8620..f321bfe0a7 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -360,12 +360,6 @@ fn canonicalize_path>(path: P) -> PathBuf { .unwrap_or_else(|_| path.as_ref().to_path_buf()) } -fn _all_specs() -> Vec> { - // This case is not stable right now - //vec![Box::new(PoolResolveConflictAfterReorg)] - vec![Box::new(RemoveConflictFromPending)] -} - fn all_specs() -> Vec> { let mut specs: Vec> = vec![ Box::new(BlockSyncFromOne), @@ -408,8 +402,6 @@ fn all_specs() -> Vec> { Box::new(GetRawTxPool), Box::new(PoolReconcile), Box::new(PoolResurrect), - //TODO: (yukang) - //Box::new(PoolResolveConflictAfterReorg), Box::new(InvalidHeaderDep), #[cfg(not(target_os = "windows"))] Box::new(PoolPersisted), diff --git a/test/src/node.rs b/test/src/node.rs index da5d7421fe..acf6c19daf 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -5,6 +5,7 @@ use crate::{SYSTEM_CELL_ALWAYS_FAILURE_INDEX, SYSTEM_CELL_ALWAYS_SUCCESS_INDEX}; use ckb_app_config::CKBAppConfig; use ckb_chain_spec::consensus::Consensus; use ckb_chain_spec::ChainSpec; +use ckb_error::AnyError; use ckb_jsonrpc_types::TxStatus; use ckb_jsonrpc_types::{BlockFilter, BlockTemplate, TxPoolInfo}; use ckb_logger::{debug, error}; @@ -353,6 +354,17 @@ impl Node { .send_transaction(transaction.data().into()) } + pub fn submit_transaction_with_result( + &self, + transaction: &TransactionView, + ) -> Result { + let res = self + .rpc_client() + .send_transaction_result(transaction.data().into())? + .pack(); + Ok(res) + } + pub fn get_transaction(&self, tx_hash: Byte32) -> TxStatus { self.rpc_client().get_transaction(tx_hash).tx_status } diff --git a/test/src/specs/relay/transaction_relay.rs b/test/src/specs/relay/transaction_relay.rs index 39cb05f3da..e5af42eb45 100644 --- a/test/src/specs/relay/transaction_relay.rs +++ b/test/src/specs/relay/transaction_relay.rs @@ -5,7 +5,6 @@ use crate::util::transaction::{always_success_transaction, always_success_transa use crate::utils::{build_relay_tx_hashes, build_relay_txs, sleep, wait_until}; use crate::{Net, Node, Spec}; use ckb_constant::sync::RETRY_ASK_TX_TIMEOUT_INCREASE; -use ckb_jsonrpc_types::Status; use ckb_logger::info; use ckb_network::SupportProtocols; use ckb_types::{ @@ -234,10 +233,15 @@ impl Spec for TransactionRelayConflict { .build(); node0.rpc_client().send_transaction(tx1.data().into()); sleep(6); - node0.rpc_client().send_transaction(tx2.data().into()); + + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err()); + eprintln!("res: {:?}", res); let relayed = wait_until(20, || { - [tx1.hash(), tx2.hash()].iter().all(|hash| { + [tx1.hash()].iter().all(|hash| { node1 .rpc_client() .get_transaction(hash.clone()) @@ -247,13 +251,14 @@ impl Spec for TransactionRelayConflict { }); assert!(relayed, "all transactions should be relayed"); - let proposed = node1.mine_with_blocking(|template| template.proposals.len() != 3); + let proposed = node1.mine_with_blocking(|template| template.proposals.len() != 2); node1.mine_with_blocking(|template| template.number.value() != (proposed + 1)); waiting_for_sync(nodes); node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); + /* let ret = node1 .rpc_client() .get_transaction_with_verbosity(tx2.hash(), 1); @@ -289,5 +294,6 @@ impl Spec for TransactionRelayConflict { .is_some() }); assert!(relayed, "Transaction should be relayed to node1"); + */ } } diff --git a/test/src/specs/tx_pool/collision.rs b/test/src/specs/tx_pool/collision.rs index c349ddbb34..7dc606d459 100644 --- a/test/src/specs/tx_pool/collision.rs +++ b/test/src/specs/tx_pool/collision.rs @@ -1,6 +1,4 @@ -use crate::util::check::{ - is_transaction_committed, is_transaction_pending, is_transaction_rejected, -}; +use crate::util::check::{is_transaction_committed, is_transaction_pending}; use crate::utils::{assert_send_transaction_fail, blank, commit, propose}; use crate::{Node, Spec}; use ckb_types::bytes::Bytes; @@ -67,7 +65,8 @@ impl Spec for ConflictInPending { let (txa, txb) = conflict_transactions(node); node.submit_transaction(&txa); - node.submit_transaction(&txb); + let res = node.submit_transaction_with_result(&txb); + assert!(res.is_err()); node.submit_block(&propose(node, &[&txa])); (0..window.closest()).for_each(|_| { @@ -89,13 +88,15 @@ impl Spec for ConflictInGap { let (txa, txb) = conflict_transactions(node); node.submit_transaction(&txa); - node.submit_transaction(&txb); + let res = node.submit_transaction_with_result(&txb); + assert!(res.is_err()); node.submit_block(&propose(node, &[&txa])); (0..window.closest() - 1).for_each(|_| { node.submit_block(&blank(node)); }); - node.submit_block(&propose(node, &[&txb])); + + //node.submit_block(&propose(node, &[&txb])); let block = node.new_block(None, None, None); assert_eq!(&[txa], &block.transactions()[1..]); @@ -114,7 +115,8 @@ impl Spec for ConflictInProposed { let (txa, txb) = conflict_transactions(node); node.submit_transaction(&txa); - node.submit_transaction(&txb); + let res = node.submit_transaction_with_result(&txb); + assert!(res.is_err()); node.submit_block(&propose(node, &[&txa, &txb])); node.mine(window.farthest()); @@ -153,12 +155,13 @@ impl Spec for RemoveConflictFromPending { conflict_transactions_with_capacity(node, Bytes::new(), capacity_bytes!(1000)); let txc = node.new_transaction_with_since_capacity(txb.hash(), 0, capacity_bytes!(100)); node.submit_transaction(&txa); - node.submit_transaction(&txb); - node.submit_transaction(&txc); + let res = node.submit_transaction_with_result(&txb); + assert!(res.is_err()); + + let res = node.submit_transaction_with_result(&txc); + assert!(res.is_err()); assert!(is_transaction_pending(node, &txa)); - assert!(is_transaction_pending(node, &txb)); - assert!(is_transaction_pending(node, &txc)); node.submit_block(&propose(node, &[&txa])); (0..window.closest()).for_each(|_| { @@ -168,8 +171,6 @@ impl Spec for RemoveConflictFromPending { node.wait_for_tx_pool(); assert!(is_transaction_committed(node, &txa)); - assert!(is_transaction_rejected(node, &txb)); - assert!(is_transaction_rejected(node, &txc)); } } diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index 443cea4cb5..8539c7c8c6 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -28,10 +28,14 @@ impl Spec for DifferentTxsWithSameInput { .as_advanced_builder() .set_outputs(vec![output]) .build(); + node0.rpc_client().send_transaction(tx1.data().into()); - node0.rpc_client().send_transaction(tx2.data().into()); + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err(), "tx2 should be rejected"); - node0.mine_with_blocking(|template| template.proposals.len() != 3); + node0.mine_with_blocking(|template| template.proposals.len() != 2); node0.mine_with_blocking(|template| template.number.value() != 14); node0.mine_with_blocking(|template| template.transactions.len() != 2); @@ -47,11 +51,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(!commit_txs_hash.contains(&tx2.hash())); // when tx1 was confirmed, tx2 should be rejected - let ret = node0.rpc_client().get_transaction(tx2.hash()); - assert!( - matches!(ret.tx_status.status, Status::Rejected), - "tx2 should be rejected" - ); + // let ret = node0.rpc_client().get_transaction(tx2.hash()); + // assert!( + // matches!(ret.tx_status.status, Status::Rejected), + // "tx2 should be rejected" + // ); // verbosity = 1 let ret = node0 @@ -60,11 +64,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_none()); assert!(matches!(ret.tx_status.status, Status::Committed)); - let ret = node0 - .rpc_client() - .get_transaction_with_verbosity(tx2.hash(), 1); - assert!(ret.transaction.is_none()); - assert!(matches!(ret.tx_status.status, Status::Rejected)); + // let ret = node0 + // .rpc_client() + // .get_transaction_with_verbosity(tx2.hash(), 1); + // assert!(ret.transaction.is_none()); + // assert!(matches!(ret.tx_status.status, Status::Rejected)); // verbosity = 2 let ret = node0 @@ -73,10 +77,10 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_some()); assert!(matches!(ret.tx_status.status, Status::Committed)); - let ret = node0 - .rpc_client() - .get_transaction_with_verbosity(tx2.hash(), 2); - assert!(ret.transaction.is_none()); - assert!(matches!(ret.tx_status.status, Status::Rejected)); + // let ret = node0 + // .rpc_client() + // .get_transaction_with_verbosity(tx2.hash(), 2); + // assert!(ret.transaction.is_none()); + // assert!(matches!(ret.tx_status.status, Status::Rejected)); } } diff --git a/test/src/specs/tx_pool/send_tx_chain.rs b/test/src/specs/tx_pool/send_tx_chain.rs index 3065b690be..f27030a5a1 100644 --- a/test/src/specs/tx_pool/send_tx_chain.rs +++ b/test/src/specs/tx_pool/send_tx_chain.rs @@ -33,10 +33,15 @@ impl Spec for SendTxChain { assert_eq!(txs.len(), MAX_ANCESTORS_COUNT + 1); // send tx chain info!("submit fresh txs chain to node0"); - for tx in txs[..=MAX_ANCESTORS_COUNT].iter() { + for tx in txs[..=MAX_ANCESTORS_COUNT - 1].iter() { let ret = node0.rpc_client().send_transaction_result(tx.data().into()); assert!(ret.is_ok()); } + // The last one will be rejected + let ret = node0 + .rpc_client() + .send_transaction_result(txs[MAX_ANCESTORS_COUNT].data().into()); + assert!(ret.is_err()); node0.mine(3); @@ -76,6 +81,11 @@ impl Spec for SendTxChain { .rpc_client() .send_transaction_result(txs.last().unwrap().data().into()); assert!(ret.is_err()); + assert!(ret + .err() + .unwrap() + .to_string() + .contains("Transaction exceeded maximum ancestors count limit")); } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index 0ce764ecb0..d8a16bff50 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -36,8 +36,7 @@ sentry = { version = "0.26.0", optional = true } serde_json = "1.0" rand = "0.8.4" hyper = { version = "0.14", features = ["http1", "client", "tcp"] } -#multi_index_map = { git = "https://github.com/wyjin/multi_index_map.git", branch = "master" } -multi_index_map = "0.5.0" +ckb_multi_index_map = "0.0.1" # ckb team fork crate slab = "0.4" rustc-hash = "1.1" tokio-util = "0.7.8" diff --git a/tx-pool/src/component/edges.rs b/tx-pool/src/component/edges.rs index decf98c1a5..129b33c0a2 100644 --- a/tx-pool/src/component/edges.rs +++ b/tx-pool/src/component/edges.rs @@ -91,6 +91,10 @@ impl Edges { self.outputs.get(out_point) } + pub(crate) fn remove_deps(&mut self, out_point: &OutPoint) -> Option> { + self.deps.remove(out_point) + } + pub(crate) fn insert_deps(&mut self, out_point: OutPoint, txid: ProposalShortId) { self.deps.entry(out_point).or_default().insert(txid); } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index f9b00b75e3..50c5c393dc 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -9,6 +9,7 @@ use crate::error::Reject; use crate::TxEntry; use ckb_logger::trace; +use ckb_multi_index_map::MultiIndexMap; use ckb_types::core::error::OutPointError; use ckb_types::packed::OutPoint; use ckb_types::{ @@ -20,7 +21,6 @@ use ckb_types::{ core::cell::{CellMetaBuilder, CellProvider, CellStatus}, prelude::*, }; -use multi_index_map::MultiIndexMap; use std::borrow::Cow; use std::collections::HashSet; @@ -242,40 +242,33 @@ impl PoolMap { conflicts } - /// pending gap and proposed store the inputs and deps in edges, it's removed in `remove_entry` - /// here we use `input_pts_iter` and `related_dep_out_points` to find the conflict txs pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let mut to_be_removed = Vec::new(); + let inputs = tx.input_pts_iter(); let mut conflicts = Vec::new(); - for (_, entry) in self.entries.iter() { - let entry = &entry.inner; - let tx_id = entry.proposal_short_id(); - let tx_inputs = entry.transaction().input_pts_iter(); - let deps = entry.related_dep_out_points(); - - // tx input conflict - for i in tx_inputs { - if tx.input_pts_iter().any(|j| i == j) { - to_be_removed.push((tx_id.to_owned(), i.clone())); + for i in inputs { + if let Some(id) = self.edges.remove_input(&i) { + let entries = self.remove_entry_and_descendants(&id); + if !entries.is_empty() { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + let rejects = std::iter::repeat(reject).take(entries.len()); + conflicts.extend(entries.into_iter().zip(rejects)); } } - // tx deps conflict - for i in deps { - if tx.input_pts_iter().any(|j| *i == j) { - to_be_removed.push((tx_id.to_owned(), i.clone())); + // deps consumed + if let Some(x) = self.edges.remove_deps(&i) { + for id in x { + let entries = self.remove_entry_and_descendants(&id); + if !entries.is_empty() { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + let rejects = std::iter::repeat(reject).take(entries.len()); + conflicts.extend(entries.into_iter().zip(rejects)); + } } } } - for (tx_id, input) in to_be_removed.iter() { - let entries = self.remove_entry_and_descendants(tx_id); - let reject = Reject::Resolve(OutPointError::Dead(input.to_owned())); - let rejects = std::iter::repeat(reject).take(entries.len()); - conflicts.extend(entries.into_iter().zip(rejects)); - } - conflicts } @@ -297,6 +290,7 @@ impl PoolMap { } } + #[cfg(test)] pub(crate) fn remove_entries_by_filter bool>( &mut self, status: &Status, @@ -360,10 +354,11 @@ impl PoolMap { EntryOp::Add => child.add_entry_weight(parent), } let short_id = child.proposal_short_id(); - //TODO: optimize it - self.entries.remove_by_id(&short_id); - self.insert_entry(&child, entry.status) - .expect("pool consistent"); + self.entries.modify_by_id(&short_id, |e| { + e.score = child.as_score_key(); + e.evict_key = child.as_evict_key(); + e.inner = child; + }); } } @@ -437,7 +432,7 @@ impl PoolMap { } /// Record the links for entry - fn record_entry_links(&mut self, entry: &mut TxEntry, status: &Status) -> Result { + fn record_entry_links(&mut self, entry: &mut TxEntry) -> Result { // find in pool parents let mut parents: HashSet = HashSet::with_capacity( entry.transaction().inputs().len() + entry.transaction().cell_deps().len(), @@ -476,7 +471,8 @@ impl PoolMap { .expect("pool consistent"); entry.add_entry_weight(&ancestor.inner); } - if *status == Status::Proposed && entry.ancestors_count > self.max_ancestors_count { + if entry.ancestors_count > self.max_ancestors_count { + eprintln!("debug: exceeded maximum ancestors count"); return Err(Reject::ExceededMaximumAncestorsCount); } @@ -534,13 +530,25 @@ impl PoolMap { return Ok(false); } trace!("add_{:?} {}", status, entry.transaction().hash()); - self.record_entry_links(&mut entry, &status)?; + self.record_entry_links(&mut entry)?; self.insert_entry(&entry, status)?; self.record_entry_deps(&entry); self.record_entry_edges(&entry); Ok(true) } + /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` + pub(crate) fn set_entry(&mut self, entry: &TxEntry, status: Status) { + let tx_short_id = entry.proposal_short_id(); + let _ = self + .entries + .get_by_id(&tx_short_id) + .expect("unconsistent pool"); + self.entries.modify_by_id(&tx_short_id, |e| { + e.status = status; + }); + } + fn insert_entry(&mut self, entry: &TxEntry, status: Status) -> Result { let tx_short_id = entry.proposal_short_id(); let score = entry.as_score_key(); @@ -558,10 +566,8 @@ impl PoolMap { impl CellProvider for PoolMap { fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - if let Some(id) = self.edges.get_input_ref(out_point) { - if self.has_proposed(id) { - return CellStatus::Dead; - } + if self.edges.get_input_ref(out_point).is_some() { + return CellStatus::Dead; } match self.edges.get_output_ref(out_point) { Some(OutPointStatus::UnConsumed) => { @@ -571,7 +577,7 @@ impl CellProvider for PoolMap { .build(); CellStatus::live_cell(cell_meta) } - Some(OutPointStatus::Consumed(id)) if self.has_proposed(id) => CellStatus::Dead, + Some(OutPointStatus::Consumed(_id)) => CellStatus::Dead, _ => CellStatus::Unknown, } } @@ -579,13 +585,11 @@ impl CellProvider for PoolMap { impl CellChecker for PoolMap { fn is_live(&self, out_point: &OutPoint) -> Option { - if let Some(id) = self.edges.get_input_ref(out_point) { - if self.has_proposed(id) { - return Some(false); - } + if self.edges.get_input_ref(out_point).is_some() { + return Some(false); } match self.edges.get_output_ref(out_point) { - Some(OutPointStatus::Consumed(id)) if self.has_proposed(id) => Some(false), + Some(OutPointStatus::Consumed(_id)) => Some(false), Some(OutPointStatus::UnConsumed) => Some(true), _ => None, } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index c01ad22b64..41e766e630 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -14,12 +14,10 @@ use ckb_store::ChainStore; use ckb_types::{ core::{ cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, - cell::{CellChecker, CellMetaBuilder, CellProvider, CellStatus}, tx_pool::{TxPoolEntryInfo, TxPoolIds}, Cycle, TransactionView, UncleBlockView, }, - packed::{Byte32, OutPoint, ProposalShortId}, - prelude::*, + packed::{Byte32, ProposalShortId}, }; use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; @@ -130,6 +128,14 @@ impl TxPool { self.pool_map.get_by_id(id).is_some() } + pub(crate) fn set_entry_proposed(&mut self, entry: &TxEntry) { + self.pool_map.set_entry(entry, Status::Proposed) + } + + pub(crate) fn set_entry_gap(&mut self, entry: &TxEntry) { + self.pool_map.set_entry(entry, Status::Gap) + } + /// Returns tx with cycles corresponding to the id. pub(crate) fn get_tx_with_cycles( &self, @@ -269,51 +275,26 @@ impl TxPool { false } - pub(crate) fn check_rtx_from_pending_and_proposed( - &self, - rtx: &ResolvedTransaction, - ) -> Result<(), Reject> { + pub(crate) fn check_rtx_from_pool(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { let snapshot = self.snapshot(); - let proposal_checker = OverlayCellChecker::new(&self.pool_map, snapshot); - let checker = OverlayCellChecker::new(self, &proposal_checker); + let checker = OverlayCellChecker::new(&self.pool_map, snapshot); let mut seen_inputs = HashSet::new(); rtx.check(&mut seen_inputs, &checker, snapshot) .map_err(Reject::Resolve) } - pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { - let snapshot = self.snapshot(); - let proposal_checker = OverlayCellChecker::new(&self.pool_map, snapshot); - let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &proposal_checker, snapshot) - .map_err(Reject::Resolve) - } - - pub(crate) fn resolve_tx_from_pending_and_proposed( + pub(crate) fn resolve_tx_from_pool( &self, tx: TransactionView, ) -> Result, Reject> { let snapshot = self.snapshot(); - let proposed_provider = OverlayCellProvider::new(&self.pool_map, snapshot); - let provider = OverlayCellProvider::new(self, &proposed_provider); + let provider = OverlayCellProvider::new(&self.pool_map, snapshot); let mut seen_inputs = HashSet::new(); resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) .map(Arc::new) .map_err(Reject::Resolve) } - pub(crate) fn resolve_tx_from_proposed( - &self, - tx: TransactionView, - ) -> Result, Reject> { - let snapshot = self.snapshot(); - let proposed_provider = OverlayCellProvider::new(&self.pool_map, snapshot); - let mut seen_inputs = HashSet::new(); - resolve_transaction(tx, &mut seen_inputs, &proposed_provider, snapshot) - .map(Arc::new) - .map_err(Reject::Resolve) - } - pub(crate) fn gap_rtx( &mut self, cache_entry: CacheEntry, @@ -324,7 +305,6 @@ impl TxPool { let snapshot = self.cloned_snapshot(); let tip_header = snapshot.tip_header(); let tx_env = Arc::new(TxVerifyEnv::new_proposed(tip_header, 0)); - self.check_rtx_from_pending_and_proposed(&rtx)?; let max_cycles = snapshot.consensus().max_block_cycles(); let verified = verify_rtx( @@ -338,12 +318,8 @@ impl TxPool { let entry = TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); - let tx_hash = entry.transaction().hash(); - if self.add_gap(entry).unwrap_or(false) { - Ok(CacheEntry::Completed(verified)) - } else { - Err(Reject::Duplicated(tx_hash)) - } + self.set_entry_gap(&entry); + Ok(CacheEntry::Completed(verified)) } pub(crate) fn proposed_rtx( @@ -356,7 +332,6 @@ impl TxPool { let snapshot = self.cloned_snapshot(); let tip_header = snapshot.tip_header(); let tx_env = Arc::new(TxVerifyEnv::new_proposed(tip_header, 1)); - self.check_rtx_from_proposed(&rtx)?; let max_cycles = snapshot.consensus().max_block_cycles(); let verified = verify_rtx( @@ -375,11 +350,8 @@ impl TxPool { tx_hash, entry.proposal_short_id() ); - if self.add_proposed(entry)? { - Ok(CacheEntry::Completed(verified)) - } else { - Err(Reject::Duplicated(tx_hash)) - } + self.set_entry_proposed(&entry); + Ok(CacheEntry::Completed(verified)) } /// Get to-be-proposal transactions that may be included in the next block. @@ -538,49 +510,3 @@ impl TxPool { } } } - -/// This is a hack right now, we use `CellProvider` to check if a transaction is in `Pending` or `Gap` status. -/// To make sure the behavior is same as before, we need to remove this if we have finished replace-by-fee strategy. -impl CellProvider for TxPool { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - let tx_hash = out_point.tx_hash(); - match self - .pool_map - .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) - { - Some(pool_entry) if pool_entry.status != Status::Proposed => { - match pool_entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - { - Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - None => CellStatus::Unknown, - } - } - _ => CellStatus::Unknown, - } - } -} - -impl CellChecker for TxPool { - fn is_live(&self, out_point: &OutPoint) -> Option { - let tx_hash = out_point.tx_hash(); - match self - .pool_map - .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) - { - Some(pool_entry) if pool_entry.status != Status::Proposed => pool_entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - .map(|_| true), - _ => None, - } - } -} diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 00dde6d264..354e56cac0 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -966,12 +966,7 @@ fn check_rtx( } else { TxStatus::Fresh }; - if tx_status == TxStatus::Proposed { - tx_pool.check_rtx_from_proposed(rtx) - } else { - tx_pool.check_rtx_from_pending_and_proposed(rtx) - } - .map(|_| tx_status) + tx_pool.check_rtx_from_pool(rtx).map(|_| tx_status) } fn resolve_tx(tx_pool: &TxPool, snapshot: &Snapshot, tx: TransactionView) -> ResolveResult { @@ -983,12 +978,7 @@ fn resolve_tx(tx_pool: &TxPool, snapshot: &Snapshot, tx: TransactionView) -> Res } else { TxStatus::Fresh }; - if tx_status == TxStatus::Proposed { - tx_pool.resolve_tx_from_proposed(tx) - } else { - tx_pool.resolve_tx_from_pending_and_proposed(tx) - } - .map(|rtx| (rtx, tx_status)) + tx_pool.resolve_tx_from_pool(tx).map(|rtx| (rtx, tx_status)) } fn _submit_entry( @@ -1049,32 +1039,26 @@ fn _update_tx_pool_for_reorg( let mut proposals = Vec::new(); let mut gaps = Vec::new(); - tx_pool - .pool_map - .remove_entries_by_filter(&Status::Gap, |id, tx_entry| { - if snapshot.proposals().contains_proposed(id) { - proposals.push(tx_entry.clone()); - true - } else { - false - } - }); + for entry in tx_pool.pool_map.entries.get_by_status(&Status::Gap) { + let e = &entry.inner; + let short_id = e.proposal_short_id(); + if snapshot.proposals().contains_proposed(&short_id) { + proposals.push(e.clone()); + } + } - tx_pool - .pool_map - .remove_entries_by_filter(&Status::Pending, |id, tx_entry| { - if snapshot.proposals().contains_proposed(id) { - proposals.push(tx_entry.clone()); - true - } else if snapshot.proposals().contains_gap(id) { - gaps.push(tx_entry.clone()); - true - } else { - false - } - }); + for entry in tx_pool.pool_map.entries.get_by_status(&Status::Pending) { + let e = &entry.inner; + let short_id = e.proposal_short_id(); + if snapshot.proposals().contains_proposed(&short_id) { + proposals.push(e.clone()); + } else if snapshot.proposals().contains_gap(&short_id) { + gaps.push(e.clone()); + } + } for entry in proposals { + debug!("begin to proposed: {:x}", entry.transaction().hash()); let cached = CacheEntry::completed(entry.cycles, entry.fee); if let Err(e) = tx_pool.proposed_rtx(cached, entry.size, entry.timestamp, Arc::clone(&entry.rtx)) @@ -1086,6 +1070,7 @@ fn _update_tx_pool_for_reorg( } for entry in gaps { + debug!("begin to gap: {:x}", entry.transaction().hash()); let tx_hash = entry.transaction().hash(); let cached = CacheEntry::completed(entry.cycles, entry.fee); if let Err(e) = From 92bc7339264813439cab9de9d2db52738cd2de9a Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 5 Jun 2023 16:51:52 +0800 Subject: [PATCH 056/267] fix makefile so that we may run a specific integration test with environment name --- Makefile | 2 +- test/src/main.rs | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 7d7adb98fb..3d8a92ba2b 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ MOLC_VERSION := 0.7.5 VERBOSE := $(if ${CI},--verbose,) CLIPPY_OPTS := -D warnings -D clippy::clone_on_ref_ptr -D clippy::enum_glob_use -D clippy::fallible_impl_from \ -A clippy::mutable_key_type -A clippy::upper_case_acronyms -CKB_TEST_ARGS := ${CKB_TEST_ARGS} -c 4 +CKB_TEST_ARGS := -c 4 ${CKB_TEST_ARGS} CKB_FEATURES ?= deadlock_detection,with_sentry ALL_FEATURES := deadlock_detection,with_sentry,with_dns_seeding,profiling,march-native CKB_BENCH_FEATURES ?= ci diff --git a/test/src/main.rs b/test/src/main.rs index f321bfe0a7..7d095cbef0 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -294,8 +294,12 @@ fn clap_app() -> App<'static> { .value_name("SECONDS") .help("Exit when total running time exceeds this limit"), ) - .arg(Arg::with_name("list-specs").long("list-specs")) - .arg(Arg::with_name("specs").multiple(true)) + .arg( + Arg::with_name("list-specs") + .long("list-specs") + .help("list all specs"), + ) + .arg(Arg::with_name("specs").multiple(true).help("Specs to run")) .arg( Arg::with_name("concurrent") .short('c') From 180159e68331f92a0d904c27e3ff698e8f6033d3 Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 5 Jun 2023 18:22:15 +0800 Subject: [PATCH 057/267] fix TransactionRelayConflict --- test/src/specs/relay/transaction_relay.rs | 16 ++++++++++++---- tx-pool/src/component/pool_map.rs | 3 ++- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/test/src/specs/relay/transaction_relay.rs b/test/src/specs/relay/transaction_relay.rs index e5af42eb45..137d36964a 100644 --- a/test/src/specs/relay/transaction_relay.rs +++ b/test/src/specs/relay/transaction_relay.rs @@ -5,6 +5,7 @@ use crate::util::transaction::{always_success_transaction, always_success_transa use crate::utils::{build_relay_tx_hashes, build_relay_txs, sleep, wait_until}; use crate::{Net, Node, Spec}; use ckb_constant::sync::RETRY_ASK_TX_TIMEOUT_INCREASE; +use ckb_jsonrpc_types::Status; use ckb_logger::info; use ckb_network::SupportProtocols; use ckb_types::{ @@ -234,11 +235,18 @@ impl Spec for TransactionRelayConflict { node0.rpc_client().send_transaction(tx1.data().into()); sleep(6); + let res = node0.rpc_client().get_transaction(tx1.hash()); + assert!(matches!(res.tx_status.status, Status::Pending)); + let res = node0 .rpc_client() .send_transaction_result(tx2.data().into()); assert!(res.is_err()); - eprintln!("res: {:?}", res); + assert!(res + .err() + .unwrap() + .to_string() + .contains("TransactionFailedToResolve: Resolve failed Dead")); let relayed = wait_until(20, || { [tx1.hash()].iter().all(|hash| { @@ -258,11 +266,10 @@ impl Spec for TransactionRelayConflict { node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); - /* let ret = node1 .rpc_client() .get_transaction_with_verbosity(tx2.hash(), 1); - assert!(matches!(ret.tx_status.status, Status::Rejected)); + assert!(matches!(ret.tx_status.status, Status::Unknown)); node0.remove_transaction(tx1.hash()); node0.remove_transaction(tx2.hash()); @@ -284,9 +291,11 @@ impl Spec for TransactionRelayConflict { let relayed = wait_until(10, || { // re-broadcast + // TODO: (yukang) double comfirm this behavior let _ = node1 .rpc_client() .send_transaction_result(tx2.data().into()); + node0 .rpc_client() .get_transaction(tx2.hash()) @@ -294,6 +303,5 @@ impl Spec for TransactionRelayConflict { .is_some() }); assert!(relayed, "Transaction should be relayed to node1"); - */ } } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 50c5c393dc..89e4d0980f 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -60,6 +60,7 @@ impl MultiIndexPoolEntryMap { pub fn score_sorted_iter(&self) -> impl Iterator { // Note: multi_index don't support reverse order iteration now // so we need to collect and reverse + // TODO: @wyjin will add reverse order iteration support for multi_index let entries = self .iter_by_score() .filter(|entry| entry.status == Status::Proposed) @@ -202,7 +203,7 @@ impl PoolMap { let mut removed = vec![]; removed_ids.extend(self.calc_descendants(id)); - // update links state for remove + // update links state for remove, so that we won't update_descendants_index_key in remove_entry for id in &removed_ids { self.remove_entry_links(id); } From ea00a950321e7ddfc60bf9a55f317390674dadfc Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 6 Jun 2023 10:32:57 +0800 Subject: [PATCH 058/267] upgrade multi_index_map for iterator rev --- Cargo.lock | 4 +- tx-pool/Cargo.toml | 2 +- tx-pool/src/component/pool_map.rs | 70 +++++++++++++------------------ tx-pool/src/pool.rs | 2 +- 4 files changed, 34 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0072b84283..70723dc6bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1552,9 +1552,9 @@ checksum = "83869c9d322de1ddbfde5b54b7376f9a1ac32273c50e21cdd5e8a1bd1a1cf632" [[package]] name = "ckb_multi_index_map" -version = "0.0.1" +version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2adba00c3dcb84fc4634c948cf3d24c05ce3193810bfa568effe13ad814f662a" +checksum = "53c20823dfd9f9a8e30faa3b0bdcab4801fb2544957586fada3884c78dcdf38b" dependencies = [ "convert_case 0.6.0", "proc-macro-error", diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index d8a16bff50..04e465561a 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -36,7 +36,7 @@ sentry = { version = "0.26.0", optional = true } serde_json = "1.0" rand = "0.8.4" hyper = { version = "0.14", features = ["http1", "client", "tcp"] } -ckb_multi_index_map = "0.0.1" # ckb team fork crate +ckb_multi_index_map = "0.0.2" # ckb team fork crate slab = "0.4" rustc-hash = "1.1" tokio-util = "0.7.8" diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 89e4d0980f..dd6b471e7e 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -55,20 +55,6 @@ pub struct PoolEntry { pub inner: TxEntry, } -impl MultiIndexPoolEntryMap { - /// sorted by ancestor score from higher to lower - pub fn score_sorted_iter(&self) -> impl Iterator { - // Note: multi_index don't support reverse order iteration now - // so we need to collect and reverse - // TODO: @wyjin will add reverse order iteration support for multi_index - let entries = self - .iter_by_score() - .filter(|entry| entry.status == Status::Proposed) - .collect::>(); - entries.into_iter().rev().map(move |entry| &entry.inner) - } -} - pub struct PoolMap { /// The pool entries with different kinds of sort strategies pub(crate) entries: MultiIndexPoolEntryMap, @@ -150,7 +136,11 @@ impl PoolMap { } pub(crate) fn score_sorted_iter(&self) -> impl Iterator { - self.entries.score_sorted_iter() + self.entries + .iter_by_score() + .rev() + .filter(|entry| entry.status == Status::Proposed) + .map(|entry| &entry.inner) } pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { @@ -187,6 +177,31 @@ impl PoolMap { }) } + pub(crate) fn add_entry(&mut self, mut entry: TxEntry, status: Status) -> Result { + let tx_short_id = entry.proposal_short_id(); + if self.entries.get_by_id(&tx_short_id).is_some() { + return Ok(false); + } + trace!("add_{:?} {}", status, entry.transaction().hash()); + self.record_entry_links(&mut entry)?; + self.insert_entry(&entry, status)?; + self.record_entry_deps(&entry); + self.record_entry_edges(&entry); + Ok(true) + } + + /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` + pub(crate) fn set_entry(&mut self, entry: &TxEntry, status: Status) { + let tx_short_id = entry.proposal_short_id(); + let _ = self + .entries + .get_by_id(&tx_short_id) + .expect("unconsistent pool"); + self.entries.modify_by_id(&tx_short_id, |e| { + e.status = status; + }); + } + pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { if let Some(entry) = self.entries.remove_by_id(id) { self.update_descendants_index_key(&entry.inner, EntryOp::Remove); @@ -525,31 +540,6 @@ impl PoolMap { self.edges.header_deps.remove(&id); } - pub(crate) fn add_entry(&mut self, mut entry: TxEntry, status: Status) -> Result { - let tx_short_id = entry.proposal_short_id(); - if self.entries.get_by_id(&tx_short_id).is_some() { - return Ok(false); - } - trace!("add_{:?} {}", status, entry.transaction().hash()); - self.record_entry_links(&mut entry)?; - self.insert_entry(&entry, status)?; - self.record_entry_deps(&entry); - self.record_entry_edges(&entry); - Ok(true) - } - - /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` - pub(crate) fn set_entry(&mut self, entry: &TxEntry, status: Status) { - let tx_short_id = entry.proposal_short_id(); - let _ = self - .entries - .get_by_id(&tx_short_id) - .expect("unconsistent pool"); - self.entries.modify_by_id(&tx_short_id, |e| { - e.status = status; - }); - } - fn insert_entry(&mut self, entry: &TxEntry, status: Status) -> Result { let tx_short_id = entry.proposal_short_id(); let score = entry.as_score_key(); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 41e766e630..fe58a3abe7 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -356,7 +356,7 @@ impl TxPool { /// Get to-be-proposal transactions that may be included in the next block. /// TODO: do we need to consider the something like score, so that we can - /// provide best transactions to be proposed. + /// provide best transactions to be proposed. pub(crate) fn get_proposals( &self, limit: usize, From 9d375c8693f09055ee8c86bbce29d16065e095c1 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 6 Jun 2023 10:53:29 +0800 Subject: [PATCH 059/267] enable PoolResolveConflictAfterReorg --- test/src/main.rs | 1 + test/src/specs/tx_pool/collision.rs | 12 ++++++-- .../tx_pool/different_txs_with_same_input.rs | 30 +++++++++---------- 3 files changed, 25 insertions(+), 18 deletions(-) diff --git a/test/src/main.rs b/test/src/main.rs index 7d095cbef0..00cc5a45fd 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -406,6 +406,7 @@ fn all_specs() -> Vec> { Box::new(GetRawTxPool), Box::new(PoolReconcile), Box::new(PoolResurrect), + Box::new(PoolResolveConflictAfterReorg), Box::new(InvalidHeaderDep), #[cfg(not(target_os = "windows"))] Box::new(PoolPersisted), diff --git a/test/src/specs/tx_pool/collision.rs b/test/src/specs/tx_pool/collision.rs index 7dc606d459..0d93697c03 100644 --- a/test/src/specs/tx_pool/collision.rs +++ b/test/src/specs/tx_pool/collision.rs @@ -1,4 +1,6 @@ -use crate::util::check::{is_transaction_committed, is_transaction_pending}; +use crate::util::check::{ + is_transaction_committed, is_transaction_pending, is_transaction_rejected, +}; use crate::utils::{assert_send_transaction_fail, blank, commit, propose}; use crate::{Node, Spec}; use ckb_types::bytes::Bytes; @@ -6,7 +8,7 @@ use ckb_types::core::{capacity_bytes, Capacity, TransactionView}; use ckb_types::prelude::*; // Convention: -// * `tx1` and `tx2` are cousin transactions, with the same transaction content, expect the +// * `tx1` and `tx2` are cousin transactions, with the same transaction content, except the // witnesses. Hence `tx1` and `tx2` have the same tx_hash/proposal-id but different witness_hash. pub struct TransactionHashCollisionDifferentWitnessHashes; @@ -95,8 +97,8 @@ impl Spec for ConflictInGap { (0..window.closest() - 1).for_each(|_| { node.submit_block(&blank(node)); }); + node.submit_block(&propose(node, &[&txb])); - //node.submit_block(&propose(node, &[&txb])); let block = node.new_block(None, None, None); assert_eq!(&[txa], &block.transactions()[1..]); @@ -162,6 +164,8 @@ impl Spec for RemoveConflictFromPending { assert!(res.is_err()); assert!(is_transaction_pending(node, &txa)); + assert!(is_transaction_rejected(node, &txb)); + assert!(is_transaction_rejected(node, &txc)); node.submit_block(&propose(node, &[&txa])); (0..window.closest()).for_each(|_| { @@ -171,6 +175,8 @@ impl Spec for RemoveConflictFromPending { node.wait_for_tx_pool(); assert!(is_transaction_committed(node, &txa)); + assert!(is_transaction_rejected(node, &txb)); + assert!(is_transaction_rejected(node, &txc)); } } diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index 8539c7c8c6..a816bd2eb9 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -51,11 +51,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(!commit_txs_hash.contains(&tx2.hash())); // when tx1 was confirmed, tx2 should be rejected - // let ret = node0.rpc_client().get_transaction(tx2.hash()); - // assert!( - // matches!(ret.tx_status.status, Status::Rejected), - // "tx2 should be rejected" - // ); + let ret = node0.rpc_client().get_transaction(tx2.hash()); + assert!( + matches!(ret.tx_status.status, Status::Rejected), + "tx2 should be rejected" + ); // verbosity = 1 let ret = node0 @@ -64,11 +64,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_none()); assert!(matches!(ret.tx_status.status, Status::Committed)); - // let ret = node0 - // .rpc_client() - // .get_transaction_with_verbosity(tx2.hash(), 1); - // assert!(ret.transaction.is_none()); - // assert!(matches!(ret.tx_status.status, Status::Rejected)); + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx2.hash(), 1); + assert!(ret.transaction.is_none()); + assert!(matches!(ret.tx_status.status, Status::Rejected)); // verbosity = 2 let ret = node0 @@ -77,10 +77,10 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_some()); assert!(matches!(ret.tx_status.status, Status::Committed)); - // let ret = node0 - // .rpc_client() - // .get_transaction_with_verbosity(tx2.hash(), 2); - // assert!(ret.transaction.is_none()); - // assert!(matches!(ret.tx_status.status, Status::Rejected)); + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx2.hash(), 2); + assert!(ret.transaction.is_none()); + assert!(matches!(ret.tx_status.status, Status::Rejected)); } } From d46bd54a2df9602e2b5fc9bdf7740c34e2a1548c Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 00:17:16 +0800 Subject: [PATCH 060/267] comments feedback and code cleanup --- tx-pool/src/component/pool_map.rs | 2 +- tx-pool/src/pool.rs | 2 +- tx-pool/src/process.rs | 10 ++-------- tx-pool/src/service.rs | 6 ++++-- 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index dd6b471e7e..b0d591850b 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -182,7 +182,7 @@ impl PoolMap { if self.entries.get_by_id(&tx_short_id).is_some() { return Ok(false); } - trace!("add_{:?} {}", status, entry.transaction().hash()); + trace!("pool_map.add_{:?} {}", status, entry.transaction().hash()); self.record_entry_links(&mut entry)?; self.insert_entry(&entry, status)?; self.record_entry_deps(&entry); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index fe58a3abe7..f271115cea 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -211,7 +211,7 @@ impl TxPool { } } - // Remove transactions from the pool until total size < size_limit. + // Remove transactions from the pool until total size <= size_limit. pub(crate) fn limit_size(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { if let Some(id) = self.pool_map.next_evict_entry() { diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 354e56cac0..2ee84e2f6e 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -987,21 +987,15 @@ fn _submit_entry( entry: TxEntry, callbacks: &Callbacks, ) -> Result<(), Reject> { - let tx_hash = entry.transaction().hash(); match status { TxStatus::Fresh => { - if tx_pool.add_pending(entry.clone()).unwrap_or(false) { + if tx_pool.add_pending(entry.clone())? { callbacks.call_pending(tx_pool, &entry); - } else { - return Err(Reject::Duplicated(tx_hash)); } } - TxStatus::Gap => { - if tx_pool.add_gap(entry.clone()).unwrap_or(false) { + if tx_pool.add_gap(entry.clone())? { callbacks.call_pending(tx_pool, &entry); - } else { - return Err(Reject::Duplicated(tx_hash)); } } TxStatus::Proposed => { diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index a008502c62..1a187615c8 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -981,13 +981,15 @@ impl TxPoolService { match target { PlugTarget::Pending => { for entry in entries { - tx_pool.add_pending(entry).unwrap(); + if let Err(err) = tx_pool.add_pending(entry) { + error!("plug entry add_pending error {}", err); + } } } PlugTarget::Proposed => { for entry in entries { if let Err(err) = tx_pool.add_proposed(entry) { - error!("plug entry error {}", err); + error!("plug entry add_proposed error {}", err); } } } From 466c9712cf4f3a8d4e5bbfef1e22676319e058dc Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 10:03:13 +0800 Subject: [PATCH 061/267] add index_map shrink --- tx-pool/src/component/pool_map.rs | 6 +----- tx-pool/src/pool.rs | 1 + 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index b0d591850b..df829c0875 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -328,12 +328,8 @@ impl PoolMap { self.entries.iter().map(|(_, entry)| entry) } - pub(crate) fn iter_by_evict_key(&self) -> impl Iterator { - self.entries.iter_by_evict_key() - } - pub(crate) fn next_evict_entry(&self) -> Option { - self.iter_by_evict_key() + self.entries.iter_by_evict_key() .next() .map(|entry| entry.id.clone()) } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index f271115cea..dfe8b5a425 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -230,6 +230,7 @@ impl TxPool { } } } + self.pool_map.entries.shrink_to_fit(); } // remove transaction with detached proposal from gap and proposed From 12b211686472d440d24fdd0c5d122d9071345295 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 15:27:52 +0800 Subject: [PATCH 062/267] confirmed TransactionRelayConflict is ok --- test/src/specs/relay/transaction_relay.rs | 12 +++++++++++- tx-pool/src/component/pool_map.rs | 3 ++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/test/src/specs/relay/transaction_relay.rs b/test/src/specs/relay/transaction_relay.rs index 137d36964a..75a34e0df6 100644 --- a/test/src/specs/relay/transaction_relay.rs +++ b/test/src/specs/relay/transaction_relay.rs @@ -266,6 +266,11 @@ impl Spec for TransactionRelayConflict { node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); + let ret = node1 + .rpc_client() + .get_transaction_with_verbosity(tx1.hash(), 1); + assert!(matches!(ret.tx_status.status, Status::Proposed)); + let ret = node1 .rpc_client() .get_transaction_with_verbosity(tx2.hash(), 1); @@ -278,6 +283,12 @@ impl Spec for TransactionRelayConflict { node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); + // make sure tx1 is removed from tx-pool + let ret = node1 + .rpc_client() + .get_transaction_with_verbosity(tx1.hash(), 1); + assert!(matches!(ret.tx_status.status, Status::Unknown)); + let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); tx_pool_info.orphan.value() == 0 && tx_pool_info.pending.value() == 0 @@ -291,7 +302,6 @@ impl Spec for TransactionRelayConflict { let relayed = wait_until(10, || { // re-broadcast - // TODO: (yukang) double comfirm this behavior let _ = node1 .rpc_client() .send_transaction_result(tx2.data().into()); diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index df829c0875..cc1c20d684 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -329,7 +329,8 @@ impl PoolMap { } pub(crate) fn next_evict_entry(&self) -> Option { - self.entries.iter_by_evict_key() + self.entries + .iter_by_evict_key() .next() .map(|entry| entry.id.clone()) } From abbcd23f3396c16dac4a16df64d3ddd963950221 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 18:39:42 +0800 Subject: [PATCH 063/267] clean up proposed_rtx and gap_rtx --- tx-pool/src/component/pool_map.rs | 14 ++--- tx-pool/src/pool.rs | 99 ++++++++++++------------------- tx-pool/src/process.rs | 42 ++++++------- 3 files changed, 63 insertions(+), 92 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index cc1c20d684..dadb3b3bc2 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -126,6 +126,10 @@ impl PoolMap { self.entries.get_by_id(id) } + pub(crate) fn get_by_status(&self, status: &Status) -> Vec<&PoolEntry> { + self.entries.get_by_status(status) + } + pub(crate) fn pending_size(&self) -> usize { self.entries.get_by_status(&Status::Pending).len() + self.entries.get_by_status(&Status::Gap).len() @@ -191,13 +195,9 @@ impl PoolMap { } /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` - pub(crate) fn set_entry(&mut self, entry: &TxEntry, status: Status) { - let tx_short_id = entry.proposal_short_id(); - let _ = self - .entries - .get_by_id(&tx_short_id) - .expect("unconsistent pool"); - self.entries.modify_by_id(&tx_short_id, |e| { + pub(crate) fn set_entry(&mut self, short_id: &ProposalShortId, status: Status) { + let _ = self.entries.get_by_id(short_id).expect("unconsistent pool"); + self.entries.modify_by_id(short_id, |e| { e.status = status; }); } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index dfe8b5a425..88564c2058 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -6,7 +6,6 @@ use crate::callback::Callbacks; use crate::component::pool_map::{PoolEntry, PoolMap, Status}; use crate::component::recent_reject::RecentReject; use crate::error::Reject; -use crate::util::verify_rtx; use ckb_app_config::TxPoolConfig; use ckb_logger::{debug, error, warn}; use ckb_snapshot::Snapshot; @@ -19,7 +18,6 @@ use ckb_types::{ }, packed::{Byte32, ProposalShortId}, }; -use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; use std::collections::HashSet; use std::sync::Arc; @@ -72,7 +70,7 @@ impl TxPool { } fn get_by_status(&self, status: &Status) -> Vec<&PoolEntry> { - self.pool_map.entries.get_by_status(status) + self.pool_map.get_by_status(status) } /// Get tx-pool size @@ -128,12 +126,12 @@ impl TxPool { self.pool_map.get_by_id(id).is_some() } - pub(crate) fn set_entry_proposed(&mut self, entry: &TxEntry) { - self.pool_map.set_entry(entry, Status::Proposed) + pub(crate) fn set_entry_proposed(&mut self, short_id: &ProposalShortId) { + self.pool_map.set_entry(short_id, Status::Proposed) } - pub(crate) fn set_entry_gap(&mut self, entry: &TxEntry) { - self.pool_map.set_entry(entry, Status::Gap) + pub(crate) fn set_entry_gap(&mut self, short_id: &ProposalShortId) { + self.pool_map.set_entry(short_id, Status::Gap) } /// Returns tx with cycles corresponding to the id. @@ -146,6 +144,10 @@ impl TxPool { .map(|entry| (entry.inner.transaction().clone(), entry.inner.cycles)) } + pub(crate) fn get_pool_entry(&self, id: &ProposalShortId) -> Option<&PoolEntry> { + self.pool_map.get_by_id(id) + } + pub(crate) fn get_tx_from_pool(&self, id: &ProposalShortId) -> Option<&TransactionView> { self.pool_map .get_by_id(id) @@ -296,63 +298,36 @@ impl TxPool { .map_err(Reject::Resolve) } - pub(crate) fn gap_rtx( - &mut self, - cache_entry: CacheEntry, - size: usize, - timestamp: u64, - rtx: Arc, - ) -> Result { - let snapshot = self.cloned_snapshot(); - let tip_header = snapshot.tip_header(); - let tx_env = Arc::new(TxVerifyEnv::new_proposed(tip_header, 0)); - - let max_cycles = snapshot.consensus().max_block_cycles(); - let verified = verify_rtx( - snapshot, - Arc::clone(&rtx), - tx_env, - &Some(cache_entry), - max_cycles, - )?; - - let entry = - TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); - - self.set_entry_gap(&entry); - Ok(CacheEntry::Completed(verified)) + pub(crate) fn gap_rtx(&mut self, short_id: &ProposalShortId) -> Result<(), Reject> { + match self.get_pool_entry(short_id) { + Some(entry) => { + let tx_hash = entry.inner.transaction().hash(); + if entry.status == Status::Gap { + Err(Reject::Duplicated(tx_hash)) + } else { + debug!("gap_rtx: {:?} => {:?}", tx_hash, short_id); + self.set_entry_gap(short_id); + Ok(()) + } + } + None => Err(Reject::Malformed(String::from("invalid short_id"))), + } } - pub(crate) fn proposed_rtx( - &mut self, - cache_entry: CacheEntry, - size: usize, - timestamp: u64, - rtx: Arc, - ) -> Result { - let snapshot = self.cloned_snapshot(); - let tip_header = snapshot.tip_header(); - let tx_env = Arc::new(TxVerifyEnv::new_proposed(tip_header, 1)); - - let max_cycles = snapshot.consensus().max_block_cycles(); - let verified = verify_rtx( - snapshot, - Arc::clone(&rtx), - tx_env, - &Some(cache_entry), - max_cycles, - )?; - - let entry = - TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); - let tx_hash = entry.transaction().hash(); - debug!( - "proposed_rtx: {:?} => {:?}", - tx_hash, - entry.proposal_short_id() - ); - self.set_entry_proposed(&entry); - Ok(CacheEntry::Completed(verified)) + pub(crate) fn proposed_rtx(&mut self, short_id: &ProposalShortId) -> Result<(), Reject> { + match self.get_pool_entry(short_id) { + Some(entry) => { + let tx_hash = entry.inner.transaction().hash(); + if entry.status == Status::Proposed { + Err(Reject::Duplicated(tx_hash)) + } else { + debug!("proposed_rtx: {:?} => {:?}", tx_hash, short_id); + self.set_entry_proposed(short_id); + Ok(()) + } + } + None => Err(Reject::Malformed(String::from("invalid short_id"))), + } } /// Get to-be-proposal transactions that may be included in the next block. diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 2ee84e2f6e..e921203aff 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -1034,43 +1034,39 @@ fn _update_tx_pool_for_reorg( let mut gaps = Vec::new(); for entry in tx_pool.pool_map.entries.get_by_status(&Status::Gap) { - let e = &entry.inner; - let short_id = e.proposal_short_id(); + let short_id = entry.inner.proposal_short_id(); if snapshot.proposals().contains_proposed(&short_id) { - proposals.push(e.clone()); + proposals.push((short_id, entry.inner.clone())); } } for entry in tx_pool.pool_map.entries.get_by_status(&Status::Pending) { - let e = &entry.inner; - let short_id = e.proposal_short_id(); + let short_id = entry.inner.proposal_short_id(); + let elem = (short_id.clone(), entry.inner.clone()); if snapshot.proposals().contains_proposed(&short_id) { - proposals.push(e.clone()); + proposals.push(elem); } else if snapshot.proposals().contains_gap(&short_id) { - gaps.push(e.clone()); + gaps.push(elem); } } - for entry in proposals { - debug!("begin to proposed: {:x}", entry.transaction().hash()); - let cached = CacheEntry::completed(entry.cycles, entry.fee); - if let Err(e) = - tx_pool.proposed_rtx(cached, entry.size, entry.timestamp, Arc::clone(&entry.rtx)) - { - callbacks.call_reject(tx_pool, &entry, e.clone()); + for (id, entry) in proposals { + debug!("begin to proposed: {:x}", id); + if let Err(e) = tx_pool.proposed_rtx(&id) { + callbacks.call_reject(tx_pool, &entry, e); } else { - callbacks.call_proposed(tx_pool, &entry, false); + callbacks.call_proposed(tx_pool, &entry, false) } } - for entry in gaps { - debug!("begin to gap: {:x}", entry.transaction().hash()); - let tx_hash = entry.transaction().hash(); - let cached = CacheEntry::completed(entry.cycles, entry.fee); - if let Err(e) = - tx_pool.gap_rtx(cached, entry.size, entry.timestamp, Arc::clone(&entry.rtx)) - { - debug!("Failed to add tx to gap {}, reason: {}", tx_hash, e); + for (id, entry) in gaps { + debug!("begin to gap: {:x}", id); + if let Err(e) = tx_pool.gap_rtx(&id) { + debug!( + "Failed to add tx to gap {}, reason: {}", + entry.transaction().hash(), + e + ); callbacks.call_reject(tx_pool, &entry, e.clone()); } } From 508e281fc4019c852c87885495e6a028ffcfb364 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 22:10:48 +0800 Subject: [PATCH 064/267] check keep_rejected_tx_hashes_days --- tx-pool/src/pool.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 88564c2058..7e1db62c57 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -465,7 +465,8 @@ impl TxPool { fn build_recent_reject(config: &TxPoolConfig) -> Option { if !config.recent_reject.as_os_str().is_empty() { - let recent_reject_ttl = config.keep_rejected_tx_hashes_days as i32 * 24 * 60 * 60; + let recent_reject_ttl = + u8::max(1, config.keep_rejected_tx_hashes_days) as i32 * 24 * 60 * 60; match RecentReject::new( &config.recent_reject, config.keep_rejected_tx_hashes_count, From f2b3b86b53e50e98b887bdc92afde11b828813bc Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 13 Jun 2023 16:25:05 +0800 Subject: [PATCH 065/267] add more test for pool --- tx-pool/src/component/tests/proposed.rs | 46 +++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index ff5fa67866..153b9324d1 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -1,7 +1,9 @@ +use crate::component::pool_map::Status; use crate::component::tests::util::{ build_tx, build_tx_with_dep, build_tx_with_header_dep, DEFAULT_MAX_ANCESTORS_COUNT, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, }; + use crate::component::{entry::TxEntry, pool_map::PoolMap}; use ckb_types::{ bytes::Bytes, @@ -695,3 +697,47 @@ fn test_max_ancestors_with_dep() { assert_eq!(pool.edges.inputs_len(), 1); assert_eq!(pool.edges.outputs_len(), 1); } + +#[test] +fn test_container_bench_add_limits() { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let mut pool = PoolMap::new(1000000); + let tx1 = TxEntry::dummy_resolve( + TransactionBuilder::default().build(), + 100, + Capacity::shannons(100), + 100, + ); + pool.add_entry(tx1.clone(), Status::Proposed).unwrap(); + let mut prev_tx = tx1; + + for _i in 0..1000 { + let next_tx = TxEntry::dummy_resolve( + TransactionBuilder::default() + .input( + CellInput::new_builder() + .previous_output( + OutPoint::new_builder() + .tx_hash(prev_tx.transaction().hash()) + .index(0u32.pack()) + .build(), + ) + .build(), + ) + .witness(Bytes::new().pack()) + .build(), + rng.gen_range(0..1000), + Capacity::shannons(200), + rng.gen_range(0..1000), + ); + pool.add_entry(next_tx.clone(), Status::Proposed).unwrap(); + prev_tx = next_tx; + } + assert_eq!(pool.size(), 1001); + assert_eq!(pool.proposed_size(), 1001); + assert_eq!(pool.pending_size(), 0); + pool.clear(); + assert_eq!(pool.size(), 0); +} From bdf557d01f739e3ddcba5ee3b40cde0060750ef1 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 13 Jun 2023 16:58:57 +0800 Subject: [PATCH 066/267] change name (add/sub)_ancestor_weight, prepare for (add/sub)_descendant_weight --- tx-pool/src/component/commit_txs_scanner.rs | 2 +- tx-pool/src/component/entry.rs | 4 ++-- tx-pool/src/component/pool_map.rs | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index 4e5d487cdf..f90ac94094 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -209,7 +209,7 @@ impl<'a> CommitTxsScanner<'a> { .remove(desc_id) .or_else(|| self.pool_map.get(desc_id).cloned()) { - desc.sub_entry_weight(entry); + desc.sub_ancestor_weight(entry); self.modified_entries.insert(desc); } } diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index bcce6a2e16..544d7e2817 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -106,7 +106,7 @@ impl TxEntry { } /// Update ancestor state for add an entry - pub fn add_entry_weight(&mut self, entry: &TxEntry) { + pub fn add_ancestor_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_add(1); self.ancestors_size = self.ancestors_size.saturating_add(entry.size); self.ancestors_cycles = self.ancestors_cycles.saturating_add(entry.cycles); @@ -118,7 +118,7 @@ impl TxEntry { } /// Update ancestor state for remove an entry - pub fn sub_entry_weight(&mut self, entry: &TxEntry) { + pub fn sub_ancestor_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_sub(1); self.ancestors_size = self.ancestors_size.saturating_sub(entry.size); self.ancestors_cycles = self.ancestors_cycles.saturating_sub(entry.cycles); diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index dadb3b3bc2..af2c5979e2 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -363,8 +363,8 @@ impl PoolMap { let entry = self.entries.get_by_id(desc_id).unwrap().clone(); let mut child = entry.inner.clone(); match op { - EntryOp::Remove => child.sub_entry_weight(parent), - EntryOp::Add => child.add_entry_weight(parent), + EntryOp::Remove => child.sub_ancestor_weight(parent), + EntryOp::Add => child.add_ancestor_weight(parent), } let short_id = child.proposal_short_id(); self.entries.modify_by_id(&short_id, |e| { @@ -482,7 +482,7 @@ impl PoolMap { .entries .get_by_id(ancestor_id) .expect("pool consistent"); - entry.add_entry_weight(&ancestor.inner); + entry.add_ancestor_weight(&ancestor.inner); } if entry.ancestors_count > self.max_ancestors_count { eprintln!("debug: exceeded maximum ancestors count"); From 8a4172f543bf931628d8b1657f774e79c9bea66c Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 13 Jun 2023 17:16:46 +0800 Subject: [PATCH 067/267] remove ancestors_size from AncestorsScoreSortKey since it is useless --- tx-pool/src/component/entry.rs | 1 - tx-pool/src/component/score_key.rs | 1 - tx-pool/src/component/tests/proposed.rs | 19 +++++++++++-------- tx-pool/src/component/tests/score_key.rs | 2 -- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 544d7e2817..2b8bfa6f56 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -160,7 +160,6 @@ impl From<&TxEntry> for AncestorsScoreSortKey { weight, id: entry.proposal_short_id(), ancestors_fee: entry.ancestors_fee, - ancestors_size: entry.ancestors_size, ancestors_weight, } } diff --git a/tx-pool/src/component/score_key.rs b/tx-pool/src/component/score_key.rs index 011fb4000b..18dd48fcb2 100644 --- a/tx-pool/src/component/score_key.rs +++ b/tx-pool/src/component/score_key.rs @@ -9,7 +9,6 @@ pub struct AncestorsScoreSortKey { pub id: ProposalShortId, pub ancestors_fee: Capacity, pub ancestors_weight: u64, - pub ancestors_size: usize, } impl AncestorsScoreSortKey { diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index 153b9324d1..2bfa5d2449 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -104,11 +104,11 @@ fn test_add_entry_from_detached() { assert_eq!(pool.size(), 3); - let expected = vec![(id1.clone(), 1), (id2.clone(), 2), (id3.clone(), 3)]; + let expected = vec![id1.clone(), id2.clone(), id3.clone()]; let got = pool .entries .iter() - .map(|(_, key)| (key.id.clone(), key.score.ancestors_size)) + .map(|(_, key)| key.id.clone()) .collect::>(); assert_eq!(expected, got); @@ -144,11 +144,11 @@ fn test_add_entry_from_detached() { assert_eq!(pool.edges.inputs_len(), 2); assert_eq!(pool.entries.len(), 2); - let left = vec![(id2.clone(), 1), (id3.clone(), 2)]; + let left = vec![id2.clone(), id3.clone()]; let got = pool .entries .iter() - .map(|(_, key)| (key.id.clone(), key.score.ancestors_size)) + .map(|(_, key)| key.id.clone()) .collect::>(); assert_eq!(left, got); @@ -160,10 +160,13 @@ fn test_add_entry_from_detached() { assert!(pool.add_proposed(entry1).unwrap()); - for (idx, (_, entry)) in pool.entries.iter().enumerate() { - assert_eq!(entry.id, expected[idx].0); - assert_eq!(entry.score.ancestors_size, expected[idx].1); - } + let ids = pool + .entries + .iter() + .map(|(_, entry)| entry.inner.proposal_short_id()) + .collect::>(); + assert_eq!(ids, expected); + { assert!(pool.links.get_parents(&id1).unwrap().is_empty()); assert_eq!( diff --git a/tx-pool/src/component/tests/score_key.rs b/tx-pool/src/component/tests/score_key.rs index 22da657f84..09475f3d19 100644 --- a/tx-pool/src/component/tests/score_key.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -30,7 +30,6 @@ fn test_min_fee_and_weight() { id: ProposalShortId::new([0u8; 10]), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, - ancestors_size: 0, }; key.min_fee_and_weight() }) @@ -75,7 +74,6 @@ fn test_ancestors_sorted_key_order() { id: ProposalShortId::new(id), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, - ancestors_size: 0, } }) .collect::>(); From 702791550e2b5f76752cdb7be1bdc6de63892bc0 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 13 Jun 2023 22:05:45 +0800 Subject: [PATCH 068/267] trivial refactor for PoolTransactionEntry --- util/launcher/src/shared_builder.rs | 31 ++++++++++------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/util/launcher/src/shared_builder.rs b/util/launcher/src/shared_builder.rs index 09f9fd862c..96d750a198 100644 --- a/util/launcher/src/shared_builder.rs +++ b/util/launcher/src/shared_builder.rs @@ -421,18 +421,19 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: let notify_pending = notify.clone(); let tx_relay_sender = tx_pool_builder.tx_relay_sender(); + let create_notify_entry = |entry: &TxEntry| PoolTransactionEntry { + transaction: entry.rtx.transaction.clone(), + cycles: entry.cycles, + size: entry.size, + fee: entry.fee, + timestamp: entry.timestamp, + }; tx_pool_builder.register_pending(Box::new(move |tx_pool: &mut TxPool, entry: &TxEntry| { // update statics tx_pool.update_statics_for_add_tx(entry.size, entry.cycles); // notify - let notify_tx_entry = PoolTransactionEntry { - transaction: entry.rtx.transaction.clone(), - cycles: entry.cycles, - size: entry.size, - fee: entry.fee, - timestamp: entry.timestamp, - }; + let notify_tx_entry = create_notify_entry(entry); notify_pending.notify_new_transaction(notify_tx_entry); })); @@ -445,13 +446,7 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: } // notify - let notify_tx_entry = PoolTransactionEntry { - transaction: entry.rtx.transaction.clone(), - cycles: entry.cycles, - size: entry.size, - fee: entry.fee, - timestamp: entry.timestamp, - }; + let notify_tx_entry = create_notify_entry(entry); notify_proposed.notify_proposed_transaction(notify_tx_entry); }, )); @@ -483,13 +478,7 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: } // notify - let notify_tx_entry = PoolTransactionEntry { - transaction: entry.rtx.transaction.clone(), - cycles: entry.cycles, - size: entry.size, - fee: entry.fee, - timestamp: entry.timestamp, - }; + let notify_tx_entry = create_notify_entry(entry); notify_reject.notify_reject_transaction(notify_tx_entry, reject); }, )); From 8e72e3ef0c8b146e0525520e6f1242b222c193ff Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 14 Jun 2023 05:20:59 +0800 Subject: [PATCH 069/267] high score tx_entry will not blocked at pending --- tx-pool/src/component/entry.rs | 1 + tx-pool/src/component/pool_map.rs | 21 +++++---- tx-pool/src/component/score_key.rs | 8 +++- tx-pool/src/component/tests/pending.rs | 55 ++++++++++++++++++++++++ tx-pool/src/component/tests/score_key.rs | 2 + tx-pool/src/pool.rs | 2 - 6 files changed, 78 insertions(+), 11 deletions(-) diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 2b8bfa6f56..67cc2c8035 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -161,6 +161,7 @@ impl From<&TxEntry> for AncestorsScoreSortKey { id: entry.proposal_short_id(), ancestors_fee: entry.ancestors_fee, ancestors_weight, + timestamp: entry.timestamp, } } } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index af2c5979e2..c6d232fcd3 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -140,11 +140,7 @@ impl PoolMap { } pub(crate) fn score_sorted_iter(&self) -> impl Iterator { - self.entries - .iter_by_score() - .rev() - .filter(|entry| entry.status == Status::Proposed) - .map(|entry| &entry.inner) + self.score_sorted_iter_by(Status::Proposed) } pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { @@ -296,12 +292,13 @@ impl PoolMap { proposals: &mut HashSet, status: &Status, ) { - for entry in self.entries.get_by_status(status) { + for entry in self.score_sorted_iter_by(*status) { if proposals.len() == limit { break; } - if !exclusion.contains(&entry.id) { - proposals.insert(entry.id.clone()); + let id = entry.proposal_short_id(); + if !exclusion.contains(&id) { + proposals.insert(id); } } } @@ -341,6 +338,14 @@ impl PoolMap { self.links.clear(); } + fn score_sorted_iter_by(&self, status: Status) -> impl Iterator { + self.entries + .iter_by_score() + .rev() + .filter(move |entry| entry.status == status) + .map(|entry| &entry.inner) + } + fn remove_entry_links(&mut self, id: &ProposalShortId) { if let Some(parents) = self.links.get_parents(id).cloned() { for parent in parents { diff --git a/tx-pool/src/component/score_key.rs b/tx-pool/src/component/score_key.rs index 18dd48fcb2..1a9843b7ad 100644 --- a/tx-pool/src/component/score_key.rs +++ b/tx-pool/src/component/score_key.rs @@ -9,6 +9,7 @@ pub struct AncestorsScoreSortKey { pub id: ProposalShortId, pub ancestors_fee: Capacity, pub ancestors_weight: u64, + pub timestamp: u64, } impl AncestorsScoreSortKey { @@ -42,7 +43,12 @@ impl Ord for AncestorsScoreSortKey { if self_weight == other_weight { // if fee rate weight is same, then compare with ancestor weight if self.ancestors_weight == other.ancestors_weight { - self.id.raw_data().cmp(&other.id.raw_data()) + if self.timestamp == other.timestamp { + self.id.raw_data().cmp(&other.id.raw_data()) + } else { + // NOTE: we use timestamp to compare, so the order is reversed + self.timestamp.cmp(&other.timestamp).reverse() + } } else { self.ancestors_weight.cmp(&other.ancestors_weight) } diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index b07e2e96e6..764f6d0026 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -6,9 +6,11 @@ use crate::component::{ entry::TxEntry, pool_map::{PoolMap, Status}, }; +use ckb_types::core::Capacity; use ckb_types::packed::OutPoint; use ckb_types::{h256, packed::Byte32, prelude::*}; use std::collections::HashSet; +use std::time::Duration; #[test] fn test_basic() { @@ -206,7 +208,11 @@ fn test_fill_proposals() { 3, ); let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); assert!(pool.add_entry(entry1, Status::Pending).unwrap()); assert!(pool.add_entry(entry2, Status::Pending).unwrap()); @@ -242,6 +248,55 @@ fn test_fill_proposals() { assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); } +#[test] +fn test_fill_proposals_with_high_score() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), 2, Capacity::shannons(50), 2); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), 2, Capacity::shannons(100), 2); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let id1 = tx1.proposal_short_id(); + let id2 = tx2.proposal_short_id(); + let id3 = tx3.proposal_short_id(); + + let mut ret = HashSet::new(); + pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!( + ret, + HashSet::from_iter(vec![id3.clone(), id2.clone(), id1.clone()]) + ); + + let mut ret = HashSet::new(); + pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id3.clone()])); + + let mut ret = HashSet::new(); + pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id3.clone(), id2.clone()])); + + let mut ret = HashSet::new(); + let mut exclusion = HashSet::new(); + exclusion.insert(id2); + pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); +} + #[test] fn test_edges() { let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); diff --git a/tx-pool/src/component/tests/score_key.rs b/tx-pool/src/component/tests/score_key.rs index 09475f3d19..7acc22a895 100644 --- a/tx-pool/src/component/tests/score_key.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -30,6 +30,7 @@ fn test_min_fee_and_weight() { id: ProposalShortId::new([0u8; 10]), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, + timestamp: 0, }; key.min_fee_and_weight() }) @@ -74,6 +75,7 @@ fn test_ancestors_sorted_key_order() { id: ProposalShortId::new(id), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, + timestamp: 0, } }) .collect::>(); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 7e1db62c57..2fdbe23d45 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -331,8 +331,6 @@ impl TxPool { } /// Get to-be-proposal transactions that may be included in the next block. - /// TODO: do we need to consider the something like score, so that we can - /// provide best transactions to be proposed. pub(crate) fn get_proposals( &self, limit: usize, From 08f58beb42619abb95de0deca13e06ed0b0590bc Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 14 Jun 2023 19:13:43 +0800 Subject: [PATCH 070/267] add descendants related info for evict, has performance regression --- test/src/specs/tx_pool/limit.rs | 2 +- tx-pool/src/component/entry.rs | 69 ++++++++++-- tx-pool/src/component/pool_map.rs | 32 +++++- tx-pool/src/component/tests/entry.rs | 54 +++++++++ tx-pool/src/component/tests/mod.rs | 1 + tx-pool/src/component/tests/pending.rs | 145 +++++++++++++++++++++++++ tx-pool/src/pool.rs | 11 +- util/types/src/core/tx_pool.rs | 4 + 8 files changed, 301 insertions(+), 17 deletions(-) create mode 100644 tx-pool/src/component/tests/entry.rs diff --git a/test/src/specs/tx_pool/limit.rs b/test/src/specs/tx_pool/limit.rs index b4defc175b..ad84e36124 100644 --- a/test/src/specs/tx_pool/limit.rs +++ b/test/src/specs/tx_pool/limit.rs @@ -34,7 +34,7 @@ impl Spec for SizeLimit { let max_tx_num = (MAX_MEM_SIZE_FOR_SIZE_LIMIT as u64) / one_tx_size; - info!("Generate as much as possible txs on node"); + info!("Generate as much as possible txs on : {}", max_tx_num); (0..(max_tx_num - 1)).for_each(|_| { let tx = node.new_transaction(hash.clone()); hash = node.rpc_client().send_transaction(tx.data().into()); diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 67cc2c8035..3638e21285 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -31,6 +31,14 @@ pub struct TxEntry { pub ancestors_cycles: Cycle, /// ancestors txs count pub ancestors_count: usize, + /// descendants txs fee + pub descendants_fee: Capacity, + /// descendants txs size + pub descendants_size: usize, + /// descendants txs cycles + pub descendants_cycles: Cycle, + /// descendants txs count + pub descendants_count: usize, /// The unix timestamp when entering the Txpool, unit: Millisecond pub timestamp: u64, } @@ -58,6 +66,10 @@ impl TxEntry { ancestors_size: size, ancestors_fee: fee, ancestors_cycles: cycles, + descendants_fee: fee, + descendants_size: size, + descendants_cycles: cycles, + descendants_count: 1, ancestors_count: 1, } } @@ -105,6 +117,30 @@ impl TxEntry { FeeRate::calculate(self.fee, weight) } + /// Update ancestor state for add an entry + pub fn add_descendant_weight(&mut self, entry: &TxEntry) { + self.descendants_count = self.descendants_count.saturating_add(1); + self.descendants_size = self.descendants_size.saturating_add(entry.size); + self.descendants_cycles = self.descendants_cycles.saturating_add(entry.cycles); + self.descendants_fee = Capacity::shannons( + self.descendants_fee + .as_u64() + .saturating_add(entry.fee.as_u64()), + ); + } + + /// Update ancestor state for remove an entry + pub fn sub_descendant_weight(&mut self, entry: &TxEntry) { + self.descendants_count = self.descendants_count.saturating_sub(1); + self.descendants_size = self.descendants_size.saturating_sub(entry.size); + self.descendants_cycles = self.descendants_cycles.saturating_sub(entry.cycles); + self.descendants_fee = Capacity::shannons( + self.descendants_fee + .as_u64() + .saturating_sub(entry.fee.as_u64()), + ); + } + /// Update ancestor state for add an entry pub fn add_ancestor_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_add(1); @@ -130,11 +166,16 @@ impl TxEntry { } /// Reset ancestor state by remove - pub fn reset_ancestors_state(&mut self) { + pub fn reset_statistic_state(&mut self) { self.ancestors_count = 1; self.ancestors_size = self.size; self.ancestors_cycles = self.cycles; self.ancestors_fee = self.fee; + + self.descendants_count = 1; + self.descendants_size = self.size; + self.descendants_cycles = self.cycles; + self.descendants_fee = self.fee; } /// Converts entry to a `TxEntryInfo`. @@ -145,6 +186,8 @@ impl TxEntry { fee: self.fee, ancestors_size: self.ancestors_size as u64, ancestors_cycles: self.ancestors_cycles, + descendants_size: self.descendants_size as u64, + descendants_cycles: self.descendants_cycles, ancestors_count: self.ancestors_count as u64, timestamp: self.timestamp, } @@ -190,22 +233,28 @@ impl Ord for TxEntry { } } -/// Currently we do not have trace descendants, -/// so first take the simplest strategy, -/// first compare fee_rate, select the smallest fee_rate, +/// First compare fee_rate, select the smallest fee_rate, /// and then select the latest timestamp, for eviction, /// the latest timestamp which also means that the fewer descendants may exist. #[derive(Eq, PartialEq, Clone, Debug)] pub struct EvictKey { - fee_rate: FeeRate, - timestamp: u64, + pub fee_rate: FeeRate, + pub timestamp: u64, + pub descendants_count: usize, } impl From<&TxEntry> for EvictKey { fn from(entry: &TxEntry) -> Self { + let weight = get_transaction_weight(entry.size, entry.cycles); + let descendants_weight = + get_transaction_weight(entry.descendants_size, entry.descendants_cycles); + + let descendants_feerate = FeeRate::calculate(entry.descendants_fee, descendants_weight); + let feerate = FeeRate::calculate(entry.fee, weight); EvictKey { - fee_rate: entry.fee_rate(), + fee_rate: descendants_feerate.max(feerate), timestamp: entry.timestamp, + descendants_count: entry.descendants_count, } } } @@ -219,7 +268,11 @@ impl PartialOrd for EvictKey { impl Ord for EvictKey { fn cmp(&self, other: &Self) -> Ordering { if self.fee_rate == other.fee_rate { - self.timestamp.cmp(&other.timestamp).reverse() + if self.descendants_count == other.descendants_count { + self.timestamp.cmp(&other.timestamp) + } else { + self.descendants_count.cmp(&other.descendants_count) + } } else { self.fee_rate.cmp(&other.fee_rate) } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index c6d232fcd3..cb0393d4ec 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -184,7 +184,7 @@ impl PoolMap { } trace!("pool_map.add_{:?} {}", status, entry.transaction().hash()); self.record_entry_links(&mut entry)?; - self.insert_entry(&entry, status)?; + self.insert_entry(&entry, status); self.record_entry_deps(&entry); self.record_entry_edges(&entry); Ok(true) @@ -200,6 +200,7 @@ impl PoolMap { pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { if let Some(entry) = self.entries.remove_by_id(id) { + self.update_ancestors_index_key(&entry.inner, EntryOp::Remove); self.update_descendants_index_key(&entry.inner, EntryOp::Remove); self.remove_entry_deps(&entry.inner); self.remove_entry_edges(&entry.inner); @@ -325,10 +326,10 @@ impl PoolMap { self.entries.iter().map(|(_, entry)| entry) } - pub(crate) fn next_evict_entry(&self) -> Option { + pub(crate) fn next_evict_entry(&self, status: Status) -> Option { self.entries .iter_by_evict_key() - .next() + .find(move |entry| entry.status == status) .map(|entry| entry.id.clone()) } @@ -360,6 +361,25 @@ impl PoolMap { self.links.remove(id); } + fn update_ancestors_index_key(&mut self, child: &TxEntry, op: EntryOp) { + let ancestors: HashSet = + self.links.calc_ancestors(&child.proposal_short_id()); + for anc_id in &ancestors { + // update parent score + let entry = self.entries.get_by_id(anc_id).unwrap().clone(); + let mut parent = entry.inner.clone(); + match op { + EntryOp::Remove => parent.sub_descendant_weight(child), + EntryOp::Add => parent.add_descendant_weight(child), + } + let short_id = parent.proposal_short_id(); + self.entries.modify_by_id(&short_id, |e| { + e.evict_key = parent.as_evict_key(); + e.inner = parent; + }); + } + } + fn update_descendants_index_key(&mut self, parent: &TxEntry, op: EntryOp) { let descendants: HashSet = self.links.calc_descendants(&parent.proposal_short_id()); @@ -374,7 +394,6 @@ impl PoolMap { let short_id = child.proposal_short_id(); self.entries.modify_by_id(&short_id, |e| { e.score = child.as_score_key(); - e.evict_key = child.as_evict_key(); e.inner = child; }); } @@ -426,6 +445,8 @@ impl PoolMap { if !children.is_empty() { self.update_descendants_from_detached(&tx_short_id, children); } + // update ancestors + self.update_ancestors_index_key(entry, EntryOp::Add); } // update_descendants_from_detached is used to update @@ -542,7 +563,7 @@ impl PoolMap { self.edges.header_deps.remove(&id); } - fn insert_entry(&mut self, entry: &TxEntry, status: Status) -> Result { + fn insert_entry(&mut self, entry: &TxEntry, status: Status) { let tx_short_id = entry.proposal_short_id(); let score = entry.as_score_key(); let evict_key = entry.as_evict_key(); @@ -553,7 +574,6 @@ impl PoolMap { inner: entry.clone(), evict_key, }); - Ok(true) } } diff --git a/tx-pool/src/component/tests/entry.rs b/tx-pool/src/component/tests/entry.rs new file mode 100644 index 0000000000..8aa7edf3ff --- /dev/null +++ b/tx-pool/src/component/tests/entry.rs @@ -0,0 +1,54 @@ +use ckb_types::core::{Capacity, FeeRate}; + +use crate::component::entry::EvictKey; + +#[test] +fn test_min_fee_and_weight_evict() { + let mut result = vec![(500, 10, 30), (10, 10, 31), (100, 10, 32)] + .into_iter() + .map(|(fee, weight, timestamp)| EvictKey { + fee_rate: FeeRate::calculate(Capacity::shannons(fee), weight), + timestamp, + descendants_count: 0, + }) + .collect::>(); + result.sort(); + assert_eq!( + result.iter().map(|key| key.timestamp).collect::>(), + vec![31, 32, 30] + ); +} + +#[test] +fn test_min_timestamp_evict() { + let mut result = vec![(500, 10, 30), (500, 10, 31), (500, 10, 32)] + .into_iter() + .map(|(fee, weight, timestamp)| EvictKey { + fee_rate: FeeRate::calculate(Capacity::shannons(fee), weight), + timestamp, + descendants_count: 0, + }) + .collect::>(); + result.sort(); + assert_eq!( + result.iter().map(|key| key.timestamp).collect::>(), + vec![30, 31, 32] + ); +} + +#[test] +fn test_min_weight_evict() { + let mut result = vec![(500, 10, 30), (500, 12, 31), (500, 13, 32)] + .into_iter() + .map(|(fee, weight, timestamp)| EvictKey { + fee_rate: FeeRate::calculate(Capacity::shannons(fee), weight), + timestamp, + descendants_count: 0, + }) + .collect::>(); + result.sort(); + assert_eq!( + result.iter().map(|key| key.timestamp).collect::>(), + vec![32, 31, 30] + ); +} diff --git a/tx-pool/src/component/tests/mod.rs b/tx-pool/src/component/tests/mod.rs index d9a3529707..fb851e4855 100644 --- a/tx-pool/src/component/tests/mod.rs +++ b/tx-pool/src/component/tests/mod.rs @@ -1,4 +1,5 @@ mod chunk; +mod entry; mod pending; mod proposed; mod recent_reject; diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index 764f6d0026..f2c3b6b19d 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -314,3 +314,148 @@ fn test_edges() { edges.delete_txid_by_dep(outpoint, &short_id2); assert!(edges.deps.is_empty()); } + +#[test] +fn test_pool_evict() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let e1 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e1, tx1.proposal_short_id()); + pool.remove_entry(&e1); + + let e2 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e2, tx2.proposal_short_id()); + pool.remove_entry(&e2); + + let e3 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e3, tx3.proposal_short_id()); + pool.remove_entry(&e3); + + assert!(pool.next_evict_entry(Status::Pending).is_none()); +} + +#[test] +fn test_pool_min_weight_evict() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), 2, Capacity::shannons(100), 2); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), 2, Capacity::shannons(50), 2); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), 2, Capacity::shannons(10), 2); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let e1 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e1, tx3.proposal_short_id()); + pool.remove_entry(&e1); + + let e2 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e2, tx2.proposal_short_id()); + pool.remove_entry(&e2); + + let e3 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e3, tx1.proposal_short_id()); + pool.remove_entry(&e3); + + assert!(pool.next_evict_entry(Status::Pending).is_none()); +} + +#[test] +fn test_pool_max_size_evict() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), 2, Capacity::shannons(100), 3); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), 2, Capacity::shannons(100), 2); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), 2, Capacity::shannons(100), 1); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let e1 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e1, tx1.proposal_short_id()); + pool.remove_entry(&e1); + + let e2 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e2, tx2.proposal_short_id()); + pool.remove_entry(&e2); + + let e3 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e3, tx3.proposal_short_id()); + pool.remove_entry(&e3); + + assert!(pool.next_evict_entry(Status::Pending).is_none()); +} + +#[test] +fn test_pool_min_descendants_evict() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx(vec![(&tx1.hash(), 1), (&h256!("0x3").pack(), 1)], 3); + let tx3 = build_tx_with_dep(vec![(&tx2.hash(), 1)], vec![(&h256!("0x5").pack(), 1)], 3); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), 2, Capacity::shannons(100), 1); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), 2, Capacity::shannons(100), 1); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), 2, Capacity::shannons(100), 1); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let e1 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e1, tx3.proposal_short_id()); + pool.remove_entry(&e1); + + let e2 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e2, tx2.proposal_short_id()); + pool.remove_entry(&e2); + + let e3 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e3, tx1.proposal_short_id()); + pool.remove_entry(&e3); + + assert!(pool.next_evict_entry(Status::Pending).is_none()); +} diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 2fdbe23d45..afcb56aad2 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -216,7 +216,14 @@ impl TxPool { // Remove transactions from the pool until total size <= size_limit. pub(crate) fn limit_size(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { - if let Some(id) = self.pool_map.next_evict_entry() { + let next_evict_entry = || { + self.pool_map + .next_evict_entry(Status::Pending) + .or_else(|| self.pool_map.next_evict_entry(Status::Gap)) + .or_else(|| self.pool_map.next_evict_entry(Status::Proposed)) + }; + + if let Some(id) = next_evict_entry() { let removed = self.pool_map.remove_entry_and_descendants(&id); for entry in removed { let tx_hash = entry.transaction().hash(); @@ -251,7 +258,7 @@ impl TxPool { entries.sort_unstable_by_key(|entry| entry.ancestors_count); for mut entry in entries { let tx_hash = entry.transaction().hash(); - entry.reset_ancestors_state(); + entry.reset_statistic_state(); let ret = self.add_pending(entry); debug!( "remove_by_detached_proposal from {:?} {} add_pending {:?}", diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index d5b41e1d4d..43fcd7e547 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -128,6 +128,10 @@ pub struct TxEntryInfo { pub ancestors_size: u64, /// Cycles of in-tx-pool ancestor transactions pub ancestors_cycles: u64, + /// Size of in-tx-pool descendants transactions + pub descendants_size: u64, + /// Cycles of in-tx-pool descendants transactions + pub descendants_cycles: u64, /// Number of in-tx-pool ancestor transactions pub ancestors_count: u64, /// The unix timestamp when entering the Txpool, unit: Millisecond From 0d9a1031ba863e04f4316266d19a41f92eb3b177 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 14 Jun 2023 23:30:38 +0800 Subject: [PATCH 071/267] cleanup and remove unused function names and tests --- tx-pool/src/component/pool_map.rs | 59 +++++-------------------- tx-pool/src/component/tests/pending.rs | 29 ------------ tx-pool/src/component/tests/proposed.rs | 14 +++--- tx-pool/src/pool.rs | 4 +- 4 files changed, 19 insertions(+), 87 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index cb0393d4ec..596a9dce8f 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -117,11 +117,6 @@ impl PoolMap { self.add_entry(entry, Status::Proposed) } - #[cfg(test)] - pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView) -> Option { - self.remove_entry(&tx.proposal_short_id()) - } - pub(crate) fn get_by_id(&self, id: &ProposalShortId) -> Option<&PoolEntry> { self.entries.get_by_id(id) } @@ -192,10 +187,11 @@ impl PoolMap { /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` pub(crate) fn set_entry(&mut self, short_id: &ProposalShortId, status: Status) { - let _ = self.entries.get_by_id(short_id).expect("unconsistent pool"); - self.entries.modify_by_id(short_id, |e| { - e.status = status; - }); + self.entries + .modify_by_id(short_id, |e| { + e.status = status; + }) + .expect("unconsistent pool"); } pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { @@ -304,24 +300,6 @@ impl PoolMap { } } - #[cfg(test)] - pub(crate) fn remove_entries_by_filter bool>( - &mut self, - status: &Status, - mut predicate: P, - ) -> Vec { - let mut removed = Vec::new(); - for entry in self.entries.get_by_status(status) { - if predicate(&entry.id, &entry.inner) { - removed.push(entry.inner.clone()); - } - } - for entry in &removed { - self.remove_entry(&entry.proposal_short_id()); - } - removed - } - pub(crate) fn iter(&self) -> impl Iterator { self.entries.iter().map(|(_, entry)| entry) } @@ -443,31 +421,16 @@ impl PoolMap { } // update children if !children.is_empty() { - self.update_descendants_from_detached(&tx_short_id, children); - } - // update ancestors - self.update_ancestors_index_key(entry, EntryOp::Add); - } - - // update_descendants_from_detached is used to update - // the descendants for a single transaction that has been added to the - // pool but may have child transactions in the pool, eg during a - // chain reorg. - fn update_descendants_from_detached( - &mut self, - id: &ProposalShortId, - children: HashSet, - ) { - if let Some(entry) = self.get_by_id(id).cloned() { for child in &children { - self.links.add_parent(child, id.clone()); + self.links.add_parent(child, tx_short_id.clone()); } - if let Some(links) = self.links.inner.get_mut(id) { + if let Some(links) = self.links.inner.get_mut(&tx_short_id) { links.children.extend(children); } - - self.update_descendants_index_key(&entry.inner, EntryOp::Add); + self.update_descendants_index_key(entry, EntryOp::Add); } + // update ancestors + self.update_ancestors_index_key(entry, EntryOp::Add); } /// Record the links for entry @@ -517,7 +480,6 @@ impl PoolMap { for cell_dep in entry.transaction().cell_deps() { let dep_pt = cell_dep.out_point(); - // insert dep-ref map self.edges .deps .entry(dep_pt) @@ -529,7 +491,6 @@ impl PoolMap { self.links.add_child(parent, short_id.clone()); } - // insert links let links = TxLinks { parents, children: Default::default(), diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index f2c3b6b19d..2445593caf 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -165,35 +165,6 @@ fn test_remove_entry() { assert!(pool.edges.header_deps.is_empty()); } -#[test] -fn test_remove_entries_by_filter() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], - 3, - ); - let tx3 = build_tx_with_dep( - vec![(&h256!("0x4").pack(), 1)], - vec![(&h256!("0x5").pack(), 1)], - 3, - ); - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1, Status::Pending).unwrap()); - assert!(pool.add_entry(entry2, Status::Pending).unwrap()); - assert!(pool.add_entry(entry3, Status::Pending).unwrap()); - - pool.remove_entries_by_filter(&Status::Pending, |id, _tx_entry| { - id == &tx1.proposal_short_id() - }); - - assert!(!pool.contains_key(&tx1.proposal_short_id())); - assert!(pool.contains_key(&tx2.proposal_short_id())); - assert!(pool.contains_key(&tx3.proposal_short_id())); -} - #[test] fn test_fill_proposals() { let mut pool = PoolMap::new(1000); diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index 2bfa5d2449..4f864e3a47 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -72,7 +72,7 @@ fn test_add_entry() { assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 3); - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.outputs_len(), 1); assert_eq!(pool.edges.inputs_len(), 1); } @@ -139,7 +139,7 @@ fn test_add_entry_from_detached() { assert!(pool.links.get_children(&id3).unwrap().is_empty()); } - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); assert_eq!(pool.entries.len(), 2); @@ -221,7 +221,7 @@ fn test_add_roots() { assert_eq!(pool.edges.outputs_len(), 4); assert_eq!(pool.edges.inputs_len(), 4); - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.outputs_len(), 3); assert_eq!(pool.edges.inputs_len(), 2); @@ -282,7 +282,7 @@ fn test_add_no_roots() { assert_eq!(pool.edges.outputs_len(), 13); assert_eq!(pool.edges.inputs_len(), 7); - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.outputs_len(), 10); assert_eq!(pool.edges.inputs_len(), 6); @@ -584,7 +584,7 @@ fn test_dep_group() { assert_eq!(get_deps_len(&pool, &tx2_out_point), 1); assert_eq!(get_deps_len(&pool, &tx3_out_point), 0); - pool.remove_committed_tx(&tx3); + pool.remove_entry(&tx3.proposal_short_id()); assert_eq!(get_deps_len(&pool, &tx1_out_point), 0); assert_eq!(get_deps_len(&pool, &tx2_out_point), 0); assert_eq!(get_deps_len(&pool, &tx3_out_point), 0); @@ -642,8 +642,8 @@ fn test_disordered_remove_committed_tx() { assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); - pool.remove_committed_tx(&tx2); - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx2.proposal_short_id()); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.inputs_len(), 0); assert_eq!(pool.edges.outputs_len(), 0); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index afcb56aad2..154bd4b514 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -173,7 +173,7 @@ impl TxPool { } } - pub(crate) fn resolve_conflict_header_dep( + fn resolve_conflict_header_dep( &mut self, detached_headers: &HashSet, callbacks: &Callbacks, @@ -183,7 +183,7 @@ impl TxPool { } } - pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { + fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { let short_id = tx.proposal_short_id(); if let Some(entry) = self.pool_map.remove_entry(&short_id) { callbacks.call_committed(self, &entry) From 2ab1e5f37ef88bbe3e84390db195037c68d7c820 Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 26 Jun 2023 11:33:36 +0800 Subject: [PATCH 072/267] remove outputs in edges --- tx-pool/src/component/edges.rs | 49 ------------------------- tx-pool/src/component/pool_map.rs | 44 +++++++--------------- tx-pool/src/component/tests/pending.rs | 4 -- tx-pool/src/component/tests/proposed.rs | 12 ------ tx-pool/src/pool.rs | 1 - 5 files changed, 13 insertions(+), 97 deletions(-) diff --git a/tx-pool/src/component/edges.rs b/tx-pool/src/component/edges.rs index 129b33c0a2..5ed01fbbe3 100644 --- a/tx-pool/src/component/edges.rs +++ b/tx-pool/src/component/edges.rs @@ -1,18 +1,10 @@ use ckb_types::packed::{Byte32, OutPoint, ProposalShortId}; use std::collections::{hash_map::Entry, HashMap, HashSet}; -#[derive(Debug, PartialEq, Clone)] -pub(crate) enum OutPointStatus { - UnConsumed, - Consumed(ProposalShortId), -} - #[derive(Default, Debug, Clone)] pub(crate) struct Edges { /// input-txid map represent in-pool tx's inputs pub(crate) inputs: HashMap, - /// output-op map represent in-pool tx's outputs - pub(crate) outputs: HashMap, /// dep-set map represent in-pool tx's deps pub(crate) deps: HashMap>, /// dep-set map represent in-pool tx's header deps @@ -20,11 +12,6 @@ pub(crate) struct Edges { } impl Edges { - #[cfg(test)] - pub(crate) fn outputs_len(&self) -> usize { - self.outputs.len() - } - #[cfg(test)] pub(crate) fn inputs_len(&self) -> usize { self.inputs.len() @@ -48,21 +35,6 @@ impl Edges { self.inputs.remove(out_point) } - pub(crate) fn remove_output(&mut self, out_point: &OutPoint) -> Option { - match self.outputs.remove(out_point) { - Some(OutPointStatus::Consumed(id)) => Some(id), - _ => None, - } - } - - pub(crate) fn insert_unconsumed_output(&mut self, out_point: OutPoint) { - self.outputs.insert(out_point, OutPointStatus::UnConsumed); - } - - pub(crate) fn insert_consumed_output(&mut self, out_point: OutPoint, id: ProposalShortId) { - self.outputs.insert(out_point, OutPointStatus::Consumed(id)); - } - pub(crate) fn get_input_ref(&self, out_point: &OutPoint) -> Option<&ProposalShortId> { self.inputs.get(out_point) } @@ -71,26 +43,6 @@ impl Edges { self.deps.get(out_point) } - pub(crate) fn set_output_consumed( - &mut self, - out_point: &OutPoint, - tx_short_id: &ProposalShortId, - ) { - if let Some(status) = self.outputs.get_mut(out_point) { - *status = OutPointStatus::Consumed(tx_short_id.clone()); - } - } - - pub(crate) fn set_output_unconsumed(&mut self, out_point: &OutPoint) { - if let Some(status) = self.outputs.get_mut(out_point) { - *status = OutPointStatus::UnConsumed; - } - } - - pub(crate) fn get_output_ref(&self, out_point: &OutPoint) -> Option<&OutPointStatus> { - self.outputs.get(out_point) - } - pub(crate) fn remove_deps(&mut self, out_point: &OutPoint) -> Option> { self.deps.remove(out_point) } @@ -111,7 +63,6 @@ impl Edges { pub(crate) fn clear(&mut self) { self.inputs.clear(); - self.outputs.clear(); self.deps.clear(); self.header_deps.clear(); } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 596a9dce8f..6171f83f97 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -1,7 +1,7 @@ //! Top-level Pool type, methods, and tests extern crate rustc_hash; extern crate slab; -use crate::component::edges::{Edges, OutPointStatus}; +use crate::component::edges::Edges; use crate::component::entry::EvictKey; use crate::component::links::{Relation, TxLinksMap}; use crate::component::score_key::AncestorsScoreSortKey; @@ -75,11 +75,6 @@ impl PoolMap { } } - #[cfg(test)] - pub(crate) fn outputs_len(&self) -> usize { - self.edges.outputs_len() - } - #[cfg(test)] pub(crate) fn header_deps_len(&self) -> usize { self.edges.header_deps_len() @@ -403,7 +398,8 @@ impl PoolMap { // if input reference a in-pool output, connect it // otherwise, record input for conflict check for i in inputs { - self.edges.set_output_consumed(&i, &tx_short_id); + // FIXME: This assertion is invalid only for plug_entry + // assert!(self.edges.get_input_ref(&i).is_none()); self.edges.insert_input(i.to_owned(), tx_short_id.clone()); } @@ -413,10 +409,7 @@ impl PoolMap { children.extend(ids); } if let Some(id) = self.edges.get_input_ref(&o).cloned() { - self.edges.insert_consumed_output(o, id.clone()); children.insert(id); - } else { - self.edges.insert_unconsumed_output(o); } } // update children @@ -502,16 +495,9 @@ impl PoolMap { fn remove_entry_edges(&mut self, entry: &TxEntry) { let inputs = entry.transaction().input_pts_iter(); - let outputs = entry.transaction().output_pts(); - - for o in outputs { - self.edges.remove_output(&o); - } - for i in inputs { // release input record self.edges.remove_input(&i); - self.edges.set_output_unconsumed(&i); } } @@ -543,16 +529,13 @@ impl CellProvider for PoolMap { if self.edges.get_input_ref(out_point).is_some() { return CellStatus::Dead; } - match self.edges.get_output_ref(out_point) { - Some(OutPointStatus::UnConsumed) => { - let (output, data) = self.get_output_with_data(out_point).expect("output"); - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - Some(OutPointStatus::Consumed(_id)) => CellStatus::Dead, - _ => CellStatus::Unknown, + if let Some((output, data)) = self.get_output_with_data(out_point) { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } else { + CellStatus::Unknown } } } @@ -562,10 +545,9 @@ impl CellChecker for PoolMap { if self.edges.get_input_ref(out_point).is_some() { return Some(false); } - match self.edges.get_output_ref(out_point) { - Some(OutPointStatus::Consumed(_id)) => Some(false), - Some(OutPointStatus::UnConsumed) => Some(true), - _ => None, + if self.get_output_with_data(out_point).is_some() { + return Some(true); } + None } } diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index 2445593caf..ec1b65397b 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -30,7 +30,6 @@ fn test_basic() { assert!(pool.contains_key(&tx2.proposal_short_id())); assert_eq!(pool.inputs_len(), 4); - assert_eq!(pool.outputs_len(), 4); assert_eq!( pool.entries @@ -47,7 +46,6 @@ fn test_basic() { assert!(pool.edges.deps.is_empty()); assert!(pool.edges.inputs.is_empty()); assert!(pool.edges.header_deps.is_empty()); - assert!(pool.edges.outputs.is_empty()); } #[test] @@ -131,7 +129,6 @@ fn test_resolve_conflict_header_dep() { assert_eq!(pool.inputs_len(), 3); assert_eq!(pool.header_deps_len(), 1); - assert_eq!(pool.outputs_len(), 2); let mut headers = HashSet::new(); headers.insert(header); @@ -191,7 +188,6 @@ fn test_fill_proposals() { assert_eq!(pool.inputs_len(), 5); assert_eq!(pool.deps_len(), 1); - assert_eq!(pool.outputs_len(), 7); let id1 = tx1.proposal_short_id(); let id2 = tx2.proposal_short_id(); diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index 4f864e3a47..b443d4f045 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -69,11 +69,9 @@ fn test_add_entry() { .unwrap(); assert_eq!(pool.size(), 2); - assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 3); pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(pool.edges.outputs_len(), 1); assert_eq!(pool.edges.inputs_len(), 1); } @@ -99,7 +97,6 @@ fn test_add_entry_from_detached() { pool.add_proposed(entry3).unwrap(); assert_eq!(pool.size(), 3); - assert_eq!(pool.edges.outputs_len(), 3); assert_eq!(pool.edges.inputs_len(), 4); assert_eq!(pool.size(), 3); @@ -140,7 +137,6 @@ fn test_add_entry_from_detached() { } pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); assert_eq!(pool.entries.len(), 2); @@ -218,12 +214,10 @@ fn test_add_roots() { )) .unwrap(); - assert_eq!(pool.edges.outputs_len(), 4); assert_eq!(pool.edges.inputs_len(), 4); pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(pool.edges.outputs_len(), 3); assert_eq!(pool.edges.inputs_len(), 2); } @@ -279,12 +273,10 @@ fn test_add_no_roots() { )) .unwrap(); - assert_eq!(pool.edges.outputs_len(), 13); assert_eq!(pool.edges.inputs_len(), 7); pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(pool.edges.outputs_len(), 10); assert_eq!(pool.edges.inputs_len(), 6); } @@ -639,14 +631,12 @@ fn test_disordered_remove_committed_tx() { pool.add_proposed(entry1).unwrap(); pool.add_proposed(entry2).unwrap(); - assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); pool.remove_entry(&tx2.proposal_short_id()); pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.inputs_len(), 0); - assert_eq!(pool.edges.outputs_len(), 0); } #[test] @@ -671,7 +661,6 @@ fn test_max_ancestors() { assert!(pool.calc_descendants(&tx1_id).is_empty()); assert_eq!(pool.edges.inputs_len(), 1); - assert_eq!(pool.edges.outputs_len(), 1); } #[test] @@ -698,7 +687,6 @@ fn test_max_ancestors_with_dep() { assert!(pool.calc_descendants(&tx1_id).is_empty()); assert_eq!(pool.edges.inputs_len(), 1); - assert_eq!(pool.edges.outputs_len(), 1); } #[test] diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 154bd4b514..f4c69ac8ae 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -423,7 +423,6 @@ impl TxPool { self.total_tx_size = 0; self.total_tx_cycles = 0; self.pool_map.clear(); - // self.touch_last_txs_updated_at(); txs } From 459189a9cbd8f7654350dc6a07a3ca7b8f4f3fb4 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 13 Jul 2023 18:00:02 +0800 Subject: [PATCH 073/267] fix get_all_entry_info and get_ids to sort tx --- tx-pool/src/component/commit_txs_scanner.rs | 2 +- tx-pool/src/component/pool_map.rs | 13 +++++---- tx-pool/src/component/tests/proposed.rs | 6 ++--- tx-pool/src/pool.rs | 30 ++++++++++----------- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index f90ac94094..c2058fdba6 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -81,7 +81,7 @@ impl<'a> CommitTxsScanner<'a> { let mut cycles: Cycle = 0; let mut consecutive_failed = 0; - let mut iter = self.pool_map.score_sorted_iter().peekable(); + let mut iter = self.pool_map.sorted_proposed_iter().peekable(); loop { let mut using_modified = false; diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 6171f83f97..c51b45be7f 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -129,8 +129,8 @@ impl PoolMap { self.entries.get_by_status(&Status::Proposed).len() } - pub(crate) fn score_sorted_iter(&self) -> impl Iterator { - self.score_sorted_iter_by(Status::Proposed) + pub(crate) fn sorted_proposed_iter(&self) -> impl Iterator { + self.score_sorted_iter_by(vec![Status::Proposed]) } pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { @@ -284,7 +284,7 @@ impl PoolMap { proposals: &mut HashSet, status: &Status, ) { - for entry in self.score_sorted_iter_by(*status) { + for entry in self.score_sorted_iter_by(vec![*status]) { if proposals.len() == limit { break; } @@ -312,11 +312,14 @@ impl PoolMap { self.links.clear(); } - fn score_sorted_iter_by(&self, status: Status) -> impl Iterator { + pub(crate) fn score_sorted_iter_by( + &self, + statuses: Vec, + ) -> impl Iterator { self.entries .iter_by_score() .rev() - .filter(move |entry| entry.status == status) + .filter(move |entry| statuses.contains(&entry.status)) .map(|entry| &entry.inner) } diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index b443d4f045..3536bdfcdc 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -314,7 +314,7 @@ fn test_sorted_by_tx_fee_rate() { .unwrap(); let txs_sorted_by_fee_rate = pool - .score_sorted_iter() + .sorted_proposed_iter() .map(|entry| entry.transaction().hash()) .collect::>(); let expect_result = vec![tx2.hash(), tx3.hash(), tx1.hash()]; @@ -365,7 +365,7 @@ fn test_sorted_by_ancestors_score() { .unwrap(); let txs_sorted_by_fee_rate = pool - .score_sorted_iter() + .sorted_proposed_iter() .map(|entry| entry.transaction().hash()) .collect::>(); let expect_result = vec![tx4.hash(), tx2.hash(), tx3.hash(), tx1.hash()]; @@ -406,7 +406,7 @@ fn test_sorted_by_ancestors_score_competitive() { } let txs_sorted_by_fee_rate = pool - .score_sorted_iter() + .sorted_proposed_iter() .map(|entry| format!("{}", entry.transaction().hash())) .collect::>(); // the entry with most ancestors score will win diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index f4c69ac8ae..bd0fa3ca53 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -364,17 +364,16 @@ impl TxPool { } pub(crate) fn get_ids(&self) -> TxPoolIds { - let pending: Vec = self - .get_by_status(&Status::Pending) - .iter() - .chain(self.get_by_status(&Status::Gap).iter()) - .map(|entry| entry.inner.transaction().hash()) + let pending = self + .pool_map + .score_sorted_iter_by(vec![Status::Pending, Status::Gap]) + .map(|entry| entry.transaction().hash()) .collect(); - let proposed: Vec = self - .get_by_status(&Status::Proposed) - .iter() - .map(|entry| entry.inner.transaction().hash()) + let proposed = self + .pool_map + .sorted_proposed_iter() + .map(|entry| entry.transaction().hash()) .collect(); TxPoolIds { pending, proposed } @@ -382,16 +381,15 @@ impl TxPool { pub(crate) fn get_all_entry_info(&self) -> TxPoolEntryInfo { let pending = self - .get_by_status(&Status::Pending) - .iter() - .chain(self.get_by_status(&Status::Gap).iter()) - .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) + .pool_map + .score_sorted_iter_by(vec![Status::Pending, Status::Gap]) + .map(|entry| (entry.transaction().hash(), entry.to_info())) .collect(); let proposed = self - .get_by_status(&Status::Proposed) - .iter() - .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) + .pool_map + .sorted_proposed_iter() + .map(|entry| (entry.transaction().hash(), entry.to_info())) .collect(); TxPoolEntryInfo { pending, proposed } From 8ffeeab5a7643b2ce989d14af3bad9630e4a5b56 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 13 Jul 2023 23:31:29 +0800 Subject: [PATCH 074/267] fix tests and remove timestamp in sort key --- test/src/specs/tx_pool/get_raw_tx_pool.rs | 21 +++++++++++++-------- tx-pool/src/component/entry.rs | 2 +- tx-pool/src/component/score_key.rs | 8 +------- tx-pool/src/component/tests/pending.rs | 4 ++-- tx-pool/src/component/tests/score_key.rs | 2 -- 5 files changed, 17 insertions(+), 20 deletions(-) diff --git a/test/src/specs/tx_pool/get_raw_tx_pool.rs b/test/src/specs/tx_pool/get_raw_tx_pool.rs index 6ff3ae1db7..1e3056f338 100644 --- a/test/src/specs/tx_pool/get_raw_tx_pool.rs +++ b/test/src/specs/tx_pool/get_raw_tx_pool.rs @@ -1,7 +1,7 @@ use crate::{Node, Spec}; -use ckb_jsonrpc_types::{RawTxPool, TxPoolIds}; +use ckb_jsonrpc_types::RawTxPool; use ckb_logger::info; -use ckb_types::prelude::Unpack; +use ckb_types::{prelude::Unpack, H256}; pub struct GetRawTxPool; @@ -21,13 +21,18 @@ impl Spec for GetRawTxPool { txs_hash.push(node0.rpc_client().send_transaction(tx.data().into())); }); - let raw_tx_pool = RawTxPool::Ids(TxPoolIds { - pending: txs_hash.iter().map(Unpack::unpack).collect(), - proposed: Vec::new(), - }); + let mut pending: Vec = txs_hash.iter().map(Unpack::unpack).collect(); + pending.sort(); let result = node0.rpc_client().get_raw_tx_pool(None); - assert_eq!(raw_tx_pool, result); - + match result { + RawTxPool::Ids(ids) => { + assert_eq!(0, ids.proposed.len()); + let mut ids = ids.pending; + ids.sort(); + assert_eq!(ids, pending); + } + _ => panic!("get_raw_tx_pool(true) should return entries"), + } match node0.rpc_client().get_raw_tx_pool(Some(true)) { RawTxPool::Ids(_ids) => { panic!("get_raw_tx_pool(true) should return entries"); diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 3638e21285..2f8fdf95ef 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -204,7 +204,7 @@ impl From<&TxEntry> for AncestorsScoreSortKey { id: entry.proposal_short_id(), ancestors_fee: entry.ancestors_fee, ancestors_weight, - timestamp: entry.timestamp, + //timestamp: entry.timestamp, } } } diff --git a/tx-pool/src/component/score_key.rs b/tx-pool/src/component/score_key.rs index 1a9843b7ad..18dd48fcb2 100644 --- a/tx-pool/src/component/score_key.rs +++ b/tx-pool/src/component/score_key.rs @@ -9,7 +9,6 @@ pub struct AncestorsScoreSortKey { pub id: ProposalShortId, pub ancestors_fee: Capacity, pub ancestors_weight: u64, - pub timestamp: u64, } impl AncestorsScoreSortKey { @@ -43,12 +42,7 @@ impl Ord for AncestorsScoreSortKey { if self_weight == other_weight { // if fee rate weight is same, then compare with ancestor weight if self.ancestors_weight == other.ancestors_weight { - if self.timestamp == other.timestamp { - self.id.raw_data().cmp(&other.id.raw_data()) - } else { - // NOTE: we use timestamp to compare, so the order is reversed - self.timestamp.cmp(&other.timestamp).reverse() - } + self.id.raw_data().cmp(&other.id.raw_data()) } else { self.ancestors_weight.cmp(&other.ancestors_weight) } diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index ec1b65397b..e81c3520b8 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -202,11 +202,11 @@ fn test_fill_proposals() { let mut ret = HashSet::new(); pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1.clone()])); + assert_eq!(ret.len(), 1); let mut ret = HashSet::new(); pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1.clone(), id2.clone()])); + assert_eq!(ret.len(), 2); let mut ret = HashSet::new(); let mut exclusion = HashSet::new(); diff --git a/tx-pool/src/component/tests/score_key.rs b/tx-pool/src/component/tests/score_key.rs index 7acc22a895..09475f3d19 100644 --- a/tx-pool/src/component/tests/score_key.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -30,7 +30,6 @@ fn test_min_fee_and_weight() { id: ProposalShortId::new([0u8; 10]), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, - timestamp: 0, }; key.min_fee_and_weight() }) @@ -75,7 +74,6 @@ fn test_ancestors_sorted_key_order() { id: ProposalShortId::new(id), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, - timestamp: 0, } }) .collect::>(); From cc1a72c033783e27099d751dabf1e00eea9553a9 Mon Sep 17 00:00:00 2001 From: yukang Date: Fri, 14 Jul 2023 22:14:11 +0800 Subject: [PATCH 075/267] cleanup entry deps, ancestors, descendants --- tx-pool/src/component/pool_map.rs | 40 +++++++++++-------------------- 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index c51b45be7f..2ebe4210b9 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -173,10 +173,10 @@ impl PoolMap { return Ok(false); } trace!("pool_map.add_{:?} {}", status, entry.transaction().hash()); - self.record_entry_links(&mut entry)?; + self.check_record_ancestors(&mut entry)?; self.insert_entry(&entry, status); self.record_entry_deps(&entry); - self.record_entry_edges(&entry); + self.record_entry_descendants(&entry); Ok(true) } @@ -379,6 +379,13 @@ impl PoolMap { let tx_short_id: ProposalShortId = entry.proposal_short_id(); let header_deps = entry.transaction().header_deps(); let related_dep_out_points: Vec<_> = entry.related_dep_out_points().cloned().collect(); + let inputs = entry.transaction().input_pts_iter(); + + // if input reference a in-pool output, connect it + // otherwise, record input for conflict check + for i in inputs { + self.edges.insert_input(i.to_owned(), tx_short_id.clone()); + } // record dep-txid for d in related_dep_out_points { @@ -392,21 +399,12 @@ impl PoolMap { } } - fn record_entry_edges(&mut self, entry: &TxEntry) { + fn record_entry_descendants(&mut self, entry: &TxEntry) { let tx_short_id: ProposalShortId = entry.proposal_short_id(); - let inputs = entry.transaction().input_pts_iter(); let outputs = entry.transaction().output_pts(); - let mut children = HashSet::new(); - // if input reference a in-pool output, connect it - // otherwise, record input for conflict check - for i in inputs { - // FIXME: This assertion is invalid only for plug_entry - // assert!(self.edges.get_input_ref(&i).is_none()); - self.edges.insert_input(i.to_owned(), tx_short_id.clone()); - } - // record tx output + // collect children for o in outputs { if let Some(ids) = self.edges.get_deps_ref(&o).cloned() { children.extend(ids); @@ -425,13 +423,12 @@ impl PoolMap { } self.update_descendants_index_key(entry, EntryOp::Add); } - // update ancestors + // update ancestor's index key for adding new entry self.update_ancestors_index_key(entry, EntryOp::Add); } - /// Record the links for entry - fn record_entry_links(&mut self, entry: &mut TxEntry) -> Result { - // find in pool parents + /// Check ancestors and record for entry + fn check_record_ancestors(&mut self, entry: &mut TxEntry) -> Result { let mut parents: HashSet = HashSet::with_capacity( entry.transaction().inputs().len() + entry.transaction().cell_deps().len(), ); @@ -474,15 +471,6 @@ impl PoolMap { return Err(Reject::ExceededMaximumAncestorsCount); } - for cell_dep in entry.transaction().cell_deps() { - let dep_pt = cell_dep.out_point(); - self.edges - .deps - .entry(dep_pt) - .or_insert_with(HashSet::new) - .insert(short_id.clone()); - } - for parent in &parents { self.links.add_child(parent, short_id.clone()); } From 287d1c62515912f92a243076a4ec50652f5200af Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 18 Jul 2023 19:20:45 +0800 Subject: [PATCH 076/267] terminate process when nonce limit reached --- miner/src/miner.rs | 4 +++- util/stop-handler/src/stop_register.rs | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 110fad5514..101f156b4d 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -5,6 +5,7 @@ use ckb_app_config::MinerWorkerConfig; use ckb_channel::{select, unbounded, Receiver}; use ckb_logger::{debug, error, info}; use ckb_pow::PowEngine; +use ckb_stop_handler::broadcast_exit_signals; use ckb_types::{ packed::{Byte32, Header}, prelude::*, @@ -94,7 +95,8 @@ impl Miner { Ok((pow_hash, work, nonce)) => { self.submit_nonce(pow_hash, work, nonce); if self.limit != 0 && self.nonces_found >= self.limit { - break; + debug!("miner nonce limit reached, terminate ..."); + broadcast_exit_signals(); } }, _ => { diff --git a/util/stop-handler/src/stop_register.rs b/util/stop-handler/src/stop_register.rs index 8948217c19..d7f2a7f560 100644 --- a/util/stop-handler/src/stop_register.rs +++ b/util/stop-handler/src/stop_register.rs @@ -12,8 +12,8 @@ pub fn wait_all_ckb_services_exit() { info!("waiting exit signal..."); let exit_signal = new_crossbeam_exit_rx(); let _ = exit_signal.recv(); - debug!("received exit signal, broadcasting exit signal to all threads"); let mut handles = CKB_HANDLES.lock(); + debug!("wait_all_ckb_services_exit wait all threads to exit"); for (name, join_handle) in handles.thread_handles.drain(..) { match join_handle.join() { Ok(_) => { @@ -54,6 +54,7 @@ pub fn new_crossbeam_exit_rx() -> ckb_channel::Receiver<()> { /// Broadcast exit signals to all threads and all tokio tasks pub fn broadcast_exit_signals() { + debug!("received exit signal, broadcasting exit signal to all threads"); TOKIO_EXIT.cancel(); CROSSBEAM_EXIT_SENDERS .lock() From ade27e03cdb5a512e6973f5888b33c658cce3b77 Mon Sep 17 00:00:00 2001 From: mohanson Date: Fri, 21 Jul 2023 13:35:26 +0800 Subject: [PATCH 077/267] Update ckb-vm to v0.24.4 --- script/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/Cargo.toml b/script/Cargo.toml index 0a55306a5a..c07654e17c 100644 --- a/script/Cargo.toml +++ b/script/Cargo.toml @@ -22,7 +22,7 @@ ckb-traits = { path = "../traits", version = "= 0.112.0-pre" } byteorder = "1.3.1" ckb-types = { path = "../util/types", version = "= 0.112.0-pre" } ckb-hash = { path = "../util/hash", version = "= 0.112.0-pre" } -ckb-vm = { version = "=0.24.3", default-features = false } +ckb-vm = { version = "=0.24.4", default-features = false } faster-hex = "0.6" ckb-logger = { path = "../util/logger", version = "= 0.112.0-pre", optional = true } serde = { version = "1.0", features = ["derive"] } From a1909161c8cf3472f78f766e0813e870186bca47 Mon Sep 17 00:00:00 2001 From: mohanson Date: Fri, 21 Jul 2023 15:47:17 +0800 Subject: [PATCH 078/267] Update Cargo.lock --- Cargo.lock | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ddd51deff0..6122a592fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1528,9 +1528,9 @@ dependencies = [ [[package]] name = "ckb-vm" -version = "0.24.3" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f5747a877a71ff164fa0f17daf6e9abca036c2381b8576679fb3ac07ae77bbc" +checksum = "40894adbde925bfc6584d324a06228e19d78bd877146fc7df085927552d29f50" dependencies = [ "byteorder", "bytes 1.4.0", @@ -1546,9 +1546,12 @@ dependencies = [ [[package]] name = "ckb-vm-definitions" -version = "0.24.3" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83869c9d322de1ddbfde5b54b7376f9a1ac32273c50e21cdd5e8a1bd1a1cf632" +checksum = "0253bdea8dc20db90b58fe54e01392f71989e0567d42e09e7f8e588f156551db" +dependencies = [ + "paste", +] [[package]] name = "clang-sys" From 342e93946f316ff43fc2d6334dfe7fb01922541b Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 2 Aug 2023 12:21:21 +0800 Subject: [PATCH 079/267] add ctrl hander for test main --- test/Cargo.toml | 1 + test/src/main.rs | 36 ++++++++++++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/test/Cargo.toml b/test/Cargo.toml index 856c11332f..3dc3649d0e 100644 --- a/test/Cargo.toml +++ b/test/Cargo.toml @@ -37,6 +37,7 @@ serde_json = "1.0" lazy_static = "1.4.0" byteorder = "1.3.1" jsonrpc-core = "18.0" +ctrlc = { version = "3.1", features = ["termination"] } [target.'cfg(not(target_os="windows"))'.dependencies] nix = { version = "0.24.0", default-features = false, features = ["signal"] } diff --git a/test/src/main.rs b/test/src/main.rs index 0ef0feafff..227e2c0b0d 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -12,6 +12,7 @@ use clap::{App, Arg}; use rand::{seq::SliceRandom, thread_rng}; use std::any::Any; use std::cmp::min; +use std::collections::HashSet; use std::env; use std::fs::{self, read_to_string, File}; use std::io::{self, BufRead, BufReader, Write}; @@ -113,6 +114,8 @@ fn main() { info!("max time: {:?}", max_time); let specs = filter_specs(all_specs(), spec_names_to_run); + let running_spec_names = Arc::new(Mutex::new(HashSet::new())); + let total = specs.len(); let worker_count = min(worker_count, total); let specs = Arc::new(Mutex::new(specs)); @@ -122,6 +125,22 @@ fn main() { let (notify_tx, notify_rx) = unbounded(); + let cloned_running_names = running_spec_names.clone(); + ctrlc::set_handler(move || { + std::thread::sleep(Duration::from_secs(1)); + warn!( + "Total {} specs are not finished", + cloned_running_names.lock().len() + ); + for name in cloned_running_names.lock().iter() { + warn!("spec {} is still not finished", name); + } + // sleep 1 second to wait for the log flush + std::thread::sleep(Duration::from_secs(1)); + std::process::exit(1); + }) + .expect("Error setting Ctrl-C handler"); + info!("start {} workers...", worker_count); let mut workers = Workers::new(worker_count, Arc::clone(&specs), notify_tx, start_port); workers.start(); @@ -148,6 +167,7 @@ fn main() { match msg { Notify::Start { spec_name } => { info!("[{}] Start executing", spec_name); + running_spec_names.lock().insert(spec_name); } Notify::Error { spec_error, @@ -166,6 +186,7 @@ fn main() { workers.shutdown(); worker_running -= 1; } + running_spec_names.lock().remove(&spec_name); spec_errors.push(Some(spec_error)); if verbose { info!("[{}] Error", spec_name); @@ -189,6 +210,7 @@ fn main() { worker_running -= 1; } spec_errors.push(None); + running_spec_names.lock().remove(&spec_name); if verbose { info!("[{}] Panic", spec_name); print_panicked_logs(&node_log_paths); @@ -204,6 +226,7 @@ fn main() { status: TestResultStatus::Passed, duration: seconds, }); + running_spec_names.lock().remove(&spec_name); done_specs += 1; info!( "{}/{} .............. [{}] Done in {} seconds", @@ -222,6 +245,7 @@ fn main() { } } } + // join all workers threads workers.join_all(); @@ -294,8 +318,12 @@ fn clap_app() -> App<'static> { .value_name("SECONDS") .help("Exit when total running time exceeds this limit"), ) - .arg(Arg::with_name("list-specs").long("list-specs")) - .arg(Arg::with_name("specs").multiple(true)) + .arg( + Arg::with_name("list-specs") + .long("list-specs") + .help("list all specs"), + ) + .arg(Arg::with_name("specs").multiple(true).help("Specs to run")) .arg( Arg::with_name("concurrent") .short('c') @@ -626,13 +654,13 @@ fn log_failed_specs(error_spec_names: &[String]) -> Result<(), io::Error> { fn print_results(mut test_results: Vec) { println!("{}", "-".repeat(20)); - println!("{:50} | {:10} | {:10}", "TEST", "STATUS", "DURATION"); + println!("{:65} | {:10} | {:10}", "TEST", "STATUS", "DURATION"); test_results.sort_by(|a, b| (&a.status, a.duration).cmp(&(&b.status, b.duration))); for result in test_results.iter() { println!( - "{:50} | {:10} | {:<10}", + "{:65} | {:10} | {:<10}", result.spec_name, format!("{:?}", result.status), format!("{} s", result.duration), From 3ef18d9352b5700b448e8f9d3e43459af4b17b31 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 2 Aug 2023 12:25:10 +0800 Subject: [PATCH 080/267] fix the timeout issue in integeration main --- devtools/ci/ci_main.sh | 2 +- test/src/main.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/devtools/ci/ci_main.sh b/devtools/ci/ci_main.sh index c48a7382c6..377da7e939 100755 --- a/devtools/ci/ci_main.sh +++ b/devtools/ci/ci_main.sh @@ -51,7 +51,7 @@ case $GITHUB_WORKFLOW in if [[ $github_workflow_os == 'macos' ]]; then export CKB_FEATURES="deadlock_detection,with_sentry,portable" fi - make CKB_TEST_SEC_COEFFICIENT=5 CKB_TEST_ARGS="-c 4 --no-report" integration + make CKB_TEST_SEC_COEFFICIENT=5 CKB_TEST_ARGS="-c 4 --no-report --max-time 3600 " integration ;; ci_quick_checks*) echo "ci_quick_check" diff --git a/test/src/main.rs b/test/src/main.rs index 227e2c0b0d..f650e7ab35 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -125,7 +125,7 @@ fn main() { let (notify_tx, notify_rx) = unbounded(); - let cloned_running_names = running_spec_names.clone(); + let cloned_running_names = Arc::clone(&running_spec_names); ctrlc::set_handler(move || { std::thread::sleep(Duration::from_secs(1)); warn!( @@ -153,6 +153,7 @@ fn main() { if max_time > 0 && start_time.elapsed().as_secs() > max_time { // shutdown, specs running to long workers.shutdown(); + break; } let msg = match notify_rx.recv_timeout(Duration::from_secs(5)) { From 5673983e7de82678bf1ad30d67c90214356b6e98 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 2 Aug 2023 14:38:14 +0800 Subject: [PATCH 081/267] node start will not panic when child process crashed --- test/Cargo.toml | 1 + test/src/main.rs | 3 +-- test/src/node.rs | 7 ++++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/test/Cargo.toml b/test/Cargo.toml index 3dc3649d0e..2f31f4dd68 100644 --- a/test/Cargo.toml +++ b/test/Cargo.toml @@ -38,6 +38,7 @@ lazy_static = "1.4.0" byteorder = "1.3.1" jsonrpc-core = "18.0" ctrlc = { version = "3.1", features = ["termination"] } +log = "0.4" [target.'cfg(not(target_os="windows"))'.dependencies] nix = { version = "0.24.0", default-features = false, features = ["signal"] } diff --git a/test/src/main.rs b/test/src/main.rs index f650e7ab35..02e09c809b 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -135,8 +135,7 @@ fn main() { for name in cloned_running_names.lock().iter() { warn!("spec {} is still not finished", name); } - // sleep 1 second to wait for the log flush - std::thread::sleep(Duration::from_secs(1)); + log::logger().flush(); std::process::exit(1); }) .expect("Error setting Ctrl-C handler"); diff --git a/test/src/node.rs b/test/src/node.rs index 50a8c8f21d..168370bcaa 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -22,7 +22,7 @@ use std::collections::HashSet; use std::convert::Into; use std::fs; use std::path::PathBuf; -use std::process::{self, Child, Command, Stdio}; +use std::process::{Child, Command, Stdio}; use std::thread::sleep; use std::time::{Duration, Instant}; @@ -609,7 +609,8 @@ impl Node { status, self.log_path().display() ); - process::exit(status.code().unwrap()); + // parent process will exit + return; } Err(error) => { error!( @@ -617,7 +618,7 @@ impl Node { error, self.log_path().display() ); - process::exit(255); + return; } } }; From 941ec2db329fcea29f2dac8d320d831543821b45 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 4 Aug 2023 09:07:06 +0800 Subject: [PATCH 082/267] Increase integration CI timeout to 2 hours --- devtools/ci/ci_main.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devtools/ci/ci_main.sh b/devtools/ci/ci_main.sh index 377da7e939..25623ed7f9 100755 --- a/devtools/ci/ci_main.sh +++ b/devtools/ci/ci_main.sh @@ -51,7 +51,7 @@ case $GITHUB_WORKFLOW in if [[ $github_workflow_os == 'macos' ]]; then export CKB_FEATURES="deadlock_detection,with_sentry,portable" fi - make CKB_TEST_SEC_COEFFICIENT=5 CKB_TEST_ARGS="-c 4 --no-report --max-time 3600 " integration + make CKB_TEST_SEC_COEFFICIENT=5 CKB_TEST_ARGS="-c 4 --no-report --max-time 7200 " integration ;; ci_quick_checks*) echo "ci_quick_check" From 2228b9e71151c2af1a2014a36e2bb03c6d136f98 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 4 Aug 2023 09:17:48 +0800 Subject: [PATCH 083/267] Print node Spec name when killed the node --- test/src/node.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/test/src/node.rs b/test/src/node.rs index 168370bcaa..143367a148 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -27,6 +27,7 @@ use std::thread::sleep; use std::time::{Duration, Instant}; struct ProcessGuard { + pub name: String, pub child: Child, pub killed: bool, } @@ -35,8 +36,8 @@ impl Drop for ProcessGuard { fn drop(&mut self) { if !self.killed { match self.child.kill() { - Err(e) => error!("Could not kill ckb process: {}", e), - Ok(_) => debug!("Successfully killed ckb process"), + Err(e) => error!("Could not kill ckb process ({}): {}", self.name, e), + Ok(_) => debug!("Successfully killed ckb process ({})", self.name), } let _ = self.child.wait(); } @@ -44,6 +45,7 @@ impl Drop for ProcessGuard { } pub struct Node { + spec_node_name: String, working_dir: PathBuf, consensus: Consensus, p2p_listen: String, @@ -74,8 +76,9 @@ impl Node { .unwrap_or_else(|_| panic!("cp {:?} {}", src.display(), dest.display())); } + let spec_node_name = format!("{}_{}", spec_name, node_name); // Allocate rpc port and p2p port, and fill into app config - let mut node = Self::init(working_dir); + let mut node = Self::init(working_dir, spec_node_name); node.modify_app_config(|app_config| { let rpc_port = find_available_port(); let p2p_port = find_available_port(); @@ -99,7 +102,7 @@ impl Node { modifier(&mut app_config); fs::write(&app_config_path, toml::to_string(&app_config).unwrap()).unwrap(); - *self = Self::init(self.working_dir()); + *self = Self::init(self.working_dir(), self.spec_node_name.clone()); } pub fn modify_chain_spec(&mut self, modifier: M) @@ -112,11 +115,11 @@ impl Node { modifier(&mut chain_spec); fs::write(&chain_spec_path, toml::to_string(&chain_spec).unwrap()).unwrap(); - *self = Self::init(self.working_dir()); + *self = Self::init(self.working_dir(), self.spec_node_name.clone()); } // Initialize Node instance based on working directory - fn init(working_dir: PathBuf) -> Self { + fn init(working_dir: PathBuf, spec_node_name: String) -> Self { let app_config = { let app_config_path = working_dir.join("ckb.toml"); let toml = fs::read(app_config_path).unwrap(); @@ -144,6 +147,7 @@ impl Node { chain_spec.build_consensus().unwrap() }; Self { + spec_node_name, working_dir, consensus, p2p_listen, @@ -626,6 +630,7 @@ impl Node { self.wait_tx_pool_ready(); self.guard = Some(ProcessGuard { + name: self.spec_node_name.clone(), child: child_process, killed: false, }); From 6a24f7f5d4655bfb7c025ea5fefbeba612a7dc4e Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 15 May 2023 18:19:55 +0800 Subject: [PATCH 084/267] begin refactor tx_pool --- tx-pool/src/component/commit_txs_scanner.rs | 7 +- tx-pool/src/pool.rs | 634 +++++++++++++++++++- 2 files changed, 615 insertions(+), 26 deletions(-) diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index 8f97432ae8..af277a1b0a 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -2,6 +2,7 @@ use crate::component::{container::AncestorsScoreSortKey, entry::TxEntry, propose use ckb_types::{core::Cycle, packed::ProposalShortId}; use ckb_util::LinkedHashMap; use std::collections::{BTreeSet, HashMap, HashSet}; +use crate::pool::MultiIndexPoolEntryMap; // A template data struct used to store modified entries when package txs #[derive(Default)] @@ -49,6 +50,7 @@ const MAX_CONSECUTIVE_FAILURES: usize = 500; /// find txs to package into commitment pub struct CommitTxsScanner<'a> { proposed_pool: &'a ProposedPool, + pool_entries: &'a MultiIndexPoolEntryMap, entries: Vec, // modified_entries will store sorted packages after they are modified // because some of their txs are already in the block @@ -60,10 +62,11 @@ pub struct CommitTxsScanner<'a> { } impl<'a> CommitTxsScanner<'a> { - pub fn new(proposed_pool: &'a ProposedPool) -> CommitTxsScanner<'a> { + pub fn new(proposed_pool: &'a ProposedPool, pool_entries: &'a MultiIndexPoolEntryMap) -> CommitTxsScanner<'a> { CommitTxsScanner { proposed_pool, entries: Vec::new(), + pool_entries: pool_entries, modified_entries: TxModifiedEntries::default(), fetched_txs: HashSet::default(), failed_txs: HashSet::default(), @@ -80,7 +83,7 @@ impl<'a> CommitTxsScanner<'a> { let mut cycles: Cycle = 0; let mut consecutive_failed = 0; - let mut iter = self.proposed_pool.score_sorted_iter().peekable(); + let mut iter = self.pool_entries.score_sorted_iter().peekable(); loop { let mut using_modified = false; diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 8fb2da50c6..5ee73cfbb8 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -1,6 +1,10 @@ //! Top-level Pool type, methods, and tests +extern crate rustc_hash; +extern crate slab; use super::component::{commit_txs_scanner::CommitTxsScanner, TxEntry}; use crate::callback::Callbacks; +use crate::component::container::AncestorsScoreSortKey; +use crate::component::entry::EvictKey; use crate::component::pending::PendingQueue; use crate::component::proposed::ProposedPool; use crate::component::recent_reject::RecentReject; @@ -10,9 +14,18 @@ use ckb_app_config::TxPoolConfig; use ckb_logger::{debug, error, trace, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; +use ckb_types::core::error::OutPointError; +use ckb_types::packed::OutPoint; +use ckb_types::{ + core::cell::{CellMetaBuilder, CellProvider, CellStatus}, + prelude::*, +}; use ckb_types::{ core::{ - cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, + cell::{ + resolve_transaction, CellChecker, OverlayCellChecker, OverlayCellProvider, + ResolvedTransaction, + }, tx_pool::{TxPoolEntryInfo, TxPoolIds}, Cycle, TransactionView, UncleBlockView, }, @@ -20,7 +33,10 @@ use ckb_types::{ }; use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; -use std::collections::HashSet; +use multi_index_map::MultiIndexMap; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::collections::{HashSet, VecDeque}; use std::sync::Arc; const COMMITTED_HASH_CACHE_SIZE: usize = 100_000; @@ -51,6 +67,40 @@ macro_rules! evict_for_trim_size { }; } +type ConflictEntry = (TxEntry, Reject); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum Status { + Pending, + Gap, + Proposed, +} + +#[derive(MultiIndexMap, Clone)] +pub struct PoolEntry { + #[multi_index(hashed_unique)] + pub id: ProposalShortId, + #[multi_index(ordered_non_unique)] + pub score: AncestorsScoreSortKey, + #[multi_index(ordered_non_unique)] + pub status: Status, + #[multi_index(ordered_non_unique)] + pub evict_key: EvictKey, + + pub inner: TxEntry, + // other sort key +} + +impl MultiIndexPoolEntryMap { + /// sorted by ancestor score from higher to lower + pub fn score_sorted_iter(&self) -> impl Iterator { + // Note: multi_index don't support reverse order iteration now + // so we need to collect and reverse + let entries = self.iter_by_score().collect::>(); + entries.into_iter().rev().map(move |entry| &entry.inner) + } +} + /// Tx-pool implementation pub struct TxPool { pub(crate) config: TxPoolConfig, @@ -60,6 +110,18 @@ pub struct TxPool { pub(crate) gap: PendingQueue, /// Tx pool that finely for commit pub(crate) proposed: ProposedPool, + + /// The pool entries with different kinds of sort strategies + pub(crate) entries: MultiIndexPoolEntryMap, + /// dep-set map represent in-pool tx's header deps + pub(crate) header_deps: HashMap>, + /// dep-set map represent in-pool tx's deps + pub(crate) deps: HashMap>, + /// input-set map represent in-pool tx's inputs + pub(crate) inputs: HashMap>, + pub(crate) outputs: HashMap>, + pub(crate) max_ancestors_count: usize, + /// cache for committed transactions hash pub(crate) committed_txs_hash_cache: LruCache, // sum of all tx_pool tx's virtual sizes. @@ -77,12 +139,18 @@ pub struct TxPool { impl TxPool { /// Create new TxPool pub fn new(config: TxPoolConfig, snapshot: Arc) -> TxPool { - let recent_reject = build_recent_reject(&config); + let recent_reject = Self::build_recent_reject(&config); let expiry = config.expiry_hours as u64 * 60 * 60 * 1000; TxPool { pending: PendingQueue::new(), gap: PendingQueue::new(), proposed: ProposedPool::new(config.max_ancestors_count), + entries: MultiIndexPoolEntryMap::default(), + header_deps: HashMap::default(), + deps: Default::default(), + inputs: Default::default(), + outputs: Default::default(), + max_ancestors_count: config.max_ancestors_count, committed_txs_hash_cache: LruCache::new(COMMITTED_HASH_CACHE_SIZE), total_tx_size: 0, total_tx_cycles: 0, @@ -135,6 +203,24 @@ impl TxPool { self.total_tx_cycles = total_tx_cycles; } + fn add_poolentry(&mut self, entry: TxEntry, status: Status) -> bool { + let short_id = entry.proposal_short_id(); + if self.entries.get_by_id(&short_id).is_some() { + return false; + } + trace!("add_{:?} {}", status, entry.transaction().hash()); + let score = entry.as_score_key(); + let evict_key = entry.as_evict_key(); + self.entries.insert(PoolEntry { + id: short_id, + score, + status, + inner: entry, + evict_key, + }); + true + } + /// Add tx to pending pool /// If did have this value present, false is returned. pub fn add_pending(&mut self, entry: TxEntry) -> bool { @@ -145,6 +231,10 @@ impl TxPool { self.pending.add_entry(entry) } + pub fn add_pending_v2(&mut self, entry: TxEntry) -> bool { + self.add_poolentry(entry, Status::Pending) + } + /// Add tx which proposed but still uncommittable to gap pool pub fn add_gap(&mut self, entry: TxEntry) -> bool { if self.proposed.contains_key(&entry.proposal_short_id()) { @@ -154,17 +244,29 @@ impl TxPool { self.gap.add_entry(entry) } + pub fn add_gap_v2(&mut self, entry: TxEntry) -> bool { + self.add_poolentry(entry, Status::Gap) + } + /// Add tx to proposed pool pub fn add_proposed(&mut self, entry: TxEntry) -> Result { trace!("add_proposed {}", entry.transaction().hash()); self.proposed.add_entry(entry) } + pub fn add_proposed_v2(&mut self, entry: TxEntry) -> bool { + self.add_poolentry(entry, Status::Proposed) + } + /// Returns true if the tx-pool contains a tx with specified id. pub fn contains_proposal_id(&self, id: &ProposalShortId) -> bool { self.pending.contains_key(id) || self.gap.contains_key(id) || self.proposed.contains_key(id) } + pub fn contains_proposal_id_v2(&self, id: &ProposalShortId) -> bool { + self.entries.get_by_id(id).is_some() + } + /// Returns tx with cycles corresponding to the id. pub fn get_tx_with_cycles(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { self.pending @@ -182,6 +284,12 @@ impl TxPool { }) } + pub fn get_tx_with_cycles_v2(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { + self.entries + .get_by_id(id) + .map(|entry| (entry.inner.transaction().clone(), entry.inner.cycles)) + } + /// Returns tx corresponding to the id. pub fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { self.pending @@ -190,11 +298,28 @@ impl TxPool { .or_else(|| self.proposed.get_tx(id)) } + pub fn get_tx_v2(&self, id: &ProposalShortId) -> Option<&TransactionView> { + self.entries + .get_by_id(id) + .map(|entry| entry.inner.transaction()) + } + /// Returns tx from pending and gap corresponding to the id. RPC pub fn get_entry_from_pending_or_gap(&self, id: &ProposalShortId) -> Option<&TxEntry> { self.pending.get(id).or_else(|| self.gap.get(id)) } + pub fn get_entry_from_pending_or_gap_v2(&self, id: &ProposalShortId) -> Option<&TxEntry> { + if let Some(entry) = self.entries.get_by_id(id) { + match entry.status { + Status::Pending | Status::Gap => return Some(&entry.inner), + _ => return None, + } + } else { + return None; + } + } + pub(crate) fn proposed(&self) -> &ProposedPool { &self.proposed } @@ -209,6 +334,15 @@ impl TxPool { .or_else(|| self.pending.get_tx(id)) } + pub(crate) fn get_tx_from_proposed_and_others_v2( + &self, + id: &ProposalShortId, + ) -> Option<&TransactionView> { + self.entries + .get_by_id(id) + .map(|entry| entry.inner.transaction()) + } + pub(crate) fn remove_committed_txs<'a>( &mut self, txs: impl Iterator, @@ -245,6 +379,129 @@ impl TxPool { } } + pub(crate) fn resolve_conflict_header_dep_v2( + &mut self, + detached_headers: &HashSet, + callbacks: &Callbacks, + ) { + for (entry, reject) in self.__resolve_conflict_header_dep_v2(detached_headers) { + callbacks.call_reject(self, &entry, reject); + } + } + + pub(crate) fn get_descendants(&self, entry: &TxEntry) -> HashSet { + let mut entries: VecDeque<&TxEntry> = VecDeque::new(); + entries.push_back(entry); + + let mut descendants = HashSet::new(); + while let Some(entry) = entries.pop_front() { + let outputs = entry.transaction().output_pts(); + + for output in outputs { + if let Some(ids) = self.outputs.get(&output) { + for id in ids { + if descendants.insert(id.clone()) { + if let Some(entry) = self.entries.get_by_id(id) { + entries.push_back(&entry.inner); + } + } + } + } + } + } + descendants + } + + pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { + let inputs = entry.transaction().input_pts_iter(); + let tx_short_id = entry.proposal_short_id(); + let outputs = entry.transaction().output_pts(); + + for i in inputs { + if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { + let empty = { + let ids = occupied.get_mut(); + ids.remove(&tx_short_id); + ids.is_empty() + }; + if empty { + occupied.remove(); + } + } + } + + // remove dep + for d in entry.related_dep_out_points().cloned() { + if let Entry::Occupied(mut occupied) = self.deps.entry(d) { + let empty = { + let ids = occupied.get_mut(); + ids.remove(&tx_short_id); + ids.is_empty() + }; + if empty { + occupied.remove(); + } + } + } + + for o in outputs { + self.outputs.remove(&o); + } + + self.header_deps.remove(&tx_short_id); + } + + fn remove_entry(&mut self, id: &ProposalShortId) -> Option { + let removed = self.entries.remove_by_id(id); + + if let Some(ref entry) = removed { + self.remove_entry_relation(&entry.inner); + } + removed.map(|e| e.inner) + } + + fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { + let mut removed = Vec::new(); + if let Some(entry) = self.entries.remove_by_id(id) { + let descendants = self.get_descendants(&entry.inner); + self.remove_entry_relation(&entry.inner); + removed.push(entry.inner); + for id in descendants { + if let Some(entry) = self.remove_entry(&id) { + removed.push(entry); + } + } + } + removed + } + + fn __resolve_conflict_header_dep_v2( + &mut self, + headers: &HashSet, + ) -> Vec { + let mut conflicts = Vec::new(); + + // invalid header deps + let mut ids = Vec::new(); + for (tx_id, deps) in self.header_deps.iter() { + for hash in deps { + if headers.contains(hash) { + ids.push((hash.clone(), tx_id.clone())); + break; + } + } + } + + for (blk_hash, id) in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash.to_owned())); + conflicts.push((entry, reject)); + } + } + conflicts + } + pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { let hash = tx.hash(); let short_id = tx.proposal_short_id(); @@ -287,6 +544,50 @@ impl TxPool { } } + fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { + let inputs = tx.input_pts_iter(); + let mut conflicts = Vec::new(); + + for i in inputs { + if let Some(ids) = self.inputs.remove(&i) { + for id in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + conflicts.push((entry, reject)); + } + } + } + + // deps consumed + if let Some(ids) = self.deps.remove(&i) { + for id in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + conflicts.push((entry, reject)); + } + } + } + } + conflicts + } + + pub(crate) fn remove_committed_tx_v2(&mut self, tx: &TransactionView, callbacks: &Callbacks) { + let hash = tx.hash(); + let short_id = tx.proposal_short_id(); + if let Some(entry) = self.remove_entry(&short_id) { + debug!("remove_committed_tx from gap {}", hash); + callbacks.call_committed(self, &entry) + } + { + let conflicts = self.resolve_conflict(tx); + for (entry, reject) in conflicts { + callbacks.call_reject(self, &entry, reject); + } + } + } + // Expire all transaction (and their dependencies) in the pool. pub(crate) fn remove_expired(&mut self, callbacks: &Callbacks) { let now_ms = ckb_systemtime::unix_time_as_millis(); @@ -318,6 +619,42 @@ impl TxPool { } } + fn remove_entries_by_filter bool>( + &mut self, + mut predicate: P, + ) -> Vec { + let mut removed = Vec::new(); + for (_, entry) in self.entries.iter() { + if predicate(&entry.id, &entry.inner) { + removed.push(entry.inner.clone()); + } + } + for entry in &removed { + self.remove_entry(&entry.proposal_short_id()); + } + + removed + } + + // Expire all transaction (and their dependencies) in the pool. + pub(crate) fn remove_expired_v2(&mut self, callbacks: &Callbacks) { + let now_ms = ckb_systemtime::unix_time_as_millis(); + let removed: Vec<_> = self + .entries + .iter() + .filter(|&(_, entry)| self.expiry + entry.inner.timestamp < now_ms) + .map(|(_, entry)| entry.inner.clone()) + .collect(); + + for entry in removed { + self.remove_entry(&entry.proposal_short_id()); + let tx_hash = entry.transaction().hash(); + debug!("remove_expired {} timestamp({})", tx_hash, entry.timestamp); + let reject = Reject::Expiry(entry.timestamp); + callbacks.call_reject(self, &entry, reject); + } + } + // Remove transactions from the pool until total size < size_limit. pub(crate) fn limit_size(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { @@ -331,6 +668,31 @@ impl TxPool { } } + pub(crate) fn limit_size_v2(&mut self, callbacks: &Callbacks) { + while self.total_tx_size > self.config.max_tx_pool_size { + if let Some(id) = self + .entries + .iter_by_evict_key() + .next() + .map(|entry| entry.id.clone()) + { + let removed = self.remove_entry_and_descendants(&id); + for entry in removed { + let tx_hash = entry.transaction().hash(); + debug!( + "removed by size limit {} timestamp({})", + tx_hash, entry.timestamp + ); + let reject = Reject::Full(format!( + "the fee_rate for this transaction is: {}", + entry.fee_rate() + )); + callbacks.call_reject(self, &entry, reject); + } + } + } + } + // remove transaction with detached proposal from gap and proposed // try re-put to pending pub(crate) fn remove_by_detached_proposal<'a>( @@ -360,6 +722,34 @@ impl TxPool { } } + // remove transaction with detached proposal from gap and proposed + // try re-put to pending + pub(crate) fn remove_by_detached_proposal_v2<'a>( + &mut self, + ids: impl Iterator, + ) { + for id in ids { + if let Some(e) = self.entries.get_by_id(id) { + let status = e.status; + // TODO: double check this + if status == Status::Pending { + continue; + } + let mut entries = self.remove_entry_and_descendants(id); + entries.sort_unstable_by_key(|entry| entry.ancestors_count); + for mut entry in entries { + let tx_hash = entry.transaction().hash(); + entry.reset_ancestors_state(); + let ret = self.add_pending(entry); + debug!( + "remove_by_detached_proposal from {:?} {} add_pending {}", status, + tx_hash, ret + ); + } + } + } + } + pub(crate) fn remove_tx(&mut self, id: &ProposalShortId) -> bool { let entries = self.proposed.remove_entry_and_descendants(id); if !entries.is_empty() { @@ -382,6 +772,14 @@ impl TxPool { false } + pub(crate) fn remove_tx_v2(&mut self, id: &ProposalShortId) -> bool { + if let Some(entry) = self.remove_entry(id) { + self.update_statics_for_remove_tx(entry.size, entry.cycles); + return true; + } + false + } + pub(crate) fn resolve_tx_from_pending_and_proposed( &self, tx: TransactionView, @@ -402,6 +800,18 @@ impl TxPool { .map_err(Reject::Resolve) } + pub(crate) fn resolve_tx_from_pending_and_proposed_v2( + &self, + tx: TransactionView, + ) -> Result, Reject> { + let snapshot = self.snapshot(); + let provider = OverlayCellProvider::new(&self.entries, snapshot); + let mut seen_inputs = HashSet::new(); + resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) + .map(Arc::new) + .map_err(Reject::Resolve) + } + pub(crate) fn check_rtx_from_pending_and_proposed( &self, rtx: &ResolvedTransaction, @@ -416,6 +826,17 @@ impl TxPool { .map_err(Reject::Resolve) } + pub(crate) fn check_rtx_from_pending_and_proposed_v2( + &self, + rtx: &ResolvedTransaction, + ) -> Result<(), Reject> { + let snapshot = self.snapshot(); + let checker = OverlayCellChecker::new(&self.entries, snapshot); + let mut seen_inputs = HashSet::new(); + rtx.check(&mut seen_inputs, &checker, snapshot) + .map_err(Reject::Resolve) + } + pub(crate) fn resolve_tx_from_proposed( &self, tx: TransactionView, @@ -428,6 +849,17 @@ impl TxPool { .map_err(Reject::Resolve) } + pub(crate) fn resolve_tx_from_proposed_v2( + &self, + rtx: &ResolvedTransaction, + ) -> Result<(), Reject> { + let snapshot = self.snapshot(); + let checker = OverlayCellChecker::new(&self.entries, snapshot); + let mut seen_inputs = HashSet::new(); + rtx.check(&mut seen_inputs, &checker, snapshot) + .map_err(Reject::Resolve) + } + pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { let snapshot = self.snapshot(); let cell_checker = OverlayCellChecker::new(&self.proposed, snapshot); @@ -436,6 +868,17 @@ impl TxPool { .map_err(Reject::Resolve) } + pub(crate) fn check_rtx_from_proposed_v2( + &self, + rtx: &ResolvedTransaction, + ) -> Result<(), Reject> { + let snapshot = self.snapshot(); + let cell_checker = OverlayCellChecker::new(&self.entries, snapshot); + let mut seen_inputs = HashSet::new(); + rtx.check(&mut seen_inputs, &cell_checker, snapshot) + .map_err(Reject::Resolve) + } + pub(crate) fn gap_rtx( &mut self, cache_entry: CacheEntry, @@ -498,6 +941,24 @@ impl TxPool { } } + // fill proposal txs + pub fn fill_proposals( + &self, + limit: usize, + exclusion: &HashSet, + proposals: &mut HashSet, + status: &Status, + ) { + for entry in self.entries.get_by_status(status) { + if proposals.len() == limit { + break; + } + if !exclusion.contains(&entry.id) { + proposals.insert(entry.id.clone()); + } + } + } + /// Get to-be-proposal transactions that may be included in the next block. pub fn get_proposals( &self, @@ -511,6 +972,18 @@ impl TxPool { proposals } + /// Get to-be-proposal transactions that may be included in the next block. + pub fn get_proposals_v2( + &self, + limit: usize, + exclusion: &HashSet, + ) -> HashSet { + let mut proposals = HashSet::with_capacity(limit); + self.fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); + self.fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); + proposals + } + /// Returns tx from tx-pool or storage corresponding to the id. pub fn get_tx_from_pool_or_store( &self, @@ -542,6 +1015,25 @@ impl TxPool { TxPoolIds { pending, proposed } } + // This is for RPC request, performance is not critical + pub(crate) fn get_ids_v2(&self) -> TxPoolIds { + let pending: Vec = self + .entries + .get_by_status(&Status::Pending) + .iter() + .chain(self.entries.get_by_status(&Status::Gap).iter()) + .map(|entry| entry.inner.transaction().hash()) + .collect(); + + let proposed: Vec = self + .proposed + .iter() + .map(|(_, entry)| entry.transaction().hash()) + .collect(); + + TxPoolIds { pending, proposed } + } + pub(crate) fn get_all_entry_info(&self) -> TxPoolEntryInfo { let pending = self .pending @@ -563,8 +1055,27 @@ impl TxPool { TxPoolEntryInfo { pending, proposed } } + pub(crate) fn get_all_entry_info_v2(&self) -> TxPoolEntryInfo { + let pending = self + .entries + .get_by_status(&Status::Pending) + .iter() + .chain(self.entries.get_by_status(&Status::Gap).iter()) + .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) + .collect(); + + let proposed = self + .entries + .get_by_status(&Status::Proposed) + .iter() + .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) + .collect(); + + TxPoolEntryInfo { pending, proposed } + } + pub(crate) fn drain_all_transactions(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed) + let mut txs = CommitTxsScanner::new(&self.proposed, &self.entries) .txs_to_commit(self.total_tx_size, self.total_tx_cycles) .0 .into_iter() @@ -579,10 +1090,47 @@ impl TxPool { txs } + pub(crate) fn drain_all_transactions_v2(&mut self) -> Vec { + let mut txs = CommitTxsScanner::new(&self.proposed, &self.entries) + .txs_to_commit(self.total_tx_size, self.total_tx_cycles) + .0 + .into_iter() + .map(|tx_entry| tx_entry.into_transaction()) + .collect::>(); + self.proposed.clear(); + let mut pending = self + .entries + .remove_by_status(&Status::Pending) + .into_iter() + .map(|e| e.inner.into_transaction()) + .collect::>(); + txs.append(&mut pending); + let mut gap = self + .entries + .remove_by_status(&Status::Gap) + .into_iter() + .map(|e| e.inner.into_transaction()) + .collect::>(); + txs.append(&mut gap); + self.total_tx_size = 0; + self.total_tx_cycles = 0; + self.deps.clear(); + self.inputs.clear(); + self.header_deps.clear(); + self.outputs.clear(); + // self.touch_last_txs_updated_at(); + txs + } + pub(crate) fn clear(&mut self, snapshot: Arc) { self.pending = PendingQueue::new(); self.gap = PendingQueue::new(); self.proposed = ProposedPool::new(self.config.max_ancestors_count); + self.entries = MultiIndexPoolEntryMap::default(); + self.header_deps = HashMap::default(); + self.deps = HashMap::default(); + self.inputs = HashMap::default(); + self.outputs = HashMap::default(); self.snapshot = snapshot; self.committed_txs_hash_cache = LruCache::new(COMMITTED_HASH_CACHE_SIZE); self.total_tx_size = 0; @@ -606,8 +1154,8 @@ impl TxPool { max_block_cycles: Cycle, txs_size_limit: usize, ) -> (Vec, usize, Cycle) { - let (entries, size, cycles) = - CommitTxsScanner::new(self.proposed()).txs_to_commit(txs_size_limit, max_block_cycles); + let (entries, size, cycles) = CommitTxsScanner::new(self.proposed(), &self.entries) + .txs_to_commit(txs_size_limit, max_block_cycles); if !entries.is_empty() { ckb_logger::info!( @@ -621,27 +1169,65 @@ impl TxPool { } (entries, size, cycles) } + + fn build_recent_reject(config: &TxPoolConfig) -> Option { + if !config.recent_reject.as_os_str().is_empty() { + let recent_reject_ttl = config.keep_rejected_tx_hashes_days as i32 * 24 * 60 * 60; + match RecentReject::new( + &config.recent_reject, + config.keep_rejected_tx_hashes_count, + recent_reject_ttl, + ) { + Ok(recent_reject) => Some(recent_reject), + Err(err) => { + error!( + "Failed to open recent reject database {:?} {}", + config.recent_reject, err + ); + None + } + } + } else { + warn!("Recent reject database is disabled!"); + None + } + } } -fn build_recent_reject(config: &TxPoolConfig) -> Option { - if !config.recent_reject.as_os_str().is_empty() { - let recent_reject_ttl = config.keep_rejected_tx_hashes_days as i32 * 24 * 60 * 60; - match RecentReject::new( - &config.recent_reject, - config.keep_rejected_tx_hashes_count, - recent_reject_ttl, - ) { - Ok(recent_reject) => Some(recent_reject), - Err(err) => { - error!( - "Failed to open recent reject database {:?} {}", - config.recent_reject, err - ); - None +impl CellProvider for MultiIndexPoolEntryMap { + fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { + let tx_hash = out_point.tx_hash(); + if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { + match entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + { + Some((output, data)) => { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } + None => CellStatus::Unknown, } + } else { + CellStatus::Unknown + } + } +} + +impl CellChecker for MultiIndexPoolEntryMap { + fn is_live(&self, out_point: &OutPoint) -> Option { + let tx_hash = out_point.tx_hash(); + if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { + entry + .inner + .transaction() + .output(out_point.index().unpack()) + .map(|_| true) + } else { + None } - } else { - warn!("Recent reject database is disabled!"); - None } } From 7f7f02ff18814ee16f30e071f2a4d845b4917a16 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 17 May 2023 01:41:37 +0800 Subject: [PATCH 085/267] add pool_map --- tx-pool/src/component/commit_txs_scanner.rs | 2 +- tx-pool/src/component/mod.rs | 1 + tx-pool/src/component/pool_map.rs | 430 ++++++++++++++++++++ tx-pool/src/component/tests/mod.rs | 1 + tx-pool/src/component/tests/pool_map.rs | 236 +++++++++++ tx-pool/src/pool.rs | 396 +++--------------- 6 files changed, 719 insertions(+), 347 deletions(-) create mode 100644 tx-pool/src/component/pool_map.rs create mode 100644 tx-pool/src/component/tests/pool_map.rs diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index af277a1b0a..a9b4287140 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -2,7 +2,7 @@ use crate::component::{container::AncestorsScoreSortKey, entry::TxEntry, propose use ckb_types::{core::Cycle, packed::ProposalShortId}; use ckb_util::LinkedHashMap; use std::collections::{BTreeSet, HashMap, HashSet}; -use crate::pool::MultiIndexPoolEntryMap; +use crate::component::pool_map::MultiIndexPoolEntryMap; // A template data struct used to store modified entries when package txs #[derive(Default)] diff --git a/tx-pool/src/component/mod.rs b/tx-pool/src/component/mod.rs index 3df4d620de..60b4e78ae7 100644 --- a/tx-pool/src/component/mod.rs +++ b/tx-pool/src/component/mod.rs @@ -7,6 +7,7 @@ pub(crate) mod orphan; pub(crate) mod pending; pub(crate) mod proposed; pub(crate) mod recent_reject; +pub(crate) mod pool_map; #[cfg(test)] mod tests; diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs new file mode 100644 index 0000000000..2a66e760f2 --- /dev/null +++ b/tx-pool/src/component/pool_map.rs @@ -0,0 +1,430 @@ +//! Top-level Pool type, methods, and tests +extern crate rustc_hash; +extern crate slab; +use crate::component::container::AncestorsScoreSortKey; +use crate::component::entry::EvictKey; +use crate::error::Reject; +use crate::TxEntry; +use ckb_logger::{debug, error, trace, warn}; +use ckb_types::core::error::OutPointError; +use ckb_types::packed::OutPoint; +use ckb_types::{ + core::cell::{CellMetaBuilder, CellProvider, CellStatus}, + prelude::*, +}; +use ckb_types::{ + core::{cell::CellChecker, TransactionView}, + packed::{Byte32, ProposalShortId}, +}; +use multi_index_map::MultiIndexMap; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::collections::{HashSet, VecDeque}; + +type ConflictEntry = (TxEntry, Reject); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum Status { + Pending, + Gap, + Proposed, +} + +#[derive(MultiIndexMap, Clone)] +pub struct PoolEntry { + #[multi_index(hashed_unique)] + pub id: ProposalShortId, + #[multi_index(ordered_non_unique)] + pub score: AncestorsScoreSortKey, + #[multi_index(ordered_non_unique)] + pub status: Status, + #[multi_index(ordered_non_unique)] + pub evict_key: EvictKey, + + pub inner: TxEntry, + // other sort key +} + +impl MultiIndexPoolEntryMap { + /// sorted by ancestor score from higher to lower + pub fn score_sorted_iter(&self) -> impl Iterator { + // Note: multi_index don't support reverse order iteration now + // so we need to collect and reverse + let entries = self.iter_by_score().collect::>(); + entries.into_iter().rev().map(move |entry| &entry.inner) + } +} + +pub struct PoolMap { + /// The pool entries with different kinds of sort strategies + pub(crate) entries: MultiIndexPoolEntryMap, + /// dep-set map represent in-pool tx's header deps + pub(crate) header_deps: HashMap>, + /// dep-set map represent in-pool tx's deps + pub(crate) deps: HashMap>, + /// input-set map represent in-pool tx's inputs + pub(crate) inputs: HashMap>, + pub(crate) outputs: HashMap>, + pub(crate) max_ancestors_count: usize, +} + +impl PoolMap { + pub fn new(max_ancestors_count: usize) -> Self { + PoolMap { + entries: MultiIndexPoolEntryMap::default(), + header_deps: HashMap::default(), + deps: HashMap::default(), + inputs: HashMap::default(), + outputs: HashMap::default(), + max_ancestors_count, + } + } + + #[cfg(test)] + pub(crate) fn outputs_len(&self) -> usize { + self.outputs.len() + } + + #[cfg(test)] + pub(crate) fn header_deps_len(&self) -> usize { + self.header_deps.len() + } + + #[cfg(test)] + pub(crate) fn deps_len(&self) -> usize { + self.deps.len() + } + + #[cfg(test)] + pub(crate) fn inputs_len(&self) -> usize { + self.inputs.len() + } + + #[cfg(test)] + pub fn size(&self) -> usize { + self.entries.len() + } + + #[cfg(test)] + pub fn contains_key(&self, id: &ProposalShortId) -> bool { + self.entries.get_by_id(id).is_some() + } + + pub(crate) fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { + self.entries + .get_by_id(id) + .map(|entry| entry.inner.transaction()) + } + + pub fn add_entry(&mut self, entry: TxEntry, status: Status) -> bool { + let tx_short_id = entry.proposal_short_id(); + if self.entries.get_by_id(&tx_short_id).is_some() { + return false; + } + trace!("add_{:?} {}", status, entry.transaction().hash()); + let inputs = entry.transaction().input_pts_iter(); + let outputs = entry.transaction().output_pts(); + + for i in inputs { + self.inputs + .entry(i.to_owned()) + .or_default() + .insert(tx_short_id.clone()); + + if let Some(outputs) = self.outputs.get_mut(&i) { + outputs.insert(tx_short_id.clone()); + } + } + + // record dep-txid + for d in entry.related_dep_out_points() { + self.deps + .entry(d.to_owned()) + .or_default() + .insert(tx_short_id.clone()); + + if let Some(outputs) = self.outputs.get_mut(d) { + outputs.insert(tx_short_id.clone()); + } + } + + // record tx unconsumed output + for o in outputs { + self.outputs.insert(o, HashSet::new()); + } + + // record header_deps + let header_deps = entry.transaction().header_deps(); + if !header_deps.is_empty() { + self.header_deps + .insert(tx_short_id.clone(), header_deps.into_iter().collect()); + } + + let score = entry.as_score_key(); + let evict_key = entry.as_evict_key(); + self.entries.insert(PoolEntry { + id: tx_short_id, + score, + status, + inner: entry, + evict_key, + }); + true + } + + pub fn get_by_id(&self, id: &ProposalShortId) -> Option<&PoolEntry> { + self.entries.get_by_id(id).map(|entry| entry) + } + + fn get_descendants(&self, entry: &TxEntry) -> HashSet { + let mut entries: VecDeque<&TxEntry> = VecDeque::new(); + entries.push_back(entry); + + let mut descendants = HashSet::new(); + while let Some(entry) = entries.pop_front() { + let outputs = entry.transaction().output_pts(); + + for output in outputs { + if let Some(ids) = self.outputs.get(&output) { + for id in ids { + if descendants.insert(id.clone()) { + if let Some(entry) = self.entries.get_by_id(id) { + entries.push_back(&entry.inner); + } + } + } + } + } + } + descendants + } + + pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { + let inputs = entry.transaction().input_pts_iter(); + let tx_short_id = entry.proposal_short_id(); + let outputs = entry.transaction().output_pts(); + + for i in inputs { + if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { + let empty = { + let ids = occupied.get_mut(); + ids.remove(&tx_short_id); + ids.is_empty() + }; + if empty { + occupied.remove(); + } + } + } + + // remove dep + for d in entry.related_dep_out_points().cloned() { + if let Entry::Occupied(mut occupied) = self.deps.entry(d) { + let empty = { + let ids = occupied.get_mut(); + ids.remove(&tx_short_id); + ids.is_empty() + }; + if empty { + occupied.remove(); + } + } + } + + for o in outputs { + self.outputs.remove(&o); + } + + self.header_deps.remove(&tx_short_id); + } + + pub fn remove_entry(&mut self, id: &ProposalShortId) -> Option { + let removed = self.entries.remove_by_id(id); + + if let Some(ref entry) = removed { + self.remove_entry_relation(&entry.inner); + } + removed.map(|e| e.inner) + } + + pub fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { + let mut removed = Vec::new(); + if let Some(entry) = self.entries.remove_by_id(id) { + let descendants = self.get_descendants(&entry.inner); + self.remove_entry_relation(&entry.inner); + removed.push(entry.inner); + for id in descendants { + if let Some(entry) = self.remove_entry(&id) { + removed.push(entry); + } + } + } + removed + } + + pub fn resolve_conflict_header_dep( + &mut self, + headers: &HashSet, + ) -> Vec { + let mut conflicts = Vec::new(); + + // invalid header deps + let mut ids = Vec::new(); + for (tx_id, deps) in self.header_deps.iter() { + for hash in deps { + if headers.contains(hash) { + ids.push((hash.clone(), tx_id.clone())); + break; + } + } + } + + for (blk_hash, id) in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash.to_owned())); + conflicts.push((entry, reject)); + } + } + conflicts + } + + pub fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { + let inputs = tx.input_pts_iter(); + let mut conflicts = Vec::new(); + + for i in inputs { + if let Some(ids) = self.inputs.remove(&i) { + for id in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + conflicts.push((entry, reject)); + } + } + } + + // deps consumed + if let Some(ids) = self.deps.remove(&i) { + for id in ids { + let entries = self.remove_entry_and_descendants(&id); + for entry in entries { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + conflicts.push((entry, reject)); + } + } + } + } + conflicts + } + + // fill proposal txs + pub fn fill_proposals( + &self, + limit: usize, + exclusion: &HashSet, + proposals: &mut HashSet, + status: &Status, + ) { + for entry in self.entries.get_by_status(status) { + if proposals.len() == limit { + break; + } + if !exclusion.contains(&entry.id) { + proposals.insert(entry.id.clone()); + } + } + } + + pub fn remove_entries_by_filter bool>( + &mut self, + mut predicate: P, + ) -> Vec { + let mut removed = Vec::new(); + for (_, entry) in self.entries.iter() { + if predicate(&entry.id, &entry.inner) { + removed.push(entry.inner.clone()); + } + } + for entry in &removed { + self.remove_entry(&entry.proposal_short_id()); + } + + removed + } + + pub fn iter(&self) -> impl Iterator { + self.entries.iter().map(|(_, entry)| entry) + } + + pub fn iter_by_evict_key(&self) -> impl Iterator { + self.entries.iter_by_evict_key() + } + + pub fn next_evict_entry(&self) -> Option { + self.iter_by_evict_key() + .into_iter() + .next() + .map(|entry| entry.id.clone()) + } + + pub fn clear(&mut self) { + self.entries = MultiIndexPoolEntryMap::default(); + self.deps.clear(); + self.inputs.clear(); + self.header_deps.clear(); + self.outputs.clear(); + } + + pub(crate) fn drain(&mut self) -> Vec { + let txs = self + .entries + .iter() + .map(|(_k, entry)| entry.inner.clone().into_transaction()) + .collect::>(); + self.entries.clear(); + self.deps.clear(); + self.inputs.clear(); + self.header_deps.clear(); + self.outputs.clear(); + txs + } +} + +impl CellProvider for MultiIndexPoolEntryMap { + fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { + let tx_hash = out_point.tx_hash(); + if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { + match entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + { + Some((output, data)) => { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } + None => CellStatus::Unknown, + } + } else { + CellStatus::Unknown + } + } +} + +impl CellChecker for MultiIndexPoolEntryMap { + fn is_live(&self, out_point: &OutPoint) -> Option { + let tx_hash = out_point.tx_hash(); + if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { + entry + .inner + .transaction() + .output(out_point.index().unpack()) + .map(|_| true) + } else { + None + } + } +} diff --git a/tx-pool/src/component/tests/mod.rs b/tx-pool/src/component/tests/mod.rs index 5bde917729..0f8bfcd719 100644 --- a/tx-pool/src/component/tests/mod.rs +++ b/tx-pool/src/component/tests/mod.rs @@ -4,3 +4,4 @@ mod pending; mod proposed; mod recent_reject; mod util; +mod pool_map; \ No newline at end of file diff --git a/tx-pool/src/component/tests/pool_map.rs b/tx-pool/src/component/tests/pool_map.rs new file mode 100644 index 0000000000..7fcbd9e1a6 --- /dev/null +++ b/tx-pool/src/component/tests/pool_map.rs @@ -0,0 +1,236 @@ +use crate::component::tests::util::{ + build_tx, build_tx_with_dep, build_tx_with_header_dep, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, +}; +use crate::component::{ + entry::TxEntry, + pool_map::{PoolEntry, PoolMap, Status}, +}; +use ckb_types::{h256, packed::Byte32, prelude::*}; +use std::collections::HashSet; + +#[test] +fn test_basic() { + let mut pool = PoolMap::new(100); + assert_eq!(pool.size(), 0); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&Byte32::zero(), 2)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 2)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1.clone(), Status::Pending)); + assert!(pool.add_entry(entry2, Status::Pending)); + assert!(pool.size() == 2); + assert!(pool.contains_key(&tx1.proposal_short_id())); + assert!(pool.contains_key(&tx2.proposal_short_id())); + + assert_eq!(pool.inputs_len(), 4); + assert_eq!(pool.outputs_len(), 4); + + assert_eq!(pool.entries.get_by_id(&tx1.proposal_short_id()).unwrap().inner, entry1); + assert_eq!(pool.get_tx(&tx2.proposal_short_id()).unwrap(), &tx2); + + let txs = pool.drain(); + assert!(pool.entries.is_empty()); + assert!(pool.deps.is_empty()); + assert!(pool.inputs.is_empty()); + assert!(pool.header_deps.is_empty()); + assert!(pool.outputs.is_empty()); + assert_eq!(txs, vec![tx1, tx2]); +} + +#[test] +fn test_resolve_conflict() { + let mut pool = PoolMap::new(100); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let tx4 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x1").pack(), 1)], + 3, + ); + let tx5 = build_tx(vec![(&h256!("0x5").pack(), 1)], 3); + + let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1.clone(), Status::Pending)); + assert!(pool.add_entry(entry2.clone(), Status::Pending)); + assert!(pool.add_entry(entry3.clone(), Status::Pending)); + + let conflicts = pool.resolve_conflict(&tx4); + assert_eq!( + conflicts.into_iter().map(|i| i.0).collect::>(), + HashSet::from_iter(vec![entry1, entry2]) + ); + + let conflicts = pool.resolve_conflict(&tx5); + assert_eq!( + conflicts.into_iter().map(|i| i.0).collect::>(), + HashSet::from_iter(vec![entry3]) + ); +} + +#[test] +fn test_resolve_conflict_descendants() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1)], 1); + let tx3 = build_tx(vec![(&tx1.hash(), 0)], 2); + let tx4 = build_tx(vec![(&tx3.hash(), 0)], 1); + + let tx2 = build_tx(vec![(&tx1.hash(), 0)], 1); + + let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry4 = TxEntry::dummy_resolve(tx4, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1, Status::Pending)); + assert!(pool.add_entry(entry3.clone(), Status::Pending)); + assert!(pool.add_entry(entry4.clone(), Status::Pending)); + + let conflicts = pool.resolve_conflict(&tx2); + assert_eq!( + conflicts.into_iter().map(|i| i.0).collect::>(), + HashSet::from_iter(vec![entry3, entry4]) + ); +} + +#[test] +fn test_resolve_conflict_header_dep() { + let mut pool = PoolMap::new(1000); + + let header: Byte32 = h256!("0x1").pack(); + let tx = build_tx_with_header_dep( + vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], + vec![header.clone()], + 1, + ); + let tx1 = build_tx(vec![(&tx.hash(), 0)], 1); + + let entry = TxEntry::dummy_resolve(tx, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry.clone(), Status::Pending)); + assert!(pool.add_entry(entry1.clone(), Status::Pending)); + + assert_eq!(pool.inputs_len(), 3); + assert_eq!(pool.header_deps_len(), 1); + assert_eq!(pool.outputs_len(), 2); + + let mut headers = HashSet::new(); + headers.insert(header); + + let conflicts = pool.resolve_conflict_header_dep(&headers); + assert_eq!( + conflicts.into_iter().map(|i| i.0).collect::>(), + HashSet::from_iter(vec![entry, entry1]) + ); +} + + +#[test] +fn test_remove_entry() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let header: Byte32 = h256!("0x1").pack(); + let tx2 = build_tx_with_header_dep(vec![(&h256!("0x2").pack(), 1)], vec![header], 1); + + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1.clone(), Status::Pending)); + assert!(pool.add_entry(entry2.clone(), Status::Pending)); + + let removed = pool.remove_entry(&tx1.proposal_short_id()); + assert_eq!(removed, Some(entry1)); + let removed = pool.remove_entry(&tx2.proposal_short_id()); + assert_eq!(removed, Some(entry2)); + assert!(pool.entries.is_empty()); + assert!(pool.deps.is_empty()); + assert!(pool.inputs.is_empty()); + assert!(pool.header_deps.is_empty()); +} + + +#[test] +fn test_remove_entries_by_filter() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1, Status::Pending)); + assert!(pool.add_entry(entry2, Status::Pending)); + assert!(pool.add_entry(entry3, Status::Pending)); + + pool.remove_entries_by_filter(|id, _tx_entry| id == &tx1.proposal_short_id()); + + assert!(!pool.contains_key(&tx1.proposal_short_id())); + assert!(pool.contains_key(&tx2.proposal_short_id())); + assert!(pool.contains_key(&tx3.proposal_short_id())); +} + + +#[test] +fn test_fill_proposals() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + assert!(pool.add_entry(entry1, Status::Pending)); + assert!(pool.add_entry(entry2, Status::Pending)); + assert!(pool.add_entry(entry3, Status::Pending)); + + assert_eq!(pool.inputs_len(), 5); + assert_eq!(pool.deps_len(), 1); + assert_eq!(pool.outputs_len(), 7); + + let id1 = tx1.proposal_short_id(); + let id2 = tx2.proposal_short_id(); + let id3 = tx3.proposal_short_id(); + + let mut ret = HashSet::new(); + pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!( + ret, + HashSet::from_iter(vec![id1.clone(), id2.clone(), id3.clone()]) + ); + + let mut ret = HashSet::new(); + pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id1.clone()])); + + let mut ret = HashSet::new(); + pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id1.clone(), id2.clone()])); + + let mut ret = HashSet::new(); + let mut exclusion = HashSet::new(); + exclusion.insert(id2); + pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); +} diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 5ee73cfbb8..c9dcc3b7fd 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -3,29 +3,19 @@ extern crate rustc_hash; extern crate slab; use super::component::{commit_txs_scanner::CommitTxsScanner, TxEntry}; use crate::callback::Callbacks; -use crate::component::container::AncestorsScoreSortKey; -use crate::component::entry::EvictKey; use crate::component::pending::PendingQueue; use crate::component::proposed::ProposedPool; use crate::component::recent_reject::RecentReject; use crate::error::Reject; +use crate::component::pool_map::{PoolMap, Status}; use crate::util::verify_rtx; use ckb_app_config::TxPoolConfig; use ckb_logger::{debug, error, trace, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; -use ckb_types::core::error::OutPointError; -use ckb_types::packed::OutPoint; -use ckb_types::{ - core::cell::{CellMetaBuilder, CellProvider, CellStatus}, - prelude::*, -}; use ckb_types::{ core::{ - cell::{ - resolve_transaction, CellChecker, OverlayCellChecker, OverlayCellProvider, - ResolvedTransaction, - }, + cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, tx_pool::{TxPoolEntryInfo, TxPoolIds}, Cycle, TransactionView, UncleBlockView, }, @@ -33,10 +23,7 @@ use ckb_types::{ }; use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; -use multi_index_map::MultiIndexMap; -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::collections::{HashSet, VecDeque}; +use std::collections::HashSet; use std::sync::Arc; const COMMITTED_HASH_CACHE_SIZE: usize = 100_000; @@ -69,38 +56,6 @@ macro_rules! evict_for_trim_size { type ConflictEntry = (TxEntry, Reject); -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum Status { - Pending, - Gap, - Proposed, -} - -#[derive(MultiIndexMap, Clone)] -pub struct PoolEntry { - #[multi_index(hashed_unique)] - pub id: ProposalShortId, - #[multi_index(ordered_non_unique)] - pub score: AncestorsScoreSortKey, - #[multi_index(ordered_non_unique)] - pub status: Status, - #[multi_index(ordered_non_unique)] - pub evict_key: EvictKey, - - pub inner: TxEntry, - // other sort key -} - -impl MultiIndexPoolEntryMap { - /// sorted by ancestor score from higher to lower - pub fn score_sorted_iter(&self) -> impl Iterator { - // Note: multi_index don't support reverse order iteration now - // so we need to collect and reverse - let entries = self.iter_by_score().collect::>(); - entries.into_iter().rev().map(move |entry| &entry.inner) - } -} - /// Tx-pool implementation pub struct TxPool { pub(crate) config: TxPoolConfig, @@ -111,17 +66,7 @@ pub struct TxPool { /// Tx pool that finely for commit pub(crate) proposed: ProposedPool, - /// The pool entries with different kinds of sort strategies - pub(crate) entries: MultiIndexPoolEntryMap, - /// dep-set map represent in-pool tx's header deps - pub(crate) header_deps: HashMap>, - /// dep-set map represent in-pool tx's deps - pub(crate) deps: HashMap>, - /// input-set map represent in-pool tx's inputs - pub(crate) inputs: HashMap>, - pub(crate) outputs: HashMap>, - pub(crate) max_ancestors_count: usize, - + pub(crate) pool_map: PoolMap, /// cache for committed transactions hash pub(crate) committed_txs_hash_cache: LruCache, // sum of all tx_pool tx's virtual sizes. @@ -145,12 +90,7 @@ impl TxPool { pending: PendingQueue::new(), gap: PendingQueue::new(), proposed: ProposedPool::new(config.max_ancestors_count), - entries: MultiIndexPoolEntryMap::default(), - header_deps: HashMap::default(), - deps: Default::default(), - inputs: Default::default(), - outputs: Default::default(), - max_ancestors_count: config.max_ancestors_count, + pool_map: PoolMap::new(config.max_ancestors_count), committed_txs_hash_cache: LruCache::new(COMMITTED_HASH_CACHE_SIZE), total_tx_size: 0, total_tx_cycles: 0, @@ -203,24 +143,6 @@ impl TxPool { self.total_tx_cycles = total_tx_cycles; } - fn add_poolentry(&mut self, entry: TxEntry, status: Status) -> bool { - let short_id = entry.proposal_short_id(); - if self.entries.get_by_id(&short_id).is_some() { - return false; - } - trace!("add_{:?} {}", status, entry.transaction().hash()); - let score = entry.as_score_key(); - let evict_key = entry.as_evict_key(); - self.entries.insert(PoolEntry { - id: short_id, - score, - status, - inner: entry, - evict_key, - }); - true - } - /// Add tx to pending pool /// If did have this value present, false is returned. pub fn add_pending(&mut self, entry: TxEntry) -> bool { @@ -232,7 +154,7 @@ impl TxPool { } pub fn add_pending_v2(&mut self, entry: TxEntry) -> bool { - self.add_poolentry(entry, Status::Pending) + self.pool_map.add_entry(entry, Status::Pending) } /// Add tx which proposed but still uncommittable to gap pool @@ -245,7 +167,7 @@ impl TxPool { } pub fn add_gap_v2(&mut self, entry: TxEntry) -> bool { - self.add_poolentry(entry, Status::Gap) + self.pool_map.add_entry(entry, Status::Gap) } /// Add tx to proposed pool @@ -255,7 +177,7 @@ impl TxPool { } pub fn add_proposed_v2(&mut self, entry: TxEntry) -> bool { - self.add_poolentry(entry, Status::Proposed) + self.pool_map.add_entry(entry, Status::Proposed) } /// Returns true if the tx-pool contains a tx with specified id. @@ -264,7 +186,7 @@ impl TxPool { } pub fn contains_proposal_id_v2(&self, id: &ProposalShortId) -> bool { - self.entries.get_by_id(id).is_some() + self.pool_map.get_by_id(id).is_some() } /// Returns tx with cycles corresponding to the id. @@ -285,7 +207,7 @@ impl TxPool { } pub fn get_tx_with_cycles_v2(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { - self.entries + self.pool_map .get_by_id(id) .map(|entry| (entry.inner.transaction().clone(), entry.inner.cycles)) } @@ -299,7 +221,7 @@ impl TxPool { } pub fn get_tx_v2(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.entries + self.pool_map .get_by_id(id) .map(|entry| entry.inner.transaction()) } @@ -310,7 +232,7 @@ impl TxPool { } pub fn get_entry_from_pending_or_gap_v2(&self, id: &ProposalShortId) -> Option<&TxEntry> { - if let Some(entry) = self.entries.get_by_id(id) { + if let Some(entry) = self.pool_map.get_by_id(id) { match entry.status { Status::Pending | Status::Gap => return Some(&entry.inner), _ => return None, @@ -338,7 +260,7 @@ impl TxPool { &self, id: &ProposalShortId, ) -> Option<&TransactionView> { - self.entries + self.pool_map .get_by_id(id) .map(|entry| entry.inner.transaction()) } @@ -384,124 +306,14 @@ impl TxPool { detached_headers: &HashSet, callbacks: &Callbacks, ) { - for (entry, reject) in self.__resolve_conflict_header_dep_v2(detached_headers) { + for (entry, reject) in self + .pool_map + .resolve_conflict_header_dep(detached_headers) + { callbacks.call_reject(self, &entry, reject); } } - pub(crate) fn get_descendants(&self, entry: &TxEntry) -> HashSet { - let mut entries: VecDeque<&TxEntry> = VecDeque::new(); - entries.push_back(entry); - - let mut descendants = HashSet::new(); - while let Some(entry) = entries.pop_front() { - let outputs = entry.transaction().output_pts(); - - for output in outputs { - if let Some(ids) = self.outputs.get(&output) { - for id in ids { - if descendants.insert(id.clone()) { - if let Some(entry) = self.entries.get_by_id(id) { - entries.push_back(&entry.inner); - } - } - } - } - } - } - descendants - } - - pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { - let inputs = entry.transaction().input_pts_iter(); - let tx_short_id = entry.proposal_short_id(); - let outputs = entry.transaction().output_pts(); - - for i in inputs { - if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - // remove dep - for d in entry.related_dep_out_points().cloned() { - if let Entry::Occupied(mut occupied) = self.deps.entry(d) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - for o in outputs { - self.outputs.remove(&o); - } - - self.header_deps.remove(&tx_short_id); - } - - fn remove_entry(&mut self, id: &ProposalShortId) -> Option { - let removed = self.entries.remove_by_id(id); - - if let Some(ref entry) = removed { - self.remove_entry_relation(&entry.inner); - } - removed.map(|e| e.inner) - } - - fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let mut removed = Vec::new(); - if let Some(entry) = self.entries.remove_by_id(id) { - let descendants = self.get_descendants(&entry.inner); - self.remove_entry_relation(&entry.inner); - removed.push(entry.inner); - for id in descendants { - if let Some(entry) = self.remove_entry(&id) { - removed.push(entry); - } - } - } - removed - } - - fn __resolve_conflict_header_dep_v2( - &mut self, - headers: &HashSet, - ) -> Vec { - let mut conflicts = Vec::new(); - - // invalid header deps - let mut ids = Vec::new(); - for (tx_id, deps) in self.header_deps.iter() { - for hash in deps { - if headers.contains(hash) { - ids.push((hash.clone(), tx_id.clone())); - break; - } - } - } - - for (blk_hash, id) in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash.to_owned())); - conflicts.push((entry, reject)); - } - } - conflicts - } - pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { let hash = tx.hash(); let short_id = tx.proposal_short_id(); @@ -544,44 +356,15 @@ impl TxPool { } } - fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let inputs = tx.input_pts_iter(); - let mut conflicts = Vec::new(); - - for i in inputs { - if let Some(ids) = self.inputs.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } - } - } - - // deps consumed - if let Some(ids) = self.deps.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } - } - } - } - conflicts - } - pub(crate) fn remove_committed_tx_v2(&mut self, tx: &TransactionView, callbacks: &Callbacks) { let hash = tx.hash(); let short_id = tx.proposal_short_id(); - if let Some(entry) = self.remove_entry(&short_id) { + if let Some(entry) = self.pool_map.remove_entry(&short_id) { debug!("remove_committed_tx from gap {}", hash); callbacks.call_committed(self, &entry) } { - let conflicts = self.resolve_conflict(tx); + let conflicts = self.pool_map.resolve_conflict(tx); for (entry, reject) in conflicts { callbacks.call_reject(self, &entry, reject); } @@ -619,35 +402,18 @@ impl TxPool { } } - fn remove_entries_by_filter bool>( - &mut self, - mut predicate: P, - ) -> Vec { - let mut removed = Vec::new(); - for (_, entry) in self.entries.iter() { - if predicate(&entry.id, &entry.inner) { - removed.push(entry.inner.clone()); - } - } - for entry in &removed { - self.remove_entry(&entry.proposal_short_id()); - } - - removed - } - // Expire all transaction (and their dependencies) in the pool. pub(crate) fn remove_expired_v2(&mut self, callbacks: &Callbacks) { let now_ms = ckb_systemtime::unix_time_as_millis(); let removed: Vec<_> = self - .entries + .pool_map .iter() - .filter(|&(_, entry)| self.expiry + entry.inner.timestamp < now_ms) - .map(|(_, entry)| entry.inner.clone()) + .filter(|&entry| self.expiry + entry.inner.timestamp < now_ms) + .map(|entry| entry.inner.clone()) .collect(); for entry in removed { - self.remove_entry(&entry.proposal_short_id()); + self.pool_map.remove_entry(&entry.proposal_short_id()); let tx_hash = entry.transaction().hash(); debug!("remove_expired {} timestamp({})", tx_hash, entry.timestamp); let reject = Reject::Expiry(entry.timestamp); @@ -670,13 +436,8 @@ impl TxPool { pub(crate) fn limit_size_v2(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { - if let Some(id) = self - .entries - .iter_by_evict_key() - .next() - .map(|entry| entry.id.clone()) - { - let removed = self.remove_entry_and_descendants(&id); + if let Some(id) = self.pool_map.next_evict_entry() { + let removed = self.pool_map.remove_entry_and_descendants(&id); for entry in removed { let tx_hash = entry.transaction().hash(); debug!( @@ -729,21 +490,21 @@ impl TxPool { ids: impl Iterator, ) { for id in ids { - if let Some(e) = self.entries.get_by_id(id) { + if let Some(e) = self.pool_map.get_by_id(id) { let status = e.status; // TODO: double check this if status == Status::Pending { continue; } - let mut entries = self.remove_entry_and_descendants(id); + let mut entries = self.pool_map.remove_entry_and_descendants(id); entries.sort_unstable_by_key(|entry| entry.ancestors_count); for mut entry in entries { let tx_hash = entry.transaction().hash(); entry.reset_ancestors_state(); let ret = self.add_pending(entry); debug!( - "remove_by_detached_proposal from {:?} {} add_pending {}", status, - tx_hash, ret + "remove_by_detached_proposal from {:?} {} add_pending {}", + status, tx_hash, ret ); } } @@ -773,7 +534,7 @@ impl TxPool { } pub(crate) fn remove_tx_v2(&mut self, id: &ProposalShortId) -> bool { - if let Some(entry) = self.remove_entry(id) { + if let Some(entry) = self.pool_map.remove_entry(id) { self.update_statics_for_remove_tx(entry.size, entry.cycles); return true; } @@ -805,7 +566,7 @@ impl TxPool { tx: TransactionView, ) -> Result, Reject> { let snapshot = self.snapshot(); - let provider = OverlayCellProvider::new(&self.entries, snapshot); + let provider = OverlayCellProvider::new(&self.pool_map.entries, snapshot); let mut seen_inputs = HashSet::new(); resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) .map(Arc::new) @@ -831,7 +592,7 @@ impl TxPool { rtx: &ResolvedTransaction, ) -> Result<(), Reject> { let snapshot = self.snapshot(); - let checker = OverlayCellChecker::new(&self.entries, snapshot); + let checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); let mut seen_inputs = HashSet::new(); rtx.check(&mut seen_inputs, &checker, snapshot) .map_err(Reject::Resolve) @@ -854,7 +615,7 @@ impl TxPool { rtx: &ResolvedTransaction, ) -> Result<(), Reject> { let snapshot = self.snapshot(); - let checker = OverlayCellChecker::new(&self.entries, snapshot); + let checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); let mut seen_inputs = HashSet::new(); rtx.check(&mut seen_inputs, &checker, snapshot) .map_err(Reject::Resolve) @@ -873,7 +634,7 @@ impl TxPool { rtx: &ResolvedTransaction, ) -> Result<(), Reject> { let snapshot = self.snapshot(); - let cell_checker = OverlayCellChecker::new(&self.entries, snapshot); + let cell_checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); let mut seen_inputs = HashSet::new(); rtx.check(&mut seen_inputs, &cell_checker, snapshot) .map_err(Reject::Resolve) @@ -941,24 +702,6 @@ impl TxPool { } } - // fill proposal txs - pub fn fill_proposals( - &self, - limit: usize, - exclusion: &HashSet, - proposals: &mut HashSet, - status: &Status, - ) { - for entry in self.entries.get_by_status(status) { - if proposals.len() == limit { - break; - } - if !exclusion.contains(&entry.id) { - proposals.insert(entry.id.clone()); - } - } - } - /// Get to-be-proposal transactions that may be included in the next block. pub fn get_proposals( &self, @@ -979,8 +722,8 @@ impl TxPool { exclusion: &HashSet, ) -> HashSet { let mut proposals = HashSet::with_capacity(limit); - self.fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); - self.fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); + self.pool_map.fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); + self.pool_map.fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); proposals } @@ -1018,10 +761,11 @@ impl TxPool { // This is for RPC request, performance is not critical pub(crate) fn get_ids_v2(&self) -> TxPoolIds { let pending: Vec = self + .pool_map .entries .get_by_status(&Status::Pending) .iter() - .chain(self.entries.get_by_status(&Status::Gap).iter()) + .chain(self.pool_map.entries.get_by_status(&Status::Gap).iter()) .map(|entry| entry.inner.transaction().hash()) .collect(); @@ -1057,14 +801,16 @@ impl TxPool { pub(crate) fn get_all_entry_info_v2(&self) -> TxPoolEntryInfo { let pending = self + .pool_map .entries .get_by_status(&Status::Pending) .iter() - .chain(self.entries.get_by_status(&Status::Gap).iter()) + .chain(self.pool_map.entries.get_by_status(&Status::Gap).iter()) .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) .collect(); let proposed = self + .pool_map .entries .get_by_status(&Status::Proposed) .iter() @@ -1075,7 +821,7 @@ impl TxPool { } pub(crate) fn drain_all_transactions(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed, &self.entries) + let mut txs = CommitTxsScanner::new(&self.proposed, &self.pool_map.entries) .txs_to_commit(self.total_tx_size, self.total_tx_cycles) .0 .into_iter() @@ -1091,7 +837,7 @@ impl TxPool { } pub(crate) fn drain_all_transactions_v2(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed, &self.entries) + let mut txs = CommitTxsScanner::new(&self.proposed, &self.pool_map.entries) .txs_to_commit(self.total_tx_size, self.total_tx_cycles) .0 .into_iter() @@ -1099,6 +845,7 @@ impl TxPool { .collect::>(); self.proposed.clear(); let mut pending = self + .pool_map .entries .remove_by_status(&Status::Pending) .into_iter() @@ -1106,6 +853,7 @@ impl TxPool { .collect::>(); txs.append(&mut pending); let mut gap = self + .pool_map .entries .remove_by_status(&Status::Gap) .into_iter() @@ -1114,10 +862,7 @@ impl TxPool { txs.append(&mut gap); self.total_tx_size = 0; self.total_tx_cycles = 0; - self.deps.clear(); - self.inputs.clear(); - self.header_deps.clear(); - self.outputs.clear(); + self.pool_map.clear(); // self.touch_last_txs_updated_at(); txs } @@ -1126,11 +871,7 @@ impl TxPool { self.pending = PendingQueue::new(); self.gap = PendingQueue::new(); self.proposed = ProposedPool::new(self.config.max_ancestors_count); - self.entries = MultiIndexPoolEntryMap::default(); - self.header_deps = HashMap::default(); - self.deps = HashMap::default(); - self.inputs = HashMap::default(); - self.outputs = HashMap::default(); + self.pool_map.clear(); self.snapshot = snapshot; self.committed_txs_hash_cache = LruCache::new(COMMITTED_HASH_CACHE_SIZE); self.total_tx_size = 0; @@ -1154,8 +895,9 @@ impl TxPool { max_block_cycles: Cycle, txs_size_limit: usize, ) -> (Vec, usize, Cycle) { - let (entries, size, cycles) = CommitTxsScanner::new(self.proposed(), &self.entries) - .txs_to_commit(txs_size_limit, max_block_cycles); + let (entries, size, cycles) = + CommitTxsScanner::new(self.proposed(), &self.pool_map.entries) + .txs_to_commit(txs_size_limit, max_block_cycles); if !entries.is_empty() { ckb_logger::info!( @@ -1193,41 +935,3 @@ impl TxPool { } } } - -impl CellProvider for MultiIndexPoolEntryMap { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { - match entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - { - Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - None => CellStatus::Unknown, - } - } else { - CellStatus::Unknown - } - } -} - -impl CellChecker for MultiIndexPoolEntryMap { - fn is_live(&self, out_point: &OutPoint) -> Option { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { - entry - .inner - .transaction() - .output(out_point.index().unpack()) - .map(|_| true) - } else { - None - } - } -} From 013c9d17e74ecc9ac1f4a1860a96eb434f6665f8 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 17 May 2023 02:09:06 +0800 Subject: [PATCH 086/267] remove pending and gap --- chain/src/tests/dep_cell.rs | 1 - chain/src/tests/load_code_with_snapshot.rs | 4 + rpc/src/module/chain.rs | 2 + test/src/main.rs | 9 +- test/src/node.rs | 5 + test/src/specs/mining/fee.rs | 1 - test/src/specs/tx_pool/pool_reconcile.rs | 1 + test/src/specs/tx_pool/reorg_proposals.rs | 6 + test/src/util/mining.rs | 2 +- tx-pool/Cargo.toml | 1 + tx-pool/src/block_assembler/mod.rs | 1 + tx-pool/src/component/chunk.rs | 1 - tx-pool/src/component/commit_txs_scanner.rs | 25 +- tx-pool/src/component/container.rs | 492 -------------- tx-pool/src/component/edges.rs | 114 ++++ tx-pool/src/component/entry.rs | 17 +- tx-pool/src/component/links.rs | 144 +++++ tx-pool/src/component/mod.rs | 8 +- tx-pool/src/component/pending.rs | 363 ----------- tx-pool/src/component/pool_map.rs | 605 +++++++++++------- tx-pool/src/component/proposed.rs | 403 ------------ tx-pool/src/component/score_key.rs | 54 ++ tx-pool/src/component/tests/mod.rs | 3 +- tx-pool/src/component/tests/pending.rs | 166 +++-- tx-pool/src/component/tests/pool_map.rs | 236 ------- tx-pool/src/component/tests/proposed.rs | 152 +++-- .../tests/{container.rs => score_key.rs} | 21 +- tx-pool/src/pool.rs | 585 ++++------------- tx-pool/src/process.rs | 115 ++-- tx-pool/src/service.rs | 43 +- 30 files changed, 1123 insertions(+), 2457 deletions(-) delete mode 100644 tx-pool/src/component/container.rs create mode 100644 tx-pool/src/component/edges.rs create mode 100644 tx-pool/src/component/links.rs delete mode 100644 tx-pool/src/component/pending.rs delete mode 100644 tx-pool/src/component/proposed.rs create mode 100644 tx-pool/src/component/score_key.rs delete mode 100644 tx-pool/src/component/tests/pool_map.rs rename tx-pool/src/component/tests/{container.rs => score_key.rs} (94%) diff --git a/chain/src/tests/dep_cell.rs b/chain/src/tests/dep_cell.rs index 3f1bb3428e..3f557236fd 100644 --- a/chain/src/tests/dep_cell.rs +++ b/chain/src/tests/dep_cell.rs @@ -535,7 +535,6 @@ fn test_package_txs_with_deps_priority() { ); let txs = vec![tx2.clone(), tx1]; - for tx in &txs { let ret = tx_pool.submit_local_tx(tx.clone()).unwrap(); assert!(ret.is_ok(), "submit {} {:?}", tx.proposal_short_id(), ret); diff --git a/chain/src/tests/load_code_with_snapshot.rs b/chain/src/tests/load_code_with_snapshot.rs index 5c7c16583b..fcbe986ed8 100644 --- a/chain/src/tests/load_code_with_snapshot.rs +++ b/chain/src/tests/load_code_with_snapshot.rs @@ -263,6 +263,10 @@ fn _test_load_code_with_snapshot_after_hardfork(script_type: ScriptHashType) { let tx = create_call_load_is_even_tx(&issue_tx, 0); let tx_pool = shared.tx_pool_controller(); + + let tx_status = tx_pool.get_tx_status(tx.hash()); + assert_eq!(tx_status.unwrap().unwrap(), (TxStatus::Unknown, None)); + let ret = tx_pool.submit_local_tx(tx.clone()).unwrap(); assert!(ret.is_ok(), "ret {ret:?}"); diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index dce4c322f8..08574e13bb 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -2117,6 +2117,7 @@ impl ChainRpcImpl { only_committed: bool, ) -> Result { let snapshot = self.shared.snapshot(); + if let Some(tx_info) = snapshot.get_transaction_info(&tx_hash) { let cycles = if tx_info.is_cellbase() { None @@ -2202,6 +2203,7 @@ impl ChainRpcImpl { let transaction_with_status = transaction_with_status.unwrap(); Ok(transaction_with_status) } + fn get_block_by_hash( &self, snapshot: &Snapshot, diff --git a/test/src/main.rs b/test/src/main.rs index 02e09c809b..b44a15f2c4 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -388,6 +388,12 @@ fn canonicalize_path>(path: P) -> PathBuf { .unwrap_or_else(|_| path.as_ref().to_path_buf()) } +fn _all_specs() -> Vec> { + // This case is not stable right now + //vec![Box::new(PoolResolveConflictAfterReorg)] + vec![Box::new(RemoveConflictFromPending)] +} + fn all_specs() -> Vec> { let mut specs: Vec> = vec![ Box::new(BlockSyncFromOne), @@ -430,7 +436,8 @@ fn all_specs() -> Vec> { Box::new(GetRawTxPool), Box::new(PoolReconcile), Box::new(PoolResurrect), - Box::new(PoolResolveConflictAfterReorg), + //TODO: (yukang) + //Box::new(PoolResolveConflictAfterReorg), Box::new(InvalidHeaderDep), #[cfg(not(target_os = "windows"))] Box::new(PoolPersisted), diff --git a/test/src/node.rs b/test/src/node.rs index 143367a148..90bad67dde 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -5,6 +5,7 @@ use crate::{SYSTEM_CELL_ALWAYS_FAILURE_INDEX, SYSTEM_CELL_ALWAYS_SUCCESS_INDEX}; use ckb_app_config::CKBAppConfig; use ckb_chain_spec::consensus::Consensus; use ckb_chain_spec::ChainSpec; +use ckb_jsonrpc_types::TxStatus; use ckb_jsonrpc_types::{BlockFilter, BlockTemplate, TxPoolInfo}; use ckb_logger::{debug, error}; use ckb_resource::Resource; @@ -356,6 +357,10 @@ impl Node { .send_transaction(transaction.data().into()) } + pub fn get_transaction(&self, tx_hash: Byte32) -> TxStatus { + self.rpc_client().get_transaction(tx_hash).tx_status + } + pub fn remove_transaction(&self, tx_hash: Byte32) -> bool { self.rpc_client().remove_transaction(tx_hash) } diff --git a/test/src/specs/mining/fee.rs b/test/src/specs/mining/fee.rs index ecf6a10676..843e97227e 100644 --- a/test/src/specs/mining/fee.rs +++ b/test/src/specs/mining/fee.rs @@ -144,7 +144,6 @@ impl Spec for FeeOfMultipleMaxBlockProposalsLimit { txs.iter().for_each(|tx| { node.submit_transaction(tx); }); - (0..multiple).for_each(|_| { let block = node.new_block(None, None, None); node.submit_block(&block); diff --git a/test/src/specs/tx_pool/pool_reconcile.rs b/test/src/specs/tx_pool/pool_reconcile.rs index 3e1806767d..280c506cc8 100644 --- a/test/src/specs/tx_pool/pool_reconcile.rs +++ b/test/src/specs/tx_pool/pool_reconcile.rs @@ -120,6 +120,7 @@ impl Spec for PoolResolveConflictAfterReorg { node0.mine_with_blocking(|template| template.number.value() != (block.number() + 1)); node0.wait_for_tx_pool(); + for tx in txs[1..].iter() { assert!(is_transaction_proposed(node0, tx)); } diff --git a/test/src/specs/tx_pool/reorg_proposals.rs b/test/src/specs/tx_pool/reorg_proposals.rs index ab224e16e1..d3c0f5aefb 100644 --- a/test/src/specs/tx_pool/reorg_proposals.rs +++ b/test/src/specs/tx_pool/reorg_proposals.rs @@ -1,6 +1,7 @@ use crate::specs::tx_pool::utils::{assert_new_block_committed, prepare_tx_family}; use crate::utils::{blank, propose}; use crate::{Node, Spec}; +use ckb_jsonrpc_types::TxStatus; use ckb_types::core::BlockView; pub struct ReorgHandleProposals; @@ -40,8 +41,13 @@ impl Spec for ReorgHandleProposals { node_a.submit_transaction(family.b()); node_b.submit_transaction(family.a()); node_b.submit_transaction(family.b()); + node_a.submit_block(&propose(node_a, &[family.a()])); node_b.submit_block(&propose(node_b, &[family.b()])); + + assert!(node_a.get_transaction(family.a().hash()) == TxStatus::pending()); + assert!(node_a.get_transaction(family.b().hash()) == TxStatus::pending()); + (0..window.closest()).for_each(|_| { node_a.submit_block(&blank(node_a)); }); diff --git a/test/src/util/mining.rs b/test/src/util/mining.rs index d4880151bb..ebbf686522 100644 --- a/test/src/util/mining.rs +++ b/test/src/util/mining.rs @@ -112,11 +112,11 @@ impl Node { { let mut count = 0; let mut template = self.rpc_client().get_block_template(None, None, None); + while blocking(&mut template) { sleep(Duration::from_millis(100)); template = self.rpc_client().get_block_template(None, None, None); count += 1; - if count > 900 { panic!("mine_with_blocking timeout"); } diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index bc425d696c..0ce764ecb0 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -36,6 +36,7 @@ sentry = { version = "0.26.0", optional = true } serde_json = "1.0" rand = "0.8.4" hyper = { version = "0.14", features = ["http1", "client", "tcp"] } +#multi_index_map = { git = "https://github.com/wyjin/multi_index_map.git", branch = "master" } multi_index_map = "0.5.0" slab = "0.4" rustc-hash = "1.1" diff --git a/tx-pool/src/block_assembler/mod.rs b/tx-pool/src/block_assembler/mod.rs index 4f1f4b5edc..68b63db2de 100644 --- a/tx-pool/src/block_assembler/mod.rs +++ b/tx-pool/src/block_assembler/mod.rs @@ -203,6 +203,7 @@ impl BlockAssembler { current_template.cellbase.clone(), txs, )?; + let txs_size = checked_txs.iter().map(|tx| tx.size).sum(); let total_size = basic_size + txs_size; diff --git a/tx-pool/src/component/chunk.rs b/tx-pool/src/component/chunk.rs index f86b54fb55..225e0a4ea8 100644 --- a/tx-pool/src/component/chunk.rs +++ b/tx-pool/src/component/chunk.rs @@ -93,7 +93,6 @@ impl ChunkQueue { } /// If the queue did not have this tx present, true is returned. - /// /// If the queue did have this tx present, false is returned. pub fn add_tx(&mut self, tx: TransactionView, remote: Option<(Cycle, PeerIndex)>) -> bool { if self.contains_key(&tx.proposal_short_id()) { diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index a9b4287140..4e5d487cdf 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -1,8 +1,8 @@ -use crate::component::{container::AncestorsScoreSortKey, entry::TxEntry, proposed::ProposedPool}; +use crate::component::pool_map::PoolMap; +use crate::component::{entry::TxEntry, score_key::AncestorsScoreSortKey}; use ckb_types::{core::Cycle, packed::ProposalShortId}; use ckb_util::LinkedHashMap; use std::collections::{BTreeSet, HashMap, HashSet}; -use crate::component::pool_map::MultiIndexPoolEntryMap; // A template data struct used to store modified entries when package txs #[derive(Default)] @@ -49,8 +49,7 @@ const MAX_CONSECUTIVE_FAILURES: usize = 500; /// find txs to package into commitment pub struct CommitTxsScanner<'a> { - proposed_pool: &'a ProposedPool, - pool_entries: &'a MultiIndexPoolEntryMap, + pool_map: &'a PoolMap, entries: Vec, // modified_entries will store sorted packages after they are modified // because some of their txs are already in the block @@ -62,11 +61,10 @@ pub struct CommitTxsScanner<'a> { } impl<'a> CommitTxsScanner<'a> { - pub fn new(proposed_pool: &'a ProposedPool, pool_entries: &'a MultiIndexPoolEntryMap) -> CommitTxsScanner<'a> { + pub fn new(pool_map: &'a PoolMap) -> CommitTxsScanner<'a> { CommitTxsScanner { - proposed_pool, entries: Vec::new(), - pool_entries: pool_entries, + pool_map, modified_entries: TxModifiedEntries::default(), fetched_txs: HashSet::default(), failed_txs: HashSet::default(), @@ -83,7 +81,7 @@ impl<'a> CommitTxsScanner<'a> { let mut cycles: Cycle = 0; let mut consecutive_failed = 0; - let mut iter = self.pool_entries.score_sorted_iter().peekable(); + let mut iter = self.pool_map.score_sorted_iter().peekable(); loop { let mut using_modified = false; @@ -146,9 +144,10 @@ impl<'a> CommitTxsScanner<'a> { }; // prepare to package tx with ancestors - let ancestors_ids = self.proposed_pool.calc_ancestors(&short_id); + let ancestors_ids = self.pool_map.calc_ancestors(&short_id); let mut ancestors = ancestors_ids .iter() + .filter(|id| self.pool_map.has_proposed(id)) .filter_map(only_unconfirmed) .cloned() .collect::>(); @@ -181,7 +180,7 @@ impl<'a> CommitTxsScanner<'a> { fn retrieve_entry(&self, short_id: &ProposalShortId) -> Option<&TxEntry> { self.modified_entries .get(short_id) - .or_else(|| self.proposed_pool.get(short_id)) + .or_else(|| self.pool_map.get_proposed(short_id)) } // Skip entries in `proposed` that are already in a block or are present @@ -198,17 +197,17 @@ impl<'a> CommitTxsScanner<'a> { /// state updated assuming given transactions are inBlock. fn update_modified_entries(&mut self, already_added: &LinkedHashMap) { for (id, entry) in already_added { - let descendants = self.proposed_pool.calc_descendants(id); + let descendants = self.pool_map.calc_descendants(id); for desc_id in descendants .iter() - .filter(|id| !already_added.contains_key(id)) + .filter(|id| !already_added.contains_key(id) && self.pool_map.has_proposed(id)) { // Note: since https://github.com/nervosnetwork/ckb/pull/3706 // calc_descendants() may not consistent if let Some(mut desc) = self .modified_entries .remove(desc_id) - .or_else(|| self.proposed_pool.get(desc_id).cloned()) + .or_else(|| self.pool_map.get(desc_id).cloned()) { desc.sub_entry_weight(entry); self.modified_entries.insert(desc); diff --git a/tx-pool/src/component/container.rs b/tx-pool/src/component/container.rs deleted file mode 100644 index 2dc7752d94..0000000000 --- a/tx-pool/src/component/container.rs +++ /dev/null @@ -1,492 +0,0 @@ -//! The primary module containing the implementations of the transaction pool -//! and its top-level members. -extern crate rustc_hash; -extern crate slab; - -use crate::{component::entry::TxEntry, error::Reject}; -use ckb_types::{ - core::Capacity, - packed::{OutPoint, ProposalShortId}, -}; -use multi_index_map::MultiIndexMap; -use std::borrow::Cow; -use std::cmp::Ordering; -use std::collections::hash_map::Entry as HashMapEntry; -use std::collections::{HashMap, HashSet}; - -/// A struct to use as a sorted key -#[derive(Eq, PartialEq, Clone, Debug)] -pub struct AncestorsScoreSortKey { - pub fee: Capacity, - pub weight: u64, - pub id: ProposalShortId, - pub ancestors_fee: Capacity, - pub ancestors_weight: u64, - pub ancestors_size: usize, -} - -impl AncestorsScoreSortKey { - /// compare tx fee rate with ancestors fee rate and return the min one - pub(crate) fn min_fee_and_weight(&self) -> (Capacity, u64) { - // avoid division a_fee/a_weight > b_fee/b_weight - let tx_weight = u128::from(self.fee.as_u64()) * u128::from(self.ancestors_weight); - let ancestors_weight = u128::from(self.ancestors_fee.as_u64()) * u128::from(self.weight); - - if tx_weight < ancestors_weight { - (self.fee, self.weight) - } else { - (self.ancestors_fee, self.ancestors_weight) - } - } -} - -impl PartialOrd for AncestorsScoreSortKey { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for AncestorsScoreSortKey { - fn cmp(&self, other: &Self) -> Ordering { - // avoid division a_fee/a_weight > b_fee/b_weight - let (fee, weight) = self.min_fee_and_weight(); - let (other_fee, other_weight) = other.min_fee_and_weight(); - let self_weight = u128::from(fee.as_u64()) * u128::from(other_weight); - let other_weight = u128::from(other_fee.as_u64()) * u128::from(weight); - if self_weight == other_weight { - // if fee rate weight is same, then compare with ancestor weight - if self.ancestors_weight == other.ancestors_weight { - self.id.raw_data().cmp(&other.id.raw_data()) - } else { - self.ancestors_weight.cmp(&other.ancestors_weight) - } - } else { - self_weight.cmp(&other_weight) - } - } -} - -#[derive(Default, Debug, Clone)] -pub struct TxLinks { - pub parents: HashSet, - pub children: HashSet, -} - -#[derive(Clone, Copy)] -enum Relation { - Parents, - Children, -} - -impl TxLinks { - fn get_direct_ids(&self, relation: Relation) -> &HashSet { - match relation { - Relation::Parents => &self.parents, - Relation::Children => &self.children, - } - } -} - -#[derive(Default, Debug, Clone)] -pub struct TxLinksMap { - pub(crate) inner: HashMap, -} - -impl TxLinksMap { - fn new() -> Self { - TxLinksMap { - inner: Default::default(), - } - } - - fn calc_relative_ids( - &self, - short_id: &ProposalShortId, - relation: Relation, - ) -> HashSet { - let direct = self - .inner - .get(short_id) - .map(|link| link.get_direct_ids(relation)) - .cloned() - .unwrap_or_default(); - - self.calc_relation_ids(Cow::Owned(direct), relation) - } - - fn calc_relation_ids( - &self, - stage: Cow>, - relation: Relation, - ) -> HashSet { - let mut stage = stage.into_owned(); - let mut relation_ids = HashSet::with_capacity(stage.len()); - - while let Some(id) = stage.iter().next().cloned() { - relation_ids.insert(id.clone()); - stage.remove(&id); - - //recursively - for id in self - .inner - .get(&id) - .map(|link| link.get_direct_ids(relation)) - .cloned() - .unwrap_or_default() - { - if !relation_ids.contains(&id) { - stage.insert(id); - } - } - } - relation_ids - } - - fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet { - self.calc_relative_ids(short_id, Relation::Parents) - } - - fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet { - self.calc_relative_ids(short_id, Relation::Children) - } - - pub fn get_children(&self, short_id: &ProposalShortId) -> Option<&HashSet> { - self.inner.get(short_id).map(|link| &link.children) - } - - pub fn get_parents(&self, short_id: &ProposalShortId) -> Option<&HashSet> { - self.inner.get(short_id).map(|link| &link.parents) - } - - pub fn remove(&mut self, short_id: &ProposalShortId) -> Option { - self.inner.remove(short_id) - } - - fn remove_child( - &mut self, - short_id: &ProposalShortId, - child: &ProposalShortId, - ) -> Option { - self.inner - .get_mut(short_id) - .map(|links| links.children.remove(child)) - } - - fn remove_parent( - &mut self, - short_id: &ProposalShortId, - parent: &ProposalShortId, - ) -> Option { - self.inner - .get_mut(short_id) - .map(|links| links.parents.remove(parent)) - } - - fn add_child(&mut self, short_id: &ProposalShortId, child: ProposalShortId) -> Option { - self.inner - .get_mut(short_id) - .map(|links| links.children.insert(child)) - } - - fn add_parent(&mut self, short_id: &ProposalShortId, parent: ProposalShortId) -> Option { - self.inner - .get_mut(short_id) - .map(|links| links.parents.insert(parent)) - } - - fn clear(&mut self) { - self.inner.clear(); - } -} - -/// MultiIndexMap is used for multiple sort strategies, -/// to add any new sort strategy, you need to follow `AncestorsScoreSortKey` -/// and add logic to update the sort column in `insert_index_key` and `update_*_index_key` -#[derive(MultiIndexMap, Clone)] -pub struct IndexKey { - #[multi_index(hashed_unique)] - pub id: ProposalShortId, - #[multi_index(ordered_non_unique)] - pub score: AncestorsScoreSortKey, - // other sort key -} - -#[derive(Copy, Clone)] -enum EntryOp { - Add, - Remove, -} - -#[derive(Clone)] -pub(crate) struct SortedTxMap { - entries: HashMap, - pub(crate) sorted_index: MultiIndexIndexKeyMap, - deps: HashMap>, - /// A map track transaction ancestors and descendants - pub(crate) links: TxLinksMap, - max_ancestors_count: usize, -} - -impl SortedTxMap { - pub fn new(max_ancestors_count: usize) -> Self { - SortedTxMap { - entries: Default::default(), - sorted_index: MultiIndexIndexKeyMap::default(), - links: TxLinksMap::new(), - deps: Default::default(), - max_ancestors_count, - } - } - - pub fn size(&self) -> usize { - self.entries.len() - } - - pub fn iter(&self) -> impl Iterator { - self.entries.iter() - } - - fn insert_index_key(&mut self, entry: &TxEntry) { - self.sorted_index.insert(entry.as_index_key()); - } - - fn remove_sort_key(&mut self, entry: &TxEntry) { - self.sorted_index.remove_by_id(&entry.proposal_short_id()); - } - - fn update_descendants_index_key(&mut self, entry: &TxEntry, op: EntryOp) { - let descendants = self.calc_descendants(&entry.proposal_short_id()); - for desc_id in &descendants { - if let Some(desc_entry) = self.entries.get_mut(desc_id) { - let deleted = self - .sorted_index - .remove_by_id(&desc_entry.proposal_short_id()); - debug_assert!(deleted.is_some(), "pool inconsistent"); - - match op { - EntryOp::Remove => desc_entry.sub_entry_weight(entry), - EntryOp::Add => desc_entry.add_entry_weight(entry), - } - self.sorted_index.insert(desc_entry.as_index_key()); - } - } - } - - // Usually when a new transaction is added to the pool, it has no in-pool - // children (because any such children would be an orphan). So in add_entry(), we: - // - update a new entry's parents set to include all in-pool parents - // - update the new entry's parents to include the new tx as a child - // - update all ancestors of the transaction to include the new tx's size/fee - pub fn add_entry(&mut self, mut entry: TxEntry) -> Result { - let short_id = entry.proposal_short_id(); - - if self.contains_key(&short_id) { - return Ok(false); - }; - - // find in pool parents - let mut parents: HashSet = HashSet::with_capacity( - entry.transaction().inputs().len() + entry.transaction().cell_deps().len(), - ); - - for input in entry.transaction().inputs() { - let input_pt = input.previous_output(); - if let Some(deps) = self.deps.get(&input_pt) { - parents.extend(deps.iter().cloned()); - } - - let parent_hash = &input_pt.tx_hash(); - let id = ProposalShortId::from_tx_hash(parent_hash); - if self.links.inner.contains_key(&id) { - parents.insert(id); - } - } - for cell_dep in entry.transaction().cell_deps() { - let dep_pt = cell_dep.out_point(); - let id = ProposalShortId::from_tx_hash(&dep_pt.tx_hash()); - if self.links.inner.contains_key(&id) { - parents.insert(id); - } - } - - let ancestors = self - .links - .calc_relation_ids(Cow::Borrowed(&parents), Relation::Parents); - - // update parents references - for ancestor_id in &ancestors { - let ancestor = self.entries.get(ancestor_id).expect("pool consistent"); - entry.add_entry_weight(ancestor); - } - - if entry.ancestors_count > self.max_ancestors_count { - return Err(Reject::ExceededMaximumAncestorsCount); - } - - for cell_dep in entry.transaction().cell_deps() { - let dep_pt = cell_dep.out_point(); - // insert dep-ref map - self.deps - .entry(dep_pt) - .or_insert_with(HashSet::new) - .insert(short_id.clone()); - } - - for parent in &parents { - self.links.add_child(parent, short_id.clone()); - } - - // insert links - let links = TxLinks { - parents, - children: Default::default(), - }; - self.links.inner.insert(short_id.clone(), links); - self.insert_index_key(&entry); - self.entries.insert(short_id, entry); - Ok(true) - } - - // update_descendants_from_detached is used to update - // the descendants for a single transaction that has been added to the - // pool but may have child transactions in the pool, eg during a - // chain reorg. - pub fn update_descendants_from_detached( - &mut self, - id: &ProposalShortId, - children: HashSet, - ) { - if let Some(entry) = self.entries.get(id).cloned() { - for child in &children { - self.links.add_parent(child, id.clone()); - } - if let Some(links) = self.links.inner.get_mut(id) { - links.children.extend(children); - } - - self.update_descendants_index_key(&entry, EntryOp::Add); - } - } - - pub fn contains_key(&self, id: &ProposalShortId) -> bool { - self.entries.contains_key(id) - } - - pub fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { - self.entries.get(id) - } - - #[cfg(test)] - pub(crate) fn deps(&self) -> &HashMap> { - &self.deps - } - - fn update_deps_for_remove(&mut self, entry: &TxEntry) { - for cell_dep in entry.transaction().cell_deps() { - let dep_pt = cell_dep.out_point(); - if let HashMapEntry::Occupied(mut o) = self.deps.entry(dep_pt) { - let set = o.get_mut(); - if set.remove(&entry.proposal_short_id()) && set.is_empty() { - o.remove_entry(); - } - } - } - } - - fn update_children_for_remove(&mut self, id: &ProposalShortId) { - if let Some(children) = self.get_children(id).cloned() { - for child in children { - self.links.remove_parent(&child, id); - } - } - } - - fn update_parents_for_remove(&mut self, id: &ProposalShortId) { - if let Some(parents) = self.get_parents(id).cloned() { - for parent in parents { - self.links.remove_child(&parent, id); - } - } - } - - fn remove_unchecked(&mut self, id: &ProposalShortId) -> Option { - self.entries.remove(id).map(|entry| { - self.remove_sort_key(&entry); - self.update_deps_for_remove(&entry); - entry - }) - } - - pub fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let mut removed_ids = vec![id.to_owned()]; - let mut removed = vec![]; - let descendants = self.calc_descendants(id); - removed_ids.extend(descendants); - - // update links state for remove - for id in &removed_ids { - self.update_parents_for_remove(id); - self.update_children_for_remove(id); - } - - for id in removed_ids { - if let Some(entry) = self.remove_unchecked(&id) { - self.links.remove(&id); - removed.push(entry); - } - } - removed - } - - // notice: - // we are sure that all in-pool ancestor have already been processed. - // otherwise `links` will differ from the set of parents we'd calculate by searching - pub fn remove_entry(&mut self, id: &ProposalShortId) -> Option { - self.remove_unchecked(id).map(|entry| { - // We're not recursively removing a tx and all its descendants - // So we need update statistics state - self.update_descendants_index_key(&entry, EntryOp::Remove); - self.update_parents_for_remove(id); - self.update_children_for_remove(id); - self.links.remove(id); - entry - }) - } - - /// calculate all ancestors from pool - pub fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet { - self.links.calc_ancestors(short_id) - } - - /// calculate all descendants from pool - pub fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet { - self.links.calc_descendants(short_id) - } - - /// find children from pool - pub fn get_children(&self, short_id: &ProposalShortId) -> Option<&HashSet> { - self.links.get_children(short_id) - } - - /// find parents from pool - pub fn get_parents(&self, short_id: &ProposalShortId) -> Option<&HashSet> { - self.links.get_parents(short_id) - } - - /// sorted by ancestor score from higher to lower - pub fn score_sorted_iter(&self) -> impl Iterator { - // Note: multi_index don't support reverse order iteration now - // so we need to collect and reverse - let keys = self.sorted_index.iter_by_score().collect::>(); - keys.into_iter() - .rev() - .map(move |key| self.entries.get(&key.id).expect("consistent")) - } - - pub(crate) fn clear(&mut self) { - self.sorted_index.clear(); - self.deps.clear(); - self.links.clear(); - self.entries.clear(); - } -} diff --git a/tx-pool/src/component/edges.rs b/tx-pool/src/component/edges.rs new file mode 100644 index 0000000000..decf98c1a5 --- /dev/null +++ b/tx-pool/src/component/edges.rs @@ -0,0 +1,114 @@ +use ckb_types::packed::{Byte32, OutPoint, ProposalShortId}; +use std::collections::{hash_map::Entry, HashMap, HashSet}; + +#[derive(Debug, PartialEq, Clone)] +pub(crate) enum OutPointStatus { + UnConsumed, + Consumed(ProposalShortId), +} + +#[derive(Default, Debug, Clone)] +pub(crate) struct Edges { + /// input-txid map represent in-pool tx's inputs + pub(crate) inputs: HashMap, + /// output-op map represent in-pool tx's outputs + pub(crate) outputs: HashMap, + /// dep-set map represent in-pool tx's deps + pub(crate) deps: HashMap>, + /// dep-set map represent in-pool tx's header deps + pub(crate) header_deps: HashMap>, +} + +impl Edges { + #[cfg(test)] + pub(crate) fn outputs_len(&self) -> usize { + self.outputs.len() + } + + #[cfg(test)] + pub(crate) fn inputs_len(&self) -> usize { + self.inputs.len() + } + + #[cfg(test)] + pub(crate) fn header_deps_len(&self) -> usize { + self.header_deps.len() + } + + #[cfg(test)] + pub(crate) fn deps_len(&self) -> usize { + self.deps.len() + } + + pub(crate) fn insert_input(&mut self, out_point: OutPoint, txid: ProposalShortId) { + self.inputs.insert(out_point, txid); + } + + pub(crate) fn remove_input(&mut self, out_point: &OutPoint) -> Option { + self.inputs.remove(out_point) + } + + pub(crate) fn remove_output(&mut self, out_point: &OutPoint) -> Option { + match self.outputs.remove(out_point) { + Some(OutPointStatus::Consumed(id)) => Some(id), + _ => None, + } + } + + pub(crate) fn insert_unconsumed_output(&mut self, out_point: OutPoint) { + self.outputs.insert(out_point, OutPointStatus::UnConsumed); + } + + pub(crate) fn insert_consumed_output(&mut self, out_point: OutPoint, id: ProposalShortId) { + self.outputs.insert(out_point, OutPointStatus::Consumed(id)); + } + + pub(crate) fn get_input_ref(&self, out_point: &OutPoint) -> Option<&ProposalShortId> { + self.inputs.get(out_point) + } + + pub(crate) fn get_deps_ref(&self, out_point: &OutPoint) -> Option<&HashSet> { + self.deps.get(out_point) + } + + pub(crate) fn set_output_consumed( + &mut self, + out_point: &OutPoint, + tx_short_id: &ProposalShortId, + ) { + if let Some(status) = self.outputs.get_mut(out_point) { + *status = OutPointStatus::Consumed(tx_short_id.clone()); + } + } + + pub(crate) fn set_output_unconsumed(&mut self, out_point: &OutPoint) { + if let Some(status) = self.outputs.get_mut(out_point) { + *status = OutPointStatus::UnConsumed; + } + } + + pub(crate) fn get_output_ref(&self, out_point: &OutPoint) -> Option<&OutPointStatus> { + self.outputs.get(out_point) + } + + pub(crate) fn insert_deps(&mut self, out_point: OutPoint, txid: ProposalShortId) { + self.deps.entry(out_point).or_default().insert(txid); + } + + pub(crate) fn delete_txid_by_dep(&mut self, out_point: OutPoint, txid: &ProposalShortId) { + if let Entry::Occupied(mut occupied) = self.deps.entry(out_point) { + let ids = occupied.get_mut(); + ids.remove(txid); + if ids.is_empty() { + occupied.remove(); + } + } + } + + pub(crate) fn clear(&mut self) { + self.inputs.clear(); + self.outputs.clear(); + self.deps.clear(); + self.header_deps.clear(); + } +} diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 9dc6a027c7..bcce6a2e16 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -1,5 +1,4 @@ -use crate::component::container::AncestorsScoreSortKey; -use crate::component::container::IndexKey; +use crate::component::score_key::AncestorsScoreSortKey; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ core::{ @@ -100,11 +99,6 @@ impl TxEntry { EvictKey::from(self) } - /// Return a sort index - pub fn as_index_key(&self) -> IndexKey { - IndexKey::from(self) - } - /// Returns fee rate pub fn fee_rate(&self) -> FeeRate { let weight = get_transaction_weight(self.size, self.cycles); @@ -172,15 +166,6 @@ impl From<&TxEntry> for AncestorsScoreSortKey { } } -impl From<&TxEntry> for IndexKey { - fn from(entry: &TxEntry) -> Self { - IndexKey { - id: entry.proposal_short_id(), - score: entry.as_score_key(), - } - } -} - impl Hash for TxEntry { fn hash(&self, state: &mut H) { Hash::hash(self.transaction(), state); diff --git a/tx-pool/src/component/links.rs b/tx-pool/src/component/links.rs new file mode 100644 index 0000000000..520673b59d --- /dev/null +++ b/tx-pool/src/component/links.rs @@ -0,0 +1,144 @@ +use ckb_types::packed::ProposalShortId; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet}; + +#[derive(Default, Debug, Clone)] +pub struct TxLinks { + pub parents: HashSet, + pub children: HashSet, +} + +#[derive(Clone, Copy)] +pub enum Relation { + Parents, + Children, +} + +impl TxLinks { + fn get_direct_ids(&self, relation: Relation) -> &HashSet { + match relation { + Relation::Parents => &self.parents, + Relation::Children => &self.children, + } + } +} + +#[derive(Default, Debug, Clone)] +pub struct TxLinksMap { + pub inner: HashMap, +} + +impl TxLinksMap { + pub fn new() -> Self { + TxLinksMap { + inner: Default::default(), + } + } + + fn calc_relative_ids( + &self, + short_id: &ProposalShortId, + relation: Relation, + ) -> HashSet { + let direct = self + .inner + .get(short_id) + .map(|link| link.get_direct_ids(relation)) + .cloned() + .unwrap_or_default(); + + self.calc_relation_ids(Cow::Owned(direct), relation) + } + + pub fn calc_relation_ids( + &self, + stage: Cow>, + relation: Relation, + ) -> HashSet { + let mut stage = stage.into_owned(); + let mut relation_ids = HashSet::with_capacity(stage.len()); + + while let Some(id) = stage.iter().next().cloned() { + relation_ids.insert(id.clone()); + stage.remove(&id); + + //recursively + for id in self + .inner + .get(&id) + .map(|link| link.get_direct_ids(relation)) + .cloned() + .unwrap_or_default() + { + if !relation_ids.contains(&id) { + stage.insert(id); + } + } + } + relation_ids + } + + pub fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet { + self.calc_relative_ids(short_id, Relation::Parents) + } + + pub fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet { + self.calc_relative_ids(short_id, Relation::Children) + } + + pub fn get_children(&self, short_id: &ProposalShortId) -> Option<&HashSet> { + self.inner.get(short_id).map(|link| &link.children) + } + + pub fn get_parents(&self, short_id: &ProposalShortId) -> Option<&HashSet> { + self.inner.get(short_id).map(|link| &link.parents) + } + + pub fn remove(&mut self, short_id: &ProposalShortId) -> Option { + self.inner.remove(short_id) + } + + pub fn remove_child( + &mut self, + short_id: &ProposalShortId, + child: &ProposalShortId, + ) -> Option { + self.inner + .get_mut(short_id) + .map(|links| links.children.remove(child)) + } + + pub fn remove_parent( + &mut self, + short_id: &ProposalShortId, + parent: &ProposalShortId, + ) -> Option { + self.inner + .get_mut(short_id) + .map(|links| links.parents.remove(parent)) + } + + pub fn add_child( + &mut self, + short_id: &ProposalShortId, + child: ProposalShortId, + ) -> Option { + self.inner + .get_mut(short_id) + .map(|links| links.children.insert(child)) + } + + pub fn add_parent( + &mut self, + short_id: &ProposalShortId, + parent: ProposalShortId, + ) -> Option { + self.inner + .get_mut(short_id) + .map(|links| links.parents.insert(parent)) + } + + pub fn clear(&mut self) { + self.inner.clear(); + } +} diff --git a/tx-pool/src/component/mod.rs b/tx-pool/src/component/mod.rs index 60b4e78ae7..7f325424ba 100644 --- a/tx-pool/src/component/mod.rs +++ b/tx-pool/src/component/mod.rs @@ -2,12 +2,12 @@ pub mod commit_txs_scanner; pub mod entry; pub(crate) mod chunk; -pub(crate) mod container; +pub(crate) mod edges; +pub(crate) mod links; pub(crate) mod orphan; -pub(crate) mod pending; -pub(crate) mod proposed; -pub(crate) mod recent_reject; pub(crate) mod pool_map; +pub(crate) mod recent_reject; +pub(crate) mod score_key; #[cfg(test)] mod tests; diff --git a/tx-pool/src/component/pending.rs b/tx-pool/src/component/pending.rs deleted file mode 100644 index 3d0bd72383..0000000000 --- a/tx-pool/src/component/pending.rs +++ /dev/null @@ -1,363 +0,0 @@ -use crate::component::entry::TxEntry; -use ckb_types::{ - core::{ - cell::{CellChecker, CellMetaBuilder, CellProvider, CellStatus}, - error::OutPointError, - tx_pool::Reject, - TransactionView, - }, - packed::{Byte32, OutPoint, ProposalShortId}, - prelude::*, -}; -use ckb_util::{LinkedHashMap, LinkedHashMapEntries}; -use std::collections::{hash_map::Entry, HashMap, HashSet, VecDeque}; - -type ConflictEntry = (TxEntry, Reject); - -#[derive(Debug, Clone)] -pub(crate) struct PendingQueue { - pub(crate) inner: LinkedHashMap, - /// dep-set map represent in-pool tx's deps - pub(crate) deps: HashMap>, - /// input-set map represent in-pool tx's inputs - pub(crate) inputs: HashMap>, - /// dep-set map represent in-pool tx's header deps - pub(crate) header_deps: HashMap>, - // /// output-op map represent in-pool tx's outputs - pub(crate) outputs: HashMap>, -} - -impl PendingQueue { - pub(crate) fn new() -> Self { - PendingQueue { - inner: Default::default(), - deps: Default::default(), - inputs: Default::default(), - header_deps: Default::default(), - outputs: Default::default(), - } - } - - pub(crate) fn size(&self) -> usize { - self.inner.len() - } - - pub(crate) fn is_empty(&self) -> bool { - self.inner.len() == 0 - } - - #[cfg(test)] - pub(crate) fn outputs_len(&self) -> usize { - self.outputs.len() - } - - #[cfg(test)] - pub(crate) fn header_deps_len(&self) -> usize { - self.header_deps.len() - } - - #[cfg(test)] - pub(crate) fn deps_len(&self) -> usize { - self.deps.len() - } - - #[cfg(test)] - pub(crate) fn inputs_len(&self) -> usize { - self.inputs.len() - } - - pub(crate) fn add_entry(&mut self, entry: TxEntry) -> bool { - let tx_short_id = entry.proposal_short_id(); - if self.inner.contains_key(&tx_short_id) { - return false; - } - - let inputs = entry.transaction().input_pts_iter(); - let outputs = entry.transaction().output_pts(); - - for i in inputs { - self.inputs - .entry(i.to_owned()) - .or_default() - .insert(tx_short_id.clone()); - - if let Some(outputs) = self.outputs.get_mut(&i) { - outputs.insert(tx_short_id.clone()); - } - } - - // record dep-txid - for d in entry.related_dep_out_points() { - self.deps - .entry(d.to_owned()) - .or_default() - .insert(tx_short_id.clone()); - - if let Some(outputs) = self.outputs.get_mut(d) { - outputs.insert(tx_short_id.clone()); - } - } - - // record tx unconsumed output - for o in outputs { - self.outputs.insert(o, HashSet::new()); - } - - // record header_deps - let header_deps = entry.transaction().header_deps(); - if !header_deps.is_empty() { - self.header_deps - .insert(tx_short_id.clone(), header_deps.into_iter().collect()); - } - - self.inner.insert(tx_short_id, entry); - true - } - - pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let inputs = tx.input_pts_iter(); - let mut conflicts = Vec::new(); - - for i in inputs { - if let Some(ids) = self.inputs.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } - } - } - - // deps consumed - if let Some(ids) = self.deps.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } - } - } - } - conflicts - } - - pub(crate) fn resolve_conflict_header_dep( - &mut self, - headers: &HashSet, - ) -> Vec { - let mut conflicts = Vec::new(); - - // invalid header deps - let mut ids = Vec::new(); - for (tx_id, deps) in self.header_deps.iter() { - for hash in deps { - if headers.contains(hash) { - ids.push((hash.clone(), tx_id.clone())); - break; - } - } - } - - for (blk_hash, id) in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash.to_owned())); - conflicts.push((entry, reject)); - } - } - conflicts - } - - pub(crate) fn contains_key(&self, id: &ProposalShortId) -> bool { - self.inner.contains_key(id) - } - - pub fn iter(&self) -> impl Iterator { - self.inner.iter() - } - - pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { - self.inner.get(id) - } - - pub(crate) fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.inner.get(id).map(|entry| entry.transaction()) - } - - pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { - let removed = self.inner.remove(id); - - if let Some(ref entry) = removed { - self.remove_entry_relation(entry); - } - - removed - } - - pub(crate) fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let mut removed = Vec::new(); - if let Some(entry) = self.inner.remove(id) { - let descendants = self.get_descendants(&entry); - self.remove_entry_relation(&entry); - removed.push(entry); - for id in descendants { - if let Some(entry) = self.remove_entry(&id) { - removed.push(entry); - } - } - } - removed - } - - pub(crate) fn get_descendants(&self, entry: &TxEntry) -> HashSet { - let mut entries: VecDeque<&TxEntry> = VecDeque::new(); - entries.push_back(entry); - - let mut descendants = HashSet::new(); - while let Some(entry) = entries.pop_front() { - let outputs = entry.transaction().output_pts(); - - for output in outputs { - if let Some(ids) = self.outputs.get(&output) { - for id in ids { - if descendants.insert(id.clone()) { - if let Some(entry) = self.inner.get(id) { - entries.push_back(entry); - } - } - } - } - } - } - descendants - } - - pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { - let inputs = entry.transaction().input_pts_iter(); - let tx_short_id = entry.proposal_short_id(); - let outputs = entry.transaction().output_pts(); - - for i in inputs { - if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - // remove dep - for d in entry.related_dep_out_points().cloned() { - if let Entry::Occupied(mut occupied) = self.deps.entry(d) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - for o in outputs { - self.outputs.remove(&o); - } - - self.header_deps.remove(&tx_short_id); - } - - pub(crate) fn remove_entries_by_filter bool>( - &mut self, - mut predicate: P, - ) -> Vec { - let entries = self.entries(); - let mut removed = Vec::new(); - for entry in entries { - if predicate(entry.key(), entry.get()) { - removed.push(entry.remove()); - } - } - for entry in &removed { - self.remove_entry_relation(entry); - } - - removed - } - - pub fn entries(&mut self) -> LinkedHashMapEntries { - self.inner.entries() - } - - // fill proposal txs - pub fn fill_proposals( - &self, - limit: usize, - exclusion: &HashSet, - proposals: &mut HashSet, - ) { - for id in self.inner.keys() { - if proposals.len() == limit { - break; - } - if !exclusion.contains(id) { - proposals.insert(id.clone()); - } - } - } - - pub(crate) fn drain(&mut self) -> Vec { - let txs = self - .inner - .drain() - .map(|(_k, entry)| entry.into_transaction()) - .collect::>(); - self.deps.clear(); - self.inputs.clear(); - self.header_deps.clear(); - self.outputs.clear(); - txs - } -} - -impl CellProvider for PendingQueue { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.inner.get(&ProposalShortId::from_tx_hash(&tx_hash)) { - match entry - .transaction() - .output_with_data(out_point.index().unpack()) - { - Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - None => CellStatus::Unknown, - } - } else { - CellStatus::Unknown - } - } -} - -impl CellChecker for PendingQueue { - fn is_live(&self, out_point: &OutPoint) -> Option { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.inner.get(&ProposalShortId::from_tx_hash(&tx_hash)) { - entry - .transaction() - .output(out_point.index().unpack()) - .map(|_| true) - } else { - None - } - } -} diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 2a66e760f2..f9b00b75e3 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -1,25 +1,30 @@ //! Top-level Pool type, methods, and tests extern crate rustc_hash; extern crate slab; -use crate::component::container::AncestorsScoreSortKey; +use crate::component::edges::{Edges, OutPointStatus}; use crate::component::entry::EvictKey; +use crate::component::links::{Relation, TxLinksMap}; +use crate::component::score_key::AncestorsScoreSortKey; use crate::error::Reject; use crate::TxEntry; -use ckb_logger::{debug, error, trace, warn}; + +use ckb_logger::trace; use ckb_types::core::error::OutPointError; use ckb_types::packed::OutPoint; use ckb_types::{ - core::cell::{CellMetaBuilder, CellProvider, CellStatus}, - prelude::*, + bytes::Bytes, + core::{cell::CellChecker, TransactionView}, + packed::{Byte32, CellOutput, ProposalShortId}, }; use ckb_types::{ - core::{cell::CellChecker, TransactionView}, - packed::{Byte32, ProposalShortId}, + core::cell::{CellMetaBuilder, CellProvider, CellStatus}, + prelude::*, }; use multi_index_map::MultiIndexMap; -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::collections::{HashSet, VecDeque}; +use std::borrow::Cow; +use std::collections::HashSet; + +use super::links::TxLinks; type ConflictEntry = (TxEntry, Reject); @@ -30,6 +35,12 @@ pub enum Status { Proposed, } +#[derive(Copy, Clone)] +enum EntryOp { + Add, + Remove, +} + #[derive(MultiIndexMap, Clone)] pub struct PoolEntry { #[multi_index(hashed_unique)] @@ -40,9 +51,8 @@ pub struct PoolEntry { pub status: Status, #[multi_index(ordered_non_unique)] pub evict_key: EvictKey, - - pub inner: TxEntry, // other sort key + pub inner: TxEntry, } impl MultiIndexPoolEntryMap { @@ -50,7 +60,10 @@ impl MultiIndexPoolEntryMap { pub fn score_sorted_iter(&self) -> impl Iterator { // Note: multi_index don't support reverse order iteration now // so we need to collect and reverse - let entries = self.iter_by_score().collect::>(); + let entries = self + .iter_by_score() + .filter(|entry| entry.status == Status::Proposed) + .collect::>(); entries.into_iter().rev().map(move |entry| &entry.inner) } } @@ -58,13 +71,10 @@ impl MultiIndexPoolEntryMap { pub struct PoolMap { /// The pool entries with different kinds of sort strategies pub(crate) entries: MultiIndexPoolEntryMap, - /// dep-set map represent in-pool tx's header deps - pub(crate) header_deps: HashMap>, - /// dep-set map represent in-pool tx's deps - pub(crate) deps: HashMap>, - /// input-set map represent in-pool tx's inputs - pub(crate) inputs: HashMap>, - pub(crate) outputs: HashMap>, + /// All the deps, header_deps, inputs, outputs relationships + pub(crate) edges: Edges, + /// All the parent/children relationships + pub(crate) links: TxLinksMap, pub(crate) max_ancestors_count: usize, } @@ -72,197 +82,140 @@ impl PoolMap { pub fn new(max_ancestors_count: usize) -> Self { PoolMap { entries: MultiIndexPoolEntryMap::default(), - header_deps: HashMap::default(), - deps: HashMap::default(), - inputs: HashMap::default(), - outputs: HashMap::default(), + edges: Edges::default(), + links: TxLinksMap::new(), max_ancestors_count, } } #[cfg(test)] pub(crate) fn outputs_len(&self) -> usize { - self.outputs.len() + self.edges.outputs_len() } #[cfg(test)] pub(crate) fn header_deps_len(&self) -> usize { - self.header_deps.len() + self.edges.header_deps_len() } #[cfg(test)] pub(crate) fn deps_len(&self) -> usize { - self.deps.len() + self.edges.deps_len() } #[cfg(test)] pub(crate) fn inputs_len(&self) -> usize { - self.inputs.len() + self.edges.inputs_len() } #[cfg(test)] - pub fn size(&self) -> usize { + pub(crate) fn size(&self) -> usize { self.entries.len() } #[cfg(test)] - pub fn contains_key(&self, id: &ProposalShortId) -> bool { + pub(crate) fn contains_key(&self, id: &ProposalShortId) -> bool { self.entries.get_by_id(id).is_some() } + #[cfg(test)] pub(crate) fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { self.entries .get_by_id(id) .map(|entry| entry.inner.transaction()) } - pub fn add_entry(&mut self, entry: TxEntry, status: Status) -> bool { - let tx_short_id = entry.proposal_short_id(); - if self.entries.get_by_id(&tx_short_id).is_some() { - return false; - } - trace!("add_{:?} {}", status, entry.transaction().hash()); - let inputs = entry.transaction().input_pts_iter(); - let outputs = entry.transaction().output_pts(); - - for i in inputs { - self.inputs - .entry(i.to_owned()) - .or_default() - .insert(tx_short_id.clone()); - - if let Some(outputs) = self.outputs.get_mut(&i) { - outputs.insert(tx_short_id.clone()); - } - } - - // record dep-txid - for d in entry.related_dep_out_points() { - self.deps - .entry(d.to_owned()) - .or_default() - .insert(tx_short_id.clone()); - - if let Some(outputs) = self.outputs.get_mut(d) { - outputs.insert(tx_short_id.clone()); - } - } + #[cfg(test)] + pub(crate) fn add_proposed(&mut self, entry: TxEntry) -> Result { + self.add_entry(entry, Status::Proposed) + } - // record tx unconsumed output - for o in outputs { - self.outputs.insert(o, HashSet::new()); - } + #[cfg(test)] + pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView) -> Option { + self.remove_entry(&tx.proposal_short_id()) + } - // record header_deps - let header_deps = entry.transaction().header_deps(); - if !header_deps.is_empty() { - self.header_deps - .insert(tx_short_id.clone(), header_deps.into_iter().collect()); - } + pub(crate) fn get_by_id(&self, id: &ProposalShortId) -> Option<&PoolEntry> { + self.entries.get_by_id(id) + } - let score = entry.as_score_key(); - let evict_key = entry.as_evict_key(); - self.entries.insert(PoolEntry { - id: tx_short_id, - score, - status, - inner: entry, - evict_key, - }); - true + pub(crate) fn pending_size(&self) -> usize { + self.entries.get_by_status(&Status::Pending).len() + + self.entries.get_by_status(&Status::Gap).len() } - pub fn get_by_id(&self, id: &ProposalShortId) -> Option<&PoolEntry> { - self.entries.get_by_id(id).map(|entry| entry) + pub(crate) fn proposed_size(&self) -> usize { + self.entries.get_by_status(&Status::Proposed).len() } - fn get_descendants(&self, entry: &TxEntry) -> HashSet { - let mut entries: VecDeque<&TxEntry> = VecDeque::new(); - entries.push_back(entry); + pub(crate) fn score_sorted_iter(&self) -> impl Iterator { + self.entries.score_sorted_iter() + } - let mut descendants = HashSet::new(); - while let Some(entry) = entries.pop_front() { - let outputs = entry.transaction().output_pts(); + pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { + self.get_by_id(id).map(|entry| &entry.inner) + } - for output in outputs { - if let Some(ids) = self.outputs.get(&output) { - for id in ids { - if descendants.insert(id.clone()) { - if let Some(entry) = self.entries.get_by_id(id) { - entries.push_back(&entry.inner); - } - } - } - } - } + pub(crate) fn get_proposed(&self, id: &ProposalShortId) -> Option<&TxEntry> { + match self.get_by_id(id) { + Some(entry) if entry.status == Status::Proposed => Some(&entry.inner), + _ => None, } - descendants } - pub(crate) fn remove_entry_relation(&mut self, entry: &TxEntry) { - let inputs = entry.transaction().input_pts_iter(); - let tx_short_id = entry.proposal_short_id(); - let outputs = entry.transaction().output_pts(); + pub(crate) fn has_proposed(&self, id: &ProposalShortId) -> bool { + self.get_proposed(id).is_some() + } - for i in inputs { - if let Entry::Occupied(mut occupied) = self.inputs.entry(i) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } + /// calculate all ancestors from pool + pub(crate) fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet { + self.links.calc_ancestors(short_id) + } - // remove dep - for d in entry.related_dep_out_points().cloned() { - if let Entry::Occupied(mut occupied) = self.deps.entry(d) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(&tx_short_id); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } + /// calculate all descendants from pool + pub(crate) fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet { + self.links.calc_descendants(short_id) + } - for o in outputs { - self.outputs.remove(&o); - } + pub(crate) fn get_output_with_data(&self, out_point: &OutPoint) -> Option<(CellOutput, Bytes)> { + self.get(&ProposalShortId::from_tx_hash(&out_point.tx_hash())) + .and_then(|entry| { + entry + .transaction() + .output_with_data(out_point.index().unpack()) + }) + } - self.header_deps.remove(&tx_short_id); + pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { + if let Some(entry) = self.entries.remove_by_id(id) { + self.update_descendants_index_key(&entry.inner, EntryOp::Remove); + self.remove_entry_deps(&entry.inner); + self.remove_entry_edges(&entry.inner); + self.remove_entry_links(id); + return Some(entry.inner); + } + None } - pub fn remove_entry(&mut self, id: &ProposalShortId) -> Option { - let removed = self.entries.remove_by_id(id); + pub(crate) fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { + let mut removed_ids = vec![id.to_owned()]; + let mut removed = vec![]; + removed_ids.extend(self.calc_descendants(id)); - if let Some(ref entry) = removed { - self.remove_entry_relation(&entry.inner); + // update links state for remove + for id in &removed_ids { + self.remove_entry_links(id); } - removed.map(|e| e.inner) - } - pub fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let mut removed = Vec::new(); - if let Some(entry) = self.entries.remove_by_id(id) { - let descendants = self.get_descendants(&entry.inner); - self.remove_entry_relation(&entry.inner); - removed.push(entry.inner); - for id in descendants { - if let Some(entry) = self.remove_entry(&id) { - removed.push(entry); - } + for id in removed_ids { + if let Some(entry) = self.remove_entry(&id) { + removed.push(entry); } } removed } - pub fn resolve_conflict_header_dep( + pub(crate) fn resolve_conflict_header_dep( &mut self, headers: &HashSet, ) -> Vec { @@ -270,7 +223,7 @@ impl PoolMap { // invalid header deps let mut ids = Vec::new(); - for (tx_id, deps) in self.header_deps.iter() { + for (tx_id, deps) in self.edges.header_deps.iter() { for hash in deps { if headers.contains(hash) { ids.push((hash.clone(), tx_id.clone())); @@ -289,37 +242,45 @@ impl PoolMap { conflicts } - pub fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let inputs = tx.input_pts_iter(); + /// pending gap and proposed store the inputs and deps in edges, it's removed in `remove_entry` + /// here we use `input_pts_iter` and `related_dep_out_points` to find the conflict txs + pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { + let mut to_be_removed = Vec::new(); let mut conflicts = Vec::new(); - for i in inputs { - if let Some(ids) = self.inputs.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } + for (_, entry) in self.entries.iter() { + let entry = &entry.inner; + let tx_id = entry.proposal_short_id(); + let tx_inputs = entry.transaction().input_pts_iter(); + let deps = entry.related_dep_out_points(); + + // tx input conflict + for i in tx_inputs { + if tx.input_pts_iter().any(|j| i == j) { + to_be_removed.push((tx_id.to_owned(), i.clone())); } } - // deps consumed - if let Some(ids) = self.deps.remove(&i) { - for id in ids { - let entries = self.remove_entry_and_descendants(&id); - for entry in entries { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - conflicts.push((entry, reject)); - } + // tx deps conflict + for i in deps { + if tx.input_pts_iter().any(|j| *i == j) { + to_be_removed.push((tx_id.to_owned(), i.clone())); } } } + + for (tx_id, input) in to_be_removed.iter() { + let entries = self.remove_entry_and_descendants(tx_id); + let reject = Reject::Resolve(OutPointError::Dead(input.to_owned())); + let rejects = std::iter::repeat(reject).take(entries.len()); + conflicts.extend(entries.into_iter().zip(rejects)); + } + conflicts } // fill proposal txs - pub fn fill_proposals( + pub(crate) fn fill_proposals( &self, limit: usize, exclusion: &HashSet, @@ -336,12 +297,13 @@ impl PoolMap { } } - pub fn remove_entries_by_filter bool>( + pub(crate) fn remove_entries_by_filter bool>( &mut self, + status: &Status, mut predicate: P, ) -> Vec { let mut removed = Vec::new(); - for (_, entry) in self.entries.iter() { + for entry in self.entries.get_by_status(status) { if predicate(&entry.id, &entry.inner) { removed.push(entry.inner.clone()); } @@ -349,82 +311,283 @@ impl PoolMap { for entry in &removed { self.remove_entry(&entry.proposal_short_id()); } - removed } - pub fn iter(&self) -> impl Iterator { + pub(crate) fn iter(&self) -> impl Iterator { self.entries.iter().map(|(_, entry)| entry) } - pub fn iter_by_evict_key(&self) -> impl Iterator { + pub(crate) fn iter_by_evict_key(&self) -> impl Iterator { self.entries.iter_by_evict_key() } - pub fn next_evict_entry(&self) -> Option { + pub(crate) fn next_evict_entry(&self) -> Option { self.iter_by_evict_key() - .into_iter() .next() .map(|entry| entry.id.clone()) } - pub fn clear(&mut self) { + pub(crate) fn clear(&mut self) { self.entries = MultiIndexPoolEntryMap::default(); - self.deps.clear(); - self.inputs.clear(); - self.header_deps.clear(); - self.outputs.clear(); + self.edges.clear(); + self.links.clear(); } - pub(crate) fn drain(&mut self) -> Vec { - let txs = self - .entries - .iter() - .map(|(_k, entry)| entry.inner.clone().into_transaction()) - .collect::>(); - self.entries.clear(); - self.deps.clear(); - self.inputs.clear(); - self.header_deps.clear(); - self.outputs.clear(); - txs + fn remove_entry_links(&mut self, id: &ProposalShortId) { + if let Some(parents) = self.links.get_parents(id).cloned() { + for parent in parents { + self.links.remove_child(&parent, id); + } + } + if let Some(children) = self.links.get_children(id).cloned() { + for child in children { + self.links.remove_parent(&child, id); + } + } + self.links.remove(id); + } + + fn update_descendants_index_key(&mut self, parent: &TxEntry, op: EntryOp) { + let descendants: HashSet = + self.links.calc_descendants(&parent.proposal_short_id()); + for desc_id in &descendants { + // update child score + let entry = self.entries.get_by_id(desc_id).unwrap().clone(); + let mut child = entry.inner.clone(); + match op { + EntryOp::Remove => child.sub_entry_weight(parent), + EntryOp::Add => child.add_entry_weight(parent), + } + let short_id = child.proposal_short_id(); + //TODO: optimize it + self.entries.remove_by_id(&short_id); + self.insert_entry(&child, entry.status) + .expect("pool consistent"); + } + } + + fn record_entry_deps(&mut self, entry: &TxEntry) { + let tx_short_id: ProposalShortId = entry.proposal_short_id(); + let header_deps = entry.transaction().header_deps(); + let related_dep_out_points: Vec<_> = entry.related_dep_out_points().cloned().collect(); + + // record dep-txid + for d in related_dep_out_points { + self.edges.insert_deps(d.to_owned(), tx_short_id.clone()); + } + // record header_deps + if !header_deps.is_empty() { + self.edges + .header_deps + .insert(tx_short_id, header_deps.into_iter().collect()); + } + } + + fn record_entry_edges(&mut self, entry: &TxEntry) { + let tx_short_id: ProposalShortId = entry.proposal_short_id(); + let inputs = entry.transaction().input_pts_iter(); + let outputs = entry.transaction().output_pts(); + + let mut children = HashSet::new(); + // if input reference a in-pool output, connect it + // otherwise, record input for conflict check + for i in inputs { + self.edges.set_output_consumed(&i, &tx_short_id); + self.edges.insert_input(i.to_owned(), tx_short_id.clone()); + } + + // record tx output + for o in outputs { + if let Some(ids) = self.edges.get_deps_ref(&o).cloned() { + children.extend(ids); + } + if let Some(id) = self.edges.get_input_ref(&o).cloned() { + self.edges.insert_consumed_output(o, id.clone()); + children.insert(id); + } else { + self.edges.insert_unconsumed_output(o); + } + } + // update children + if !children.is_empty() { + self.update_descendants_from_detached(&tx_short_id, children); + } + } + + // update_descendants_from_detached is used to update + // the descendants for a single transaction that has been added to the + // pool but may have child transactions in the pool, eg during a + // chain reorg. + fn update_descendants_from_detached( + &mut self, + id: &ProposalShortId, + children: HashSet, + ) { + if let Some(entry) = self.get_by_id(id).cloned() { + for child in &children { + self.links.add_parent(child, id.clone()); + } + if let Some(links) = self.links.inner.get_mut(id) { + links.children.extend(children); + } + + self.update_descendants_index_key(&entry.inner, EntryOp::Add); + } + } + + /// Record the links for entry + fn record_entry_links(&mut self, entry: &mut TxEntry, status: &Status) -> Result { + // find in pool parents + let mut parents: HashSet = HashSet::with_capacity( + entry.transaction().inputs().len() + entry.transaction().cell_deps().len(), + ); + let short_id = entry.proposal_short_id(); + + for input in entry.transaction().inputs() { + let input_pt = input.previous_output(); + if let Some(deps) = self.edges.deps.get(&input_pt) { + parents.extend(deps.iter().cloned()); + } + + let parent_hash = &input_pt.tx_hash(); + let id = ProposalShortId::from_tx_hash(parent_hash); + if self.links.inner.contains_key(&id) { + parents.insert(id); + } + } + for cell_dep in entry.transaction().cell_deps() { + let dep_pt = cell_dep.out_point(); + let id = ProposalShortId::from_tx_hash(&dep_pt.tx_hash()); + if self.links.inner.contains_key(&id) { + parents.insert(id); + } + } + + let ancestors = self + .links + .calc_relation_ids(Cow::Borrowed(&parents), Relation::Parents); + + // update parents references + for ancestor_id in &ancestors { + let ancestor = self + .entries + .get_by_id(ancestor_id) + .expect("pool consistent"); + entry.add_entry_weight(&ancestor.inner); + } + if *status == Status::Proposed && entry.ancestors_count > self.max_ancestors_count { + return Err(Reject::ExceededMaximumAncestorsCount); + } + + for cell_dep in entry.transaction().cell_deps() { + let dep_pt = cell_dep.out_point(); + // insert dep-ref map + self.edges + .deps + .entry(dep_pt) + .or_insert_with(HashSet::new) + .insert(short_id.clone()); + } + + for parent in &parents { + self.links.add_child(parent, short_id.clone()); + } + + // insert links + let links = TxLinks { + parents, + children: Default::default(), + }; + self.links.inner.insert(short_id, links); + + Ok(true) + } + + fn remove_entry_edges(&mut self, entry: &TxEntry) { + let inputs = entry.transaction().input_pts_iter(); + let outputs = entry.transaction().output_pts(); + + for o in outputs { + self.edges.remove_output(&o); + } + + for i in inputs { + // release input record + self.edges.remove_input(&i); + self.edges.set_output_unconsumed(&i); + } + } + + fn remove_entry_deps(&mut self, entry: &TxEntry) { + let id = entry.proposal_short_id(); + for d in entry.related_dep_out_points().cloned() { + self.edges.delete_txid_by_dep(d, &id); + } + + self.edges.header_deps.remove(&id); + } + + pub(crate) fn add_entry(&mut self, mut entry: TxEntry, status: Status) -> Result { + let tx_short_id = entry.proposal_short_id(); + if self.entries.get_by_id(&tx_short_id).is_some() { + return Ok(false); + } + trace!("add_{:?} {}", status, entry.transaction().hash()); + self.record_entry_links(&mut entry, &status)?; + self.insert_entry(&entry, status)?; + self.record_entry_deps(&entry); + self.record_entry_edges(&entry); + Ok(true) + } + + fn insert_entry(&mut self, entry: &TxEntry, status: Status) -> Result { + let tx_short_id = entry.proposal_short_id(); + let score = entry.as_score_key(); + let evict_key = entry.as_evict_key(); + self.entries.insert(PoolEntry { + id: tx_short_id, + score, + status, + inner: entry.clone(), + evict_key, + }); + Ok(true) } } -impl CellProvider for MultiIndexPoolEntryMap { +impl CellProvider for PoolMap { fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { - match entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - { - Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - None => CellStatus::Unknown, + if let Some(id) = self.edges.get_input_ref(out_point) { + if self.has_proposed(id) { + return CellStatus::Dead; + } + } + match self.edges.get_output_ref(out_point) { + Some(OutPointStatus::UnConsumed) => { + let (output, data) = self.get_output_with_data(out_point).expect("output"); + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) } - } else { - CellStatus::Unknown + Some(OutPointStatus::Consumed(id)) if self.has_proposed(id) => CellStatus::Dead, + _ => CellStatus::Unknown, } } } -impl CellChecker for MultiIndexPoolEntryMap { +impl CellChecker for PoolMap { fn is_live(&self, out_point: &OutPoint) -> Option { - let tx_hash = out_point.tx_hash(); - if let Some(entry) = self.get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) { - entry - .inner - .transaction() - .output(out_point.index().unpack()) - .map(|_| true) - } else { - None + if let Some(id) = self.edges.get_input_ref(out_point) { + if self.has_proposed(id) { + return Some(false); + } + } + match self.edges.get_output_ref(out_point) { + Some(OutPointStatus::Consumed(id)) if self.has_proposed(id) => Some(false), + Some(OutPointStatus::UnConsumed) => Some(true), + _ => None, } } } diff --git a/tx-pool/src/component/proposed.rs b/tx-pool/src/component/proposed.rs deleted file mode 100644 index 2ac9114aa8..0000000000 --- a/tx-pool/src/component/proposed.rs +++ /dev/null @@ -1,403 +0,0 @@ -use crate::component::container::SortedTxMap; -use crate::component::entry::TxEntry; -use crate::error::Reject; -use ckb_types::{ - bytes::Bytes, - core::{ - cell::{CellChecker, CellMetaBuilder, CellProvider, CellStatus}, - error::OutPointError, - TransactionView, - }, - packed::{Byte32, CellOutput, OutPoint, ProposalShortId}, - prelude::*, -}; -use std::collections::{hash_map::Entry, HashMap, HashSet}; -use std::iter; - -type ConflictEntry = (TxEntry, Reject); - -#[derive(Default, Debug, Clone)] -pub(crate) struct Edges { - /// output-op map represent in-pool tx's outputs - pub(crate) outputs: HashMap>, - /// input-txid map represent in-pool tx's inputs - pub(crate) inputs: HashMap, - /// dep-set map represent in-pool tx's deps - pub(crate) deps: HashMap>, - /// dep-set map represent in-pool tx's header deps - pub(crate) header_deps: HashMap>, -} - -impl Edges { - #[cfg(test)] - pub(crate) fn outputs_len(&self) -> usize { - self.outputs.len() - } - - #[cfg(test)] - pub(crate) fn inputs_len(&self) -> usize { - self.inputs.len() - } - - pub(crate) fn insert_input(&mut self, out_point: OutPoint, txid: ProposalShortId) { - self.inputs.insert(out_point, txid); - } - - pub(crate) fn remove_input(&mut self, out_point: &OutPoint) -> Option { - self.inputs.remove(out_point) - } - - pub(crate) fn remove_output(&mut self, out_point: &OutPoint) -> Option { - self.outputs.remove(out_point).unwrap_or(None) - } - - pub(crate) fn insert_output(&mut self, out_point: OutPoint) { - self.outputs.insert(out_point, None); - } - - pub(crate) fn insert_consumed_output(&mut self, out_point: OutPoint, id: ProposalShortId) { - self.outputs.insert(out_point, Some(id)); - } - - pub(crate) fn get_output_ref(&self, out_point: &OutPoint) -> Option<&Option> { - self.outputs.get(out_point) - } - - pub(crate) fn get_input_ref(&self, out_point: &OutPoint) -> Option<&ProposalShortId> { - self.inputs.get(out_point) - } - - pub(crate) fn get_deps_ref(&self, out_point: &OutPoint) -> Option<&HashSet> { - self.deps.get(out_point) - } - - pub(crate) fn get_mut_output( - &mut self, - out_point: &OutPoint, - ) -> Option<&mut Option> { - self.outputs.get_mut(out_point) - } - - pub(crate) fn remove_deps(&mut self, out_point: &OutPoint) -> Option> { - self.deps.remove(out_point) - } - - pub(crate) fn insert_deps(&mut self, out_point: OutPoint, txid: ProposalShortId) { - self.deps.entry(out_point).or_default().insert(txid); - } - - pub(crate) fn delete_txid_by_dep(&mut self, out_point: OutPoint, txid: &ProposalShortId) { - if let Entry::Occupied(mut occupied) = self.deps.entry(out_point) { - let empty = { - let ids = occupied.get_mut(); - ids.remove(txid); - ids.is_empty() - }; - if empty { - occupied.remove(); - } - } - } - - pub(crate) fn clear(&mut self) { - self.outputs.clear(); - self.inputs.clear(); - self.deps.clear(); - self.header_deps.clear(); - } -} - -#[derive(Clone)] -pub struct ProposedPool { - pub(crate) edges: Edges, - inner: SortedTxMap, -} - -impl CellProvider for ProposedPool { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - if self.edges.get_input_ref(out_point).is_some() { - return CellStatus::Dead; - } - if let Some(x) = self.edges.get_output_ref(out_point) { - // output consumed - if x.is_some() { - return CellStatus::Dead; - } else { - let (output, data) = self.get_output_with_data(out_point).expect("output"); - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - return CellStatus::live_cell(cell_meta); - } - } - CellStatus::Unknown - } -} - -impl CellChecker for ProposedPool { - fn is_live(&self, out_point: &OutPoint) -> Option { - if self.edges.get_input_ref(out_point).is_some() { - return Some(false); - } - if let Some(x) = self.edges.get_output_ref(out_point) { - // output consumed - if x.is_some() { - return Some(false); - } else { - return Some(true); - } - } - None - } -} - -impl ProposedPool { - pub(crate) fn new(max_ancestors_count: usize) -> Self { - ProposedPool { - edges: Default::default(), - inner: SortedTxMap::new(max_ancestors_count), - } - } - - pub(crate) fn contains_key(&self, id: &ProposalShortId) -> bool { - self.inner.contains_key(id) - } - - pub fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { - self.inner.get(id) - } - - pub fn iter(&self) -> impl Iterator { - self.inner.iter() - } - - pub(crate) fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.get(id).map(|entry| entry.transaction()) - } - - pub fn size(&self) -> usize { - self.inner.size() - } - - pub(crate) fn get_output_with_data(&self, out_point: &OutPoint) -> Option<(CellOutput, Bytes)> { - self.inner - .get(&ProposalShortId::from_tx_hash(&out_point.tx_hash())) - .and_then(|entry| { - entry - .transaction() - .output_with_data(out_point.index().unpack()) - }) - } - - // remove entry and all it's descendants - pub(crate) fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { - let removed_entries = self.inner.remove_entry_and_descendants(id); - for entry in &removed_entries { - let tx = entry.transaction(); - let inputs = tx.input_pts_iter(); - let outputs = tx.output_pts(); - for i in inputs { - self.edges.inputs.remove(&i); - if let Some(id) = self.edges.get_mut_output(&i) { - *id = None; - } - } - - for d in entry.related_dep_out_points().cloned() { - self.edges.delete_txid_by_dep(d, id); - } - - for o in outputs { - self.edges.remove_output(&o); - } - - self.edges.header_deps.remove(&entry.proposal_short_id()); - } - removed_entries - } - - pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView) -> Option { - let outputs = tx.output_pts(); - let inputs = tx.input_pts_iter(); - let id = tx.proposal_short_id(); - - if let Some(entry) = self.inner.remove_entry(&id) { - for o in outputs { - self.edges.remove_output(&o); - } - - for i in inputs { - // release input record - self.edges.remove_input(&i); - if let Some(id) = self.edges.get_mut_output(&i) { - *id = None; - } - } - - for d in entry.related_dep_out_points().cloned() { - self.edges.delete_txid_by_dep(d, &id); - } - - self.edges.header_deps.remove(&id); - - return Some(entry); - } - None - } - - // In the event of a reorg, the assumption that a newly added tx has no - // in-pool children is false. In particular, the pool is in an - // inconsistent state while new transactions are being added, because there may - // be descendant transactions of a tx coming from a disconnected block that are - // unreachable from just looking at transactions in the pool (the linking - // transactions may also be in the disconnected block, waiting to be added). - // Because of this, there's not much benefit in trying to search for in-pool - // children in add_entry(). Instead, in the special case of transactions - // being added from a disconnected block, out-of-block descendants for all the - // in-block transactions by calling update_descendants_from_detached(). Note that - // until this is called, the pool state is not consistent, and in particular - // TxLinks may not be correct (and therefore functions like - // calc_ancestors() and calc_descendants() that rely - // on them to walk the pool are not generally safe to use). - pub(crate) fn add_entry(&mut self, entry: TxEntry) -> Result { - let tx_short_id = entry.proposal_short_id(); - - if self.inner.contains_key(&tx_short_id) { - return Ok(false); - } - - let inputs = entry.transaction().input_pts_iter(); - let outputs = entry.transaction().output_pts(); - let related_dep_out_points: Vec<_> = entry.related_dep_out_points().cloned().collect(); - let header_deps = entry.transaction().header_deps(); - - self.inner.add_entry(entry).map(|inserted| { - if inserted { - let mut children = HashSet::new(); - // if input reference a in-pool output, connect it - // otherwise, record input for conflict check - for i in inputs { - if let Some(id) = self.edges.get_mut_output(&i) { - *id = Some(tx_short_id.clone()); - } - self.edges.insert_input(i.to_owned(), tx_short_id.clone()); - } - - // record dep-txid - for d in related_dep_out_points { - self.edges.insert_deps(d.to_owned(), tx_short_id.clone()); - } - - // record tx output - for o in outputs { - if let Some(ids) = self.edges.get_deps_ref(&o).cloned() { - children.extend(ids); - } - if let Some(id) = self.edges.get_input_ref(&o).cloned() { - self.edges.insert_consumed_output(o, id.clone()); - children.insert(id); - } else { - self.edges.insert_output(o); - } - } - - // record header_deps - if !header_deps.is_empty() { - self.edges - .header_deps - .insert(tx_short_id.clone(), header_deps.into_iter().collect()); - } - - if !children.is_empty() { - self.inner - .update_descendants_from_detached(&tx_short_id, children); - } - } - inserted - }) - } - - pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let inputs = tx.input_pts_iter(); - let mut conflicts = Vec::new(); - - for i in inputs { - if let Some(id) = self.edges.remove_input(&i) { - let entries = self.remove_entry_and_descendants(&id); - if !entries.is_empty() { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - let rejects = iter::repeat(reject).take(entries.len()); - conflicts.extend(entries.into_iter().zip(rejects)); - } - } - - // deps consumed - if let Some(x) = self.edges.remove_deps(&i) { - for id in x { - let entries = self.remove_entry_and_descendants(&id); - if !entries.is_empty() { - let reject = Reject::Resolve(OutPointError::Dead(i.clone())); - let rejects = iter::repeat(reject).take(entries.len()); - conflicts.extend(entries.into_iter().zip(rejects)); - } - } - } - } - - conflicts - } - - pub(crate) fn resolve_conflict_header_dep( - &mut self, - headers: &HashSet, - ) -> Vec { - let mut conflicts = Vec::new(); - - // invalid header deps - let mut invalid_header_ids = Vec::new(); - for (tx_id, deps) in self.edges.header_deps.iter() { - for hash in deps { - if headers.contains(hash) { - invalid_header_ids.push((hash.clone(), tx_id.clone())); - break; - } - } - } - - for (blk_hash, id) in invalid_header_ids { - let entries = self.remove_entry_and_descendants(&id); - if !entries.is_empty() { - let reject = Reject::Resolve(OutPointError::InvalidHeader(blk_hash)); - let rejects = iter::repeat(reject).take(entries.len()); - conflicts.extend(entries.into_iter().zip(rejects)); - } - } - - conflicts - } - - /// sorted by ancestor score from higher to lower - pub fn score_sorted_iter(&self) -> impl Iterator { - self.inner.score_sorted_iter() - } - - /// find all ancestors from pool - pub fn calc_ancestors(&self, tx_short_id: &ProposalShortId) -> HashSet { - self.inner.calc_ancestors(tx_short_id) - } - - /// find all descendants from pool - pub fn calc_descendants(&self, tx_short_id: &ProposalShortId) -> HashSet { - self.inner.calc_descendants(tx_short_id) - } - - #[cfg(test)] - pub(crate) fn inner(&self) -> &SortedTxMap { - &self.inner - } - - pub(crate) fn clear(&mut self) { - self.edges.clear(); - self.inner.clear(); - } -} diff --git a/tx-pool/src/component/score_key.rs b/tx-pool/src/component/score_key.rs new file mode 100644 index 0000000000..011fb4000b --- /dev/null +++ b/tx-pool/src/component/score_key.rs @@ -0,0 +1,54 @@ +use ckb_types::{core::Capacity, packed::ProposalShortId}; +use std::cmp::Ordering; + +/// A struct to use as a sorted key +#[derive(Eq, PartialEq, Clone, Debug)] +pub struct AncestorsScoreSortKey { + pub fee: Capacity, + pub weight: u64, + pub id: ProposalShortId, + pub ancestors_fee: Capacity, + pub ancestors_weight: u64, + pub ancestors_size: usize, +} + +impl AncestorsScoreSortKey { + /// compare tx fee rate with ancestors fee rate and return the min one + pub(crate) fn min_fee_and_weight(&self) -> (Capacity, u64) { + // avoid division a_fee/a_weight > b_fee/b_weight + let tx_weight = u128::from(self.fee.as_u64()) * u128::from(self.ancestors_weight); + let ancestors_weight = u128::from(self.ancestors_fee.as_u64()) * u128::from(self.weight); + + if tx_weight < ancestors_weight { + (self.fee, self.weight) + } else { + (self.ancestors_fee, self.ancestors_weight) + } + } +} + +impl PartialOrd for AncestorsScoreSortKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for AncestorsScoreSortKey { + fn cmp(&self, other: &Self) -> Ordering { + // avoid division a_fee/a_weight > b_fee/b_weight + let (fee, weight) = self.min_fee_and_weight(); + let (other_fee, other_weight) = other.min_fee_and_weight(); + let self_weight = u128::from(fee.as_u64()) * u128::from(other_weight); + let other_weight = u128::from(other_fee.as_u64()) * u128::from(weight); + if self_weight == other_weight { + // if fee rate weight is same, then compare with ancestor weight + if self.ancestors_weight == other.ancestors_weight { + self.id.raw_data().cmp(&other.id.raw_data()) + } else { + self.ancestors_weight.cmp(&other.ancestors_weight) + } + } else { + self_weight.cmp(&other_weight) + } + } +} diff --git a/tx-pool/src/component/tests/mod.rs b/tx-pool/src/component/tests/mod.rs index 0f8bfcd719..d9a3529707 100644 --- a/tx-pool/src/component/tests/mod.rs +++ b/tx-pool/src/component/tests/mod.rs @@ -1,7 +1,6 @@ mod chunk; -mod container; mod pending; mod proposed; mod recent_reject; +mod score_key; mod util; -mod pool_map; \ No newline at end of file diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index d38ae3c1c0..b07e2e96e6 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -1,13 +1,19 @@ +use crate::component::edges::Edges; use crate::component::tests::util::{ build_tx, build_tx_with_dep, build_tx_with_header_dep, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, }; -use crate::component::{entry::TxEntry, pending::PendingQueue}; +use crate::component::{ + entry::TxEntry, + pool_map::{PoolMap, Status}, +}; +use ckb_types::packed::OutPoint; use ckb_types::{h256, packed::Byte32, prelude::*}; use std::collections::HashSet; #[test] fn test_basic() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(100); + assert_eq!(pool.size(), 0); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&Byte32::zero(), 2)], 1); let tx2 = build_tx( vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 2)], @@ -15,30 +21,36 @@ fn test_basic() { ); let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1.clone())); - assert!(queue.add_entry(entry2)); - assert!(queue.size() == 2); - assert!(queue.contains_key(&tx1.proposal_short_id())); - assert!(queue.contains_key(&tx2.proposal_short_id())); - - assert_eq!(queue.inputs_len(), 4); - assert_eq!(queue.outputs_len(), 4); - - assert_eq!(queue.get(&tx1.proposal_short_id()).unwrap(), &entry1); - assert_eq!(queue.get_tx(&tx2.proposal_short_id()).unwrap(), &tx2); - - let txs = queue.drain(); - assert!(queue.inner.is_empty()); - assert!(queue.deps.is_empty()); - assert!(queue.inputs.is_empty()); - assert!(queue.header_deps.is_empty()); - assert!(queue.outputs.is_empty()); - assert_eq!(txs, vec![tx1, tx2]); + assert!(pool.add_entry(entry1.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.size() == 2); + assert!(pool.contains_key(&tx1.proposal_short_id())); + assert!(pool.contains_key(&tx2.proposal_short_id())); + + assert_eq!(pool.inputs_len(), 4); + assert_eq!(pool.outputs_len(), 4); + + assert_eq!( + pool.entries + .get_by_id(&tx1.proposal_short_id()) + .unwrap() + .inner, + entry1 + ); + assert_eq!(pool.get_tx(&tx2.proposal_short_id()).unwrap(), &tx2); + assert_eq!(pool.edges.deps.len(), 0); + + pool.clear(); + assert!(pool.entries.is_empty()); + assert!(pool.edges.deps.is_empty()); + assert!(pool.edges.inputs.is_empty()); + assert!(pool.edges.header_deps.is_empty()); + assert!(pool.edges.outputs.is_empty()); } #[test] fn test_resolve_conflict() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(100); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); let tx2 = build_tx( vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], @@ -58,17 +70,17 @@ fn test_resolve_conflict() { let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1.clone())); - assert!(queue.add_entry(entry2.clone())); - assert!(queue.add_entry(entry3.clone())); + assert!(pool.add_entry(entry1.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry2.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry3.clone(), Status::Pending).unwrap()); - let conflicts = queue.resolve_conflict(&tx4); + let conflicts = pool.resolve_conflict(&tx4); assert_eq!( conflicts.into_iter().map(|i| i.0).collect::>(), HashSet::from_iter(vec![entry1, entry2]) ); - let conflicts = queue.resolve_conflict(&tx5); + let conflicts = pool.resolve_conflict(&tx5); assert_eq!( conflicts.into_iter().map(|i| i.0).collect::>(), HashSet::from_iter(vec![entry3]) @@ -77,7 +89,7 @@ fn test_resolve_conflict() { #[test] fn test_resolve_conflict_descendants() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let tx1 = build_tx(vec![(&Byte32::zero(), 1)], 1); let tx3 = build_tx(vec![(&tx1.hash(), 0)], 2); let tx4 = build_tx(vec![(&tx3.hash(), 0)], 1); @@ -87,11 +99,11 @@ fn test_resolve_conflict_descendants() { let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry4 = TxEntry::dummy_resolve(tx4, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1)); - assert!(queue.add_entry(entry3.clone())); - assert!(queue.add_entry(entry4.clone())); + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry4.clone(), Status::Pending).unwrap()); - let conflicts = queue.resolve_conflict(&tx2); + let conflicts = pool.resolve_conflict(&tx2); assert_eq!( conflicts.into_iter().map(|i| i.0).collect::>(), HashSet::from_iter(vec![entry3, entry4]) @@ -100,7 +112,7 @@ fn test_resolve_conflict_descendants() { #[test] fn test_resolve_conflict_header_dep() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let header: Byte32 = h256!("0x1").pack(); let tx = build_tx_with_header_dep( @@ -112,17 +124,17 @@ fn test_resolve_conflict_header_dep() { let entry = TxEntry::dummy_resolve(tx, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry.clone())); - assert!(queue.add_entry(entry1.clone())); + assert!(pool.add_entry(entry.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry1.clone(), Status::Pending).unwrap()); - assert_eq!(queue.inputs_len(), 3); - assert_eq!(queue.header_deps_len(), 1); - assert_eq!(queue.outputs_len(), 2); + assert_eq!(pool.inputs_len(), 3); + assert_eq!(pool.header_deps_len(), 1); + assert_eq!(pool.outputs_len(), 2); let mut headers = HashSet::new(); headers.insert(header); - let conflicts = queue.resolve_conflict_header_dep(&headers); + let conflicts = pool.resolve_conflict_header_dep(&headers); assert_eq!( conflicts.into_iter().map(|i| i.0).collect::>(), HashSet::from_iter(vec![entry, entry1]) @@ -131,29 +143,29 @@ fn test_resolve_conflict_header_dep() { #[test] fn test_remove_entry() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); let header: Byte32 = h256!("0x1").pack(); let tx2 = build_tx_with_header_dep(vec![(&h256!("0x2").pack(), 1)], vec![header], 1); let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1.clone())); - assert!(queue.add_entry(entry2.clone())); + assert!(pool.add_entry(entry1.clone(), Status::Pending).unwrap()); + assert!(pool.add_entry(entry2.clone(), Status::Pending).unwrap()); - let removed = queue.remove_entry(&tx1.proposal_short_id()); + let removed = pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(removed, Some(entry1)); - let removed = queue.remove_entry(&tx2.proposal_short_id()); + let removed = pool.remove_entry(&tx2.proposal_short_id()); assert_eq!(removed, Some(entry2)); - assert!(queue.inner.is_empty()); - assert!(queue.deps.is_empty()); - assert!(queue.inputs.is_empty()); - assert!(queue.header_deps.is_empty()); + assert!(pool.entries.is_empty()); + assert!(pool.edges.deps.is_empty()); + assert!(pool.edges.inputs.is_empty()); + assert!(pool.edges.header_deps.is_empty()); } #[test] fn test_remove_entries_by_filter() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); let tx2 = build_tx( vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], @@ -167,20 +179,22 @@ fn test_remove_entries_by_filter() { let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1)); - assert!(queue.add_entry(entry2)); - assert!(queue.add_entry(entry3)); + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); - queue.remove_entries_by_filter(|id, _tx_entry| id == &tx1.proposal_short_id()); + pool.remove_entries_by_filter(&Status::Pending, |id, _tx_entry| { + id == &tx1.proposal_short_id() + }); - assert!(!queue.contains_key(&tx1.proposal_short_id())); - assert!(queue.contains_key(&tx2.proposal_short_id())); - assert!(queue.contains_key(&tx3.proposal_short_id())); + assert!(!pool.contains_key(&tx1.proposal_short_id())); + assert!(pool.contains_key(&tx2.proposal_short_id())); + assert!(pool.contains_key(&tx3.proposal_short_id())); } #[test] fn test_fill_proposals() { - let mut queue = PendingQueue::new(); + let mut pool = PoolMap::new(1000); let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); let tx2 = build_tx( vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], @@ -194,36 +208,54 @@ fn test_fill_proposals() { let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(queue.add_entry(entry1)); - assert!(queue.add_entry(entry2)); - assert!(queue.add_entry(entry3)); + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); - assert_eq!(queue.inputs_len(), 5); - assert_eq!(queue.deps_len(), 1); - assert_eq!(queue.outputs_len(), 7); + assert_eq!(pool.inputs_len(), 5); + assert_eq!(pool.deps_len(), 1); + assert_eq!(pool.outputs_len(), 7); let id1 = tx1.proposal_short_id(); let id2 = tx2.proposal_short_id(); let id3 = tx3.proposal_short_id(); let mut ret = HashSet::new(); - queue.fill_proposals(10, &HashSet::new(), &mut ret); + pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); assert_eq!( ret, HashSet::from_iter(vec![id1.clone(), id2.clone(), id3.clone()]) ); let mut ret = HashSet::new(); - queue.fill_proposals(1, &HashSet::new(), &mut ret); + pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id1.clone()])); let mut ret = HashSet::new(); - queue.fill_proposals(2, &HashSet::new(), &mut ret); + pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id1.clone(), id2.clone()])); let mut ret = HashSet::new(); let mut exclusion = HashSet::new(); exclusion.insert(id2); - queue.fill_proposals(2, &exclusion, &mut ret); + pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); } + +#[test] +fn test_edges() { + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx(vec![(&h256!("0x1").pack(), 1)], 1); + + let short_id1 = tx1.proposal_short_id(); + let short_id2 = tx2.proposal_short_id(); + let mut edges = Edges::default(); + let outpoint = OutPoint::default(); + edges.insert_deps(outpoint.clone(), short_id1.clone()); + edges.insert_deps(outpoint.clone(), short_id2.clone()); + assert!(edges.deps.contains_key(&outpoint)); + edges.delete_txid_by_dep(outpoint.clone(), &short_id1); + assert!(!edges.deps.is_empty()); + edges.delete_txid_by_dep(outpoint, &short_id2); + assert!(edges.deps.is_empty()); +} diff --git a/tx-pool/src/component/tests/pool_map.rs b/tx-pool/src/component/tests/pool_map.rs deleted file mode 100644 index 7fcbd9e1a6..0000000000 --- a/tx-pool/src/component/tests/pool_map.rs +++ /dev/null @@ -1,236 +0,0 @@ -use crate::component::tests::util::{ - build_tx, build_tx_with_dep, build_tx_with_header_dep, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, -}; -use crate::component::{ - entry::TxEntry, - pool_map::{PoolEntry, PoolMap, Status}, -}; -use ckb_types::{h256, packed::Byte32, prelude::*}; -use std::collections::HashSet; - -#[test] -fn test_basic() { - let mut pool = PoolMap::new(100); - assert_eq!(pool.size(), 0); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&Byte32::zero(), 2)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 2)], - 3, - ); - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1.clone(), Status::Pending)); - assert!(pool.add_entry(entry2, Status::Pending)); - assert!(pool.size() == 2); - assert!(pool.contains_key(&tx1.proposal_short_id())); - assert!(pool.contains_key(&tx2.proposal_short_id())); - - assert_eq!(pool.inputs_len(), 4); - assert_eq!(pool.outputs_len(), 4); - - assert_eq!(pool.entries.get_by_id(&tx1.proposal_short_id()).unwrap().inner, entry1); - assert_eq!(pool.get_tx(&tx2.proposal_short_id()).unwrap(), &tx2); - - let txs = pool.drain(); - assert!(pool.entries.is_empty()); - assert!(pool.deps.is_empty()); - assert!(pool.inputs.is_empty()); - assert!(pool.header_deps.is_empty()); - assert!(pool.outputs.is_empty()); - assert_eq!(txs, vec![tx1, tx2]); -} - -#[test] -fn test_resolve_conflict() { - let mut pool = PoolMap::new(100); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], - 3, - ); - let tx3 = build_tx_with_dep( - vec![(&h256!("0x4").pack(), 1)], - vec![(&h256!("0x5").pack(), 1)], - 3, - ); - let tx4 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x1").pack(), 1)], - 3, - ); - let tx5 = build_tx(vec![(&h256!("0x5").pack(), 1)], 3); - - let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1.clone(), Status::Pending)); - assert!(pool.add_entry(entry2.clone(), Status::Pending)); - assert!(pool.add_entry(entry3.clone(), Status::Pending)); - - let conflicts = pool.resolve_conflict(&tx4); - assert_eq!( - conflicts.into_iter().map(|i| i.0).collect::>(), - HashSet::from_iter(vec![entry1, entry2]) - ); - - let conflicts = pool.resolve_conflict(&tx5); - assert_eq!( - conflicts.into_iter().map(|i| i.0).collect::>(), - HashSet::from_iter(vec![entry3]) - ); -} - -#[test] -fn test_resolve_conflict_descendants() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1)], 1); - let tx3 = build_tx(vec![(&tx1.hash(), 0)], 2); - let tx4 = build_tx(vec![(&tx3.hash(), 0)], 1); - - let tx2 = build_tx(vec![(&tx1.hash(), 0)], 1); - - let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry4 = TxEntry::dummy_resolve(tx4, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1, Status::Pending)); - assert!(pool.add_entry(entry3.clone(), Status::Pending)); - assert!(pool.add_entry(entry4.clone(), Status::Pending)); - - let conflicts = pool.resolve_conflict(&tx2); - assert_eq!( - conflicts.into_iter().map(|i| i.0).collect::>(), - HashSet::from_iter(vec![entry3, entry4]) - ); -} - -#[test] -fn test_resolve_conflict_header_dep() { - let mut pool = PoolMap::new(1000); - - let header: Byte32 = h256!("0x1").pack(); - let tx = build_tx_with_header_dep( - vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], - vec![header.clone()], - 1, - ); - let tx1 = build_tx(vec![(&tx.hash(), 0)], 1); - - let entry = TxEntry::dummy_resolve(tx, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry.clone(), Status::Pending)); - assert!(pool.add_entry(entry1.clone(), Status::Pending)); - - assert_eq!(pool.inputs_len(), 3); - assert_eq!(pool.header_deps_len(), 1); - assert_eq!(pool.outputs_len(), 2); - - let mut headers = HashSet::new(); - headers.insert(header); - - let conflicts = pool.resolve_conflict_header_dep(&headers); - assert_eq!( - conflicts.into_iter().map(|i| i.0).collect::>(), - HashSet::from_iter(vec![entry, entry1]) - ); -} - - -#[test] -fn test_remove_entry() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let header: Byte32 = h256!("0x1").pack(); - let tx2 = build_tx_with_header_dep(vec![(&h256!("0x2").pack(), 1)], vec![header], 1); - - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1.clone(), Status::Pending)); - assert!(pool.add_entry(entry2.clone(), Status::Pending)); - - let removed = pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(removed, Some(entry1)); - let removed = pool.remove_entry(&tx2.proposal_short_id()); - assert_eq!(removed, Some(entry2)); - assert!(pool.entries.is_empty()); - assert!(pool.deps.is_empty()); - assert!(pool.inputs.is_empty()); - assert!(pool.header_deps.is_empty()); -} - - -#[test] -fn test_remove_entries_by_filter() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], - 3, - ); - let tx3 = build_tx_with_dep( - vec![(&h256!("0x4").pack(), 1)], - vec![(&h256!("0x5").pack(), 1)], - 3, - ); - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1, Status::Pending)); - assert!(pool.add_entry(entry2, Status::Pending)); - assert!(pool.add_entry(entry3, Status::Pending)); - - pool.remove_entries_by_filter(|id, _tx_entry| id == &tx1.proposal_short_id()); - - assert!(!pool.contains_key(&tx1.proposal_short_id())); - assert!(pool.contains_key(&tx2.proposal_short_id())); - assert!(pool.contains_key(&tx3.proposal_short_id())); -} - - -#[test] -fn test_fill_proposals() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], - 3, - ); - let tx3 = build_tx_with_dep( - vec![(&h256!("0x4").pack(), 1)], - vec![(&h256!("0x5").pack(), 1)], - 3, - ); - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1, Status::Pending)); - assert!(pool.add_entry(entry2, Status::Pending)); - assert!(pool.add_entry(entry3, Status::Pending)); - - assert_eq!(pool.inputs_len(), 5); - assert_eq!(pool.deps_len(), 1); - assert_eq!(pool.outputs_len(), 7); - - let id1 = tx1.proposal_short_id(); - let id2 = tx2.proposal_short_id(); - let id3 = tx3.proposal_short_id(); - - let mut ret = HashSet::new(); - pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!( - ret, - HashSet::from_iter(vec![id1.clone(), id2.clone(), id3.clone()]) - ); - - let mut ret = HashSet::new(); - pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1.clone()])); - - let mut ret = HashSet::new(); - pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1.clone(), id2.clone()])); - - let mut ret = HashSet::new(); - let mut exclusion = HashSet::new(); - exclusion.insert(id2); - pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); -} diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index 7c784b36b4..ff5fa67866 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -2,7 +2,7 @@ use crate::component::tests::util::{ build_tx, build_tx_with_dep, build_tx_with_header_dep, DEFAULT_MAX_ANCESTORS_COUNT, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, }; -use crate::component::{entry::TxEntry, proposed::ProposedPool}; +use crate::component::{entry::TxEntry, pool_map::PoolMap}; use ckb_types::{ bytes::Bytes, core::{ @@ -49,16 +49,16 @@ fn test_add_entry() { let tx1_hash = tx1.hash(); let tx2 = build_tx(vec![(&tx1_hash, 0)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx1.clone(), |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx2, |_| None), MOCK_CYCLES, MOCK_FEE, @@ -91,21 +91,20 @@ fn test_add_entry_from_detached() { let id2 = entry2.proposal_short_id(); let id3 = entry3.proposal_short_id(); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(entry1.clone()).unwrap(); - pool.add_entry(entry2.clone()).unwrap(); - pool.add_entry(entry3).unwrap(); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); + pool.add_proposed(entry1.clone()).unwrap(); + pool.add_proposed(entry2.clone()).unwrap(); + pool.add_proposed(entry3).unwrap(); assert_eq!(pool.size(), 3); assert_eq!(pool.edges.outputs_len(), 3); assert_eq!(pool.edges.inputs_len(), 4); - assert_eq!(pool.inner().sorted_index.len(), 3); + assert_eq!(pool.size(), 3); let expected = vec![(id1.clone(), 1), (id2.clone(), 2), (id3.clone(), 3)]; let got = pool - .inner() - .sorted_index + .entries .iter() .map(|(_, key)| (key.id.clone(), key.score.ancestors_size)) .collect::>(); @@ -114,81 +113,78 @@ fn test_add_entry_from_detached() { // check link { - assert!(pool.inner().links.get_parents(&id1).unwrap().is_empty()); + assert!(pool.links.get_parents(&id1).unwrap().is_empty()); assert_eq!( - pool.inner().links.get_children(&id1).unwrap(), + pool.links.get_children(&id1).unwrap(), &HashSet::from_iter(vec![id2.clone()].into_iter()) ); assert_eq!( - pool.inner().links.get_parents(&id2).unwrap(), + pool.links.get_parents(&id2).unwrap(), &HashSet::from_iter(vec![id1.clone()].into_iter()) ); assert_eq!( - pool.inner() - .links + pool.links .get_children(&entry2.proposal_short_id()) .unwrap(), &HashSet::from_iter(vec![id3.clone()].into_iter()) ); assert_eq!( - pool.inner().links.get_parents(&id3).unwrap(), + pool.links.get_parents(&id3).unwrap(), &HashSet::from_iter(vec![id2.clone()].into_iter()) ); - assert!(pool.inner().links.get_children(&id3).unwrap().is_empty()); + assert!(pool.links.get_children(&id3).unwrap().is_empty()); } pool.remove_committed_tx(&tx1); assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); - assert_eq!(pool.inner().sorted_index.len(), 2); + assert_eq!(pool.entries.len(), 2); let left = vec![(id2.clone(), 1), (id3.clone(), 2)]; let got = pool - .inner() - .sorted_index + .entries .iter() .map(|(_, key)| (key.id.clone(), key.score.ancestors_size)) .collect::>(); assert_eq!(left, got); assert!(pool - .inner() .links .get_parents(&entry2.proposal_short_id()) .unwrap() .is_empty()); - assert!(pool.add_entry(entry1).unwrap()); - for (idx, (_, key)) in pool.inner().sorted_index.iter().enumerate() { - assert_eq!(key.id, expected[idx].0); - assert_eq!(key.score.ancestors_size, expected[idx].1); + assert!(pool.add_proposed(entry1).unwrap()); + + for (idx, (_, entry)) in pool.entries.iter().enumerate() { + assert_eq!(entry.id, expected[idx].0); + assert_eq!(entry.score.ancestors_size, expected[idx].1); } { - assert!(pool.inner().links.get_parents(&id1).unwrap().is_empty()); + assert!(pool.links.get_parents(&id1).unwrap().is_empty()); assert_eq!( - pool.inner().links.get_children(&id1).unwrap(), + pool.links.get_children(&id1).unwrap(), &HashSet::from_iter(vec![id2.clone()].into_iter()) ); assert_eq!( - pool.inner().links.get_parents(&id2).unwrap(), + pool.links.get_parents(&id2).unwrap(), &HashSet::from_iter(vec![id1].into_iter()) ); assert_eq!( - pool.inner() - .links + pool.links .get_children(&entry2.proposal_short_id()) .unwrap(), &HashSet::from_iter(vec![id3.clone()].into_iter()) ); assert_eq!( - pool.inner().links.get_parents(&id3).unwrap(), + pool.links.get_parents(&id3).unwrap(), &HashSet::from_iter(vec![id2].into_iter()) ); - assert!(pool.inner().links.get_children(&id3).unwrap().is_empty()); + assert!(pool.links.get_children(&id3).unwrap().is_empty()); } } @@ -200,16 +196,16 @@ fn test_add_roots() { 3, ); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx1.clone(), |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx2, |_| None), MOCK_CYCLES, MOCK_FEE, @@ -240,37 +236,37 @@ fn test_add_no_roots() { let tx3_hash = tx3.hash(); let tx5 = build_tx(vec![(&tx1_hash, 2), (&tx3_hash, 0)], 2); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx1.clone(), |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx2, |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx3, |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx4, |_| None), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, )) .unwrap(); - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve(tx5, |_| None), MOCK_CYCLES, MOCK_FEE, @@ -293,26 +289,26 @@ fn test_sorted_by_tx_fee_rate() { let tx2 = build_tx(vec![(&Byte32::zero(), 2)], 1); let tx3 = build_tx(vec![(&Byte32::zero(), 3)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let cycles = 5_000_000; let size = 200; - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx1.clone(), cycles, Capacity::shannons(100), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx2.clone(), cycles, Capacity::shannons(300), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx3.clone(), cycles, Capacity::shannons(200), @@ -337,33 +333,33 @@ fn test_sorted_by_ancestors_score() { let tx3 = build_tx(vec![(&tx1_hash, 2)], 1); let tx4 = build_tx(vec![(&tx2_hash, 1)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let cycles = 5_000_000; let size = 200; - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx1.clone(), cycles, Capacity::shannons(100), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx2.clone(), cycles, Capacity::shannons(300), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx3.clone(), cycles, Capacity::shannons(200), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx4.clone(), cycles, Capacity::shannons(400), @@ -395,7 +391,7 @@ fn test_sorted_by_ancestors_score_competitive() { let tx2_3_hash = tx2_3.hash(); let tx2_4 = build_tx(vec![(&tx2_3_hash, 0)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); // Choose 5_000_839, so the weight is 853.0001094046, which will not lead to carry when // calculating the weight for a package. @@ -403,7 +399,7 @@ fn test_sorted_by_ancestors_score_competitive() { let size = 200; for &tx in &[&tx1, &tx2, &tx3, &tx2_1, &tx2_2, &tx2_3, &tx2_4] { - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx.clone(), cycles, Capacity::shannons(200), @@ -430,33 +426,33 @@ fn test_get_ancestors() { let tx3 = build_tx(vec![(&tx1_hash, 1)], 1); let tx4 = build_tx(vec![(&tx2_hash, 0)], 1); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let cycles = 5_000_000; let size = 200; - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx1.clone(), cycles, Capacity::shannons(100), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx2.clone(), cycles, Capacity::shannons(300), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx3.clone(), cycles, Capacity::shannons(200), size, )) .unwrap(); - pool.add_entry(TxEntry::dummy_resolve( + pool.add_proposed(TxEntry::dummy_resolve( tx4.clone(), cycles, Capacity::shannons(400), @@ -561,9 +557,9 @@ fn test_dep_group() { } }; - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); for tx in &[&tx1, &tx2, &tx3] { - pool.add_entry(TxEntry::new( + pool.add_proposed(TxEntry::new( dummy_resolve((*tx).clone(), get_cell_data), MOCK_CYCLES, MOCK_FEE, @@ -572,7 +568,7 @@ fn test_dep_group() { .unwrap(); } - let get_deps_len = |pool: &ProposedPool, out_point: &OutPoint| -> usize { + let get_deps_len = |pool: &PoolMap, out_point: &OutPoint| -> usize { pool.edges .deps .get(out_point) @@ -591,7 +587,7 @@ fn test_dep_group() { #[test] fn test_resolve_conflict_header_dep() { - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let header: Byte32 = h256!("0x1").pack(); let tx = build_tx_with_header_dep( @@ -602,7 +598,7 @@ fn test_resolve_conflict_header_dep() { let entry = TxEntry::dummy_resolve(tx, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry.clone()).is_ok()); + assert!(pool.add_proposed(entry.clone()).is_ok()); let mut headers = HashSet::new(); headers.insert(header); @@ -633,10 +629,10 @@ fn test_disordered_remove_committed_tx() { MOCK_SIZE, ); - let mut pool = ProposedPool::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut pool = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); - pool.add_entry(entry1).unwrap(); - pool.add_entry(entry2).unwrap(); + pool.add_proposed(entry1).unwrap(); + pool.add_proposed(entry2).unwrap(); assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); @@ -650,7 +646,7 @@ fn test_disordered_remove_committed_tx() { #[test] fn test_max_ancestors() { - let mut pool = ProposedPool::new(1); + let mut pool = PoolMap::new(1); let tx1 = build_tx(vec![(&Byte32::zero(), 0)], 1); let tx1_id = tx1.proposal_short_id(); let tx1_hash = tx1.hash(); @@ -659,15 +655,15 @@ fn test_max_ancestors() { let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1).is_ok()); - assert!(pool.add_entry(entry2).is_err()); + assert!(pool.add_proposed(entry1).is_ok()); + assert!(pool.add_proposed(entry2).is_err()); assert_eq!( - pool.inner() + pool.links .get_children(&tx1_id) .map(|children| children.is_empty()), Some(true) ); - assert!(pool.inner().calc_descendants(&tx1_id).is_empty()); + assert!(pool.calc_descendants(&tx1_id).is_empty()); assert_eq!(pool.edges.inputs_len(), 1); assert_eq!(pool.edges.outputs_len(), 1); @@ -675,7 +671,7 @@ fn test_max_ancestors() { #[test] fn test_max_ancestors_with_dep() { - let mut pool = ProposedPool::new(1); + let mut pool = PoolMap::new(1); let tx1 = build_tx_with_dep( vec![(&Byte32::zero(), 0)], vec![(&h256!("0x1").pack(), 0)], @@ -687,14 +683,14 @@ fn test_max_ancestors_with_dep() { let entry1 = TxEntry::dummy_resolve(tx1, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); let entry2 = TxEntry::dummy_resolve(tx2, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1).is_ok()); - assert!(pool.add_entry(entry2).is_err()); - assert_eq!(pool.inner().deps().len(), 1); + assert!(pool.add_proposed(entry1).is_ok()); + assert!(pool.add_proposed(entry2).is_err()); + assert_eq!(pool.edges.deps.len(), 1); assert!(pool - .inner() - .deps() + .edges + .deps .contains_key(&OutPoint::new(h256!("0x1").pack(), 0))); - assert!(pool.inner().calc_descendants(&tx1_id).is_empty()); + assert!(pool.calc_descendants(&tx1_id).is_empty()); assert_eq!(pool.edges.inputs_len(), 1); assert_eq!(pool.edges.outputs_len(), 1); diff --git a/tx-pool/src/component/tests/container.rs b/tx-pool/src/component/tests/score_key.rs similarity index 94% rename from tx-pool/src/component/tests/container.rs rename to tx-pool/src/component/tests/score_key.rs index 5c06e9601f..22da657f84 100644 --- a/tx-pool/src/component/tests/container.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -6,10 +6,7 @@ use ckb_types::{ }; use std::mem::size_of; -use crate::component::{ - container::{AncestorsScoreSortKey, SortedTxMap}, - entry::TxEntry, -}; +use crate::component::{entry::TxEntry, pool_map::PoolMap, score_key::AncestorsScoreSortKey}; const DEFAULT_MAX_ANCESTORS_COUNT: usize = 125; @@ -98,7 +95,7 @@ fn test_ancestors_sorted_key_order() { #[test] fn test_remove_entry() { - let mut map = SortedTxMap::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut map = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let tx1 = TxEntry::dummy_resolve( TransactionBuilder::default().build(), 100, @@ -144,9 +141,9 @@ fn test_remove_entry() { let tx1_id = tx1.proposal_short_id(); let tx2_id = tx2.proposal_short_id(); let tx3_id = tx3.proposal_short_id(); - map.add_entry(tx1).unwrap(); - map.add_entry(tx2).unwrap(); - map.add_entry(tx3).unwrap(); + map.add_proposed(tx1).unwrap(); + map.add_proposed(tx2).unwrap(); + map.add_proposed(tx3).unwrap(); let descendants_set = map.calc_descendants(&tx1_id); assert!(descendants_set.contains(&tx2_id)); assert!(descendants_set.contains(&tx3_id)); @@ -171,7 +168,7 @@ fn test_remove_entry() { #[test] fn test_remove_entry_and_descendants() { - let mut map = SortedTxMap::new(DEFAULT_MAX_ANCESTORS_COUNT); + let mut map = PoolMap::new(DEFAULT_MAX_ANCESTORS_COUNT); let tx1 = TxEntry::dummy_resolve( TransactionBuilder::default().build(), 100, @@ -217,9 +214,9 @@ fn test_remove_entry_and_descendants() { let tx1_id = tx1.proposal_short_id(); let tx2_id = tx2.proposal_short_id(); let tx3_id = tx3.proposal_short_id(); - map.add_entry(tx1).unwrap(); - map.add_entry(tx2).unwrap(); - map.add_entry(tx3).unwrap(); + map.add_proposed(tx1).unwrap(); + map.add_proposed(tx2).unwrap(); + map.add_proposed(tx3).unwrap(); let descendants_set = map.calc_descendants(&tx1_id); assert!(descendants_set.contains(&tx2_id)); assert!(descendants_set.contains(&tx3_id)); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index c9dcc3b7fd..c01ad22b64 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -3,23 +3,23 @@ extern crate rustc_hash; extern crate slab; use super::component::{commit_txs_scanner::CommitTxsScanner, TxEntry}; use crate::callback::Callbacks; -use crate::component::pending::PendingQueue; -use crate::component::proposed::ProposedPool; +use crate::component::pool_map::{PoolEntry, PoolMap, Status}; use crate::component::recent_reject::RecentReject; use crate::error::Reject; -use crate::component::pool_map::{PoolMap, Status}; use crate::util::verify_rtx; use ckb_app_config::TxPoolConfig; -use ckb_logger::{debug, error, trace, warn}; +use ckb_logger::{debug, error, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; use ckb_types::{ core::{ cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, + cell::{CellChecker, CellMetaBuilder, CellProvider, CellStatus}, tx_pool::{TxPoolEntryInfo, TxPoolIds}, Cycle, TransactionView, UncleBlockView, }, - packed::{Byte32, ProposalShortId}, + packed::{Byte32, OutPoint, ProposalShortId}, + prelude::*, }; use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; @@ -28,44 +28,9 @@ use std::sync::Arc; const COMMITTED_HASH_CACHE_SIZE: usize = 100_000; -// limit the size of the pool by sorting out tx based on EvictKey. -macro_rules! evict_for_trim_size { - ($self:ident, $pool:expr, $callbacks:expr) => { - if let Some(id) = $pool - .iter() - .min_by_key(|(_id, entry)| entry.as_evict_key()) - .map(|(id, _)| id) - .cloned() - { - let removed = $pool.remove_entry_and_descendants(&id); - for entry in removed { - let tx_hash = entry.transaction().hash(); - debug!( - "removed by size limit {} timestamp({})", - tx_hash, entry.timestamp - ); - let reject = Reject::Full(format!( - "the fee_rate for this transaction is: {}", - entry.fee_rate() - )); - $callbacks.call_reject($self, &entry, reject); - } - } - }; -} - -type ConflictEntry = (TxEntry, Reject); - /// Tx-pool implementation pub struct TxPool { pub(crate) config: TxPoolConfig, - /// The short id that has not been proposed - pub(crate) pending: PendingQueue, - /// The proposal gap - pub(crate) gap: PendingQueue, - /// Tx pool that finely for commit - pub(crate) proposed: ProposedPool, - pub(crate) pool_map: PoolMap, /// cache for committed transactions hash pub(crate) committed_txs_hash_cache: LruCache, @@ -87,9 +52,6 @@ impl TxPool { let recent_reject = Self::build_recent_reject(&config); let expiry = config.expiry_hours as u64 * 60 * 60 * 1000; TxPool { - pending: PendingQueue::new(), - gap: PendingQueue::new(), - proposed: ProposedPool::new(config.max_ancestors_count), pool_map: PoolMap::new(config.max_ancestors_count), committed_txs_hash_cache: LruCache::new(COMMITTED_HASH_CACHE_SIZE), total_tx_size: 0, @@ -102,18 +64,22 @@ impl TxPool { } /// Tx-pool owned snapshot, it may not consistent with chain cause tx-pool update snapshot asynchronously - pub fn snapshot(&self) -> &Snapshot { + pub(crate) fn snapshot(&self) -> &Snapshot { &self.snapshot } /// Makes a clone of the `Arc` - pub fn cloned_snapshot(&self) -> Arc { + pub(crate) fn cloned_snapshot(&self) -> Arc { Arc::clone(&self.snapshot) } - /// Whether Tx-pool reach size limit - pub fn reach_size_limit(&self, tx_size: usize) -> bool { - (self.total_tx_size + tx_size) > self.config.max_tx_pool_size + fn get_by_status(&self, status: &Status) -> Vec<&PoolEntry> { + self.pool_map.entries.get_by_status(status) + } + + /// Get tx-pool size + pub fn status_size(&self, status: &Status) -> usize { + self.get_by_status(status).len() } /// Update size and cycles statics for add tx @@ -143,123 +109,38 @@ impl TxPool { self.total_tx_cycles = total_tx_cycles; } - /// Add tx to pending pool + /// Add tx with pending status /// If did have this value present, false is returned. - pub fn add_pending(&mut self, entry: TxEntry) -> bool { - if self.gap.contains_key(&entry.proposal_short_id()) { - return false; - } - trace!("add_pending {}", entry.transaction().hash()); - self.pending.add_entry(entry) - } - - pub fn add_pending_v2(&mut self, entry: TxEntry) -> bool { + pub(crate) fn add_pending(&mut self, entry: TxEntry) -> Result { self.pool_map.add_entry(entry, Status::Pending) } - /// Add tx which proposed but still uncommittable to gap pool - pub fn add_gap(&mut self, entry: TxEntry) -> bool { - if self.proposed.contains_key(&entry.proposal_short_id()) { - return false; - } - trace!("add_gap {}", entry.transaction().hash()); - self.gap.add_entry(entry) - } - - pub fn add_gap_v2(&mut self, entry: TxEntry) -> bool { + /// Add tx which proposed but still uncommittable to gap + pub(crate) fn add_gap(&mut self, entry: TxEntry) -> Result { self.pool_map.add_entry(entry, Status::Gap) } - /// Add tx to proposed pool - pub fn add_proposed(&mut self, entry: TxEntry) -> Result { - trace!("add_proposed {}", entry.transaction().hash()); - self.proposed.add_entry(entry) - } - - pub fn add_proposed_v2(&mut self, entry: TxEntry) -> bool { + /// Add tx with proposed status + pub(crate) fn add_proposed(&mut self, entry: TxEntry) -> Result { self.pool_map.add_entry(entry, Status::Proposed) } /// Returns true if the tx-pool contains a tx with specified id. - pub fn contains_proposal_id(&self, id: &ProposalShortId) -> bool { - self.pending.contains_key(id) || self.gap.contains_key(id) || self.proposed.contains_key(id) - } - - pub fn contains_proposal_id_v2(&self, id: &ProposalShortId) -> bool { + pub(crate) fn contains_proposal_id(&self, id: &ProposalShortId) -> bool { self.pool_map.get_by_id(id).is_some() } /// Returns tx with cycles corresponding to the id. - pub fn get_tx_with_cycles(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { - self.pending - .get(id) - .map(|entry| (entry.transaction().clone(), entry.cycles)) - .or_else(|| { - self.gap - .get(id) - .map(|entry| (entry.transaction().clone(), entry.cycles)) - }) - .or_else(|| { - self.proposed - .get(id) - .map(|entry| (entry.transaction().clone(), entry.cycles)) - }) - } - - pub fn get_tx_with_cycles_v2(&self, id: &ProposalShortId) -> Option<(TransactionView, Cycle)> { + pub(crate) fn get_tx_with_cycles( + &self, + id: &ProposalShortId, + ) -> Option<(TransactionView, Cycle)> { self.pool_map .get_by_id(id) .map(|entry| (entry.inner.transaction().clone(), entry.inner.cycles)) } - /// Returns tx corresponding to the id. - pub fn get_tx(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.pending - .get_tx(id) - .or_else(|| self.gap.get_tx(id)) - .or_else(|| self.proposed.get_tx(id)) - } - - pub fn get_tx_v2(&self, id: &ProposalShortId) -> Option<&TransactionView> { - self.pool_map - .get_by_id(id) - .map(|entry| entry.inner.transaction()) - } - - /// Returns tx from pending and gap corresponding to the id. RPC - pub fn get_entry_from_pending_or_gap(&self, id: &ProposalShortId) -> Option<&TxEntry> { - self.pending.get(id).or_else(|| self.gap.get(id)) - } - - pub fn get_entry_from_pending_or_gap_v2(&self, id: &ProposalShortId) -> Option<&TxEntry> { - if let Some(entry) = self.pool_map.get_by_id(id) { - match entry.status { - Status::Pending | Status::Gap => return Some(&entry.inner), - _ => return None, - } - } else { - return None; - } - } - - pub(crate) fn proposed(&self) -> &ProposedPool { - &self.proposed - } - - pub(crate) fn get_tx_from_proposed_and_others( - &self, - id: &ProposalShortId, - ) -> Option<&TransactionView> { - self.proposed - .get_tx(id) - .or_else(|| self.gap.get_tx(id)) - .or_else(|| self.pending.get_tx(id)) - } - - pub(crate) fn get_tx_from_proposed_and_others_v2( - &self, - id: &ProposalShortId, - ) -> Option<&TransactionView> { + pub(crate) fn get_tx_from_pool(&self, id: &ProposalShortId) -> Option<&TransactionView> { self.pool_map .get_by_id(id) .map(|entry| entry.inner.transaction()) @@ -273,7 +154,6 @@ impl TxPool { ) { for tx in txs { let tx_hash = tx.hash(); - debug!("try remove_committed_tx {}", tx_hash); self.remove_committed_tx(tx, callbacks); self.committed_txs_hash_cache @@ -290,77 +170,14 @@ impl TxPool { detached_headers: &HashSet, callbacks: &Callbacks, ) { - for (entry, reject) in self.proposed.resolve_conflict_header_dep(detached_headers) { - callbacks.call_reject(self, &entry, reject); - } - for (entry, reject) in self.gap.resolve_conflict_header_dep(detached_headers) { - callbacks.call_reject(self, &entry, reject); - } - for (entry, reject) in self.pending.resolve_conflict_header_dep(detached_headers) { - callbacks.call_reject(self, &entry, reject); - } - } - - pub(crate) fn resolve_conflict_header_dep_v2( - &mut self, - detached_headers: &HashSet, - callbacks: &Callbacks, - ) { - for (entry, reject) in self - .pool_map - .resolve_conflict_header_dep(detached_headers) - { + for (entry, reject) in self.pool_map.resolve_conflict_header_dep(detached_headers) { callbacks.call_reject(self, &entry, reject); } } pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { - let hash = tx.hash(); - let short_id = tx.proposal_short_id(); - // try remove committed tx from proposed - // proposed tx should not contain conflict, if exists just skip resolve conflict - if let Some(entry) = self.proposed.remove_committed_tx(tx) { - debug!("remove_committed_tx from proposed {}", hash); - callbacks.call_committed(self, &entry) - } else { - let conflicts = self.proposed.resolve_conflict(tx); - - for (entry, reject) in conflicts { - callbacks.call_reject(self, &entry, reject); - } - } - - // pending and gap should resolve conflict no matter exists or not - if let Some(entry) = self.gap.remove_entry(&short_id) { - debug!("remove_committed_tx from gap {}", hash); - callbacks.call_committed(self, &entry) - } - { - let conflicts = self.gap.resolve_conflict(tx); - - for (entry, reject) in conflicts { - callbacks.call_reject(self, &entry, reject); - } - } - - if let Some(entry) = self.pending.remove_entry(&short_id) { - debug!("remove_committed_tx from pending {}", hash); - callbacks.call_committed(self, &entry) - } - { - let conflicts = self.pending.resolve_conflict(tx); - - for (entry, reject) in conflicts { - callbacks.call_reject(self, &entry, reject); - } - } - } - - pub(crate) fn remove_committed_tx_v2(&mut self, tx: &TransactionView, callbacks: &Callbacks) { - let hash = tx.hash(); let short_id = tx.proposal_short_id(); if let Some(entry) = self.pool_map.remove_entry(&short_id) { - debug!("remove_committed_tx from gap {}", hash); callbacks.call_committed(self, &entry) } { @@ -373,37 +190,6 @@ impl TxPool { // Expire all transaction (and their dependencies) in the pool. pub(crate) fn remove_expired(&mut self, callbacks: &Callbacks) { - let now_ms = ckb_systemtime::unix_time_as_millis(); - let expired = - |_id: &ProposalShortId, tx_entry: &TxEntry| self.expiry + tx_entry.timestamp < now_ms; - let mut removed = self.pending.remove_entries_by_filter(expired); - removed.extend(self.gap.remove_entries_by_filter(expired)); - let removed_proposed_ids: Vec<_> = self - .proposed - .iter() - .filter_map(|(id, tx_entry)| { - if self.expiry + tx_entry.timestamp < now_ms { - Some(id) - } else { - None - } - }) - .cloned() - .collect(); - for id in removed_proposed_ids { - removed.extend(self.proposed.remove_entry_and_descendants(&id)) - } - - for entry in removed { - let tx_hash = entry.transaction().hash(); - debug!("remove_expired {} timestamp({})", tx_hash, entry.timestamp); - let reject = Reject::Expiry(entry.timestamp); - callbacks.call_reject(self, &entry, reject); - } - } - - // Expire all transaction (and their dependencies) in the pool. - pub(crate) fn remove_expired_v2(&mut self, callbacks: &Callbacks) { let now_ms = ckb_systemtime::unix_time_as_millis(); let removed: Vec<_> = self .pool_map @@ -414,8 +200,6 @@ impl TxPool { for entry in removed { self.pool_map.remove_entry(&entry.proposal_short_id()); - let tx_hash = entry.transaction().hash(); - debug!("remove_expired {} timestamp({})", tx_hash, entry.timestamp); let reject = Reject::Expiry(entry.timestamp); callbacks.call_reject(self, &entry, reject); } @@ -423,18 +207,6 @@ impl TxPool { // Remove transactions from the pool until total size < size_limit. pub(crate) fn limit_size(&mut self, callbacks: &Callbacks) { - while self.total_tx_size > self.config.max_tx_pool_size { - if !self.pending.is_empty() { - evict_for_trim_size!(self, self.pending, callbacks) - } else if !self.gap.is_empty() { - evict_for_trim_size!(self, self.gap, callbacks) - } else { - evict_for_trim_size!(self, self.proposed, callbacks) - } - } - } - - pub(crate) fn limit_size_v2(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { if let Some(id) = self.pool_map.next_evict_entry() { let removed = self.pool_map.remove_entry_and_descendants(&id); @@ -459,40 +231,10 @@ impl TxPool { pub(crate) fn remove_by_detached_proposal<'a>( &mut self, ids: impl Iterator, - ) { - for id in ids { - if let Some(entry) = self.gap.remove_entry(id) { - let tx_hash = entry.transaction().hash(); - let ret = self.add_pending(entry); - debug!( - "remove_by_detached_proposal from gap {} add_pending {}", - tx_hash, ret - ); - } - let mut entries = self.proposed.remove_entry_and_descendants(id); - entries.sort_unstable_by_key(|entry| entry.ancestors_count); - for mut entry in entries { - let tx_hash = entry.transaction().hash(); - entry.reset_ancestors_state(); - let ret = self.add_pending(entry); - debug!( - "remove_by_detached_proposal from proposed {} add_pending {}", - tx_hash, ret - ); - } - } - } - - // remove transaction with detached proposal from gap and proposed - // try re-put to pending - pub(crate) fn remove_by_detached_proposal_v2<'a>( - &mut self, - ids: impl Iterator, ) { for id in ids { if let Some(e) = self.pool_map.get_by_id(id) { let status = e.status; - // TODO: double check this if status == Status::Pending { continue; } @@ -503,7 +245,7 @@ impl TxPool { entry.reset_ancestors_state(); let ret = self.add_pending(entry); debug!( - "remove_by_detached_proposal from {:?} {} add_pending {}", + "remove_by_detached_proposal from {:?} {} add_pending {:?}", status, tx_hash, ret ); } @@ -512,7 +254,7 @@ impl TxPool { } pub(crate) fn remove_tx(&mut self, id: &ProposalShortId) -> bool { - let entries = self.proposed.remove_entry_and_descendants(id); + let entries = self.pool_map.remove_entry_and_descendants(id); if !entries.is_empty() { for entry in entries { self.update_statics_for_remove_tx(entry.size, entry.cycles); @@ -520,20 +262,6 @@ impl TxPool { return true; } - if let Some(entry) = self.gap.remove_entry(id) { - self.update_statics_for_remove_tx(entry.size, entry.cycles); - return true; - } - - if let Some(entry) = self.pending.remove_entry(id) { - self.update_statics_for_remove_tx(entry.size, entry.cycles); - return true; - } - - false - } - - pub(crate) fn remove_tx_v2(&mut self, id: &ProposalShortId) -> bool { if let Some(entry) = self.pool_map.remove_entry(id) { self.update_statics_for_remove_tx(entry.size, entry.cycles); return true; @@ -541,102 +269,48 @@ impl TxPool { false } - pub(crate) fn resolve_tx_from_pending_and_proposed( - &self, - tx: TransactionView, - ) -> Result, Reject> { - let snapshot = self.snapshot(); - let proposed_provider = OverlayCellProvider::new(&self.proposed, snapshot); - let gap_and_proposed_provider = OverlayCellProvider::new(&self.gap, &proposed_provider); - let pending_and_proposed_provider = - OverlayCellProvider::new(&self.pending, &gap_and_proposed_provider); - let mut seen_inputs = HashSet::new(); - resolve_transaction( - tx, - &mut seen_inputs, - &pending_and_proposed_provider, - snapshot, - ) - .map(Arc::new) - .map_err(Reject::Resolve) - } - - pub(crate) fn resolve_tx_from_pending_and_proposed_v2( - &self, - tx: TransactionView, - ) -> Result, Reject> { - let snapshot = self.snapshot(); - let provider = OverlayCellProvider::new(&self.pool_map.entries, snapshot); - let mut seen_inputs = HashSet::new(); - resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) - .map(Arc::new) - .map_err(Reject::Resolve) - } - pub(crate) fn check_rtx_from_pending_and_proposed( &self, rtx: &ResolvedTransaction, ) -> Result<(), Reject> { let snapshot = self.snapshot(); - let proposed_checker = OverlayCellChecker::new(&self.proposed, snapshot); - let gap_and_proposed_checker = OverlayCellChecker::new(&self.gap, &proposed_checker); - let pending_and_proposed_checker = - OverlayCellChecker::new(&self.pending, &gap_and_proposed_checker); + let proposal_checker = OverlayCellChecker::new(&self.pool_map, snapshot); + let checker = OverlayCellChecker::new(self, &proposal_checker); let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &pending_and_proposed_checker, snapshot) + rtx.check(&mut seen_inputs, &checker, snapshot) .map_err(Reject::Resolve) } - pub(crate) fn check_rtx_from_pending_and_proposed_v2( - &self, - rtx: &ResolvedTransaction, - ) -> Result<(), Reject> { + pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { let snapshot = self.snapshot(); - let checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); + let proposal_checker = OverlayCellChecker::new(&self.pool_map, snapshot); let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &checker, snapshot) + rtx.check(&mut seen_inputs, &proposal_checker, snapshot) .map_err(Reject::Resolve) } - pub(crate) fn resolve_tx_from_proposed( + pub(crate) fn resolve_tx_from_pending_and_proposed( &self, tx: TransactionView, ) -> Result, Reject> { let snapshot = self.snapshot(); - let cell_provider = OverlayCellProvider::new(&self.proposed, snapshot); + let proposed_provider = OverlayCellProvider::new(&self.pool_map, snapshot); + let provider = OverlayCellProvider::new(self, &proposed_provider); let mut seen_inputs = HashSet::new(); - resolve_transaction(tx, &mut seen_inputs, &cell_provider, snapshot) + resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) .map(Arc::new) .map_err(Reject::Resolve) } - pub(crate) fn resolve_tx_from_proposed_v2( - &self, - rtx: &ResolvedTransaction, - ) -> Result<(), Reject> { - let snapshot = self.snapshot(); - let checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); - let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &checker, snapshot) - .map_err(Reject::Resolve) - } - - pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { - let snapshot = self.snapshot(); - let cell_checker = OverlayCellChecker::new(&self.proposed, snapshot); - let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &cell_checker, snapshot) - .map_err(Reject::Resolve) - } - - pub(crate) fn check_rtx_from_proposed_v2( + pub(crate) fn resolve_tx_from_proposed( &self, - rtx: &ResolvedTransaction, - ) -> Result<(), Reject> { + tx: TransactionView, + ) -> Result, Reject> { let snapshot = self.snapshot(); - let cell_checker = OverlayCellChecker::new(&self.pool_map.entries, snapshot); + let proposed_provider = OverlayCellProvider::new(&self.pool_map, snapshot); let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &cell_checker, snapshot) + resolve_transaction(tx, &mut seen_inputs, &proposed_provider, snapshot) + .map(Arc::new) .map_err(Reject::Resolve) } @@ -663,8 +337,9 @@ impl TxPool { let entry = TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); + let tx_hash = entry.transaction().hash(); - if self.add_gap(entry) { + if self.add_gap(entry).unwrap_or(false) { Ok(CacheEntry::Completed(verified)) } else { Err(Reject::Duplicated(tx_hash)) @@ -695,6 +370,11 @@ impl TxPool { let entry = TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); let tx_hash = entry.transaction().hash(); + debug!( + "proposed_rtx: {:?} => {:?}", + tx_hash, + entry.proposal_short_id() + ); if self.add_proposed(entry)? { Ok(CacheEntry::Completed(verified)) } else { @@ -703,76 +383,45 @@ impl TxPool { } /// Get to-be-proposal transactions that may be included in the next block. - pub fn get_proposals( - &self, - limit: usize, - exclusion: &HashSet, - ) -> HashSet { - let mut proposals = HashSet::with_capacity(limit); - self.pending - .fill_proposals(limit, exclusion, &mut proposals); - self.gap.fill_proposals(limit, exclusion, &mut proposals); - proposals - } - - /// Get to-be-proposal transactions that may be included in the next block. - pub fn get_proposals_v2( + /// TODO: do we need to consider the something like score, so that we can + /// provide best transactions to be proposed. + pub(crate) fn get_proposals( &self, limit: usize, exclusion: &HashSet, ) -> HashSet { let mut proposals = HashSet::with_capacity(limit); - self.pool_map.fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); - self.pool_map.fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); + self.pool_map + .fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); + self.pool_map + .fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); proposals } /// Returns tx from tx-pool or storage corresponding to the id. - pub fn get_tx_from_pool_or_store( + pub(crate) fn get_tx_from_pool_or_store( &self, proposal_id: &ProposalShortId, ) -> Option { - self.get_tx_from_proposed_and_others(proposal_id) - .cloned() - .or_else(|| { - self.committed_txs_hash_cache - .peek(proposal_id) - .and_then(|tx_hash| self.snapshot().get_transaction(tx_hash).map(|(tx, _)| tx)) - }) + self.get_tx_from_pool(proposal_id).cloned().or_else(|| { + self.committed_txs_hash_cache + .peek(proposal_id) + .and_then(|tx_hash| self.snapshot().get_transaction(tx_hash).map(|(tx, _)| tx)) + }) } pub(crate) fn get_ids(&self) -> TxPoolIds { - let pending = self - .pending - .iter() - .map(|(_, entry)| entry.transaction().hash()) - .chain(self.gap.iter().map(|(_, entry)| entry.transaction().hash())) - .collect(); - - let proposed = self - .proposed - .iter() - .map(|(_, entry)| entry.transaction().hash()) - .collect(); - - TxPoolIds { pending, proposed } - } - - // This is for RPC request, performance is not critical - pub(crate) fn get_ids_v2(&self) -> TxPoolIds { let pending: Vec = self - .pool_map - .entries .get_by_status(&Status::Pending) .iter() - .chain(self.pool_map.entries.get_by_status(&Status::Gap).iter()) + .chain(self.get_by_status(&Status::Gap).iter()) .map(|entry| entry.inner.transaction().hash()) .collect(); let proposed: Vec = self - .proposed + .get_by_status(&Status::Proposed) .iter() - .map(|(_, entry)| entry.transaction().hash()) + .map(|entry| entry.inner.transaction().hash()) .collect(); TxPoolIds { pending, proposed } @@ -780,38 +429,13 @@ impl TxPool { pub(crate) fn get_all_entry_info(&self) -> TxPoolEntryInfo { let pending = self - .pending - .iter() - .map(|(_, entry)| (entry.transaction().hash(), entry.to_info())) - .chain( - self.gap - .iter() - .map(|(_, entry)| (entry.transaction().hash(), entry.to_info())), - ) - .collect(); - - let proposed = self - .proposed - .iter() - .map(|(_, entry)| (entry.transaction().hash(), entry.to_info())) - .collect(); - - TxPoolEntryInfo { pending, proposed } - } - - pub(crate) fn get_all_entry_info_v2(&self) -> TxPoolEntryInfo { - let pending = self - .pool_map - .entries .get_by_status(&Status::Pending) .iter() - .chain(self.pool_map.entries.get_by_status(&Status::Gap).iter()) + .chain(self.get_by_status(&Status::Gap).iter()) .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) .collect(); let proposed = self - .pool_map - .entries .get_by_status(&Status::Proposed) .iter() .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) @@ -821,29 +445,12 @@ impl TxPool { } pub(crate) fn drain_all_transactions(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed, &self.pool_map.entries) - .txs_to_commit(self.total_tx_size, self.total_tx_cycles) - .0 - .into_iter() - .map(|tx_entry| tx_entry.into_transaction()) - .collect::>(); - self.proposed.clear(); - txs.append(&mut self.gap.drain()); - txs.append(&mut self.pending.drain()); - self.total_tx_size = 0; - self.total_tx_cycles = 0; - // self.touch_last_txs_updated_at(); - txs - } - - pub(crate) fn drain_all_transactions_v2(&mut self) -> Vec { - let mut txs = CommitTxsScanner::new(&self.proposed, &self.pool_map.entries) + let mut txs = CommitTxsScanner::new(&self.pool_map) .txs_to_commit(self.total_tx_size, self.total_tx_cycles) .0 .into_iter() .map(|tx_entry| tx_entry.into_transaction()) .collect::>(); - self.proposed.clear(); let mut pending = self .pool_map .entries @@ -868,9 +475,6 @@ impl TxPool { } pub(crate) fn clear(&mut self, snapshot: Arc) { - self.pending = PendingQueue::new(); - self.gap = PendingQueue::new(); - self.proposed = ProposedPool::new(self.config.max_ancestors_count); self.pool_map.clear(); self.snapshot = snapshot; self.committed_txs_hash_cache = LruCache::new(COMMITTED_HASH_CACHE_SIZE); @@ -896,8 +500,7 @@ impl TxPool { txs_size_limit: usize, ) -> (Vec, usize, Cycle) { let (entries, size, cycles) = - CommitTxsScanner::new(self.proposed(), &self.pool_map.entries) - .txs_to_commit(txs_size_limit, max_block_cycles); + CommitTxsScanner::new(&self.pool_map).txs_to_commit(txs_size_limit, max_block_cycles); if !entries.is_empty() { ckb_logger::info!( @@ -935,3 +538,49 @@ impl TxPool { } } } + +/// This is a hack right now, we use `CellProvider` to check if a transaction is in `Pending` or `Gap` status. +/// To make sure the behavior is same as before, we need to remove this if we have finished replace-by-fee strategy. +impl CellProvider for TxPool { + fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { + let tx_hash = out_point.tx_hash(); + match self + .pool_map + .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) + { + Some(pool_entry) if pool_entry.status != Status::Proposed => { + match pool_entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + { + Some((output, data)) => { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } + None => CellStatus::Unknown, + } + } + _ => CellStatus::Unknown, + } + } +} + +impl CellChecker for TxPool { + fn is_live(&self, out_point: &OutPoint) -> Option { + let tx_hash = out_point.tx_hash(); + match self + .pool_map + .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) + { + Some(pool_entry) if pool_entry.status != Status::Proposed => pool_entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + .map(|_| true), + _ => None, + } + } +} diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index b5062b3b35..00dde6d264 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -1,6 +1,7 @@ use crate::callback::Callbacks; use crate::component::entry::TxEntry; use crate::component::orphan::Entry as OrphanEntry; +use crate::component::pool_map::Status; use crate::error::Reject; use crate::pool::TxPool; use crate::service::{BlockAssemblerMessage, TxPoolService, TxVerificationResult}; @@ -51,6 +52,7 @@ pub enum TxStatus { Proposed, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum ProcessResult { Suspended, Completed(Completed), @@ -122,7 +124,6 @@ impl TxPoolService { } _submit_entry(tx_pool, status, entry.clone(), &self.callbacks)?; - Ok(()) }) .await; @@ -276,7 +277,6 @@ impl TxPoolService { if let Some((ret, snapshot)) = self._process_tx(tx.clone(), remote.map(|r| r.0)).await { self.after_process(tx, remote, &snapshot, &ret).await; - ret } else { // currently, the returned cycles is not been used, mock 0 if delay @@ -360,7 +360,6 @@ impl TxPoolService { self.process_orphan_tx(&tx).await; } Err(reject) => { - debug!("after_process {} reject: {} ", tx_hash, reject); if is_missing_input(reject) && all_inputs_is_unknown(snapshot, &tx) { self.add_orphan(tx, peer, declared_cycle).await; } else { @@ -551,7 +550,6 @@ impl TxPoolService { let tx_hash = tx.hash(); let (ret, snapshot) = self.pre_check(&tx).await; - let (tip_hash, rtx, status, fee, tx_size) = try_or_return_with_snapshot!(ret, snapshot); if self.is_in_delay_window(&snapshot) { @@ -640,7 +638,6 @@ impl TxPoolService { try_or_return_with_snapshot!(ret, submit_snapshot); self.notify_block_assembler(status).await; - if cached.is_none() { // update cache let txs_verify_cache = Arc::clone(&self.txs_verify_cache); @@ -962,38 +959,36 @@ fn check_rtx( rtx: &ResolvedTransaction, ) -> Result { let short_id = rtx.transaction.proposal_short_id(); - if snapshot.proposals().contains_proposed(&short_id) { - tx_pool - .check_rtx_from_proposed(rtx) - .map(|_| TxStatus::Proposed) + let tx_status = if snapshot.proposals().contains_proposed(&short_id) { + TxStatus::Proposed + } else if snapshot.proposals().contains_gap(&short_id) { + TxStatus::Gap } else { - let tx_status = if snapshot.proposals().contains_gap(&short_id) { - TxStatus::Gap - } else { - TxStatus::Fresh - }; - tx_pool - .check_rtx_from_pending_and_proposed(rtx) - .map(|_| tx_status) + TxStatus::Fresh + }; + if tx_status == TxStatus::Proposed { + tx_pool.check_rtx_from_proposed(rtx) + } else { + tx_pool.check_rtx_from_pending_and_proposed(rtx) } + .map(|_| tx_status) } fn resolve_tx(tx_pool: &TxPool, snapshot: &Snapshot, tx: TransactionView) -> ResolveResult { let short_id = tx.proposal_short_id(); - if snapshot.proposals().contains_proposed(&short_id) { - tx_pool - .resolve_tx_from_proposed(tx) - .map(|rtx| (rtx, TxStatus::Proposed)) + let tx_status = if snapshot.proposals().contains_proposed(&short_id) { + TxStatus::Proposed + } else if snapshot.proposals().contains_gap(&short_id) { + TxStatus::Gap } else { - let tx_status = if snapshot.proposals().contains_gap(&short_id) { - TxStatus::Gap - } else { - TxStatus::Fresh - }; - tx_pool - .resolve_tx_from_pending_and_proposed(tx) - .map(|rtx| (rtx, tx_status)) + TxStatus::Fresh + }; + if tx_status == TxStatus::Proposed { + tx_pool.resolve_tx_from_proposed(tx) + } else { + tx_pool.resolve_tx_from_pending_and_proposed(tx) } + .map(|rtx| (rtx, tx_status)) } fn _submit_entry( @@ -1005,16 +1000,15 @@ fn _submit_entry( let tx_hash = entry.transaction().hash(); match status { TxStatus::Fresh => { - if tx_pool.add_pending(entry.clone()) { - debug!("submit_entry pending {}", tx_hash); + if tx_pool.add_pending(entry.clone()).unwrap_or(false) { callbacks.call_pending(tx_pool, &entry); } else { return Err(Reject::Duplicated(tx_hash)); } } + TxStatus::Gap => { - if tx_pool.add_gap(entry.clone()) { - debug!("submit_entry gap {}", tx_hash); + if tx_pool.add_gap(entry.clone()).unwrap_or(false) { callbacks.call_pending(tx_pool, &entry); } else { return Err(Reject::Duplicated(tx_hash)); @@ -1022,10 +1016,7 @@ fn _submit_entry( } TxStatus::Proposed => { if tx_pool.add_proposed(entry.clone())? { - debug!("submit_entry proposed {}", tx_hash); callbacks.call_proposed(tx_pool, &entry, true); - } else { - return Err(Reject::Duplicated(tx_hash)); } } } @@ -1055,38 +1046,39 @@ fn _update_tx_pool_for_reorg( // pending ---> gap ----> proposed // try move gap to proposed if mine_mode { - let mut entries = Vec::new(); + let mut proposals = Vec::new(); let mut gaps = Vec::new(); - tx_pool.gap.remove_entries_by_filter(|id, tx_entry| { - if snapshot.proposals().contains_proposed(id) { - entries.push(tx_entry.clone()); - true - } else { - false - } - }); - - tx_pool.pending.remove_entries_by_filter(|id, tx_entry| { - if snapshot.proposals().contains_proposed(id) { - entries.push(tx_entry.clone()); - true - } else if snapshot.proposals().contains_gap(id) { - gaps.push(tx_entry.clone()); - true - } else { - false - } - }); + tx_pool + .pool_map + .remove_entries_by_filter(&Status::Gap, |id, tx_entry| { + if snapshot.proposals().contains_proposed(id) { + proposals.push(tx_entry.clone()); + true + } else { + false + } + }); - for entry in entries { - debug!("tx move to proposed {}", entry.transaction().hash()); + tx_pool + .pool_map + .remove_entries_by_filter(&Status::Pending, |id, tx_entry| { + if snapshot.proposals().contains_proposed(id) { + proposals.push(tx_entry.clone()); + true + } else if snapshot.proposals().contains_gap(id) { + gaps.push(tx_entry.clone()); + true + } else { + false + } + }); + + for entry in proposals { let cached = CacheEntry::completed(entry.cycles, entry.fee); - let tx_hash = entry.transaction().hash(); if let Err(e) = tx_pool.proposed_rtx(cached, entry.size, entry.timestamp, Arc::clone(&entry.rtx)) { - debug!("Failed to add proposed tx {}, reason: {}", tx_hash, e); callbacks.call_reject(tx_pool, &entry, e.clone()); } else { callbacks.call_proposed(tx_pool, &entry, false); @@ -1094,7 +1086,6 @@ fn _update_tx_pool_for_reorg( } for entry in gaps { - debug!("tx move to gap {}", entry.transaction().hash()); let tx_hash = entry.transaction().hash(); let cached = CacheEntry::completed(entry.cycles, entry.fee); if let Err(e) = diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index 5dc54018b4..a008502c62 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -3,6 +3,7 @@ use crate::block_assembler::{self, BlockAssembler}; use crate::callback::{Callback, Callbacks, ProposedCallback, RejectCallback}; use crate::chunk_process::ChunkCommand; +use crate::component::pool_map::{PoolEntry, Status}; use crate::component::{chunk::ChunkQueue, orphan::OrphanPool}; use crate::error::{handle_recv_error, handle_send_cmd_error, handle_try_send_error}; use crate::pool::TxPool; @@ -735,10 +736,18 @@ async fn process(mut service: TxPoolService, message: Message) { }) => { let id = ProposalShortId::from_tx_hash(&hash); let tx_pool = service.tx_pool.read().await; - let ret = if let Some(entry) = tx_pool.proposed.get(&id) { - Ok((TxStatus::Proposed, Some(entry.cycles))) - } else if let Some(entry) = tx_pool.get_entry_from_pending_or_gap(&id) { - Ok((TxStatus::Pending, Some(entry.cycles))) + let ret = if let Some(PoolEntry { + status, + inner: entry, + .. + }) = tx_pool.pool_map.get_by_id(&id) + { + let status = if status == &Status::Proposed { + TxStatus::Proposed + } else { + TxStatus::Pending + }; + Ok((status, Some(entry.cycles))) } else if let Some(ref recent_reject_db) = tx_pool.recent_reject { let recent_reject_result = recent_reject_db.get(&hash); if let Ok(recent_reject) = recent_reject_result { @@ -764,14 +773,18 @@ async fn process(mut service: TxPoolService, message: Message) { }) => { let id = ProposalShortId::from_tx_hash(&hash); let tx_pool = service.tx_pool.read().await; - let ret = if let Some(entry) = tx_pool.proposed.get(&id) { - Ok(TransactionWithStatus::with_proposed( - Some(entry.transaction().clone()), - entry.cycles, - entry.timestamp, - )) - } else if let Some(entry) = tx_pool.get_entry_from_pending_or_gap(&id) { - Ok(TransactionWithStatus::with_pending( + let ret = if let Some(PoolEntry { + status, + inner: entry, + .. + }) = tx_pool.pool_map.get_by_id(&id) + { + let trans_status = if status == &Status::Proposed { + TransactionWithStatus::with_proposed + } else { + TransactionWithStatus::with_pending + }; + Ok(trans_status( Some(entry.transaction().clone()), entry.cycles, entry.timestamp, @@ -900,8 +913,8 @@ impl TxPoolService { TxPoolInfo { tip_hash: tip_header.hash(), tip_number: tip_header.number(), - pending_size: tx_pool.pending.size() + tx_pool.gap.size(), - proposed_size: tx_pool.proposed.size(), + pending_size: tx_pool.pool_map.pending_size(), + proposed_size: tx_pool.pool_map.proposed_size(), orphan_size: orphan.len(), total_tx_size: tx_pool.total_tx_size, total_tx_cycles: tx_pool.total_tx_cycles, @@ -968,7 +981,7 @@ impl TxPoolService { match target { PlugTarget::Pending => { for entry in entries { - tx_pool.add_pending(entry); + tx_pool.add_pending(entry).unwrap(); } } PlugTarget::Proposed => { From d1f56a110a8a906a3d90369f71fa345f8b6d6a1d Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 1 Jun 2023 18:12:17 +0800 Subject: [PATCH 087/267] move double spend checking and fix tests --- Cargo.lock | 32 +++--- chain/src/chain.rs | 1 - chain/src/tests/dep_cell.rs | 15 ++- rpc/src/tests/module/pool.rs | 2 +- test/src/main.rs | 8 -- test/src/node.rs | 12 ++ test/src/specs/relay/transaction_relay.rs | 14 ++- test/src/specs/tx_pool/collision.rs | 27 ++--- .../tx_pool/different_txs_with_same_input.rs | 38 +++--- test/src/specs/tx_pool/send_tx_chain.rs | 12 +- tx-pool/Cargo.toml | 3 +- tx-pool/src/component/edges.rs | 4 + tx-pool/src/component/pool_map.rs | 88 +++++++------- tx-pool/src/pool.rs | 108 +++--------------- tx-pool/src/process.rs | 55 ++++----- 15 files changed, 180 insertions(+), 239 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6122a592fe..1da5088e1e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1423,9 +1423,9 @@ dependencies = [ "ckb-types", "ckb-util", "ckb-verification", + "ckb_multi_index_map", "hyper", "lru", - "multi_index_map", "rand 0.8.5", "rustc-hash", "sentry", @@ -1553,6 +1553,21 @@ dependencies = [ "paste", ] +[[package]] +name = "ckb_multi_index_map" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2adba00c3dcb84fc4634c948cf3d24c05ce3193810bfa568effe13ad814f662a" +dependencies = [ + "convert_case 0.6.0", + "proc-macro-error", + "proc-macro2", + "quote", + "rustc-hash", + "slab", + "syn", +] + [[package]] name = "clang-sys" version = "1.3.1" @@ -3128,21 +3143,6 @@ dependencies = [ "faster-hex", ] -[[package]] -name = "multi_index_map" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a58eea8dbf91e7420e0e843535f585491046d6017e669d36cb8342cfa4861e2" -dependencies = [ - "convert_case 0.6.0", - "proc-macro-error", - "proc-macro2", - "quote", - "rustc-hash", - "slab", - "syn", -] - [[package]] name = "native-tls" version = "0.2.11" diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 3323492032..bcef1bfc12 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -560,7 +560,6 @@ impl ChainService { self.proposal_table .insert(blk.header().number(), blk.union_proposal_ids()); } - self.reload_proposal_table(fork); } diff --git a/chain/src/tests/dep_cell.rs b/chain/src/tests/dep_cell.rs index 3f557236fd..3e7b24eed1 100644 --- a/chain/src/tests/dep_cell.rs +++ b/chain/src/tests/dep_cell.rs @@ -436,7 +436,6 @@ fn test_package_txs_with_deps2() { .internal_process_block(Arc::new(block), Switch::DISABLE_ALL) .unwrap(); } - // skip gap { while Into::::into(block_template.number) != 2 { @@ -461,7 +460,7 @@ fn test_package_txs_with_deps2() { let mut tx_pool_info = tx_pool.get_tx_pool_info().unwrap(); while tx_pool_info.proposed_size != txs.len() { - tx_pool_info = tx_pool.get_tx_pool_info().unwrap() + tx_pool_info = tx_pool.get_tx_pool_info().unwrap(); } // get block template with txs @@ -534,11 +533,11 @@ fn test_package_txs_with_deps_priority() { Capacity::shannons(10000), ); - let txs = vec![tx2.clone(), tx1]; - for tx in &txs { - let ret = tx_pool.submit_local_tx(tx.clone()).unwrap(); - assert!(ret.is_ok(), "submit {} {:?}", tx.proposal_short_id(), ret); - } + let ret = tx_pool.submit_local_tx(tx2.clone()).unwrap(); + assert!(ret.is_ok(), "submit {} {:?}", tx2.proposal_short_id(), ret); + + let ret = tx_pool.submit_local_tx(tx1.clone()).unwrap(); + assert!(ret.is_err(), "submit {} {:?}", tx1.proposal_short_id(), ret); let mut block_template = shared .get_block_template(None, None, None) @@ -548,7 +547,7 @@ fn test_package_txs_with_deps_priority() { // proposal txs { while !(Into::::into(block_template.number) == 1 - && block_template.proposals.len() == 2) + && block_template.proposals.len() == 1) { block_template = shared .get_block_template(None, None, None) diff --git a/rpc/src/tests/module/pool.rs b/rpc/src/tests/module/pool.rs index 7288b91f0f..5349465f4f 100644 --- a/rpc/src/tests/module/pool.rs +++ b/rpc/src/tests/module/pool.rs @@ -172,7 +172,7 @@ fn test_send_transaction_exceeded_maximum_ancestors_count() { parent_tx_hash = tx.hash(); } - suite.wait_block_template_array_ge("proposals", 130); + suite.wait_block_template_array_ge("proposals", 125); // 130 txs will be added to proposal list while store.get_tip_header().unwrap().number() != (tip.number() + 2) { diff --git a/test/src/main.rs b/test/src/main.rs index b44a15f2c4..f203804860 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -388,12 +388,6 @@ fn canonicalize_path>(path: P) -> PathBuf { .unwrap_or_else(|_| path.as_ref().to_path_buf()) } -fn _all_specs() -> Vec> { - // This case is not stable right now - //vec![Box::new(PoolResolveConflictAfterReorg)] - vec![Box::new(RemoveConflictFromPending)] -} - fn all_specs() -> Vec> { let mut specs: Vec> = vec![ Box::new(BlockSyncFromOne), @@ -436,8 +430,6 @@ fn all_specs() -> Vec> { Box::new(GetRawTxPool), Box::new(PoolReconcile), Box::new(PoolResurrect), - //TODO: (yukang) - //Box::new(PoolResolveConflictAfterReorg), Box::new(InvalidHeaderDep), #[cfg(not(target_os = "windows"))] Box::new(PoolPersisted), diff --git a/test/src/node.rs b/test/src/node.rs index 90bad67dde..5ef6167312 100644 --- a/test/src/node.rs +++ b/test/src/node.rs @@ -5,6 +5,7 @@ use crate::{SYSTEM_CELL_ALWAYS_FAILURE_INDEX, SYSTEM_CELL_ALWAYS_SUCCESS_INDEX}; use ckb_app_config::CKBAppConfig; use ckb_chain_spec::consensus::Consensus; use ckb_chain_spec::ChainSpec; +use ckb_error::AnyError; use ckb_jsonrpc_types::TxStatus; use ckb_jsonrpc_types::{BlockFilter, BlockTemplate, TxPoolInfo}; use ckb_logger::{debug, error}; @@ -357,6 +358,17 @@ impl Node { .send_transaction(transaction.data().into()) } + pub fn submit_transaction_with_result( + &self, + transaction: &TransactionView, + ) -> Result { + let res = self + .rpc_client() + .send_transaction_result(transaction.data().into())? + .pack(); + Ok(res) + } + pub fn get_transaction(&self, tx_hash: Byte32) -> TxStatus { self.rpc_client().get_transaction(tx_hash).tx_status } diff --git a/test/src/specs/relay/transaction_relay.rs b/test/src/specs/relay/transaction_relay.rs index 39cb05f3da..e5af42eb45 100644 --- a/test/src/specs/relay/transaction_relay.rs +++ b/test/src/specs/relay/transaction_relay.rs @@ -5,7 +5,6 @@ use crate::util::transaction::{always_success_transaction, always_success_transa use crate::utils::{build_relay_tx_hashes, build_relay_txs, sleep, wait_until}; use crate::{Net, Node, Spec}; use ckb_constant::sync::RETRY_ASK_TX_TIMEOUT_INCREASE; -use ckb_jsonrpc_types::Status; use ckb_logger::info; use ckb_network::SupportProtocols; use ckb_types::{ @@ -234,10 +233,15 @@ impl Spec for TransactionRelayConflict { .build(); node0.rpc_client().send_transaction(tx1.data().into()); sleep(6); - node0.rpc_client().send_transaction(tx2.data().into()); + + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err()); + eprintln!("res: {:?}", res); let relayed = wait_until(20, || { - [tx1.hash(), tx2.hash()].iter().all(|hash| { + [tx1.hash()].iter().all(|hash| { node1 .rpc_client() .get_transaction(hash.clone()) @@ -247,13 +251,14 @@ impl Spec for TransactionRelayConflict { }); assert!(relayed, "all transactions should be relayed"); - let proposed = node1.mine_with_blocking(|template| template.proposals.len() != 3); + let proposed = node1.mine_with_blocking(|template| template.proposals.len() != 2); node1.mine_with_blocking(|template| template.number.value() != (proposed + 1)); waiting_for_sync(nodes); node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); + /* let ret = node1 .rpc_client() .get_transaction_with_verbosity(tx2.hash(), 1); @@ -289,5 +294,6 @@ impl Spec for TransactionRelayConflict { .is_some() }); assert!(relayed, "Transaction should be relayed to node1"); + */ } } diff --git a/test/src/specs/tx_pool/collision.rs b/test/src/specs/tx_pool/collision.rs index c349ddbb34..7dc606d459 100644 --- a/test/src/specs/tx_pool/collision.rs +++ b/test/src/specs/tx_pool/collision.rs @@ -1,6 +1,4 @@ -use crate::util::check::{ - is_transaction_committed, is_transaction_pending, is_transaction_rejected, -}; +use crate::util::check::{is_transaction_committed, is_transaction_pending}; use crate::utils::{assert_send_transaction_fail, blank, commit, propose}; use crate::{Node, Spec}; use ckb_types::bytes::Bytes; @@ -67,7 +65,8 @@ impl Spec for ConflictInPending { let (txa, txb) = conflict_transactions(node); node.submit_transaction(&txa); - node.submit_transaction(&txb); + let res = node.submit_transaction_with_result(&txb); + assert!(res.is_err()); node.submit_block(&propose(node, &[&txa])); (0..window.closest()).for_each(|_| { @@ -89,13 +88,15 @@ impl Spec for ConflictInGap { let (txa, txb) = conflict_transactions(node); node.submit_transaction(&txa); - node.submit_transaction(&txb); + let res = node.submit_transaction_with_result(&txb); + assert!(res.is_err()); node.submit_block(&propose(node, &[&txa])); (0..window.closest() - 1).for_each(|_| { node.submit_block(&blank(node)); }); - node.submit_block(&propose(node, &[&txb])); + + //node.submit_block(&propose(node, &[&txb])); let block = node.new_block(None, None, None); assert_eq!(&[txa], &block.transactions()[1..]); @@ -114,7 +115,8 @@ impl Spec for ConflictInProposed { let (txa, txb) = conflict_transactions(node); node.submit_transaction(&txa); - node.submit_transaction(&txb); + let res = node.submit_transaction_with_result(&txb); + assert!(res.is_err()); node.submit_block(&propose(node, &[&txa, &txb])); node.mine(window.farthest()); @@ -153,12 +155,13 @@ impl Spec for RemoveConflictFromPending { conflict_transactions_with_capacity(node, Bytes::new(), capacity_bytes!(1000)); let txc = node.new_transaction_with_since_capacity(txb.hash(), 0, capacity_bytes!(100)); node.submit_transaction(&txa); - node.submit_transaction(&txb); - node.submit_transaction(&txc); + let res = node.submit_transaction_with_result(&txb); + assert!(res.is_err()); + + let res = node.submit_transaction_with_result(&txc); + assert!(res.is_err()); assert!(is_transaction_pending(node, &txa)); - assert!(is_transaction_pending(node, &txb)); - assert!(is_transaction_pending(node, &txc)); node.submit_block(&propose(node, &[&txa])); (0..window.closest()).for_each(|_| { @@ -168,8 +171,6 @@ impl Spec for RemoveConflictFromPending { node.wait_for_tx_pool(); assert!(is_transaction_committed(node, &txa)); - assert!(is_transaction_rejected(node, &txb)); - assert!(is_transaction_rejected(node, &txc)); } } diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index 443cea4cb5..8539c7c8c6 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -28,10 +28,14 @@ impl Spec for DifferentTxsWithSameInput { .as_advanced_builder() .set_outputs(vec![output]) .build(); + node0.rpc_client().send_transaction(tx1.data().into()); - node0.rpc_client().send_transaction(tx2.data().into()); + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err(), "tx2 should be rejected"); - node0.mine_with_blocking(|template| template.proposals.len() != 3); + node0.mine_with_blocking(|template| template.proposals.len() != 2); node0.mine_with_blocking(|template| template.number.value() != 14); node0.mine_with_blocking(|template| template.transactions.len() != 2); @@ -47,11 +51,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(!commit_txs_hash.contains(&tx2.hash())); // when tx1 was confirmed, tx2 should be rejected - let ret = node0.rpc_client().get_transaction(tx2.hash()); - assert!( - matches!(ret.tx_status.status, Status::Rejected), - "tx2 should be rejected" - ); + // let ret = node0.rpc_client().get_transaction(tx2.hash()); + // assert!( + // matches!(ret.tx_status.status, Status::Rejected), + // "tx2 should be rejected" + // ); // verbosity = 1 let ret = node0 @@ -60,11 +64,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_none()); assert!(matches!(ret.tx_status.status, Status::Committed)); - let ret = node0 - .rpc_client() - .get_transaction_with_verbosity(tx2.hash(), 1); - assert!(ret.transaction.is_none()); - assert!(matches!(ret.tx_status.status, Status::Rejected)); + // let ret = node0 + // .rpc_client() + // .get_transaction_with_verbosity(tx2.hash(), 1); + // assert!(ret.transaction.is_none()); + // assert!(matches!(ret.tx_status.status, Status::Rejected)); // verbosity = 2 let ret = node0 @@ -73,10 +77,10 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_some()); assert!(matches!(ret.tx_status.status, Status::Committed)); - let ret = node0 - .rpc_client() - .get_transaction_with_verbosity(tx2.hash(), 2); - assert!(ret.transaction.is_none()); - assert!(matches!(ret.tx_status.status, Status::Rejected)); + // let ret = node0 + // .rpc_client() + // .get_transaction_with_verbosity(tx2.hash(), 2); + // assert!(ret.transaction.is_none()); + // assert!(matches!(ret.tx_status.status, Status::Rejected)); } } diff --git a/test/src/specs/tx_pool/send_tx_chain.rs b/test/src/specs/tx_pool/send_tx_chain.rs index 3065b690be..f27030a5a1 100644 --- a/test/src/specs/tx_pool/send_tx_chain.rs +++ b/test/src/specs/tx_pool/send_tx_chain.rs @@ -33,10 +33,15 @@ impl Spec for SendTxChain { assert_eq!(txs.len(), MAX_ANCESTORS_COUNT + 1); // send tx chain info!("submit fresh txs chain to node0"); - for tx in txs[..=MAX_ANCESTORS_COUNT].iter() { + for tx in txs[..=MAX_ANCESTORS_COUNT - 1].iter() { let ret = node0.rpc_client().send_transaction_result(tx.data().into()); assert!(ret.is_ok()); } + // The last one will be rejected + let ret = node0 + .rpc_client() + .send_transaction_result(txs[MAX_ANCESTORS_COUNT].data().into()); + assert!(ret.is_err()); node0.mine(3); @@ -76,6 +81,11 @@ impl Spec for SendTxChain { .rpc_client() .send_transaction_result(txs.last().unwrap().data().into()); assert!(ret.is_err()); + assert!(ret + .err() + .unwrap() + .to_string() + .contains("Transaction exceeded maximum ancestors count limit")); } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index 0ce764ecb0..d8a16bff50 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -36,8 +36,7 @@ sentry = { version = "0.26.0", optional = true } serde_json = "1.0" rand = "0.8.4" hyper = { version = "0.14", features = ["http1", "client", "tcp"] } -#multi_index_map = { git = "https://github.com/wyjin/multi_index_map.git", branch = "master" } -multi_index_map = "0.5.0" +ckb_multi_index_map = "0.0.1" # ckb team fork crate slab = "0.4" rustc-hash = "1.1" tokio-util = "0.7.8" diff --git a/tx-pool/src/component/edges.rs b/tx-pool/src/component/edges.rs index decf98c1a5..129b33c0a2 100644 --- a/tx-pool/src/component/edges.rs +++ b/tx-pool/src/component/edges.rs @@ -91,6 +91,10 @@ impl Edges { self.outputs.get(out_point) } + pub(crate) fn remove_deps(&mut self, out_point: &OutPoint) -> Option> { + self.deps.remove(out_point) + } + pub(crate) fn insert_deps(&mut self, out_point: OutPoint, txid: ProposalShortId) { self.deps.entry(out_point).or_default().insert(txid); } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index f9b00b75e3..50c5c393dc 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -9,6 +9,7 @@ use crate::error::Reject; use crate::TxEntry; use ckb_logger::trace; +use ckb_multi_index_map::MultiIndexMap; use ckb_types::core::error::OutPointError; use ckb_types::packed::OutPoint; use ckb_types::{ @@ -20,7 +21,6 @@ use ckb_types::{ core::cell::{CellMetaBuilder, CellProvider, CellStatus}, prelude::*, }; -use multi_index_map::MultiIndexMap; use std::borrow::Cow; use std::collections::HashSet; @@ -242,40 +242,33 @@ impl PoolMap { conflicts } - /// pending gap and proposed store the inputs and deps in edges, it's removed in `remove_entry` - /// here we use `input_pts_iter` and `related_dep_out_points` to find the conflict txs pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let mut to_be_removed = Vec::new(); + let inputs = tx.input_pts_iter(); let mut conflicts = Vec::new(); - for (_, entry) in self.entries.iter() { - let entry = &entry.inner; - let tx_id = entry.proposal_short_id(); - let tx_inputs = entry.transaction().input_pts_iter(); - let deps = entry.related_dep_out_points(); - - // tx input conflict - for i in tx_inputs { - if tx.input_pts_iter().any(|j| i == j) { - to_be_removed.push((tx_id.to_owned(), i.clone())); + for i in inputs { + if let Some(id) = self.edges.remove_input(&i) { + let entries = self.remove_entry_and_descendants(&id); + if !entries.is_empty() { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + let rejects = std::iter::repeat(reject).take(entries.len()); + conflicts.extend(entries.into_iter().zip(rejects)); } } - // tx deps conflict - for i in deps { - if tx.input_pts_iter().any(|j| *i == j) { - to_be_removed.push((tx_id.to_owned(), i.clone())); + // deps consumed + if let Some(x) = self.edges.remove_deps(&i) { + for id in x { + let entries = self.remove_entry_and_descendants(&id); + if !entries.is_empty() { + let reject = Reject::Resolve(OutPointError::Dead(i.clone())); + let rejects = std::iter::repeat(reject).take(entries.len()); + conflicts.extend(entries.into_iter().zip(rejects)); + } } } } - for (tx_id, input) in to_be_removed.iter() { - let entries = self.remove_entry_and_descendants(tx_id); - let reject = Reject::Resolve(OutPointError::Dead(input.to_owned())); - let rejects = std::iter::repeat(reject).take(entries.len()); - conflicts.extend(entries.into_iter().zip(rejects)); - } - conflicts } @@ -297,6 +290,7 @@ impl PoolMap { } } + #[cfg(test)] pub(crate) fn remove_entries_by_filter bool>( &mut self, status: &Status, @@ -360,10 +354,11 @@ impl PoolMap { EntryOp::Add => child.add_entry_weight(parent), } let short_id = child.proposal_short_id(); - //TODO: optimize it - self.entries.remove_by_id(&short_id); - self.insert_entry(&child, entry.status) - .expect("pool consistent"); + self.entries.modify_by_id(&short_id, |e| { + e.score = child.as_score_key(); + e.evict_key = child.as_evict_key(); + e.inner = child; + }); } } @@ -437,7 +432,7 @@ impl PoolMap { } /// Record the links for entry - fn record_entry_links(&mut self, entry: &mut TxEntry, status: &Status) -> Result { + fn record_entry_links(&mut self, entry: &mut TxEntry) -> Result { // find in pool parents let mut parents: HashSet = HashSet::with_capacity( entry.transaction().inputs().len() + entry.transaction().cell_deps().len(), @@ -476,7 +471,8 @@ impl PoolMap { .expect("pool consistent"); entry.add_entry_weight(&ancestor.inner); } - if *status == Status::Proposed && entry.ancestors_count > self.max_ancestors_count { + if entry.ancestors_count > self.max_ancestors_count { + eprintln!("debug: exceeded maximum ancestors count"); return Err(Reject::ExceededMaximumAncestorsCount); } @@ -534,13 +530,25 @@ impl PoolMap { return Ok(false); } trace!("add_{:?} {}", status, entry.transaction().hash()); - self.record_entry_links(&mut entry, &status)?; + self.record_entry_links(&mut entry)?; self.insert_entry(&entry, status)?; self.record_entry_deps(&entry); self.record_entry_edges(&entry); Ok(true) } + /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` + pub(crate) fn set_entry(&mut self, entry: &TxEntry, status: Status) { + let tx_short_id = entry.proposal_short_id(); + let _ = self + .entries + .get_by_id(&tx_short_id) + .expect("unconsistent pool"); + self.entries.modify_by_id(&tx_short_id, |e| { + e.status = status; + }); + } + fn insert_entry(&mut self, entry: &TxEntry, status: Status) -> Result { let tx_short_id = entry.proposal_short_id(); let score = entry.as_score_key(); @@ -558,10 +566,8 @@ impl PoolMap { impl CellProvider for PoolMap { fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - if let Some(id) = self.edges.get_input_ref(out_point) { - if self.has_proposed(id) { - return CellStatus::Dead; - } + if self.edges.get_input_ref(out_point).is_some() { + return CellStatus::Dead; } match self.edges.get_output_ref(out_point) { Some(OutPointStatus::UnConsumed) => { @@ -571,7 +577,7 @@ impl CellProvider for PoolMap { .build(); CellStatus::live_cell(cell_meta) } - Some(OutPointStatus::Consumed(id)) if self.has_proposed(id) => CellStatus::Dead, + Some(OutPointStatus::Consumed(_id)) => CellStatus::Dead, _ => CellStatus::Unknown, } } @@ -579,13 +585,11 @@ impl CellProvider for PoolMap { impl CellChecker for PoolMap { fn is_live(&self, out_point: &OutPoint) -> Option { - if let Some(id) = self.edges.get_input_ref(out_point) { - if self.has_proposed(id) { - return Some(false); - } + if self.edges.get_input_ref(out_point).is_some() { + return Some(false); } match self.edges.get_output_ref(out_point) { - Some(OutPointStatus::Consumed(id)) if self.has_proposed(id) => Some(false), + Some(OutPointStatus::Consumed(_id)) => Some(false), Some(OutPointStatus::UnConsumed) => Some(true), _ => None, } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index c01ad22b64..41e766e630 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -14,12 +14,10 @@ use ckb_store::ChainStore; use ckb_types::{ core::{ cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, - cell::{CellChecker, CellMetaBuilder, CellProvider, CellStatus}, tx_pool::{TxPoolEntryInfo, TxPoolIds}, Cycle, TransactionView, UncleBlockView, }, - packed::{Byte32, OutPoint, ProposalShortId}, - prelude::*, + packed::{Byte32, ProposalShortId}, }; use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; @@ -130,6 +128,14 @@ impl TxPool { self.pool_map.get_by_id(id).is_some() } + pub(crate) fn set_entry_proposed(&mut self, entry: &TxEntry) { + self.pool_map.set_entry(entry, Status::Proposed) + } + + pub(crate) fn set_entry_gap(&mut self, entry: &TxEntry) { + self.pool_map.set_entry(entry, Status::Gap) + } + /// Returns tx with cycles corresponding to the id. pub(crate) fn get_tx_with_cycles( &self, @@ -269,51 +275,26 @@ impl TxPool { false } - pub(crate) fn check_rtx_from_pending_and_proposed( - &self, - rtx: &ResolvedTransaction, - ) -> Result<(), Reject> { + pub(crate) fn check_rtx_from_pool(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { let snapshot = self.snapshot(); - let proposal_checker = OverlayCellChecker::new(&self.pool_map, snapshot); - let checker = OverlayCellChecker::new(self, &proposal_checker); + let checker = OverlayCellChecker::new(&self.pool_map, snapshot); let mut seen_inputs = HashSet::new(); rtx.check(&mut seen_inputs, &checker, snapshot) .map_err(Reject::Resolve) } - pub(crate) fn check_rtx_from_proposed(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { - let snapshot = self.snapshot(); - let proposal_checker = OverlayCellChecker::new(&self.pool_map, snapshot); - let mut seen_inputs = HashSet::new(); - rtx.check(&mut seen_inputs, &proposal_checker, snapshot) - .map_err(Reject::Resolve) - } - - pub(crate) fn resolve_tx_from_pending_and_proposed( + pub(crate) fn resolve_tx_from_pool( &self, tx: TransactionView, ) -> Result, Reject> { let snapshot = self.snapshot(); - let proposed_provider = OverlayCellProvider::new(&self.pool_map, snapshot); - let provider = OverlayCellProvider::new(self, &proposed_provider); + let provider = OverlayCellProvider::new(&self.pool_map, snapshot); let mut seen_inputs = HashSet::new(); resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) .map(Arc::new) .map_err(Reject::Resolve) } - pub(crate) fn resolve_tx_from_proposed( - &self, - tx: TransactionView, - ) -> Result, Reject> { - let snapshot = self.snapshot(); - let proposed_provider = OverlayCellProvider::new(&self.pool_map, snapshot); - let mut seen_inputs = HashSet::new(); - resolve_transaction(tx, &mut seen_inputs, &proposed_provider, snapshot) - .map(Arc::new) - .map_err(Reject::Resolve) - } - pub(crate) fn gap_rtx( &mut self, cache_entry: CacheEntry, @@ -324,7 +305,6 @@ impl TxPool { let snapshot = self.cloned_snapshot(); let tip_header = snapshot.tip_header(); let tx_env = Arc::new(TxVerifyEnv::new_proposed(tip_header, 0)); - self.check_rtx_from_pending_and_proposed(&rtx)?; let max_cycles = snapshot.consensus().max_block_cycles(); let verified = verify_rtx( @@ -338,12 +318,8 @@ impl TxPool { let entry = TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); - let tx_hash = entry.transaction().hash(); - if self.add_gap(entry).unwrap_or(false) { - Ok(CacheEntry::Completed(verified)) - } else { - Err(Reject::Duplicated(tx_hash)) - } + self.set_entry_gap(&entry); + Ok(CacheEntry::Completed(verified)) } pub(crate) fn proposed_rtx( @@ -356,7 +332,6 @@ impl TxPool { let snapshot = self.cloned_snapshot(); let tip_header = snapshot.tip_header(); let tx_env = Arc::new(TxVerifyEnv::new_proposed(tip_header, 1)); - self.check_rtx_from_proposed(&rtx)?; let max_cycles = snapshot.consensus().max_block_cycles(); let verified = verify_rtx( @@ -375,11 +350,8 @@ impl TxPool { tx_hash, entry.proposal_short_id() ); - if self.add_proposed(entry)? { - Ok(CacheEntry::Completed(verified)) - } else { - Err(Reject::Duplicated(tx_hash)) - } + self.set_entry_proposed(&entry); + Ok(CacheEntry::Completed(verified)) } /// Get to-be-proposal transactions that may be included in the next block. @@ -538,49 +510,3 @@ impl TxPool { } } } - -/// This is a hack right now, we use `CellProvider` to check if a transaction is in `Pending` or `Gap` status. -/// To make sure the behavior is same as before, we need to remove this if we have finished replace-by-fee strategy. -impl CellProvider for TxPool { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - let tx_hash = out_point.tx_hash(); - match self - .pool_map - .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) - { - Some(pool_entry) if pool_entry.status != Status::Proposed => { - match pool_entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - { - Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - None => CellStatus::Unknown, - } - } - _ => CellStatus::Unknown, - } - } -} - -impl CellChecker for TxPool { - fn is_live(&self, out_point: &OutPoint) -> Option { - let tx_hash = out_point.tx_hash(); - match self - .pool_map - .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) - { - Some(pool_entry) if pool_entry.status != Status::Proposed => pool_entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - .map(|_| true), - _ => None, - } - } -} diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 00dde6d264..354e56cac0 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -966,12 +966,7 @@ fn check_rtx( } else { TxStatus::Fresh }; - if tx_status == TxStatus::Proposed { - tx_pool.check_rtx_from_proposed(rtx) - } else { - tx_pool.check_rtx_from_pending_and_proposed(rtx) - } - .map(|_| tx_status) + tx_pool.check_rtx_from_pool(rtx).map(|_| tx_status) } fn resolve_tx(tx_pool: &TxPool, snapshot: &Snapshot, tx: TransactionView) -> ResolveResult { @@ -983,12 +978,7 @@ fn resolve_tx(tx_pool: &TxPool, snapshot: &Snapshot, tx: TransactionView) -> Res } else { TxStatus::Fresh }; - if tx_status == TxStatus::Proposed { - tx_pool.resolve_tx_from_proposed(tx) - } else { - tx_pool.resolve_tx_from_pending_and_proposed(tx) - } - .map(|rtx| (rtx, tx_status)) + tx_pool.resolve_tx_from_pool(tx).map(|rtx| (rtx, tx_status)) } fn _submit_entry( @@ -1049,32 +1039,26 @@ fn _update_tx_pool_for_reorg( let mut proposals = Vec::new(); let mut gaps = Vec::new(); - tx_pool - .pool_map - .remove_entries_by_filter(&Status::Gap, |id, tx_entry| { - if snapshot.proposals().contains_proposed(id) { - proposals.push(tx_entry.clone()); - true - } else { - false - } - }); + for entry in tx_pool.pool_map.entries.get_by_status(&Status::Gap) { + let e = &entry.inner; + let short_id = e.proposal_short_id(); + if snapshot.proposals().contains_proposed(&short_id) { + proposals.push(e.clone()); + } + } - tx_pool - .pool_map - .remove_entries_by_filter(&Status::Pending, |id, tx_entry| { - if snapshot.proposals().contains_proposed(id) { - proposals.push(tx_entry.clone()); - true - } else if snapshot.proposals().contains_gap(id) { - gaps.push(tx_entry.clone()); - true - } else { - false - } - }); + for entry in tx_pool.pool_map.entries.get_by_status(&Status::Pending) { + let e = &entry.inner; + let short_id = e.proposal_short_id(); + if snapshot.proposals().contains_proposed(&short_id) { + proposals.push(e.clone()); + } else if snapshot.proposals().contains_gap(&short_id) { + gaps.push(e.clone()); + } + } for entry in proposals { + debug!("begin to proposed: {:x}", entry.transaction().hash()); let cached = CacheEntry::completed(entry.cycles, entry.fee); if let Err(e) = tx_pool.proposed_rtx(cached, entry.size, entry.timestamp, Arc::clone(&entry.rtx)) @@ -1086,6 +1070,7 @@ fn _update_tx_pool_for_reorg( } for entry in gaps { + debug!("begin to gap: {:x}", entry.transaction().hash()); let tx_hash = entry.transaction().hash(); let cached = CacheEntry::completed(entry.cycles, entry.fee); if let Err(e) = From 66f49cd7f1d3b3196742e856cad4b1ab71665184 Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 5 Jun 2023 16:51:52 +0800 Subject: [PATCH 088/267] fix makefile so that we may run a specific integration test with environment name --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7d7adb98fb..3d8a92ba2b 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ MOLC_VERSION := 0.7.5 VERBOSE := $(if ${CI},--verbose,) CLIPPY_OPTS := -D warnings -D clippy::clone_on_ref_ptr -D clippy::enum_glob_use -D clippy::fallible_impl_from \ -A clippy::mutable_key_type -A clippy::upper_case_acronyms -CKB_TEST_ARGS := ${CKB_TEST_ARGS} -c 4 +CKB_TEST_ARGS := -c 4 ${CKB_TEST_ARGS} CKB_FEATURES ?= deadlock_detection,with_sentry ALL_FEATURES := deadlock_detection,with_sentry,with_dns_seeding,profiling,march-native CKB_BENCH_FEATURES ?= ci From a2c129fd639aba8a65bb000b4975fea8df790f33 Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 5 Jun 2023 18:22:15 +0800 Subject: [PATCH 089/267] fix TransactionRelayConflict --- test/src/specs/relay/transaction_relay.rs | 16 ++++++++++++---- tx-pool/src/component/pool_map.rs | 3 ++- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/test/src/specs/relay/transaction_relay.rs b/test/src/specs/relay/transaction_relay.rs index e5af42eb45..137d36964a 100644 --- a/test/src/specs/relay/transaction_relay.rs +++ b/test/src/specs/relay/transaction_relay.rs @@ -5,6 +5,7 @@ use crate::util::transaction::{always_success_transaction, always_success_transa use crate::utils::{build_relay_tx_hashes, build_relay_txs, sleep, wait_until}; use crate::{Net, Node, Spec}; use ckb_constant::sync::RETRY_ASK_TX_TIMEOUT_INCREASE; +use ckb_jsonrpc_types::Status; use ckb_logger::info; use ckb_network::SupportProtocols; use ckb_types::{ @@ -234,11 +235,18 @@ impl Spec for TransactionRelayConflict { node0.rpc_client().send_transaction(tx1.data().into()); sleep(6); + let res = node0.rpc_client().get_transaction(tx1.hash()); + assert!(matches!(res.tx_status.status, Status::Pending)); + let res = node0 .rpc_client() .send_transaction_result(tx2.data().into()); assert!(res.is_err()); - eprintln!("res: {:?}", res); + assert!(res + .err() + .unwrap() + .to_string() + .contains("TransactionFailedToResolve: Resolve failed Dead")); let relayed = wait_until(20, || { [tx1.hash()].iter().all(|hash| { @@ -258,11 +266,10 @@ impl Spec for TransactionRelayConflict { node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); - /* let ret = node1 .rpc_client() .get_transaction_with_verbosity(tx2.hash(), 1); - assert!(matches!(ret.tx_status.status, Status::Rejected)); + assert!(matches!(ret.tx_status.status, Status::Unknown)); node0.remove_transaction(tx1.hash()); node0.remove_transaction(tx2.hash()); @@ -284,9 +291,11 @@ impl Spec for TransactionRelayConflict { let relayed = wait_until(10, || { // re-broadcast + // TODO: (yukang) double comfirm this behavior let _ = node1 .rpc_client() .send_transaction_result(tx2.data().into()); + node0 .rpc_client() .get_transaction(tx2.hash()) @@ -294,6 +303,5 @@ impl Spec for TransactionRelayConflict { .is_some() }); assert!(relayed, "Transaction should be relayed to node1"); - */ } } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 50c5c393dc..89e4d0980f 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -60,6 +60,7 @@ impl MultiIndexPoolEntryMap { pub fn score_sorted_iter(&self) -> impl Iterator { // Note: multi_index don't support reverse order iteration now // so we need to collect and reverse + // TODO: @wyjin will add reverse order iteration support for multi_index let entries = self .iter_by_score() .filter(|entry| entry.status == Status::Proposed) @@ -202,7 +203,7 @@ impl PoolMap { let mut removed = vec![]; removed_ids.extend(self.calc_descendants(id)); - // update links state for remove + // update links state for remove, so that we won't update_descendants_index_key in remove_entry for id in &removed_ids { self.remove_entry_links(id); } From c1c8f5ab278841da1867cbbeedeba7bd579c9f1c Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 6 Jun 2023 10:32:57 +0800 Subject: [PATCH 090/267] upgrade multi_index_map for iterator rev --- Cargo.lock | 4 +- tx-pool/Cargo.toml | 2 +- tx-pool/src/component/pool_map.rs | 70 +++++++++++++------------------ tx-pool/src/pool.rs | 2 +- 4 files changed, 34 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1da5088e1e..8e42a97bf3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1555,9 +1555,9 @@ dependencies = [ [[package]] name = "ckb_multi_index_map" -version = "0.0.1" +version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2adba00c3dcb84fc4634c948cf3d24c05ce3193810bfa568effe13ad814f662a" +checksum = "53c20823dfd9f9a8e30faa3b0bdcab4801fb2544957586fada3884c78dcdf38b" dependencies = [ "convert_case 0.6.0", "proc-macro-error", diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index d8a16bff50..04e465561a 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -36,7 +36,7 @@ sentry = { version = "0.26.0", optional = true } serde_json = "1.0" rand = "0.8.4" hyper = { version = "0.14", features = ["http1", "client", "tcp"] } -ckb_multi_index_map = "0.0.1" # ckb team fork crate +ckb_multi_index_map = "0.0.2" # ckb team fork crate slab = "0.4" rustc-hash = "1.1" tokio-util = "0.7.8" diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 89e4d0980f..dd6b471e7e 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -55,20 +55,6 @@ pub struct PoolEntry { pub inner: TxEntry, } -impl MultiIndexPoolEntryMap { - /// sorted by ancestor score from higher to lower - pub fn score_sorted_iter(&self) -> impl Iterator { - // Note: multi_index don't support reverse order iteration now - // so we need to collect and reverse - // TODO: @wyjin will add reverse order iteration support for multi_index - let entries = self - .iter_by_score() - .filter(|entry| entry.status == Status::Proposed) - .collect::>(); - entries.into_iter().rev().map(move |entry| &entry.inner) - } -} - pub struct PoolMap { /// The pool entries with different kinds of sort strategies pub(crate) entries: MultiIndexPoolEntryMap, @@ -150,7 +136,11 @@ impl PoolMap { } pub(crate) fn score_sorted_iter(&self) -> impl Iterator { - self.entries.score_sorted_iter() + self.entries + .iter_by_score() + .rev() + .filter(|entry| entry.status == Status::Proposed) + .map(|entry| &entry.inner) } pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { @@ -187,6 +177,31 @@ impl PoolMap { }) } + pub(crate) fn add_entry(&mut self, mut entry: TxEntry, status: Status) -> Result { + let tx_short_id = entry.proposal_short_id(); + if self.entries.get_by_id(&tx_short_id).is_some() { + return Ok(false); + } + trace!("add_{:?} {}", status, entry.transaction().hash()); + self.record_entry_links(&mut entry)?; + self.insert_entry(&entry, status)?; + self.record_entry_deps(&entry); + self.record_entry_edges(&entry); + Ok(true) + } + + /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` + pub(crate) fn set_entry(&mut self, entry: &TxEntry, status: Status) { + let tx_short_id = entry.proposal_short_id(); + let _ = self + .entries + .get_by_id(&tx_short_id) + .expect("unconsistent pool"); + self.entries.modify_by_id(&tx_short_id, |e| { + e.status = status; + }); + } + pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { if let Some(entry) = self.entries.remove_by_id(id) { self.update_descendants_index_key(&entry.inner, EntryOp::Remove); @@ -525,31 +540,6 @@ impl PoolMap { self.edges.header_deps.remove(&id); } - pub(crate) fn add_entry(&mut self, mut entry: TxEntry, status: Status) -> Result { - let tx_short_id = entry.proposal_short_id(); - if self.entries.get_by_id(&tx_short_id).is_some() { - return Ok(false); - } - trace!("add_{:?} {}", status, entry.transaction().hash()); - self.record_entry_links(&mut entry)?; - self.insert_entry(&entry, status)?; - self.record_entry_deps(&entry); - self.record_entry_edges(&entry); - Ok(true) - } - - /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` - pub(crate) fn set_entry(&mut self, entry: &TxEntry, status: Status) { - let tx_short_id = entry.proposal_short_id(); - let _ = self - .entries - .get_by_id(&tx_short_id) - .expect("unconsistent pool"); - self.entries.modify_by_id(&tx_short_id, |e| { - e.status = status; - }); - } - fn insert_entry(&mut self, entry: &TxEntry, status: Status) -> Result { let tx_short_id = entry.proposal_short_id(); let score = entry.as_score_key(); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 41e766e630..fe58a3abe7 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -356,7 +356,7 @@ impl TxPool { /// Get to-be-proposal transactions that may be included in the next block. /// TODO: do we need to consider the something like score, so that we can - /// provide best transactions to be proposed. + /// provide best transactions to be proposed. pub(crate) fn get_proposals( &self, limit: usize, From 03e062b7882d778f19cb6c2998e6154b3f1d0c19 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 6 Jun 2023 10:53:29 +0800 Subject: [PATCH 091/267] enable PoolResolveConflictAfterReorg --- test/src/main.rs | 1 + test/src/specs/tx_pool/collision.rs | 12 ++++++-- .../tx_pool/different_txs_with_same_input.rs | 30 +++++++++---------- 3 files changed, 25 insertions(+), 18 deletions(-) diff --git a/test/src/main.rs b/test/src/main.rs index f203804860..02e09c809b 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -430,6 +430,7 @@ fn all_specs() -> Vec> { Box::new(GetRawTxPool), Box::new(PoolReconcile), Box::new(PoolResurrect), + Box::new(PoolResolveConflictAfterReorg), Box::new(InvalidHeaderDep), #[cfg(not(target_os = "windows"))] Box::new(PoolPersisted), diff --git a/test/src/specs/tx_pool/collision.rs b/test/src/specs/tx_pool/collision.rs index 7dc606d459..0d93697c03 100644 --- a/test/src/specs/tx_pool/collision.rs +++ b/test/src/specs/tx_pool/collision.rs @@ -1,4 +1,6 @@ -use crate::util::check::{is_transaction_committed, is_transaction_pending}; +use crate::util::check::{ + is_transaction_committed, is_transaction_pending, is_transaction_rejected, +}; use crate::utils::{assert_send_transaction_fail, blank, commit, propose}; use crate::{Node, Spec}; use ckb_types::bytes::Bytes; @@ -6,7 +8,7 @@ use ckb_types::core::{capacity_bytes, Capacity, TransactionView}; use ckb_types::prelude::*; // Convention: -// * `tx1` and `tx2` are cousin transactions, with the same transaction content, expect the +// * `tx1` and `tx2` are cousin transactions, with the same transaction content, except the // witnesses. Hence `tx1` and `tx2` have the same tx_hash/proposal-id but different witness_hash. pub struct TransactionHashCollisionDifferentWitnessHashes; @@ -95,8 +97,8 @@ impl Spec for ConflictInGap { (0..window.closest() - 1).for_each(|_| { node.submit_block(&blank(node)); }); + node.submit_block(&propose(node, &[&txb])); - //node.submit_block(&propose(node, &[&txb])); let block = node.new_block(None, None, None); assert_eq!(&[txa], &block.transactions()[1..]); @@ -162,6 +164,8 @@ impl Spec for RemoveConflictFromPending { assert!(res.is_err()); assert!(is_transaction_pending(node, &txa)); + assert!(is_transaction_rejected(node, &txb)); + assert!(is_transaction_rejected(node, &txc)); node.submit_block(&propose(node, &[&txa])); (0..window.closest()).for_each(|_| { @@ -171,6 +175,8 @@ impl Spec for RemoveConflictFromPending { node.wait_for_tx_pool(); assert!(is_transaction_committed(node, &txa)); + assert!(is_transaction_rejected(node, &txb)); + assert!(is_transaction_rejected(node, &txc)); } } diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index 8539c7c8c6..a816bd2eb9 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -51,11 +51,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(!commit_txs_hash.contains(&tx2.hash())); // when tx1 was confirmed, tx2 should be rejected - // let ret = node0.rpc_client().get_transaction(tx2.hash()); - // assert!( - // matches!(ret.tx_status.status, Status::Rejected), - // "tx2 should be rejected" - // ); + let ret = node0.rpc_client().get_transaction(tx2.hash()); + assert!( + matches!(ret.tx_status.status, Status::Rejected), + "tx2 should be rejected" + ); // verbosity = 1 let ret = node0 @@ -64,11 +64,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_none()); assert!(matches!(ret.tx_status.status, Status::Committed)); - // let ret = node0 - // .rpc_client() - // .get_transaction_with_verbosity(tx2.hash(), 1); - // assert!(ret.transaction.is_none()); - // assert!(matches!(ret.tx_status.status, Status::Rejected)); + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx2.hash(), 1); + assert!(ret.transaction.is_none()); + assert!(matches!(ret.tx_status.status, Status::Rejected)); // verbosity = 2 let ret = node0 @@ -77,10 +77,10 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_some()); assert!(matches!(ret.tx_status.status, Status::Committed)); - // let ret = node0 - // .rpc_client() - // .get_transaction_with_verbosity(tx2.hash(), 2); - // assert!(ret.transaction.is_none()); - // assert!(matches!(ret.tx_status.status, Status::Rejected)); + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx2.hash(), 2); + assert!(ret.transaction.is_none()); + assert!(matches!(ret.tx_status.status, Status::Rejected)); } } From 25be8f03cfdd55eba31c8f11ce86ef526c33693b Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 00:17:16 +0800 Subject: [PATCH 092/267] comments feedback and code cleanup --- tx-pool/src/component/pool_map.rs | 2 +- tx-pool/src/pool.rs | 2 +- tx-pool/src/process.rs | 10 ++-------- tx-pool/src/service.rs | 6 ++++-- 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index dd6b471e7e..b0d591850b 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -182,7 +182,7 @@ impl PoolMap { if self.entries.get_by_id(&tx_short_id).is_some() { return Ok(false); } - trace!("add_{:?} {}", status, entry.transaction().hash()); + trace!("pool_map.add_{:?} {}", status, entry.transaction().hash()); self.record_entry_links(&mut entry)?; self.insert_entry(&entry, status)?; self.record_entry_deps(&entry); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index fe58a3abe7..f271115cea 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -211,7 +211,7 @@ impl TxPool { } } - // Remove transactions from the pool until total size < size_limit. + // Remove transactions from the pool until total size <= size_limit. pub(crate) fn limit_size(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { if let Some(id) = self.pool_map.next_evict_entry() { diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 354e56cac0..2ee84e2f6e 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -987,21 +987,15 @@ fn _submit_entry( entry: TxEntry, callbacks: &Callbacks, ) -> Result<(), Reject> { - let tx_hash = entry.transaction().hash(); match status { TxStatus::Fresh => { - if tx_pool.add_pending(entry.clone()).unwrap_or(false) { + if tx_pool.add_pending(entry.clone())? { callbacks.call_pending(tx_pool, &entry); - } else { - return Err(Reject::Duplicated(tx_hash)); } } - TxStatus::Gap => { - if tx_pool.add_gap(entry.clone()).unwrap_or(false) { + if tx_pool.add_gap(entry.clone())? { callbacks.call_pending(tx_pool, &entry); - } else { - return Err(Reject::Duplicated(tx_hash)); } } TxStatus::Proposed => { diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index a008502c62..1a187615c8 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -981,13 +981,15 @@ impl TxPoolService { match target { PlugTarget::Pending => { for entry in entries { - tx_pool.add_pending(entry).unwrap(); + if let Err(err) = tx_pool.add_pending(entry) { + error!("plug entry add_pending error {}", err); + } } } PlugTarget::Proposed => { for entry in entries { if let Err(err) = tx_pool.add_proposed(entry) { - error!("plug entry error {}", err); + error!("plug entry add_proposed error {}", err); } } } From f8d789f53741394e0fbc27137386d92b8c30e8f1 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 10:03:13 +0800 Subject: [PATCH 093/267] add index_map shrink --- tx-pool/src/component/pool_map.rs | 6 +----- tx-pool/src/pool.rs | 1 + 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index b0d591850b..df829c0875 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -328,12 +328,8 @@ impl PoolMap { self.entries.iter().map(|(_, entry)| entry) } - pub(crate) fn iter_by_evict_key(&self) -> impl Iterator { - self.entries.iter_by_evict_key() - } - pub(crate) fn next_evict_entry(&self) -> Option { - self.iter_by_evict_key() + self.entries.iter_by_evict_key() .next() .map(|entry| entry.id.clone()) } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index f271115cea..dfe8b5a425 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -230,6 +230,7 @@ impl TxPool { } } } + self.pool_map.entries.shrink_to_fit(); } // remove transaction with detached proposal from gap and proposed From 544f65f6343620c9f9f7382ddf62f401dc5f21d4 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 15:27:52 +0800 Subject: [PATCH 094/267] confirmed TransactionRelayConflict is ok --- test/src/specs/relay/transaction_relay.rs | 12 +++++++++++- tx-pool/src/component/pool_map.rs | 3 ++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/test/src/specs/relay/transaction_relay.rs b/test/src/specs/relay/transaction_relay.rs index 137d36964a..75a34e0df6 100644 --- a/test/src/specs/relay/transaction_relay.rs +++ b/test/src/specs/relay/transaction_relay.rs @@ -266,6 +266,11 @@ impl Spec for TransactionRelayConflict { node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); + let ret = node1 + .rpc_client() + .get_transaction_with_verbosity(tx1.hash(), 1); + assert!(matches!(ret.tx_status.status, Status::Proposed)); + let ret = node1 .rpc_client() .get_transaction_with_verbosity(tx2.hash(), 1); @@ -278,6 +283,12 @@ impl Spec for TransactionRelayConflict { node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); + // make sure tx1 is removed from tx-pool + let ret = node1 + .rpc_client() + .get_transaction_with_verbosity(tx1.hash(), 1); + assert!(matches!(ret.tx_status.status, Status::Unknown)); + let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); tx_pool_info.orphan.value() == 0 && tx_pool_info.pending.value() == 0 @@ -291,7 +302,6 @@ impl Spec for TransactionRelayConflict { let relayed = wait_until(10, || { // re-broadcast - // TODO: (yukang) double comfirm this behavior let _ = node1 .rpc_client() .send_transaction_result(tx2.data().into()); diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index df829c0875..cc1c20d684 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -329,7 +329,8 @@ impl PoolMap { } pub(crate) fn next_evict_entry(&self) -> Option { - self.entries.iter_by_evict_key() + self.entries + .iter_by_evict_key() .next() .map(|entry| entry.id.clone()) } From 5964cfa2c48781c34ea5b9db86a8da47731e2cae Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 18:39:42 +0800 Subject: [PATCH 095/267] clean up proposed_rtx and gap_rtx --- tx-pool/src/component/pool_map.rs | 14 ++--- tx-pool/src/pool.rs | 99 ++++++++++++------------------- tx-pool/src/process.rs | 42 ++++++------- 3 files changed, 63 insertions(+), 92 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index cc1c20d684..dadb3b3bc2 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -126,6 +126,10 @@ impl PoolMap { self.entries.get_by_id(id) } + pub(crate) fn get_by_status(&self, status: &Status) -> Vec<&PoolEntry> { + self.entries.get_by_status(status) + } + pub(crate) fn pending_size(&self) -> usize { self.entries.get_by_status(&Status::Pending).len() + self.entries.get_by_status(&Status::Gap).len() @@ -191,13 +195,9 @@ impl PoolMap { } /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` - pub(crate) fn set_entry(&mut self, entry: &TxEntry, status: Status) { - let tx_short_id = entry.proposal_short_id(); - let _ = self - .entries - .get_by_id(&tx_short_id) - .expect("unconsistent pool"); - self.entries.modify_by_id(&tx_short_id, |e| { + pub(crate) fn set_entry(&mut self, short_id: &ProposalShortId, status: Status) { + let _ = self.entries.get_by_id(short_id).expect("unconsistent pool"); + self.entries.modify_by_id(short_id, |e| { e.status = status; }); } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index dfe8b5a425..88564c2058 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -6,7 +6,6 @@ use crate::callback::Callbacks; use crate::component::pool_map::{PoolEntry, PoolMap, Status}; use crate::component::recent_reject::RecentReject; use crate::error::Reject; -use crate::util::verify_rtx; use ckb_app_config::TxPoolConfig; use ckb_logger::{debug, error, warn}; use ckb_snapshot::Snapshot; @@ -19,7 +18,6 @@ use ckb_types::{ }, packed::{Byte32, ProposalShortId}, }; -use ckb_verification::{cache::CacheEntry, TxVerifyEnv}; use lru::LruCache; use std::collections::HashSet; use std::sync::Arc; @@ -72,7 +70,7 @@ impl TxPool { } fn get_by_status(&self, status: &Status) -> Vec<&PoolEntry> { - self.pool_map.entries.get_by_status(status) + self.pool_map.get_by_status(status) } /// Get tx-pool size @@ -128,12 +126,12 @@ impl TxPool { self.pool_map.get_by_id(id).is_some() } - pub(crate) fn set_entry_proposed(&mut self, entry: &TxEntry) { - self.pool_map.set_entry(entry, Status::Proposed) + pub(crate) fn set_entry_proposed(&mut self, short_id: &ProposalShortId) { + self.pool_map.set_entry(short_id, Status::Proposed) } - pub(crate) fn set_entry_gap(&mut self, entry: &TxEntry) { - self.pool_map.set_entry(entry, Status::Gap) + pub(crate) fn set_entry_gap(&mut self, short_id: &ProposalShortId) { + self.pool_map.set_entry(short_id, Status::Gap) } /// Returns tx with cycles corresponding to the id. @@ -146,6 +144,10 @@ impl TxPool { .map(|entry| (entry.inner.transaction().clone(), entry.inner.cycles)) } + pub(crate) fn get_pool_entry(&self, id: &ProposalShortId) -> Option<&PoolEntry> { + self.pool_map.get_by_id(id) + } + pub(crate) fn get_tx_from_pool(&self, id: &ProposalShortId) -> Option<&TransactionView> { self.pool_map .get_by_id(id) @@ -296,63 +298,36 @@ impl TxPool { .map_err(Reject::Resolve) } - pub(crate) fn gap_rtx( - &mut self, - cache_entry: CacheEntry, - size: usize, - timestamp: u64, - rtx: Arc, - ) -> Result { - let snapshot = self.cloned_snapshot(); - let tip_header = snapshot.tip_header(); - let tx_env = Arc::new(TxVerifyEnv::new_proposed(tip_header, 0)); - - let max_cycles = snapshot.consensus().max_block_cycles(); - let verified = verify_rtx( - snapshot, - Arc::clone(&rtx), - tx_env, - &Some(cache_entry), - max_cycles, - )?; - - let entry = - TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); - - self.set_entry_gap(&entry); - Ok(CacheEntry::Completed(verified)) + pub(crate) fn gap_rtx(&mut self, short_id: &ProposalShortId) -> Result<(), Reject> { + match self.get_pool_entry(short_id) { + Some(entry) => { + let tx_hash = entry.inner.transaction().hash(); + if entry.status == Status::Gap { + Err(Reject::Duplicated(tx_hash)) + } else { + debug!("gap_rtx: {:?} => {:?}", tx_hash, short_id); + self.set_entry_gap(short_id); + Ok(()) + } + } + None => Err(Reject::Malformed(String::from("invalid short_id"))), + } } - pub(crate) fn proposed_rtx( - &mut self, - cache_entry: CacheEntry, - size: usize, - timestamp: u64, - rtx: Arc, - ) -> Result { - let snapshot = self.cloned_snapshot(); - let tip_header = snapshot.tip_header(); - let tx_env = Arc::new(TxVerifyEnv::new_proposed(tip_header, 1)); - - let max_cycles = snapshot.consensus().max_block_cycles(); - let verified = verify_rtx( - snapshot, - Arc::clone(&rtx), - tx_env, - &Some(cache_entry), - max_cycles, - )?; - - let entry = - TxEntry::new_with_timestamp(rtx, verified.cycles, verified.fee, size, timestamp); - let tx_hash = entry.transaction().hash(); - debug!( - "proposed_rtx: {:?} => {:?}", - tx_hash, - entry.proposal_short_id() - ); - self.set_entry_proposed(&entry); - Ok(CacheEntry::Completed(verified)) + pub(crate) fn proposed_rtx(&mut self, short_id: &ProposalShortId) -> Result<(), Reject> { + match self.get_pool_entry(short_id) { + Some(entry) => { + let tx_hash = entry.inner.transaction().hash(); + if entry.status == Status::Proposed { + Err(Reject::Duplicated(tx_hash)) + } else { + debug!("proposed_rtx: {:?} => {:?}", tx_hash, short_id); + self.set_entry_proposed(short_id); + Ok(()) + } + } + None => Err(Reject::Malformed(String::from("invalid short_id"))), + } } /// Get to-be-proposal transactions that may be included in the next block. diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 2ee84e2f6e..e921203aff 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -1034,43 +1034,39 @@ fn _update_tx_pool_for_reorg( let mut gaps = Vec::new(); for entry in tx_pool.pool_map.entries.get_by_status(&Status::Gap) { - let e = &entry.inner; - let short_id = e.proposal_short_id(); + let short_id = entry.inner.proposal_short_id(); if snapshot.proposals().contains_proposed(&short_id) { - proposals.push(e.clone()); + proposals.push((short_id, entry.inner.clone())); } } for entry in tx_pool.pool_map.entries.get_by_status(&Status::Pending) { - let e = &entry.inner; - let short_id = e.proposal_short_id(); + let short_id = entry.inner.proposal_short_id(); + let elem = (short_id.clone(), entry.inner.clone()); if snapshot.proposals().contains_proposed(&short_id) { - proposals.push(e.clone()); + proposals.push(elem); } else if snapshot.proposals().contains_gap(&short_id) { - gaps.push(e.clone()); + gaps.push(elem); } } - for entry in proposals { - debug!("begin to proposed: {:x}", entry.transaction().hash()); - let cached = CacheEntry::completed(entry.cycles, entry.fee); - if let Err(e) = - tx_pool.proposed_rtx(cached, entry.size, entry.timestamp, Arc::clone(&entry.rtx)) - { - callbacks.call_reject(tx_pool, &entry, e.clone()); + for (id, entry) in proposals { + debug!("begin to proposed: {:x}", id); + if let Err(e) = tx_pool.proposed_rtx(&id) { + callbacks.call_reject(tx_pool, &entry, e); } else { - callbacks.call_proposed(tx_pool, &entry, false); + callbacks.call_proposed(tx_pool, &entry, false) } } - for entry in gaps { - debug!("begin to gap: {:x}", entry.transaction().hash()); - let tx_hash = entry.transaction().hash(); - let cached = CacheEntry::completed(entry.cycles, entry.fee); - if let Err(e) = - tx_pool.gap_rtx(cached, entry.size, entry.timestamp, Arc::clone(&entry.rtx)) - { - debug!("Failed to add tx to gap {}, reason: {}", tx_hash, e); + for (id, entry) in gaps { + debug!("begin to gap: {:x}", id); + if let Err(e) = tx_pool.gap_rtx(&id) { + debug!( + "Failed to add tx to gap {}, reason: {}", + entry.transaction().hash(), + e + ); callbacks.call_reject(tx_pool, &entry, e.clone()); } } From d04f8cd92bf1e15ab5602866e2368dabfc40bef2 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 8 Jun 2023 22:10:48 +0800 Subject: [PATCH 096/267] check keep_rejected_tx_hashes_days --- tx-pool/src/pool.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 88564c2058..7e1db62c57 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -465,7 +465,8 @@ impl TxPool { fn build_recent_reject(config: &TxPoolConfig) -> Option { if !config.recent_reject.as_os_str().is_empty() { - let recent_reject_ttl = config.keep_rejected_tx_hashes_days as i32 * 24 * 60 * 60; + let recent_reject_ttl = + u8::max(1, config.keep_rejected_tx_hashes_days) as i32 * 24 * 60 * 60; match RecentReject::new( &config.recent_reject, config.keep_rejected_tx_hashes_count, From 2502f1023ad1a6da2b96e8bae82e52a18e77f856 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 13 Jun 2023 16:25:05 +0800 Subject: [PATCH 097/267] add more test for pool --- tx-pool/src/component/tests/proposed.rs | 46 +++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index ff5fa67866..153b9324d1 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -1,7 +1,9 @@ +use crate::component::pool_map::Status; use crate::component::tests::util::{ build_tx, build_tx_with_dep, build_tx_with_header_dep, DEFAULT_MAX_ANCESTORS_COUNT, MOCK_CYCLES, MOCK_FEE, MOCK_SIZE, }; + use crate::component::{entry::TxEntry, pool_map::PoolMap}; use ckb_types::{ bytes::Bytes, @@ -695,3 +697,47 @@ fn test_max_ancestors_with_dep() { assert_eq!(pool.edges.inputs_len(), 1); assert_eq!(pool.edges.outputs_len(), 1); } + +#[test] +fn test_container_bench_add_limits() { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let mut pool = PoolMap::new(1000000); + let tx1 = TxEntry::dummy_resolve( + TransactionBuilder::default().build(), + 100, + Capacity::shannons(100), + 100, + ); + pool.add_entry(tx1.clone(), Status::Proposed).unwrap(); + let mut prev_tx = tx1; + + for _i in 0..1000 { + let next_tx = TxEntry::dummy_resolve( + TransactionBuilder::default() + .input( + CellInput::new_builder() + .previous_output( + OutPoint::new_builder() + .tx_hash(prev_tx.transaction().hash()) + .index(0u32.pack()) + .build(), + ) + .build(), + ) + .witness(Bytes::new().pack()) + .build(), + rng.gen_range(0..1000), + Capacity::shannons(200), + rng.gen_range(0..1000), + ); + pool.add_entry(next_tx.clone(), Status::Proposed).unwrap(); + prev_tx = next_tx; + } + assert_eq!(pool.size(), 1001); + assert_eq!(pool.proposed_size(), 1001); + assert_eq!(pool.pending_size(), 0); + pool.clear(); + assert_eq!(pool.size(), 0); +} From 75c1de9a613f22287fd8b856494ce434d75acbc1 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 13 Jun 2023 16:58:57 +0800 Subject: [PATCH 098/267] change name (add/sub)_ancestor_weight, prepare for (add/sub)_descendant_weight --- tx-pool/src/component/commit_txs_scanner.rs | 2 +- tx-pool/src/component/entry.rs | 4 ++-- tx-pool/src/component/pool_map.rs | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index 4e5d487cdf..f90ac94094 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -209,7 +209,7 @@ impl<'a> CommitTxsScanner<'a> { .remove(desc_id) .or_else(|| self.pool_map.get(desc_id).cloned()) { - desc.sub_entry_weight(entry); + desc.sub_ancestor_weight(entry); self.modified_entries.insert(desc); } } diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index bcce6a2e16..544d7e2817 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -106,7 +106,7 @@ impl TxEntry { } /// Update ancestor state for add an entry - pub fn add_entry_weight(&mut self, entry: &TxEntry) { + pub fn add_ancestor_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_add(1); self.ancestors_size = self.ancestors_size.saturating_add(entry.size); self.ancestors_cycles = self.ancestors_cycles.saturating_add(entry.cycles); @@ -118,7 +118,7 @@ impl TxEntry { } /// Update ancestor state for remove an entry - pub fn sub_entry_weight(&mut self, entry: &TxEntry) { + pub fn sub_ancestor_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_sub(1); self.ancestors_size = self.ancestors_size.saturating_sub(entry.size); self.ancestors_cycles = self.ancestors_cycles.saturating_sub(entry.cycles); diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index dadb3b3bc2..af2c5979e2 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -363,8 +363,8 @@ impl PoolMap { let entry = self.entries.get_by_id(desc_id).unwrap().clone(); let mut child = entry.inner.clone(); match op { - EntryOp::Remove => child.sub_entry_weight(parent), - EntryOp::Add => child.add_entry_weight(parent), + EntryOp::Remove => child.sub_ancestor_weight(parent), + EntryOp::Add => child.add_ancestor_weight(parent), } let short_id = child.proposal_short_id(); self.entries.modify_by_id(&short_id, |e| { @@ -482,7 +482,7 @@ impl PoolMap { .entries .get_by_id(ancestor_id) .expect("pool consistent"); - entry.add_entry_weight(&ancestor.inner); + entry.add_ancestor_weight(&ancestor.inner); } if entry.ancestors_count > self.max_ancestors_count { eprintln!("debug: exceeded maximum ancestors count"); From 6b70153b44b68643be61192c6201bbdaac3715a2 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 13 Jun 2023 17:16:46 +0800 Subject: [PATCH 099/267] remove ancestors_size from AncestorsScoreSortKey since it is useless --- tx-pool/src/component/entry.rs | 1 - tx-pool/src/component/score_key.rs | 1 - tx-pool/src/component/tests/proposed.rs | 19 +++++++++++-------- tx-pool/src/component/tests/score_key.rs | 2 -- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 544d7e2817..2b8bfa6f56 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -160,7 +160,6 @@ impl From<&TxEntry> for AncestorsScoreSortKey { weight, id: entry.proposal_short_id(), ancestors_fee: entry.ancestors_fee, - ancestors_size: entry.ancestors_size, ancestors_weight, } } diff --git a/tx-pool/src/component/score_key.rs b/tx-pool/src/component/score_key.rs index 011fb4000b..18dd48fcb2 100644 --- a/tx-pool/src/component/score_key.rs +++ b/tx-pool/src/component/score_key.rs @@ -9,7 +9,6 @@ pub struct AncestorsScoreSortKey { pub id: ProposalShortId, pub ancestors_fee: Capacity, pub ancestors_weight: u64, - pub ancestors_size: usize, } impl AncestorsScoreSortKey { diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index 153b9324d1..2bfa5d2449 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -104,11 +104,11 @@ fn test_add_entry_from_detached() { assert_eq!(pool.size(), 3); - let expected = vec![(id1.clone(), 1), (id2.clone(), 2), (id3.clone(), 3)]; + let expected = vec![id1.clone(), id2.clone(), id3.clone()]; let got = pool .entries .iter() - .map(|(_, key)| (key.id.clone(), key.score.ancestors_size)) + .map(|(_, key)| key.id.clone()) .collect::>(); assert_eq!(expected, got); @@ -144,11 +144,11 @@ fn test_add_entry_from_detached() { assert_eq!(pool.edges.inputs_len(), 2); assert_eq!(pool.entries.len(), 2); - let left = vec![(id2.clone(), 1), (id3.clone(), 2)]; + let left = vec![id2.clone(), id3.clone()]; let got = pool .entries .iter() - .map(|(_, key)| (key.id.clone(), key.score.ancestors_size)) + .map(|(_, key)| key.id.clone()) .collect::>(); assert_eq!(left, got); @@ -160,10 +160,13 @@ fn test_add_entry_from_detached() { assert!(pool.add_proposed(entry1).unwrap()); - for (idx, (_, entry)) in pool.entries.iter().enumerate() { - assert_eq!(entry.id, expected[idx].0); - assert_eq!(entry.score.ancestors_size, expected[idx].1); - } + let ids = pool + .entries + .iter() + .map(|(_, entry)| entry.inner.proposal_short_id()) + .collect::>(); + assert_eq!(ids, expected); + { assert!(pool.links.get_parents(&id1).unwrap().is_empty()); assert_eq!( diff --git a/tx-pool/src/component/tests/score_key.rs b/tx-pool/src/component/tests/score_key.rs index 22da657f84..09475f3d19 100644 --- a/tx-pool/src/component/tests/score_key.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -30,7 +30,6 @@ fn test_min_fee_and_weight() { id: ProposalShortId::new([0u8; 10]), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, - ancestors_size: 0, }; key.min_fee_and_weight() }) @@ -75,7 +74,6 @@ fn test_ancestors_sorted_key_order() { id: ProposalShortId::new(id), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, - ancestors_size: 0, } }) .collect::>(); From 1e5a80e8d36c05918c2905b32ecaa5fa5ba411bb Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 13 Jun 2023 22:05:45 +0800 Subject: [PATCH 100/267] trivial refactor for PoolTransactionEntry --- util/launcher/src/shared_builder.rs | 31 ++++++++++------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/util/launcher/src/shared_builder.rs b/util/launcher/src/shared_builder.rs index 09f9fd862c..96d750a198 100644 --- a/util/launcher/src/shared_builder.rs +++ b/util/launcher/src/shared_builder.rs @@ -421,18 +421,19 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: let notify_pending = notify.clone(); let tx_relay_sender = tx_pool_builder.tx_relay_sender(); + let create_notify_entry = |entry: &TxEntry| PoolTransactionEntry { + transaction: entry.rtx.transaction.clone(), + cycles: entry.cycles, + size: entry.size, + fee: entry.fee, + timestamp: entry.timestamp, + }; tx_pool_builder.register_pending(Box::new(move |tx_pool: &mut TxPool, entry: &TxEntry| { // update statics tx_pool.update_statics_for_add_tx(entry.size, entry.cycles); // notify - let notify_tx_entry = PoolTransactionEntry { - transaction: entry.rtx.transaction.clone(), - cycles: entry.cycles, - size: entry.size, - fee: entry.fee, - timestamp: entry.timestamp, - }; + let notify_tx_entry = create_notify_entry(entry); notify_pending.notify_new_transaction(notify_tx_entry); })); @@ -445,13 +446,7 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: } // notify - let notify_tx_entry = PoolTransactionEntry { - transaction: entry.rtx.transaction.clone(), - cycles: entry.cycles, - size: entry.size, - fee: entry.fee, - timestamp: entry.timestamp, - }; + let notify_tx_entry = create_notify_entry(entry); notify_proposed.notify_proposed_transaction(notify_tx_entry); }, )); @@ -483,13 +478,7 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: } // notify - let notify_tx_entry = PoolTransactionEntry { - transaction: entry.rtx.transaction.clone(), - cycles: entry.cycles, - size: entry.size, - fee: entry.fee, - timestamp: entry.timestamp, - }; + let notify_tx_entry = create_notify_entry(entry); notify_reject.notify_reject_transaction(notify_tx_entry, reject); }, )); From f5e45d376a41dface2381b343d8e34df7507a9b5 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 14 Jun 2023 05:20:59 +0800 Subject: [PATCH 101/267] high score tx_entry will not blocked at pending --- tx-pool/src/component/entry.rs | 1 + tx-pool/src/component/pool_map.rs | 21 +++++---- tx-pool/src/component/score_key.rs | 8 +++- tx-pool/src/component/tests/pending.rs | 55 ++++++++++++++++++++++++ tx-pool/src/component/tests/score_key.rs | 2 + tx-pool/src/pool.rs | 2 - 6 files changed, 78 insertions(+), 11 deletions(-) diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 2b8bfa6f56..67cc2c8035 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -161,6 +161,7 @@ impl From<&TxEntry> for AncestorsScoreSortKey { id: entry.proposal_short_id(), ancestors_fee: entry.ancestors_fee, ancestors_weight, + timestamp: entry.timestamp, } } } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index af2c5979e2..c6d232fcd3 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -140,11 +140,7 @@ impl PoolMap { } pub(crate) fn score_sorted_iter(&self) -> impl Iterator { - self.entries - .iter_by_score() - .rev() - .filter(|entry| entry.status == Status::Proposed) - .map(|entry| &entry.inner) + self.score_sorted_iter_by(Status::Proposed) } pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { @@ -296,12 +292,13 @@ impl PoolMap { proposals: &mut HashSet, status: &Status, ) { - for entry in self.entries.get_by_status(status) { + for entry in self.score_sorted_iter_by(*status) { if proposals.len() == limit { break; } - if !exclusion.contains(&entry.id) { - proposals.insert(entry.id.clone()); + let id = entry.proposal_short_id(); + if !exclusion.contains(&id) { + proposals.insert(id); } } } @@ -341,6 +338,14 @@ impl PoolMap { self.links.clear(); } + fn score_sorted_iter_by(&self, status: Status) -> impl Iterator { + self.entries + .iter_by_score() + .rev() + .filter(move |entry| entry.status == status) + .map(|entry| &entry.inner) + } + fn remove_entry_links(&mut self, id: &ProposalShortId) { if let Some(parents) = self.links.get_parents(id).cloned() { for parent in parents { diff --git a/tx-pool/src/component/score_key.rs b/tx-pool/src/component/score_key.rs index 18dd48fcb2..1a9843b7ad 100644 --- a/tx-pool/src/component/score_key.rs +++ b/tx-pool/src/component/score_key.rs @@ -9,6 +9,7 @@ pub struct AncestorsScoreSortKey { pub id: ProposalShortId, pub ancestors_fee: Capacity, pub ancestors_weight: u64, + pub timestamp: u64, } impl AncestorsScoreSortKey { @@ -42,7 +43,12 @@ impl Ord for AncestorsScoreSortKey { if self_weight == other_weight { // if fee rate weight is same, then compare with ancestor weight if self.ancestors_weight == other.ancestors_weight { - self.id.raw_data().cmp(&other.id.raw_data()) + if self.timestamp == other.timestamp { + self.id.raw_data().cmp(&other.id.raw_data()) + } else { + // NOTE: we use timestamp to compare, so the order is reversed + self.timestamp.cmp(&other.timestamp).reverse() + } } else { self.ancestors_weight.cmp(&other.ancestors_weight) } diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index b07e2e96e6..764f6d0026 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -6,9 +6,11 @@ use crate::component::{ entry::TxEntry, pool_map::{PoolMap, Status}, }; +use ckb_types::core::Capacity; use ckb_types::packed::OutPoint; use ckb_types::{h256, packed::Byte32, prelude::*}; use std::collections::HashSet; +use std::time::Duration; #[test] fn test_basic() { @@ -206,7 +208,11 @@ fn test_fill_proposals() { 3, ); let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); assert!(pool.add_entry(entry1, Status::Pending).unwrap()); assert!(pool.add_entry(entry2, Status::Pending).unwrap()); @@ -242,6 +248,55 @@ fn test_fill_proposals() { assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); } +#[test] +fn test_fill_proposals_with_high_score() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), 2, Capacity::shannons(50), 2); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), 2, Capacity::shannons(100), 2); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let id1 = tx1.proposal_short_id(); + let id2 = tx2.proposal_short_id(); + let id3 = tx3.proposal_short_id(); + + let mut ret = HashSet::new(); + pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!( + ret, + HashSet::from_iter(vec![id3.clone(), id2.clone(), id1.clone()]) + ); + + let mut ret = HashSet::new(); + pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id3.clone()])); + + let mut ret = HashSet::new(); + pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id3.clone(), id2.clone()])); + + let mut ret = HashSet::new(); + let mut exclusion = HashSet::new(); + exclusion.insert(id2); + pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); + assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); +} + #[test] fn test_edges() { let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); diff --git a/tx-pool/src/component/tests/score_key.rs b/tx-pool/src/component/tests/score_key.rs index 09475f3d19..7acc22a895 100644 --- a/tx-pool/src/component/tests/score_key.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -30,6 +30,7 @@ fn test_min_fee_and_weight() { id: ProposalShortId::new([0u8; 10]), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, + timestamp: 0, }; key.min_fee_and_weight() }) @@ -74,6 +75,7 @@ fn test_ancestors_sorted_key_order() { id: ProposalShortId::new(id), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, + timestamp: 0, } }) .collect::>(); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 7e1db62c57..2fdbe23d45 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -331,8 +331,6 @@ impl TxPool { } /// Get to-be-proposal transactions that may be included in the next block. - /// TODO: do we need to consider the something like score, so that we can - /// provide best transactions to be proposed. pub(crate) fn get_proposals( &self, limit: usize, From 737b311476575aa4dff07d17fe23ccb159b65c93 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 14 Jun 2023 19:13:43 +0800 Subject: [PATCH 102/267] add descendants related info for evict, has performance regression --- test/src/specs/tx_pool/limit.rs | 2 +- tx-pool/src/component/entry.rs | 69 ++++++++++-- tx-pool/src/component/pool_map.rs | 32 +++++- tx-pool/src/component/tests/entry.rs | 54 +++++++++ tx-pool/src/component/tests/mod.rs | 1 + tx-pool/src/component/tests/pending.rs | 145 +++++++++++++++++++++++++ tx-pool/src/pool.rs | 11 +- util/types/src/core/tx_pool.rs | 4 + 8 files changed, 301 insertions(+), 17 deletions(-) create mode 100644 tx-pool/src/component/tests/entry.rs diff --git a/test/src/specs/tx_pool/limit.rs b/test/src/specs/tx_pool/limit.rs index b4defc175b..ad84e36124 100644 --- a/test/src/specs/tx_pool/limit.rs +++ b/test/src/specs/tx_pool/limit.rs @@ -34,7 +34,7 @@ impl Spec for SizeLimit { let max_tx_num = (MAX_MEM_SIZE_FOR_SIZE_LIMIT as u64) / one_tx_size; - info!("Generate as much as possible txs on node"); + info!("Generate as much as possible txs on : {}", max_tx_num); (0..(max_tx_num - 1)).for_each(|_| { let tx = node.new_transaction(hash.clone()); hash = node.rpc_client().send_transaction(tx.data().into()); diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 67cc2c8035..3638e21285 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -31,6 +31,14 @@ pub struct TxEntry { pub ancestors_cycles: Cycle, /// ancestors txs count pub ancestors_count: usize, + /// descendants txs fee + pub descendants_fee: Capacity, + /// descendants txs size + pub descendants_size: usize, + /// descendants txs cycles + pub descendants_cycles: Cycle, + /// descendants txs count + pub descendants_count: usize, /// The unix timestamp when entering the Txpool, unit: Millisecond pub timestamp: u64, } @@ -58,6 +66,10 @@ impl TxEntry { ancestors_size: size, ancestors_fee: fee, ancestors_cycles: cycles, + descendants_fee: fee, + descendants_size: size, + descendants_cycles: cycles, + descendants_count: 1, ancestors_count: 1, } } @@ -105,6 +117,30 @@ impl TxEntry { FeeRate::calculate(self.fee, weight) } + /// Update ancestor state for add an entry + pub fn add_descendant_weight(&mut self, entry: &TxEntry) { + self.descendants_count = self.descendants_count.saturating_add(1); + self.descendants_size = self.descendants_size.saturating_add(entry.size); + self.descendants_cycles = self.descendants_cycles.saturating_add(entry.cycles); + self.descendants_fee = Capacity::shannons( + self.descendants_fee + .as_u64() + .saturating_add(entry.fee.as_u64()), + ); + } + + /// Update ancestor state for remove an entry + pub fn sub_descendant_weight(&mut self, entry: &TxEntry) { + self.descendants_count = self.descendants_count.saturating_sub(1); + self.descendants_size = self.descendants_size.saturating_sub(entry.size); + self.descendants_cycles = self.descendants_cycles.saturating_sub(entry.cycles); + self.descendants_fee = Capacity::shannons( + self.descendants_fee + .as_u64() + .saturating_sub(entry.fee.as_u64()), + ); + } + /// Update ancestor state for add an entry pub fn add_ancestor_weight(&mut self, entry: &TxEntry) { self.ancestors_count = self.ancestors_count.saturating_add(1); @@ -130,11 +166,16 @@ impl TxEntry { } /// Reset ancestor state by remove - pub fn reset_ancestors_state(&mut self) { + pub fn reset_statistic_state(&mut self) { self.ancestors_count = 1; self.ancestors_size = self.size; self.ancestors_cycles = self.cycles; self.ancestors_fee = self.fee; + + self.descendants_count = 1; + self.descendants_size = self.size; + self.descendants_cycles = self.cycles; + self.descendants_fee = self.fee; } /// Converts entry to a `TxEntryInfo`. @@ -145,6 +186,8 @@ impl TxEntry { fee: self.fee, ancestors_size: self.ancestors_size as u64, ancestors_cycles: self.ancestors_cycles, + descendants_size: self.descendants_size as u64, + descendants_cycles: self.descendants_cycles, ancestors_count: self.ancestors_count as u64, timestamp: self.timestamp, } @@ -190,22 +233,28 @@ impl Ord for TxEntry { } } -/// Currently we do not have trace descendants, -/// so first take the simplest strategy, -/// first compare fee_rate, select the smallest fee_rate, +/// First compare fee_rate, select the smallest fee_rate, /// and then select the latest timestamp, for eviction, /// the latest timestamp which also means that the fewer descendants may exist. #[derive(Eq, PartialEq, Clone, Debug)] pub struct EvictKey { - fee_rate: FeeRate, - timestamp: u64, + pub fee_rate: FeeRate, + pub timestamp: u64, + pub descendants_count: usize, } impl From<&TxEntry> for EvictKey { fn from(entry: &TxEntry) -> Self { + let weight = get_transaction_weight(entry.size, entry.cycles); + let descendants_weight = + get_transaction_weight(entry.descendants_size, entry.descendants_cycles); + + let descendants_feerate = FeeRate::calculate(entry.descendants_fee, descendants_weight); + let feerate = FeeRate::calculate(entry.fee, weight); EvictKey { - fee_rate: entry.fee_rate(), + fee_rate: descendants_feerate.max(feerate), timestamp: entry.timestamp, + descendants_count: entry.descendants_count, } } } @@ -219,7 +268,11 @@ impl PartialOrd for EvictKey { impl Ord for EvictKey { fn cmp(&self, other: &Self) -> Ordering { if self.fee_rate == other.fee_rate { - self.timestamp.cmp(&other.timestamp).reverse() + if self.descendants_count == other.descendants_count { + self.timestamp.cmp(&other.timestamp) + } else { + self.descendants_count.cmp(&other.descendants_count) + } } else { self.fee_rate.cmp(&other.fee_rate) } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index c6d232fcd3..cb0393d4ec 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -184,7 +184,7 @@ impl PoolMap { } trace!("pool_map.add_{:?} {}", status, entry.transaction().hash()); self.record_entry_links(&mut entry)?; - self.insert_entry(&entry, status)?; + self.insert_entry(&entry, status); self.record_entry_deps(&entry); self.record_entry_edges(&entry); Ok(true) @@ -200,6 +200,7 @@ impl PoolMap { pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { if let Some(entry) = self.entries.remove_by_id(id) { + self.update_ancestors_index_key(&entry.inner, EntryOp::Remove); self.update_descendants_index_key(&entry.inner, EntryOp::Remove); self.remove_entry_deps(&entry.inner); self.remove_entry_edges(&entry.inner); @@ -325,10 +326,10 @@ impl PoolMap { self.entries.iter().map(|(_, entry)| entry) } - pub(crate) fn next_evict_entry(&self) -> Option { + pub(crate) fn next_evict_entry(&self, status: Status) -> Option { self.entries .iter_by_evict_key() - .next() + .find(move |entry| entry.status == status) .map(|entry| entry.id.clone()) } @@ -360,6 +361,25 @@ impl PoolMap { self.links.remove(id); } + fn update_ancestors_index_key(&mut self, child: &TxEntry, op: EntryOp) { + let ancestors: HashSet = + self.links.calc_ancestors(&child.proposal_short_id()); + for anc_id in &ancestors { + // update parent score + let entry = self.entries.get_by_id(anc_id).unwrap().clone(); + let mut parent = entry.inner.clone(); + match op { + EntryOp::Remove => parent.sub_descendant_weight(child), + EntryOp::Add => parent.add_descendant_weight(child), + } + let short_id = parent.proposal_short_id(); + self.entries.modify_by_id(&short_id, |e| { + e.evict_key = parent.as_evict_key(); + e.inner = parent; + }); + } + } + fn update_descendants_index_key(&mut self, parent: &TxEntry, op: EntryOp) { let descendants: HashSet = self.links.calc_descendants(&parent.proposal_short_id()); @@ -374,7 +394,6 @@ impl PoolMap { let short_id = child.proposal_short_id(); self.entries.modify_by_id(&short_id, |e| { e.score = child.as_score_key(); - e.evict_key = child.as_evict_key(); e.inner = child; }); } @@ -426,6 +445,8 @@ impl PoolMap { if !children.is_empty() { self.update_descendants_from_detached(&tx_short_id, children); } + // update ancestors + self.update_ancestors_index_key(entry, EntryOp::Add); } // update_descendants_from_detached is used to update @@ -542,7 +563,7 @@ impl PoolMap { self.edges.header_deps.remove(&id); } - fn insert_entry(&mut self, entry: &TxEntry, status: Status) -> Result { + fn insert_entry(&mut self, entry: &TxEntry, status: Status) { let tx_short_id = entry.proposal_short_id(); let score = entry.as_score_key(); let evict_key = entry.as_evict_key(); @@ -553,7 +574,6 @@ impl PoolMap { inner: entry.clone(), evict_key, }); - Ok(true) } } diff --git a/tx-pool/src/component/tests/entry.rs b/tx-pool/src/component/tests/entry.rs new file mode 100644 index 0000000000..8aa7edf3ff --- /dev/null +++ b/tx-pool/src/component/tests/entry.rs @@ -0,0 +1,54 @@ +use ckb_types::core::{Capacity, FeeRate}; + +use crate::component::entry::EvictKey; + +#[test] +fn test_min_fee_and_weight_evict() { + let mut result = vec![(500, 10, 30), (10, 10, 31), (100, 10, 32)] + .into_iter() + .map(|(fee, weight, timestamp)| EvictKey { + fee_rate: FeeRate::calculate(Capacity::shannons(fee), weight), + timestamp, + descendants_count: 0, + }) + .collect::>(); + result.sort(); + assert_eq!( + result.iter().map(|key| key.timestamp).collect::>(), + vec![31, 32, 30] + ); +} + +#[test] +fn test_min_timestamp_evict() { + let mut result = vec![(500, 10, 30), (500, 10, 31), (500, 10, 32)] + .into_iter() + .map(|(fee, weight, timestamp)| EvictKey { + fee_rate: FeeRate::calculate(Capacity::shannons(fee), weight), + timestamp, + descendants_count: 0, + }) + .collect::>(); + result.sort(); + assert_eq!( + result.iter().map(|key| key.timestamp).collect::>(), + vec![30, 31, 32] + ); +} + +#[test] +fn test_min_weight_evict() { + let mut result = vec![(500, 10, 30), (500, 12, 31), (500, 13, 32)] + .into_iter() + .map(|(fee, weight, timestamp)| EvictKey { + fee_rate: FeeRate::calculate(Capacity::shannons(fee), weight), + timestamp, + descendants_count: 0, + }) + .collect::>(); + result.sort(); + assert_eq!( + result.iter().map(|key| key.timestamp).collect::>(), + vec![32, 31, 30] + ); +} diff --git a/tx-pool/src/component/tests/mod.rs b/tx-pool/src/component/tests/mod.rs index d9a3529707..fb851e4855 100644 --- a/tx-pool/src/component/tests/mod.rs +++ b/tx-pool/src/component/tests/mod.rs @@ -1,4 +1,5 @@ mod chunk; +mod entry; mod pending; mod proposed; mod recent_reject; diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index 764f6d0026..f2c3b6b19d 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -314,3 +314,148 @@ fn test_edges() { edges.delete_txid_by_dep(outpoint, &short_id2); assert!(edges.deps.is_empty()); } + +#[test] +fn test_pool_evict() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let e1 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e1, tx1.proposal_short_id()); + pool.remove_entry(&e1); + + let e2 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e2, tx2.proposal_short_id()); + pool.remove_entry(&e2); + + let e3 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e3, tx3.proposal_short_id()); + pool.remove_entry(&e3); + + assert!(pool.next_evict_entry(Status::Pending).is_none()); +} + +#[test] +fn test_pool_min_weight_evict() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), 2, Capacity::shannons(100), 2); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), 2, Capacity::shannons(50), 2); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), 2, Capacity::shannons(10), 2); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let e1 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e1, tx3.proposal_short_id()); + pool.remove_entry(&e1); + + let e2 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e2, tx2.proposal_short_id()); + pool.remove_entry(&e2); + + let e3 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e3, tx1.proposal_short_id()); + pool.remove_entry(&e3); + + assert!(pool.next_evict_entry(Status::Pending).is_none()); +} + +#[test] +fn test_pool_max_size_evict() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx( + vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], + 3, + ); + let tx3 = build_tx_with_dep( + vec![(&h256!("0x4").pack(), 1)], + vec![(&h256!("0x5").pack(), 1)], + 3, + ); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), 2, Capacity::shannons(100), 3); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), 2, Capacity::shannons(100), 2); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), 2, Capacity::shannons(100), 1); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let e1 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e1, tx1.proposal_short_id()); + pool.remove_entry(&e1); + + let e2 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e2, tx2.proposal_short_id()); + pool.remove_entry(&e2); + + let e3 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e3, tx3.proposal_short_id()); + pool.remove_entry(&e3); + + assert!(pool.next_evict_entry(Status::Pending).is_none()); +} + +#[test] +fn test_pool_min_descendants_evict() { + let mut pool = PoolMap::new(1000); + let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); + let tx2 = build_tx(vec![(&tx1.hash(), 1), (&h256!("0x3").pack(), 1)], 3); + let tx3 = build_tx_with_dep(vec![(&tx2.hash(), 1)], vec![(&h256!("0x5").pack(), 1)], 3); + let entry1 = TxEntry::dummy_resolve(tx1.clone(), 2, Capacity::shannons(100), 1); + std::thread::sleep(Duration::from_millis(1)); + let entry2 = TxEntry::dummy_resolve(tx2.clone(), 2, Capacity::shannons(100), 1); + std::thread::sleep(Duration::from_millis(1)); + let entry3 = TxEntry::dummy_resolve(tx3.clone(), 2, Capacity::shannons(100), 1); + + assert!(pool.add_entry(entry1, Status::Pending).unwrap()); + assert!(pool.add_entry(entry2, Status::Pending).unwrap()); + assert!(pool.add_entry(entry3, Status::Pending).unwrap()); + + let e1 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e1, tx3.proposal_short_id()); + pool.remove_entry(&e1); + + let e2 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e2, tx2.proposal_short_id()); + pool.remove_entry(&e2); + + let e3 = pool.next_evict_entry(Status::Pending).unwrap(); + assert_eq!(e3, tx1.proposal_short_id()); + pool.remove_entry(&e3); + + assert!(pool.next_evict_entry(Status::Pending).is_none()); +} diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 2fdbe23d45..afcb56aad2 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -216,7 +216,14 @@ impl TxPool { // Remove transactions from the pool until total size <= size_limit. pub(crate) fn limit_size(&mut self, callbacks: &Callbacks) { while self.total_tx_size > self.config.max_tx_pool_size { - if let Some(id) = self.pool_map.next_evict_entry() { + let next_evict_entry = || { + self.pool_map + .next_evict_entry(Status::Pending) + .or_else(|| self.pool_map.next_evict_entry(Status::Gap)) + .or_else(|| self.pool_map.next_evict_entry(Status::Proposed)) + }; + + if let Some(id) = next_evict_entry() { let removed = self.pool_map.remove_entry_and_descendants(&id); for entry in removed { let tx_hash = entry.transaction().hash(); @@ -251,7 +258,7 @@ impl TxPool { entries.sort_unstable_by_key(|entry| entry.ancestors_count); for mut entry in entries { let tx_hash = entry.transaction().hash(); - entry.reset_ancestors_state(); + entry.reset_statistic_state(); let ret = self.add_pending(entry); debug!( "remove_by_detached_proposal from {:?} {} add_pending {:?}", diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index d5b41e1d4d..43fcd7e547 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -128,6 +128,10 @@ pub struct TxEntryInfo { pub ancestors_size: u64, /// Cycles of in-tx-pool ancestor transactions pub ancestors_cycles: u64, + /// Size of in-tx-pool descendants transactions + pub descendants_size: u64, + /// Cycles of in-tx-pool descendants transactions + pub descendants_cycles: u64, /// Number of in-tx-pool ancestor transactions pub ancestors_count: u64, /// The unix timestamp when entering the Txpool, unit: Millisecond From 90b41032232fe612a56b8ba55c94635e0ce3693e Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 14 Jun 2023 23:30:38 +0800 Subject: [PATCH 103/267] cleanup and remove unused function names and tests --- tx-pool/src/component/pool_map.rs | 59 +++++-------------------- tx-pool/src/component/tests/pending.rs | 29 ------------ tx-pool/src/component/tests/proposed.rs | 14 +++--- tx-pool/src/pool.rs | 4 +- 4 files changed, 19 insertions(+), 87 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index cb0393d4ec..596a9dce8f 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -117,11 +117,6 @@ impl PoolMap { self.add_entry(entry, Status::Proposed) } - #[cfg(test)] - pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView) -> Option { - self.remove_entry(&tx.proposal_short_id()) - } - pub(crate) fn get_by_id(&self, id: &ProposalShortId) -> Option<&PoolEntry> { self.entries.get_by_id(id) } @@ -192,10 +187,11 @@ impl PoolMap { /// Change the status of the entry, only used for `gap_rtx` and `proposed_rtx` pub(crate) fn set_entry(&mut self, short_id: &ProposalShortId, status: Status) { - let _ = self.entries.get_by_id(short_id).expect("unconsistent pool"); - self.entries.modify_by_id(short_id, |e| { - e.status = status; - }); + self.entries + .modify_by_id(short_id, |e| { + e.status = status; + }) + .expect("unconsistent pool"); } pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { @@ -304,24 +300,6 @@ impl PoolMap { } } - #[cfg(test)] - pub(crate) fn remove_entries_by_filter bool>( - &mut self, - status: &Status, - mut predicate: P, - ) -> Vec { - let mut removed = Vec::new(); - for entry in self.entries.get_by_status(status) { - if predicate(&entry.id, &entry.inner) { - removed.push(entry.inner.clone()); - } - } - for entry in &removed { - self.remove_entry(&entry.proposal_short_id()); - } - removed - } - pub(crate) fn iter(&self) -> impl Iterator { self.entries.iter().map(|(_, entry)| entry) } @@ -443,31 +421,16 @@ impl PoolMap { } // update children if !children.is_empty() { - self.update_descendants_from_detached(&tx_short_id, children); - } - // update ancestors - self.update_ancestors_index_key(entry, EntryOp::Add); - } - - // update_descendants_from_detached is used to update - // the descendants for a single transaction that has been added to the - // pool but may have child transactions in the pool, eg during a - // chain reorg. - fn update_descendants_from_detached( - &mut self, - id: &ProposalShortId, - children: HashSet, - ) { - if let Some(entry) = self.get_by_id(id).cloned() { for child in &children { - self.links.add_parent(child, id.clone()); + self.links.add_parent(child, tx_short_id.clone()); } - if let Some(links) = self.links.inner.get_mut(id) { + if let Some(links) = self.links.inner.get_mut(&tx_short_id) { links.children.extend(children); } - - self.update_descendants_index_key(&entry.inner, EntryOp::Add); + self.update_descendants_index_key(entry, EntryOp::Add); } + // update ancestors + self.update_ancestors_index_key(entry, EntryOp::Add); } /// Record the links for entry @@ -517,7 +480,6 @@ impl PoolMap { for cell_dep in entry.transaction().cell_deps() { let dep_pt = cell_dep.out_point(); - // insert dep-ref map self.edges .deps .entry(dep_pt) @@ -529,7 +491,6 @@ impl PoolMap { self.links.add_child(parent, short_id.clone()); } - // insert links let links = TxLinks { parents, children: Default::default(), diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index f2c3b6b19d..2445593caf 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -165,35 +165,6 @@ fn test_remove_entry() { assert!(pool.edges.header_deps.is_empty()); } -#[test] -fn test_remove_entries_by_filter() { - let mut pool = PoolMap::new(1000); - let tx1 = build_tx(vec![(&Byte32::zero(), 1), (&h256!("0x1").pack(), 1)], 1); - let tx2 = build_tx( - vec![(&h256!("0x2").pack(), 1), (&h256!("0x3").pack(), 1)], - 3, - ); - let tx3 = build_tx_with_dep( - vec![(&h256!("0x4").pack(), 1)], - vec![(&h256!("0x5").pack(), 1)], - 3, - ); - let entry1 = TxEntry::dummy_resolve(tx1.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry2 = TxEntry::dummy_resolve(tx2.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - let entry3 = TxEntry::dummy_resolve(tx3.clone(), MOCK_CYCLES, MOCK_FEE, MOCK_SIZE); - assert!(pool.add_entry(entry1, Status::Pending).unwrap()); - assert!(pool.add_entry(entry2, Status::Pending).unwrap()); - assert!(pool.add_entry(entry3, Status::Pending).unwrap()); - - pool.remove_entries_by_filter(&Status::Pending, |id, _tx_entry| { - id == &tx1.proposal_short_id() - }); - - assert!(!pool.contains_key(&tx1.proposal_short_id())); - assert!(pool.contains_key(&tx2.proposal_short_id())); - assert!(pool.contains_key(&tx3.proposal_short_id())); -} - #[test] fn test_fill_proposals() { let mut pool = PoolMap::new(1000); diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index 2bfa5d2449..4f864e3a47 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -72,7 +72,7 @@ fn test_add_entry() { assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 3); - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.outputs_len(), 1); assert_eq!(pool.edges.inputs_len(), 1); } @@ -139,7 +139,7 @@ fn test_add_entry_from_detached() { assert!(pool.links.get_children(&id3).unwrap().is_empty()); } - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); assert_eq!(pool.entries.len(), 2); @@ -221,7 +221,7 @@ fn test_add_roots() { assert_eq!(pool.edges.outputs_len(), 4); assert_eq!(pool.edges.inputs_len(), 4); - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.outputs_len(), 3); assert_eq!(pool.edges.inputs_len(), 2); @@ -282,7 +282,7 @@ fn test_add_no_roots() { assert_eq!(pool.edges.outputs_len(), 13); assert_eq!(pool.edges.inputs_len(), 7); - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.outputs_len(), 10); assert_eq!(pool.edges.inputs_len(), 6); @@ -584,7 +584,7 @@ fn test_dep_group() { assert_eq!(get_deps_len(&pool, &tx2_out_point), 1); assert_eq!(get_deps_len(&pool, &tx3_out_point), 0); - pool.remove_committed_tx(&tx3); + pool.remove_entry(&tx3.proposal_short_id()); assert_eq!(get_deps_len(&pool, &tx1_out_point), 0); assert_eq!(get_deps_len(&pool, &tx2_out_point), 0); assert_eq!(get_deps_len(&pool, &tx3_out_point), 0); @@ -642,8 +642,8 @@ fn test_disordered_remove_committed_tx() { assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); - pool.remove_committed_tx(&tx2); - pool.remove_committed_tx(&tx1); + pool.remove_entry(&tx2.proposal_short_id()); + pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.inputs_len(), 0); assert_eq!(pool.edges.outputs_len(), 0); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index afcb56aad2..154bd4b514 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -173,7 +173,7 @@ impl TxPool { } } - pub(crate) fn resolve_conflict_header_dep( + fn resolve_conflict_header_dep( &mut self, detached_headers: &HashSet, callbacks: &Callbacks, @@ -183,7 +183,7 @@ impl TxPool { } } - pub(crate) fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { + fn remove_committed_tx(&mut self, tx: &TransactionView, callbacks: &Callbacks) { let short_id = tx.proposal_short_id(); if let Some(entry) = self.pool_map.remove_entry(&short_id) { callbacks.call_committed(self, &entry) From ae6d381678f80b388b5e8acd7993159bf6ae195f Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 26 Jun 2023 11:33:36 +0800 Subject: [PATCH 104/267] remove outputs in edges --- tx-pool/src/component/edges.rs | 49 ------------------------- tx-pool/src/component/pool_map.rs | 44 +++++++--------------- tx-pool/src/component/tests/pending.rs | 4 -- tx-pool/src/component/tests/proposed.rs | 12 ------ tx-pool/src/pool.rs | 1 - 5 files changed, 13 insertions(+), 97 deletions(-) diff --git a/tx-pool/src/component/edges.rs b/tx-pool/src/component/edges.rs index 129b33c0a2..5ed01fbbe3 100644 --- a/tx-pool/src/component/edges.rs +++ b/tx-pool/src/component/edges.rs @@ -1,18 +1,10 @@ use ckb_types::packed::{Byte32, OutPoint, ProposalShortId}; use std::collections::{hash_map::Entry, HashMap, HashSet}; -#[derive(Debug, PartialEq, Clone)] -pub(crate) enum OutPointStatus { - UnConsumed, - Consumed(ProposalShortId), -} - #[derive(Default, Debug, Clone)] pub(crate) struct Edges { /// input-txid map represent in-pool tx's inputs pub(crate) inputs: HashMap, - /// output-op map represent in-pool tx's outputs - pub(crate) outputs: HashMap, /// dep-set map represent in-pool tx's deps pub(crate) deps: HashMap>, /// dep-set map represent in-pool tx's header deps @@ -20,11 +12,6 @@ pub(crate) struct Edges { } impl Edges { - #[cfg(test)] - pub(crate) fn outputs_len(&self) -> usize { - self.outputs.len() - } - #[cfg(test)] pub(crate) fn inputs_len(&self) -> usize { self.inputs.len() @@ -48,21 +35,6 @@ impl Edges { self.inputs.remove(out_point) } - pub(crate) fn remove_output(&mut self, out_point: &OutPoint) -> Option { - match self.outputs.remove(out_point) { - Some(OutPointStatus::Consumed(id)) => Some(id), - _ => None, - } - } - - pub(crate) fn insert_unconsumed_output(&mut self, out_point: OutPoint) { - self.outputs.insert(out_point, OutPointStatus::UnConsumed); - } - - pub(crate) fn insert_consumed_output(&mut self, out_point: OutPoint, id: ProposalShortId) { - self.outputs.insert(out_point, OutPointStatus::Consumed(id)); - } - pub(crate) fn get_input_ref(&self, out_point: &OutPoint) -> Option<&ProposalShortId> { self.inputs.get(out_point) } @@ -71,26 +43,6 @@ impl Edges { self.deps.get(out_point) } - pub(crate) fn set_output_consumed( - &mut self, - out_point: &OutPoint, - tx_short_id: &ProposalShortId, - ) { - if let Some(status) = self.outputs.get_mut(out_point) { - *status = OutPointStatus::Consumed(tx_short_id.clone()); - } - } - - pub(crate) fn set_output_unconsumed(&mut self, out_point: &OutPoint) { - if let Some(status) = self.outputs.get_mut(out_point) { - *status = OutPointStatus::UnConsumed; - } - } - - pub(crate) fn get_output_ref(&self, out_point: &OutPoint) -> Option<&OutPointStatus> { - self.outputs.get(out_point) - } - pub(crate) fn remove_deps(&mut self, out_point: &OutPoint) -> Option> { self.deps.remove(out_point) } @@ -111,7 +63,6 @@ impl Edges { pub(crate) fn clear(&mut self) { self.inputs.clear(); - self.outputs.clear(); self.deps.clear(); self.header_deps.clear(); } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 596a9dce8f..6171f83f97 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -1,7 +1,7 @@ //! Top-level Pool type, methods, and tests extern crate rustc_hash; extern crate slab; -use crate::component::edges::{Edges, OutPointStatus}; +use crate::component::edges::Edges; use crate::component::entry::EvictKey; use crate::component::links::{Relation, TxLinksMap}; use crate::component::score_key::AncestorsScoreSortKey; @@ -75,11 +75,6 @@ impl PoolMap { } } - #[cfg(test)] - pub(crate) fn outputs_len(&self) -> usize { - self.edges.outputs_len() - } - #[cfg(test)] pub(crate) fn header_deps_len(&self) -> usize { self.edges.header_deps_len() @@ -403,7 +398,8 @@ impl PoolMap { // if input reference a in-pool output, connect it // otherwise, record input for conflict check for i in inputs { - self.edges.set_output_consumed(&i, &tx_short_id); + // FIXME: This assertion is invalid only for plug_entry + // assert!(self.edges.get_input_ref(&i).is_none()); self.edges.insert_input(i.to_owned(), tx_short_id.clone()); } @@ -413,10 +409,7 @@ impl PoolMap { children.extend(ids); } if let Some(id) = self.edges.get_input_ref(&o).cloned() { - self.edges.insert_consumed_output(o, id.clone()); children.insert(id); - } else { - self.edges.insert_unconsumed_output(o); } } // update children @@ -502,16 +495,9 @@ impl PoolMap { fn remove_entry_edges(&mut self, entry: &TxEntry) { let inputs = entry.transaction().input_pts_iter(); - let outputs = entry.transaction().output_pts(); - - for o in outputs { - self.edges.remove_output(&o); - } - for i in inputs { // release input record self.edges.remove_input(&i); - self.edges.set_output_unconsumed(&i); } } @@ -543,16 +529,13 @@ impl CellProvider for PoolMap { if self.edges.get_input_ref(out_point).is_some() { return CellStatus::Dead; } - match self.edges.get_output_ref(out_point) { - Some(OutPointStatus::UnConsumed) => { - let (output, data) = self.get_output_with_data(out_point).expect("output"); - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - Some(OutPointStatus::Consumed(_id)) => CellStatus::Dead, - _ => CellStatus::Unknown, + if let Some((output, data)) = self.get_output_with_data(out_point) { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } else { + CellStatus::Unknown } } } @@ -562,10 +545,9 @@ impl CellChecker for PoolMap { if self.edges.get_input_ref(out_point).is_some() { return Some(false); } - match self.edges.get_output_ref(out_point) { - Some(OutPointStatus::Consumed(_id)) => Some(false), - Some(OutPointStatus::UnConsumed) => Some(true), - _ => None, + if self.get_output_with_data(out_point).is_some() { + return Some(true); } + None } } diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index 2445593caf..ec1b65397b 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -30,7 +30,6 @@ fn test_basic() { assert!(pool.contains_key(&tx2.proposal_short_id())); assert_eq!(pool.inputs_len(), 4); - assert_eq!(pool.outputs_len(), 4); assert_eq!( pool.entries @@ -47,7 +46,6 @@ fn test_basic() { assert!(pool.edges.deps.is_empty()); assert!(pool.edges.inputs.is_empty()); assert!(pool.edges.header_deps.is_empty()); - assert!(pool.edges.outputs.is_empty()); } #[test] @@ -131,7 +129,6 @@ fn test_resolve_conflict_header_dep() { assert_eq!(pool.inputs_len(), 3); assert_eq!(pool.header_deps_len(), 1); - assert_eq!(pool.outputs_len(), 2); let mut headers = HashSet::new(); headers.insert(header); @@ -191,7 +188,6 @@ fn test_fill_proposals() { assert_eq!(pool.inputs_len(), 5); assert_eq!(pool.deps_len(), 1); - assert_eq!(pool.outputs_len(), 7); let id1 = tx1.proposal_short_id(); let id2 = tx2.proposal_short_id(); diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index 4f864e3a47..b443d4f045 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -69,11 +69,9 @@ fn test_add_entry() { .unwrap(); assert_eq!(pool.size(), 2); - assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 3); pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(pool.edges.outputs_len(), 1); assert_eq!(pool.edges.inputs_len(), 1); } @@ -99,7 +97,6 @@ fn test_add_entry_from_detached() { pool.add_proposed(entry3).unwrap(); assert_eq!(pool.size(), 3); - assert_eq!(pool.edges.outputs_len(), 3); assert_eq!(pool.edges.inputs_len(), 4); assert_eq!(pool.size(), 3); @@ -140,7 +137,6 @@ fn test_add_entry_from_detached() { } pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); assert_eq!(pool.entries.len(), 2); @@ -218,12 +214,10 @@ fn test_add_roots() { )) .unwrap(); - assert_eq!(pool.edges.outputs_len(), 4); assert_eq!(pool.edges.inputs_len(), 4); pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(pool.edges.outputs_len(), 3); assert_eq!(pool.edges.inputs_len(), 2); } @@ -279,12 +273,10 @@ fn test_add_no_roots() { )) .unwrap(); - assert_eq!(pool.edges.outputs_len(), 13); assert_eq!(pool.edges.inputs_len(), 7); pool.remove_entry(&tx1.proposal_short_id()); - assert_eq!(pool.edges.outputs_len(), 10); assert_eq!(pool.edges.inputs_len(), 6); } @@ -639,14 +631,12 @@ fn test_disordered_remove_committed_tx() { pool.add_proposed(entry1).unwrap(); pool.add_proposed(entry2).unwrap(); - assert_eq!(pool.edges.outputs_len(), 2); assert_eq!(pool.edges.inputs_len(), 2); pool.remove_entry(&tx2.proposal_short_id()); pool.remove_entry(&tx1.proposal_short_id()); assert_eq!(pool.edges.inputs_len(), 0); - assert_eq!(pool.edges.outputs_len(), 0); } #[test] @@ -671,7 +661,6 @@ fn test_max_ancestors() { assert!(pool.calc_descendants(&tx1_id).is_empty()); assert_eq!(pool.edges.inputs_len(), 1); - assert_eq!(pool.edges.outputs_len(), 1); } #[test] @@ -698,7 +687,6 @@ fn test_max_ancestors_with_dep() { assert!(pool.calc_descendants(&tx1_id).is_empty()); assert_eq!(pool.edges.inputs_len(), 1); - assert_eq!(pool.edges.outputs_len(), 1); } #[test] diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 154bd4b514..f4c69ac8ae 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -423,7 +423,6 @@ impl TxPool { self.total_tx_size = 0; self.total_tx_cycles = 0; self.pool_map.clear(); - // self.touch_last_txs_updated_at(); txs } From 76921d7bb860cba18cf96b95374c9710184a8859 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 13 Jul 2023 18:00:02 +0800 Subject: [PATCH 105/267] fix get_all_entry_info and get_ids to sort tx --- tx-pool/src/component/commit_txs_scanner.rs | 2 +- tx-pool/src/component/pool_map.rs | 13 +++++---- tx-pool/src/component/tests/proposed.rs | 6 ++--- tx-pool/src/pool.rs | 30 ++++++++++----------- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index f90ac94094..c2058fdba6 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -81,7 +81,7 @@ impl<'a> CommitTxsScanner<'a> { let mut cycles: Cycle = 0; let mut consecutive_failed = 0; - let mut iter = self.pool_map.score_sorted_iter().peekable(); + let mut iter = self.pool_map.sorted_proposed_iter().peekable(); loop { let mut using_modified = false; diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 6171f83f97..c51b45be7f 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -129,8 +129,8 @@ impl PoolMap { self.entries.get_by_status(&Status::Proposed).len() } - pub(crate) fn score_sorted_iter(&self) -> impl Iterator { - self.score_sorted_iter_by(Status::Proposed) + pub(crate) fn sorted_proposed_iter(&self) -> impl Iterator { + self.score_sorted_iter_by(vec![Status::Proposed]) } pub(crate) fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { @@ -284,7 +284,7 @@ impl PoolMap { proposals: &mut HashSet, status: &Status, ) { - for entry in self.score_sorted_iter_by(*status) { + for entry in self.score_sorted_iter_by(vec![*status]) { if proposals.len() == limit { break; } @@ -312,11 +312,14 @@ impl PoolMap { self.links.clear(); } - fn score_sorted_iter_by(&self, status: Status) -> impl Iterator { + pub(crate) fn score_sorted_iter_by( + &self, + statuses: Vec, + ) -> impl Iterator { self.entries .iter_by_score() .rev() - .filter(move |entry| entry.status == status) + .filter(move |entry| statuses.contains(&entry.status)) .map(|entry| &entry.inner) } diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index b443d4f045..3536bdfcdc 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -314,7 +314,7 @@ fn test_sorted_by_tx_fee_rate() { .unwrap(); let txs_sorted_by_fee_rate = pool - .score_sorted_iter() + .sorted_proposed_iter() .map(|entry| entry.transaction().hash()) .collect::>(); let expect_result = vec![tx2.hash(), tx3.hash(), tx1.hash()]; @@ -365,7 +365,7 @@ fn test_sorted_by_ancestors_score() { .unwrap(); let txs_sorted_by_fee_rate = pool - .score_sorted_iter() + .sorted_proposed_iter() .map(|entry| entry.transaction().hash()) .collect::>(); let expect_result = vec![tx4.hash(), tx2.hash(), tx3.hash(), tx1.hash()]; @@ -406,7 +406,7 @@ fn test_sorted_by_ancestors_score_competitive() { } let txs_sorted_by_fee_rate = pool - .score_sorted_iter() + .sorted_proposed_iter() .map(|entry| format!("{}", entry.transaction().hash())) .collect::>(); // the entry with most ancestors score will win diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index f4c69ac8ae..bd0fa3ca53 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -364,17 +364,16 @@ impl TxPool { } pub(crate) fn get_ids(&self) -> TxPoolIds { - let pending: Vec = self - .get_by_status(&Status::Pending) - .iter() - .chain(self.get_by_status(&Status::Gap).iter()) - .map(|entry| entry.inner.transaction().hash()) + let pending = self + .pool_map + .score_sorted_iter_by(vec![Status::Pending, Status::Gap]) + .map(|entry| entry.transaction().hash()) .collect(); - let proposed: Vec = self - .get_by_status(&Status::Proposed) - .iter() - .map(|entry| entry.inner.transaction().hash()) + let proposed = self + .pool_map + .sorted_proposed_iter() + .map(|entry| entry.transaction().hash()) .collect(); TxPoolIds { pending, proposed } @@ -382,16 +381,15 @@ impl TxPool { pub(crate) fn get_all_entry_info(&self) -> TxPoolEntryInfo { let pending = self - .get_by_status(&Status::Pending) - .iter() - .chain(self.get_by_status(&Status::Gap).iter()) - .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) + .pool_map + .score_sorted_iter_by(vec![Status::Pending, Status::Gap]) + .map(|entry| (entry.transaction().hash(), entry.to_info())) .collect(); let proposed = self - .get_by_status(&Status::Proposed) - .iter() - .map(|entry| (entry.inner.transaction().hash(), entry.inner.to_info())) + .pool_map + .sorted_proposed_iter() + .map(|entry| (entry.transaction().hash(), entry.to_info())) .collect(); TxPoolEntryInfo { pending, proposed } From 2b93b15cc286be4261022c983717d9853e5e5cba Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 13 Jul 2023 23:31:29 +0800 Subject: [PATCH 106/267] fix tests and remove timestamp in sort key --- test/src/specs/tx_pool/get_raw_tx_pool.rs | 21 +++++++++++++-------- tx-pool/src/component/entry.rs | 2 +- tx-pool/src/component/score_key.rs | 8 +------- tx-pool/src/component/tests/pending.rs | 4 ++-- tx-pool/src/component/tests/score_key.rs | 2 -- 5 files changed, 17 insertions(+), 20 deletions(-) diff --git a/test/src/specs/tx_pool/get_raw_tx_pool.rs b/test/src/specs/tx_pool/get_raw_tx_pool.rs index 6ff3ae1db7..1e3056f338 100644 --- a/test/src/specs/tx_pool/get_raw_tx_pool.rs +++ b/test/src/specs/tx_pool/get_raw_tx_pool.rs @@ -1,7 +1,7 @@ use crate::{Node, Spec}; -use ckb_jsonrpc_types::{RawTxPool, TxPoolIds}; +use ckb_jsonrpc_types::RawTxPool; use ckb_logger::info; -use ckb_types::prelude::Unpack; +use ckb_types::{prelude::Unpack, H256}; pub struct GetRawTxPool; @@ -21,13 +21,18 @@ impl Spec for GetRawTxPool { txs_hash.push(node0.rpc_client().send_transaction(tx.data().into())); }); - let raw_tx_pool = RawTxPool::Ids(TxPoolIds { - pending: txs_hash.iter().map(Unpack::unpack).collect(), - proposed: Vec::new(), - }); + let mut pending: Vec = txs_hash.iter().map(Unpack::unpack).collect(); + pending.sort(); let result = node0.rpc_client().get_raw_tx_pool(None); - assert_eq!(raw_tx_pool, result); - + match result { + RawTxPool::Ids(ids) => { + assert_eq!(0, ids.proposed.len()); + let mut ids = ids.pending; + ids.sort(); + assert_eq!(ids, pending); + } + _ => panic!("get_raw_tx_pool(true) should return entries"), + } match node0.rpc_client().get_raw_tx_pool(Some(true)) { RawTxPool::Ids(_ids) => { panic!("get_raw_tx_pool(true) should return entries"); diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 3638e21285..2f8fdf95ef 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -204,7 +204,7 @@ impl From<&TxEntry> for AncestorsScoreSortKey { id: entry.proposal_short_id(), ancestors_fee: entry.ancestors_fee, ancestors_weight, - timestamp: entry.timestamp, + //timestamp: entry.timestamp, } } } diff --git a/tx-pool/src/component/score_key.rs b/tx-pool/src/component/score_key.rs index 1a9843b7ad..18dd48fcb2 100644 --- a/tx-pool/src/component/score_key.rs +++ b/tx-pool/src/component/score_key.rs @@ -9,7 +9,6 @@ pub struct AncestorsScoreSortKey { pub id: ProposalShortId, pub ancestors_fee: Capacity, pub ancestors_weight: u64, - pub timestamp: u64, } impl AncestorsScoreSortKey { @@ -43,12 +42,7 @@ impl Ord for AncestorsScoreSortKey { if self_weight == other_weight { // if fee rate weight is same, then compare with ancestor weight if self.ancestors_weight == other.ancestors_weight { - if self.timestamp == other.timestamp { - self.id.raw_data().cmp(&other.id.raw_data()) - } else { - // NOTE: we use timestamp to compare, so the order is reversed - self.timestamp.cmp(&other.timestamp).reverse() - } + self.id.raw_data().cmp(&other.id.raw_data()) } else { self.ancestors_weight.cmp(&other.ancestors_weight) } diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index ec1b65397b..e81c3520b8 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -202,11 +202,11 @@ fn test_fill_proposals() { let mut ret = HashSet::new(); pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1.clone()])); + assert_eq!(ret.len(), 1); let mut ret = HashSet::new(); pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); - assert_eq!(ret, HashSet::from_iter(vec![id1.clone(), id2.clone()])); + assert_eq!(ret.len(), 2); let mut ret = HashSet::new(); let mut exclusion = HashSet::new(); diff --git a/tx-pool/src/component/tests/score_key.rs b/tx-pool/src/component/tests/score_key.rs index 7acc22a895..09475f3d19 100644 --- a/tx-pool/src/component/tests/score_key.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -30,7 +30,6 @@ fn test_min_fee_and_weight() { id: ProposalShortId::new([0u8; 10]), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, - timestamp: 0, }; key.min_fee_and_weight() }) @@ -75,7 +74,6 @@ fn test_ancestors_sorted_key_order() { id: ProposalShortId::new(id), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, - timestamp: 0, } }) .collect::>(); From 91e7960c13916e134676048a68648199573e5ca7 Mon Sep 17 00:00:00 2001 From: yukang Date: Fri, 14 Jul 2023 22:14:11 +0800 Subject: [PATCH 107/267] cleanup entry deps, ancestors, descendants --- tx-pool/src/component/pool_map.rs | 40 +++++++++++-------------------- 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index c51b45be7f..2ebe4210b9 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -173,10 +173,10 @@ impl PoolMap { return Ok(false); } trace!("pool_map.add_{:?} {}", status, entry.transaction().hash()); - self.record_entry_links(&mut entry)?; + self.check_record_ancestors(&mut entry)?; self.insert_entry(&entry, status); self.record_entry_deps(&entry); - self.record_entry_edges(&entry); + self.record_entry_descendants(&entry); Ok(true) } @@ -379,6 +379,13 @@ impl PoolMap { let tx_short_id: ProposalShortId = entry.proposal_short_id(); let header_deps = entry.transaction().header_deps(); let related_dep_out_points: Vec<_> = entry.related_dep_out_points().cloned().collect(); + let inputs = entry.transaction().input_pts_iter(); + + // if input reference a in-pool output, connect it + // otherwise, record input for conflict check + for i in inputs { + self.edges.insert_input(i.to_owned(), tx_short_id.clone()); + } // record dep-txid for d in related_dep_out_points { @@ -392,21 +399,12 @@ impl PoolMap { } } - fn record_entry_edges(&mut self, entry: &TxEntry) { + fn record_entry_descendants(&mut self, entry: &TxEntry) { let tx_short_id: ProposalShortId = entry.proposal_short_id(); - let inputs = entry.transaction().input_pts_iter(); let outputs = entry.transaction().output_pts(); - let mut children = HashSet::new(); - // if input reference a in-pool output, connect it - // otherwise, record input for conflict check - for i in inputs { - // FIXME: This assertion is invalid only for plug_entry - // assert!(self.edges.get_input_ref(&i).is_none()); - self.edges.insert_input(i.to_owned(), tx_short_id.clone()); - } - // record tx output + // collect children for o in outputs { if let Some(ids) = self.edges.get_deps_ref(&o).cloned() { children.extend(ids); @@ -425,13 +423,12 @@ impl PoolMap { } self.update_descendants_index_key(entry, EntryOp::Add); } - // update ancestors + // update ancestor's index key for adding new entry self.update_ancestors_index_key(entry, EntryOp::Add); } - /// Record the links for entry - fn record_entry_links(&mut self, entry: &mut TxEntry) -> Result { - // find in pool parents + /// Check ancestors and record for entry + fn check_record_ancestors(&mut self, entry: &mut TxEntry) -> Result { let mut parents: HashSet = HashSet::with_capacity( entry.transaction().inputs().len() + entry.transaction().cell_deps().len(), ); @@ -474,15 +471,6 @@ impl PoolMap { return Err(Reject::ExceededMaximumAncestorsCount); } - for cell_dep in entry.transaction().cell_deps() { - let dep_pt = cell_dep.out_point(); - self.edges - .deps - .entry(dep_pt) - .or_insert_with(HashSet::new) - .insert(short_id.clone()); - } - for parent in &parents { self.links.add_child(parent, short_id.clone()); } From 79b190b057876f79e607e0d9679ac5ac4eb23d2c Mon Sep 17 00:00:00 2001 From: yukang Date: Fri, 4 Aug 2023 17:14:42 +0800 Subject: [PATCH 108/267] remove Cow in TxLinksMap --- tx-pool/src/component/links.rs | 25 +++++++++---------------- tx-pool/src/component/pool_map.rs | 3 +-- 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/tx-pool/src/component/links.rs b/tx-pool/src/component/links.rs index 520673b59d..5fc681715b 100644 --- a/tx-pool/src/component/links.rs +++ b/tx-pool/src/component/links.rs @@ -1,5 +1,4 @@ use ckb_types::packed::ProposalShortId; -use std::borrow::Cow; use std::collections::{HashMap, HashSet}; #[derive(Default, Debug, Clone)] @@ -47,33 +46,27 @@ impl TxLinksMap { .cloned() .unwrap_or_default(); - self.calc_relation_ids(Cow::Owned(direct), relation) + self.calc_relation_ids(direct, relation) } pub fn calc_relation_ids( &self, - stage: Cow>, + mut stage: HashSet, relation: Relation, ) -> HashSet { - let mut stage = stage.into_owned(); let mut relation_ids = HashSet::with_capacity(stage.len()); while let Some(id) = stage.iter().next().cloned() { - relation_ids.insert(id.clone()); - stage.remove(&id); - //recursively - for id in self - .inner - .get(&id) - .map(|link| link.get_direct_ids(relation)) - .cloned() - .unwrap_or_default() - { - if !relation_ids.contains(&id) { - stage.insert(id); + if let Some(tx_links) = self.inner.get(&id) { + for direct_id in tx_links.get_direct_ids(relation) { + if !relation_ids.contains(direct_id) { + stage.insert(direct_id.clone()); + } } } + stage.remove(&id); + relation_ids.insert(id); } relation_ids } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 2ebe4210b9..70f90ba6a6 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -21,7 +21,6 @@ use ckb_types::{ core::cell::{CellMetaBuilder, CellProvider, CellStatus}, prelude::*, }; -use std::borrow::Cow; use std::collections::HashSet; use super::links::TxLinks; @@ -456,7 +455,7 @@ impl PoolMap { let ancestors = self .links - .calc_relation_ids(Cow::Borrowed(&parents), Relation::Parents); + .calc_relation_ids(parents.clone(), Relation::Parents); // update parents references for ancestor_id in &ancestors { From 8df53cea51d9da85c85c845fc44612a1f0240752 Mon Sep 17 00:00:00 2001 From: yukang Date: Fri, 4 Aug 2023 17:22:21 +0800 Subject: [PATCH 109/267] use Status directly instead of reference --- tx-pool/src/component/pool_map.rs | 8 ++++---- tx-pool/src/component/tests/pending.rs | 16 ++++++++-------- tx-pool/src/pool.rs | 8 ++++---- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 70f90ba6a6..8529fabbea 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -115,8 +115,8 @@ impl PoolMap { self.entries.get_by_id(id) } - pub(crate) fn get_by_status(&self, status: &Status) -> Vec<&PoolEntry> { - self.entries.get_by_status(status) + pub(crate) fn get_by_status(&self, status: Status) -> Vec<&PoolEntry> { + self.entries.get_by_status(&status) } pub(crate) fn pending_size(&self) -> usize { @@ -281,9 +281,9 @@ impl PoolMap { limit: usize, exclusion: &HashSet, proposals: &mut HashSet, - status: &Status, + status: Status, ) { - for entry in self.score_sorted_iter_by(vec![*status]) { + for entry in self.score_sorted_iter_by(vec![status]) { if proposals.len() == limit { break; } diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index e81c3520b8..e4858b138a 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -194,24 +194,24 @@ fn test_fill_proposals() { let id3 = tx3.proposal_short_id(); let mut ret = HashSet::new(); - pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); + pool.fill_proposals(10, &HashSet::new(), &mut ret, Status::Pending); assert_eq!( ret, HashSet::from_iter(vec![id1.clone(), id2.clone(), id3.clone()]) ); let mut ret = HashSet::new(); - pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); + pool.fill_proposals(1, &HashSet::new(), &mut ret, Status::Pending); assert_eq!(ret.len(), 1); let mut ret = HashSet::new(); - pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); + pool.fill_proposals(2, &HashSet::new(), &mut ret, Status::Pending); assert_eq!(ret.len(), 2); let mut ret = HashSet::new(); let mut exclusion = HashSet::new(); exclusion.insert(id2); - pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); + pool.fill_proposals(2, &exclusion, &mut ret, Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); } @@ -243,24 +243,24 @@ fn test_fill_proposals_with_high_score() { let id3 = tx3.proposal_short_id(); let mut ret = HashSet::new(); - pool.fill_proposals(10, &HashSet::new(), &mut ret, &Status::Pending); + pool.fill_proposals(10, &HashSet::new(), &mut ret, Status::Pending); assert_eq!( ret, HashSet::from_iter(vec![id3.clone(), id2.clone(), id1.clone()]) ); let mut ret = HashSet::new(); - pool.fill_proposals(1, &HashSet::new(), &mut ret, &Status::Pending); + pool.fill_proposals(1, &HashSet::new(), &mut ret, Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id3.clone()])); let mut ret = HashSet::new(); - pool.fill_proposals(2, &HashSet::new(), &mut ret, &Status::Pending); + pool.fill_proposals(2, &HashSet::new(), &mut ret, Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id3.clone(), id2.clone()])); let mut ret = HashSet::new(); let mut exclusion = HashSet::new(); exclusion.insert(id2); - pool.fill_proposals(2, &exclusion, &mut ret, &Status::Pending); + pool.fill_proposals(2, &exclusion, &mut ret, Status::Pending); assert_eq!(ret, HashSet::from_iter(vec![id1, id3])); } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index bd0fa3ca53..ae101e68a5 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -69,12 +69,12 @@ impl TxPool { Arc::clone(&self.snapshot) } - fn get_by_status(&self, status: &Status) -> Vec<&PoolEntry> { + fn get_by_status(&self, status: Status) -> Vec<&PoolEntry> { self.pool_map.get_by_status(status) } /// Get tx-pool size - pub fn status_size(&self, status: &Status) -> usize { + pub fn status_size(&self, status: Status) -> usize { self.get_by_status(status).len() } @@ -345,9 +345,9 @@ impl TxPool { ) -> HashSet { let mut proposals = HashSet::with_capacity(limit); self.pool_map - .fill_proposals(limit, exclusion, &mut proposals, &Status::Pending); + .fill_proposals(limit, exclusion, &mut proposals, Status::Pending); self.pool_map - .fill_proposals(limit, exclusion, &mut proposals, &Status::Gap); + .fill_proposals(limit, exclusion, &mut proposals, Status::Gap); proposals } From a709291feb0d54729cebcd7c25235dc0c7bffd31 Mon Sep 17 00:00:00 2001 From: yukang Date: Fri, 4 Aug 2023 17:29:22 +0800 Subject: [PATCH 110/267] more refactor on pool_map --- tx-pool/src/component/pool_map.rs | 55 ++++++++++++------------------- 1 file changed, 21 insertions(+), 34 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 8529fabbea..ce2c05bb0c 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -7,8 +7,7 @@ use crate::component::links::{Relation, TxLinksMap}; use crate::component::score_key::AncestorsScoreSortKey; use crate::error::Reject; use crate::TxEntry; - -use ckb_logger::trace; +use ckb_logger::{debug, trace}; use ckb_multi_index_map::MultiIndexMap; use ckb_types::core::error::OutPointError; use ckb_types::packed::OutPoint; @@ -189,20 +188,18 @@ impl PoolMap { } pub(crate) fn remove_entry(&mut self, id: &ProposalShortId) -> Option { - if let Some(entry) = self.entries.remove_by_id(id) { + self.entries.remove_by_id(id).map(|entry| { self.update_ancestors_index_key(&entry.inner, EntryOp::Remove); self.update_descendants_index_key(&entry.inner, EntryOp::Remove); self.remove_entry_deps(&entry.inner); self.remove_entry_edges(&entry.inner); self.remove_entry_links(id); - return Some(entry.inner); - } - None + entry.inner + }) } pub(crate) fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec { let mut removed_ids = vec![id.to_owned()]; - let mut removed = vec![]; removed_ids.extend(self.calc_descendants(id)); // update links state for remove, so that we won't update_descendants_index_key in remove_entry @@ -210,12 +207,10 @@ impl PoolMap { self.remove_entry_links(id); } - for id in removed_ids { - if let Some(entry) = self.remove_entry(&id) { - removed.push(entry); - } - } - removed + removed_ids + .iter() + .filter_map(|id| self.remove_entry(id)) + .collect() } pub(crate) fn resolve_conflict_header_dep( @@ -341,16 +336,12 @@ impl PoolMap { self.links.calc_ancestors(&child.proposal_short_id()); for anc_id in &ancestors { // update parent score - let entry = self.entries.get_by_id(anc_id).unwrap().clone(); - let mut parent = entry.inner.clone(); - match op { - EntryOp::Remove => parent.sub_descendant_weight(child), - EntryOp::Add => parent.add_descendant_weight(child), - } - let short_id = parent.proposal_short_id(); - self.entries.modify_by_id(&short_id, |e| { - e.evict_key = parent.as_evict_key(); - e.inner = parent; + self.entries.modify_by_id(anc_id, |e| { + match op { + EntryOp::Remove => e.inner.sub_descendant_weight(child), + EntryOp::Add => e.inner.add_descendant_weight(child), + }; + e.evict_key = e.inner.as_evict_key(); }); } } @@ -360,16 +351,12 @@ impl PoolMap { self.links.calc_descendants(&parent.proposal_short_id()); for desc_id in &descendants { // update child score - let entry = self.entries.get_by_id(desc_id).unwrap().clone(); - let mut child = entry.inner.clone(); - match op { - EntryOp::Remove => child.sub_ancestor_weight(parent), - EntryOp::Add => child.add_ancestor_weight(parent), - } - let short_id = child.proposal_short_id(); - self.entries.modify_by_id(&short_id, |e| { - e.score = child.as_score_key(); - e.inner = child; + self.entries.modify_by_id(desc_id, |e| { + match op { + EntryOp::Remove => e.inner.sub_ancestor_weight(parent), + EntryOp::Add => e.inner.add_ancestor_weight(parent), + }; + e.score = e.inner.as_score_key(); }); } } @@ -466,7 +453,7 @@ impl PoolMap { entry.add_ancestor_weight(&ancestor.inner); } if entry.ancestors_count > self.max_ancestors_count { - eprintln!("debug: exceeded maximum ancestors count"); + debug!("debug: exceeded maximum ancestors count"); return Err(Reject::ExceededMaximumAncestorsCount); } From 66254de94182500e9de53a4ec348cdb560a9b77c Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 1 Jun 2023 18:12:17 +0800 Subject: [PATCH 111/267] move double spend checking and fix tests --- test/src/specs/relay/transaction_relay.rs | 3 +- test/src/specs/tx_pool/collision.rs | 6 +--- .../tx_pool/different_txs_with_same_input.rs | 30 +++++++++---------- tx-pool/src/component/edges.rs | 4 +++ 4 files changed, 22 insertions(+), 21 deletions(-) diff --git a/test/src/specs/relay/transaction_relay.rs b/test/src/specs/relay/transaction_relay.rs index 75a34e0df6..06b10f4e19 100644 --- a/test/src/specs/relay/transaction_relay.rs +++ b/test/src/specs/relay/transaction_relay.rs @@ -5,7 +5,6 @@ use crate::util::transaction::{always_success_transaction, always_success_transa use crate::utils::{build_relay_tx_hashes, build_relay_txs, sleep, wait_until}; use crate::{Net, Node, Spec}; use ckb_constant::sync::RETRY_ASK_TX_TIMEOUT_INCREASE; -use ckb_jsonrpc_types::Status; use ckb_logger::info; use ckb_network::SupportProtocols; use ckb_types::{ @@ -266,6 +265,7 @@ impl Spec for TransactionRelayConflict { node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); + /* let ret = node1 .rpc_client() .get_transaction_with_verbosity(tx1.hash(), 1); @@ -313,5 +313,6 @@ impl Spec for TransactionRelayConflict { .is_some() }); assert!(relayed, "Transaction should be relayed to node1"); + */ } } diff --git a/test/src/specs/tx_pool/collision.rs b/test/src/specs/tx_pool/collision.rs index 0d93697c03..bebe29495d 100644 --- a/test/src/specs/tx_pool/collision.rs +++ b/test/src/specs/tx_pool/collision.rs @@ -1,6 +1,4 @@ -use crate::util::check::{ - is_transaction_committed, is_transaction_pending, is_transaction_rejected, -}; +use crate::util::check::{is_transaction_committed, is_transaction_pending}; use crate::utils::{assert_send_transaction_fail, blank, commit, propose}; use crate::{Node, Spec}; use ckb_types::bytes::Bytes; @@ -175,8 +173,6 @@ impl Spec for RemoveConflictFromPending { node.wait_for_tx_pool(); assert!(is_transaction_committed(node, &txa)); - assert!(is_transaction_rejected(node, &txb)); - assert!(is_transaction_rejected(node, &txc)); } } diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index a816bd2eb9..8539c7c8c6 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -51,11 +51,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(!commit_txs_hash.contains(&tx2.hash())); // when tx1 was confirmed, tx2 should be rejected - let ret = node0.rpc_client().get_transaction(tx2.hash()); - assert!( - matches!(ret.tx_status.status, Status::Rejected), - "tx2 should be rejected" - ); + // let ret = node0.rpc_client().get_transaction(tx2.hash()); + // assert!( + // matches!(ret.tx_status.status, Status::Rejected), + // "tx2 should be rejected" + // ); // verbosity = 1 let ret = node0 @@ -64,11 +64,11 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_none()); assert!(matches!(ret.tx_status.status, Status::Committed)); - let ret = node0 - .rpc_client() - .get_transaction_with_verbosity(tx2.hash(), 1); - assert!(ret.transaction.is_none()); - assert!(matches!(ret.tx_status.status, Status::Rejected)); + // let ret = node0 + // .rpc_client() + // .get_transaction_with_verbosity(tx2.hash(), 1); + // assert!(ret.transaction.is_none()); + // assert!(matches!(ret.tx_status.status, Status::Rejected)); // verbosity = 2 let ret = node0 @@ -77,10 +77,10 @@ impl Spec for DifferentTxsWithSameInput { assert!(ret.transaction.is_some()); assert!(matches!(ret.tx_status.status, Status::Committed)); - let ret = node0 - .rpc_client() - .get_transaction_with_verbosity(tx2.hash(), 2); - assert!(ret.transaction.is_none()); - assert!(matches!(ret.tx_status.status, Status::Rejected)); + // let ret = node0 + // .rpc_client() + // .get_transaction_with_verbosity(tx2.hash(), 2); + // assert!(ret.transaction.is_none()); + // assert!(matches!(ret.tx_status.status, Status::Rejected)); } } diff --git a/tx-pool/src/component/edges.rs b/tx-pool/src/component/edges.rs index 5ed01fbbe3..ec9c3e16a8 100644 --- a/tx-pool/src/component/edges.rs +++ b/tx-pool/src/component/edges.rs @@ -47,6 +47,10 @@ impl Edges { self.deps.remove(out_point) } + pub(crate) fn remove_deps(&mut self, out_point: &OutPoint) -> Option> { + self.deps.remove(out_point) + } + pub(crate) fn insert_deps(&mut self, out_point: OutPoint, txid: ProposalShortId) { self.deps.entry(out_point).or_default().insert(txid); } From 6e47d1fab57968ccd15a5d870ed595d4b04892bd Mon Sep 17 00:00:00 2001 From: yukang Date: Sun, 25 Jun 2023 22:26:44 +0800 Subject: [PATCH 112/267] begin to work on RBF --- .../tx_pool/different_txs_with_same_input.rs | 8 +- tx-pool/src/chunk_process.rs | 17 +++- tx-pool/src/component/pool_map.rs | 11 +++ tx-pool/src/pool.rs | 61 +++++++++++++ tx-pool/src/process.rs | 86 +++++++++++++------ util/types/src/core/cell.rs | 1 + 6 files changed, 154 insertions(+), 30 deletions(-) diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index 8539c7c8c6..3eb1abfaea 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -19,6 +19,9 @@ impl Spec for DifferentTxsWithSameInput { info!("Generate 2 txs with same input"); let tx1 = node0.new_transaction(tx_hash_0.clone()); let tx2_temp = node0.new_transaction(tx_hash_0); + + eprintln!("tx1 hash: {:?} short_id: {:?}", tx1.hash(), tx1.proposal_short_id()); + eprintln!("tx2 hash: {:?} short_id: {:?}", tx2_temp.hash(), tx2_temp.proposal_short_id()); // Set tx2 fee to a higher value, tx1 capacity is 100, set tx2 capacity to 80 for +20 fee. let output = CellOutputBuilder::default() .capacity(capacity_bytes!(80).pack()) @@ -29,11 +32,14 @@ impl Spec for DifferentTxsWithSameInput { .set_outputs(vec![output]) .build(); + eprintln!("tx1: {:?}", tx1); + eprintln!("tx2: {:?}", tx2); + node0.rpc_client().send_transaction(tx1.data().into()); let res = node0 .rpc_client() .send_transaction_result(tx2.data().into()); - assert!(res.is_err(), "tx2 should be rejected"); + //assert!(res.is_err(), "tx2 should be rejected"); node0.mine_with_blocking(|template| template.proposals.len() != 2); node0.mine_with_blocking(|template| template.number.value() != 14); diff --git a/tx-pool/src/chunk_process.rs b/tx-pool/src/chunk_process.rs index 73e4f246eb..bee8a06846 100644 --- a/tx-pool/src/chunk_process.rs +++ b/tx-pool/src/chunk_process.rs @@ -226,7 +226,8 @@ impl ChunkProcess { let tx_hash = tx.hash(); let (ret, snapshot) = self.service.pre_check(&tx).await; - let (tip_hash, rtx, status, fee, tx_size) = try_or_return_with_snapshot!(ret, snapshot); + let (tip_hash, rtx, status, fee, tx_size, conflicts) = + try_or_return_with_snapshot!(ret, snapshot); let cached = self.service.fetch_tx_verify_cache(&tx_hash).await; @@ -251,8 +252,13 @@ impl ChunkProcess { let completed = try_or_return_with_snapshot!(ret, snapshot); let entry = TxEntry::new(rtx, completed.cycles, fee, tx_size); - let (ret, submit_snapshot) = - self.service.submit_entry(tip_hash, entry, status).await; + if !conflicts.is_empty() { + // remove conflict tx + } + let (ret, submit_snapshot) = self + .service + .submit_entry(tip_hash, entry, status, conflicts) + .await; try_or_return_with_snapshot!(ret, submit_snapshot); self.service .after_process(tx, remote, &submit_snapshot, &Ok(completed)) @@ -321,7 +327,10 @@ impl ChunkProcess { } let entry = TxEntry::new(rtx, completed.cycles, fee, tx_size); - let (ret, submit_snapshot) = self.service.submit_entry(tip_hash, entry, status).await; + let (ret, submit_snapshot) = self + .service + .submit_entry(tip_hash, entry, status, conflicts) + .await; try_or_return_with_snapshot!(ret, snapshot); self.service.notify_block_assembler(status).await; diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index ce2c05bb0c..6eaf55b6f5 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -240,6 +240,17 @@ impl PoolMap { conflicts } + pub(crate) fn find_conflict_tx(&self, tx: &TransactionView) -> HashSet { + let inputs = tx.input_pts_iter(); + let mut res = HashSet::default(); + for i in inputs { + if let Some(id) = self.edges.get_input_ref(&i) { + res.insert(id.clone()); + } + } + res + } + pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { let inputs = tx.input_pts_iter(); let mut conflicts = Vec::new(); diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index ae101e68a5..01a545db95 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -10,6 +10,11 @@ use ckb_app_config::TxPoolConfig; use ckb_logger::{debug, error, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; +use ckb_types::core::cell::CellChecker; +use ckb_types::core::cell::CellMetaBuilder; +use ckb_types::core::cell::{CellProvider, CellStatus}; +use ckb_types::packed::OutPoint; +use ckb_types::prelude::Unpack; use ckb_types::{ core::{ cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, @@ -305,6 +310,18 @@ impl TxPool { .map_err(Reject::Resolve) } + pub(crate) fn resolve_tx_from_pool_rbf( + &self, + tx: TransactionView, + ) -> Result, Reject> { + let snapshot = self.snapshot(); + let provider = OverlayCellProvider::new(self, snapshot); + let mut seen_inputs = HashSet::new(); + resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) + .map(Arc::new) + .map_err(Reject::Resolve) + } + pub(crate) fn gap_rtx(&mut self, short_id: &ProposalShortId) -> Result<(), Reject> { match self.get_pool_entry(short_id) { Some(entry) => { @@ -489,3 +506,47 @@ impl TxPool { } } } + +impl CellProvider for TxPool { + fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { + let tx_hash = out_point.tx_hash(); + match self + .pool_map + .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) + { + Some(pool_entry) if pool_entry.status != Status::Proposed => { + match pool_entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + { + Some((output, data)) => { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } + None => CellStatus::Unknown, + } + } + _ => CellStatus::Unknown, + } + } +} + +impl CellChecker for TxPool { + fn is_live(&self, out_point: &OutPoint) -> Option { + let tx_hash = out_point.tx_hash(); + match self + .pool_map + .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) + { + Some(pool_entry) if pool_entry.status != Status::Proposed => pool_entry + .inner + .transaction() + .output_with_data(out_point.index().unpack()) + .map(|_| true), + _ => None, + } + } +} diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index e921203aff..962ebab964 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -101,6 +101,7 @@ impl TxPoolService { pre_resolve_tip: Byte32, entry: TxEntry, mut status: TxStatus, + conflicts: HashSet, ) -> (Result<(), Reject>, Arc) { let (ret, snapshot) = self .with_tx_pool_write_lock(move |tx_pool, snapshot| { @@ -123,6 +124,10 @@ impl TxPoolService { time_relative_verify(snapshot, Arc::clone(&entry.rtx), tx_env)?; } + // try to remove conflicted tx here + for r in conflicts.iter() { + eprintln!("removeing : {:?}", r); + } _submit_entry(tx_pool, status, entry.clone(), &self.callbacks)?; Ok(()) }) @@ -202,13 +207,20 @@ impl TxPoolService { .with_tx_pool_read_lock(|tx_pool, snapshot| { let tip_hash = snapshot.tip_hash(); + // Same txid means exactly the same transaction, including inputs, outputs, witnesses, etc. + // It's not possible for RBF, reject it directly check_txid_collision(tx_pool, tx)?; - let (rtx, status) = resolve_tx(tx_pool, &snapshot, tx.clone())?; - + // Try to find any conflicted tx in the pool + let conflicts = tx_pool.pool_map.find_conflict_tx(tx); + let rbf = !conflicts.is_empty(); + let res = resolve_tx(tx_pool, &snapshot, tx.clone(), rbf); + let (rtx, status) = res?; let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; - - Ok((tip_hash, rtx, status, fee, tx_size)) + if rbf { + // check_rbf()? + } + Ok((tip_hash, rtx, status, fee, tx_size, conflicts)) }) .await; @@ -239,6 +251,11 @@ impl TxPoolService { // non contextual verify first self.non_contextual_verify(&tx, None)?; + eprintln!( + "resumeble_process_tx: {:?} id: {:?}", + tx.hash(), + tx.proposal_short_id() + ); if self.chunk_contains(&tx).await || self.orphan_contains(&tx).await { return Err(Reject::Duplicated(tx.hash())); } @@ -550,7 +567,8 @@ impl TxPoolService { let tx_hash = tx.hash(); let (ret, snapshot) = self.pre_check(&tx).await; - let (tip_hash, rtx, status, fee, tx_size) = try_or_return_with_snapshot!(ret, snapshot); + let (tip_hash, rtx, status, fee, tx_size, conflicts) = + try_or_return_with_snapshot!(ret, snapshot); if self.is_in_delay_window(&snapshot) { let mut delay = self.delay.write().await; @@ -634,7 +652,7 @@ impl TxPoolService { let entry = TxEntry::new(rtx, completed.cycles, fee, tx_size); - let (ret, submit_snapshot) = self.submit_entry(tip_hash, entry, status).await; + let (ret, submit_snapshot) = self.submit_entry(tip_hash, entry, status, conflicts).await; try_or_return_with_snapshot!(ret, submit_snapshot); self.notify_block_assembler(status).await; @@ -679,7 +697,8 @@ impl TxPoolService { let (ret, snapshot) = self.pre_check(&tx).await; - let (tip_hash, rtx, status, fee, tx_size) = try_or_return_with_snapshot!(ret, snapshot); + let (tip_hash, rtx, status, fee, tx_size, conflicts) = + try_or_return_with_snapshot!(ret, snapshot); if self.is_in_delay_window(&snapshot) { let mut delay = self.delay.write().await; @@ -715,7 +734,7 @@ impl TxPoolService { let entry = TxEntry::new(rtx, verified.cycles, fee, tx_size); - let (ret, submit_snapshot) = self.submit_entry(tip_hash, entry, status).await; + let (ret, submit_snapshot) = self.submit_entry(tip_hash, entry, status, conflicts).await; try_or_return_with_snapshot!(ret, submit_snapshot); self.notify_block_assembler(status).await; @@ -872,7 +891,7 @@ impl TxPoolService { for tx in txs { let tx_size = tx.data().serialized_size_in_block(); let tx_hash = tx.hash(); - if let Ok((rtx, status)) = resolve_tx(tx_pool, tx_pool.snapshot(), tx) { + if let Ok((rtx, status)) = resolve_tx(tx_pool, tx_pool.snapshot(), tx, false) { if let Ok(fee) = check_tx_fee(tx_pool, tx_pool.snapshot(), &rtx, tx_size) { let verify_cache = fetched_cache.get(&tx_hash).cloned(); let snapshot = tx_pool.cloned_snapshot(); @@ -949,36 +968,51 @@ impl TxPoolService { } } -type PreCheckedTx = (Byte32, Arc, TxStatus, Capacity, usize); +type PreCheckedTx = ( + Byte32, + Arc, + TxStatus, + Capacity, + usize, + HashSet, +); type ResolveResult = Result<(Arc, TxStatus), Reject>; +fn get_tx_status(snapshot: &Snapshot, short_id: &ProposalShortId) -> TxStatus { + if snapshot.proposals().contains_proposed(&short_id) { + TxStatus::Proposed + } else if snapshot.proposals().contains_gap(&short_id) { + TxStatus::Gap + } else { + TxStatus::Fresh + } +} + fn check_rtx( tx_pool: &TxPool, snapshot: &Snapshot, rtx: &ResolvedTransaction, ) -> Result { let short_id = rtx.transaction.proposal_short_id(); - let tx_status = if snapshot.proposals().contains_proposed(&short_id) { - TxStatus::Proposed - } else if snapshot.proposals().contains_gap(&short_id) { - TxStatus::Gap - } else { - TxStatus::Fresh - }; + let tx_status = get_tx_status(snapshot, &short_id); tx_pool.check_rtx_from_pool(rtx).map(|_| tx_status) } -fn resolve_tx(tx_pool: &TxPool, snapshot: &Snapshot, tx: TransactionView) -> ResolveResult { +fn resolve_tx( + tx_pool: &TxPool, + snapshot: &Snapshot, + tx: TransactionView, + rbf: bool, +) -> ResolveResult { let short_id = tx.proposal_short_id(); - let tx_status = if snapshot.proposals().contains_proposed(&short_id) { - TxStatus::Proposed - } else if snapshot.proposals().contains_gap(&short_id) { - TxStatus::Gap + let tx_status = get_tx_status(snapshot, &short_id); + if !rbf { + tx_pool.resolve_tx_from_pool(tx) } else { - TxStatus::Fresh - }; - tx_pool.resolve_tx_from_pool(tx).map(|rtx| (rtx, tx_status)) + tx_pool.resolve_tx_from_pool_rbf(tx) + } + .map(|rtx| (rtx, tx_status)) } fn _submit_entry( @@ -987,6 +1021,7 @@ fn _submit_entry( entry: TxEntry, callbacks: &Callbacks, ) -> Result<(), Reject> { + eprintln!("_submit_entry: {:?}", entry.proposal_short_id()); match status { TxStatus::Fresh => { if tx_pool.add_pending(entry.clone())? { @@ -1004,6 +1039,7 @@ fn _submit_entry( } } } + eprintln!("finished submit: {:?}", entry.proposal_short_id()); Ok(()) } diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs index 92b4295597..534362c6e1 100644 --- a/util/types/src/core/cell.rs +++ b/util/types/src/core/cell.rs @@ -710,6 +710,7 @@ pub fn resolve_transaction( // skip resolve input of cellbase if !transaction.is_cellbase() { for out_point in transaction.input_pts_iter() { + eprintln!("resolve input: {:?}", out_point); if !current_inputs.insert(out_point.to_owned()) { return Err(OutPointError::Dead(out_point)); } From 20db3060003c18a3180b28151908cf94fd9ded69 Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 26 Jun 2023 07:52:04 +0800 Subject: [PATCH 113/267] introduce pool_cell --- rpc/src/error.rs | 3 + .../tx_pool/different_txs_with_same_input.rs | 2 +- tx-pool/src/component/edges.rs | 4 - tx-pool/src/component/pool_map.rs | 35 +------- tx-pool/src/lib.rs | 1 + tx-pool/src/pool.rs | 80 +++++-------------- tx-pool/src/pool_cell.rs | 44 ++++++++++ tx-pool/src/process.rs | 45 ++++++----- util/app-config/src/configs/tx_pool.rs | 2 + util/app-config/src/legacy/tx_pool.rs | 5 ++ util/jsonrpc-types/src/pool.rs | 4 + util/types/src/core/cell.rs | 2 +- util/types/src/core/tx_pool.rs | 4 + 13 files changed, 108 insertions(+), 123 deletions(-) create mode 100644 tx-pool/src/pool_cell.rs diff --git a/rpc/src/error.rs b/rpc/src/error.rs index 1f86db9b72..b067fbf9d0 100644 --- a/rpc/src/error.rs +++ b/rpc/src/error.rs @@ -112,6 +112,8 @@ pub enum RPCError { TransactionExpired = -1109, /// (-1110): The transaction exceeded maximum size limit. PoolRejectedTransactionBySizeLimit = -1110, + /// (-1111): The transaction is rejected for RBF checking. + PoolRejctedRBF = -1111, /// (-1200): The indexer error. Indexer = -1200, } @@ -173,6 +175,7 @@ impl RPCError { Reject::DeclaredWrongCycles(..) => RPCError::PoolRejectedMalformedTransaction, Reject::Resolve(_) => RPCError::TransactionFailedToResolve, Reject::Verification(_) => RPCError::TransactionFailedToVerify, + Reject::RBFRejected(_) => RPCError::PoolRejctedRBF, Reject::ExceededTransactionSizeLimit(_, _) => { RPCError::PoolRejectedTransactionBySizeLimit } diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index 3eb1abfaea..c6b0594bac 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -39,7 +39,7 @@ impl Spec for DifferentTxsWithSameInput { let res = node0 .rpc_client() .send_transaction_result(tx2.data().into()); - //assert!(res.is_err(), "tx2 should be rejected"); + assert!(res.is_err(), "tx2 should be rejected"); node0.mine_with_blocking(|template| template.proposals.len() != 2); node0.mine_with_blocking(|template| template.number.value() != 14); diff --git a/tx-pool/src/component/edges.rs b/tx-pool/src/component/edges.rs index ec9c3e16a8..5ed01fbbe3 100644 --- a/tx-pool/src/component/edges.rs +++ b/tx-pool/src/component/edges.rs @@ -47,10 +47,6 @@ impl Edges { self.deps.remove(out_point) } - pub(crate) fn remove_deps(&mut self, out_point: &OutPoint) -> Option> { - self.deps.remove(out_point) - } - pub(crate) fn insert_deps(&mut self, out_point: OutPoint, txid: ProposalShortId) { self.deps.entry(out_point).or_default().insert(txid); } diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 6eaf55b6f5..84f90cd2e1 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -11,15 +11,12 @@ use ckb_logger::{debug, trace}; use ckb_multi_index_map::MultiIndexMap; use ckb_types::core::error::OutPointError; use ckb_types::packed::OutPoint; +use ckb_types::prelude::*; use ckb_types::{ bytes::Bytes, - core::{cell::CellChecker, TransactionView}, + core::TransactionView, packed::{Byte32, CellOutput, ProposalShortId}, }; -use ckb_types::{ - core::cell::{CellMetaBuilder, CellProvider, CellStatus}, - prelude::*, -}; use std::collections::HashSet; use super::links::TxLinks; @@ -511,31 +508,3 @@ impl PoolMap { }); } } - -impl CellProvider for PoolMap { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - if self.edges.get_input_ref(out_point).is_some() { - return CellStatus::Dead; - } - if let Some((output, data)) = self.get_output_with_data(out_point) { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } else { - CellStatus::Unknown - } - } -} - -impl CellChecker for PoolMap { - fn is_live(&self, out_point: &OutPoint) -> Option { - if self.edges.get_input_ref(out_point).is_some() { - return Some(false); - } - if self.get_output_with_data(out_point).is_some() { - return Some(true); - } - None - } -} diff --git a/tx-pool/src/lib.rs b/tx-pool/src/lib.rs index 48a2157679..cb2ed5ae90 100644 --- a/tx-pool/src/lib.rs +++ b/tx-pool/src/lib.rs @@ -5,6 +5,7 @@ pub mod block_assembler; mod callback; mod chunk_process; mod component; +mod pool_cell; pub mod error; mod persisted; pub mod pool; diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 01a545db95..5c9ae8be7e 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -6,15 +6,12 @@ use crate::callback::Callbacks; use crate::component::pool_map::{PoolEntry, PoolMap, Status}; use crate::component::recent_reject::RecentReject; use crate::error::Reject; +use crate::pool_cell::PoolCell; use ckb_app_config::TxPoolConfig; +use ckb_jsonrpc_types::Capacity; use ckb_logger::{debug, error, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; -use ckb_types::core::cell::CellChecker; -use ckb_types::core::cell::CellMetaBuilder; -use ckb_types::core::cell::{CellProvider, CellStatus}; -use ckb_types::packed::OutPoint; -use ckb_types::prelude::Unpack; use ckb_types::{ core::{ cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, @@ -292,7 +289,8 @@ impl TxPool { pub(crate) fn check_rtx_from_pool(&self, rtx: &ResolvedTransaction) -> Result<(), Reject> { let snapshot = self.snapshot(); - let checker = OverlayCellChecker::new(&self.pool_map, snapshot); + let pool_cell = PoolCell::new(&self.pool_map, false); + let checker = OverlayCellChecker::new(&pool_cell, snapshot); let mut seen_inputs = HashSet::new(); rtx.check(&mut seen_inputs, &checker, snapshot) .map_err(Reject::Resolve) @@ -301,21 +299,11 @@ impl TxPool { pub(crate) fn resolve_tx_from_pool( &self, tx: TransactionView, + rbf: bool, ) -> Result, Reject> { let snapshot = self.snapshot(); - let provider = OverlayCellProvider::new(&self.pool_map, snapshot); - let mut seen_inputs = HashSet::new(); - resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) - .map(Arc::new) - .map_err(Reject::Resolve) - } - - pub(crate) fn resolve_tx_from_pool_rbf( - &self, - tx: TransactionView, - ) -> Result, Reject> { - let snapshot = self.snapshot(); - let provider = OverlayCellProvider::new(self, snapshot); + let pool_cell = PoolCell::new(&self.pool_map, rbf); + let provider = OverlayCellProvider::new(&pool_cell, snapshot); let mut seen_inputs = HashSet::new(); resolve_transaction(tx, &mut seen_inputs, &provider, snapshot) .map(Arc::new) @@ -482,6 +470,17 @@ impl TxPool { (entries, size, cycles) } + pub(crate) fn check_rbf(&self, tx: &ResolvedTransaction, conflicts: &HashSet, fee: Capacity) -> Result<(), Reject> { + if !self.config.enable_rbf { + return Err(Reject::RBFRejected("node disabled RBF".to_string())); + } + if conflicts.len() == 0 { + return Err(Reject::RBFRejected("can not find conflict txs to replace".to_string())); + } + + Ok(()) + } + fn build_recent_reject(config: &TxPoolConfig) -> Option { if !config.recent_reject.as_os_str().is_empty() { let recent_reject_ttl = @@ -507,46 +506,3 @@ impl TxPool { } } -impl CellProvider for TxPool { - fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { - let tx_hash = out_point.tx_hash(); - match self - .pool_map - .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) - { - Some(pool_entry) if pool_entry.status != Status::Proposed => { - match pool_entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - { - Some((output, data)) => { - let cell_meta = CellMetaBuilder::from_cell_output(output, data) - .out_point(out_point.to_owned()) - .build(); - CellStatus::live_cell(cell_meta) - } - None => CellStatus::Unknown, - } - } - _ => CellStatus::Unknown, - } - } -} - -impl CellChecker for TxPool { - fn is_live(&self, out_point: &OutPoint) -> Option { - let tx_hash = out_point.tx_hash(); - match self - .pool_map - .get_by_id(&ProposalShortId::from_tx_hash(&tx_hash)) - { - Some(pool_entry) if pool_entry.status != Status::Proposed => pool_entry - .inner - .transaction() - .output_with_data(out_point.index().unpack()) - .map(|_| true), - _ => None, - } - } -} diff --git a/tx-pool/src/pool_cell.rs b/tx-pool/src/pool_cell.rs new file mode 100644 index 0000000000..4e70d44c82 --- /dev/null +++ b/tx-pool/src/pool_cell.rs @@ -0,0 +1,44 @@ +extern crate rustc_hash; +extern crate slab; +use crate::component::pool_map::PoolMap; +use ckb_types::core::cell::{CellChecker, CellMetaBuilder, CellProvider, CellStatus}; +use ckb_types::packed::OutPoint; + +pub(crate) struct PoolCell<'a> { + pub pool_map: &'a PoolMap, + pub rbf: bool, +} + +impl<'a> PoolCell<'a> { + pub fn new(pool_map: &'a PoolMap, rbf: bool) -> Self { + PoolCell { pool_map, rbf } + } +} + +impl<'a> CellProvider for PoolCell<'a> { + fn cell(&self, out_point: &OutPoint, _eager_load: bool) -> CellStatus { + if !self.rbf && self.pool_map.edges.get_input_ref(out_point).is_some() { + return CellStatus::Dead; + } + if let Some((output, data)) = self.pool_map.get_output_with_data(out_point) { + let cell_meta = CellMetaBuilder::from_cell_output(output, data) + .out_point(out_point.to_owned()) + .build(); + CellStatus::live_cell(cell_meta) + } else { + CellStatus::Unknown + } + } +} + +impl<'a> CellChecker for PoolCell<'a> { + fn is_live(&self, out_point: &OutPoint) -> Option { + if !self.rbf && self.pool_map.edges.get_input_ref(out_point).is_some() { + return Some(false); + } + if self.pool_map.get_output_with_data(out_point).is_some() { + return Some(true); + } + None + } +} diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 962ebab964..29f73619e2 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -211,16 +211,20 @@ impl TxPoolService { // It's not possible for RBF, reject it directly check_txid_collision(tx_pool, tx)?; - // Try to find any conflicted tx in the pool - let conflicts = tx_pool.pool_map.find_conflict_tx(tx); - let rbf = !conflicts.is_empty(); - let res = resolve_tx(tx_pool, &snapshot, tx.clone(), rbf); - let (rtx, status) = res?; - let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; - if rbf { - // check_rbf()? + // Try normal path first, if double-spending check success we don't need RBF check + // this make sure RBF won't introduce extra performance cost for hot path + let res = resolve_tx(tx_pool, &snapshot, tx.clone(), false); + if let Ok((rtx, status)) = res { + let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; + return Ok((tip_hash, rtx, status, fee, tx_size, HashSet::new())); + } else { + // Try RBF check + let conflicts = tx_pool.pool_map.find_conflict_tx(tx); + let (rtx, status) = resolve_tx(tx_pool, &snapshot, tx.clone(), false)?; + let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; + tx_pool.check_rbf(&rtx, &conflicts, fee.into())?; + return Ok((tip_hash, rtx, status, fee, tx_size, conflicts)); } - Ok((tip_hash, rtx, status, fee, tx_size, conflicts)) }) .await; @@ -251,11 +255,11 @@ impl TxPoolService { // non contextual verify first self.non_contextual_verify(&tx, None)?; - eprintln!( - "resumeble_process_tx: {:?} id: {:?}", - tx.hash(), - tx.proposal_short_id() - ); + // eprintln!( + // "resumeble_process_tx: {:?} id: {:?}", + // tx.hash(), + // tx.proposal_short_id() + // ); if self.chunk_contains(&tx).await || self.orphan_contains(&tx).await { return Err(Reject::Duplicated(tx.hash())); } @@ -1007,12 +1011,9 @@ fn resolve_tx( ) -> ResolveResult { let short_id = tx.proposal_short_id(); let tx_status = get_tx_status(snapshot, &short_id); - if !rbf { - tx_pool.resolve_tx_from_pool(tx) - } else { - tx_pool.resolve_tx_from_pool_rbf(tx) - } - .map(|rtx| (rtx, tx_status)) + tx_pool + .resolve_tx_from_pool(tx, rbf) + .map(|rtx| (rtx, tx_status)) } fn _submit_entry( @@ -1021,7 +1022,7 @@ fn _submit_entry( entry: TxEntry, callbacks: &Callbacks, ) -> Result<(), Reject> { - eprintln!("_submit_entry: {:?}", entry.proposal_short_id()); + //eprintln!("_submit_entry: {:?}", entry.proposal_short_id()); match status { TxStatus::Fresh => { if tx_pool.add_pending(entry.clone())? { @@ -1039,7 +1040,7 @@ fn _submit_entry( } } } - eprintln!("finished submit: {:?}", entry.proposal_short_id()); + //eprintln!("finished submit: {:?}", entry.proposal_short_id()); Ok(()) } diff --git a/util/app-config/src/configs/tx_pool.rs b/util/app-config/src/configs/tx_pool.rs index b71223ef7e..0b14987f94 100644 --- a/util/app-config/src/configs/tx_pool.rs +++ b/util/app-config/src/configs/tx_pool.rs @@ -34,6 +34,8 @@ pub struct TxPoolConfig { pub recent_reject: PathBuf, /// The expiration time for pool transactions in hours pub expiry_hours: u8, + /// Enable RBF + pub enable_rbf: bool, } /// Block assembler config options. diff --git a/util/app-config/src/legacy/tx_pool.rs b/util/app-config/src/legacy/tx_pool.rs index bf82ecc1af..ff9c64e437 100644 --- a/util/app-config/src/legacy/tx_pool.rs +++ b/util/app-config/src/legacy/tx_pool.rs @@ -41,6 +41,8 @@ pub(crate) struct TxPoolConfig { recent_reject: PathBuf, #[serde(default = "default_expiry_hours")] expiry_hours: u8, + #[serde(default)] + enable_rbf: bool, } fn default_keep_rejected_tx_hashes_days() -> u8 { @@ -82,6 +84,7 @@ impl Default for TxPoolConfig { persisted_data: Default::default(), recent_reject: Default::default(), expiry_hours: DEFAULT_EXPIRY_HOURS, + enable_rbf: false } } } @@ -103,6 +106,7 @@ impl From for crate::TxPoolConfig { persisted_data, recent_reject, expiry_hours, + enable_rbf, } = input; Self { @@ -115,6 +119,7 @@ impl From for crate::TxPoolConfig { persisted_data, recent_reject, expiry_hours, + enable_rbf, } } } diff --git a/util/jsonrpc-types/src/pool.rs b/util/jsonrpc-types/src/pool.rs index 4a18c73e7c..1aae977b28 100644 --- a/util/jsonrpc-types/src/pool.rs +++ b/util/jsonrpc-types/src/pool.rs @@ -241,6 +241,9 @@ pub enum PoolTransactionReject { /// Transaction expired Expiry(String), + + /// RBF rejected + RBFRejected(String), } impl From for PoolTransactionReject { @@ -260,6 +263,7 @@ impl From for PoolTransactionReject { Reject::Resolve(_) => Self::Resolve(format!("{reject}")), Reject::Verification(_) => Self::Verification(format!("{reject}")), Reject::Expiry(_) => Self::Expiry(format!("{reject}")), + Reject::RBFRejected(_) => Self::RBFRejected(format!("{reject}")), } } } diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs index 534362c6e1..33a1e0a218 100644 --- a/util/types/src/core/cell.rs +++ b/util/types/src/core/cell.rs @@ -710,7 +710,7 @@ pub fn resolve_transaction( // skip resolve input of cellbase if !transaction.is_cellbase() { for out_point in transaction.input_pts_iter() { - eprintln!("resolve input: {:?}", out_point); + //eprintln!("resolve input: {:?}", out_point); if !current_inputs.insert(out_point.to_owned()) { return Err(OutPointError::Dead(out_point)); } diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index 43fcd7e547..2bed88ff7a 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -52,6 +52,10 @@ pub enum Reject { /// Expired #[error("Expiry transaction, timestamp {0}")] Expiry(u64), + + /// RBF rejected + #[error("RBF rejected: {0}")] + RBFRejected(String), } fn is_malformed_from_verification(error: &Error) -> bool { From 8bbac254282f6c1464b7024aa043f2fe38ecb213 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 27 Jun 2023 19:08:09 +0800 Subject: [PATCH 114/267] more on RBF, make first RBF test pass --- test/src/main.rs | 4 +- .../tx_pool/different_txs_with_same_input.rs | 23 ++- test/src/specs/tx_pool/mod.rs | 2 + test/src/specs/tx_pool/replace.rs | 144 ++++++++++++++++++ tx-pool/src/chunk_process.rs | 3 - tx-pool/src/lib.rs | 2 +- tx-pool/src/pool.rs | 22 ++- tx-pool/src/process.rs | 71 ++++++--- tx-pool/src/service.rs | 13 +- util/app-config/src/legacy/tx_pool.rs | 2 +- util/jsonrpc-types/src/blockchain.rs | 18 ++- util/types/src/core/tx_pool.rs | 2 + 12 files changed, 261 insertions(+), 45 deletions(-) create mode 100644 test/src/specs/tx_pool/replace.rs diff --git a/test/src/main.rs b/test/src/main.rs index 02e09c809b..f16b212ae1 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -458,7 +458,9 @@ fn all_specs() -> Vec> { Box::new(RelayWithWrongTx::new()), Box::new(TxsRelayOrder), Box::new(SendTxChain), - Box::new(DifferentTxsWithSameInput), + Box::new(DifferentTxsWithSameInputWithOutRBF), + Box::new(RbfBasic), + Box::new(RbfSameInput), Box::new(CompactBlockEmpty), Box::new(CompactBlockEmptyParentUnknown), Box::new(CompactBlockPrefilled), diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index c6b0594bac..261ec9b2e8 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -7,9 +7,9 @@ use ckb_types::{ prelude::*, }; -pub struct DifferentTxsWithSameInput; +pub struct DifferentTxsWithSameInputWithOutRBF; -impl Spec for DifferentTxsWithSameInput { +impl Spec for DifferentTxsWithSameInputWithOutRBF { fn run(&self, nodes: &mut Vec) { let node0 = &nodes[0]; @@ -20,8 +20,16 @@ impl Spec for DifferentTxsWithSameInput { let tx1 = node0.new_transaction(tx_hash_0.clone()); let tx2_temp = node0.new_transaction(tx_hash_0); - eprintln!("tx1 hash: {:?} short_id: {:?}", tx1.hash(), tx1.proposal_short_id()); - eprintln!("tx2 hash: {:?} short_id: {:?}", tx2_temp.hash(), tx2_temp.proposal_short_id()); + eprintln!( + "tx1 hash: {:?} short_id: {:?}", + tx1.hash(), + tx1.proposal_short_id() + ); + eprintln!( + "tx2 hash: {:?} short_id: {:?}", + tx2_temp.hash(), + tx2_temp.proposal_short_id() + ); // Set tx2 fee to a higher value, tx1 capacity is 100, set tx2 capacity to 80 for +20 fee. let output = CellOutputBuilder::default() .capacity(capacity_bytes!(80).pack()) @@ -52,7 +60,7 @@ impl Spec for DifferentTxsWithSameInput { .map(TransactionView::hash) .collect(); - // RBF (Replace-By-Fees) is not implemented + // RBF (Replace-By-Fees) is not enabled assert!(commit_txs_hash.contains(&tx1.hash())); assert!(!commit_txs_hash.contains(&tx2.hash())); @@ -89,4 +97,9 @@ impl Spec for DifferentTxsWithSameInput { // assert!(ret.transaction.is_none()); // assert!(matches!(ret.tx_status.status, Status::Rejected)); } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.enable_rbf = false; + } } + diff --git a/test/src/specs/tx_pool/mod.rs b/test/src/specs/tx_pool/mod.rs index bd8ffd3a87..2b2d39230e 100644 --- a/test/src/specs/tx_pool/mod.rs +++ b/test/src/specs/tx_pool/mod.rs @@ -24,6 +24,7 @@ mod send_tx_chain; mod txs_relay_order; mod utils; mod valid_since; +mod replace; pub use cellbase_maturity::*; pub use collision::*; @@ -50,6 +51,7 @@ pub use send_secp_tx::*; pub use send_tx_chain::*; pub use txs_relay_order::*; pub use valid_since::*; +pub use replace::*; use ckb_app_config::BlockAssemblerConfig; use ckb_chain_spec::{build_genesis_type_id_script, OUTPUT_INDEX_SECP256K1_BLAKE160_SIGHASH_ALL}; diff --git a/test/src/specs/tx_pool/replace.rs b/test/src/specs/tx_pool/replace.rs new file mode 100644 index 0000000000..9b6bae9f81 --- /dev/null +++ b/test/src/specs/tx_pool/replace.rs @@ -0,0 +1,144 @@ +use crate::{Node, Spec}; +use ckb_jsonrpc_types::Status; +use ckb_logger::info; +use ckb_types::{ + core::{capacity_bytes, Capacity, TransactionView}, + packed::CellOutputBuilder, + prelude::*, +}; + +pub struct RbfBasic; +pub struct RbfSameInput; + +impl Spec for RbfBasic { + fn run(&self, nodes: &mut Vec) { + let node0 = &nodes[0]; + + node0.mine_until_out_bootstrap_period(); + node0.new_block_with_blocking(|template| template.number.value() != 13); + let tx_hash_0 = node0.generate_transaction(); + info!("Generate 2 txs with same input"); + let tx1 = node0.new_transaction(tx_hash_0.clone()); + let tx2_temp = node0.new_transaction(tx_hash_0); + + eprintln!( + "tx1 hash: {:?} short_id: {:?}", + tx1.hash(), + tx1.proposal_short_id() + ); + eprintln!( + "tx2 hash: {:?} short_id: {:?}", + tx2_temp.hash(), + tx2_temp.proposal_short_id() + ); + // Set tx2 fee to a higher value, tx1 capacity is 100, set tx2 capacity to 80 for +20 fee. + let output = CellOutputBuilder::default() + .capacity(capacity_bytes!(80).pack()) + .build(); + + let tx2 = tx2_temp + .as_advanced_builder() + .set_outputs(vec![output]) + .build(); + + eprintln!("tx1: {:?}", tx1); + eprintln!("tx2: {:?}", tx2); + + node0.rpc_client().send_transaction(tx1.data().into()); + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_ok(), "tx2 should replace old tx"); + + node0.mine_with_blocking(|template| template.proposals.len() != 2); + node0.mine_with_blocking(|template| template.number.value() != 14); + node0.mine_with_blocking(|template| template.transactions.len() != 2); + + let tip_block = node0.get_tip_block(); + let commit_txs_hash: Vec<_> = tip_block + .transactions() + .iter() + .map(TransactionView::hash) + .collect(); + + // RBF (Replace-By-Fees) is enabled + assert!(!commit_txs_hash.contains(&tx1.hash())); + assert!(commit_txs_hash.contains(&tx2.hash())); + + // when tx1 was confirmed, tx2 should be rejected + let ret = node0.rpc_client().get_transaction(tx2.hash()); + assert!( + matches!(ret.tx_status.status, Status::Committed), + "tx2 should be committed" + ); + + // verbosity = 1 + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx1.hash(), 1); + eprintln!("ret: {:?}", ret); + assert!(ret.transaction.is_none()); + assert!(matches!(ret.tx_status.status, Status::Rejected)); + assert!(ret.tx_status.reason.unwrap().contains("RBFRejected")); + + // verbosity = 2 + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx2.hash(), 2); + assert!(ret.transaction.is_some()); + assert!(matches!(ret.tx_status.status, Status::Committed)); + + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx1.hash(), 2); + assert!(ret.transaction.is_none()); + assert!(matches!(ret.tx_status.status, Status::Rejected)); + assert!(ret.tx_status.reason.unwrap().contains("RBFRejected")); + } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.enable_rbf = true; + } +} + + +impl Spec for RbfSameInput { + fn run(&self, nodes: &mut Vec) { + let node0 = &nodes[0]; + + node0.mine_until_out_bootstrap_period(); + node0.new_block_with_blocking(|template| template.number.value() != 13); + let tx_hash_0 = node0.generate_transaction(); + info!("Generate 2 txs with same input"); + let tx1 = node0.new_transaction(tx_hash_0.clone()); + let tx2_temp = node0.new_transaction(tx_hash_0); + + eprintln!( + "tx1 hash: {:?} short_id: {:?}", + tx1.hash(), + tx1.proposal_short_id() + ); + eprintln!( + "tx2 hash: {:?} short_id: {:?}", + tx2_temp.hash(), + tx2_temp.proposal_short_id() + ); + let tx2 = tx2_temp + .as_advanced_builder() + .build(); + + eprintln!("tx1: {:?}", tx1); + eprintln!("tx2: {:?}", tx2); + + node0.rpc_client().send_transaction(tx1.data().into()); + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err(), "tx2 should be rejected"); + } + + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.enable_rbf = true; + } +} \ No newline at end of file diff --git a/tx-pool/src/chunk_process.rs b/tx-pool/src/chunk_process.rs index bee8a06846..51ebdec07d 100644 --- a/tx-pool/src/chunk_process.rs +++ b/tx-pool/src/chunk_process.rs @@ -252,9 +252,6 @@ impl ChunkProcess { let completed = try_or_return_with_snapshot!(ret, snapshot); let entry = TxEntry::new(rtx, completed.cycles, fee, tx_size); - if !conflicts.is_empty() { - // remove conflict tx - } let (ret, submit_snapshot) = self .service .submit_entry(tip_hash, entry, status, conflicts) diff --git a/tx-pool/src/lib.rs b/tx-pool/src/lib.rs index cb2ed5ae90..d122177c01 100644 --- a/tx-pool/src/lib.rs +++ b/tx-pool/src/lib.rs @@ -5,10 +5,10 @@ pub mod block_assembler; mod callback; mod chunk_process; mod component; -mod pool_cell; pub mod error; mod persisted; pub mod pool; +mod pool_cell; mod process; pub mod service; mod util; diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 5c9ae8be7e..4360ad12a4 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -156,6 +156,14 @@ impl TxPool { .map(|entry| entry.inner.transaction()) } + pub(crate) fn put_recent_reject(&mut self, tx_hash: &Byte32, reject: &Reject) { + if let Some(ref mut recent_reject) = self.recent_reject { + if let Err(e) = recent_reject.put(tx_hash, reject.clone()) { + error!("record recent_reject failed {} {} {}", tx_hash, reject, e); + } + } + } + pub(crate) fn remove_committed_txs<'a>( &mut self, txs: impl Iterator, @@ -470,12 +478,19 @@ impl TxPool { (entries, size, cycles) } - pub(crate) fn check_rbf(&self, tx: &ResolvedTransaction, conflicts: &HashSet, fee: Capacity) -> Result<(), Reject> { + pub(crate) fn check_rbf( + &self, + _tx: &ResolvedTransaction, + conflicts: &HashSet, + _fee: Capacity, + ) -> Result<(), Reject> { if !self.config.enable_rbf { return Err(Reject::RBFRejected("node disabled RBF".to_string())); } - if conflicts.len() == 0 { - return Err(Reject::RBFRejected("can not find conflict txs to replace".to_string())); + if conflicts.is_empty() { + return Err(Reject::RBFRejected( + "can not find conflict txs to replace".to_string(), + )); } Ok(()) @@ -505,4 +520,3 @@ impl TxPool { } } } - diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 29f73619e2..a7f3c17311 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -125,8 +125,31 @@ impl TxPoolService { } // try to remove conflicted tx here - for r in conflicts.iter() { - eprintln!("removeing : {:?}", r); + for id in conflicts.iter() { + let removed = tx_pool.pool_map.remove_entry_and_descendants(id); + if removed.is_empty() { + return Err(Reject::RBFRejected( + "RBF remove old entries error".to_string(), + )); + } + eprintln!("removed: {:?}", id); + for old in removed { + let reject = Reject::RBFRejected(format!( + "replaced by {}", + entry.proposal_short_id() + )); + eprintln!( + "add recent_reject: id: {:?} reject: {:?}", + &old.proposal_short_id(), + reject + ); + // remove old tx from tx_pool, not happened in service so we didn't call reject callbacks + // here we call them manually + // TODO: how to call reject notify like service? + tx_pool.put_recent_reject(&old.transaction().hash(), &reject); + tx_pool.update_statics_for_remove_tx(old.size, old.cycles); + self.callbacks.call_reject(tx_pool, &old, reject) + } } _submit_entry(tx_pool, status, entry.clone(), &self.callbacks)?; Ok(()) @@ -205,7 +228,7 @@ impl TxPoolService { let (ret, snapshot) = self .with_tx_pool_read_lock(|tx_pool, snapshot| { - let tip_hash = snapshot.tip_hash(); + let tip_hash: Byte32 = snapshot.tip_hash(); // Same txid means exactly the same transaction, including inputs, outputs, witnesses, etc. // It's not possible for RBF, reject it directly @@ -214,16 +237,28 @@ impl TxPoolService { // Try normal path first, if double-spending check success we don't need RBF check // this make sure RBF won't introduce extra performance cost for hot path let res = resolve_tx(tx_pool, &snapshot, tx.clone(), false); - if let Ok((rtx, status)) = res { - let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; - return Ok((tip_hash, rtx, status, fee, tx_size, HashSet::new())); - } else { - // Try RBF check - let conflicts = tx_pool.pool_map.find_conflict_tx(tx); - let (rtx, status) = resolve_tx(tx_pool, &snapshot, tx.clone(), false)?; - let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; - tx_pool.check_rbf(&rtx, &conflicts, fee.into())?; - return Ok((tip_hash, rtx, status, fee, tx_size, conflicts)); + match res { + Ok((rtx, status)) => { + let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; + Ok((tip_hash, rtx, status, fee, tx_size, HashSet::new())) + } + Err(err) => { + eprintln!( + "resolve_tx error: {:?}, try RBF check", + tx_pool.config.enable_rbf + ); + if tx_pool.config.enable_rbf { + // Try RBF check + eprintln!("begin RBF check ...."); + let conflicts = tx_pool.pool_map.find_conflict_tx(tx); + let (rtx, status) = resolve_tx(tx_pool, &snapshot, tx.clone(), true)?; + let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; + tx_pool.check_rbf(&rtx, &conflicts, fee.into())?; + Ok((tip_hash, rtx, status, fee, tx_size, conflicts)) + } else { + Err(err) + } + } } }) .await; @@ -310,11 +345,7 @@ impl TxPoolService { pub(crate) async fn put_recent_reject(&self, tx_hash: &Byte32, reject: &Reject) { let mut tx_pool = self.tx_pool.write().await; - if let Some(ref mut recent_reject) = tx_pool.recent_reject { - if let Err(e) = recent_reject.put(tx_hash, reject.clone()) { - error!("record recent_reject failed {} {} {}", tx_hash, reject, e); - } - } + tx_pool.put_recent_reject(tx_hash, reject); } pub(crate) async fn remove_tx(&self, tx_hash: Byte32) -> bool { @@ -984,9 +1015,9 @@ type PreCheckedTx = ( type ResolveResult = Result<(Arc, TxStatus), Reject>; fn get_tx_status(snapshot: &Snapshot, short_id: &ProposalShortId) -> TxStatus { - if snapshot.proposals().contains_proposed(&short_id) { + if snapshot.proposals().contains_proposed(short_id) { TxStatus::Proposed - } else if snapshot.proposals().contains_gap(&short_id) { + } else if snapshot.proposals().contains_gap(short_id) { TxStatus::Gap } else { TxStatus::Fresh diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index 1a187615c8..abd945de75 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -790,15 +790,10 @@ async fn process(mut service: TxPoolService, message: Message) { entry.timestamp, )) } else if let Some(ref recent_reject_db) = tx_pool.recent_reject { - let recent_reject_result = recent_reject_db.get(&hash); - if let Ok(recent_reject) = recent_reject_result { - if let Some(record) = recent_reject { - Ok(TransactionWithStatus::with_rejected(record)) - } else { - Ok(TransactionWithStatus::with_unknown()) - } - } else { - Err(recent_reject_result.unwrap_err()) + match recent_reject_db.get(&hash) { + Ok(Some(record)) => Ok(TransactionWithStatus::with_rejected(record)), + Ok(_) => Ok(TransactionWithStatus::with_unknown()), + Err(err) => Err(err), } } else { Ok(TransactionWithStatus::with_unknown()) diff --git a/util/app-config/src/legacy/tx_pool.rs b/util/app-config/src/legacy/tx_pool.rs index ff9c64e437..76b41d0c47 100644 --- a/util/app-config/src/legacy/tx_pool.rs +++ b/util/app-config/src/legacy/tx_pool.rs @@ -84,7 +84,7 @@ impl Default for TxPoolConfig { persisted_data: Default::default(), recent_reject: Default::default(), expiry_hours: DEFAULT_EXPIRY_HOURS, - enable_rbf: false + enable_rbf: false, } } } diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index 8acaf95ce7..d785ead6c3 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -577,6 +577,8 @@ pub enum Status { /// Status "rejected". The transaction has been recently removed from the pool. /// Due to storage limitations, the node can only hold the most recently removed transactions. Rejected, + /// Status "replaced". The transaction has been recently replace from the pool. + Replaced, } /// Transaction status and the block hash if it is committed. @@ -596,8 +598,9 @@ impl From for TxStatus { tx_pool::TxStatus::Pending => TxStatus::pending(), tx_pool::TxStatus::Proposed => TxStatus::proposed(), tx_pool::TxStatus::Committed(hash) => TxStatus::committed(hash), - tx_pool::TxStatus::Unknown => TxStatus::unknown(), tx_pool::TxStatus::Rejected(reason) => TxStatus::rejected(reason), + tx_pool::TxStatus::Replaced(reason) => TxStatus::replaced(reason), + tx_pool::TxStatus::Unknown => TxStatus::unknown(), } } } @@ -647,6 +650,19 @@ impl TxStatus { } } + /// Transaction which has already been replaced recently. + /// + /// ## Params + /// + /// * `reason` - the reason why the transaction is replaced. + pub fn replaced(reason: String) -> Self { + Self { + status: Status::Replaced, + block_hash: None, + reason: Some(reason), + } + } + /// The node has not seen the transaction, pub fn unknown() -> Self { Self { diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index 2bed88ff7a..a061aca8db 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -117,6 +117,8 @@ pub enum TxStatus { /// Status "rejected". The transaction has been recently removed from the pool. /// Due to storage limitations, the node can only hold the most recently removed transactions. Rejected(String), + /// Status "replaced", The transaction has been recently replaced for RBF. + Replaced(String), } /// Tx-pool entry info From bcb79a6331da51cfc48a7607464674ca6daa7f9a Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 28 Jun 2023 17:05:28 +0800 Subject: [PATCH 115/267] add checking rules for RBF --- resource/ckb.toml | 1 + rpc/src/module/pool.rs | 1 + .../tx_pool/different_txs_with_same_input.rs | 1 - test/src/specs/tx_pool/mod.rs | 4 +- test/src/specs/tx_pool/replace.rs | 8 +- test/template/ckb.toml | 1 + tx-pool/src/pool.rs | 80 +++++++++++++++++-- tx-pool/src/process.rs | 18 +---- tx-pool/src/service.rs | 1 + util/app-config/src/configs/tx_pool.rs | 3 + util/app-config/src/legacy/tx_pool.rs | 7 ++ util/jsonrpc-types/src/pool.rs | 5 ++ util/types/src/core/tx_pool.rs | 6 ++ 13 files changed, 102 insertions(+), 34 deletions(-) diff --git a/resource/ckb.toml b/resource/ckb.toml index 89ec89f6fb..d59d5d9edd 100644 --- a/resource/ckb.toml +++ b/resource/ckb.toml @@ -134,6 +134,7 @@ enable_deprecated_rpc = false # {{ [tx_pool] max_tx_pool_size = 180_000_000 # 180mb min_fee_rate = 1_000 # Here fee_rate are calculated directly using size in units of shannons/KB +min_rbf_rate = 1_000 # Here fee_rate are calculated directly using size in units of shannons/KB max_tx_verify_cycles = 70_000_000 max_ancestors_count = 25 diff --git a/rpc/src/module/pool.rs b/rpc/src/module/pool.rs index dfd4d9531d..1fe3e45de6 100644 --- a/rpc/src/module/pool.rs +++ b/rpc/src/module/pool.rs @@ -166,6 +166,7 @@ pub trait PoolRpc { /// "result": { /// "last_txs_updated_at": "0x0", /// "min_fee_rate": "0x3e8", + /// "min_rbf_rate": "0x5dc", /// "max_tx_pool_size": "0xaba9500", /// "orphan": "0x0", /// "pending": "0x1", diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index 261ec9b2e8..87fd7d3c56 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -102,4 +102,3 @@ impl Spec for DifferentTxsWithSameInputWithOutRBF { config.tx_pool.enable_rbf = false; } } - diff --git a/test/src/specs/tx_pool/mod.rs b/test/src/specs/tx_pool/mod.rs index 2b2d39230e..925b5618fc 100644 --- a/test/src/specs/tx_pool/mod.rs +++ b/test/src/specs/tx_pool/mod.rs @@ -15,6 +15,7 @@ mod pool_resurrect; mod proposal_expire_rule; mod remove_tx; mod reorg_proposals; +mod replace; mod send_defected_binary; mod send_large_cycles_tx; mod send_low_fee_rate_tx; @@ -24,7 +25,6 @@ mod send_tx_chain; mod txs_relay_order; mod utils; mod valid_since; -mod replace; pub use cellbase_maturity::*; pub use collision::*; @@ -43,6 +43,7 @@ pub use pool_resurrect::*; pub use proposal_expire_rule::*; pub use remove_tx::*; pub use reorg_proposals::*; +pub use replace::*; pub use send_defected_binary::*; pub use send_large_cycles_tx::*; pub use send_low_fee_rate_tx::*; @@ -51,7 +52,6 @@ pub use send_secp_tx::*; pub use send_tx_chain::*; pub use txs_relay_order::*; pub use valid_since::*; -pub use replace::*; use ckb_app_config::BlockAssemblerConfig; use ckb_chain_spec::{build_genesis_type_id_script, OUTPUT_INDEX_SECP256K1_BLAKE160_SIGHASH_ALL}; diff --git a/test/src/specs/tx_pool/replace.rs b/test/src/specs/tx_pool/replace.rs index 9b6bae9f81..a5e742fbbc 100644 --- a/test/src/specs/tx_pool/replace.rs +++ b/test/src/specs/tx_pool/replace.rs @@ -101,7 +101,6 @@ impl Spec for RbfBasic { } } - impl Spec for RbfSameInput { fn run(&self, nodes: &mut Vec) { let node0 = &nodes[0]; @@ -123,9 +122,7 @@ impl Spec for RbfSameInput { tx2_temp.hash(), tx2_temp.proposal_short_id() ); - let tx2 = tx2_temp - .as_advanced_builder() - .build(); + let tx2 = tx2_temp.as_advanced_builder().build(); eprintln!("tx1: {:?}", tx1); eprintln!("tx2: {:?}", tx2); @@ -137,8 +134,7 @@ impl Spec for RbfSameInput { assert!(res.is_err(), "tx2 should be rejected"); } - fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { config.tx_pool.enable_rbf = true; } -} \ No newline at end of file +} diff --git a/test/template/ckb.toml b/test/template/ckb.toml index c722fa1e55..0eea3eb7b1 100644 --- a/test/template/ckb.toml +++ b/test/template/ckb.toml @@ -79,6 +79,7 @@ enable_deprecated_rpc = true [tx_pool] max_tx_pool_size = 180_000_000 # 180mb min_fee_rate = 0 # Here fee_rate are calculated directly using size in units of shannons/KB +min_rbf_rate = 0 # Here rbf_rate are calculated directly using size in units of shannons/KB max_tx_verify_cycles = 70_000_000 max_ancestors_count = 25 diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 4360ad12a4..c62c175141 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -8,7 +8,6 @@ use crate::component::recent_reject::RecentReject; use crate::error::Reject; use crate::pool_cell::PoolCell; use ckb_app_config::TxPoolConfig; -use ckb_jsonrpc_types::Capacity; use ckb_logger::{debug, error, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; @@ -16,7 +15,7 @@ use ckb_types::{ core::{ cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, tx_pool::{TxPoolEntryInfo, TxPoolIds}, - Cycle, TransactionView, UncleBlockView, + Capacity, Cycle, TransactionView, UncleBlockView, }, packed::{Byte32, ProposalShortId}, }; @@ -25,7 +24,7 @@ use std::collections::HashSet; use std::sync::Arc; const COMMITTED_HASH_CACHE_SIZE: usize = 100_000; - +const MAX_REPLACEMENT_CANDIDATES: usize = 100; /// Tx-pool implementation pub struct TxPool { pub(crate) config: TxPoolConfig, @@ -480,19 +479,84 @@ impl TxPool { pub(crate) fn check_rbf( &self, - _tx: &ResolvedTransaction, + snapshot: &Snapshot, + rtx: &ResolvedTransaction, conflicts: &HashSet, - _fee: Capacity, + fee: Capacity, + tx_size: usize, ) -> Result<(), Reject> { - if !self.config.enable_rbf { - return Err(Reject::RBFRejected("node disabled RBF".to_string())); - } + assert!(self.config.enable_rbf); if conflicts.is_empty() { return Err(Reject::RBFRejected( "can not find conflict txs to replace".to_string(), )); } + let conflicts = conflicts + .iter() + .map(|id| { + &self + .get_pool_entry(id) + .expect("conflict tx should be in pool or store") + .inner + }) + .collect::>(); + + // TODO: Rule #1, the conflicted tx need to confirmed as `can_be_replaced` + + // Rule #2, new tx don't contain any new unconfirmed inputs + // TODO: confirm whether this could be used in ckb + // https://github.com/bitcoin/bitcoin/blob/d9c7c2fd3ec7b0fcae7e0c9423bff6c6799dd67c/src/policy/rbf.cpp#L107 + let mut inputs = HashSet::new(); + for c in conflicts.iter() { + inputs.extend(c.transaction().input_pts_iter()); + } + if rtx + .transaction + .input_pts_iter() + .any(|pt| !inputs.contains(&pt) && !snapshot.transaction_exists(&pt.tx_hash())) + { + return Err(Reject::RBFRejected( + "new tx contains unconfirmed inputs".to_string(), + )); + } + + // Rule #4, new tx' fee need to higher than min_rbf_fee computed from the tx_pool configuration + let min_rbf_fee = self.config.min_rbf_rate.fee(tx_size as u64); + if fee <= min_rbf_fee { + return Err(Reject::RBFRejected(format!( + "tx fee lower than min_rbf_fee, min_rbf_fee: {}, tx fee: {}", + min_rbf_fee, fee, + ))); + } + + // Rule #3, new tx's fee need to higher than conflicts + for conflict in conflicts.iter() { + eprintln!("old fee: {:?} new_fee: {:?}", conflict.fee, fee); + if conflict.fee >= fee { + return Err(Reject::RBFRejected(format!( + "tx fee lower than conflict tx fee, conflict id: {}, conflict fee: {}, tx fee: {}", + conflict.proposal_short_id(), + conflict.fee, + fee, + ))); + } + } + + // Rule #5, new replaced tx's descendants can not more than 100 + let mut replace_count: usize = 0; + for conflict in conflicts.iter() { + let id = conflict.proposal_short_id(); + let descendants = self.pool_map.calc_descendants(&id); + replace_count += descendants.len() + 1; + if replace_count > MAX_REPLACEMENT_CANDIDATES { + return Err(Reject::RBFRejected(format!( + "tx conflict too many txs, conflict txs count: {}", + replace_count, + ))); + } + } + Ok(()) } diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index a7f3c17311..86e25e5a21 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -132,17 +132,11 @@ impl TxPoolService { "RBF remove old entries error".to_string(), )); } - eprintln!("removed: {:?}", id); for old in removed { let reject = Reject::RBFRejected(format!( "replaced by {}", entry.proposal_short_id() )); - eprintln!( - "add recent_reject: id: {:?} reject: {:?}", - &old.proposal_short_id(), - reject - ); // remove old tx from tx_pool, not happened in service so we didn't call reject callbacks // here we call them manually // TODO: how to call reject notify like service? @@ -243,17 +237,12 @@ impl TxPoolService { Ok((tip_hash, rtx, status, fee, tx_size, HashSet::new())) } Err(err) => { - eprintln!( - "resolve_tx error: {:?}, try RBF check", - tx_pool.config.enable_rbf - ); if tx_pool.config.enable_rbf { // Try RBF check - eprintln!("begin RBF check ...."); let conflicts = tx_pool.pool_map.find_conflict_tx(tx); let (rtx, status) = resolve_tx(tx_pool, &snapshot, tx.clone(), true)?; let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; - tx_pool.check_rbf(&rtx, &conflicts, fee.into())?; + tx_pool.check_rbf(&snapshot, &rtx, &conflicts, fee, tx_size)?; Ok((tip_hash, rtx, status, fee, tx_size, conflicts)) } else { Err(err) @@ -290,11 +279,6 @@ impl TxPoolService { // non contextual verify first self.non_contextual_verify(&tx, None)?; - // eprintln!( - // "resumeble_process_tx: {:?} id: {:?}", - // tx.hash(), - // tx.proposal_short_id() - // ); if self.chunk_contains(&tx).await || self.orphan_contains(&tx).await { return Err(Reject::Duplicated(tx.hash())); } diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index abd945de75..6414f3161f 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -914,6 +914,7 @@ impl TxPoolService { total_tx_size: tx_pool.total_tx_size, total_tx_cycles: tx_pool.total_tx_cycles, min_fee_rate: self.tx_pool_config.min_fee_rate, + min_rbf_rate: self.tx_pool_config.min_rbf_rate, last_txs_updated_at: 0, tx_size_limit: TRANSACTION_SIZE_LIMIT, max_tx_pool_size: self.tx_pool_config.max_tx_pool_size as u64, diff --git a/util/app-config/src/configs/tx_pool.rs b/util/app-config/src/configs/tx_pool.rs index 0b14987f94..eb514c3d8a 100644 --- a/util/app-config/src/configs/tx_pool.rs +++ b/util/app-config/src/configs/tx_pool.rs @@ -14,6 +14,9 @@ pub struct TxPoolConfig { /// txs with lower fee rate than this will not be relayed or be mined #[serde(with = "FeeRateDef")] pub min_fee_rate: FeeRate, + /// txs need to pay more than this for RBF + #[serde(with = "FeeRateDef")] + pub min_rbf_rate: FeeRate, /// tx pool rejects txs that cycles greater than max_tx_verify_cycles pub max_tx_verify_cycles: Cycle, /// max ancestors size limit for a single tx diff --git a/util/app-config/src/legacy/tx_pool.rs b/util/app-config/src/legacy/tx_pool.rs index 76b41d0c47..1927bbda7e 100644 --- a/util/app-config/src/legacy/tx_pool.rs +++ b/util/app-config/src/legacy/tx_pool.rs @@ -7,6 +7,8 @@ use std::path::PathBuf; // default min fee rate, 1000 shannons per kilobyte const DEFAULT_MIN_FEE_RATE: FeeRate = FeeRate::from_u64(1000); +// default min rbf rate, 1500 shannons per kilobyte +const DEFAULT_MIN_RBF_RATE: FeeRate = FeeRate::from_u64(1500); // default max tx verify cycles const DEFAULT_MAX_TX_VERIFY_CYCLES: Cycle = TWO_IN_TWO_OUT_CYCLES * 20; // default max ancestors count @@ -33,6 +35,8 @@ pub(crate) struct TxPoolConfig { keep_rejected_tx_hashes_count: u64, #[serde(with = "FeeRateDef")] min_fee_rate: FeeRate, + #[serde(with = "FeeRateDef")] + min_rbf_rate: FeeRate, max_tx_verify_cycles: Cycle, max_ancestors_count: usize, #[serde(default)] @@ -79,6 +83,7 @@ impl Default for TxPoolConfig { keep_rejected_tx_hashes_days: default_keep_rejected_tx_hashes_days(), keep_rejected_tx_hashes_count: default_keep_rejected_tx_hashes_count(), min_fee_rate: DEFAULT_MIN_FEE_RATE, + min_rbf_rate: DEFAULT_MIN_RBF_RATE, max_tx_verify_cycles: DEFAULT_MAX_TX_VERIFY_CYCLES, max_ancestors_count: DEFAULT_MAX_ANCESTORS_COUNT, persisted_data: Default::default(), @@ -101,6 +106,7 @@ impl From for crate::TxPoolConfig { keep_rejected_tx_hashes_days, keep_rejected_tx_hashes_count, min_fee_rate, + min_rbf_rate, max_tx_verify_cycles, max_ancestors_count, persisted_data, @@ -112,6 +118,7 @@ impl From for crate::TxPoolConfig { Self { max_tx_pool_size, min_fee_rate, + min_rbf_rate, max_tx_verify_cycles, max_ancestors_count: cmp::max(DEFAULT_MAX_ANCESTORS_COUNT, max_ancestors_count), keep_rejected_tx_hashes_days, diff --git a/util/jsonrpc-types/src/pool.rs b/util/jsonrpc-types/src/pool.rs index 1aae977b28..3e773c1ca2 100644 --- a/util/jsonrpc-types/src/pool.rs +++ b/util/jsonrpc-types/src/pool.rs @@ -40,6 +40,10 @@ pub struct TxPoolInfo { /// /// The unit is Shannons per 1000 bytes transaction serialization size in the block. pub min_fee_rate: Uint64, + /// RBF rate threshold. The pool reject to resort for transactions which fee rate is below this threshold. + /// + /// The unit is Shannons per 1000 bytes transaction serialization size in the block. + pub min_rbf_rate: Uint64, /// Last updated time. This is the Unix timestamp in milliseconds. pub last_txs_updated_at: Timestamp, /// Limiting transactions to tx_size_limit @@ -63,6 +67,7 @@ impl From for TxPoolInfo { total_tx_size: (tx_pool_info.total_tx_size as u64).into(), total_tx_cycles: tx_pool_info.total_tx_cycles.into(), min_fee_rate: tx_pool_info.min_fee_rate.as_u64().into(), + min_rbf_rate: tx_pool_info.min_rbf_rate.as_u64().into(), last_txs_updated_at: tx_pool_info.last_txs_updated_at.into(), tx_size_limit: tx_pool_info.tx_size_limit.into(), max_tx_pool_size: tx_pool_info.max_tx_pool_size.into(), diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index a061aca8db..6c19213502 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -321,6 +321,12 @@ pub struct TxPoolInfo { /// /// The unit is Shannons per 1000 bytes transaction serialization size in the block. pub min_fee_rate: FeeRate, + + /// Min RBF rate threshold. The pool reject RBF transactions which fee rate is below this threshold. + /// + /// The unit is Shannons per 1000 bytes transaction serialization size in the block. + pub min_rbf_rate: FeeRate, + /// Last updated time. This is the Unix timestamp in milliseconds. pub last_txs_updated_at: u64, /// Limiting transactions to tx_size_limit From 2f6b13b57813626e917029d92db4513a5c59f890 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 4 Jul 2023 14:33:36 +0800 Subject: [PATCH 116/267] fix RBF callbacks issues --- tx-pool/src/process.rs | 26 +++++++++++++++++++------- util/launcher/src/shared_builder.rs | 2 +- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 86e25e5a21..144321b4b6 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -140,8 +140,6 @@ impl TxPoolService { // remove old tx from tx_pool, not happened in service so we didn't call reject callbacks // here we call them manually // TODO: how to call reject notify like service? - tx_pool.put_recent_reject(&old.transaction().hash(), &reject); - tx_pool.update_statics_for_remove_tx(old.size, old.cycles); self.callbacks.call_reject(tx_pool, &old, reject) } } @@ -225,7 +223,7 @@ impl TxPoolService { let tip_hash: Byte32 = snapshot.tip_hash(); // Same txid means exactly the same transaction, including inputs, outputs, witnesses, etc. - // It's not possible for RBF, reject it directly + // It's also not possible for RBF, reject it directly check_txid_collision(tx_pool, tx)?; // Try normal path first, if double-spending check success we don't need RBF check @@ -408,7 +406,12 @@ impl TxPoolService { }); } - if matches!(reject, Reject::Resolve(..) | Reject::Verification(..)) { + if matches!( + reject, + Reject::Resolve(..) + | Reject::Verification(..) + | Reject::RBFRejected(..) + ) { self.put_recent_reject(&tx_hash, reject).await; } } @@ -433,7 +436,12 @@ impl TxPoolService { }); } Err(reject) => { - if matches!(reject, Reject::Resolve(..) | Reject::Verification(..)) { + if matches!( + reject, + Reject::Resolve(..) + | Reject::Verification(..) + | Reject::RBFRejected(..) + ) { self.put_recent_reject(&tx_hash, reject).await; } } @@ -533,8 +541,12 @@ impl TxPoolService { tx_hash: orphan.tx.hash(), }); } - if matches!(reject, Reject::Resolve(..) | Reject::Verification(..)) - { + if matches!( + reject, + Reject::Resolve(..) + | Reject::Verification(..) + | Reject::RBFRejected(..) + ) { self.put_recent_reject(&orphan.tx.hash(), &reject).await; } } diff --git a/util/launcher/src/shared_builder.rs b/util/launcher/src/shared_builder.rs index 96d750a198..f019b377ad 100644 --- a/util/launcher/src/shared_builder.rs +++ b/util/launcher/src/shared_builder.rs @@ -463,7 +463,7 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: let tx_hash = entry.transaction().hash(); // record recent reject - if matches!(reject, Reject::Resolve(..)) { + if matches!(reject, Reject::Resolve(..) | Reject::RBFRejected(..) ) { if let Some(ref mut recent_reject) = tx_pool.recent_reject { if let Err(e) = recent_reject.put(&tx_hash, reject.clone()) { error!("record recent_reject failed {} {} {}", tx_hash, reject, e); From f2ec36ae4b80c029619fff484d05db0f8e4d23fe Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 5 Jul 2023 16:56:37 +0800 Subject: [PATCH 117/267] add more test for RBF and cleanup --- resource/ckb.toml | 2 +- rpc/README.md | 12 +- test/src/main.rs | 4 + .../tx_pool/different_txs_with_same_input.rs | 13 - test/src/specs/tx_pool/replace.rs | 280 ++++++++++++++++-- tx-pool/src/component/pool_map.rs | 7 +- tx-pool/src/pool.rs | 54 ++-- tx-pool/src/process.rs | 11 +- util/jsonrpc-types/src/blockchain.rs | 16 - util/launcher/src/shared_builder.rs | 2 +- util/types/src/core/cell.rs | 1 - util/types/src/core/tx_pool.rs | 2 - 12 files changed, 307 insertions(+), 97 deletions(-) diff --git a/resource/ckb.toml b/resource/ckb.toml index d59d5d9edd..c34450a3e5 100644 --- a/resource/ckb.toml +++ b/resource/ckb.toml @@ -134,7 +134,7 @@ enable_deprecated_rpc = false # {{ [tx_pool] max_tx_pool_size = 180_000_000 # 180mb min_fee_rate = 1_000 # Here fee_rate are calculated directly using size in units of shannons/KB -min_rbf_rate = 1_000 # Here fee_rate are calculated directly using size in units of shannons/KB +min_rbf_rate = 1_500 # Here fee_rate are calculated directly using size in units of shannons/KB max_tx_verify_cycles = 70_000_000 max_ancestors_count = 25 diff --git a/rpc/README.md b/rpc/README.md index 6db0bf3d40..e7f6e24131 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -4510,6 +4510,7 @@ Response "result": { "last_txs_updated_at": "0x0", "min_fee_rate": "0x3e8", + "min_rbf_rate": "0x5dc", "max_tx_pool_size": "0xaba9500", "orphan": "0x0", "pending": "0x1", @@ -5072,6 +5073,10 @@ For example, a cellbase transaction is not allowed in `send_transaction` RPC. (-1110): The transaction exceeded maximum size limit. +### Error `PoolRejctedRBF` + +(-1111): The transaction is rejected for RBF checking. + ### Error `Indexer` (-1200): The indexer error. @@ -6426,7 +6431,7 @@ TX reject message `PoolTransactionReject` is a JSON object with following fields. -* `type`: `"LowFeeRate" | "ExceededMaximumAncestorsCount" | "ExceededTransactionSizeLimit" | "Full" | "Duplicated" | "Malformed" | "DeclaredWrongCycles" | "Resolve" | "Verification" | "Expiry"` - Reject type. +* `type`: `"LowFeeRate" | "ExceededMaximumAncestorsCount" | "ExceededTransactionSizeLimit" | "Full" | "Duplicated" | "Malformed" | "DeclaredWrongCycles" | "Resolve" | "Verification" | "Expiry" | "RBFRejected"` - Reject type. * `description`: `string` - Detailed description about why the transaction is rejected. Different reject types: @@ -6441,6 +6446,7 @@ Different reject types: * `Resolve`: Resolve failed * `Verification`: Verification failed * `Expiry`: Transaction expired +* `RBFRejected`: RBF rejected ### Type `ProposalShortId` @@ -7033,6 +7039,10 @@ Transaction pool information. The unit is Shannons per 1000 bytes transaction serialization size in the block. +* `min_rbf_rate`: [`Uint64`](#type-uint64) - RBF rate threshold. The pool reject to resort for transactions which fee rate is below this threshold. + + The unit is Shannons per 1000 bytes transaction serialization size in the block. + * `last_txs_updated_at`: [`Timestamp`](#type-timestamp) - Last updated time. This is the Unix timestamp in milliseconds. * `tx_size_limit`: [`Uint64`](#type-uint64) - Limiting transactions to tx_size_limit diff --git a/test/src/main.rs b/test/src/main.rs index f16b212ae1..6757d18307 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -461,6 +461,10 @@ fn all_specs() -> Vec> { Box::new(DifferentTxsWithSameInputWithOutRBF), Box::new(RbfBasic), Box::new(RbfSameInput), + Box::new(RbfSameInputwithLessFee), + Box::new(RbfTooManyDescendants), + Box::new(RbfContainNewTx), + Box::new(RbfContainInvalidInput), Box::new(CompactBlockEmpty), Box::new(CompactBlockEmptyParentUnknown), Box::new(CompactBlockPrefilled), diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index 87fd7d3c56..db86a2de71 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -20,16 +20,6 @@ impl Spec for DifferentTxsWithSameInputWithOutRBF { let tx1 = node0.new_transaction(tx_hash_0.clone()); let tx2_temp = node0.new_transaction(tx_hash_0); - eprintln!( - "tx1 hash: {:?} short_id: {:?}", - tx1.hash(), - tx1.proposal_short_id() - ); - eprintln!( - "tx2 hash: {:?} short_id: {:?}", - tx2_temp.hash(), - tx2_temp.proposal_short_id() - ); // Set tx2 fee to a higher value, tx1 capacity is 100, set tx2 capacity to 80 for +20 fee. let output = CellOutputBuilder::default() .capacity(capacity_bytes!(80).pack()) @@ -40,9 +30,6 @@ impl Spec for DifferentTxsWithSameInputWithOutRBF { .set_outputs(vec![output]) .build(); - eprintln!("tx1: {:?}", tx1); - eprintln!("tx2: {:?}", tx2); - node0.rpc_client().send_transaction(tx1.data().into()); let res = node0 .rpc_client() diff --git a/test/src/specs/tx_pool/replace.rs b/test/src/specs/tx_pool/replace.rs index a5e742fbbc..35af8a8a98 100644 --- a/test/src/specs/tx_pool/replace.rs +++ b/test/src/specs/tx_pool/replace.rs @@ -4,12 +4,11 @@ use ckb_logger::info; use ckb_types::{ core::{capacity_bytes, Capacity, TransactionView}, packed::CellOutputBuilder, + packed::{CellInput, OutPoint}, prelude::*, }; pub struct RbfBasic; -pub struct RbfSameInput; - impl Spec for RbfBasic { fn run(&self, nodes: &mut Vec) { let node0 = &nodes[0]; @@ -21,16 +20,6 @@ impl Spec for RbfBasic { let tx1 = node0.new_transaction(tx_hash_0.clone()); let tx2_temp = node0.new_transaction(tx_hash_0); - eprintln!( - "tx1 hash: {:?} short_id: {:?}", - tx1.hash(), - tx1.proposal_short_id() - ); - eprintln!( - "tx2 hash: {:?} short_id: {:?}", - tx2_temp.hash(), - tx2_temp.proposal_short_id() - ); // Set tx2 fee to a higher value, tx1 capacity is 100, set tx2 capacity to 80 for +20 fee. let output = CellOutputBuilder::default() .capacity(capacity_bytes!(80).pack()) @@ -41,9 +30,6 @@ impl Spec for RbfBasic { .set_outputs(vec![output]) .build(); - eprintln!("tx1: {:?}", tx1); - eprintln!("tx2: {:?}", tx2); - node0.rpc_client().send_transaction(tx1.data().into()); let res = node0 .rpc_client() @@ -76,7 +62,6 @@ impl Spec for RbfBasic { let ret = node0 .rpc_client() .get_transaction_with_verbosity(tx1.hash(), 1); - eprintln!("ret: {:?}", ret); assert!(ret.transaction.is_none()); assert!(matches!(ret.tx_status.status, Status::Rejected)); assert!(ret.tx_status.reason.unwrap().contains("RBFRejected")); @@ -101,6 +86,7 @@ impl Spec for RbfBasic { } } +pub struct RbfSameInput; impl Spec for RbfSameInput { fn run(&self, nodes: &mut Vec) { let node0 = &nodes[0]; @@ -112,26 +98,264 @@ impl Spec for RbfSameInput { let tx1 = node0.new_transaction(tx_hash_0.clone()); let tx2_temp = node0.new_transaction(tx_hash_0); - eprintln!( - "tx1 hash: {:?} short_id: {:?}", - tx1.hash(), - tx1.proposal_short_id() - ); - eprintln!( - "tx2 hash: {:?} short_id: {:?}", - tx2_temp.hash(), - tx2_temp.proposal_short_id() - ); let tx2 = tx2_temp.as_advanced_builder().build(); - eprintln!("tx1: {:?}", tx1); - eprintln!("tx2: {:?}", tx2); + node0.rpc_client().send_transaction(tx1.data().into()); + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err(), "tx2 should be rejected"); + } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.enable_rbf = true; + } +} + +pub struct RbfSameInputwithLessFee; + +// RBF Rule #3 +impl Spec for RbfSameInputwithLessFee { + fn run(&self, nodes: &mut Vec) { + let node0 = &nodes[0]; + + node0.mine_until_out_bootstrap_period(); + node0.new_block_with_blocking(|template| template.number.value() != 13); + let tx_hash_0 = node0.generate_transaction(); + info!("Generate 2 txs with same input"); + let tx1 = node0.new_transaction(tx_hash_0.clone()); + let tx2_temp = node0.new_transaction(tx_hash_0); + + let output1 = CellOutputBuilder::default() + .capacity(capacity_bytes!(80).pack()) + .build(); + + let tx1 = tx1.as_advanced_builder().set_outputs(vec![output1]).build(); + + // Set tx2 fee to a lower value + let output2 = CellOutputBuilder::default() + .capacity(capacity_bytes!(90).pack()) + .build(); + + let tx2 = tx2_temp + .as_advanced_builder() + .set_outputs(vec![output2]) + .build(); node0.rpc_client().send_transaction(tx1.data().into()); let res = node0 .rpc_client() .send_transaction_result(tx2.data().into()); assert!(res.is_err(), "tx2 should be rejected"); + assert!(res + .err() + .unwrap() + .to_string() + .contains("Tx fee lower than old conflict Tx fee")); + } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.enable_rbf = true; + } +} + +pub struct RbfTooManyDescendants; + +// RBF Rule #5 +impl Spec for RbfTooManyDescendants { + fn run(&self, nodes: &mut Vec) { + let node0 = &nodes[0]; + + node0.mine_until_out_bootstrap_period(); + + // build txs chain + let tx0 = node0.new_transaction_spend_tip_cellbase(); + let tx0_temp = tx0.clone(); + let mut txs = vec![tx0]; + let max_count = 101; + while txs.len() <= max_count { + let parent = txs.last().unwrap(); + let child = parent + .as_advanced_builder() + .set_inputs(vec![{ + CellInput::new_builder() + .previous_output(OutPoint::new(parent.hash(), 0)) + .build() + }]) + .set_outputs(vec![parent.output(0).unwrap()]) + .build(); + txs.push(child); + } + assert_eq!(txs.len(), max_count + 1); + // send tx chain + for tx in txs[..=max_count - 1].iter() { + let ret = node0.rpc_client().send_transaction_result(tx.data().into()); + assert!(ret.is_ok()); + } + + // Set tx2 fee to a higher value + let output2 = CellOutputBuilder::default() + .capacity(capacity_bytes!(70).pack()) + .build(); + + let tx2 = tx0_temp + .as_advanced_builder() + .set_outputs(vec![output2]) + .build(); + + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err(), "tx2 should be rejected"); + assert!(res + .err() + .unwrap() + .to_string() + .contains("Tx conflict too many txs")); + } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.enable_rbf = true; + } +} + +pub struct RbfContainNewTx; + +// RBF Rule #2 +impl Spec for RbfContainNewTx { + fn run(&self, nodes: &mut Vec) { + let node0 = &nodes[0]; + + node0.mine_until_out_bootstrap_period(); + + // build txs chain + let tx0 = node0.new_transaction_spend_tip_cellbase(); + let mut txs = vec![tx0]; + let max_count = 5; + while txs.len() <= max_count { + let parent = txs.last().unwrap(); + let child = parent + .as_advanced_builder() + .set_inputs(vec![{ + CellInput::new_builder() + .previous_output(OutPoint::new(parent.hash(), 0)) + .build() + }]) + .set_outputs(vec![parent.output(0).unwrap()]) + .build(); + txs.push(child); + } + assert_eq!(txs.len(), max_count + 1); + // send tx chain + for tx in txs[..=max_count - 1].iter() { + let ret = node0.rpc_client().send_transaction_result(tx.data().into()); + assert!(ret.is_ok()); + } + + let clone_tx = txs[2].clone(); + // Set tx2 fee to a higher value + let output2 = CellOutputBuilder::default() + .capacity(capacity_bytes!(70).pack()) + .build(); + + let tx2 = clone_tx + .as_advanced_builder() + .set_inputs(vec![ + { + CellInput::new_builder() + .previous_output(OutPoint::new(txs[1].hash(), 0)) + .build() + }, + { + CellInput::new_builder() + .previous_output(OutPoint::new(txs[4].hash(), 0)) + .build() + }, + ]) + .set_outputs(vec![output2]) + .build(); + + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err(), "tx2 should be rejected"); + assert!(res + .err() + .unwrap() + .to_string() + .contains("new Tx contains unconfirmed inputs")); + } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.enable_rbf = true; + } +} + +pub struct RbfContainInvalidInput; + +// RBF Rule #2 +impl Spec for RbfContainInvalidInput { + fn run(&self, nodes: &mut Vec) { + let node0 = &nodes[0]; + + node0.mine_until_out_bootstrap_period(); + + // build txs chain + let tx0 = node0.new_transaction_spend_tip_cellbase(); + let mut txs = vec![tx0]; + let max_count = 5; + while txs.len() <= max_count { + let parent = txs.last().unwrap(); + let child = parent + .as_advanced_builder() + .set_inputs(vec![{ + CellInput::new_builder() + .previous_output(OutPoint::new(parent.hash(), 0)) + .build() + }]) + .set_outputs(vec![parent.output(0).unwrap()]) + .build(); + txs.push(child); + } + assert_eq!(txs.len(), max_count + 1); + // send Tx chain + for tx in txs[..=max_count - 1].iter() { + let ret = node0.rpc_client().send_transaction_result(tx.data().into()); + assert!(ret.is_ok()); + } + + let clone_tx = txs[2].clone(); + // Set tx2 fee to a higher value + let output2 = CellOutputBuilder::default() + .capacity(capacity_bytes!(70).pack()) + .build(); + + let tx2 = clone_tx + .as_advanced_builder() + .set_inputs(vec![ + { + CellInput::new_builder() + .previous_output(OutPoint::new(txs[1].hash(), 0)) + .build() + }, + { + CellInput::new_builder() + .previous_output(OutPoint::new(txs[3].hash(), 0)) + .build() + }, + ]) + .set_outputs(vec![output2]) + .build(); + + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err(), "tx2 should be rejected"); + assert!(res + .err() + .unwrap() + .to_string() + .contains("new Tx contains inputs in descendants of to be replaced Tx")); } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 84f90cd2e1..231b419bfa 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -238,9 +238,8 @@ impl PoolMap { } pub(crate) fn find_conflict_tx(&self, tx: &TransactionView) -> HashSet { - let inputs = tx.input_pts_iter(); let mut res = HashSet::default(); - for i in inputs { + for i in tx.input_pts_iter() { if let Some(id) = self.edges.get_input_ref(&i) { res.insert(id.clone()); } @@ -249,10 +248,9 @@ impl PoolMap { } pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { - let inputs = tx.input_pts_iter(); let mut conflicts = Vec::new(); - for i in inputs { + for i in tx.input_pts_iter() { if let Some(id) = self.edges.remove_input(&i) { let entries = self.remove_entry_and_descendants(&id); if !entries.is_empty() { @@ -461,7 +459,6 @@ impl PoolMap { entry.add_ancestor_weight(&ancestor.inner); } if entry.ancestors_count > self.max_ancestors_count { - debug!("debug: exceeded maximum ancestors count"); return Err(Reject::ExceededMaximumAncestorsCount); } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index c62c175141..6cbe18fe51 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -486,27 +486,20 @@ impl TxPool { tx_size: usize, ) -> Result<(), Reject> { assert!(self.config.enable_rbf); - if conflicts.is_empty() { - return Err(Reject::RBFRejected( - "can not find conflict txs to replace".to_string(), - )); - } + assert!(!conflicts.is_empty()); + let short_id = rtx.transaction.proposal_short_id(); let conflicts = conflicts .iter() .map(|id| { &self .get_pool_entry(id) - .expect("conflict tx should be in pool or store") + .expect("conflict Tx should be in pool") .inner }) .collect::>(); - // TODO: Rule #1, the conflicted tx need to confirmed as `can_be_replaced` - // Rule #2, new tx don't contain any new unconfirmed inputs - // TODO: confirm whether this could be used in ckb - // https://github.com/bitcoin/bitcoin/blob/d9c7c2fd3ec7b0fcae7e0c9423bff6c6799dd67c/src/policy/rbf.cpp#L107 let mut inputs = HashSet::new(); for c in conflicts.iter() { inputs.extend(c.transaction().input_pts_iter()); @@ -517,7 +510,7 @@ impl TxPool { .any(|pt| !inputs.contains(&pt) && !snapshot.transaction_exists(&pt.tx_hash())) { return Err(Reject::RBFRejected( - "new tx contains unconfirmed inputs".to_string(), + "new Tx contains unconfirmed inputs".to_string(), )); } @@ -525,36 +518,57 @@ impl TxPool { let min_rbf_fee = self.config.min_rbf_rate.fee(tx_size as u64); if fee <= min_rbf_fee { return Err(Reject::RBFRejected(format!( - "tx fee lower than min_rbf_fee, min_rbf_fee: {}, tx fee: {}", + "Tx fee lower than min_rbf_fee, min_rbf_fee: {}, tx fee: {}", min_rbf_fee, fee, ))); } - // Rule #3, new tx's fee need to higher than conflicts + // Rule #3, new tx's fee need to higher than conflicts, here we only check the root tx for conflict in conflicts.iter() { - eprintln!("old fee: {:?} new_fee: {:?}", conflict.fee, fee); if conflict.fee >= fee { return Err(Reject::RBFRejected(format!( - "tx fee lower than conflict tx fee, conflict id: {}, conflict fee: {}, tx fee: {}", - conflict.proposal_short_id(), - conflict.fee, - fee, + "Tx fee lower than old conflict Tx fee, tx fee: {}, conflict fee: {}", + fee, conflict.fee, ))); } } - // Rule #5, new replaced tx's descendants can not more than 100 + // Rule #5, the replaced tx's descendants can not more than 100 + // and the ancestor of the new tx don't have common set with the replaced tx's descendants let mut replace_count: usize = 0; + let ancestors = self.pool_map.calc_ancestors(&short_id); for conflict in conflicts.iter() { let id = conflict.proposal_short_id(); let descendants = self.pool_map.calc_descendants(&id); replace_count += descendants.len() + 1; if replace_count > MAX_REPLACEMENT_CANDIDATES { return Err(Reject::RBFRejected(format!( - "tx conflict too many txs, conflict txs count: {}", + "Tx conflict too many txs, conflict txs count: {}", replace_count, ))); } + + if !descendants.is_disjoint(&ancestors) { + return Err(Reject::RBFRejected( + "Tx ancestors have common with conflict Tx descendants".to_string(), + )); + } + + for id in descendants.iter() { + if let Some(entry) = self.get_pool_entry(id) { + let hash = entry.inner.transaction().hash(); + if rtx + .transaction + .input_pts_iter() + .any(|pt| pt.tx_hash() == hash) + { + return Err(Reject::RBFRejected( + "new Tx contains inputs in descendants of to be replaced Tx" + .to_string(), + )); + } + } + } } Ok(()) diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 144321b4b6..eecc12d7b0 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -127,11 +127,6 @@ impl TxPoolService { // try to remove conflicted tx here for id in conflicts.iter() { let removed = tx_pool.pool_map.remove_entry_and_descendants(id); - if removed.is_empty() { - return Err(Reject::RBFRejected( - "RBF remove old entries error".to_string(), - )); - } for old in removed { let reject = Reject::RBFRejected(format!( "replaced by {}", @@ -220,7 +215,7 @@ impl TxPoolService { let (ret, snapshot) = self .with_tx_pool_read_lock(|tx_pool, snapshot| { - let tip_hash: Byte32 = snapshot.tip_hash(); + let tip_hash = snapshot.tip_hash(); // Same txid means exactly the same transaction, including inputs, outputs, witnesses, etc. // It's also not possible for RBF, reject it directly @@ -235,7 +230,7 @@ impl TxPoolService { Ok((tip_hash, rtx, status, fee, tx_size, HashSet::new())) } Err(err) => { - if tx_pool.config.enable_rbf { + if tx_pool.config.enable_rbf && matches!(err, Reject::Resolve(_)) { // Try RBF check let conflicts = tx_pool.pool_map.find_conflict_tx(tx); let (rtx, status) = resolve_tx(tx_pool, &snapshot, tx.clone(), true)?; @@ -1049,7 +1044,6 @@ fn _submit_entry( entry: TxEntry, callbacks: &Callbacks, ) -> Result<(), Reject> { - //eprintln!("_submit_entry: {:?}", entry.proposal_short_id()); match status { TxStatus::Fresh => { if tx_pool.add_pending(entry.clone())? { @@ -1067,7 +1061,6 @@ fn _submit_entry( } } } - //eprintln!("finished submit: {:?}", entry.proposal_short_id()); Ok(()) } diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index d785ead6c3..aaa20ffccf 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -577,8 +577,6 @@ pub enum Status { /// Status "rejected". The transaction has been recently removed from the pool. /// Due to storage limitations, the node can only hold the most recently removed transactions. Rejected, - /// Status "replaced". The transaction has been recently replace from the pool. - Replaced, } /// Transaction status and the block hash if it is committed. @@ -599,7 +597,6 @@ impl From for TxStatus { tx_pool::TxStatus::Proposed => TxStatus::proposed(), tx_pool::TxStatus::Committed(hash) => TxStatus::committed(hash), tx_pool::TxStatus::Rejected(reason) => TxStatus::rejected(reason), - tx_pool::TxStatus::Replaced(reason) => TxStatus::replaced(reason), tx_pool::TxStatus::Unknown => TxStatus::unknown(), } } @@ -650,19 +647,6 @@ impl TxStatus { } } - /// Transaction which has already been replaced recently. - /// - /// ## Params - /// - /// * `reason` - the reason why the transaction is replaced. - pub fn replaced(reason: String) -> Self { - Self { - status: Status::Replaced, - block_hash: None, - reason: Some(reason), - } - } - /// The node has not seen the transaction, pub fn unknown() -> Self { Self { diff --git a/util/launcher/src/shared_builder.rs b/util/launcher/src/shared_builder.rs index f019b377ad..e1ad368a24 100644 --- a/util/launcher/src/shared_builder.rs +++ b/util/launcher/src/shared_builder.rs @@ -463,7 +463,7 @@ fn register_tx_pool_callback(tx_pool_builder: &mut TxPoolServiceBuilder, notify: let tx_hash = entry.transaction().hash(); // record recent reject - if matches!(reject, Reject::Resolve(..) | Reject::RBFRejected(..) ) { + if matches!(reject, Reject::Resolve(..) | Reject::RBFRejected(..)) { if let Some(ref mut recent_reject) = tx_pool.recent_reject { if let Err(e) = recent_reject.put(&tx_hash, reject.clone()) { error!("record recent_reject failed {} {} {}", tx_hash, reject, e); diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs index 33a1e0a218..92b4295597 100644 --- a/util/types/src/core/cell.rs +++ b/util/types/src/core/cell.rs @@ -710,7 +710,6 @@ pub fn resolve_transaction( // skip resolve input of cellbase if !transaction.is_cellbase() { for out_point in transaction.input_pts_iter() { - //eprintln!("resolve input: {:?}", out_point); if !current_inputs.insert(out_point.to_owned()) { return Err(OutPointError::Dead(out_point)); } diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index 6c19213502..75eb6e2790 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -117,8 +117,6 @@ pub enum TxStatus { /// Status "rejected". The transaction has been recently removed from the pool. /// Due to storage limitations, the node can only hold the most recently removed transactions. Rejected(String), - /// Status "replaced", The transaction has been recently replaced for RBF. - Replaced(String), } /// Tx-pool entry info From d37254e5b5f3b8a6d0ba8f57302b0b25fd802036 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 19 Jul 2023 16:20:37 +0800 Subject: [PATCH 118/267] add enbale_rbf config option --- pow/src/lib.rs | 4 +-- rpc/README.md | 2 +- sync/src/synchronizer/mod.rs | 4 +-- test/src/specs/relay/transaction_relay.rs | 3 +- test/src/specs/tx_pool/collision.rs | 6 +++- .../tx_pool/different_txs_with_same_input.rs | 4 --- test/src/specs/tx_pool/pool_reconcile.rs | 6 +--- test/src/specs/tx_pool/replace.rs | 20 ++++++------ tx-pool/src/pool.rs | 31 ++++++++++--------- tx-pool/src/process.rs | 5 ++- util/app-config/src/configs/tx_pool.rs | 4 +-- util/app-config/src/legacy/tx_pool.rs | 11 +++---- util/jsonrpc-types/src/pool.rs | 2 +- 13 files changed, 49 insertions(+), 53 deletions(-) diff --git a/pow/src/lib.rs b/pow/src/lib.rs index 4429f24b35..6ce8c1c163 100644 --- a/pow/src/lib.rs +++ b/pow/src/lib.rs @@ -27,10 +27,10 @@ pub enum Pow { /// Mocking dummy PoW engine Dummy, /// The Eaglesong PoW engine - /// Check details of Eaglesong from: https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0010-eaglesong/0010-eaglesong.md + /// Check details of Eaglesong from: Eaglesong, /// The Eaglesong PoW engine, similar to `Eaglesong`, but using `blake2b` hash as the final output. - /// Check details of blake2b from: https://tools.ietf.org/html/rfc7693 and blake2b-rs from: https://github.com/nervosnetwork/blake2b-rs + /// Check details of blake2b from: and blake2b-rs from: EaglesongBlake2b, } diff --git a/rpc/README.md b/rpc/README.md index e7f6e24131..73109788be 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -7039,7 +7039,7 @@ Transaction pool information. The unit is Shannons per 1000 bytes transaction serialization size in the block. -* `min_rbf_rate`: [`Uint64`](#type-uint64) - RBF rate threshold. The pool reject to resort for transactions which fee rate is below this threshold. +* `min_rbf_rate`: [`Uint64`](#type-uint64) - RBF rate threshold. The pool reject to replace for transactions which fee rate is below this threshold. The unit is Shannons per 1000 bytes transaction serialization size in the block. diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index b348226560..7f0a78c757 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -1,10 +1,10 @@ //! CKB node has initial block download phase (IBD mode) like Bitcoin: -//! https://btcinformation.org/en/glossary/initial-block-download +//! //! //! When CKB node is in IBD mode, it will respond `packed::InIBD` to `GetHeaders` and `GetBlocks` requests //! //! And CKB has a headers-first synchronization style like Bitcoin: -//! https://btcinformation.org/en/glossary/headers-first-sync +//! //! mod block_fetcher; mod block_process; diff --git a/test/src/specs/relay/transaction_relay.rs b/test/src/specs/relay/transaction_relay.rs index 06b10f4e19..75a34e0df6 100644 --- a/test/src/specs/relay/transaction_relay.rs +++ b/test/src/specs/relay/transaction_relay.rs @@ -5,6 +5,7 @@ use crate::util::transaction::{always_success_transaction, always_success_transa use crate::utils::{build_relay_tx_hashes, build_relay_txs, sleep, wait_until}; use crate::{Net, Node, Spec}; use ckb_constant::sync::RETRY_ASK_TX_TIMEOUT_INCREASE; +use ckb_jsonrpc_types::Status; use ckb_logger::info; use ckb_network::SupportProtocols; use ckb_types::{ @@ -265,7 +266,6 @@ impl Spec for TransactionRelayConflict { node0.wait_for_tx_pool(); node1.wait_for_tx_pool(); - /* let ret = node1 .rpc_client() .get_transaction_with_verbosity(tx1.hash(), 1); @@ -313,6 +313,5 @@ impl Spec for TransactionRelayConflict { .is_some() }); assert!(relayed, "Transaction should be relayed to node1"); - */ } } diff --git a/test/src/specs/tx_pool/collision.rs b/test/src/specs/tx_pool/collision.rs index bebe29495d..0d93697c03 100644 --- a/test/src/specs/tx_pool/collision.rs +++ b/test/src/specs/tx_pool/collision.rs @@ -1,4 +1,6 @@ -use crate::util::check::{is_transaction_committed, is_transaction_pending}; +use crate::util::check::{ + is_transaction_committed, is_transaction_pending, is_transaction_rejected, +}; use crate::utils::{assert_send_transaction_fail, blank, commit, propose}; use crate::{Node, Spec}; use ckb_types::bytes::Bytes; @@ -173,6 +175,8 @@ impl Spec for RemoveConflictFromPending { node.wait_for_tx_pool(); assert!(is_transaction_committed(node, &txa)); + assert!(is_transaction_rejected(node, &txb)); + assert!(is_transaction_rejected(node, &txc)); } } diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index db86a2de71..7b50dbdbef 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -84,8 +84,4 @@ impl Spec for DifferentTxsWithSameInputWithOutRBF { // assert!(ret.transaction.is_none()); // assert!(matches!(ret.tx_status.status, Status::Rejected)); } - - fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { - config.tx_pool.enable_rbf = false; - } } diff --git a/test/src/specs/tx_pool/pool_reconcile.rs b/test/src/specs/tx_pool/pool_reconcile.rs index 280c506cc8..c84b32fbc2 100644 --- a/test/src/specs/tx_pool/pool_reconcile.rs +++ b/test/src/specs/tx_pool/pool_reconcile.rs @@ -5,7 +5,7 @@ use crate::util::mining::out_ibd_mode; use crate::{Node, Spec}; use ckb_jsonrpc_types::ProposalShortId; use ckb_logger::info; -use ckb_types::core::{capacity_bytes, Capacity, FeeRate}; +use ckb_types::core::{capacity_bytes, Capacity}; use ckb_types::packed::CellOutputBuilder; use ckb_types::{ packed::{self, CellInput, OutPoint}, @@ -172,8 +172,4 @@ impl Spec for PoolResolveConflictAfterReorg { let err_msg = ret.err().unwrap().to_string(); assert!(err_msg.contains("Resolve failed Dead")); } - - fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { - config.tx_pool.min_fee_rate = FeeRate::from_u64(0); - } } diff --git a/test/src/specs/tx_pool/replace.rs b/test/src/specs/tx_pool/replace.rs index 35af8a8a98..b1e1fe6322 100644 --- a/test/src/specs/tx_pool/replace.rs +++ b/test/src/specs/tx_pool/replace.rs @@ -82,7 +82,7 @@ impl Spec for RbfBasic { } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { - config.tx_pool.enable_rbf = true; + config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(1500); } } @@ -108,7 +108,7 @@ impl Spec for RbfSameInput { } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { - config.tx_pool.enable_rbf = true; + config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(1500); } } @@ -147,15 +147,13 @@ impl Spec for RbfSameInputwithLessFee { .rpc_client() .send_transaction_result(tx2.data().into()); assert!(res.is_err(), "tx2 should be rejected"); - assert!(res - .err() - .unwrap() - .to_string() - .contains("Tx fee lower than old conflict Tx fee")); + let message = res.err().unwrap().to_string(); + eprintln!("res: {:?}", message); + assert!(message.contains("Tx's current fee is 1000000000, expect it to be larger than")); } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { - config.tx_pool.enable_rbf = true; + config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(1500); } } @@ -215,7 +213,7 @@ impl Spec for RbfTooManyDescendants { } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { - config.tx_pool.enable_rbf = true; + config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(1500); } } @@ -287,7 +285,7 @@ impl Spec for RbfContainNewTx { } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { - config.tx_pool.enable_rbf = true; + config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(1500); } } @@ -359,6 +357,6 @@ impl Spec for RbfContainInvalidInput { } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { - config.tx_pool.enable_rbf = true; + config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(1500); } } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 6cbe18fe51..0792f7fcb0 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -85,6 +85,11 @@ impl TxPool { self.total_tx_cycles += cycles; } + /// Check whether tx-pool enable RBF + pub fn enable_rbf(&self) -> bool { + self.config.min_rbf_rate > self.config.min_fee_rate + } + /// Update size and cycles statics for remove tx /// cycles overflow is possible, currently obtaining cycles is not accurate pub fn update_statics_for_remove_tx(&mut self, tx_size: usize, cycles: Cycle) { @@ -485,7 +490,7 @@ impl TxPool { fee: Capacity, tx_size: usize, ) -> Result<(), Reject> { - assert!(self.config.enable_rbf); + assert!(self.enable_rbf()); assert!(!conflicts.is_empty()); let short_id = rtx.transaction.proposal_short_id(); @@ -515,24 +520,22 @@ impl TxPool { } // Rule #4, new tx' fee need to higher than min_rbf_fee computed from the tx_pool configuration + // Rule #3, new tx's fee need to higher than conflicts, here we only check the root tx let min_rbf_fee = self.config.min_rbf_rate.fee(tx_size as u64); - if fee <= min_rbf_fee { + let max_fee = conflicts + .iter() + .map(|c| c.fee) + .max() + .unwrap_or(min_rbf_fee) + .max(min_rbf_fee); + + if fee <= max_fee { return Err(Reject::RBFRejected(format!( - "Tx fee lower than min_rbf_fee, min_rbf_fee: {}, tx fee: {}", - min_rbf_fee, fee, + "Tx's current fee is {}, expect it to be larger than: {} to replace old txs", + fee, max_fee, ))); } - // Rule #3, new tx's fee need to higher than conflicts, here we only check the root tx - for conflict in conflicts.iter() { - if conflict.fee >= fee { - return Err(Reject::RBFRejected(format!( - "Tx fee lower than old conflict Tx fee, tx fee: {}, conflict fee: {}", - fee, conflict.fee, - ))); - } - } - // Rule #5, the replaced tx's descendants can not more than 100 // and the ancestor of the new tx don't have common set with the replaced tx's descendants let mut replace_count: usize = 0; diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index eecc12d7b0..e6747b32cb 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -230,9 +230,12 @@ impl TxPoolService { Ok((tip_hash, rtx, status, fee, tx_size, HashSet::new())) } Err(err) => { - if tx_pool.config.enable_rbf && matches!(err, Reject::Resolve(_)) { + if tx_pool.enable_rbf() && matches!(err, Reject::Resolve(_)) { // Try RBF check let conflicts = tx_pool.pool_map.find_conflict_tx(tx); + if conflicts.is_empty() { + return Err(err); + } let (rtx, status) = resolve_tx(tx_pool, &snapshot, tx.clone(), true)?; let fee = check_tx_fee(tx_pool, &snapshot, &rtx, tx_size)?; tx_pool.check_rbf(&snapshot, &rtx, &conflicts, fee, tx_size)?; diff --git a/util/app-config/src/configs/tx_pool.rs b/util/app-config/src/configs/tx_pool.rs index eb514c3d8a..a24c7938de 100644 --- a/util/app-config/src/configs/tx_pool.rs +++ b/util/app-config/src/configs/tx_pool.rs @@ -14,7 +14,7 @@ pub struct TxPoolConfig { /// txs with lower fee rate than this will not be relayed or be mined #[serde(with = "FeeRateDef")] pub min_fee_rate: FeeRate, - /// txs need to pay more than this for RBF + /// txs need to pay larger fee rate than this for RBF #[serde(with = "FeeRateDef")] pub min_rbf_rate: FeeRate, /// tx pool rejects txs that cycles greater than max_tx_verify_cycles @@ -37,8 +37,6 @@ pub struct TxPoolConfig { pub recent_reject: PathBuf, /// The expiration time for pool transactions in hours pub expiry_hours: u8, - /// Enable RBF - pub enable_rbf: bool, } /// Block assembler config options. diff --git a/util/app-config/src/legacy/tx_pool.rs b/util/app-config/src/legacy/tx_pool.rs index 1927bbda7e..562f3ac6a6 100644 --- a/util/app-config/src/legacy/tx_pool.rs +++ b/util/app-config/src/legacy/tx_pool.rs @@ -35,7 +35,7 @@ pub(crate) struct TxPoolConfig { keep_rejected_tx_hashes_count: u64, #[serde(with = "FeeRateDef")] min_fee_rate: FeeRate, - #[serde(with = "FeeRateDef")] + #[serde(with = "FeeRateDef", default = "default_min_rbf_rate")] min_rbf_rate: FeeRate, max_tx_verify_cycles: Cycle, max_ancestors_count: usize, @@ -45,8 +45,6 @@ pub(crate) struct TxPoolConfig { recent_reject: PathBuf, #[serde(default = "default_expiry_hours")] expiry_hours: u8, - #[serde(default)] - enable_rbf: bool, } fn default_keep_rejected_tx_hashes_days() -> u8 { @@ -65,6 +63,10 @@ fn default_max_tx_pool_size() -> usize { DEFAULT_MAX_TX_POOL_SIZE } +fn default_min_rbf_rate() -> FeeRate { + DEFAULT_MIN_RBF_RATE +} + impl Default for crate::TxPoolConfig { fn default() -> Self { TxPoolConfig::default().into() @@ -89,7 +91,6 @@ impl Default for TxPoolConfig { persisted_data: Default::default(), recent_reject: Default::default(), expiry_hours: DEFAULT_EXPIRY_HOURS, - enable_rbf: false, } } } @@ -112,7 +113,6 @@ impl From for crate::TxPoolConfig { persisted_data, recent_reject, expiry_hours, - enable_rbf, } = input; Self { @@ -126,7 +126,6 @@ impl From for crate::TxPoolConfig { persisted_data, recent_reject, expiry_hours, - enable_rbf, } } } diff --git a/util/jsonrpc-types/src/pool.rs b/util/jsonrpc-types/src/pool.rs index 3e773c1ca2..a7c7bf4d6b 100644 --- a/util/jsonrpc-types/src/pool.rs +++ b/util/jsonrpc-types/src/pool.rs @@ -40,7 +40,7 @@ pub struct TxPoolInfo { /// /// The unit is Shannons per 1000 bytes transaction serialization size in the block. pub min_fee_rate: Uint64, - /// RBF rate threshold. The pool reject to resort for transactions which fee rate is below this threshold. + /// RBF rate threshold. The pool reject to replace for transactions which fee rate is below this threshold. /// /// The unit is Shannons per 1000 bytes transaction serialization size in the block. pub min_rbf_rate: Uint64, From f731e81e4a4994b77ad13c478f2b165397c601f9 Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 25 Jul 2023 15:09:36 +0800 Subject: [PATCH 119/267] add Rule 7 for RBF, any old Tx should be in Pending status --- test/src/main.rs | 1 + test/src/specs/tx_pool/replace.rs | 74 ++++++++++++++++++++++++++++++- tx-pool/src/pool.rs | 19 ++++++-- 3 files changed, 89 insertions(+), 5 deletions(-) diff --git a/test/src/main.rs b/test/src/main.rs index 6757d18307..4e9d0fbc5c 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -465,6 +465,7 @@ fn all_specs() -> Vec> { Box::new(RbfTooManyDescendants), Box::new(RbfContainNewTx), Box::new(RbfContainInvalidInput), + Box::new(RbfRejectReplaceProposed), Box::new(CompactBlockEmpty), Box::new(CompactBlockEmptyParentUnknown), Box::new(CompactBlockPrefilled), diff --git a/test/src/specs/tx_pool/replace.rs b/test/src/specs/tx_pool/replace.rs index b1e1fe6322..abbfca053c 100644 --- a/test/src/specs/tx_pool/replace.rs +++ b/test/src/specs/tx_pool/replace.rs @@ -148,7 +148,6 @@ impl Spec for RbfSameInputwithLessFee { .send_transaction_result(tx2.data().into()); assert!(res.is_err(), "tx2 should be rejected"); let message = res.err().unwrap().to_string(); - eprintln!("res: {:?}", message); assert!(message.contains("Tx's current fee is 1000000000, expect it to be larger than")); } @@ -360,3 +359,76 @@ impl Spec for RbfContainInvalidInput { config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(1500); } } + +pub struct RbfRejectReplaceProposed; + +// RBF Rule #6 +impl Spec for RbfRejectReplaceProposed { + fn run(&self, nodes: &mut Vec) { + let node0 = &nodes[0]; + + node0.mine_until_out_bootstrap_period(); + + // build txs chain + let tx0 = node0.new_transaction_spend_tip_cellbase(); + let mut txs = vec![tx0]; + let max_count = 5; + while txs.len() <= max_count { + let parent = txs.last().unwrap(); + let child = parent + .as_advanced_builder() + .set_inputs(vec![{ + CellInput::new_builder() + .previous_output(OutPoint::new(parent.hash(), 0)) + .build() + }]) + .set_outputs(vec![parent.output(0).unwrap()]) + .build(); + txs.push(child); + } + assert_eq!(txs.len(), max_count + 1); + // send Tx chain + for tx in txs[..=max_count - 1].iter() { + let ret = node0.rpc_client().send_transaction_result(tx.data().into()); + assert!(ret.is_ok()); + } + + node0.mine_with_blocking(|template| template.proposals.len() != max_count); + let ret = node0.rpc_client().get_transaction(txs[2].hash()); + assert!( + matches!(ret.tx_status.status, Status::Pending), + "tx1 should be pending" + ); + node0.mine(1); + let ret = node0.rpc_client().get_transaction(txs[2].hash()); + assert!( + matches!(ret.tx_status.status, Status::Proposed), + "tx1 should be proposed" + ); + + let clone_tx = txs[2].clone(); + // Set tx2 fee to a higher value + let output2 = CellOutputBuilder::default() + .capacity(capacity_bytes!(70).pack()) + .build(); + + let tx2 = clone_tx + .as_advanced_builder() + .set_outputs(vec![output2]) + .build(); + + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + assert!(res.is_err(), "tx2 should be rejected"); + assert!(res + .err() + .unwrap() + .to_string() + .contains("all conflict Txs should be in Pending status")); + } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(1500); + } +} diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 0792f7fcb0..a0b7fb3ddf 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -494,16 +494,27 @@ impl TxPool { assert!(!conflicts.is_empty()); let short_id = rtx.transaction.proposal_short_id(); - let conflicts = conflicts + let entries = conflicts .iter() .map(|id| { - &self - .get_pool_entry(id) + self.get_pool_entry(id) .expect("conflict Tx should be in pool") - .inner }) .collect::>(); + // Rule #6, any old Tx should be in `Pending` or `Gap` status + if entries + .iter() + .any(|e| ![Status::Pending, Status::Gap].contains(&e.status)) + { + // Here we only refer to `Pending` status, since `Gap` is an internal status + return Err(Reject::RBFRejected( + "all conflict Txs should be in Pending status".to_string(), + )); + } + + let conflicts = entries.iter().map(|e| e.inner.clone()).collect::>(); + // Rule #2, new tx don't contain any new unconfirmed inputs let mut inputs = HashSet::new(); for c in conflicts.iter() { From 0464eab5ff6224474f45298cc15a6283af6f5e0b Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 26 Jul 2023 10:50:41 +0800 Subject: [PATCH 120/267] merge score_key and evict_key to sort_key --- tx-pool/src/component/commit_txs_scanner.rs | 2 +- tx-pool/src/component/entry.rs | 57 +++++-------------- tx-pool/src/component/mod.rs | 2 +- tx-pool/src/component/pool_map.rs | 3 +- .../component/{score_key.rs => sort_key.rs} | 35 +++++++++++- tx-pool/src/component/tests/entry.rs | 2 +- tx-pool/src/component/tests/score_key.rs | 2 +- 7 files changed, 52 insertions(+), 51 deletions(-) rename tx-pool/src/component/{score_key.rs => sort_key.rs} (65%) diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index c2058fdba6..3546aa0b7f 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -1,5 +1,5 @@ use crate::component::pool_map::PoolMap; -use crate::component::{entry::TxEntry, score_key::AncestorsScoreSortKey}; +use crate::component::{entry::TxEntry, sort_key::AncestorsScoreSortKey}; use ckb_types::{core::Cycle, packed::ProposalShortId}; use ckb_util::LinkedHashMap; use std::collections::{BTreeSet, HashMap, HashSet}; diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 2f8fdf95ef..09f9f3c988 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -1,4 +1,4 @@ -use crate::component::score_key::AncestorsScoreSortKey; +use crate::component::sort_key::{AncestorsScoreSortKey, EvictKey}; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ core::{ @@ -194,21 +194,6 @@ impl TxEntry { } } -impl From<&TxEntry> for AncestorsScoreSortKey { - fn from(entry: &TxEntry) -> Self { - let weight = get_transaction_weight(entry.size, entry.cycles); - let ancestors_weight = get_transaction_weight(entry.ancestors_size, entry.ancestors_cycles); - AncestorsScoreSortKey { - fee: entry.fee, - weight, - id: entry.proposal_short_id(), - ancestors_fee: entry.ancestors_fee, - ancestors_weight, - //timestamp: entry.timestamp, - } - } -} - impl Hash for TxEntry { fn hash(&self, state: &mut H) { Hash::hash(self.transaction(), state); @@ -233,14 +218,18 @@ impl Ord for TxEntry { } } -/// First compare fee_rate, select the smallest fee_rate, -/// and then select the latest timestamp, for eviction, -/// the latest timestamp which also means that the fewer descendants may exist. -#[derive(Eq, PartialEq, Clone, Debug)] -pub struct EvictKey { - pub fee_rate: FeeRate, - pub timestamp: u64, - pub descendants_count: usize, +impl From<&TxEntry> for AncestorsScoreSortKey { + fn from(entry: &TxEntry) -> Self { + let weight = get_transaction_weight(entry.size, entry.cycles); + let ancestors_weight = get_transaction_weight(entry.ancestors_size, entry.ancestors_cycles); + AncestorsScoreSortKey { + fee: entry.fee, + weight, + id: entry.proposal_short_id(), + ancestors_fee: entry.ancestors_fee, + ancestors_weight, + } + } } impl From<&TxEntry> for EvictKey { @@ -258,23 +247,3 @@ impl From<&TxEntry> for EvictKey { } } } - -impl PartialOrd for EvictKey { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for EvictKey { - fn cmp(&self, other: &Self) -> Ordering { - if self.fee_rate == other.fee_rate { - if self.descendants_count == other.descendants_count { - self.timestamp.cmp(&other.timestamp) - } else { - self.descendants_count.cmp(&other.descendants_count) - } - } else { - self.fee_rate.cmp(&other.fee_rate) - } - } -} diff --git a/tx-pool/src/component/mod.rs b/tx-pool/src/component/mod.rs index 7f325424ba..e5b8ab3cfc 100644 --- a/tx-pool/src/component/mod.rs +++ b/tx-pool/src/component/mod.rs @@ -7,7 +7,7 @@ pub(crate) mod links; pub(crate) mod orphan; pub(crate) mod pool_map; pub(crate) mod recent_reject; -pub(crate) mod score_key; +pub(crate) mod sort_key; #[cfg(test)] mod tests; diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 231b419bfa..b7ccec0034 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -2,9 +2,8 @@ extern crate rustc_hash; extern crate slab; use crate::component::edges::Edges; -use crate::component::entry::EvictKey; use crate::component::links::{Relation, TxLinksMap}; -use crate::component::score_key::AncestorsScoreSortKey; +use crate::component::sort_key::{AncestorsScoreSortKey, EvictKey}; use crate::error::Reject; use crate::TxEntry; use ckb_logger::{debug, trace}; diff --git a/tx-pool/src/component/score_key.rs b/tx-pool/src/component/sort_key.rs similarity index 65% rename from tx-pool/src/component/score_key.rs rename to tx-pool/src/component/sort_key.rs index 18dd48fcb2..50f2363aff 100644 --- a/tx-pool/src/component/score_key.rs +++ b/tx-pool/src/component/sort_key.rs @@ -1,4 +1,7 @@ -use ckb_types::{core::Capacity, packed::ProposalShortId}; +use ckb_types::{ + core::{Capacity, FeeRate}, + packed::ProposalShortId, +}; use std::cmp::Ordering; /// A struct to use as a sorted key @@ -51,3 +54,33 @@ impl Ord for AncestorsScoreSortKey { } } } + +/// First compare fee_rate, select the smallest fee_rate, +/// and then select the latest timestamp, for eviction, +/// the latest timestamp which also means that the fewer descendants may exist. +#[derive(Eq, PartialEq, Clone, Debug)] +pub struct EvictKey { + pub fee_rate: FeeRate, + pub timestamp: u64, + pub descendants_count: usize, +} + +impl PartialOrd for EvictKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for EvictKey { + fn cmp(&self, other: &Self) -> Ordering { + if self.fee_rate == other.fee_rate { + if self.descendants_count == other.descendants_count { + self.timestamp.cmp(&other.timestamp) + } else { + self.descendants_count.cmp(&other.descendants_count) + } + } else { + self.fee_rate.cmp(&other.fee_rate) + } + } +} diff --git a/tx-pool/src/component/tests/entry.rs b/tx-pool/src/component/tests/entry.rs index 8aa7edf3ff..3bd1ea8ebd 100644 --- a/tx-pool/src/component/tests/entry.rs +++ b/tx-pool/src/component/tests/entry.rs @@ -1,6 +1,6 @@ use ckb_types::core::{Capacity, FeeRate}; -use crate::component::entry::EvictKey; +use crate::component::sort_key::EvictKey; #[test] fn test_min_fee_and_weight_evict() { diff --git a/tx-pool/src/component/tests/score_key.rs b/tx-pool/src/component/tests/score_key.rs index 09475f3d19..94aa51cf72 100644 --- a/tx-pool/src/component/tests/score_key.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -6,7 +6,7 @@ use ckb_types::{ }; use std::mem::size_of; -use crate::component::{entry::TxEntry, pool_map::PoolMap, score_key::AncestorsScoreSortKey}; +use crate::component::{entry::TxEntry, pool_map::PoolMap, sort_key::AncestorsScoreSortKey}; const DEFAULT_MAX_ANCESTORS_COUNT: usize = 125; From 0e0b7aec46729905f502980871ccb4f1e15ff5bc Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 26 Jul 2023 11:52:22 +0800 Subject: [PATCH 121/267] uncomment test --- .../tx_pool/different_txs_with_same_input.rs | 30 +++++++++---------- test/src/specs/tx_pool/pool_reconcile.rs | 6 +++- tx-pool/src/pool.rs | 8 ----- tx-pool/src/process.rs | 6 +++- 4 files changed, 25 insertions(+), 25 deletions(-) diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index 7b50dbdbef..f590f7ae81 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -52,11 +52,11 @@ impl Spec for DifferentTxsWithSameInputWithOutRBF { assert!(!commit_txs_hash.contains(&tx2.hash())); // when tx1 was confirmed, tx2 should be rejected - // let ret = node0.rpc_client().get_transaction(tx2.hash()); - // assert!( - // matches!(ret.tx_status.status, Status::Rejected), - // "tx2 should be rejected" - // ); + let ret = node0.rpc_client().get_transaction(tx2.hash()); + assert!( + matches!(ret.tx_status.status, Status::Rejected), + "tx2 should be rejected" + ); // verbosity = 1 let ret = node0 @@ -65,11 +65,11 @@ impl Spec for DifferentTxsWithSameInputWithOutRBF { assert!(ret.transaction.is_none()); assert!(matches!(ret.tx_status.status, Status::Committed)); - // let ret = node0 - // .rpc_client() - // .get_transaction_with_verbosity(tx2.hash(), 1); - // assert!(ret.transaction.is_none()); - // assert!(matches!(ret.tx_status.status, Status::Rejected)); + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx2.hash(), 1); + assert!(ret.transaction.is_none()); + assert!(matches!(ret.tx_status.status, Status::Rejected)); // verbosity = 2 let ret = node0 @@ -78,10 +78,10 @@ impl Spec for DifferentTxsWithSameInputWithOutRBF { assert!(ret.transaction.is_some()); assert!(matches!(ret.tx_status.status, Status::Committed)); - // let ret = node0 - // .rpc_client() - // .get_transaction_with_verbosity(tx2.hash(), 2); - // assert!(ret.transaction.is_none()); - // assert!(matches!(ret.tx_status.status, Status::Rejected)); + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx2.hash(), 2); + assert!(ret.transaction.is_none()); + assert!(matches!(ret.tx_status.status, Status::Rejected)); } } diff --git a/test/src/specs/tx_pool/pool_reconcile.rs b/test/src/specs/tx_pool/pool_reconcile.rs index c84b32fbc2..280c506cc8 100644 --- a/test/src/specs/tx_pool/pool_reconcile.rs +++ b/test/src/specs/tx_pool/pool_reconcile.rs @@ -5,7 +5,7 @@ use crate::util::mining::out_ibd_mode; use crate::{Node, Spec}; use ckb_jsonrpc_types::ProposalShortId; use ckb_logger::info; -use ckb_types::core::{capacity_bytes, Capacity}; +use ckb_types::core::{capacity_bytes, Capacity, FeeRate}; use ckb_types::packed::CellOutputBuilder; use ckb_types::{ packed::{self, CellInput, OutPoint}, @@ -172,4 +172,8 @@ impl Spec for PoolResolveConflictAfterReorg { let err_msg = ret.err().unwrap().to_string(); assert!(err_msg.contains("Resolve failed Dead")); } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.min_fee_rate = FeeRate::from_u64(0); + } } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index a0b7fb3ddf..40f62212aa 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -160,14 +160,6 @@ impl TxPool { .map(|entry| entry.inner.transaction()) } - pub(crate) fn put_recent_reject(&mut self, tx_hash: &Byte32, reject: &Reject) { - if let Some(ref mut recent_reject) = self.recent_reject { - if let Err(e) = recent_reject.put(tx_hash, reject.clone()) { - error!("record recent_reject failed {} {} {}", tx_hash, reject, e); - } - } - } - pub(crate) fn remove_committed_txs<'a>( &mut self, txs: impl Iterator, diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index e6747b32cb..1ec8d1922b 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -325,7 +325,11 @@ impl TxPoolService { pub(crate) async fn put_recent_reject(&self, tx_hash: &Byte32, reject: &Reject) { let mut tx_pool = self.tx_pool.write().await; - tx_pool.put_recent_reject(tx_hash, reject); + if let Some(ref mut recent_reject) = tx_pool.recent_reject { + if let Err(e) = recent_reject.put(tx_hash, reject.clone()) { + error!("record recent_reject failed {} {} {}", tx_hash, reject, e); + } + } } pub(crate) async fn remove_tx(&self, tx_hash: Byte32) -> bool { From 193ccb12d850803bcb968c95ce83c01480d673ae Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 26 Jul 2023 12:10:48 +0800 Subject: [PATCH 122/267] add comments for min_rbf_rate --- resource/ckb.toml | 1 + rpc/README.md | 2 +- util/jsonrpc-types/src/pool.rs | 1 + util/types/src/core/tx_pool.rs | 1 + 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/resource/ckb.toml b/resource/ckb.toml index c34450a3e5..9f99d4bb98 100644 --- a/resource/ckb.toml +++ b/resource/ckb.toml @@ -134,6 +134,7 @@ enable_deprecated_rpc = false # {{ [tx_pool] max_tx_pool_size = 180_000_000 # 180mb min_fee_rate = 1_000 # Here fee_rate are calculated directly using size in units of shannons/KB +# min_rbf_rate > min_fee_rate means RBF is enabled min_rbf_rate = 1_500 # Here fee_rate are calculated directly using size in units of shannons/KB max_tx_verify_cycles = 70_000_000 max_ancestors_count = 25 diff --git a/rpc/README.md b/rpc/README.md index 73109788be..f0af4bc71d 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -7039,7 +7039,7 @@ Transaction pool information. The unit is Shannons per 1000 bytes transaction serialization size in the block. -* `min_rbf_rate`: [`Uint64`](#type-uint64) - RBF rate threshold. The pool reject to replace for transactions which fee rate is below this threshold. +* `min_rbf_rate`: [`Uint64`](#type-uint64) - RBF rate threshold. The pool reject to replace for transactions which fee rate is below this threshold. if min_rbf_rate > min_fee_rate then RBF is enabled on the node. The unit is Shannons per 1000 bytes transaction serialization size in the block. diff --git a/util/jsonrpc-types/src/pool.rs b/util/jsonrpc-types/src/pool.rs index a7c7bf4d6b..e13918e857 100644 --- a/util/jsonrpc-types/src/pool.rs +++ b/util/jsonrpc-types/src/pool.rs @@ -41,6 +41,7 @@ pub struct TxPoolInfo { /// The unit is Shannons per 1000 bytes transaction serialization size in the block. pub min_fee_rate: Uint64, /// RBF rate threshold. The pool reject to replace for transactions which fee rate is below this threshold. + /// if min_rbf_rate > min_fee_rate then RBF is enabled on the node. /// /// The unit is Shannons per 1000 bytes transaction serialization size in the block. pub min_rbf_rate: Uint64, diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index 75eb6e2790..25dfd70301 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -321,6 +321,7 @@ pub struct TxPoolInfo { pub min_fee_rate: FeeRate, /// Min RBF rate threshold. The pool reject RBF transactions which fee rate is below this threshold. + /// if min_rbf_rate > min_fee_rate then RBF is enabled on the node. /// /// The unit is Shannons per 1000 bytes transaction serialization size in the block. pub min_rbf_rate: FeeRate, From e1d9125b0a4a502492ea990d2dde147289568af8 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 26 Jul 2023 16:30:01 +0800 Subject: [PATCH 123/267] refactor RBF and add fee related info to get_transaction --- rpc/src/module/chain.rs | 3 +- tx-pool/src/pool.rs | 92 +++++++++++++++++++++++----------- tx-pool/src/service.rs | 13 ++--- util/types/src/core/tx_pool.rs | 30 +++++------ 4 files changed, 85 insertions(+), 53 deletions(-) diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 08574e13bb..666f8e0c3a 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -2130,11 +2130,11 @@ impl ChainRpcImpl { .and_then(|v| v.get(tx_info.index.saturating_sub(1)).copied()) }) }; - return Ok(TransactionWithStatus::with_committed( None, tx_info.block_hash.unpack(), cycles, + None, )); } @@ -2181,6 +2181,7 @@ impl ChainRpcImpl { Some(tx), tx_info.block_hash.unpack(), cycles, + None, )); } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 40f62212aa..ffcc65c3fd 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -90,6 +90,38 @@ impl TxPool { self.config.min_rbf_rate > self.config.min_fee_rate } + /// The least required fee rate to allow tx to be replaced + pub fn min_replace_fee(&self, tx: &TxEntry) -> Option { + if !self.enable_rbf() { + return None; + } + let conflicts = self.pool_map.find_conflict_tx(tx.transaction()); + self.calculate_min_replace_fee(&conflicts, tx.size) + } + + fn calculate_min_replace_fee( + &self, + conflicts: &HashSet, + size: usize, + ) -> Option { + let entries = conflicts + .iter() + .map(|id| { + self.get_pool_entry(id) + .expect("conflict Tx should be in pool") + }) + .collect::>(); + let min_rbf_fee = self.config.min_rbf_rate.fee(size as u64); + Some( + entries + .iter() + .map(|c| c.inner.fee) + .max() + .unwrap_or(min_rbf_fee) + .max(min_rbf_fee), + ) + } + /// Update size and cycles statics for remove tx /// cycles overflow is possible, currently obtaining cycles is not accurate pub fn update_statics_for_remove_tx(&mut self, tx_size: usize, cycles: Cycle) { @@ -486,7 +518,20 @@ impl TxPool { assert!(!conflicts.is_empty()); let short_id = rtx.transaction.proposal_short_id(); - let entries = conflicts + // Rule #4, new tx' fee need to higher than min_rbf_fee computed from the tx_pool configuration + // Rule #3, new tx's fee need to higher than conflicts, here we only check the root tx + if let Some(min_replace_fee) = self.calculate_min_replace_fee(conflicts, tx_size) { + if fee < min_replace_fee { + return Err(Reject::RBFRejected(format!( + "Tx's current fee is {}, expect it to be larger than: {} to replace old txs", + fee, min_replace_fee, + ))); + } + } else { + panic!("calculate_min_replace_fee must success"); + } + + let pool_entries = conflicts .iter() .map(|id| { self.get_pool_entry(id) @@ -494,18 +539,12 @@ impl TxPool { }) .collect::>(); - // Rule #6, any old Tx should be in `Pending` or `Gap` status - if entries - .iter() - .any(|e| ![Status::Pending, Status::Gap].contains(&e.status)) - { - // Here we only refer to `Pending` status, since `Gap` is an internal status - return Err(Reject::RBFRejected( - "all conflict Txs should be in Pending status".to_string(), - )); - } + let mut all_statuses = pool_entries.iter().map(|e| e.status).collect::>(); - let conflicts = entries.iter().map(|e| e.inner.clone()).collect::>(); + let conflicts = pool_entries + .iter() + .map(|e| e.inner.clone()) + .collect::>(); // Rule #2, new tx don't contain any new unconfirmed inputs let mut inputs = HashSet::new(); @@ -522,23 +561,6 @@ impl TxPool { )); } - // Rule #4, new tx' fee need to higher than min_rbf_fee computed from the tx_pool configuration - // Rule #3, new tx's fee need to higher than conflicts, here we only check the root tx - let min_rbf_fee = self.config.min_rbf_rate.fee(tx_size as u64); - let max_fee = conflicts - .iter() - .map(|c| c.fee) - .max() - .unwrap_or(min_rbf_fee) - .max(min_rbf_fee); - - if fee <= max_fee { - return Err(Reject::RBFRejected(format!( - "Tx's current fee is {}, expect it to be larger than: {} to replace old txs", - fee, max_fee, - ))); - } - // Rule #5, the replaced tx's descendants can not more than 100 // and the ancestor of the new tx don't have common set with the replaced tx's descendants let mut replace_count: usize = 0; @@ -562,6 +584,7 @@ impl TxPool { for id in descendants.iter() { if let Some(entry) = self.get_pool_entry(id) { + all_statuses.push(entry.status); let hash = entry.inner.transaction().hash(); if rtx .transaction @@ -577,6 +600,17 @@ impl TxPool { } } + // Rule #6, any old Tx should be in `Pending` or `Gap` status + if all_statuses + .iter() + .any(|s| ![Status::Pending, Status::Gap].contains(s)) + { + // Here we only refer to `Pending` status, since `Gap` is an internal status + return Err(Reject::RBFRejected( + "all conflict Txs should be in Pending status".to_string(), + )); + } + Ok(()) } diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index 6414f3161f..668a2bb9a9 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -779,15 +779,16 @@ async fn process(mut service: TxPoolService, message: Message) { .. }) = tx_pool.pool_map.get_by_id(&id) { - let trans_status = if status == &Status::Proposed { - TransactionWithStatus::with_proposed - } else { - TransactionWithStatus::with_pending - }; - Ok(trans_status( + Ok(TransactionWithStatus::with_status( Some(entry.transaction().clone()), entry.cycles, entry.timestamp, + if status == &Status::Proposed { + TxStatus::Proposed + } else { + TxStatus::Pending + }, + Some(entry.fee), )) } else if let Some(ref recent_reject_db) = tx_pool.recent_reject { match recent_reject_db.get(&hash) { diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index 25dfd70301..30b6b00d2b 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -169,33 +169,24 @@ pub struct TransactionWithStatus { pub tx_status: TxStatus, /// The transaction verification consumed cycles pub cycles: Option, + /// The transaction fee of the transaction + pub fee: Option, /// If the transaction is in tx-pool, `time_added_to_pool` represent when it enter the tx-pool. unit: Millisecond pub time_added_to_pool: Option, } impl TransactionWithStatus { - /// Build with pending status - pub fn with_pending( + /// Build with tx status + pub fn with_status( tx: Option, cycles: core::Cycle, time_added_to_pool: u64, + tx_status: TxStatus, + fee: Option, ) -> Self { Self { - tx_status: TxStatus::Pending, - transaction: tx, - cycles: Some(cycles), - time_added_to_pool: Some(time_added_to_pool), - } - } - - /// Build with proposed status - pub fn with_proposed( - tx: Option, - cycles: core::Cycle, - time_added_to_pool: u64, - ) -> Self { - Self { - tx_status: TxStatus::Proposed, + tx_status, + fee, transaction: tx, cycles: Some(cycles), time_added_to_pool: Some(time_added_to_pool), @@ -207,11 +198,13 @@ impl TransactionWithStatus { tx: Option, hash: H256, cycles: Option, + fee: Option, ) -> Self { Self { tx_status: TxStatus::Committed(hash), transaction: tx, cycles, + fee, time_added_to_pool: None, } } @@ -222,6 +215,7 @@ impl TransactionWithStatus { tx_status: TxStatus::Rejected(reason), transaction: None, cycles: None, + fee: None, time_added_to_pool: None, } } @@ -232,6 +226,7 @@ impl TransactionWithStatus { tx_status: TxStatus::Unknown, transaction: None, cycles: None, + fee: None, time_added_to_pool: None, } } @@ -242,6 +237,7 @@ impl TransactionWithStatus { tx_status, transaction: None, cycles, + fee: None, time_added_to_pool: None, } } From d3a61b4cff8a648b20bd414ce61c5823cb3d77b6 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 26 Jul 2023 16:47:38 +0800 Subject: [PATCH 124/267] add min_replace_fee to get_transaction --- rpc/README.md | 4 ++++ tx-pool/src/service.rs | 12 +++++++----- util/jsonrpc-types/src/blockchain.rs | 8 ++++++++ util/types/src/core/tx_pool.rs | 8 ++++++++ 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/rpc/README.md b/rpc/README.md index f0af4bc71d..48bd5cd76d 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -6955,6 +6955,10 @@ The JSON view of a transaction as well as its status. * `tx_status`: [`TxStatus`](#type-txstatus) - The Transaction status. +* `fee`: [`Capacity`](#type-capacity) `|` `null` - The transaction fee of the transaction + +* `min_replace_fee`: [`Capacity`](#type-capacity) `|` `null` - The minimal fee required to replace this transaction + ### Type `TxPoolEntries` diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index 668a2bb9a9..c02d68ab57 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -779,16 +779,18 @@ async fn process(mut service: TxPoolService, message: Message) { .. }) = tx_pool.pool_map.get_by_id(&id) { + let tx_status = if status == &Status::Proposed { + TxStatus::Proposed + } else { + TxStatus::Pending + }; Ok(TransactionWithStatus::with_status( Some(entry.transaction().clone()), entry.cycles, entry.timestamp, - if status == &Status::Proposed { - TxStatus::Proposed - } else { - TxStatus::Pending - }, + tx_status, Some(entry.fee), + tx_pool.min_replace_fee(entry), )) } else if let Some(ref recent_reject_db) = tx_pool.recent_reject { match recent_reject_db.get(&hash) { diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index aaa20ffccf..8d2824b445 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -534,6 +534,10 @@ pub struct TransactionWithStatusResponse { pub time_added_to_pool: Option, /// The Transaction status. pub tx_status: TxStatus, + /// The transaction fee of the transaction + pub fee: Option, + /// The minimal fee required to replace this transaction + pub min_replace_fee: Option, } impl TransactionWithStatusResponse { @@ -548,6 +552,8 @@ impl TransactionWithStatusResponse { tx_status: t.tx_status.into(), cycles: t.cycles.map(Into::into), time_added_to_pool: t.time_added_to_pool.map(Into::into), + fee: t.fee.map(Into::into), + min_replace_fee: t.min_replace_fee.map(Into::into), }, ResponseFormatInnerType::Json => TransactionWithStatusResponse { transaction: t @@ -556,6 +562,8 @@ impl TransactionWithStatusResponse { tx_status: t.tx_status.into(), cycles: t.cycles.map(Into::into), time_added_to_pool: t.time_added_to_pool.map(Into::into), + fee: t.fee.map(Into::into), + min_replace_fee: t.min_replace_fee.map(Into::into), }, } } diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index 30b6b00d2b..53baefbf72 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -171,6 +171,8 @@ pub struct TransactionWithStatus { pub cycles: Option, /// The transaction fee of the transaction pub fee: Option, + /// The minimal fee required to replace this transaction + pub min_replace_fee: Option, /// If the transaction is in tx-pool, `time_added_to_pool` represent when it enter the tx-pool. unit: Millisecond pub time_added_to_pool: Option, } @@ -183,10 +185,12 @@ impl TransactionWithStatus { time_added_to_pool: u64, tx_status: TxStatus, fee: Option, + min_replace_fee: Option, ) -> Self { Self { tx_status, fee, + min_replace_fee, transaction: tx, cycles: Some(cycles), time_added_to_pool: Some(time_added_to_pool), @@ -205,6 +209,7 @@ impl TransactionWithStatus { transaction: tx, cycles, fee, + min_replace_fee: None, time_added_to_pool: None, } } @@ -216,6 +221,7 @@ impl TransactionWithStatus { transaction: None, cycles: None, fee: None, + min_replace_fee: None, time_added_to_pool: None, } } @@ -227,6 +233,7 @@ impl TransactionWithStatus { transaction: None, cycles: None, fee: None, + min_replace_fee: None, time_added_to_pool: None, } } @@ -238,6 +245,7 @@ impl TransactionWithStatus { transaction: None, cycles, fee: None, + min_replace_fee: None, time_added_to_pool: None, } } From 339ebab20b4d7c5abb4dd3bff5becd54fd3a5d52 Mon Sep 17 00:00:00 2001 From: yukang Date: Wed, 26 Jul 2023 18:42:15 +0800 Subject: [PATCH 125/267] fix calculate_min_replace_fee, using sum of fee from replaced Tx --- rpc/README.md | 2 ++ rpc/src/module/chain.rs | 2 ++ test/src/specs/tx_pool/replace.rs | 6 +++-- tx-pool/src/pool.rs | 37 +++++++++++++++++++++++++------ 4 files changed, 38 insertions(+), 9 deletions(-) diff --git a/rpc/README.md b/rpc/README.md index 48bd5cd76d..57edcf01a3 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -882,6 +882,8 @@ Response }, "cycles": "0x219", "time_added_to_pool" : "0x187b3d137a1", + "fee": "0x16923f7dcf", + "min_replace_fee": "0x16923f7f6a", "tx_status": { "block_hash": null, "status": "pending", diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 666f8e0c3a..36e0f4238f 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -617,6 +617,8 @@ pub trait ChainRpc { /// }, /// "cycles": "0x219", /// "time_added_to_pool" : "0x187b3d137a1", + /// "fee": "0x16923f7dcf", + /// "min_replace_fee": "0x16923f7f6a", /// "tx_status": { /// "block_hash": null, /// "status": "pending", diff --git a/test/src/specs/tx_pool/replace.rs b/test/src/specs/tx_pool/replace.rs index abbfca053c..937f63d32c 100644 --- a/test/src/specs/tx_pool/replace.rs +++ b/test/src/specs/tx_pool/replace.rs @@ -114,7 +114,7 @@ impl Spec for RbfSameInput { pub struct RbfSameInputwithLessFee; -// RBF Rule #3 +// RBF Rule #3, #4 impl Spec for RbfSameInputwithLessFee { fn run(&self, nodes: &mut Vec) { let node0 = &nodes[0]; @@ -148,7 +148,9 @@ impl Spec for RbfSameInputwithLessFee { .send_transaction_result(tx2.data().into()); assert!(res.is_err(), "tx2 should be rejected"); let message = res.err().unwrap().to_string(); - assert!(message.contains("Tx's current fee is 1000000000, expect it to be larger than")); + assert!(message.contains( + "Tx's current fee is 1000000000, expect it to >= 2000000363 to replace old txs" + )); } fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index ffcc65c3fd..4fb5a96dcf 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -96,6 +96,8 @@ impl TxPool { return None; } let conflicts = self.pool_map.find_conflict_tx(tx.transaction()); + // we don't allow conflicted Tx in pool now + assert!(conflicts.len() == 1); self.calculate_min_replace_fee(&conflicts, tx.size) } @@ -111,15 +113,36 @@ impl TxPool { .expect("conflict Tx should be in pool") }) .collect::>(); - let min_rbf_fee = self.config.min_rbf_rate.fee(size as u64); - Some( + + for x in entries.iter() { + eprintln!( + "old tx: {:?} fee: {:?}", + x.inner.transaction().hash(), + x.inner.fee + ); + } + + let extra_rbf_fee = self.config.min_rbf_rate.fee(size as u64); + let replaced_sum_fee: Capacity = entries .iter() .map(|c| c.inner.fee) - .max() - .unwrap_or(min_rbf_fee) - .max(min_rbf_fee), - ) + .fold(Capacity::zero(), |acc, x| { + acc.safe_add(x).unwrap_or_else(|_| { + error!("replaced_sum_fee {} overflow by add {}", acc, x); + Capacity::zero() + }) + }); + let res = replaced_sum_fee.safe_add(extra_rbf_fee); + if let Ok(res) = res { + Some(res) + } else { + error!( + "replaced_sum_fee {} overflow by add {}", + replaced_sum_fee, extra_rbf_fee + ); + None + } } /// Update size and cycles statics for remove tx @@ -523,7 +546,7 @@ impl TxPool { if let Some(min_replace_fee) = self.calculate_min_replace_fee(conflicts, tx_size) { if fee < min_replace_fee { return Err(Reject::RBFRejected(format!( - "Tx's current fee is {}, expect it to be larger than: {} to replace old txs", + "Tx's current fee is {}, expect it to >= {} to replace old txs", fee, min_replace_fee, ))); } From 9d364c67e875f1a239f32f0511e64cbb9e7d531d Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 27 Jul 2023 01:45:33 +0800 Subject: [PATCH 126/267] refactor TxModifiedEntries with MultiIndexMap --- tx-pool/src/component/commit_txs_scanner.rs | 49 +++++++++++---------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index 3546aa0b7f..ecb5619e6d 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -1,44 +1,45 @@ +extern crate slab; use crate::component::pool_map::PoolMap; use crate::component::{entry::TxEntry, sort_key::AncestorsScoreSortKey}; +use ckb_multi_index_map::MultiIndexMap; use ckb_types::{core::Cycle, packed::ProposalShortId}; use ckb_util::LinkedHashMap; -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::HashSet; // A template data struct used to store modified entries when package txs -#[derive(Default)] -pub struct TxModifiedEntries { - entries: HashMap, - sorted_index: BTreeSet, +#[derive(MultiIndexMap, Clone)] +pub struct ModifiedTx { + #[multi_index(hashed_unique)] + pub id: ProposalShortId, + #[multi_index(ordered_non_unique)] + pub score: AncestorsScoreSortKey, + pub inner: TxEntry, } -impl TxModifiedEntries { +impl MultiIndexModifiedTxMap { pub fn next_best_entry(&self) -> Option<&TxEntry> { - self.sorted_index - .iter() - .max() - .map(|key| self.entries.get(&key.id).expect("consistent")) + self.iter_by_score().last().map(|x| &x.inner) } pub fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> { - self.entries.get(id) + self.get_by_id(id).map(|x| &x.inner) } pub fn contains_key(&self, id: &ProposalShortId) -> bool { - self.entries.contains_key(id) + self.get_by_id(id).is_some() } - pub fn insert(&mut self, entry: TxEntry) { - let key = AncestorsScoreSortKey::from(&entry); - let short_id = entry.proposal_short_id(); - self.entries.insert(short_id, entry); - self.sorted_index.insert(key); + pub fn insert_entry(&mut self, entry: TxEntry) { + let score = AncestorsScoreSortKey::from(&entry); + self.insert(ModifiedTx { + id: entry.proposal_short_id(), + score, + inner: entry, + }); } pub fn remove(&mut self, id: &ProposalShortId) -> Option { - self.entries.remove(id).map(|entry| { - self.sorted_index.remove(&(&entry).into()); - entry - }) + self.remove_by_id(id).map(|x| x.inner) } } @@ -53,7 +54,7 @@ pub struct CommitTxsScanner<'a> { entries: Vec, // modified_entries will store sorted packages after they are modified // because some of their txs are already in the block - modified_entries: TxModifiedEntries, + modified_entries: MultiIndexModifiedTxMap, // txs that packaged in block fetched_txs: HashSet, // Keep track of entries that failed inclusion, to avoid duplicate work @@ -65,7 +66,7 @@ impl<'a> CommitTxsScanner<'a> { CommitTxsScanner { entries: Vec::new(), pool_map, - modified_entries: TxModifiedEntries::default(), + modified_entries: MultiIndexModifiedTxMap::default(), fetched_txs: HashSet::default(), failed_txs: HashSet::default(), } @@ -210,7 +211,7 @@ impl<'a> CommitTxsScanner<'a> { .or_else(|| self.pool_map.get(desc_id).cloned()) { desc.sub_ancestor_weight(entry); - self.modified_entries.insert(desc); + self.modified_entries.insert_entry(desc); } } } From 389c30af846034629d87e97a9bc5c08b4e69bd1f Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 27 Jul 2023 02:08:42 +0800 Subject: [PATCH 127/267] remove id from AncestorsScoreSortKey for perf --- tx-pool/src/component/entry.rs | 1 - tx-pool/src/component/sort_key.rs | 12 +---- tx-pool/src/component/tests/score_key.rs | 64 +++++++++++++----------- 3 files changed, 36 insertions(+), 41 deletions(-) diff --git a/tx-pool/src/component/entry.rs b/tx-pool/src/component/entry.rs index 09f9f3c988..f45d4feace 100644 --- a/tx-pool/src/component/entry.rs +++ b/tx-pool/src/component/entry.rs @@ -225,7 +225,6 @@ impl From<&TxEntry> for AncestorsScoreSortKey { AncestorsScoreSortKey { fee: entry.fee, weight, - id: entry.proposal_short_id(), ancestors_fee: entry.ancestors_fee, ancestors_weight, } diff --git a/tx-pool/src/component/sort_key.rs b/tx-pool/src/component/sort_key.rs index 50f2363aff..ceeab649bc 100644 --- a/tx-pool/src/component/sort_key.rs +++ b/tx-pool/src/component/sort_key.rs @@ -1,7 +1,4 @@ -use ckb_types::{ - core::{Capacity, FeeRate}, - packed::ProposalShortId, -}; +use ckb_types::core::{Capacity, FeeRate}; use std::cmp::Ordering; /// A struct to use as a sorted key @@ -9,7 +6,6 @@ use std::cmp::Ordering; pub struct AncestorsScoreSortKey { pub fee: Capacity, pub weight: u64, - pub id: ProposalShortId, pub ancestors_fee: Capacity, pub ancestors_weight: u64, } @@ -44,11 +40,7 @@ impl Ord for AncestorsScoreSortKey { let other_weight = u128::from(other_fee.as_u64()) * u128::from(weight); if self_weight == other_weight { // if fee rate weight is same, then compare with ancestor weight - if self.ancestors_weight == other.ancestors_weight { - self.id.raw_data().cmp(&other.id.raw_data()) - } else { - self.ancestors_weight.cmp(&other.ancestors_weight) - } + self.ancestors_weight.cmp(&other.ancestors_weight) } else { self_weight.cmp(&other_weight) } diff --git a/tx-pool/src/component/tests/score_key.rs b/tx-pool/src/component/tests/score_key.rs index 94aa51cf72..c12cc7426c 100644 --- a/tx-pool/src/component/tests/score_key.rs +++ b/tx-pool/src/component/tests/score_key.rs @@ -1,10 +1,9 @@ use ckb_types::{ bytes::Bytes, core::{Capacity, TransactionBuilder}, - packed::{CellInput, OutPoint, ProposalShortId}, + packed::{CellInput, OutPoint}, prelude::*, }; -use std::mem::size_of; use crate::component::{entry::TxEntry, pool_map::PoolMap, sort_key::AncestorsScoreSortKey}; @@ -27,7 +26,6 @@ fn test_min_fee_and_weight() { let key = AncestorsScoreSortKey { fee: Capacity::shannons(fee), weight, - id: ProposalShortId::new([0u8; 10]), ancestors_fee: Capacity::shannons(ancestors_fee), ancestors_weight, }; @@ -51,7 +49,7 @@ fn test_min_fee_and_weight() { #[test] fn test_ancestors_sorted_key_order() { - let mut keys = vec![ + let table = vec![ (0, 0, 0, 0), (1, 0, 1, 0), (500, 10, 1000, 30), @@ -62,33 +60,39 @@ fn test_ancestors_sorted_key_order() { (std::u64::MAX, 0, std::u64::MAX, 0), (std::u64::MAX, 100, std::u64::MAX, 2000), (std::u64::MAX, std::u64::MAX, std::u64::MAX, std::u64::MAX), - ] - .into_iter() - .enumerate() - .map(|(i, (fee, weight, ancestors_fee, ancestors_weight))| { - let mut id = [0u8; 10]; - id[..size_of::()].copy_from_slice(&(i as u32).to_be_bytes()); - AncestorsScoreSortKey { - fee: Capacity::shannons(fee), - weight, - id: ProposalShortId::new(id), - ancestors_fee: Capacity::shannons(ancestors_fee), - ancestors_weight, - } - }) - .collect::>(); + ]; + let mut keys = table + .clone() + .into_iter() + .enumerate() + .map( + |(_i, (fee, weight, ancestors_fee, ancestors_weight))| AncestorsScoreSortKey { + fee: Capacity::shannons(fee), + weight, + ancestors_fee: Capacity::shannons(ancestors_fee), + ancestors_weight, + }, + ) + .collect::>(); keys.sort(); - assert_eq!( - keys.into_iter().map(|k| k.id).collect::>(), - [0, 3, 5, 9, 2, 4, 6, 8, 1, 7] - .iter() - .map(|&i| { - let mut id = [0u8; 10]; - id[..size_of::()].copy_from_slice(&(i as u32).to_be_bytes()); - ProposalShortId::new(id) - }) - .collect::>() - ); + let now = keys + .into_iter() + .map(|k| (k.fee, k.weight, k.ancestors_fee, k.ancestors_weight)) + .collect::>(); + let expect = [0, 3, 5, 9, 2, 4, 6, 8, 1, 7] + .iter() + .map(|&i| { + let key = table[i as usize]; + ( + Capacity::shannons(key.0), + key.1, + Capacity::shannons(key.2), + key.3, + ) + }) + .collect::>(); + + assert_eq!(now, expect); } #[test] From 279c3b74b42b72dc74bb88c8761bf413da094745 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 27 Jul 2023 14:57:13 +0800 Subject: [PATCH 128/267] Fix typo --- rpc/README.md | 2 +- rpc/src/error.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/rpc/README.md b/rpc/README.md index 57edcf01a3..0f1b0020fd 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -5075,7 +5075,7 @@ For example, a cellbase transaction is not allowed in `send_transaction` RPC. (-1110): The transaction exceeded maximum size limit. -### Error `PoolRejctedRBF` +### Error `PoolRejectedRBF` (-1111): The transaction is rejected for RBF checking. diff --git a/rpc/src/error.rs b/rpc/src/error.rs index b067fbf9d0..e566e96eb2 100644 --- a/rpc/src/error.rs +++ b/rpc/src/error.rs @@ -113,7 +113,7 @@ pub enum RPCError { /// (-1110): The transaction exceeded maximum size limit. PoolRejectedTransactionBySizeLimit = -1110, /// (-1111): The transaction is rejected for RBF checking. - PoolRejctedRBF = -1111, + PoolRejectedRBF = -1111, /// (-1200): The indexer error. Indexer = -1200, } @@ -175,7 +175,7 @@ impl RPCError { Reject::DeclaredWrongCycles(..) => RPCError::PoolRejectedMalformedTransaction, Reject::Resolve(_) => RPCError::TransactionFailedToResolve, Reject::Verification(_) => RPCError::TransactionFailedToVerify, - Reject::RBFRejected(_) => RPCError::PoolRejctedRBF, + Reject::RBFRejected(_) => RPCError::PoolRejectedRBF, Reject::ExceededTransactionSizeLimit(_, _) => { RPCError::PoolRejectedTransactionBySizeLimit } From 14e5cbf8b4ca58fc2b0ed345df238f4f6020b0e4 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 27 Jul 2023 17:51:46 +0800 Subject: [PATCH 129/267] Proposed tx will not get min_replace_fee --- tx-pool/src/service.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index c02d68ab57..9df681f071 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -779,10 +779,10 @@ async fn process(mut service: TxPoolService, message: Message) { .. }) = tx_pool.pool_map.get_by_id(&id) { - let tx_status = if status == &Status::Proposed { - TxStatus::Proposed + let (tx_status, min_replace_fee) = if status == &Status::Proposed { + (TxStatus::Proposed, None) } else { - TxStatus::Pending + (TxStatus::Pending, tx_pool.min_replace_fee(entry)) }; Ok(TransactionWithStatus::with_status( Some(entry.transaction().clone()), @@ -790,7 +790,7 @@ async fn process(mut service: TxPoolService, message: Message) { entry.timestamp, tx_status, Some(entry.fee), - tx_pool.min_replace_fee(entry), + min_replace_fee, )) } else if let Some(ref recent_reject_db) = tx_pool.recent_reject { match recent_reject_db.get(&hash) { From 446b0bdd098d49ffa04dd9d141e09f1e22529ee0 Mon Sep 17 00:00:00 2001 From: yukang Date: Fri, 28 Jul 2023 11:06:45 +0800 Subject: [PATCH 130/267] add changelog for tx-pool refactor and RBF --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1675622984..45cb473eaf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +### Features +* #4079, **tx-pol:** Implement `Replace-by-Fee(RBF)` for tx-pool (@chenyukang) +This feature enables users to replace a transaction with a higher fee rate, which is useful when the transaction is stuck in the tx-pool: + * Add `min_rbf_rate` in `ckb.toml` with default value `1500`, which means the minimum extra fee rate for RBF, the unit is `shannons/KB` + * Add fields `fee` and `min_replace_fee` in `get_transaction`, which means the the minimal fee need to pay for RBF for a specific transaction + * The replaced transaction will be removed from `tx-pool` and with the status `Rejected`. + +### Improvements +* #3993, **tx-pool:** Almost reimplemented `tx-pool` with `multi_index_map`, with the following improvements (@chenyukang): + * Sort txs in pool by `score` in `Pending` stage, `txs` with higher `score` be processed first + * Evict `txs` from pool with `descendants_count` and `fee_rate` + * Eliminate redundant code for clean and consistent code + # [v0.110.0](https://github.com/nervosnetwork/ckb/compare/v0.109.0...v0.110.0) (2023-05-15) ### Features From 396584fccbd7eedb77623bbdc25e67be9c8194ed Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 1 Aug 2023 11:48:12 +0800 Subject: [PATCH 131/267] use proper name for tx-pool func --- tx-pool/src/component/pool_map.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index b7ccec0034..842861e49b 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -167,9 +167,9 @@ impl PoolMap { return Ok(false); } trace!("pool_map.add_{:?} {}", status, entry.transaction().hash()); - self.check_record_ancestors(&mut entry)?; + self.check_and_record_ancestors(&mut entry)?; self.insert_entry(&entry, status); - self.record_entry_deps(&entry); + self.record_entry_edges(&entry); self.record_entry_descendants(&entry); Ok(true) } @@ -187,7 +187,6 @@ impl PoolMap { self.entries.remove_by_id(id).map(|entry| { self.update_ancestors_index_key(&entry.inner, EntryOp::Remove); self.update_descendants_index_key(&entry.inner, EntryOp::Remove); - self.remove_entry_deps(&entry.inner); self.remove_entry_edges(&entry.inner); self.remove_entry_links(id); entry.inner @@ -366,7 +365,7 @@ impl PoolMap { } } - fn record_entry_deps(&mut self, entry: &TxEntry) { + fn record_entry_edges(&mut self, entry: &TxEntry) { let tx_short_id: ProposalShortId = entry.proposal_short_id(); let header_deps = entry.transaction().header_deps(); let related_dep_out_points: Vec<_> = entry.related_dep_out_points().cloned().collect(); @@ -419,7 +418,7 @@ impl PoolMap { } /// Check ancestors and record for entry - fn check_record_ancestors(&mut self, entry: &mut TxEntry) -> Result { + fn check_and_record_ancestors(&mut self, entry: &mut TxEntry) -> Result { let mut parents: HashSet = HashSet::with_capacity( entry.transaction().inputs().len() + entry.transaction().cell_deps().len(), ); @@ -480,9 +479,7 @@ impl PoolMap { // release input record self.edges.remove_input(&i); } - } - fn remove_entry_deps(&mut self, entry: &TxEntry) { let id = entry.proposal_short_id(); for d in entry.related_dep_out_points().cloned() { self.edges.delete_txid_by_dep(d, &id); From 792ef8704ff7c15e844f18e119419d8f605d5d9e Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 1 Aug 2023 17:19:20 +0800 Subject: [PATCH 132/267] code cleanup to remove unwrap --- tx-pool/src/component/pool_map.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 842861e49b..ca2e224f69 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -110,6 +110,14 @@ impl PoolMap { self.entries.get_by_id(id) } + fn get_by_id_checked(&self, id: &ProposalShortId) -> &PoolEntry { + self.get_by_id(id).expect("unconsistent pool") + } + + fn get_by_id_checked(&self, id: &ProposalShortId) -> &PoolEntry { + self.get_by_id(id).expect("unconsistent pool") + } + pub(crate) fn get_by_status(&self, status: Status) -> Vec<&PoolEntry> { self.entries.get_by_status(&status) } @@ -450,10 +458,7 @@ impl PoolMap { // update parents references for ancestor_id in &ancestors { - let ancestor = self - .entries - .get_by_id(ancestor_id) - .expect("pool consistent"); + let ancestor = self.get_by_id_checked(ancestor_id); entry.add_ancestor_weight(&ancestor.inner); } if entry.ancestors_count > self.max_ancestors_count { From 96ae57b6c9d9530e884ac424de74258200c5027e Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 3 Aug 2023 11:28:12 +0800 Subject: [PATCH 133/267] add comments for PreCheckedTx --- tx-pool/src/pool.rs | 2 +- tx-pool/src/process.rs | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 4fb5a96dcf..f01c47a94d 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -541,7 +541,7 @@ impl TxPool { assert!(!conflicts.is_empty()); let short_id = rtx.transaction.proposal_short_id(); - // Rule #4, new tx' fee need to higher than min_rbf_fee computed from the tx_pool configuration + // Rule #4, new tx's fee need to higher than min_rbf_fee computed from the tx_pool configuration // Rule #3, new tx's fee need to higher than conflicts, here we only check the root tx if let Some(min_replace_fee) = self.calculate_min_replace_fee(conflicts, tx_size) { if fee < min_replace_fee { diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 1ec8d1922b..ee0ed572aa 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -1002,11 +1002,13 @@ impl TxPoolService { } type PreCheckedTx = ( - Byte32, - Arc, - TxStatus, - Capacity, - usize, + Byte32, // tip_hash + Arc, // rtx + TxStatus, // status + Capacity, // tx fee + usize, // tx size + // the conflicted txs, used for latter `check_rbf` + // the root txs for removing from `tx-pool` when RBF is checked HashSet, ); From df06351e5d29bde4def8b7f650e6705634ba5097 Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 3 Aug 2023 12:09:10 +0800 Subject: [PATCH 134/267] return proper error when calculate_min_replace_fee failed --- tx-pool/src/pool.rs | 36 ++++++++++++++---------------------- tx-pool/src/process.rs | 1 - 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index f01c47a94d..0b88cb4daf 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -11,6 +11,7 @@ use ckb_app_config::TxPoolConfig; use ckb_logger::{debug, error, warn}; use ckb_snapshot::Snapshot; use ckb_store::ChainStore; +use ckb_types::core::CapacityError; use ckb_types::{ core::{ cell::{resolve_transaction, OverlayCellChecker, OverlayCellProvider, ResolvedTransaction}, @@ -114,32 +115,21 @@ impl TxPool { }) .collect::>(); - for x in entries.iter() { - eprintln!( - "old tx: {:?} fee: {:?}", - x.inner.transaction().hash(), - x.inner.fee - ); - } - let extra_rbf_fee = self.config.min_rbf_rate.fee(size as u64); - let replaced_sum_fee: Capacity = - entries - .iter() - .map(|c| c.inner.fee) - .fold(Capacity::zero(), |acc, x| { - acc.safe_add(x).unwrap_or_else(|_| { - error!("replaced_sum_fee {} overflow by add {}", acc, x); - Capacity::zero() - }) - }); - let res = replaced_sum_fee.safe_add(extra_rbf_fee); + let replaced_sum_fee = entries + .iter() + .map(|c| c.inner.fee) + .try_fold(Capacity::zero(), |acc, x| acc.safe_add(x)); + let res = replaced_sum_fee.map_or(Err(CapacityError::Overflow), |sum| { + sum.safe_add(extra_rbf_fee) + }); if let Ok(res) = res { Some(res) } else { + let fees = entries.iter().map(|c| c.inner.fee).collect::>(); error!( - "replaced_sum_fee {} overflow by add {}", - replaced_sum_fee, extra_rbf_fee + "conflicts: {:?} replaced_sum_fee {:?} overflow by add {}", + conflicts, fees, extra_rbf_fee ); None } @@ -551,7 +541,9 @@ impl TxPool { ))); } } else { - panic!("calculate_min_replace_fee must success"); + return Err(Reject::RBFRejected( + "calculate_min_replace_fee failed".to_string(), + )); } let pool_entries = conflicts diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index ee0ed572aa..60f550e35d 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -134,7 +134,6 @@ impl TxPoolService { )); // remove old tx from tx_pool, not happened in service so we didn't call reject callbacks // here we call them manually - // TODO: how to call reject notify like service? self.callbacks.call_reject(tx_pool, &old, reject) } } From b4aec5aba3306b141bd9aaab5ca754c91a79a121 Mon Sep 17 00:00:00 2001 From: yukang Date: Fri, 4 Aug 2023 11:14:09 +0800 Subject: [PATCH 135/267] use upstream multi_index_map --- Cargo.lock | 41 +++++++++++++-------- tx-pool/Cargo.toml | 2 +- tx-pool/src/component/commit_txs_scanner.rs | 2 +- tx-pool/src/component/pool_map.rs | 9 ++--- 4 files changed, 30 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8e42a97bf3..b83e58c5f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1423,9 +1423,9 @@ dependencies = [ "ckb-types", "ckb-util", "ckb-verification", - "ckb_multi_index_map", "hyper", "lru", + "multi_index_map", "rand 0.8.5", "rustc-hash", "sentry", @@ -1553,21 +1553,6 @@ dependencies = [ "paste", ] -[[package]] -name = "ckb_multi_index_map" -version = "0.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53c20823dfd9f9a8e30faa3b0bdcab4801fb2544957586fada3884c78dcdf38b" -dependencies = [ - "convert_case 0.6.0", - "proc-macro-error", - "proc-macro2", - "quote", - "rustc-hash", - "slab", - "syn", -] - [[package]] name = "clang-sys" version = "1.3.1" @@ -3143,6 +3128,30 @@ dependencies = [ "faster-hex", ] +[[package]] +name = "multi_index_map" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03f409d5f41e6b8a2faa0b363c4523742f0ef5e4015fd4e298a5c7dbb3a3e01c" +dependencies = [ + "multi_index_map_derive", + "rustc-hash", + "slab", +] + +[[package]] +name = "multi_index_map_derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98e81cd436463efbaa95a2d2bac3028e6998a4bb2ef8a82a661de3567bb79d5a" +dependencies = [ + "convert_case 0.6.0", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "native-tls" version = "0.2.11" diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index 04e465561a..c2c6c2ec67 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -36,7 +36,7 @@ sentry = { version = "0.26.0", optional = true } serde_json = "1.0" rand = "0.8.4" hyper = { version = "0.14", features = ["http1", "client", "tcp"] } -ckb_multi_index_map = "0.0.2" # ckb team fork crate +multi_index_map = "0.6.0" slab = "0.4" rustc-hash = "1.1" tokio-util = "0.7.8" diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index ecb5619e6d..7795a711c4 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -1,9 +1,9 @@ extern crate slab; use crate::component::pool_map::PoolMap; use crate::component::{entry::TxEntry, sort_key::AncestorsScoreSortKey}; -use ckb_multi_index_map::MultiIndexMap; use ckb_types::{core::Cycle, packed::ProposalShortId}; use ckb_util::LinkedHashMap; +use multi_index_map::MultiIndexMap; use std::collections::HashSet; // A template data struct used to store modified entries when package txs diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index ca2e224f69..2480cf63ae 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -6,8 +6,8 @@ use crate::component::links::{Relation, TxLinksMap}; use crate::component::sort_key::{AncestorsScoreSortKey, EvictKey}; use crate::error::Reject; use crate::TxEntry; -use ckb_logger::{debug, trace}; -use ckb_multi_index_map::MultiIndexMap; + +use ckb_logger::trace; use ckb_types::core::error::OutPointError; use ckb_types::packed::OutPoint; use ckb_types::prelude::*; @@ -16,6 +16,7 @@ use ckb_types::{ core::TransactionView, packed::{Byte32, CellOutput, ProposalShortId}, }; +use multi_index_map::MultiIndexMap; use std::collections::HashSet; use super::links::TxLinks; @@ -114,10 +115,6 @@ impl PoolMap { self.get_by_id(id).expect("unconsistent pool") } - fn get_by_id_checked(&self, id: &ProposalShortId) -> &PoolEntry { - self.get_by_id(id).expect("unconsistent pool") - } - pub(crate) fn get_by_status(&self, status: Status) -> Vec<&PoolEntry> { self.entries.get_by_status(&status) } From c902bc2280599947cb2c709863f51e227abdffd1 Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 7 Aug 2023 20:58:04 +0800 Subject: [PATCH 136/267] fix fmt --- Cargo.lock | 17 +---------------- tx-pool/src/component/pool_map.rs | 1 - tx-pool/src/component/tests/entry.rs | 3 +-- 3 files changed, 2 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8cc79b7eab..b83e58c5f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1423,9 +1423,9 @@ dependencies = [ "ckb-types", "ckb-util", "ckb-verification", - "ckb_multi_index_map", "hyper", "lru", + "multi_index_map", "rand 0.8.5", "rustc-hash", "sentry", @@ -1553,21 +1553,6 @@ dependencies = [ "paste", ] -[[package]] -name = "ckb_multi_index_map" -version = "0.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53c20823dfd9f9a8e30faa3b0bdcab4801fb2544957586fada3884c78dcdf38b" -dependencies = [ - "convert_case 0.6.0", - "proc-macro-error", - "proc-macro2", - "quote", - "rustc-hash", - "slab", - "syn", -] - [[package]] name = "clang-sys" version = "1.3.1" diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index 06cd88bf60..edf1d2d0b3 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -502,4 +502,3 @@ impl PoolMap { }); } } - diff --git a/tx-pool/src/component/tests/entry.rs b/tx-pool/src/component/tests/entry.rs index 77a96bb8f9..6ae6708af2 100644 --- a/tx-pool/src/component/tests/entry.rs +++ b/tx-pool/src/component/tests/entry.rs @@ -1,6 +1,5 @@ -use ckb_types::core::{Capacity, FeeRate}; use crate::component::sort_key::EvictKey; - +use ckb_types::core::{Capacity, FeeRate}; #[test] fn test_min_fee_and_weight_evict() { From dfcf067eff43f9763dd0858755f53f69ea111dcf Mon Sep 17 00:00:00 2001 From: zhangsoledad <787953403@qq.com> Date: Wed, 9 Aug 2023 16:49:56 +0800 Subject: [PATCH 137/267] fix: data2 value --- rpc/README.md | 4 +- util/jsonrpc-types/src/blockchain.rs | 10 +- util/types/src/core/blockchain.rs | 13 +- util/types/src/core/tests/blockchain.rs | 4 +- util/types/src/extension/tests/check_data.rs | 133 ++++++++++--------- 5 files changed, 92 insertions(+), 72 deletions(-) diff --git a/rpc/README.md b/rpc/README.md index 6db0bf3d40..35de2c0e32 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -6681,10 +6681,12 @@ Describes the lock script and type script for a cell. Specifies how the script `code_hash` is used to match the script code and how to run the code. -Allowed kinds: “data”, “type” and “data1”. +Allowed kinds: “data”, “type”, “data1” and “data2” Refer to the section [Code Locating](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0022-transaction-structure/0022-transaction-structure.md#code-locating) and [Upgradable Script](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0022-transaction-structure/0022-transaction-structure.md#upgradable-script) in the RFC *CKB Transaction Structure*. +The hash type is split into the high 7 bits and the low 1 bit, when the low 1 bit is 1, it indicates the type, when the low 1 bit is 0, it indicates the data, and then it relies on the high 7 bits to indicate that the data actually corresponds to the version. + `ScriptHashType` is equivalent to `"data" | "type" | "data1" | "data2"`. * Type “data” matches script code via cell data hash, and run the script code in v0 CKB VM. diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index 8acaf95ce7..2df38a7959 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -13,11 +13,17 @@ use std::fmt; /// Specifies how the script `code_hash` is used to match the script code and how to run the code. /// -/// Allowed kinds: "data", "type" and "data1". +/// Allowed kinds: "data", "type", "data1" and “data2” /// /// Refer to the section [Code Locating](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0022-transaction-structure/0022-transaction-structure.md#code-locating) /// and [Upgradable Script](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0022-transaction-structure/0022-transaction-structure.md#upgradable-script) /// in the RFC *CKB Transaction Structure*. +/// +/// The hash type is split into the high 7 bits and the low 1 bit, +/// when the low 1 bit is 1, it indicates the type, +/// when the low 1 bit is 0, it indicates the data, +/// and then it relies on the high 7 bits to indicate +/// that the data actually corresponds to the version. #[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)] #[serde(rename_all = "snake_case")] pub enum ScriptHashType { @@ -28,7 +34,7 @@ pub enum ScriptHashType { /// Type "data1" matches script code via cell data hash, and run the script code in v1 CKB VM. Data1 = 2, /// Type "data2" matches script code via cell data hash, and run the script code in v2 CKB VM. - Data2 = 3, + Data2 = 4, } impl Default for ScriptHashType { diff --git a/util/types/src/core/blockchain.rs b/util/types/src/core/blockchain.rs index b28a126433..7121bf09de 100644 --- a/util/types/src/core/blockchain.rs +++ b/util/types/src/core/blockchain.rs @@ -3,6 +3,11 @@ use ckb_error::OtherError; use crate::packed; /// Specifies how the script `code_hash` is used to match the script code and how to run the code. +/// The hash type is split into the high 7 bits and the low 1 bit, +/// when the low 1 bit is 1, it indicates the type, +/// when the low 1 bit is 0, it indicates the data, +/// and then it relies on the high 7 bits to indicate +/// that the data actually corresponds to the version. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum ScriptHashType { /// Type "data" matches script code via cell data hash, and run the script code in v0 CKB VM. @@ -12,7 +17,7 @@ pub enum ScriptHashType { /// Type "data1" matches script code via cell data hash, and run the script code in v1 CKB VM. Data1 = 2, /// Type "data2" matches script code via cell data hash, and run the script code in v2 CKB VM. - Data2 = 3, + Data2 = 4, } impl Default for ScriptHashType { @@ -29,7 +34,7 @@ impl TryFrom for ScriptHashType { 0 => Ok(ScriptHashType::Data), 1 => Ok(ScriptHashType::Type), 2 => Ok(ScriptHashType::Data1), - 3 => Ok(ScriptHashType::Data2), + 4 => Ok(ScriptHashType::Data2), _ => Err(OtherError::new(format!("Invalid script hash type {v}"))), } } @@ -46,7 +51,7 @@ impl TryFrom for ScriptHashType { impl ScriptHashType { #[inline] pub(crate) fn verify_value(v: u8) -> bool { - v <= 3 + v <= 4 && v != 3 } } @@ -57,7 +62,7 @@ impl Into for ScriptHashType { Self::Data => 0, Self::Type => 1, Self::Data1 => 2, - Self::Data2 => 3, + Self::Data2 => 4, } } } diff --git a/util/types/src/core/tests/blockchain.rs b/util/types/src/core/tests/blockchain.rs index 0fa63847de..2c37578c28 100644 --- a/util/types/src/core/tests/blockchain.rs +++ b/util/types/src/core/tests/blockchain.rs @@ -10,10 +10,10 @@ fn test_script_hash_type() { let default_value: u8 = default.into(); assert_eq!(default_value, 0); - let max_value = 3u8; + let max_value = 4u8; for v in 0..32 { let res = ScriptHashType::try_from(v); - if v <= max_value { + if v <= max_value && v != 3 { let value: u8 = res.unwrap().into(); assert_eq!(value, v); } else { diff --git a/util/types/src/extension/tests/check_data.rs b/util/types/src/extension/tests/check_data.rs index 05489411dd..e8f4943e42 100644 --- a/util/types/src/extension/tests/check_data.rs +++ b/util/types/src/extension/tests/check_data.rs @@ -29,74 +29,81 @@ fn test_check_data_via_transaction( #[test] fn check_data() { for ht in 0..4 { - for dt in 0..2 { - let ht_right = ht.into(); - let dt_right = dt.into(); - let ht_error = 4.into(); - let dt_error = 2.into(); + if ht != 3 { + for dt in 0..2 { + let ht_right = ht.into(); + let dt_right = dt.into(); + let ht_error = 3.into(); + let dt_error = 2.into(); - let script_right = packed::Script::new_builder().hash_type(ht_right).build(); - let script_error = packed::Script::new_builder().hash_type(ht_error).build(); + let script_right = packed::Script::new_builder().hash_type(ht_right).build(); + let script_error = packed::Script::new_builder().hash_type(ht_error).build(); - let script_opt_right = packed::ScriptOpt::new_builder() - .set(Some(script_right.clone())) - .build(); - let script_opt_error = packed::ScriptOpt::new_builder() - .set(Some(script_error.clone())) - .build(); + let script_opt_right = packed::ScriptOpt::new_builder() + .set(Some(script_right.clone())) + .build(); + let script_opt_error = packed::ScriptOpt::new_builder() + .set(Some(script_error.clone())) + .build(); - let output_right1 = packed::CellOutput::new_builder() - .lock(script_right.clone()) - .build(); - let output_right2 = packed::CellOutput::new_builder() - .type_(script_opt_right.clone()) - .build(); - let output_error1 = packed::CellOutput::new_builder() - .lock(script_error.clone()) - .build(); - let output_error2 = packed::CellOutput::new_builder() - .type_(script_opt_error.clone()) - .build(); - let output_error3 = packed::CellOutput::new_builder() - .lock(script_right) - .type_(script_opt_error) - .build(); - let output_error4 = packed::CellOutput::new_builder() - .lock(script_error) - .type_(script_opt_right) - .build(); + let output_right1 = packed::CellOutput::new_builder() + .lock(script_right.clone()) + .build(); + let output_right2 = packed::CellOutput::new_builder() + .type_(script_opt_right.clone()) + .build(); + let output_error1 = packed::CellOutput::new_builder() + .lock(script_error.clone()) + .build(); + let output_error2 = packed::CellOutput::new_builder() + .type_(script_opt_error.clone()) + .build(); + let output_error3 = packed::CellOutput::new_builder() + .lock(script_right) + .type_(script_opt_error) + .build(); + let output_error4 = packed::CellOutput::new_builder() + .lock(script_error) + .type_(script_opt_right) + .build(); - let cell_dep_right = packed::CellDep::new_builder().dep_type(dt_right).build(); - let cell_dep_error = packed::CellDep::new_builder().dep_type(dt_error).build(); + let cell_dep_right = packed::CellDep::new_builder().dep_type(dt_right).build(); + let cell_dep_error = packed::CellDep::new_builder().dep_type(dt_error).build(); - test_check_data_via_transaction(true, &[], &[], &[]); - test_check_data_via_transaction(true, &[&output_right1], &[&[]], &[&cell_dep_right]); - test_check_data_via_transaction( - true, - &[&output_right1, &output_right2], - &[&[], &[]], - &[&cell_dep_right, &cell_dep_right], - ); - test_check_data_via_transaction(false, &[&output_error1], &[&[]], &[]); - test_check_data_via_transaction(false, &[&output_error2], &[&[]], &[]); - test_check_data_via_transaction(false, &[&output_error3], &[&[]], &[]); - test_check_data_via_transaction(false, &[&output_error4], &[&[]], &[]); - test_check_data_via_transaction(false, &[], &[], &[&cell_dep_error]); - test_check_data_via_transaction( - false, - &[ - &output_right1, - &output_right2, - &output_error1, - &output_error2, - &output_error3, - &output_error4, - ], - &[&[], &[], &[], &[], &[], &[]], - &[&cell_dep_right, &cell_dep_error], - ); - test_check_data_via_transaction(false, &[&output_right1], &[], &[&cell_dep_right]); - test_check_data_via_transaction(false, &[], &[&[]], &[&cell_dep_right]); + test_check_data_via_transaction(true, &[], &[], &[]); + test_check_data_via_transaction( + true, + &[&output_right1], + &[&[]], + &[&cell_dep_right], + ); + test_check_data_via_transaction( + true, + &[&output_right1, &output_right2], + &[&[], &[]], + &[&cell_dep_right, &cell_dep_right], + ); + test_check_data_via_transaction(false, &[&output_error1], &[&[]], &[]); + test_check_data_via_transaction(false, &[&output_error2], &[&[]], &[]); + test_check_data_via_transaction(false, &[&output_error3], &[&[]], &[]); + test_check_data_via_transaction(false, &[&output_error4], &[&[]], &[]); + test_check_data_via_transaction(false, &[], &[], &[&cell_dep_error]); + test_check_data_via_transaction( + false, + &[ + &output_right1, + &output_right2, + &output_error1, + &output_error2, + &output_error3, + &output_error4, + ], + &[&[], &[], &[], &[], &[], &[]], + &[&cell_dep_right, &cell_dep_error], + ); + test_check_data_via_transaction(false, &[&output_right1], &[], &[&cell_dep_right]); + test_check_data_via_transaction(false, &[], &[&[]], &[&cell_dep_right]); + } } } } From 21f51591c4ada88ba345924b7339aebd3f2c280c Mon Sep 17 00:00:00 2001 From: yukang Date: Thu, 10 Aug 2023 13:04:16 +0800 Subject: [PATCH 138/267] use hashed_non_unique for status in entry --- tx-pool/src/component/pool_map.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index ce2c05bb0c..238c63ad83 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -45,7 +45,7 @@ pub struct PoolEntry { pub id: ProposalShortId, #[multi_index(ordered_non_unique)] pub score: AncestorsScoreSortKey, - #[multi_index(ordered_non_unique)] + #[multi_index(hashed_non_unique)] pub status: Status, #[multi_index(ordered_non_unique)] pub evict_key: EvictKey, From 84d4bf95a37ecda4eddf17237922845bc6ec6c5e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 11 Aug 2023 16:43:50 +0800 Subject: [PATCH 139/267] Consensus: let `epoch_duration_target` affect epoch length Signed-off-by: Eval EXEC --- spec/src/consensus.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index 4f39d87a77..0bbf219b33 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -804,6 +804,13 @@ impl Consensus { let remainder_reward = Capacity::shannons(primary_epoch_reward % epoch.length()); + let mut next_epoch_length = epoch.length(); + let epoch_length_from_param = + cmp::max(self.epoch_duration_target() / MIN_BLOCK_INTERVAL, 1); + if next_epoch_length != epoch_length_from_param { + next_epoch_length = self.epoch_duration_target() / MIN_BLOCK_INTERVAL; + } + let dummy_epoch_ext = epoch .clone() .into_builder() @@ -812,6 +819,7 @@ impl Consensus { .number(epoch.number() + 1) .last_block_hash_in_previous_epoch(header.hash()) .start_number(header.number() + 1) + .length(next_epoch_length) .build(); NextBlockEpoch::HeadBlock(dummy_epoch_ext) } else { From c07182f718579599f9d511ea8d2fdaf04ce0a4fe Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 11 Aug 2023 17:38:46 +0800 Subject: [PATCH 140/267] Consensus: neat epoch_length calculation in permanent difficulty mode Co-authored-by: Quake Wang --- spec/src/consensus.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index 0bbf219b33..b3e1e3b9b2 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -804,12 +804,7 @@ impl Consensus { let remainder_reward = Capacity::shannons(primary_epoch_reward % epoch.length()); - let mut next_epoch_length = epoch.length(); - let epoch_length_from_param = - cmp::max(self.epoch_duration_target() / MIN_BLOCK_INTERVAL, 1); - if next_epoch_length != epoch_length_from_param { - next_epoch_length = self.epoch_duration_target() / MIN_BLOCK_INTERVAL; - } +let next_epoch_length = (self.epoch_duration_target() + MIN_BLOCK_INTERVAL - 1) / MIN_BLOCK_INTERVAL let dummy_epoch_ext = epoch .clone() From 6e572ef0f4ae890a6faa8574fd4a0033aca4403b Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Fri, 11 Aug 2023 17:39:26 +0800 Subject: [PATCH 141/267] Fix fmt --- spec/src/consensus.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/spec/src/consensus.rs b/spec/src/consensus.rs index b3e1e3b9b2..776447aef0 100644 --- a/spec/src/consensus.rs +++ b/spec/src/consensus.rs @@ -804,7 +804,9 @@ impl Consensus { let remainder_reward = Capacity::shannons(primary_epoch_reward % epoch.length()); -let next_epoch_length = (self.epoch_duration_target() + MIN_BLOCK_INTERVAL - 1) / MIN_BLOCK_INTERVAL + let next_epoch_length = (self.epoch_duration_target() + MIN_BLOCK_INTERVAL + - 1) + / MIN_BLOCK_INTERVAL; let dummy_epoch_ext = epoch .clone() From 7364dfbc382236a7fa0c47d89736881944b7b2d6 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 10 Aug 2023 20:44:05 +0800 Subject: [PATCH 142/267] Test: Add bats test for `epoch_length_in_dummy` param Signed-off-by: Eval EXEC --- util/app-config/src/tests/cli_test.sh | 9 +- .../tests/later_bats_job/change_epoch.bats | 118 ++++++++++++++++++ 2 files changed, 126 insertions(+), 1 deletion(-) create mode 100644 util/app-config/src/tests/later_bats_job/change_epoch.bats diff --git a/util/app-config/src/tests/cli_test.sh b/util/app-config/src/tests/cli_test.sh index 707b2b1dea..967e5c60e4 100755 --- a/util/app-config/src/tests/cli_test.sh +++ b/util/app-config/src/tests/cli_test.sh @@ -13,6 +13,7 @@ trap cleanup EXIT cp target/release/ckb ${CKB_BATS_TESTBED} cp util/app-config/src/tests/*.bats ${CKB_BATS_TESTBED} +cp -r util/app-config/src/tests/later_bats_job ${CKB_BATS_TESTBED} cp util/app-config/src/tests/*.sh ${CKB_BATS_TESTBED} if [ ! -d "/tmp/ckb_bats_assets/" ]; then @@ -47,9 +48,15 @@ export TMP_DIR=${CKB_BATS_TESTBED}/tmp_dir mkdir ${TMP_DIR} for bats_cases in *.bats; do - bats "$bats_cases" + bats --verbose-run --print-output-on-failure --show-output-of-passing-tests "$bats_cases" ret=$? if [ "$ret" -ne "0" ]; then exit "$ret" fi done + +bats --verbose-run --print-output-on-failure --show-output-of-passing-tests ./later_bats_job/change_epoch.bats +ret=$? +if [ "$ret" -ne "0" ]; then + exit "$ret" +fi diff --git a/util/app-config/src/tests/later_bats_job/change_epoch.bats b/util/app-config/src/tests/later_bats_job/change_epoch.bats new file mode 100644 index 0000000000..87cca69126 --- /dev/null +++ b/util/app-config/src/tests/later_bats_job/change_epoch.bats @@ -0,0 +1,118 @@ +#!/usr/bin/env bats +set -e + +bats_load_library 'bats-assert' +bats_load_library 'bats-support' + +NUMBER_OFFSET=0 +NUMBER_BITS=24 +NUMBER_MAXIMUM_VALUE=$((1 << NUMBER_BITS)) +NUMBER_MASK=$((NUMBER_MAXIMUM_VALUE - 1)) + +INDEX_OFFSET=$((NUMBER_BITS)) +INDEX_BITS=16 +INDEX_MAXIMUM_VALUE=$((1 << INDEX_BITS)) +INDEX_MASK=$((INDEX_MAXIMUM_VALUE - 1)) + +LENGTH_OFFSET=$((NUMBER_BITS + INDEX_BITS)) +LENGTH_BITS=16 +LENGTH_MAXIMUM_VALUE=$((1 << LENGTH_BITS)) +LENGTH_MASK=$((LENGTH_MAXIMUM_VALUE - 1)) + +function extract_epoch_number() { + local value=$1 + echo $(( (value >> NUMBER_OFFSET) & NUMBER_MASK )) +} + +function extract_epoch_index() { + local value=$1 + echo $(( (value >> INDEX_OFFSET) & INDEX_MASK )) +} + +function extract_epoch_length() { + local value=$1 + echo $(( (value >> LENGTH_OFFSET) & LENGTH_MASK )) +} + +function tip_header_epoch() { + curl -s -X POST http://127.0.0.1:8114 \ + -H 'Content-Type: application/json' \ + -d '{ "id": 42, "jsonrpc": "2.0", "method": "get_tip_header", "params": [ ] }' \ + | jq .result.epoch | xargs -I{} printf "%d\n" {} +} + +function tip_header_number() { + curl -s -X POST http://127.0.0.1:8114 \ + -H 'Content-Type: application/json' \ + -d '{ "id": 42, "jsonrpc": "2.0", "method": "get_tip_header", "params": [ ] }' \ + | jq .result.number | xargs -I{} printf "%d\n" {} +} + +function block_kill() { + kill $1 + while kill -0 $1; do + echo "waiting for $1 to exit" + sleep 1 + done +} + +function ckb_change_epoch_length_for_dumm_mode { #@test + ckb run -C ${CKB_DIRNAME} &> /dev/null & + + CKB_NODE_PID=$! + sleep 5 + + + TIP_EPOCH=$(tip_header_epoch) + + TIP_EPOCH_NUMBER=$(extract_epoch_number ${TIP_EPOCH}) + TIP_EPOCH_INDEX=$(extract_epoch_index ${TIP_EPOCH}) + TIP_EPOCH_LENGTH=$(extract_epoch_length ${TIP_EPOCH}) + TIP_NUMBER=$(tip_header_number) + + echo tip_number is ${TIP_NUMBER} + echo tip_epoch_number is ${TIP_EPOCH_NUMBER}, tip_epoch_index is ${TIP_EPOCH_INDEX}, tip_epoch_length is ${TIP_EPOCH_LENGTH} + + block_kill ${CKB_NODE_PID} + + wget https://raw.githubusercontent.com/nervosnetwork/ckb/develop/resource/specs/mainnet.toml + + ckb init -c dev --import-spec mainnet.toml --force + + sed -i 's/Eaglesong/Dummy/g' specs/dev.toml + sed -i '/genesis_epoch_length = 1743/a permanent_difficulty_in_dummy = true\nepoch_duration_target = 80\n' specs/dev.toml + + sed -i 's/poll_interval = 1000/poll_interval = 1/g' ckb-miner.toml + sed -i 's/value = 5000/value = 1/g' ckb-miner.toml + + sed -i 's/# \[block_assembler\]/\[block_assembler\]/g' ckb.toml + sed -i 's/# code_hash =/code_hash =/g' ckb.toml + sed -i 's/# args = "ckb-cli util blake2b --prefix-160 "/args = "0xc8328aabcd9b9e8e64fbc566c4385c3bdeb219d7"/g' ckb.toml + sed -i 's/# hash_type =/hash_type =/g' ckb.toml + sed -i 's/# message = "A 0x-prefixed hex string"/message = "0x"/g' ckb.toml + + + + ckb run --skip-spec-check --overwrite-spec -C ${CKB_DIRNAME} &> /dev/null & + CKB_NODE_PID=$! + + ckb miner -C ${CKB_DIRNAME} &> /dev/null & + CKB_MINER_PID=$! + + sleep 5 + + while [ $(tip_header_number) -lt $(( ${TIP_NUMBER} + ${TIP_EPOCH_LENGTH} )) ]; do + echo waiting for tip_number to be $(( ${TIP_NUMBER} + ${TIP_EPOCH_LENGTH} )) + sleep 1 + done + + echo latest tip_header_number is $(tip_header_number) + echo latest tip_header_epoch length is $(extract_epoch_length $(tip_header_epoch)) + echo latest tip_header_epoch number is $(extract_epoch_number $(tip_header_epoch)) + + # we specified epoch_duration_target = 80 in dev.toml, so the epoch length should be 10 + assert [ $(extract_epoch_length $(tip_header_epoch)) -eq 10 ] + + block_kill ${CKB_NODE_PID} + block_kill ${CKB_MINER_PID} +} From 219eb75ae49d28a2d9036da4707fba5a7855ce30 Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Thu, 10 Aug 2023 20:52:30 +0800 Subject: [PATCH 143/267] chore: bats test should use `make prod` --- .github/workflows/ci_integration_tests_ubuntu.yaml | 2 +- Makefile | 2 +- util/app-config/src/tests/cli_test.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci_integration_tests_ubuntu.yaml b/.github/workflows/ci_integration_tests_ubuntu.yaml index 4af026b40b..467ea84cbc 100644 --- a/.github/workflows/ci_integration_tests_ubuntu.yaml +++ b/.github/workflows/ci_integration_tests_ubuntu.yaml @@ -93,7 +93,7 @@ jobs: - uses: actions/checkout@v3 - name: build ckb and run bats cli test run: | - make build + make prod make cli-test shell: bash env: diff --git a/Makefile b/Makefile index 7d7adb98fb..199a045172 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,7 @@ doc-test: ## Run doc tests cargo test --all --doc .PHONY: cli-test -cli-test: build # Run ckb command line usage bats test +cli-test: prod # Run ckb command line usage bats test ./util/app-config/src/tests/cli_test.sh .PHONY: test diff --git a/util/app-config/src/tests/cli_test.sh b/util/app-config/src/tests/cli_test.sh index 967e5c60e4..fb592bc5b6 100755 --- a/util/app-config/src/tests/cli_test.sh +++ b/util/app-config/src/tests/cli_test.sh @@ -11,7 +11,7 @@ function cleanup { trap cleanup EXIT -cp target/release/ckb ${CKB_BATS_TESTBED} +cp target/prod/ckb ${CKB_BATS_TESTBED} cp util/app-config/src/tests/*.bats ${CKB_BATS_TESTBED} cp -r util/app-config/src/tests/later_bats_job ${CKB_BATS_TESTBED} cp util/app-config/src/tests/*.sh ${CKB_BATS_TESTBED} From 05e2f8a90bb1ea8015991f98ed501ca92817d713 Mon Sep 17 00:00:00 2001 From: yukang Date: Fri, 11 Aug 2023 21:36:12 +0800 Subject: [PATCH 144/267] only check_rbf for resolve dead outpoint --- test/src/main.rs | 1 + test/src/specs/tx_pool/replace.rs | 39 +++++++++++++++++++++++++++++-- tx-pool/src/process.rs | 5 +++- 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/test/src/main.rs b/test/src/main.rs index 4e9d0fbc5c..0658c18d01 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -461,6 +461,7 @@ fn all_specs() -> Vec> { Box::new(DifferentTxsWithSameInputWithOutRBF), Box::new(RbfBasic), Box::new(RbfSameInput), + Box::new(RbfOnlyForResolveDead), Box::new(RbfSameInputwithLessFee), Box::new(RbfTooManyDescendants), Box::new(RbfContainNewTx), diff --git a/test/src/specs/tx_pool/replace.rs b/test/src/specs/tx_pool/replace.rs index 937f63d32c..31ab21efd6 100644 --- a/test/src/specs/tx_pool/replace.rs +++ b/test/src/specs/tx_pool/replace.rs @@ -3,8 +3,7 @@ use ckb_jsonrpc_types::Status; use ckb_logger::info; use ckb_types::{ core::{capacity_bytes, Capacity, TransactionView}, - packed::CellOutputBuilder, - packed::{CellInput, OutPoint}, + packed::{Byte32, CellInput, CellOutputBuilder, OutPoint}, prelude::*, }; @@ -112,6 +111,42 @@ impl Spec for RbfSameInput { } } +pub struct RbfOnlyForResolveDead; +impl Spec for RbfOnlyForResolveDead { + fn run(&self, nodes: &mut Vec) { + let node0 = &nodes[0]; + + node0.mine_until_out_bootstrap_period(); + node0.new_block_with_blocking(|template| template.number.value() != 13); + + let tx_hash_0 = node0.generate_transaction(); + + let tx1 = node0.new_transaction(tx_hash_0.clone()); + let tx1_clone = tx1.clone(); + + // This is an unknown input + let tx_hash_1 = Byte32::zero(); + let tx2 = tx1_clone + .as_advanced_builder() + .set_inputs(vec![{ + CellInput::new_builder() + .previous_output(OutPoint::new(tx_hash_1, 0)) + .build() + }]) + .build(); + + let res = node0 + .rpc_client() + .send_transaction_result(tx2.data().into()); + let message = res.err().unwrap().to_string(); + assert!(message.contains("TransactionFailedToResolve: Resolve failed Unknown")); + } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(1500); + } +} + pub struct RbfSameInputwithLessFee; // RBF Rule #3, #4 diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 60f550e35d..c386bd0524 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -19,6 +19,7 @@ use ckb_network::PeerIndex; use ckb_snapshot::Snapshot; use ckb_store::data_loader_wrapper::AsDataLoader; use ckb_store::ChainStore; +use ckb_types::core::error::OutPointError; use ckb_types::{ core::{cell::ResolvedTransaction, BlockView, Capacity, Cycle, HeaderView, TransactionView}, packed::{Byte32, ProposalShortId}, @@ -229,7 +230,9 @@ impl TxPoolService { Ok((tip_hash, rtx, status, fee, tx_size, HashSet::new())) } Err(err) => { - if tx_pool.enable_rbf() && matches!(err, Reject::Resolve(_)) { + if tx_pool.enable_rbf() + && matches!(err, Reject::Resolve(OutPointError::Dead(_))) + { // Try RBF check let conflicts = tx_pool.pool_map.find_conflict_tx(tx); if conflicts.is_empty() { From bc7e613c4534eaee943e4e6d2f2a97b3780a6cd2 Mon Sep 17 00:00:00 2001 From: yukang Date: Fri, 11 Aug 2023 21:56:21 +0800 Subject: [PATCH 145/267] cleanup find_conflict_tx --- test/src/specs/tx_pool/replace.rs | 5 ++--- tx-pool/src/component/pool_map.rs | 10 +++------- tx-pool/src/pool.rs | 6 ++---- 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/test/src/specs/tx_pool/replace.rs b/test/src/specs/tx_pool/replace.rs index 31ab21efd6..0bb4182d0f 100644 --- a/test/src/specs/tx_pool/replace.rs +++ b/test/src/specs/tx_pool/replace.rs @@ -121,12 +121,11 @@ impl Spec for RbfOnlyForResolveDead { let tx_hash_0 = node0.generate_transaction(); - let tx1 = node0.new_transaction(tx_hash_0.clone()); - let tx1_clone = tx1.clone(); + let tx1 = node0.new_transaction(tx_hash_0); // This is an unknown input let tx_hash_1 = Byte32::zero(); - let tx2 = tx1_clone + let tx2 = tx1 .as_advanced_builder() .set_inputs(vec![{ CellInput::new_builder() diff --git a/tx-pool/src/component/pool_map.rs b/tx-pool/src/component/pool_map.rs index edf1d2d0b3..acbf53858d 100644 --- a/tx-pool/src/component/pool_map.rs +++ b/tx-pool/src/component/pool_map.rs @@ -241,13 +241,9 @@ impl PoolMap { } pub(crate) fn find_conflict_tx(&self, tx: &TransactionView) -> HashSet { - let mut res = HashSet::default(); - for i in tx.input_pts_iter() { - if let Some(id) = self.edges.get_input_ref(&i) { - res.insert(id.clone()); - } - } - res + tx.input_pts_iter() + .filter_map(|out_point| self.edges.get_input_ref(&out_point).cloned()) + .collect() } pub(crate) fn resolve_conflict(&mut self, tx: &TransactionView) -> Vec { diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 757b1c541b..2125d81460 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -97,10 +97,8 @@ impl TxPool { if !self.enable_rbf() { return None; } - let conflicts = self.pool_map.find_conflict_tx(tx.transaction()); - // we don't allow conflicted Tx in pool now - assert!(conflicts.len() == 1); - self.calculate_min_replace_fee(&conflicts, tx.size) + let ids = vec![tx.proposal_short_id()].iter().cloned().collect(); + self.calculate_min_replace_fee(&ids, tx.size) } fn calculate_min_replace_fee( From 0c41fd657de61dd43e504141fdcbf5c76f34325e Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 14 Aug 2023 08:22:53 +0800 Subject: [PATCH 146/267] Improve code comment --- script/src/verify.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/src/verify.rs b/script/src/verify.rs index af1182af12..653689e6dd 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -1076,7 +1076,7 @@ impl Date: Mon, 14 Aug 2023 11:10:56 +0800 Subject: [PATCH 147/267] Fix `WithdrawDAO` usage of `epoch_duration_target` --- test/src/specs/dao/dao_tx.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/src/specs/dao/dao_tx.rs b/test/src/specs/dao/dao_tx.rs index ac4c8d1f4b..1026866542 100644 --- a/test/src/specs/dao/dao_tx.rs +++ b/test/src/specs/dao/dao_tx.rs @@ -12,7 +12,7 @@ pub struct WithdrawDAO; impl Spec for WithdrawDAO { fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { spec.params.genesis_epoch_length = Some(2); - spec.params.epoch_duration_target = Some(2); + spec.params.epoch_duration_target = Some(16); spec.params.permanent_difficulty_in_dummy = Some(true); } From 459e9fff1546b42db2cc7e628151431aeebf7dfd Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Mon, 14 Aug 2023 11:11:32 +0800 Subject: [PATCH 148/267] Fix `epoch_duration_target` usages in integration tests --- test/src/specs/dao/dao_tx.rs | 2 +- test/src/specs/dao/satoshi_dao_occupied.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/src/specs/dao/dao_tx.rs b/test/src/specs/dao/dao_tx.rs index 1026866542..64eec88332 100644 --- a/test/src/specs/dao/dao_tx.rs +++ b/test/src/specs/dao/dao_tx.rs @@ -40,7 +40,7 @@ pub struct WithdrawDAOWithOverflowCapacity; impl Spec for WithdrawDAOWithOverflowCapacity { fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { spec.params.genesis_epoch_length = Some(2); - spec.params.epoch_duration_target = Some(2); + spec.params.epoch_duration_target = Some(16); spec.params.permanent_difficulty_in_dummy = Some(true); } diff --git a/test/src/specs/dao/satoshi_dao_occupied.rs b/test/src/specs/dao/satoshi_dao_occupied.rs index d56d83b2c4..5756d25ceb 100644 --- a/test/src/specs/dao/satoshi_dao_occupied.rs +++ b/test/src/specs/dao/satoshi_dao_occupied.rs @@ -46,7 +46,7 @@ impl Spec for DAOWithSatoshiCellOccupied { let satoshi_cell = issue_satoshi_cell(); spec.genesis.issued_cells.push(satoshi_cell); spec.params.genesis_epoch_length = Some(2); - spec.params.epoch_duration_target = Some(2); + spec.params.epoch_duration_target = Some(16); spec.params.permanent_difficulty_in_dummy = Some(true); } } @@ -142,7 +142,7 @@ impl Spec for SpendSatoshiCell { spec.genesis.issued_cells.push(issue_satoshi_cell()); spec.genesis.satoshi_gift.satoshi_cell_occupied_ratio = satoshi_cell_occupied_ratio; spec.params.genesis_epoch_length = Some(2); - spec.params.epoch_duration_target = Some(2); + spec.params.epoch_duration_target = Some(16); spec.params.permanent_difficulty_in_dummy = Some(true); } } From 9e8dc8ba56b045f13e593166f3b49058c985a7f1 Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 14 Aug 2023 11:22:32 +0800 Subject: [PATCH 149/267] fix comments --- tx-pool/src/pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 2125d81460..82bf11948f 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -614,7 +614,7 @@ impl TxPool { } } - // Rule #6, any old Tx should be in `Pending` or `Gap` status + // Rule #6, all conflict Txs should be in `Pending` or `Gap` status if all_statuses .iter() .any(|s| ![Status::Pending, Status::Gap].contains(s)) From 4512dc8b448478cfb1b57553140086d2dda4a63d Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 14 Aug 2023 11:37:59 +0800 Subject: [PATCH 150/267] remove unnecessary clone in rbf --- tx-pool/src/pool.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 82bf11948f..59f3147cf9 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -555,16 +555,12 @@ impl TxPool { let mut all_statuses = pool_entries.iter().map(|e| e.status).collect::>(); - let conflicts = pool_entries - .iter() - .map(|e| e.inner.clone()) - .collect::>(); - // Rule #2, new tx don't contain any new unconfirmed inputs let mut inputs = HashSet::new(); - for c in conflicts.iter() { - inputs.extend(c.transaction().input_pts_iter()); + for c in pool_entries.iter() { + inputs.extend(c.inner.transaction().input_pts_iter()); } + if rtx .transaction .input_pts_iter() @@ -579,8 +575,8 @@ impl TxPool { // and the ancestor of the new tx don't have common set with the replaced tx's descendants let mut replace_count: usize = 0; let ancestors = self.pool_map.calc_ancestors(&short_id); - for conflict in conflicts.iter() { - let id = conflict.proposal_short_id(); + for conflict in pool_entries.iter() { + let id = conflict.inner.proposal_short_id(); let descendants = self.pool_map.calc_descendants(&id); replace_count += descendants.len() + 1; if replace_count > MAX_REPLACEMENT_CANDIDATES { From 95e3596c3943ec84116e6d2be9146e927eb89c7b Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 14 Aug 2023 16:18:33 +0800 Subject: [PATCH 151/267] refactor rbf check --- tx-pool/src/pool.rs | 107 ++++++++++++++++++++------------------------ 1 file changed, 48 insertions(+), 59 deletions(-) diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 59f3147cf9..caa05c12d5 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -97,25 +97,14 @@ impl TxPool { if !self.enable_rbf() { return None; } - let ids = vec![tx.proposal_short_id()].iter().cloned().collect(); - self.calculate_min_replace_fee(&ids, tx.size) + let entry = vec![self.get_pool_entry(&tx.proposal_short_id()).unwrap()]; + self.calculate_min_replace_fee(&entry, tx.size) } - fn calculate_min_replace_fee( - &self, - conflicts: &HashSet, - size: usize, - ) -> Option { - let entries = conflicts - .iter() - .map(|id| { - self.get_pool_entry(id) - .expect("conflict Tx should be in pool") - }) - .collect::>(); - + /// min_replace_fee = sum(replaced_txs.fee) + extra_rbf_fee + fn calculate_min_replace_fee(&self, conflicts: &[&PoolEntry], size: usize) -> Option { let extra_rbf_fee = self.config.min_rbf_rate.fee(size as u64); - let replaced_sum_fee = entries + let replaced_sum_fee = conflicts .iter() .map(|c| c.inner.fee) .try_fold(Capacity::zero(), |acc, x| acc.safe_add(x)); @@ -125,10 +114,12 @@ impl TxPool { if let Ok(res) = res { Some(res) } else { - let fees = entries.iter().map(|c| c.inner.fee).collect::>(); + let fees = conflicts.iter().map(|c| c.inner.fee).collect::>(); error!( "conflicts: {:?} replaced_sum_fee {:?} overflow by add {}", - conflicts, fees, extra_rbf_fee + conflicts.iter().map(|e| e.id.clone()).collect::>(), + fees, + extra_rbf_fee ); None } @@ -522,17 +513,23 @@ impl TxPool { &self, snapshot: &Snapshot, rtx: &ResolvedTransaction, - conflicts: &HashSet, + conflict_ids: &HashSet, fee: Capacity, tx_size: usize, ) -> Result<(), Reject> { assert!(self.enable_rbf()); - assert!(!conflicts.is_empty()); + assert!(!conflict_ids.is_empty()); + + let conflicts = conflict_ids + .iter() + .filter_map(|id| self.get_pool_entry(id)) + .collect::>(); + assert!(conflicts.len() == conflict_ids.len()); let short_id = rtx.transaction.proposal_short_id(); // Rule #4, new tx's fee need to higher than min_rbf_fee computed from the tx_pool configuration // Rule #3, new tx's fee need to higher than conflicts, here we only check the root tx - if let Some(min_replace_fee) = self.calculate_min_replace_fee(conflicts, tx_size) { + if let Some(min_replace_fee) = self.calculate_min_replace_fee(&conflicts, tx_size) { if fee < min_replace_fee { return Err(Reject::RBFRejected(format!( "Tx's current fee is {}, expect it to >= {} to replace old txs", @@ -545,19 +542,9 @@ impl TxPool { )); } - let pool_entries = conflicts - .iter() - .map(|id| { - self.get_pool_entry(id) - .expect("conflict Tx should be in pool") - }) - .collect::>(); - - let mut all_statuses = pool_entries.iter().map(|e| e.status).collect::>(); - // Rule #2, new tx don't contain any new unconfirmed inputs let mut inputs = HashSet::new(); - for c in pool_entries.iter() { + for c in conflicts.iter() { inputs.extend(c.inner.transaction().input_pts_iter()); } @@ -575,9 +562,8 @@ impl TxPool { // and the ancestor of the new tx don't have common set with the replaced tx's descendants let mut replace_count: usize = 0; let ancestors = self.pool_map.calc_ancestors(&short_id); - for conflict in pool_entries.iter() { - let id = conflict.inner.proposal_short_id(); - let descendants = self.pool_map.calc_descendants(&id); + for conflict in conflicts.iter() { + let descendants = self.pool_map.calc_descendants(&conflict.id); replace_count += descendants.len() + 1; if replace_count > MAX_REPLACEMENT_CANDIDATES { return Err(Reject::RBFRejected(format!( @@ -592,33 +578,36 @@ impl TxPool { )); } - for id in descendants.iter() { - if let Some(entry) = self.get_pool_entry(id) { - all_statuses.push(entry.status); - let hash = entry.inner.transaction().hash(); - if rtx - .transaction - .input_pts_iter() - .any(|pt| pt.tx_hash() == hash) - { - return Err(Reject::RBFRejected( - "new Tx contains inputs in descendants of to be replaced Tx" - .to_string(), - )); - } + let entries = descendants + .iter() + .filter_map(|id| self.get_pool_entry(id)) + .collect::>(); + + for entry in entries.iter() { + let hash = entry.inner.transaction().hash(); + if rtx + .transaction + .input_pts_iter() + .any(|pt| pt.tx_hash() == hash) + { + return Err(Reject::RBFRejected( + "new Tx contains inputs in descendants of to be replaced Tx".to_string(), + )); } } - } - // Rule #6, all conflict Txs should be in `Pending` or `Gap` status - if all_statuses - .iter() - .any(|s| ![Status::Pending, Status::Gap].contains(s)) - { - // Here we only refer to `Pending` status, since `Gap` is an internal status - return Err(Reject::RBFRejected( - "all conflict Txs should be in Pending status".to_string(), - )); + let mut entries_status = entries.iter().map(|e| e.status).collect::>(); + entries_status.push(conflict.status); + // Rule #6, all conflict Txs should be in `Pending` or `Gap` status + if entries_status + .iter() + .any(|s| ![Status::Pending, Status::Gap].contains(s)) + { + // Here we only refer to `Pending` status, since `Gap` is an internal status + return Err(Reject::RBFRejected( + "all conflict Txs should be in Pending status".to_string(), + )); + } } Ok(()) From 0374910128710f5e42dc6a4d39ddae0cfbfe695e Mon Sep 17 00:00:00 2001 From: yukang Date: Mon, 14 Aug 2023 21:57:29 +0800 Subject: [PATCH 152/267] add test case for RBFEnable and get_transaction_with_verbosity --- test/src/main.rs | 1 + test/src/specs/tx_pool/replace.rs | 38 ++++++++++++++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/test/src/main.rs b/test/src/main.rs index 0658c18d01..81da1d8a27 100644 --- a/test/src/main.rs +++ b/test/src/main.rs @@ -459,6 +459,7 @@ fn all_specs() -> Vec> { Box::new(TxsRelayOrder), Box::new(SendTxChain), Box::new(DifferentTxsWithSameInputWithOutRBF), + Box::new(RbfEnable), Box::new(RbfBasic), Box::new(RbfSameInput), Box::new(RbfOnlyForResolveDead), diff --git a/test/src/specs/tx_pool/replace.rs b/test/src/specs/tx_pool/replace.rs index 0bb4182d0f..90b972a640 100644 --- a/test/src/specs/tx_pool/replace.rs +++ b/test/src/specs/tx_pool/replace.rs @@ -7,6 +7,36 @@ use ckb_types::{ prelude::*, }; +pub struct RbfEnable; +impl Spec for RbfEnable { + fn run(&self, nodes: &mut Vec) { + let node0 = &nodes[0]; + + node0.mine_until_out_bootstrap_period(); + node0.new_block_with_blocking(|template| template.number.value() != 13); + let tx_hash_0 = node0.generate_transaction(); + let tx1 = node0.new_transaction(tx_hash_0); + + let output = CellOutputBuilder::default() + .capacity(capacity_bytes!(70).pack()) + .build(); + + let tx1 = tx1.as_advanced_builder().set_outputs(vec![output]).build(); + + node0.rpc_client().send_transaction(tx1.data().into()); + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx1.hash(), 2); + + assert_eq!(ret.min_replace_fee, None); + } + + fn modify_app_config(&self, config: &mut ckb_app_config::CKBAppConfig) { + config.tx_pool.min_rbf_rate = ckb_types::core::FeeRate(100); + config.tx_pool.min_fee_rate = ckb_types::core::FeeRate(100); + } +} + pub struct RbfBasic; impl Spec for RbfBasic { fn run(&self, nodes: &mut Vec) { @@ -30,6 +60,12 @@ impl Spec for RbfBasic { .build(); node0.rpc_client().send_transaction(tx1.data().into()); + let ret = node0 + .rpc_client() + .get_transaction_with_verbosity(tx1.hash(), 2); + // min_replace_fee is 363 + assert_eq!(ret.min_replace_fee.unwrap().to_string(), "0x16b"); + let res = node0 .rpc_client() .send_transaction_result(tx2.data().into()); @@ -50,7 +86,7 @@ impl Spec for RbfBasic { assert!(!commit_txs_hash.contains(&tx1.hash())); assert!(commit_txs_hash.contains(&tx2.hash())); - // when tx1 was confirmed, tx2 should be rejected + // when tx2 should be committed let ret = node0.rpc_client().get_transaction(tx2.hash()); assert!( matches!(ret.tx_status.status, Status::Committed), From f0ab0a168342d3961c3cb65cd51257613f040f2f Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Tue, 15 Aug 2023 14:20:10 +0800 Subject: [PATCH 153/267] Fix integration test for `epoch_duration_target` --- test/src/specs/hardfork/v2021/since.rs | 2 ++ test/src/specs/hardfork/v2023/vm_version2.rs | 1 + 2 files changed, 3 insertions(+) diff --git a/test/src/specs/hardfork/v2021/since.rs b/test/src/specs/hardfork/v2021/since.rs index 2953c11fa8..527c599ba4 100644 --- a/test/src/specs/hardfork/v2021/since.rs +++ b/test/src/specs/hardfork/v2021/since.rs @@ -78,6 +78,7 @@ impl Spec for CheckAbsoluteEpochSince { fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { spec.params.permanent_difficulty_in_dummy = Some(true); spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + spec.params.epoch_duration_target = Some(8 * GENESIS_EPOCH_LENGTH); } } @@ -113,6 +114,7 @@ impl Spec for CheckRelativeEpochSince { fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { spec.params.permanent_difficulty_in_dummy = Some(true); spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + spec.params.epoch_duration_target = Some(8 * GENESIS_EPOCH_LENGTH); } } diff --git a/test/src/specs/hardfork/v2023/vm_version2.rs b/test/src/specs/hardfork/v2023/vm_version2.rs index adbc3258c5..43d5406b39 100644 --- a/test/src/specs/hardfork/v2023/vm_version2.rs +++ b/test/src/specs/hardfork/v2023/vm_version2.rs @@ -97,6 +97,7 @@ impl Spec for CheckVmVersion2 { fn modify_chain_spec(&self, spec: &mut ckb_chain_spec::ChainSpec) { spec.params.permanent_difficulty_in_dummy = Some(true); spec.params.genesis_epoch_length = Some(GENESIS_EPOCH_LENGTH); + spec.params.epoch_duration_target = Some(8 * GENESIS_EPOCH_LENGTH); if spec.params.hardfork.is_none() { spec.params.hardfork = Some(Default::default()); } From 84e657dc80093199ab1feac1b0bbf6a94534d2ac Mon Sep 17 00:00:00 2001 From: EthanYuan Date: Tue, 15 Aug 2023 15:32:51 +0800 Subject: [PATCH 154/267] fix typos in rpc readme. --- rpc/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rpc/README.md b/rpc/README.md index 35de2c0e32..b361043232 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -6785,7 +6785,7 @@ Refer to RFC [CKB Transaction Structure](https://github.com/nervosnetwork/rfcs/b * `cell_deps`: `Array<` [`CellDep`](#type-celldep) `>` - An array of cell deps. - CKB locates lock script and type script code via cell deps. The script also can uses syscalls to read the cells here. + CKB locates lock script and type script code via cell deps. The script also can use syscalls to read the cells here. Unlike inputs, the live cells can be used as cell deps in multiple transactions. @@ -6947,7 +6947,7 @@ The JSON view of a transaction as well as its status. * `cycles`: [`Cycle`](#type-cycle) `|` `null` - The transaction consumed cycles. -* `time_added_to_pool`: [`Uint64`](#type-uint64) `|` `null` - If the transaction is in tx-pool, `time_added_to_pool` represent when it enter the tx-pool. unit: Millisecond +* `time_added_to_pool`: [`Uint64`](#type-uint64) `|` `null` - If the transaction is in tx-pool, `time_added_to_pool` represent when it enters the tx-pool. unit: Millisecond * `tx_status`: [`TxStatus`](#type-txstatus) - The Transaction status. From da0acc99fa9f55ab5cba39030d59f7ce7d52028e Mon Sep 17 00:00:00 2001 From: EthanYuan Date: Tue, 15 Aug 2023 15:34:38 +0800 Subject: [PATCH 155/267] fixing typos in comments --- util/jsonrpc-types/src/blockchain.rs | 8 ++++---- util/types/src/core/blockchain.rs | 2 +- util/types/src/core/tx_pool.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index 2df38a7959..bf40d24f8e 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -13,7 +13,7 @@ use std::fmt; /// Specifies how the script `code_hash` is used to match the script code and how to run the code. /// -/// Allowed kinds: "data", "type", "data1" and “data2” +/// Allowed kinds: "data", "type", "data1" and "data2" /// /// Refer to the section [Code Locating](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0022-transaction-structure/0022-transaction-structure.md#code-locating) /// and [Upgradable Script](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0022-transaction-structure/0022-transaction-structure.md#upgradable-script) @@ -384,7 +384,7 @@ pub struct Transaction { pub version: Version, /// An array of cell deps. /// - /// CKB locates lock script and type script code via cell deps. The script also can uses syscalls + /// CKB locates lock script and type script code via cell deps. The script also can use syscalls /// to read the cells here. /// /// Unlike inputs, the live cells can be used as cell deps in multiple transactions. @@ -536,7 +536,7 @@ pub struct TransactionWithStatusResponse { pub transaction: Option>, /// The transaction consumed cycles. pub cycles: Option, - /// If the transaction is in tx-pool, `time_added_to_pool` represent when it enter the tx-pool. unit: Millisecond + /// If the transaction is in tx-pool, `time_added_to_pool` represent when it enters the tx-pool. unit: Millisecond pub time_added_to_pool: Option, /// The Transaction status. pub tx_status: TxStatus, @@ -1461,7 +1461,7 @@ pub struct Buried { pub status: SoftForkStatus, /// Whether the rules are active pub active: bool, - /// The first epoch which the rules will be enforced + /// The first epoch which the rules will be enforced pub epoch: EpochNumber, } diff --git a/util/types/src/core/blockchain.rs b/util/types/src/core/blockchain.rs index 7121bf09de..9dc2e7cb73 100644 --- a/util/types/src/core/blockchain.rs +++ b/util/types/src/core/blockchain.rs @@ -6,7 +6,7 @@ use crate::packed; /// The hash type is split into the high 7 bits and the low 1 bit, /// when the low 1 bit is 1, it indicates the type, /// when the low 1 bit is 0, it indicates the data, -/// and then it relies on the high 7 bits to indicate +/// and then it relies on the high 7 bits to indicate /// that the data actually corresponds to the version. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum ScriptHashType { diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index d5b41e1d4d..a1f182905d 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -161,7 +161,7 @@ pub struct TransactionWithStatus { pub tx_status: TxStatus, /// The transaction verification consumed cycles pub cycles: Option, - /// If the transaction is in tx-pool, `time_added_to_pool` represent when it enter the tx-pool. unit: Millisecond + /// If the transaction is in tx-pool, `time_added_to_pool` represent when it enters the tx-pool. unit: Millisecond pub time_added_to_pool: Option, } From 3e4b947598f2de1e6716d7ed0d36a1dd2a58167b Mon Sep 17 00:00:00 2001 From: yukang Date: Tue, 15 Aug 2023 18:43:47 +0800 Subject: [PATCH 156/267] entries in Gap will not be fill proposals --- test/src/specs/tx_pool/descendant.rs | 23 +++++++++++++++++------ tx-pool/src/pool.rs | 2 -- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/test/src/specs/tx_pool/descendant.rs b/test/src/specs/tx_pool/descendant.rs index 04b5a5bc4b..ed23bb7db3 100644 --- a/test/src/specs/tx_pool/descendant.rs +++ b/test/src/specs/tx_pool/descendant.rs @@ -1,3 +1,5 @@ +use ckb_jsonrpc_types::Status; + use crate::specs::tx_pool::utils::prepare_tx_family; use crate::utils::{blank, commit, propose}; use crate::{Node, Spec}; @@ -166,21 +168,30 @@ impl Spec for SubmitTransactionWhenItsParentInProposed { // 1. Propose `tx_family.a` into proposed-pool. let family = prepare_tx_family(node); - node.submit_transaction(family.a()); - node.submit_block(&propose(node, &[family.a()])); + let tx_a = family.a(); + node.submit_transaction(tx_a); + node.submit_block(&propose(node, &[tx_a])); (0..=window.closest()).for_each(|_| { node.submit_block(&blank(node)); }); + // tx_a should in Proposed status + let tx_a_status = node.get_transaction(tx_a.hash()); + assert_eq!(tx_a_status.status, Status::Proposed); + // 2. Submit `tx_family.b` into pending-pool. Then we expect that miner propose it. node.submit_transaction(family.b()); let block = node.new_block_with_blocking(|template| template.proposals.is_empty()); + let union_proposal_ids = block.union_proposal_ids(); assert!( - block - .union_proposal_ids() - .contains(&family.b().proposal_short_id()), + union_proposal_ids.contains(&family.b().proposal_short_id()), "Miner should propose tx_family.b since it has never been proposed, actual: {:?}", - block.union_proposal_ids(), + union_proposal_ids, + ); + assert!( + !union_proposal_ids.contains(&tx_a.proposal_short_id()), + "Miner should not propose tx_family.a since it has been proposed, actual: {:?}", + union_proposal_ids, ); node.submit_block(&block); } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index caa05c12d5..b7952f4838 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -390,8 +390,6 @@ impl TxPool { let mut proposals = HashSet::with_capacity(limit); self.pool_map .fill_proposals(limit, exclusion, &mut proposals, Status::Pending); - self.pool_map - .fill_proposals(limit, exclusion, &mut proposals, Status::Gap); proposals } From adae0b0ee4e93d6e708f86bd37d26bfebfea3e2d Mon Sep 17 00:00:00 2001 From: EthanYuan Date: Wed, 12 Jul 2023 22:52:33 +0800 Subject: [PATCH 157/267] create crate ckb-gen-types --- Cargo.lock | 17 +- Cargo.toml | 1 + util/gen-types/Cargo.toml | 19 + util/gen-types/schemas/blockchain.mol | 118 + util/gen-types/src/conversion/blockchain.rs | 62 + util/gen-types/src/conversion/mod.rs | 4 + util/gen-types/src/conversion/primitive.rs | 114 + util/gen-types/src/conversion/utilities.rs | 82 + util/gen-types/src/core/mod.rs | 8 + util/gen-types/src/core/types.rs | 29 + util/gen-types/src/core/view.rs | 53 + util/gen-types/src/generated/blockchain.rs | 8938 +++++++++++++++++++ util/gen-types/src/generated/mod.rs | 8 + util/gen-types/src/hash/calc_hash.rs | 290 + util/gen-types/src/hash/hash.rs | 86 + util/gen-types/src/hash/mod.rs | 8 + util/gen-types/src/hash/utils.rs | 206 + util/gen-types/src/lib.rs | 22 + util/gen-types/src/prelude.rs | 13 + 19 files changed, 10077 insertions(+), 1 deletion(-) create mode 100644 util/gen-types/Cargo.toml create mode 100644 util/gen-types/schemas/blockchain.mol create mode 100644 util/gen-types/src/conversion/blockchain.rs create mode 100644 util/gen-types/src/conversion/mod.rs create mode 100644 util/gen-types/src/conversion/primitive.rs create mode 100644 util/gen-types/src/conversion/utilities.rs create mode 100644 util/gen-types/src/core/mod.rs create mode 100644 util/gen-types/src/core/types.rs create mode 100644 util/gen-types/src/core/view.rs create mode 100644 util/gen-types/src/generated/blockchain.rs create mode 100644 util/gen-types/src/generated/mod.rs create mode 100644 util/gen-types/src/hash/calc_hash.rs create mode 100644 util/gen-types/src/hash/hash.rs create mode 100644 util/gen-types/src/hash/mod.rs create mode 100644 util/gen-types/src/hash/utils.rs create mode 100644 util/gen-types/src/lib.rs create mode 100644 util/gen-types/src/prelude.rs diff --git a/Cargo.lock b/Cargo.lock index 6122a592fe..b912cbf667 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -194,6 +194,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95916998c798756098a4eb1b3f2cd510659705a9817bf203d61abd30fbec3e7b" +[[package]] +name = "blake2b-ref" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "294d17c72e0ba59fad763caa112368d0672083779cdebbb97164f4bb4c1e339a" + [[package]] name = "blake2b-rs" version = "0.2.0" @@ -719,11 +725,20 @@ dependencies = [ "tempfile", ] +[[package]] +name = "ckb-gen-types" +version = "0.112.0-pre" +dependencies = [ + "blake2b-ref 0.3.1", + "cfg-if 1.0.0", + "molecule", +] + [[package]] name = "ckb-hash" version = "0.112.0-pre" dependencies = [ - "blake2b-ref", + "blake2b-ref 0.2.1", "blake2b-rs", ] diff --git a/Cargo.toml b/Cargo.toml index 4d0c0cd5fe..6b3d0278a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,7 @@ members = [ "util/constant", "error", "util/multisig", + "util/gen-types", "util/types", "util/jsonrpc-types", "freezer", diff --git a/util/gen-types/Cargo.toml b/util/gen-types/Cargo.toml new file mode 100644 index 0000000000..1202ce315f --- /dev/null +++ b/util/gen-types/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "ckb-gen-types" +version = "0.112.0-pre" +authors = ["Nervos Core Dev "] +edition = "2021" +license = "MIT" +description = "Provides the generated types for CKB." +homepage = "https://github.com/nervosnetwork/ckb" +repository = "https://github.com/nervosnetwork/ckb" + +[features] +default = ["std"] +calc-hash = ["blake2b-ref"] +std = ["molecule/std"] + +[dependencies] +molecule = { version = "0.7.5", default-features = false } +cfg-if = "1.0" +blake2b-ref = { version = "0.3.1", default-features = false, optional = true } \ No newline at end of file diff --git a/util/gen-types/schemas/blockchain.mol b/util/gen-types/schemas/blockchain.mol new file mode 100644 index 0000000000..3ab343d856 --- /dev/null +++ b/util/gen-types/schemas/blockchain.mol @@ -0,0 +1,118 @@ +/* Basic Types */ + +// The `UintN` is used to store a `N` bits unsigned integer +// as a byte array in little endian. +array Uint32 [byte; 4]; +array Uint64 [byte; 8]; +array Uint128 [byte; 16]; +array Byte32 [byte; 32]; +array Uint256 [byte; 32]; + +vector Bytes ; +option BytesOpt (Bytes); + +vector BytesVec ; +vector Byte32Vec ; + +/* Types for Chain */ + +option ScriptOpt (Script); + +array ProposalShortId [byte; 10]; + +vector UncleBlockVec ; +vector TransactionVec ; +vector ProposalShortIdVec ; +vector CellDepVec ; +vector CellInputVec ; +vector CellOutputVec ; + +table Script { + code_hash: Byte32, + hash_type: byte, + args: Bytes, +} + +struct OutPoint { + tx_hash: Byte32, + index: Uint32, +} + +struct CellInput { + since: Uint64, + previous_output: OutPoint, +} + +table CellOutput { + capacity: Uint64, + lock: Script, + type_: ScriptOpt, +} + +struct CellDep { + out_point: OutPoint, + dep_type: byte, +} + +table RawTransaction { + version: Uint32, + cell_deps: CellDepVec, + header_deps: Byte32Vec, + inputs: CellInputVec, + outputs: CellOutputVec, + outputs_data: BytesVec, +} + +table Transaction { + raw: RawTransaction, + witnesses: BytesVec, +} + +struct RawHeader { + version: Uint32, + compact_target: Uint32, + timestamp: Uint64, + number: Uint64, + epoch: Uint64, + parent_hash: Byte32, + transactions_root: Byte32, + proposals_hash: Byte32, + extra_hash: Byte32, + dao: Byte32, +} + +struct Header { + raw: RawHeader, + nonce: Uint128, +} + +table UncleBlock { + header: Header, + proposals: ProposalShortIdVec, +} + +table Block { + header: Header, + uncles: UncleBlockVec, + transactions: TransactionVec, + proposals: ProposalShortIdVec, +} + +table BlockV1 { + header: Header, + uncles: UncleBlockVec, + transactions: TransactionVec, + proposals: ProposalShortIdVec, + extension: Bytes, +} + +table CellbaseWitness { + lock: Script, + message: Bytes, +} + +table WitnessArgs { + lock: BytesOpt, // Lock args + input_type: BytesOpt, // Type args for input + output_type: BytesOpt, // Type args for output +} diff --git a/util/gen-types/src/conversion/blockchain.rs b/util/gen-types/src/conversion/blockchain.rs new file mode 100644 index 0000000000..6a18a51984 --- /dev/null +++ b/util/gen-types/src/conversion/blockchain.rs @@ -0,0 +1,62 @@ +use crate::{bytes::Bytes, generated::packed, prelude::*, vec::Vec}; +impl Pack for [u8; 32] { + fn pack(&self) -> packed::Byte32 { + packed::Byte32::from_slice(&self[..]).expect("impossible: fail to pack [u8; 32]") + } +} + +impl<'r> Unpack<[u8; 32]> for packed::Byte32Reader<'r> { + fn unpack(&self) -> [u8; 32] { + let ptr = self.as_slice().as_ptr() as *const [u8; 32]; + unsafe { *ptr } + } +} +impl_conversion_for_entity_unpack!([u8; 32], Byte32); + +impl Pack for [u8; 10] { + fn pack(&self) -> packed::ProposalShortId { + packed::ProposalShortId::from_slice(&self[..]) + .expect("impossible: fail to pack to ProposalShortId") + } +} + +impl<'r> Unpack<[u8; 10]> for packed::ProposalShortIdReader<'r> { + fn unpack(&self) -> [u8; 10] { + let ptr = self.as_slice().as_ptr() as *const [u8; 10]; + unsafe { *ptr } + } +} +impl_conversion_for_entity_unpack!([u8; 10], ProposalShortId); + +impl Pack for Bytes { + fn pack(&self) -> packed::Bytes { + let len = (self.len() as u32).to_le_bytes(); + let mut v = Vec::with_capacity(4 + self.len()); + v.extend_from_slice(&len[..]); + v.extend_from_slice(&self[..]); + packed::Bytes::new_unchecked(v.into()) + } +} + +impl<'r> Unpack for packed::BytesReader<'r> { + fn unpack(&self) -> Bytes { + Bytes::from(self.raw_data().to_vec()) + } +} + +impl Unpack for packed::Bytes { + fn unpack(&self) -> Bytes { + self.raw_data() + } +} + +impl_conversion_for_vector!(Bytes, BytesVec, BytesVecReader); +impl_conversion_for_packed_optional_pack!(Script, ScriptOpt); +impl_conversion_for_packed_iterator_pack!(ProposalShortId, ProposalShortIdVec); +impl_conversion_for_packed_iterator_pack!(Bytes, BytesVec); +impl_conversion_for_packed_iterator_pack!(Transaction, TransactionVec); +impl_conversion_for_packed_iterator_pack!(CellDep, CellDepVec); +impl_conversion_for_packed_iterator_pack!(CellOutput, CellOutputVec); +impl_conversion_for_packed_iterator_pack!(CellInput, CellInputVec); +impl_conversion_for_packed_iterator_pack!(UncleBlock, UncleBlockVec); +impl_conversion_for_packed_iterator_pack!(Byte32, Byte32Vec); diff --git a/util/gen-types/src/conversion/mod.rs b/util/gen-types/src/conversion/mod.rs new file mode 100644 index 0000000000..e284a9e5f6 --- /dev/null +++ b/util/gen-types/src/conversion/mod.rs @@ -0,0 +1,4 @@ +#[macro_use] +mod utilities; +mod blockchain; +mod primitive; diff --git a/util/gen-types/src/conversion/primitive.rs b/util/gen-types/src/conversion/primitive.rs new file mode 100644 index 0000000000..d638116507 --- /dev/null +++ b/util/gen-types/src/conversion/primitive.rs @@ -0,0 +1,114 @@ +use crate::{bytes::Bytes, prelude::*}; + +use crate::generated::packed; +#[cfg(not(feature = "std"))] +use alloc::{borrow::ToOwned, str, string::String, vec::Vec}; +#[cfg(feature = "std")] +use std::str; + +impl Pack for u32 { + fn pack(&self) -> packed::Uint32 { + packed::Uint32::new_unchecked(Bytes::from(self.to_le_bytes().to_vec())) + } +} + +impl Pack for u64 { + fn pack(&self) -> packed::Uint64 { + packed::Uint64::new_unchecked(Bytes::from(self.to_le_bytes().to_vec())) + } +} + +impl Pack for u128 { + fn pack(&self) -> packed::Uint128 { + packed::Uint128::new_unchecked(Bytes::from(self.to_le_bytes().to_vec())) + } +} + +impl Pack for usize { + fn pack(&self) -> packed::Uint32 { + (*self as u32).pack() + } +} + +impl<'r> Unpack for packed::Uint32Reader<'r> { + #[allow(clippy::cast_ptr_alignment)] + fn unpack(&self) -> u32 { + let le = self.as_slice().as_ptr() as *const u32; + u32::from_le(unsafe { *le }) + } +} +impl_conversion_for_entity_unpack!(u32, Uint32); + +impl<'r> Unpack for packed::Uint64Reader<'r> { + #[allow(clippy::cast_ptr_alignment)] + fn unpack(&self) -> u64 { + let le = self.as_slice().as_ptr() as *const u64; + u64::from_le(unsafe { *le }) + } +} +impl_conversion_for_entity_unpack!(u64, Uint64); + +impl<'r> Unpack for packed::Uint128Reader<'r> { + #[allow(clippy::cast_ptr_alignment)] + fn unpack(&self) -> u128 { + let le = self.as_slice().as_ptr() as *const u128; + u128::from_le(unsafe { *le }) + } +} +impl_conversion_for_entity_unpack!(u128, Uint128); + +impl<'r> Unpack for packed::Uint32Reader<'r> { + fn unpack(&self) -> usize { + let x: u32 = self.unpack(); + x as usize + } +} +impl_conversion_for_entity_unpack!(usize, Uint32); + +impl Pack for [u8] { + fn pack(&self) -> packed::Bytes { + let len = self.len(); + let mut vec: Vec = Vec::with_capacity(4 + len); + vec.extend_from_slice(&(len as u32).to_le_bytes()[..]); + vec.extend_from_slice(self); + packed::Bytes::new_unchecked(Bytes::from(vec)) + } +} + +impl<'r> Unpack> for packed::BytesReader<'r> { + fn unpack(&self) -> Vec { + self.raw_data().to_owned() + } +} +impl_conversion_for_entity_unpack!(Vec, Bytes); + +impl Pack for str { + fn pack(&self) -> packed::Bytes { + self.as_bytes().pack() + } +} + +impl<'r> packed::BytesReader<'r> { + pub fn as_utf8(&self) -> Result<&str, str::Utf8Error> { + str::from_utf8(self.raw_data()) + } + + #[allow(clippy::missing_safety_doc)] + pub unsafe fn as_utf8_unchecked(&self) -> &str { + str::from_utf8_unchecked(self.raw_data()) + } + + pub fn is_utf8(&self) -> bool { + self.as_utf8().is_ok() + } +} + +impl Pack for String { + fn pack(&self) -> packed::Bytes { + self.as_str().pack() + } +} + +impl_conversion_for_option_pack!(&str, BytesOpt); +impl_conversion_for_option_pack!(String, BytesOpt); +impl_conversion_for_option_pack!(Bytes, BytesOpt); diff --git a/util/gen-types/src/conversion/utilities.rs b/util/gen-types/src/conversion/utilities.rs new file mode 100644 index 0000000000..ed8719652f --- /dev/null +++ b/util/gen-types/src/conversion/utilities.rs @@ -0,0 +1,82 @@ +// https://github.com/nervosnetwork/ckb/blob/develop/util/types/src/conversion/utilities.rs + +macro_rules! impl_conversion_for_entity_unpack { + ($original:ty, $entity:ident) => { + impl Unpack<$original> for packed::$entity { + fn unpack(&self) -> $original { + self.as_reader().unpack() + } + } + }; +} + +macro_rules! impl_conversion_for_option_pack { + ($original:ty, $entity:ident) => { + impl Pack for Option<$original> { + fn pack(&self) -> packed::$entity { + if let Some(ref inner) = self { + packed::$entity::new_unchecked(inner.pack().as_bytes()) + } else { + packed::$entity::default() + } + } + } + }; +} + +macro_rules! impl_conversion_for_vector_pack { + ($original:ty, $entity:ident) => { + impl Pack for [$original] { + fn pack(&self) -> packed::$entity { + packed::$entity::new_builder() + .set(self.iter().map(|v| v.pack()).collect()) + .build() + } + } + }; +} + +macro_rules! impl_conversion_for_vector_unpack { + ($original:ty, $entity:ident, $reader:ident) => { + impl<'r> Unpack> for packed::$reader<'r> { + fn unpack(&self) -> Vec<$original> { + self.iter().map(|x| x.unpack()).collect() + } + } + impl_conversion_for_entity_unpack!(Vec<$original>, $entity); + }; +} + +macro_rules! impl_conversion_for_vector { + ($original:ty, $entity:ident, $reader:ident) => { + impl_conversion_for_vector_pack!($original, $entity); + impl_conversion_for_vector_unpack!($original, $entity, $reader); + }; +} + +macro_rules! impl_conversion_for_packed_optional_pack { + ($original:ident, $entity:ident) => { + impl Pack for Option { + fn pack(&self) -> packed::$entity { + if let Some(ref inner) = self { + packed::$entity::new_unchecked(inner.as_bytes()) + } else { + packed::$entity::default() + } + } + } + }; +} + +macro_rules! impl_conversion_for_packed_iterator_pack { + ($item:ident, $vec:ident) => { + impl PackVec for T + where + T: IntoIterator, + { + fn pack(self) -> packed::$vec { + packed::$vec::new_builder().extend(self).build() + } + } + }; +} diff --git a/util/gen-types/src/core/mod.rs b/util/gen-types/src/core/mod.rs new file mode 100644 index 0000000000..85a22a17d7 --- /dev/null +++ b/util/gen-types/src/core/mod.rs @@ -0,0 +1,8 @@ +mod types; +#[cfg(feature = "calc-hash")] +mod view; + +pub use types::*; + +#[cfg(feature = "calc-hash")] +pub use view::*; diff --git a/util/gen-types/src/core/types.rs b/util/gen-types/src/core/types.rs new file mode 100644 index 0000000000..d74902f8e1 --- /dev/null +++ b/util/gen-types/src/core/types.rs @@ -0,0 +1,29 @@ +use crate::generated::packed; + +pub type BlockNumber = u64; + +/// Specifies how the script `code_hash` is used to match the script code and how to run the code. +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum ScriptHashType { + /// Type "data" matches script code via cell data hash, and run the script code in v0 CKB VM. + Data = 0, + /// Type "type" matches script code via cell type script hash. + Type = 1, + /// Type "data1" matches script code via cell data hash, and run the script code in v1 CKB VM. + Data1 = 2, + /// Type "data2" matches script code via cell data hash, and run the script code in v2 CKB VM. + #[cfg(feature = "ckb2023")] + Data2 = 3, +} + +impl Into for ScriptHashType { + fn into(self) -> u8 { + self as u8 + } +} + +impl Into for ScriptHashType { + fn into(self) -> packed::Byte { + (self as u8).into() + } +} diff --git a/util/gen-types/src/core/view.rs b/util/gen-types/src/core/view.rs new file mode 100644 index 0000000000..d8d981c1d0 --- /dev/null +++ b/util/gen-types/src/core/view.rs @@ -0,0 +1,53 @@ +use crate::generated::packed; +use crate::hash::hash::new_blake2b; +use crate::prelude::*; +use molecule::prelude::*; + +/// A readonly and immutable struct which includes extra hash and the decoupled +/// parts of it. +#[derive(Debug, Clone)] +pub struct ExtraHashView { + /// The uncles hash which is used to combine to the extra hash. + pub(crate) uncles_hash: packed::Byte32, + /// The first item is the new field hash, which is used to combine to the extra hash. + /// The second item is the extra hash. + pub(crate) extension_hash_and_extra_hash: Option<(packed::Byte32, packed::Byte32)>, +} + +impl ExtraHashView { + /// Creates `ExtraHashView` with `uncles_hash` and optional `extension_hash`. + pub fn new(uncles_hash: packed::Byte32, extension_hash_opt: Option) -> Self { + let extension_hash_and_extra_hash = extension_hash_opt.map(|extension_hash| { + let mut ret = [0u8; 32]; + let mut blake2b = new_blake2b(); + blake2b.update(uncles_hash.as_slice()); + blake2b.update(extension_hash.as_slice()); + blake2b.finalize(&mut ret); + (extension_hash, ret.pack()) + }); + Self { + uncles_hash, + extension_hash_and_extra_hash, + } + } + + /// Gets `uncles_hash`. + pub fn uncles_hash(&self) -> packed::Byte32 { + self.uncles_hash.clone() + } + + /// Gets `extension_hash`. + pub fn extension_hash(&self) -> Option { + self.extension_hash_and_extra_hash + .as_ref() + .map(|(ref extension_hash, _)| extension_hash.clone()) + } + + /// Gets `extra_hash`. + pub fn extra_hash(&self) -> packed::Byte32 { + self.extension_hash_and_extra_hash + .as_ref() + .map(|(_, ref extra_hash)| extra_hash.clone()) + .unwrap_or_else(|| self.uncles_hash.clone()) + } +} diff --git a/util/gen-types/src/generated/blockchain.rs b/util/gen-types/src/generated/blockchain.rs new file mode 100644 index 0000000000..c7f33186fa --- /dev/null +++ b/util/gen-types/src/generated/blockchain.rs @@ -0,0 +1,8938 @@ +// Generated by Molecule 0.7.5 + +use molecule::prelude::*; +#[derive(Clone)] +pub struct Uint32(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for Uint32 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for Uint32 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for Uint32 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl ::core::default::Default for Uint32 { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + Uint32::new_unchecked(v) + } +} +impl Uint32 { + const DEFAULT_VALUE: [u8; 4] = [0, 0, 0, 0]; + pub const TOTAL_SIZE: usize = 4; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 4; + pub fn nth0(&self) -> Byte { + Byte::new_unchecked(self.0.slice(0..1)) + } + pub fn nth1(&self) -> Byte { + Byte::new_unchecked(self.0.slice(1..2)) + } + pub fn nth2(&self) -> Byte { + Byte::new_unchecked(self.0.slice(2..3)) + } + pub fn nth3(&self) -> Byte { + Byte::new_unchecked(self.0.slice(3..4)) + } + pub fn raw_data(&self) -> molecule::bytes::Bytes { + self.as_bytes() + } + pub fn as_reader<'r>(&'r self) -> Uint32Reader<'r> { + Uint32Reader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for Uint32 { + type Builder = Uint32Builder; + const NAME: &'static str = "Uint32"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + Uint32(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Uint32Reader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Uint32Reader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder().set([self.nth0(), self.nth1(), self.nth2(), self.nth3()]) + } +} +#[derive(Clone, Copy)] +pub struct Uint32Reader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for Uint32Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for Uint32Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for Uint32Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl<'r> Uint32Reader<'r> { + pub const TOTAL_SIZE: usize = 4; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 4; + pub fn nth0(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[0..1]) + } + pub fn nth1(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[1..2]) + } + pub fn nth2(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[2..3]) + } + pub fn nth3(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[3..4]) + } + pub fn raw_data(&self) -> &'r [u8] { + self.as_slice() + } +} +impl<'r> molecule::prelude::Reader<'r> for Uint32Reader<'r> { + type Entity = Uint32; + const NAME: &'static str = "Uint32Reader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + Uint32Reader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], _compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len != Self::TOTAL_SIZE { + return ve!(Self, TotalSizeNotMatch, Self::TOTAL_SIZE, slice_len); + } + Ok(()) + } +} +pub struct Uint32Builder(pub(crate) [Byte; 4]); +impl ::core::fmt::Debug for Uint32Builder { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:?})", Self::NAME, &self.0[..]) + } +} +impl ::core::default::Default for Uint32Builder { + fn default() -> Self { + Uint32Builder([ + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + ]) + } +} +impl Uint32Builder { + pub const TOTAL_SIZE: usize = 4; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 4; + pub fn set(mut self, v: [Byte; 4]) -> Self { + self.0 = v; + self + } + pub fn nth0(mut self, v: Byte) -> Self { + self.0[0] = v; + self + } + pub fn nth1(mut self, v: Byte) -> Self { + self.0[1] = v; + self + } + pub fn nth2(mut self, v: Byte) -> Self { + self.0[2] = v; + self + } + pub fn nth3(mut self, v: Byte) -> Self { + self.0[3] = v; + self + } +} +impl molecule::prelude::Builder for Uint32Builder { + type Entity = Uint32; + const NAME: &'static str = "Uint32Builder"; + fn expected_length(&self) -> usize { + Self::TOTAL_SIZE + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + writer.write_all(self.0[0].as_slice())?; + writer.write_all(self.0[1].as_slice())?; + writer.write_all(self.0[2].as_slice())?; + writer.write_all(self.0[3].as_slice())?; + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + Uint32::new_unchecked(inner.into()) + } +} +#[derive(Clone)] +pub struct Uint64(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for Uint64 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for Uint64 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for Uint64 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl ::core::default::Default for Uint64 { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + Uint64::new_unchecked(v) + } +} +impl Uint64 { + const DEFAULT_VALUE: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0]; + pub const TOTAL_SIZE: usize = 8; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 8; + pub fn nth0(&self) -> Byte { + Byte::new_unchecked(self.0.slice(0..1)) + } + pub fn nth1(&self) -> Byte { + Byte::new_unchecked(self.0.slice(1..2)) + } + pub fn nth2(&self) -> Byte { + Byte::new_unchecked(self.0.slice(2..3)) + } + pub fn nth3(&self) -> Byte { + Byte::new_unchecked(self.0.slice(3..4)) + } + pub fn nth4(&self) -> Byte { + Byte::new_unchecked(self.0.slice(4..5)) + } + pub fn nth5(&self) -> Byte { + Byte::new_unchecked(self.0.slice(5..6)) + } + pub fn nth6(&self) -> Byte { + Byte::new_unchecked(self.0.slice(6..7)) + } + pub fn nth7(&self) -> Byte { + Byte::new_unchecked(self.0.slice(7..8)) + } + pub fn raw_data(&self) -> molecule::bytes::Bytes { + self.as_bytes() + } + pub fn as_reader<'r>(&'r self) -> Uint64Reader<'r> { + Uint64Reader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for Uint64 { + type Builder = Uint64Builder; + const NAME: &'static str = "Uint64"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + Uint64(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Uint64Reader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Uint64Reader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder().set([ + self.nth0(), + self.nth1(), + self.nth2(), + self.nth3(), + self.nth4(), + self.nth5(), + self.nth6(), + self.nth7(), + ]) + } +} +#[derive(Clone, Copy)] +pub struct Uint64Reader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for Uint64Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for Uint64Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for Uint64Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl<'r> Uint64Reader<'r> { + pub const TOTAL_SIZE: usize = 8; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 8; + pub fn nth0(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[0..1]) + } + pub fn nth1(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[1..2]) + } + pub fn nth2(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[2..3]) + } + pub fn nth3(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[3..4]) + } + pub fn nth4(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[4..5]) + } + pub fn nth5(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[5..6]) + } + pub fn nth6(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[6..7]) + } + pub fn nth7(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[7..8]) + } + pub fn raw_data(&self) -> &'r [u8] { + self.as_slice() + } +} +impl<'r> molecule::prelude::Reader<'r> for Uint64Reader<'r> { + type Entity = Uint64; + const NAME: &'static str = "Uint64Reader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + Uint64Reader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], _compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len != Self::TOTAL_SIZE { + return ve!(Self, TotalSizeNotMatch, Self::TOTAL_SIZE, slice_len); + } + Ok(()) + } +} +pub struct Uint64Builder(pub(crate) [Byte; 8]); +impl ::core::fmt::Debug for Uint64Builder { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:?})", Self::NAME, &self.0[..]) + } +} +impl ::core::default::Default for Uint64Builder { + fn default() -> Self { + Uint64Builder([ + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + ]) + } +} +impl Uint64Builder { + pub const TOTAL_SIZE: usize = 8; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 8; + pub fn set(mut self, v: [Byte; 8]) -> Self { + self.0 = v; + self + } + pub fn nth0(mut self, v: Byte) -> Self { + self.0[0] = v; + self + } + pub fn nth1(mut self, v: Byte) -> Self { + self.0[1] = v; + self + } + pub fn nth2(mut self, v: Byte) -> Self { + self.0[2] = v; + self + } + pub fn nth3(mut self, v: Byte) -> Self { + self.0[3] = v; + self + } + pub fn nth4(mut self, v: Byte) -> Self { + self.0[4] = v; + self + } + pub fn nth5(mut self, v: Byte) -> Self { + self.0[5] = v; + self + } + pub fn nth6(mut self, v: Byte) -> Self { + self.0[6] = v; + self + } + pub fn nth7(mut self, v: Byte) -> Self { + self.0[7] = v; + self + } +} +impl molecule::prelude::Builder for Uint64Builder { + type Entity = Uint64; + const NAME: &'static str = "Uint64Builder"; + fn expected_length(&self) -> usize { + Self::TOTAL_SIZE + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + writer.write_all(self.0[0].as_slice())?; + writer.write_all(self.0[1].as_slice())?; + writer.write_all(self.0[2].as_slice())?; + writer.write_all(self.0[3].as_slice())?; + writer.write_all(self.0[4].as_slice())?; + writer.write_all(self.0[5].as_slice())?; + writer.write_all(self.0[6].as_slice())?; + writer.write_all(self.0[7].as_slice())?; + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + Uint64::new_unchecked(inner.into()) + } +} +#[derive(Clone)] +pub struct Uint128(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for Uint128 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for Uint128 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for Uint128 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl ::core::default::Default for Uint128 { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + Uint128::new_unchecked(v) + } +} +impl Uint128 { + const DEFAULT_VALUE: [u8; 16] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + pub const TOTAL_SIZE: usize = 16; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 16; + pub fn nth0(&self) -> Byte { + Byte::new_unchecked(self.0.slice(0..1)) + } + pub fn nth1(&self) -> Byte { + Byte::new_unchecked(self.0.slice(1..2)) + } + pub fn nth2(&self) -> Byte { + Byte::new_unchecked(self.0.slice(2..3)) + } + pub fn nth3(&self) -> Byte { + Byte::new_unchecked(self.0.slice(3..4)) + } + pub fn nth4(&self) -> Byte { + Byte::new_unchecked(self.0.slice(4..5)) + } + pub fn nth5(&self) -> Byte { + Byte::new_unchecked(self.0.slice(5..6)) + } + pub fn nth6(&self) -> Byte { + Byte::new_unchecked(self.0.slice(6..7)) + } + pub fn nth7(&self) -> Byte { + Byte::new_unchecked(self.0.slice(7..8)) + } + pub fn nth8(&self) -> Byte { + Byte::new_unchecked(self.0.slice(8..9)) + } + pub fn nth9(&self) -> Byte { + Byte::new_unchecked(self.0.slice(9..10)) + } + pub fn nth10(&self) -> Byte { + Byte::new_unchecked(self.0.slice(10..11)) + } + pub fn nth11(&self) -> Byte { + Byte::new_unchecked(self.0.slice(11..12)) + } + pub fn nth12(&self) -> Byte { + Byte::new_unchecked(self.0.slice(12..13)) + } + pub fn nth13(&self) -> Byte { + Byte::new_unchecked(self.0.slice(13..14)) + } + pub fn nth14(&self) -> Byte { + Byte::new_unchecked(self.0.slice(14..15)) + } + pub fn nth15(&self) -> Byte { + Byte::new_unchecked(self.0.slice(15..16)) + } + pub fn raw_data(&self) -> molecule::bytes::Bytes { + self.as_bytes() + } + pub fn as_reader<'r>(&'r self) -> Uint128Reader<'r> { + Uint128Reader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for Uint128 { + type Builder = Uint128Builder; + const NAME: &'static str = "Uint128"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + Uint128(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Uint128Reader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Uint128Reader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder().set([ + self.nth0(), + self.nth1(), + self.nth2(), + self.nth3(), + self.nth4(), + self.nth5(), + self.nth6(), + self.nth7(), + self.nth8(), + self.nth9(), + self.nth10(), + self.nth11(), + self.nth12(), + self.nth13(), + self.nth14(), + self.nth15(), + ]) + } +} +#[derive(Clone, Copy)] +pub struct Uint128Reader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for Uint128Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for Uint128Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for Uint128Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl<'r> Uint128Reader<'r> { + pub const TOTAL_SIZE: usize = 16; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 16; + pub fn nth0(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[0..1]) + } + pub fn nth1(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[1..2]) + } + pub fn nth2(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[2..3]) + } + pub fn nth3(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[3..4]) + } + pub fn nth4(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[4..5]) + } + pub fn nth5(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[5..6]) + } + pub fn nth6(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[6..7]) + } + pub fn nth7(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[7..8]) + } + pub fn nth8(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[8..9]) + } + pub fn nth9(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[9..10]) + } + pub fn nth10(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[10..11]) + } + pub fn nth11(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[11..12]) + } + pub fn nth12(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[12..13]) + } + pub fn nth13(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[13..14]) + } + pub fn nth14(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[14..15]) + } + pub fn nth15(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[15..16]) + } + pub fn raw_data(&self) -> &'r [u8] { + self.as_slice() + } +} +impl<'r> molecule::prelude::Reader<'r> for Uint128Reader<'r> { + type Entity = Uint128; + const NAME: &'static str = "Uint128Reader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + Uint128Reader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], _compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len != Self::TOTAL_SIZE { + return ve!(Self, TotalSizeNotMatch, Self::TOTAL_SIZE, slice_len); + } + Ok(()) + } +} +pub struct Uint128Builder(pub(crate) [Byte; 16]); +impl ::core::fmt::Debug for Uint128Builder { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:?})", Self::NAME, &self.0[..]) + } +} +impl ::core::default::Default for Uint128Builder { + fn default() -> Self { + Uint128Builder([ + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + ]) + } +} +impl Uint128Builder { + pub const TOTAL_SIZE: usize = 16; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 16; + pub fn set(mut self, v: [Byte; 16]) -> Self { + self.0 = v; + self + } + pub fn nth0(mut self, v: Byte) -> Self { + self.0[0] = v; + self + } + pub fn nth1(mut self, v: Byte) -> Self { + self.0[1] = v; + self + } + pub fn nth2(mut self, v: Byte) -> Self { + self.0[2] = v; + self + } + pub fn nth3(mut self, v: Byte) -> Self { + self.0[3] = v; + self + } + pub fn nth4(mut self, v: Byte) -> Self { + self.0[4] = v; + self + } + pub fn nth5(mut self, v: Byte) -> Self { + self.0[5] = v; + self + } + pub fn nth6(mut self, v: Byte) -> Self { + self.0[6] = v; + self + } + pub fn nth7(mut self, v: Byte) -> Self { + self.0[7] = v; + self + } + pub fn nth8(mut self, v: Byte) -> Self { + self.0[8] = v; + self + } + pub fn nth9(mut self, v: Byte) -> Self { + self.0[9] = v; + self + } + pub fn nth10(mut self, v: Byte) -> Self { + self.0[10] = v; + self + } + pub fn nth11(mut self, v: Byte) -> Self { + self.0[11] = v; + self + } + pub fn nth12(mut self, v: Byte) -> Self { + self.0[12] = v; + self + } + pub fn nth13(mut self, v: Byte) -> Self { + self.0[13] = v; + self + } + pub fn nth14(mut self, v: Byte) -> Self { + self.0[14] = v; + self + } + pub fn nth15(mut self, v: Byte) -> Self { + self.0[15] = v; + self + } +} +impl molecule::prelude::Builder for Uint128Builder { + type Entity = Uint128; + const NAME: &'static str = "Uint128Builder"; + fn expected_length(&self) -> usize { + Self::TOTAL_SIZE + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + writer.write_all(self.0[0].as_slice())?; + writer.write_all(self.0[1].as_slice())?; + writer.write_all(self.0[2].as_slice())?; + writer.write_all(self.0[3].as_slice())?; + writer.write_all(self.0[4].as_slice())?; + writer.write_all(self.0[5].as_slice())?; + writer.write_all(self.0[6].as_slice())?; + writer.write_all(self.0[7].as_slice())?; + writer.write_all(self.0[8].as_slice())?; + writer.write_all(self.0[9].as_slice())?; + writer.write_all(self.0[10].as_slice())?; + writer.write_all(self.0[11].as_slice())?; + writer.write_all(self.0[12].as_slice())?; + writer.write_all(self.0[13].as_slice())?; + writer.write_all(self.0[14].as_slice())?; + writer.write_all(self.0[15].as_slice())?; + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + Uint128::new_unchecked(inner.into()) + } +} +#[derive(Clone)] +pub struct Byte32(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for Byte32 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for Byte32 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for Byte32 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl ::core::default::Default for Byte32 { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + Byte32::new_unchecked(v) + } +} +impl Byte32 { + const DEFAULT_VALUE: [u8; 32] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ]; + pub const TOTAL_SIZE: usize = 32; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 32; + pub fn nth0(&self) -> Byte { + Byte::new_unchecked(self.0.slice(0..1)) + } + pub fn nth1(&self) -> Byte { + Byte::new_unchecked(self.0.slice(1..2)) + } + pub fn nth2(&self) -> Byte { + Byte::new_unchecked(self.0.slice(2..3)) + } + pub fn nth3(&self) -> Byte { + Byte::new_unchecked(self.0.slice(3..4)) + } + pub fn nth4(&self) -> Byte { + Byte::new_unchecked(self.0.slice(4..5)) + } + pub fn nth5(&self) -> Byte { + Byte::new_unchecked(self.0.slice(5..6)) + } + pub fn nth6(&self) -> Byte { + Byte::new_unchecked(self.0.slice(6..7)) + } + pub fn nth7(&self) -> Byte { + Byte::new_unchecked(self.0.slice(7..8)) + } + pub fn nth8(&self) -> Byte { + Byte::new_unchecked(self.0.slice(8..9)) + } + pub fn nth9(&self) -> Byte { + Byte::new_unchecked(self.0.slice(9..10)) + } + pub fn nth10(&self) -> Byte { + Byte::new_unchecked(self.0.slice(10..11)) + } + pub fn nth11(&self) -> Byte { + Byte::new_unchecked(self.0.slice(11..12)) + } + pub fn nth12(&self) -> Byte { + Byte::new_unchecked(self.0.slice(12..13)) + } + pub fn nth13(&self) -> Byte { + Byte::new_unchecked(self.0.slice(13..14)) + } + pub fn nth14(&self) -> Byte { + Byte::new_unchecked(self.0.slice(14..15)) + } + pub fn nth15(&self) -> Byte { + Byte::new_unchecked(self.0.slice(15..16)) + } + pub fn nth16(&self) -> Byte { + Byte::new_unchecked(self.0.slice(16..17)) + } + pub fn nth17(&self) -> Byte { + Byte::new_unchecked(self.0.slice(17..18)) + } + pub fn nth18(&self) -> Byte { + Byte::new_unchecked(self.0.slice(18..19)) + } + pub fn nth19(&self) -> Byte { + Byte::new_unchecked(self.0.slice(19..20)) + } + pub fn nth20(&self) -> Byte { + Byte::new_unchecked(self.0.slice(20..21)) + } + pub fn nth21(&self) -> Byte { + Byte::new_unchecked(self.0.slice(21..22)) + } + pub fn nth22(&self) -> Byte { + Byte::new_unchecked(self.0.slice(22..23)) + } + pub fn nth23(&self) -> Byte { + Byte::new_unchecked(self.0.slice(23..24)) + } + pub fn nth24(&self) -> Byte { + Byte::new_unchecked(self.0.slice(24..25)) + } + pub fn nth25(&self) -> Byte { + Byte::new_unchecked(self.0.slice(25..26)) + } + pub fn nth26(&self) -> Byte { + Byte::new_unchecked(self.0.slice(26..27)) + } + pub fn nth27(&self) -> Byte { + Byte::new_unchecked(self.0.slice(27..28)) + } + pub fn nth28(&self) -> Byte { + Byte::new_unchecked(self.0.slice(28..29)) + } + pub fn nth29(&self) -> Byte { + Byte::new_unchecked(self.0.slice(29..30)) + } + pub fn nth30(&self) -> Byte { + Byte::new_unchecked(self.0.slice(30..31)) + } + pub fn nth31(&self) -> Byte { + Byte::new_unchecked(self.0.slice(31..32)) + } + pub fn raw_data(&self) -> molecule::bytes::Bytes { + self.as_bytes() + } + pub fn as_reader<'r>(&'r self) -> Byte32Reader<'r> { + Byte32Reader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for Byte32 { + type Builder = Byte32Builder; + const NAME: &'static str = "Byte32"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + Byte32(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Byte32Reader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Byte32Reader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder().set([ + self.nth0(), + self.nth1(), + self.nth2(), + self.nth3(), + self.nth4(), + self.nth5(), + self.nth6(), + self.nth7(), + self.nth8(), + self.nth9(), + self.nth10(), + self.nth11(), + self.nth12(), + self.nth13(), + self.nth14(), + self.nth15(), + self.nth16(), + self.nth17(), + self.nth18(), + self.nth19(), + self.nth20(), + self.nth21(), + self.nth22(), + self.nth23(), + self.nth24(), + self.nth25(), + self.nth26(), + self.nth27(), + self.nth28(), + self.nth29(), + self.nth30(), + self.nth31(), + ]) + } +} +#[derive(Clone, Copy)] +pub struct Byte32Reader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for Byte32Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for Byte32Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for Byte32Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl<'r> Byte32Reader<'r> { + pub const TOTAL_SIZE: usize = 32; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 32; + pub fn nth0(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[0..1]) + } + pub fn nth1(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[1..2]) + } + pub fn nth2(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[2..3]) + } + pub fn nth3(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[3..4]) + } + pub fn nth4(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[4..5]) + } + pub fn nth5(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[5..6]) + } + pub fn nth6(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[6..7]) + } + pub fn nth7(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[7..8]) + } + pub fn nth8(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[8..9]) + } + pub fn nth9(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[9..10]) + } + pub fn nth10(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[10..11]) + } + pub fn nth11(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[11..12]) + } + pub fn nth12(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[12..13]) + } + pub fn nth13(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[13..14]) + } + pub fn nth14(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[14..15]) + } + pub fn nth15(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[15..16]) + } + pub fn nth16(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[16..17]) + } + pub fn nth17(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[17..18]) + } + pub fn nth18(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[18..19]) + } + pub fn nth19(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[19..20]) + } + pub fn nth20(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[20..21]) + } + pub fn nth21(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[21..22]) + } + pub fn nth22(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[22..23]) + } + pub fn nth23(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[23..24]) + } + pub fn nth24(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[24..25]) + } + pub fn nth25(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[25..26]) + } + pub fn nth26(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[26..27]) + } + pub fn nth27(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[27..28]) + } + pub fn nth28(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[28..29]) + } + pub fn nth29(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[29..30]) + } + pub fn nth30(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[30..31]) + } + pub fn nth31(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[31..32]) + } + pub fn raw_data(&self) -> &'r [u8] { + self.as_slice() + } +} +impl<'r> molecule::prelude::Reader<'r> for Byte32Reader<'r> { + type Entity = Byte32; + const NAME: &'static str = "Byte32Reader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + Byte32Reader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], _compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len != Self::TOTAL_SIZE { + return ve!(Self, TotalSizeNotMatch, Self::TOTAL_SIZE, slice_len); + } + Ok(()) + } +} +pub struct Byte32Builder(pub(crate) [Byte; 32]); +impl ::core::fmt::Debug for Byte32Builder { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:?})", Self::NAME, &self.0[..]) + } +} +impl ::core::default::Default for Byte32Builder { + fn default() -> Self { + Byte32Builder([ + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + ]) + } +} +impl Byte32Builder { + pub const TOTAL_SIZE: usize = 32; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 32; + pub fn set(mut self, v: [Byte; 32]) -> Self { + self.0 = v; + self + } + pub fn nth0(mut self, v: Byte) -> Self { + self.0[0] = v; + self + } + pub fn nth1(mut self, v: Byte) -> Self { + self.0[1] = v; + self + } + pub fn nth2(mut self, v: Byte) -> Self { + self.0[2] = v; + self + } + pub fn nth3(mut self, v: Byte) -> Self { + self.0[3] = v; + self + } + pub fn nth4(mut self, v: Byte) -> Self { + self.0[4] = v; + self + } + pub fn nth5(mut self, v: Byte) -> Self { + self.0[5] = v; + self + } + pub fn nth6(mut self, v: Byte) -> Self { + self.0[6] = v; + self + } + pub fn nth7(mut self, v: Byte) -> Self { + self.0[7] = v; + self + } + pub fn nth8(mut self, v: Byte) -> Self { + self.0[8] = v; + self + } + pub fn nth9(mut self, v: Byte) -> Self { + self.0[9] = v; + self + } + pub fn nth10(mut self, v: Byte) -> Self { + self.0[10] = v; + self + } + pub fn nth11(mut self, v: Byte) -> Self { + self.0[11] = v; + self + } + pub fn nth12(mut self, v: Byte) -> Self { + self.0[12] = v; + self + } + pub fn nth13(mut self, v: Byte) -> Self { + self.0[13] = v; + self + } + pub fn nth14(mut self, v: Byte) -> Self { + self.0[14] = v; + self + } + pub fn nth15(mut self, v: Byte) -> Self { + self.0[15] = v; + self + } + pub fn nth16(mut self, v: Byte) -> Self { + self.0[16] = v; + self + } + pub fn nth17(mut self, v: Byte) -> Self { + self.0[17] = v; + self + } + pub fn nth18(mut self, v: Byte) -> Self { + self.0[18] = v; + self + } + pub fn nth19(mut self, v: Byte) -> Self { + self.0[19] = v; + self + } + pub fn nth20(mut self, v: Byte) -> Self { + self.0[20] = v; + self + } + pub fn nth21(mut self, v: Byte) -> Self { + self.0[21] = v; + self + } + pub fn nth22(mut self, v: Byte) -> Self { + self.0[22] = v; + self + } + pub fn nth23(mut self, v: Byte) -> Self { + self.0[23] = v; + self + } + pub fn nth24(mut self, v: Byte) -> Self { + self.0[24] = v; + self + } + pub fn nth25(mut self, v: Byte) -> Self { + self.0[25] = v; + self + } + pub fn nth26(mut self, v: Byte) -> Self { + self.0[26] = v; + self + } + pub fn nth27(mut self, v: Byte) -> Self { + self.0[27] = v; + self + } + pub fn nth28(mut self, v: Byte) -> Self { + self.0[28] = v; + self + } + pub fn nth29(mut self, v: Byte) -> Self { + self.0[29] = v; + self + } + pub fn nth30(mut self, v: Byte) -> Self { + self.0[30] = v; + self + } + pub fn nth31(mut self, v: Byte) -> Self { + self.0[31] = v; + self + } +} +impl molecule::prelude::Builder for Byte32Builder { + type Entity = Byte32; + const NAME: &'static str = "Byte32Builder"; + fn expected_length(&self) -> usize { + Self::TOTAL_SIZE + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + writer.write_all(self.0[0].as_slice())?; + writer.write_all(self.0[1].as_slice())?; + writer.write_all(self.0[2].as_slice())?; + writer.write_all(self.0[3].as_slice())?; + writer.write_all(self.0[4].as_slice())?; + writer.write_all(self.0[5].as_slice())?; + writer.write_all(self.0[6].as_slice())?; + writer.write_all(self.0[7].as_slice())?; + writer.write_all(self.0[8].as_slice())?; + writer.write_all(self.0[9].as_slice())?; + writer.write_all(self.0[10].as_slice())?; + writer.write_all(self.0[11].as_slice())?; + writer.write_all(self.0[12].as_slice())?; + writer.write_all(self.0[13].as_slice())?; + writer.write_all(self.0[14].as_slice())?; + writer.write_all(self.0[15].as_slice())?; + writer.write_all(self.0[16].as_slice())?; + writer.write_all(self.0[17].as_slice())?; + writer.write_all(self.0[18].as_slice())?; + writer.write_all(self.0[19].as_slice())?; + writer.write_all(self.0[20].as_slice())?; + writer.write_all(self.0[21].as_slice())?; + writer.write_all(self.0[22].as_slice())?; + writer.write_all(self.0[23].as_slice())?; + writer.write_all(self.0[24].as_slice())?; + writer.write_all(self.0[25].as_slice())?; + writer.write_all(self.0[26].as_slice())?; + writer.write_all(self.0[27].as_slice())?; + writer.write_all(self.0[28].as_slice())?; + writer.write_all(self.0[29].as_slice())?; + writer.write_all(self.0[30].as_slice())?; + writer.write_all(self.0[31].as_slice())?; + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + Byte32::new_unchecked(inner.into()) + } +} +#[derive(Clone)] +pub struct Uint256(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for Uint256 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for Uint256 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for Uint256 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl ::core::default::Default for Uint256 { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + Uint256::new_unchecked(v) + } +} +impl Uint256 { + const DEFAULT_VALUE: [u8; 32] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + ]; + pub const TOTAL_SIZE: usize = 32; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 32; + pub fn nth0(&self) -> Byte { + Byte::new_unchecked(self.0.slice(0..1)) + } + pub fn nth1(&self) -> Byte { + Byte::new_unchecked(self.0.slice(1..2)) + } + pub fn nth2(&self) -> Byte { + Byte::new_unchecked(self.0.slice(2..3)) + } + pub fn nth3(&self) -> Byte { + Byte::new_unchecked(self.0.slice(3..4)) + } + pub fn nth4(&self) -> Byte { + Byte::new_unchecked(self.0.slice(4..5)) + } + pub fn nth5(&self) -> Byte { + Byte::new_unchecked(self.0.slice(5..6)) + } + pub fn nth6(&self) -> Byte { + Byte::new_unchecked(self.0.slice(6..7)) + } + pub fn nth7(&self) -> Byte { + Byte::new_unchecked(self.0.slice(7..8)) + } + pub fn nth8(&self) -> Byte { + Byte::new_unchecked(self.0.slice(8..9)) + } + pub fn nth9(&self) -> Byte { + Byte::new_unchecked(self.0.slice(9..10)) + } + pub fn nth10(&self) -> Byte { + Byte::new_unchecked(self.0.slice(10..11)) + } + pub fn nth11(&self) -> Byte { + Byte::new_unchecked(self.0.slice(11..12)) + } + pub fn nth12(&self) -> Byte { + Byte::new_unchecked(self.0.slice(12..13)) + } + pub fn nth13(&self) -> Byte { + Byte::new_unchecked(self.0.slice(13..14)) + } + pub fn nth14(&self) -> Byte { + Byte::new_unchecked(self.0.slice(14..15)) + } + pub fn nth15(&self) -> Byte { + Byte::new_unchecked(self.0.slice(15..16)) + } + pub fn nth16(&self) -> Byte { + Byte::new_unchecked(self.0.slice(16..17)) + } + pub fn nth17(&self) -> Byte { + Byte::new_unchecked(self.0.slice(17..18)) + } + pub fn nth18(&self) -> Byte { + Byte::new_unchecked(self.0.slice(18..19)) + } + pub fn nth19(&self) -> Byte { + Byte::new_unchecked(self.0.slice(19..20)) + } + pub fn nth20(&self) -> Byte { + Byte::new_unchecked(self.0.slice(20..21)) + } + pub fn nth21(&self) -> Byte { + Byte::new_unchecked(self.0.slice(21..22)) + } + pub fn nth22(&self) -> Byte { + Byte::new_unchecked(self.0.slice(22..23)) + } + pub fn nth23(&self) -> Byte { + Byte::new_unchecked(self.0.slice(23..24)) + } + pub fn nth24(&self) -> Byte { + Byte::new_unchecked(self.0.slice(24..25)) + } + pub fn nth25(&self) -> Byte { + Byte::new_unchecked(self.0.slice(25..26)) + } + pub fn nth26(&self) -> Byte { + Byte::new_unchecked(self.0.slice(26..27)) + } + pub fn nth27(&self) -> Byte { + Byte::new_unchecked(self.0.slice(27..28)) + } + pub fn nth28(&self) -> Byte { + Byte::new_unchecked(self.0.slice(28..29)) + } + pub fn nth29(&self) -> Byte { + Byte::new_unchecked(self.0.slice(29..30)) + } + pub fn nth30(&self) -> Byte { + Byte::new_unchecked(self.0.slice(30..31)) + } + pub fn nth31(&self) -> Byte { + Byte::new_unchecked(self.0.slice(31..32)) + } + pub fn raw_data(&self) -> molecule::bytes::Bytes { + self.as_bytes() + } + pub fn as_reader<'r>(&'r self) -> Uint256Reader<'r> { + Uint256Reader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for Uint256 { + type Builder = Uint256Builder; + const NAME: &'static str = "Uint256"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + Uint256(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Uint256Reader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Uint256Reader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder().set([ + self.nth0(), + self.nth1(), + self.nth2(), + self.nth3(), + self.nth4(), + self.nth5(), + self.nth6(), + self.nth7(), + self.nth8(), + self.nth9(), + self.nth10(), + self.nth11(), + self.nth12(), + self.nth13(), + self.nth14(), + self.nth15(), + self.nth16(), + self.nth17(), + self.nth18(), + self.nth19(), + self.nth20(), + self.nth21(), + self.nth22(), + self.nth23(), + self.nth24(), + self.nth25(), + self.nth26(), + self.nth27(), + self.nth28(), + self.nth29(), + self.nth30(), + self.nth31(), + ]) + } +} +#[derive(Clone, Copy)] +pub struct Uint256Reader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for Uint256Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for Uint256Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for Uint256Reader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl<'r> Uint256Reader<'r> { + pub const TOTAL_SIZE: usize = 32; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 32; + pub fn nth0(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[0..1]) + } + pub fn nth1(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[1..2]) + } + pub fn nth2(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[2..3]) + } + pub fn nth3(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[3..4]) + } + pub fn nth4(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[4..5]) + } + pub fn nth5(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[5..6]) + } + pub fn nth6(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[6..7]) + } + pub fn nth7(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[7..8]) + } + pub fn nth8(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[8..9]) + } + pub fn nth9(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[9..10]) + } + pub fn nth10(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[10..11]) + } + pub fn nth11(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[11..12]) + } + pub fn nth12(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[12..13]) + } + pub fn nth13(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[13..14]) + } + pub fn nth14(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[14..15]) + } + pub fn nth15(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[15..16]) + } + pub fn nth16(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[16..17]) + } + pub fn nth17(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[17..18]) + } + pub fn nth18(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[18..19]) + } + pub fn nth19(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[19..20]) + } + pub fn nth20(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[20..21]) + } + pub fn nth21(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[21..22]) + } + pub fn nth22(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[22..23]) + } + pub fn nth23(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[23..24]) + } + pub fn nth24(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[24..25]) + } + pub fn nth25(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[25..26]) + } + pub fn nth26(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[26..27]) + } + pub fn nth27(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[27..28]) + } + pub fn nth28(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[28..29]) + } + pub fn nth29(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[29..30]) + } + pub fn nth30(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[30..31]) + } + pub fn nth31(&self) -> ByteReader<'r> { + ByteReader::new_unchecked(&self.as_slice()[31..32]) + } + pub fn raw_data(&self) -> &'r [u8] { + self.as_slice() + } +} +impl<'r> molecule::prelude::Reader<'r> for Uint256Reader<'r> { + type Entity = Uint256; + const NAME: &'static str = "Uint256Reader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + Uint256Reader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], _compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len != Self::TOTAL_SIZE { + return ve!(Self, TotalSizeNotMatch, Self::TOTAL_SIZE, slice_len); + } + Ok(()) + } +} +pub struct Uint256Builder(pub(crate) [Byte; 32]); +impl ::core::fmt::Debug for Uint256Builder { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:?})", Self::NAME, &self.0[..]) + } +} +impl ::core::default::Default for Uint256Builder { + fn default() -> Self { + Uint256Builder([ + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + Byte::default(), + ]) + } +} +impl Uint256Builder { + pub const TOTAL_SIZE: usize = 32; + pub const ITEM_SIZE: usize = 1; + pub const ITEM_COUNT: usize = 32; + pub fn set(mut self, v: [Byte; 32]) -> Self { + self.0 = v; + self + } + pub fn nth0(mut self, v: Byte) -> Self { + self.0[0] = v; + self + } + pub fn nth1(mut self, v: Byte) -> Self { + self.0[1] = v; + self + } + pub fn nth2(mut self, v: Byte) -> Self { + self.0[2] = v; + self + } + pub fn nth3(mut self, v: Byte) -> Self { + self.0[3] = v; + self + } + pub fn nth4(mut self, v: Byte) -> Self { + self.0[4] = v; + self + } + pub fn nth5(mut self, v: Byte) -> Self { + self.0[5] = v; + self + } + pub fn nth6(mut self, v: Byte) -> Self { + self.0[6] = v; + self + } + pub fn nth7(mut self, v: Byte) -> Self { + self.0[7] = v; + self + } + pub fn nth8(mut self, v: Byte) -> Self { + self.0[8] = v; + self + } + pub fn nth9(mut self, v: Byte) -> Self { + self.0[9] = v; + self + } + pub fn nth10(mut self, v: Byte) -> Self { + self.0[10] = v; + self + } + pub fn nth11(mut self, v: Byte) -> Self { + self.0[11] = v; + self + } + pub fn nth12(mut self, v: Byte) -> Self { + self.0[12] = v; + self + } + pub fn nth13(mut self, v: Byte) -> Self { + self.0[13] = v; + self + } + pub fn nth14(mut self, v: Byte) -> Self { + self.0[14] = v; + self + } + pub fn nth15(mut self, v: Byte) -> Self { + self.0[15] = v; + self + } + pub fn nth16(mut self, v: Byte) -> Self { + self.0[16] = v; + self + } + pub fn nth17(mut self, v: Byte) -> Self { + self.0[17] = v; + self + } + pub fn nth18(mut self, v: Byte) -> Self { + self.0[18] = v; + self + } + pub fn nth19(mut self, v: Byte) -> Self { + self.0[19] = v; + self + } + pub fn nth20(mut self, v: Byte) -> Self { + self.0[20] = v; + self + } + pub fn nth21(mut self, v: Byte) -> Self { + self.0[21] = v; + self + } + pub fn nth22(mut self, v: Byte) -> Self { + self.0[22] = v; + self + } + pub fn nth23(mut self, v: Byte) -> Self { + self.0[23] = v; + self + } + pub fn nth24(mut self, v: Byte) -> Self { + self.0[24] = v; + self + } + pub fn nth25(mut self, v: Byte) -> Self { + self.0[25] = v; + self + } + pub fn nth26(mut self, v: Byte) -> Self { + self.0[26] = v; + self + } + pub fn nth27(mut self, v: Byte) -> Self { + self.0[27] = v; + self + } + pub fn nth28(mut self, v: Byte) -> Self { + self.0[28] = v; + self + } + pub fn nth29(mut self, v: Byte) -> Self { + self.0[29] = v; + self + } + pub fn nth30(mut self, v: Byte) -> Self { + self.0[30] = v; + self + } + pub fn nth31(mut self, v: Byte) -> Self { + self.0[31] = v; + self + } +} +impl molecule::prelude::Builder for Uint256Builder { + type Entity = Uint256; + const NAME: &'static str = "Uint256Builder"; + fn expected_length(&self) -> usize { + Self::TOTAL_SIZE + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + writer.write_all(self.0[0].as_slice())?; + writer.write_all(self.0[1].as_slice())?; + writer.write_all(self.0[2].as_slice())?; + writer.write_all(self.0[3].as_slice())?; + writer.write_all(self.0[4].as_slice())?; + writer.write_all(self.0[5].as_slice())?; + writer.write_all(self.0[6].as_slice())?; + writer.write_all(self.0[7].as_slice())?; + writer.write_all(self.0[8].as_slice())?; + writer.write_all(self.0[9].as_slice())?; + writer.write_all(self.0[10].as_slice())?; + writer.write_all(self.0[11].as_slice())?; + writer.write_all(self.0[12].as_slice())?; + writer.write_all(self.0[13].as_slice())?; + writer.write_all(self.0[14].as_slice())?; + writer.write_all(self.0[15].as_slice())?; + writer.write_all(self.0[16].as_slice())?; + writer.write_all(self.0[17].as_slice())?; + writer.write_all(self.0[18].as_slice())?; + writer.write_all(self.0[19].as_slice())?; + writer.write_all(self.0[20].as_slice())?; + writer.write_all(self.0[21].as_slice())?; + writer.write_all(self.0[22].as_slice())?; + writer.write_all(self.0[23].as_slice())?; + writer.write_all(self.0[24].as_slice())?; + writer.write_all(self.0[25].as_slice())?; + writer.write_all(self.0[26].as_slice())?; + writer.write_all(self.0[27].as_slice())?; + writer.write_all(self.0[28].as_slice())?; + writer.write_all(self.0[29].as_slice())?; + writer.write_all(self.0[30].as_slice())?; + writer.write_all(self.0[31].as_slice())?; + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + Uint256::new_unchecked(inner.into()) + } +} +#[derive(Clone)] +pub struct Bytes(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for Bytes { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for Bytes { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for Bytes { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl ::core::default::Default for Bytes { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + Bytes::new_unchecked(v) + } +} +impl Bytes { + const DEFAULT_VALUE: [u8; 4] = [0, 0, 0, 0]; + pub const ITEM_SIZE: usize = 1; + pub fn total_size(&self) -> usize { + molecule::NUMBER_SIZE + Self::ITEM_SIZE * self.item_count() + } + pub fn item_count(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn len(&self) -> usize { + self.item_count() + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn get(&self, idx: usize) -> Option { + if idx >= self.len() { + None + } else { + Some(self.get_unchecked(idx)) + } + } + pub fn get_unchecked(&self, idx: usize) -> Byte { + let start = molecule::NUMBER_SIZE + Self::ITEM_SIZE * idx; + let end = start + Self::ITEM_SIZE; + Byte::new_unchecked(self.0.slice(start..end)) + } + pub fn raw_data(&self) -> molecule::bytes::Bytes { + self.0.slice(molecule::NUMBER_SIZE..) + } + pub fn as_reader<'r>(&'r self) -> BytesReader<'r> { + BytesReader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for Bytes { + type Builder = BytesBuilder; + const NAME: &'static str = "Bytes"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + Bytes(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BytesReader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BytesReader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder().extend(self.into_iter()) + } +} +#[derive(Clone, Copy)] +pub struct BytesReader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for BytesReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for BytesReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for BytesReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + let raw_data = hex_string(&self.raw_data()); + write!(f, "{}(0x{})", Self::NAME, raw_data) + } +} +impl<'r> BytesReader<'r> { + pub const ITEM_SIZE: usize = 1; + pub fn total_size(&self) -> usize { + molecule::NUMBER_SIZE + Self::ITEM_SIZE * self.item_count() + } + pub fn item_count(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn len(&self) -> usize { + self.item_count() + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn get(&self, idx: usize) -> Option> { + if idx >= self.len() { + None + } else { + Some(self.get_unchecked(idx)) + } + } + pub fn get_unchecked(&self, idx: usize) -> ByteReader<'r> { + let start = molecule::NUMBER_SIZE + Self::ITEM_SIZE * idx; + let end = start + Self::ITEM_SIZE; + ByteReader::new_unchecked(&self.as_slice()[start..end]) + } + pub fn raw_data(&self) -> &'r [u8] { + &self.as_slice()[molecule::NUMBER_SIZE..] + } +} +impl<'r> molecule::prelude::Reader<'r> for BytesReader<'r> { + type Entity = Bytes; + const NAME: &'static str = "BytesReader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + BytesReader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], _compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len < molecule::NUMBER_SIZE { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE, slice_len); + } + let item_count = molecule::unpack_number(slice) as usize; + if item_count == 0 { + if slice_len != molecule::NUMBER_SIZE { + return ve!(Self, TotalSizeNotMatch, molecule::NUMBER_SIZE, slice_len); + } + return Ok(()); + } + let total_size = molecule::NUMBER_SIZE + Self::ITEM_SIZE * item_count; + if slice_len != total_size { + return ve!(Self, TotalSizeNotMatch, total_size, slice_len); + } + Ok(()) + } +} +#[derive(Debug, Default)] +pub struct BytesBuilder(pub(crate) Vec); +impl BytesBuilder { + pub const ITEM_SIZE: usize = 1; + pub fn set(mut self, v: Vec) -> Self { + self.0 = v; + self + } + pub fn push(mut self, v: Byte) -> Self { + self.0.push(v); + self + } + pub fn extend>(mut self, iter: T) -> Self { + for elem in iter { + self.0.push(elem); + } + self + } + pub fn replace(&mut self, index: usize, v: Byte) -> Option { + self.0 + .get_mut(index) + .map(|item| ::core::mem::replace(item, v)) + } +} +impl molecule::prelude::Builder for BytesBuilder { + type Entity = Bytes; + const NAME: &'static str = "BytesBuilder"; + fn expected_length(&self) -> usize { + molecule::NUMBER_SIZE + Self::ITEM_SIZE * self.0.len() + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + writer.write_all(&molecule::pack_number(self.0.len() as molecule::Number))?; + for inner in &self.0[..] { + writer.write_all(inner.as_slice())?; + } + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + Bytes::new_unchecked(inner.into()) + } +} +pub struct BytesIterator(Bytes, usize, usize); +impl ::core::iter::Iterator for BytesIterator { + type Item = Byte; + fn next(&mut self) -> Option { + if self.1 >= self.2 { + None + } else { + let ret = self.0.get_unchecked(self.1); + self.1 += 1; + Some(ret) + } + } +} +impl ::core::iter::ExactSizeIterator for BytesIterator { + fn len(&self) -> usize { + self.2 - self.1 + } +} +impl ::core::iter::IntoIterator for Bytes { + type Item = Byte; + type IntoIter = BytesIterator; + fn into_iter(self) -> Self::IntoIter { + let len = self.len(); + BytesIterator(self, 0, len) + } +} +#[derive(Clone)] +pub struct BytesOpt(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for BytesOpt { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for BytesOpt { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for BytesOpt { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + if let Some(v) = self.to_opt() { + write!(f, "{}(Some({}))", Self::NAME, v) + } else { + write!(f, "{}(None)", Self::NAME) + } + } +} +impl ::core::default::Default for BytesOpt { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + BytesOpt::new_unchecked(v) + } +} +impl BytesOpt { + const DEFAULT_VALUE: [u8; 0] = []; + pub fn is_none(&self) -> bool { + self.0.is_empty() + } + pub fn is_some(&self) -> bool { + !self.0.is_empty() + } + pub fn to_opt(&self) -> Option { + if self.is_none() { + None + } else { + Some(Bytes::new_unchecked(self.0.clone())) + } + } + pub fn as_reader<'r>(&'r self) -> BytesOptReader<'r> { + BytesOptReader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for BytesOpt { + type Builder = BytesOptBuilder; + const NAME: &'static str = "BytesOpt"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + BytesOpt(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BytesOptReader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BytesOptReader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder().set(self.to_opt()) + } +} +#[derive(Clone, Copy)] +pub struct BytesOptReader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for BytesOptReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for BytesOptReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for BytesOptReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + if let Some(v) = self.to_opt() { + write!(f, "{}(Some({}))", Self::NAME, v) + } else { + write!(f, "{}(None)", Self::NAME) + } + } +} +impl<'r> BytesOptReader<'r> { + pub fn is_none(&self) -> bool { + self.0.is_empty() + } + pub fn is_some(&self) -> bool { + !self.0.is_empty() + } + pub fn to_opt(&self) -> Option> { + if self.is_none() { + None + } else { + Some(BytesReader::new_unchecked(self.as_slice())) + } + } +} +impl<'r> molecule::prelude::Reader<'r> for BytesOptReader<'r> { + type Entity = BytesOpt; + const NAME: &'static str = "BytesOptReader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + BytesOptReader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], compatible: bool) -> molecule::error::VerificationResult<()> { + if !slice.is_empty() { + BytesReader::verify(&slice[..], compatible)?; + } + Ok(()) + } +} +#[derive(Debug, Default)] +pub struct BytesOptBuilder(pub(crate) Option); +impl BytesOptBuilder { + pub fn set(mut self, v: Option) -> Self { + self.0 = v; + self + } +} +impl molecule::prelude::Builder for BytesOptBuilder { + type Entity = BytesOpt; + const NAME: &'static str = "BytesOptBuilder"; + fn expected_length(&self) -> usize { + self.0 + .as_ref() + .map(|ref inner| inner.as_slice().len()) + .unwrap_or(0) + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + self.0 + .as_ref() + .map(|ref inner| writer.write_all(inner.as_slice())) + .unwrap_or(Ok(())) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + BytesOpt::new_unchecked(inner.into()) + } +} +#[derive(Clone)] +pub struct BytesVec(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for BytesVec { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for BytesVec { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for BytesVec { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} [", Self::NAME)?; + for i in 0..self.len() { + if i == 0 { + write!(f, "{}", self.get_unchecked(i))?; + } else { + write!(f, ", {}", self.get_unchecked(i))?; + } + } + write!(f, "]") + } +} +impl ::core::default::Default for BytesVec { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + BytesVec::new_unchecked(v) + } +} +impl BytesVec { + const DEFAULT_VALUE: [u8; 4] = [4, 0, 0, 0]; + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn item_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn len(&self) -> usize { + self.item_count() + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn get(&self, idx: usize) -> Option { + if idx >= self.len() { + None + } else { + Some(self.get_unchecked(idx)) + } + } + pub fn get_unchecked(&self, idx: usize) -> Bytes { + let slice = self.as_slice(); + let start_idx = molecule::NUMBER_SIZE * (1 + idx); + let start = molecule::unpack_number(&slice[start_idx..]) as usize; + if idx == self.len() - 1 { + Bytes::new_unchecked(self.0.slice(start..)) + } else { + let end_idx = start_idx + molecule::NUMBER_SIZE; + let end = molecule::unpack_number(&slice[end_idx..]) as usize; + Bytes::new_unchecked(self.0.slice(start..end)) + } + } + pub fn as_reader<'r>(&'r self) -> BytesVecReader<'r> { + BytesVecReader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for BytesVec { + type Builder = BytesVecBuilder; + const NAME: &'static str = "BytesVec"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + BytesVec(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BytesVecReader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + BytesVecReader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder().extend(self.into_iter()) + } +} +#[derive(Clone, Copy)] +pub struct BytesVecReader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for BytesVecReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for BytesVecReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for BytesVecReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} [", Self::NAME)?; + for i in 0..self.len() { + if i == 0 { + write!(f, "{}", self.get_unchecked(i))?; + } else { + write!(f, ", {}", self.get_unchecked(i))?; + } + } + write!(f, "]") + } +} +impl<'r> BytesVecReader<'r> { + pub fn total_size(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn item_count(&self) -> usize { + if self.total_size() == molecule::NUMBER_SIZE { + 0 + } else { + (molecule::unpack_number(&self.as_slice()[molecule::NUMBER_SIZE..]) as usize / 4) - 1 + } + } + pub fn len(&self) -> usize { + self.item_count() + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn get(&self, idx: usize) -> Option> { + if idx >= self.len() { + None + } else { + Some(self.get_unchecked(idx)) + } + } + pub fn get_unchecked(&self, idx: usize) -> BytesReader<'r> { + let slice = self.as_slice(); + let start_idx = molecule::NUMBER_SIZE * (1 + idx); + let start = molecule::unpack_number(&slice[start_idx..]) as usize; + if idx == self.len() - 1 { + BytesReader::new_unchecked(&self.as_slice()[start..]) + } else { + let end_idx = start_idx + molecule::NUMBER_SIZE; + let end = molecule::unpack_number(&slice[end_idx..]) as usize; + BytesReader::new_unchecked(&self.as_slice()[start..end]) + } + } +} +impl<'r> molecule::prelude::Reader<'r> for BytesVecReader<'r> { + type Entity = BytesVec; + const NAME: &'static str = "BytesVecReader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + BytesVecReader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len < molecule::NUMBER_SIZE { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE, slice_len); + } + let total_size = molecule::unpack_number(slice) as usize; + if slice_len != total_size { + return ve!(Self, TotalSizeNotMatch, total_size, slice_len); + } + if slice_len == molecule::NUMBER_SIZE { + return Ok(()); + } + if slice_len < molecule::NUMBER_SIZE * 2 { + return ve!( + Self, + TotalSizeNotMatch, + molecule::NUMBER_SIZE * 2, + slice_len + ); + } + let offset_first = molecule::unpack_number(&slice[molecule::NUMBER_SIZE..]) as usize; + if offset_first % molecule::NUMBER_SIZE != 0 || offset_first < molecule::NUMBER_SIZE * 2 { + return ve!(Self, OffsetsNotMatch); + } + if slice_len < offset_first { + return ve!(Self, HeaderIsBroken, offset_first, slice_len); + } + let mut offsets: Vec = slice[molecule::NUMBER_SIZE..offset_first] + .chunks_exact(molecule::NUMBER_SIZE) + .map(|x| molecule::unpack_number(x) as usize) + .collect(); + offsets.push(total_size); + if offsets.windows(2).any(|i| i[0] > i[1]) { + return ve!(Self, OffsetsNotMatch); + } + for pair in offsets.windows(2) { + let start = pair[0]; + let end = pair[1]; + BytesReader::verify(&slice[start..end], compatible)?; + } + Ok(()) + } +} +#[derive(Debug, Default)] +pub struct BytesVecBuilder(pub(crate) Vec); +impl BytesVecBuilder { + pub fn set(mut self, v: Vec) -> Self { + self.0 = v; + self + } + pub fn push(mut self, v: Bytes) -> Self { + self.0.push(v); + self + } + pub fn extend>(mut self, iter: T) -> Self { + for elem in iter { + self.0.push(elem); + } + self + } + pub fn replace(&mut self, index: usize, v: Bytes) -> Option { + self.0 + .get_mut(index) + .map(|item| ::core::mem::replace(item, v)) + } +} +impl molecule::prelude::Builder for BytesVecBuilder { + type Entity = BytesVec; + const NAME: &'static str = "BytesVecBuilder"; + fn expected_length(&self) -> usize { + molecule::NUMBER_SIZE * (self.0.len() + 1) + + self + .0 + .iter() + .map(|inner| inner.as_slice().len()) + .sum::() + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + let item_count = self.0.len(); + if item_count == 0 { + writer.write_all(&molecule::pack_number( + molecule::NUMBER_SIZE as molecule::Number, + ))?; + } else { + let (total_size, offsets) = self.0.iter().fold( + ( + molecule::NUMBER_SIZE * (item_count + 1), + Vec::with_capacity(item_count), + ), + |(start, mut offsets), inner| { + offsets.push(start); + (start + inner.as_slice().len(), offsets) + }, + ); + writer.write_all(&molecule::pack_number(total_size as molecule::Number))?; + for offset in offsets.into_iter() { + writer.write_all(&molecule::pack_number(offset as molecule::Number))?; + } + for inner in self.0.iter() { + writer.write_all(inner.as_slice())?; + } + } + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + BytesVec::new_unchecked(inner.into()) + } +} +pub struct BytesVecIterator(BytesVec, usize, usize); +impl ::core::iter::Iterator for BytesVecIterator { + type Item = Bytes; + fn next(&mut self) -> Option { + if self.1 >= self.2 { + None + } else { + let ret = self.0.get_unchecked(self.1); + self.1 += 1; + Some(ret) + } + } +} +impl ::core::iter::ExactSizeIterator for BytesVecIterator { + fn len(&self) -> usize { + self.2 - self.1 + } +} +impl ::core::iter::IntoIterator for BytesVec { + type Item = Bytes; + type IntoIter = BytesVecIterator; + fn into_iter(self) -> Self::IntoIter { + let len = self.len(); + BytesVecIterator(self, 0, len) + } +} +impl<'r> BytesVecReader<'r> { + pub fn iter<'t>(&'t self) -> BytesVecReaderIterator<'t, 'r> { + BytesVecReaderIterator(&self, 0, self.len()) + } +} +pub struct BytesVecReaderIterator<'t, 'r>(&'t BytesVecReader<'r>, usize, usize); +impl<'t: 'r, 'r> ::core::iter::Iterator for BytesVecReaderIterator<'t, 'r> { + type Item = BytesReader<'t>; + fn next(&mut self) -> Option { + if self.1 >= self.2 { + None + } else { + let ret = self.0.get_unchecked(self.1); + self.1 += 1; + Some(ret) + } + } +} +impl<'t: 'r, 'r> ::core::iter::ExactSizeIterator for BytesVecReaderIterator<'t, 'r> { + fn len(&self) -> usize { + self.2 - self.1 + } +} +#[derive(Clone)] +pub struct Byte32Vec(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for Byte32Vec { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for Byte32Vec { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for Byte32Vec { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} [", Self::NAME)?; + for i in 0..self.len() { + if i == 0 { + write!(f, "{}", self.get_unchecked(i))?; + } else { + write!(f, ", {}", self.get_unchecked(i))?; + } + } + write!(f, "]") + } +} +impl ::core::default::Default for Byte32Vec { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + Byte32Vec::new_unchecked(v) + } +} +impl Byte32Vec { + const DEFAULT_VALUE: [u8; 4] = [0, 0, 0, 0]; + pub const ITEM_SIZE: usize = 32; + pub fn total_size(&self) -> usize { + molecule::NUMBER_SIZE + Self::ITEM_SIZE * self.item_count() + } + pub fn item_count(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn len(&self) -> usize { + self.item_count() + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn get(&self, idx: usize) -> Option { + if idx >= self.len() { + None + } else { + Some(self.get_unchecked(idx)) + } + } + pub fn get_unchecked(&self, idx: usize) -> Byte32 { + let start = molecule::NUMBER_SIZE + Self::ITEM_SIZE * idx; + let end = start + Self::ITEM_SIZE; + Byte32::new_unchecked(self.0.slice(start..end)) + } + pub fn as_reader<'r>(&'r self) -> Byte32VecReader<'r> { + Byte32VecReader::new_unchecked(self.as_slice()) + } +} +impl molecule::prelude::Entity for Byte32Vec { + type Builder = Byte32VecBuilder; + const NAME: &'static str = "Byte32Vec"; + fn new_unchecked(data: molecule::bytes::Bytes) -> Self { + Byte32Vec(data) + } + fn as_bytes(&self) -> molecule::bytes::Bytes { + self.0.clone() + } + fn as_slice(&self) -> &[u8] { + &self.0[..] + } + fn from_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Byte32VecReader::from_slice(slice).map(|reader| reader.to_entity()) + } + fn from_compatible_slice(slice: &[u8]) -> molecule::error::VerificationResult { + Byte32VecReader::from_compatible_slice(slice).map(|reader| reader.to_entity()) + } + fn new_builder() -> Self::Builder { + ::core::default::Default::default() + } + fn as_builder(self) -> Self::Builder { + Self::new_builder().extend(self.into_iter()) + } +} +#[derive(Clone, Copy)] +pub struct Byte32VecReader<'r>(&'r [u8]); +impl<'r> ::core::fmt::LowerHex for Byte32VecReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl<'r> ::core::fmt::Debug for Byte32VecReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl<'r> ::core::fmt::Display for Byte32VecReader<'r> { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{} [", Self::NAME)?; + for i in 0..self.len() { + if i == 0 { + write!(f, "{}", self.get_unchecked(i))?; + } else { + write!(f, ", {}", self.get_unchecked(i))?; + } + } + write!(f, "]") + } +} +impl<'r> Byte32VecReader<'r> { + pub const ITEM_SIZE: usize = 32; + pub fn total_size(&self) -> usize { + molecule::NUMBER_SIZE + Self::ITEM_SIZE * self.item_count() + } + pub fn item_count(&self) -> usize { + molecule::unpack_number(self.as_slice()) as usize + } + pub fn len(&self) -> usize { + self.item_count() + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn get(&self, idx: usize) -> Option> { + if idx >= self.len() { + None + } else { + Some(self.get_unchecked(idx)) + } + } + pub fn get_unchecked(&self, idx: usize) -> Byte32Reader<'r> { + let start = molecule::NUMBER_SIZE + Self::ITEM_SIZE * idx; + let end = start + Self::ITEM_SIZE; + Byte32Reader::new_unchecked(&self.as_slice()[start..end]) + } +} +impl<'r> molecule::prelude::Reader<'r> for Byte32VecReader<'r> { + type Entity = Byte32Vec; + const NAME: &'static str = "Byte32VecReader"; + fn to_entity(&self) -> Self::Entity { + Self::Entity::new_unchecked(self.as_slice().to_owned().into()) + } + fn new_unchecked(slice: &'r [u8]) -> Self { + Byte32VecReader(slice) + } + fn as_slice(&self) -> &'r [u8] { + self.0 + } + fn verify(slice: &[u8], _compatible: bool) -> molecule::error::VerificationResult<()> { + use molecule::verification_error as ve; + let slice_len = slice.len(); + if slice_len < molecule::NUMBER_SIZE { + return ve!(Self, HeaderIsBroken, molecule::NUMBER_SIZE, slice_len); + } + let item_count = molecule::unpack_number(slice) as usize; + if item_count == 0 { + if slice_len != molecule::NUMBER_SIZE { + return ve!(Self, TotalSizeNotMatch, molecule::NUMBER_SIZE, slice_len); + } + return Ok(()); + } + let total_size = molecule::NUMBER_SIZE + Self::ITEM_SIZE * item_count; + if slice_len != total_size { + return ve!(Self, TotalSizeNotMatch, total_size, slice_len); + } + Ok(()) + } +} +#[derive(Debug, Default)] +pub struct Byte32VecBuilder(pub(crate) Vec); +impl Byte32VecBuilder { + pub const ITEM_SIZE: usize = 32; + pub fn set(mut self, v: Vec) -> Self { + self.0 = v; + self + } + pub fn push(mut self, v: Byte32) -> Self { + self.0.push(v); + self + } + pub fn extend>(mut self, iter: T) -> Self { + for elem in iter { + self.0.push(elem); + } + self + } + pub fn replace(&mut self, index: usize, v: Byte32) -> Option { + self.0 + .get_mut(index) + .map(|item| ::core::mem::replace(item, v)) + } +} +impl molecule::prelude::Builder for Byte32VecBuilder { + type Entity = Byte32Vec; + const NAME: &'static str = "Byte32VecBuilder"; + fn expected_length(&self) -> usize { + molecule::NUMBER_SIZE + Self::ITEM_SIZE * self.0.len() + } + fn write(&self, writer: &mut W) -> molecule::io::Result<()> { + writer.write_all(&molecule::pack_number(self.0.len() as molecule::Number))?; + for inner in &self.0[..] { + writer.write_all(inner.as_slice())?; + } + Ok(()) + } + fn build(&self) -> Self::Entity { + let mut inner = Vec::with_capacity(self.expected_length()); + self.write(&mut inner) + .unwrap_or_else(|_| panic!("{} build should be ok", Self::NAME)); + Byte32Vec::new_unchecked(inner.into()) + } +} +pub struct Byte32VecIterator(Byte32Vec, usize, usize); +impl ::core::iter::Iterator for Byte32VecIterator { + type Item = Byte32; + fn next(&mut self) -> Option { + if self.1 >= self.2 { + None + } else { + let ret = self.0.get_unchecked(self.1); + self.1 += 1; + Some(ret) + } + } +} +impl ::core::iter::ExactSizeIterator for Byte32VecIterator { + fn len(&self) -> usize { + self.2 - self.1 + } +} +impl ::core::iter::IntoIterator for Byte32Vec { + type Item = Byte32; + type IntoIter = Byte32VecIterator; + fn into_iter(self) -> Self::IntoIter { + let len = self.len(); + Byte32VecIterator(self, 0, len) + } +} +impl<'r> Byte32VecReader<'r> { + pub fn iter<'t>(&'t self) -> Byte32VecReaderIterator<'t, 'r> { + Byte32VecReaderIterator(&self, 0, self.len()) + } +} +pub struct Byte32VecReaderIterator<'t, 'r>(&'t Byte32VecReader<'r>, usize, usize); +impl<'t: 'r, 'r> ::core::iter::Iterator for Byte32VecReaderIterator<'t, 'r> { + type Item = Byte32Reader<'t>; + fn next(&mut self) -> Option { + if self.1 >= self.2 { + None + } else { + let ret = self.0.get_unchecked(self.1); + self.1 += 1; + Some(ret) + } + } +} +impl<'t: 'r, 'r> ::core::iter::ExactSizeIterator for Byte32VecReaderIterator<'t, 'r> { + fn len(&self) -> usize { + self.2 - self.1 + } +} +#[derive(Clone)] +pub struct ScriptOpt(molecule::bytes::Bytes); +impl ::core::fmt::LowerHex for ScriptOpt { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + use molecule::hex_string; + if f.alternate() { + write!(f, "0x")?; + } + write!(f, "{}", hex_string(self.as_slice())) + } +} +impl ::core::fmt::Debug for ScriptOpt { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + write!(f, "{}({:#x})", Self::NAME, self) + } +} +impl ::core::fmt::Display for ScriptOpt { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + if let Some(v) = self.to_opt() { + write!(f, "{}(Some({}))", Self::NAME, v) + } else { + write!(f, "{}(None)", Self::NAME) + } + } +} +impl ::core::default::Default for ScriptOpt { + fn default() -> Self { + let v = molecule::bytes::Bytes::from_static(&Self::DEFAULT_VALUE); + ScriptOpt::new_unchecked(v) + } +} +impl ScriptOpt { + const DEFAULT_VALUE: [u8; 0] = []; + pub fn is_none(&self) -> bool { + self.0.is_empty() + } + pub fn is_some(&self) -> bool { + !self.0.is_empty() + } + pub fn to_opt(&self) -> Option