
Commit a747040

Merge branch 'david/eip-7702' of github.com:polytope-labs/polkadot-sdk into david/eip-7702

2 parents 3f89998 + 4c0365a

14 files changed: +678 -208 lines
Lines changed: 30 additions & 0 deletions

@@ -0,0 +1,30 @@
+{
+  "fixtures/solidity/complex/create/create2_many/test.json::1::Y M3": "Failed",
+  "fixtures/solidity/complex/create/create_in_library/test.json::0::Y M0": "Failed",
+  "fixtures/solidity/complex/create/create_in_library/test.json::0::Y M3": "Failed",
+  "fixtures/solidity/complex/create/create_many/test.json::1::Y M3": "Failed",
+  "fixtures/solidity/complex/library_call_tuple/test.json::0::Y M0": "Failed",
+  "fixtures/solidity/complex/library_call_tuple/test.json::0::Y M3": "Failed",
+  "fixtures/solidity/complex/solidity_by_example/applications/iterable_mapping/test.json::0::Y M0 >=0.8.1": "Failed",
+  "fixtures/solidity/complex/solidity_by_example/applications/iterable_mapping/test.json::0::Y M3 >=0.8.1": "Failed",
+  "fixtures/solidity/simple/internal_function_pointers/sum_oddness.sol::0::Y M3": "Failed",
+  "fixtures/solidity/simple/internal_function_pointers/sum_oddness.sol::1::Y M3": "Failed",
+  "fixtures/solidity/simple/pointer/large_offset.sol::0::Y M0": "Failed",
+  "fixtures/solidity/simple/pointer/large_offset.sol::0::Y M3": "Failed",
+  "fixtures/solidity/simple/try_catch/unbalanced_gas_limit.sol::0::Y M0": "Failed",
+  "fixtures/solidity/simple/try_catch/unbalanced_gas_limit.sol::0::Y M3": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::59::Y M0": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::59::Y M3": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::60::Y M0": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::60::Y M3": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::61::Y M0": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::61::Y M3": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::75::Y M0": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::75::Y M3": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::197::Y M0": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::197::Y M3": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::202::Y M0": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::202::Y M3": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::207::Y M0": "Failed",
+  "fixtures/solidity/simple/yul_instructions/revert.sol::207::Y M3": "Failed"
+}
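
The entries above act as a known-failures allowlist for the revive differential tests: each key identifies a fixture path, a case index and a compile mode, and the value records the expected outcome. The workflow change further down wires these files in via its new expectations-file-path input. As a rough illustration of how such a map could be consulted, here is a minimal Rust sketch; the helper names and the default-to-"Passed" behaviour are assumptions for illustration, not part of the actual test runner.

use std::collections::BTreeMap;

// Hypothetical expectations map, keyed the same way as the JSON above:
// "<fixture path>::<case index>::<mode>" -> expected outcome.
fn expectations() -> BTreeMap<&'static str, &'static str> {
    BTreeMap::from([
        ("fixtures/solidity/simple/pointer/large_offset.sol::0::Y M0", "Failed"),
        ("fixtures/solidity/simple/pointer/large_offset.sol::0::Y M3", "Failed"),
    ])
}

/// A case is only surprising if its actual outcome differs from the recorded
/// expectation; cases without an entry are assumed to be expected to pass.
fn is_unexpected(case_id: &str, actual: &str, expected: &BTreeMap<&str, &str>) -> bool {
    expected.get(case_id).copied().unwrap_or("Passed") != actual
}

fn main() {
    let expected = expectations();
    // A listed known failure that still fails is not flagged.
    assert!(!is_unexpected(
        "fixtures/solidity/simple/pointer/large_offset.sol::0::Y M0",
        "Failed",
        &expected,
    ));
    // An unlisted placeholder case that fails would be flagged.
    assert!(is_unexpected(
        "fixtures/solidity/simple/some_other_case.sol::0::Y M0",
        "Failed",
        &expected,
    ));
}
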
Lines changed: 1 addition & 0 deletions

@@ -0,0 +1 @@
+{}

.github/workflows/tests-evm.yml

Lines changed: 4 additions & 2 deletions

@@ -29,6 +29,7 @@ jobs:
     permissions:
       pull-requests: write
     strategy:
+      fail-fast: false
       matrix:
         platform:
           ["revive-dev-node-revm-solc", "revive-dev-node-polkavm-resolc"]
@@ -38,12 +39,13 @@ jobs:
       - name: Update the Installed Python
         run: apt-get update && apt-get install -y python3-pip python3
      - name: Run revive differential tests
-        uses: paritytech/revive-differential-tests/.github/actions/run-differential-tests@main
+        uses: paritytech/revive-differential-tests/.github/actions/run-differential-tests@be448d78bd83a755fdcab96849971ecdf4eebce0
         with:
           platform: ${{ matrix.platform }}
           cargo-command: "forklift cargo"
-          revive-differential-tests-ref: "main"
+          revive-differential-tests-ref: "be448d78bd83a755fdcab96849971ecdf4eebce0"
           resolc-version: "0.5.0"
+          expectations-file-path: ./.github/assets/${{ matrix.platform }}.json
 
   evm-test-suite:
     needs: [preflight]

cumulus/pallets/parachain-system/src/lib.rs

Lines changed: 58 additions & 16 deletions

@@ -109,6 +109,7 @@ pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
 pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
 pub use unincluded_segment::{Ancestor, UsedBandwidth};
 
+use crate::parachain_inherent::AbridgedInboundMessagesSizeInfo;
 pub use pallet::*;
 
 const LOG_TARGET: &str = "parachain-system";
@@ -1164,7 +1165,7 @@ impl<T: Config> Pallet<T> {
         expected_dmq_mqc_head: relay_chain::Hash,
         downward_messages: AbridgedInboundDownwardMessages,
     ) -> Weight {
-        downward_messages.check_enough_messages_included("DMQ");
+        downward_messages.check_enough_messages_included_basic("DMQ");
 
         let mut dmq_head = <LastDmqMqcHead<T>>::get();
 
@@ -1210,6 +1211,25 @@ impl<T: Config> Pallet<T> {
         weight_used
     }
 
+    fn get_ingress_channel_or_panic(
+        ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
+        sender: ParaId,
+    ) -> &cumulus_primitives_core::AbridgedHrmpChannel {
+        let maybe_channel_idx = ingress_channels
+            .binary_search_by_key(&sender, |&(channel_sender, _)| channel_sender)
+            .ok();
+        let maybe_channel = maybe_channel_idx
+            .and_then(|channel_idx| ingress_channels.get(channel_idx))
+            .map(|(_, channel)| channel);
+        maybe_channel.unwrap_or_else(|| {
+            panic!(
+                "One of the messages submitted by the collator was sent from a sender ({}) \
+                that doesn't have a channel opened to this parachain",
+                <ParaId as Into<u32>>::into(sender)
+            )
+        })
+    }
+
     fn check_hrmp_mcq_heads(
         ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
         mqc_heads: &mut BTreeMap<ParaId, MessageQueueChain>,
@@ -1243,17 +1263,8 @@ impl<T: Config> Pallet<T> {
         }
         *maybe_prev_msg_metadata = Some(msg_metadata);
 
-        // Check that the message is sent from an existing channel. The channel exists
-        // if its MQC head is present in `vfp.hrmp_mqc_heads`.
-        let sender = msg_metadata.1;
-        let maybe_channel_idx =
-            ingress_channels.binary_search_by_key(&sender, |&(channel_sender, _)| channel_sender);
-        assert!(
-            maybe_channel_idx.is_ok(),
-            "One of the messages submitted by the collator was sent from a sender ({}) \
-            that doesn't have a channel opened to this parachain",
-            <ParaId as Into<u32>>::into(sender)
-        );
+        // Check that the message is sent from an existing channel.
+        Self::get_ingress_channel_or_panic(ingress_channels, msg_metadata.1);
     }
 
     /// Process all inbound horizontal messages relayed by the collator.
@@ -1271,18 +1282,34 @@ impl<T: Config> Pallet<T> {
         horizontal_messages: AbridgedInboundHrmpMessages,
         relay_parent_number: relay_chain::BlockNumber,
     ) -> Weight {
+        let mut mqc_heads = <LastHrmpMqcHeads<T>>::get();
+        let (messages, hashed_messages) = horizontal_messages.messages();
+
         // First, check the HRMP advancement rule.
-        horizontal_messages.check_enough_messages_included("HRMP");
+        let maybe_first_hashed_msg_sender = hashed_messages.first().map(|(sender, _msg)| *sender);
+        if let Some(first_hashed_msg_sender) = maybe_first_hashed_msg_sender {
+            let channel =
+                Self::get_ingress_channel_or_panic(ingress_channels, first_hashed_msg_sender);
+            horizontal_messages.check_enough_messages_included_advanced(
+                "HRMP",
+                AbridgedInboundMessagesSizeInfo {
+                    max_full_messages_size: Self::messages_collection_size_limit(),
+                    first_hashed_msg_max_size: channel.max_message_size as usize,
+                },
+            );
+        }
 
-        let (messages, hashed_messages) = horizontal_messages.messages();
-        let mut mqc_heads = <LastHrmpMqcHeads<T>>::get();
+        Self::prune_closed_mqc_heads(ingress_channels, &mut mqc_heads);
 
         if messages.is_empty() {
             Self::check_hrmp_mcq_heads(ingress_channels, &mut mqc_heads);
             let last_processed_msg =
                 InboundMessageId { sent_at: relay_parent_number, reverse_idx: 0 };
+
             LastProcessedHrmpMessage::<T>::put(last_processed_msg);
             HrmpWatermark::<T>::put(relay_parent_number);
+            LastHrmpMqcHeads::<T>::put(&mqc_heads); // write back in case of modification
+
             return T::DbWeight::get().reads_writes(1, 2);
         }
 
@@ -1302,7 +1329,9 @@ impl<T: Config> Pallet<T> {
             }
             last_processed_msg.sent_at = msg.sent_at;
         }
-        <LastHrmpMqcHeads<T>>::put(&mqc_heads);
+
+        LastHrmpMqcHeads::<T>::put(&mqc_heads);
+
         for (sender, msg) in hashed_messages {
             Self::check_hrmp_message_metadata(
                 ingress_channels,
@@ -1334,6 +1363,19 @@ impl<T: Config> Pallet<T> {
         weight_used.saturating_add(T::DbWeight::get().reads_writes(2, 3))
     }
 
+    /// Remove all MQC heads that do not correspond to an open channel.
+    fn prune_closed_mqc_heads(
+        ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
+        mqc_heads: &mut BTreeMap<ParaId, MessageQueueChain>,
+    ) {
+        // Complexity is O(N * lg N) but could be optimized for O(N)
+        mqc_heads.retain(|para, _| {
+            ingress_channels
+                .binary_search_by_key(para, |&(channel_sender, _)| channel_sender)
+                .is_ok()
+        });
+    }
+
     /// Drop blocks from the unincluded segment with respect to the latest parachain head.
     fn maybe_drop_included_ancestors(
         relay_state_proof: &RelayChainStateProof,
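
For context on the new helpers: ingress_channels is expected to be sorted by sender ParaId, so a binary search can locate a sender's channel (panicking if none is open) and stale MQC heads can be pruned. The following is a minimal standalone sketch of that pattern, using u32 in place of ParaId and a simplified ChannelInfo struct; it is an illustration under those assumptions, not the pallet code itself.

use std::collections::BTreeMap;

// Hypothetical stand-ins for the pallet's `ParaId`, `AbridgedHrmpChannel` and
// `MessageQueueChain` types; only what the lookup needs is modelled here.
type ParaId = u32;

struct ChannelInfo {
    max_message_size: u32,
}

type MqcHead = [u8; 32];

/// Look up the ingress channel for `sender`, panicking if none is open.
/// The slice must be sorted by sender id, as in the pallet.
fn get_ingress_channel_or_panic(
    ingress_channels: &[(ParaId, ChannelInfo)],
    sender: ParaId,
) -> &ChannelInfo {
    ingress_channels
        .binary_search_by_key(&sender, |&(channel_sender, _)| channel_sender)
        .ok()
        .and_then(|idx| ingress_channels.get(idx))
        .map(|(_, channel)| channel)
        .unwrap_or_else(|| panic!("sender {sender} has no channel opened to this parachain"))
}

/// Drop MQC heads whose sender no longer has an open ingress channel.
fn prune_closed_mqc_heads(
    ingress_channels: &[(ParaId, ChannelInfo)],
    mqc_heads: &mut BTreeMap<ParaId, MqcHead>,
) {
    mqc_heads.retain(|para, _| {
        ingress_channels
            .binary_search_by_key(para, |&(channel_sender, _)| channel_sender)
            .is_ok()
    });
}

fn main() {
    // Ingress channels sorted by sender id.
    let channels = vec![
        (100, ChannelInfo { max_message_size: 1024 }),
        (200, ChannelInfo { max_message_size: 2048 }),
    ];

    let mut heads: BTreeMap<ParaId, MqcHead> = BTreeMap::new();
    heads.insert(100, [0u8; 32]);
    heads.insert(300, [0u8; 32]); // sender 300 no longer has a channel

    prune_closed_mqc_heads(&channels, &mut heads);
    assert!(heads.contains_key(&100) && !heads.contains_key(&300));

    let channel = get_ingress_channel_or_panic(&channels, 200);
    println!("max message size for sender 200: {}", channel.max_message_size);
}
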

cumulus/pallets/parachain-system/src/parachain_inherent.rs

Lines changed: 81 additions & 19 deletions

@@ -142,6 +142,14 @@ impl<Message: InboundMessage> InboundMessagesCollection<Message> {
         }
     }
 
+/// A struct containing some info about the expected size of the abridged inbound messages.
+pub struct AbridgedInboundMessagesSizeInfo {
+    /// The max size of the full messages collection
+    pub max_full_messages_size: usize,
+    /// The max size of the first hashed message
+    pub first_hashed_msg_max_size: usize,
+}
+
 /// A compressed collection of inbound messages.
 ///
 /// The first messages in the collection (up to a limit) contain the full message data.
@@ -161,29 +169,51 @@ impl<Message: InboundMessage> AbridgedInboundMessagesCollection<Message> {
         (&self.full_messages, &self.hashed_messages)
     }
 
-    /// Check that the current collection contains as many full messages as possible.
+    /// Check that the current collection contains at least 1 full message if needed.
+    pub fn check_enough_messages_included_basic(&self, collection_name: &str) {
+        if self.hashed_messages.is_empty() {
+            return;
+        }
+
+        // Here we just check that there is at least 1 full message.
+        assert!(
+            self.full_messages.len() >= 1,
+            "[{}] Advancement rule violation: full messages missing",
+            collection_name,
+        );
+    }
+
+    /// Check that the current collection contains as many full messages as possible, taking into
+    /// consideration the collection constraints.
     ///
     /// The `AbridgedInboundMessagesCollection` is provided to the runtime by a collator.
     /// A malicious collator can provide a collection that contains no full messages or fewer
     /// full messages than possible, leading to censorship.
-    pub fn check_enough_messages_included(&self, collection_name: &str) {
-        if self.hashed_messages.is_empty() {
-            return;
+    pub fn check_enough_messages_included_advanced(
+        &self,
+        collection_name: &str,
+        size_info: AbridgedInboundMessagesSizeInfo,
+    ) {
+        // We should check that the collection contains as many full messages as possible
+        // without exceeding the max expected size.
+        let AbridgedInboundMessagesSizeInfo { max_full_messages_size, first_hashed_msg_max_size } =
+            size_info;
+
+        let mut full_messages_size = 0usize;
+        for msg in &self.full_messages {
+            full_messages_size = full_messages_size.saturating_add(msg.data().len());
        }
 
-        // Ideally, we should check that the collection contains as many full messages as possible
-        // without exceeding the max expected size. The worst case scenario is that were the first
-        // message that had to be hashed is a max size message. So in this case, the min expected
-        // size would be `max_expected_size - max_msg_size`. However, there are multiple issues:
-        // 1. The max message size config can change while we still have to process messages with
-        // the old max message size.
-        // 2. We can't access the max downward message size from the parachain runtime.
-        //
-        // So the safest approach is to check that there is at least 1 full message.
+        // The worst case scenario is that the first message that had to be hashed
+        // is a max size message.
         assert!(
-            self.full_messages.len() >= 1,
-            "[{}] Advancement rule violation: mandatory messages missing",
+            full_messages_size.saturating_add(first_hashed_msg_max_size) > max_full_messages_size,
+            "[{}] Advancement rule violation: full messages size smaller than expected. \
+            full msgs size: {}, first hashed msg max size: {}, max full msgs size: {}",
             collection_name,
+            full_messages_size,
+            first_hashed_msg_max_size,
+            max_full_messages_size
         );
     }
 }
@@ -481,7 +511,7 @@ mod tests {
     }
 
     #[test]
-    fn check_enough_messages_included_works() {
+    fn check_enough_messages_included_basic_works() {
        let mut messages = AbridgedInboundHrmpMessages {
            full_messages: vec![(
                1000.into(),
@@ -493,13 +523,45 @@ mod tests {
            )],
        };
 
-        messages.check_enough_messages_included("Test");
+        messages.check_enough_messages_included_basic("Test");
 
        messages.full_messages = vec![];
-        let result = std::panic::catch_unwind(|| messages.check_enough_messages_included("Test"));
+        let result =
+            std::panic::catch_unwind(|| messages.check_enough_messages_included_basic("Test"));
        assert!(result.is_err());
 
        messages.hashed_messages = vec![];
-        messages.check_enough_messages_included("Test");
+        messages.check_enough_messages_included_basic("Test");
+    }
+
+    #[test]
+    fn check_enough_messages_included_advanced_works() {
+        let mixed_messages = AbridgedInboundHrmpMessages {
+            full_messages: vec![(
+                1000.into(),
+                InboundHrmpMessage { sent_at: 0, data: vec![1; 50] },
+            )],
+            hashed_messages: vec![(
+                2000.into(),
+                HashedMessage { sent_at: 1, msg_hash: Default::default() },
+            )],
+        };
+        let result = std::panic::catch_unwind(|| {
+            mixed_messages.check_enough_messages_included_advanced(
+                "Test",
+                AbridgedInboundMessagesSizeInfo {
+                    max_full_messages_size: 100,
+                    first_hashed_msg_max_size: 50,
+                },
+            )
+        });
+        assert!(result.is_err());
+        mixed_messages.check_enough_messages_included_advanced(
+            "Test",
+            AbridgedInboundMessagesSizeInfo {
+                max_full_messages_size: 100,
+                first_hashed_msg_max_size: 51,
+            },
+        );
    }
 }
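
The intent of the new advancement rule can be summed up as: a collator may only start hashing inbound messages once even the worst-case size of the next message would overflow the full-messages budget. A minimal sketch of that check follows, written as a hypothetical free function that mirrors the assertion in check_enough_messages_included_advanced and the values used in the new unit test; it is not pallet API.

/// Sketch of the size-based advancement check: hashing may only start once even the
/// worst-case size of the next message would overflow the full-messages budget.
/// `enough_full_messages_included` is a hypothetical free function, not pallet API.
fn enough_full_messages_included(
    full_message_sizes: &[usize],
    first_hashed_msg_max_size: usize,
    max_full_messages_size: usize,
) -> bool {
    // Total bytes of the messages the collator included in full.
    let full_messages_size = full_message_sizes
        .iter()
        .fold(0usize, |acc, len| acc.saturating_add(*len));

    // Same condition as the new assert: adding the first hashed message at its
    // maximum possible size must exceed the budget, otherwise it should have
    // been included in full.
    full_messages_size.saturating_add(first_hashed_msg_max_size) > max_full_messages_size
}

fn main() {
    // Mirrors the new unit test: 50 bytes included in full, budget of 100.
    // A hashed message of at most 50 bytes would still have fit, so this is rejected.
    assert!(!enough_full_messages_included(&[50], 50, 100));
    // One of up to 51 bytes would not have fit, so the collection is acceptable.
    assert!(enough_full_messages_included(&[50], 51, 100));
}
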
