 #define STORE_OUT_IDX  0
 #define NET_OUT_IDX    1
 #define SIGN_OUT_IDX   2
-#define REPLAY_OUT_IDX 3

 #define MAX_SLOTS_PER_EPOCH 432000UL
@@ -179,10 +178,11 @@ typedef struct {
   ulong             store_out_wmark;
   ulong             store_out_chunk;

-  fd_wksp_t *       replay_out_mem;
-  ulong             replay_out_chunk0;
-  ulong             replay_out_wmark;
-  ulong             replay_out_chunk;
+  fd_wksp_t *       repair_out_mem;
+  ulong             repair_out_chunk0;
+  ulong             repair_out_wmark;
+  ulong             repair_out_chunk;
+  ulong             repair_out_idx;

   fd_blockstore_t   blockstore_ljoin;
   fd_blockstore_t * blockstore;
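Note: the four `repair_out_*` fields follow the dcache ring pattern used throughout the tile: `chunk0` and `wmark` bound the valid chunk range, `chunk` is the write cursor, and `repair_out_idx` doubles as a presence flag (`ULONG_MAX` when the topology has no repair link). A minimal sketch of how the publish sites below use them together; the helper name is illustrative, and the calls simply mirror the ones in this patch:

```c
/* Illustrative helper only: mirrors the publish sites in this patch.
   chunk0/wmark bound the dcache ring; chunk is the cursor that is
   advanced (with wraparound) after each publish. */
static inline void
publish_to_repair( fd_shred_ctx_t *    ctx,
                   fd_stem_context_t * stem,
                   ulong               sig,
                   void const *        payload,
                   ulong               sz ) {
  uchar * dst = fd_chunk_to_laddr( ctx->repair_out_mem, ctx->repair_out_chunk );
  fd_memcpy( dst, payload, sz );                         /* stage frag body in the dcache */
  ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );  /* compressed publish timestamp  */
  fd_stem_publish( stem, ctx->repair_out_idx, sig, ctx->repair_out_chunk, sz, 0UL, ctx->tsorig, tspub );
  ctx->repair_out_chunk = fd_dcache_compact_next( ctx->repair_out_chunk, sz,
                                                  ctx->repair_out_chunk0, ctx->repair_out_wmark );
}
```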
@@ -556,6 +556,7 @@ after_frag( fd_shred_ctx_t * ctx,
   const ulong fanout = 200UL;
   fd_shred_dest_idx_t _dests[ 200*(FD_REEDSOL_DATA_SHREDS_MAX+FD_REEDSOL_PARITY_SHREDS_MAX) ];

+  fd_bmtree_node_t out_merkle_root;
   if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
     uchar * shred_buffer    = ctx->shred_buffer;
     ulong   shred_buffer_sz = ctx->shred_buffer_sz;
@@ -571,10 +572,9 @@ after_frag( fd_shred_ctx_t * ctx,

     fd_fec_set_t const * out_fec_set[ 1 ];
     fd_shred_t const *   out_shred[ 1 ];
-    fd_bmtree_node_t     out_merkle_root[ 1 ];

     long add_shred_timing = -fd_tickcount();
-    int rv = fd_fec_resolver_add_shred( ctx->resolver, shred, shred_buffer_sz, slot_leader->uc, out_fec_set, out_shred, out_merkle_root );
+    int rv = fd_fec_resolver_add_shred( ctx->resolver, shred, shred_buffer_sz, slot_leader->uc, out_fec_set, out_shred, &out_merkle_root );
     add_shred_timing += fd_tickcount();

     fd_histf_sample( ctx->metrics->add_shred_timing, (ulong)add_shred_timing );
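Note: hoisting `out_merkle_root` out of the `IN_KIND_NET` branch (and passing `&out_merkle_root` rather than a one-element array) is what lets the FEC-completion path further down read the root after the resolver returns. The resolver's return code then drives both publish paths. A comment-level sketch of the contract as it is used in this patch; only `SHRED_OKAY` and `SHRED_COMPLETES` appear in the diff itself, so treat the description of other outcomes as an assumption:

```c
/* Control flow implied by fd_fec_resolver_add_shred's return value
   as used in this patch (other outcome names are assumptions): */
int rv = fd_fec_resolver_add_shred( ctx->resolver, shred, shred_buffer_sz,
                                    slot_leader->uc, out_fec_set, out_shred,
                                    &out_merkle_root );
/* FD_FEC_RESOLVER_SHRED_OKAY:      shred accepted, FEC set still
   incomplete; forward the shred to the turbine fanout and publish a
   per-shred header frag to repair.                                   */
/* FD_FEC_RESOLVER_SHRED_COMPLETES: shred accepted and completes its
   FEC set; additionally insert the whole set into the blockstore and
   publish the FEC-complete frag, which reads out_merkle_root.        */
if( FD_LIKELY( rv!=FD_FEC_RESOLVER_SHRED_COMPLETES ) ) return;
/* ... fall through to the FEC-set completion path ... */
```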
@@ -596,14 +596,22 @@ after_frag( fd_shred_ctx_t * ctx,
       for( ulong j=0UL; j<*max_dest_cnt; j++ ) send_shred( ctx, *out_shred, sdest, dests[ j ], ctx->tsorig );
     } while( 0 );

-    if( FD_LIKELY( ctx->blockstore && rv==FD_FEC_RESOLVER_SHRED_OKAY ) ) { /* optimize for the compiler - branch predictor will still be correct */
-      uchar * buf = fd_chunk_to_laddr( ctx->replay_out_mem, ctx->replay_out_chunk );
-      ulong   sz  = fd_shred_header_sz( shred->variant );
-      fd_memcpy( buf, shred, sz );
-      ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
-      ulong replay_sig = fd_disco_shred_replay_sig( shred->slot, shred->idx, shred->fec_set_idx, fd_shred_is_code( fd_shred_type( shred->variant ) ), 0 );
-      fd_stem_publish( stem, REPLAY_OUT_IDX, replay_sig, ctx->replay_out_chunk, sz, 0UL, ctx->tsorig, tspub );
-      ctx->replay_out_chunk = fd_dcache_compact_next( ctx->replay_out_chunk, sz, ctx->replay_out_chunk0, ctx->replay_out_wmark );
+    if( FD_LIKELY( ctx->repair_out_idx!=ULONG_MAX ) ) { /* firedancer topo compiler hint */
+
+      /* Construct the sig from the shred. */
+
+      int  is_code               = fd_shred_is_code( fd_shred_type( shred->variant ) );
+      uint shred_idx_or_data_cnt = shred->idx;
+      if( FD_LIKELY( is_code ) ) shred_idx_or_data_cnt = shred->code.data_cnt; /* optimize for code_cnt >= data_cnt */
+      ulong sig = fd_disco_shred_repair_sig( shred->slot, shred->fec_set_idx, is_code, shred_idx_or_data_cnt );
+
+      /* Copy the shred header into the frag and publish. */
+
+      ulong sz = fd_shred_header_sz( shred->variant );
+      fd_memcpy( fd_chunk_to_laddr( ctx->repair_out_mem, ctx->repair_out_chunk ), shred, sz );
+      ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
+      fd_stem_publish( stem, ctx->repair_out_idx, sig, ctx->repair_out_chunk, sz, 0UL, ctx->tsorig, tspub );
+      ctx->repair_out_chunk = fd_dcache_compact_next( ctx->repair_out_chunk, sz, ctx->repair_out_chunk0, ctx->repair_out_wmark );
     }
   }

   if( FD_LIKELY( rv!=FD_FEC_RESOLVER_SHRED_COMPLETES ) ) return;
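Note: `fd_disco_shred_repair_sig` replaces `fd_disco_shred_replay_sig` and overloads the index field: for coding shreds it carries `code.data_cnt`, so repair learns the FEC set's data-shred count without touching the frag payload. The real bit layout is owned by fd_disco; the packing below is purely an assumed illustration of why all four fields fit in one 64-bit sig (field widths are not authoritative):

```c
/* Assumed packing, for illustration only; the real layout is defined
   by fd_disco_shred_repair_sig.  Widths chosen so the fields sum to
   64 bits: slot 32 | fec_set_idx 15 | is_code 1 | idx_or_data_cnt 16. */
static inline ulong
shred_repair_sig_sketch( ulong slot, uint fec_set_idx, int is_code, uint idx_or_data_cnt ) {
  return ( (slot               & 0xFFFFFFFFUL) << 32 ) |
         ( ((ulong)fec_set_idx & 0x7FFFUL    ) << 17 ) |
         ( ((ulong)!!is_code                 ) << 16 ) |
         (  (ulong)idx_or_data_cnt & 0xFFFFUL        );
}
```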
@@ -643,31 +651,43 @@ after_frag( fd_shred_ctx_t * ctx,
   ulong sz2 = sizeof(fd_shred34_t) - (34UL - s34[ 2 ].shred_cnt)*FD_SHRED_MAX_SZ;
   ulong sz3 = sizeof(fd_shred34_t) - (34UL - s34[ 3 ].shred_cnt)*FD_SHRED_MAX_SZ;

-  if( FD_LIKELY( ctx->blockstore ) ) {
-    /* If the shred has a completes flag, then in the replay tile it
-       will do immediate polling for shreds in that FEC set, under
-       the assumption that they live in the blockstore. When a shred
-       completes a FEC set, we need to add the shreds to the
-       blockstore before we notify replay of a completed FEC set.
-       Replay does not poll the blockstore for shreds on notifies of
-       a regular non-completing shred. */
+  if( FD_LIKELY( ctx->blockstore ) ) { /* firedancer topo compiler hint */
+
+    /* Insert shreds into the blockstore. Note we do this regardless of
+       whether the shreds are for one of our leader slots or not. Even
+       though there is a separate link that directly connects pack and
+       replay when we are leader, we still need the shreds in the
+       blockstore to, for example, serve repair requests. */

     for( ulong i=0UL; i<set->data_shred_cnt; i++ ) {
       fd_shred_t const * data_shred = (fd_shred_t const *)fd_type_pun_const( set->data_shreds[ i ] );
       fd_blockstore_shred_insert( ctx->blockstore, data_shred );
     }
-    if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
-      /* Shred came from a block we didn't produce. This is not our
-         leader slot. */
-      fd_shred_t const * shred = (fd_shred_t const *)fd_type_pun_const( ctx->shred_buffer );
-      uchar * buf = fd_chunk_to_laddr( ctx->replay_out_mem, ctx->replay_out_chunk );
-      ulong   sz  = fd_shred_header_sz( shred->variant );
-      fd_memcpy( buf, shred, sz );
-      ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
-      ulong replay_sig = fd_disco_shred_replay_sig( shred->slot, shred->idx, shred->fec_set_idx, fd_shred_is_code( fd_shred_type( shred->variant ) ), 1 );
-      fd_stem_publish( stem, REPLAY_OUT_IDX, replay_sig, ctx->replay_out_chunk, sz, 0UL, ctx->tsorig, tspub );
-      ctx->replay_out_chunk = fd_dcache_compact_next( ctx->replay_out_chunk, sz, ctx->replay_out_chunk0, ctx->replay_out_wmark );
-    }
+  }
+
+  if( FD_LIKELY( ctx->repair_out_idx!=ULONG_MAX ) ) { /* firedancer topo compiler hint */
+
+    /* Additionally, publish a frag to notify repair that the FEC set is
+       complete. Note the ordering wrt blockstore shred insertion above is
+       intentional: shreds are inserted into the blockstore before
+       notifying repair. This is because the replay tile is downstream of
+       repair, and replay assumes the shreds are already in the blockstore
+       when repair notifies it that the FEC set is complete; we don't
+       know whether shred will finish inserting into the blockstore first
+       or repair will finish validating the FEC set first. */
+
+    fd_shred_t const * last = (fd_shred_t const *)fd_type_pun_const( set->data_shreds[ set->data_shred_cnt-1 ] );
+
+    /* Copy the last shred's header and the merkle root into the frag. */
+
+    ulong   sig   = ULONG_MAX;
+    uchar * chunk = fd_chunk_to_laddr( ctx->repair_out_mem, ctx->repair_out_chunk );
+    memcpy( chunk,                         last,                 FD_SHRED_CODE_HEADER_SZ );
+    memcpy( chunk+FD_SHRED_CODE_HEADER_SZ, out_merkle_root.hash, FD_SHRED_MERKLE_ROOT_SZ ); /* root follows the header, not over it */
+    ulong sz    = FD_SHRED_CODE_HEADER_SZ + FD_SHRED_MERKLE_ROOT_SZ;
+    ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
+    fd_stem_publish( stem, ctx->repair_out_idx, sig, ctx->repair_out_chunk, sz, 0UL, ctx->tsorig, tspub );
+    ctx->repair_out_chunk = fd_dcache_compact_next( ctx->repair_out_chunk, sz, ctx->repair_out_chunk0, ctx->repair_out_wmark );
   }

   /* Send to the blockstore, skipping any empty shred34_t s. */
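Note: a FEC-complete frag is distinguishable from the per-shred frags on the same link by its sig (`ULONG_MAX` vs. the packed repair sig), and its payload is a fixed `header || merkle_root` layout. A sketch of how a consumer on the repair side might branch; `on_repair_frag` is a hypothetical name, not part of the patch:

```c
/* Hypothetical consumer-side decode of frags on the shred_repair link. */
static void
on_repair_frag( ulong sig, uchar const * frag ) {
  if( sig==ULONG_MAX ) {
    /* FEC-set-complete notification: the last data shred's header
       followed by the 32-byte merkle root.  The shreds are already in
       the blockstore by the time this frag is visible (see the
       ordering comment above). */
    fd_shred_t const * hdr  = (fd_shred_t const *)fd_type_pun_const( frag );
    uchar const *      root = frag + FD_SHRED_CODE_HEADER_SZ;
    (void)hdr; (void)root;  /* mark (hdr->slot, hdr->fec_set_idx) complete */
  } else {
    /* Per-shred frag: slot, fec_set_idx, is_code, and idx_or_data_cnt
       are all recoverable from the sig alone, no payload read needed. */
  }
}
```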
@@ -734,20 +754,11 @@ fd_shred_signer( void * signer_ctx,
 static void
 unprivileged_init( fd_topo_t *      topo,
                    fd_topo_tile_t * tile ) {
-  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

-  if( FD_LIKELY( tile->out_cnt==3UL ) ) { /* frankendancer */
-    FD_TEST( 0==strcmp( topo->links[ tile->out_link_id[ STORE_OUT_IDX ] ].name, "shred_store" ) );
-    FD_TEST( 0==strcmp( topo->links[ tile->out_link_id[ NET_OUT_IDX   ] ].name, "shred_net"   ) );
-    FD_TEST( 0==strcmp( topo->links[ tile->out_link_id[ SIGN_OUT_IDX  ] ].name, "shred_sign"  ) );
-  } else if( FD_LIKELY( tile->out_cnt==4UL ) ) { /* firedancer */
-    FD_TEST( 0==strcmp( topo->links[ tile->out_link_id[ STORE_OUT_IDX  ] ].name, "shred_storei" ) );
-    FD_TEST( 0==strcmp( topo->links[ tile->out_link_id[ NET_OUT_IDX    ] ].name, "shred_net"    ) );
-    FD_TEST( 0==strcmp( topo->links[ tile->out_link_id[ SIGN_OUT_IDX   ] ].name, "shred_sign"   ) );
-    FD_TEST( 0==strcmp( topo->links[ tile->out_link_id[ REPLAY_OUT_IDX ] ].name, "shred_replay" ) );
-  } else {
-    FD_LOG_ERR(( "shred tile has unexpected cnt of output links %lu", tile->out_cnt ));
-  }
+  char * store_out_name = topo->links[ tile->out_link_id[ STORE_OUT_IDX ] ].name;
+  FD_TEST( 0==strcmp( store_out_name, "shred_store" ) || 0==strcmp( store_out_name, "shred_storei" ) );
+  FD_TEST( 0==strcmp( topo->links[ tile->out_link_id[ NET_OUT_IDX  ] ].name, "shred_net"  ) );
+  FD_TEST( 0==strcmp( topo->links[ tile->out_link_id[ SIGN_OUT_IDX ] ].name, "shred_sign" ) );

   if( FD_UNLIKELY( !tile->out_cnt ) )
     FD_LOG_ERR(( "shred tile has no primary output link" ));
@@ -757,6 +768,8 @@ unprivileged_init( fd_topo_t * topo,
     FD_LOG_ERR(( "shred tile out depths are not equal %lu %lu",
                  topo->links[ tile->out_link_id[ 0 ] ].depth, shred_store_mcache_depth ));

+  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
+
   FD_SCRATCH_ALLOC_INIT( l, scratch );
   fd_shred_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_shred_ctx_t), sizeof(fd_shred_ctx_t) );

@@ -899,21 +912,21 @@ unprivileged_init( fd_topo_t * topo,
   ctx->store_out_wmark = fd_dcache_compact_wmark( ctx->store_out_mem, store_out->dcache, store_out->mtu );
   ctx->store_out_chunk = ctx->store_out_chunk0;

-  if( FD_LIKELY( tile->out_cnt==4UL ) ) { /* firedancer */
-    fd_topo_link_t * replay_out = &topo->links[ tile->out_link_id[ REPLAY_OUT_IDX ] ];
+  ctx->repair_out_idx = fd_topo_find_tile_out_link( topo, tile, "shred_repair", 0 /* one repair tile so always kind_id 0 */ );
+  if( FD_LIKELY( ctx->repair_out_idx!=ULONG_MAX ) ) { /* firedancer topo compiler hint */
+    fd_topo_link_t * repair_out = &topo->links[ tile->out_link_id[ ctx->repair_out_idx ] ];

-    ctx->replay_out_mem    = topo->workspaces[ topo->objs[ replay_out->dcache_obj_id ].wksp_id ].wksp;
-    ctx->replay_out_chunk0 = fd_dcache_compact_chunk0( ctx->replay_out_mem, replay_out->dcache );
-    ctx->replay_out_wmark  = fd_dcache_compact_wmark ( ctx->replay_out_mem, replay_out->dcache, replay_out->mtu );
-    ctx->replay_out_chunk  = ctx->replay_out_chunk0;
+    ctx->repair_out_mem    = topo->workspaces[ topo->objs[ repair_out->dcache_obj_id ].wksp_id ].wksp;
+    ctx->repair_out_chunk0 = fd_dcache_compact_chunk0( ctx->repair_out_mem, repair_out->dcache );
+    ctx->repair_out_wmark  = fd_dcache_compact_wmark ( ctx->repair_out_mem, repair_out->dcache, repair_out->mtu );
+    ctx->repair_out_chunk  = ctx->repair_out_chunk0;
   }

+  ctx->blockstore = NULL;
   ulong blockstore_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "blockstore" );
   if( FD_LIKELY( blockstore_obj_id!=ULONG_MAX ) ) {
     ctx->blockstore = fd_blockstore_join( &ctx->blockstore_ljoin, fd_topo_obj_laddr( topo, blockstore_obj_id ) );
     FD_TEST( ctx->blockstore->shmem->magic == FD_BLOCKSTORE_MAGIC );
-  } else {
-    ctx->blockstore = NULL;
   }

   ctx->poh_in_expect_seq = 0UL;
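Note: resolving the optional link by name at init and guarding on the `ULONG_MAX` sentinel is what lets one binary serve both topologies, replacing the brittle `out_cnt==3UL`/`==4UL` dispatch. Condensed, the pattern is (assuming, consistent with its use here, that `fd_topo_find_tile_out_link` returns the tile-local out-link index or `ULONG_MAX` when absent):

```c
/* Resolve once at init; ULONG_MAX means the topology omits the link
   (frankendancer), which cleanly disables every publish site. */
ctx->repair_out_idx = fd_topo_find_tile_out_link( topo, tile, "shred_repair", 0UL );

/* Guard each use at runtime; no link counting or topology-specific
   index #defines required.  sig/sz/tspub are as in the publish sites
   above. */
if( FD_LIKELY( ctx->repair_out_idx!=ULONG_MAX ) ) {
  fd_stem_publish( stem, ctx->repair_out_idx, sig, ctx->repair_out_chunk, sz, 0UL, ctx->tsorig, tspub );
}
```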