Commit 5e38532

feat(shred,repair): shred_repair link
1 parent 59f097b commit 5e38532

File tree: 7 files changed, +352 -561 lines changed

src/app/fdctl/topos/fd_firedancer.c

Lines changed: 43 additions & 39 deletions
Large diffs are not rendered by default.

src/ballet/bmtree/fd_bmtree.h

Lines changed: 2 additions & 1 deletion
@@ -149,9 +149,10 @@ static uchar const fd_bmtree_node_prefix[32UL] __attribute__((aligned(32))) = "\
 struct __attribute__((packed)) fd_bmtree_node {
   uchar hash[ 32 ]; /* Last bytes may not be meaningful */
 };
-
 typedef struct fd_bmtree_node fd_bmtree_node_t;
 
+FD_STATIC_ASSERT( sizeof(fd_bmtree_node_t) == 32, update FD_SHRED_MERKLE_ROOT_SZ );
+
 /* bmtree_hash_leaf computes `SHA-256(prefix|data), where prefix is the
    first prefix_sz bytes of fd_bmtree_leaf_prefix. prefix_sz is
    typically FD_BMTREE_LONG_PREFIX_SZ or FD_BMTREE_SHORT_PREFIX_SZ.
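The new compile-time check keeps the packed node size in lock-step with FD_SHRED_MERKLE_ROOT_SZ. The FD_STATIC_ASSERT macro itself is not part of this diff; the following is a minimal standalone sketch of the same check using C11 _Static_assert (only the 32-byte size and the failure message are taken from the hunk above):

/* Standalone sketch of the compile-time size check added above.
   ASSUMPTION: a C11 toolchain with GCC/Clang attributes; FD_STATIC_ASSERT
   itself is not defined in this diff, so _Static_assert stands in for it. */

struct __attribute__((packed)) bmtree_node { unsigned char hash[ 32 ]; };
typedef struct bmtree_node bmtree_node_t;

_Static_assert( sizeof(bmtree_node_t)==32, "update FD_SHRED_MERKLE_ROOT_SZ" );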

src/ballet/shred/fd_shred.h

Lines changed: 7 additions & 0 deletions
@@ -149,6 +149,13 @@ typedef uchar fd_shred_merkle_t[FD_SHRED_MERKLE_NODE_SZ];
 /* Maximum number of data shreds in a slot, also maximum number of parity shreds in a slot */
 #define FD_SHRED_MAX_PER_SLOT (1 << 15UL) /* 32,768 shreds */
 
+/* Many static bounds are specified around the assumption that this is a
+   protocol limit on the max number of shreds in a slot. If this limit
+   changes, all the relevant usages in other areas of the Firedancer
+   codebase should be updated before modifying this assertion. */
+
+FD_STATIC_ASSERT( FD_SHRED_MAX_PER_SLOT == 32768, check all usages before changing this limit! );
+
 /* 36,536,320 bytes per slot */
 #define FD_SHRED_DATA_PAYLOAD_MAX_PER_SLOT (FD_SHRED_DATA_PAYLOAD_MAX * FD_SHRED_MAX_PER_SLOT)
 
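The 36,536,320-byte figure quoted above is just FD_SHRED_DATA_PAYLOAD_MAX times the 32,768-shred limit that the new assert pins down. A standalone sanity check of that arithmetic (a sketch only; the 1115-byte payload size is inferred from 36,536,320 / 32,768, not shown in this diff):

/* Standalone check of the per-slot payload bound quoted above.
   ASSUMPTION: FD_SHRED_DATA_PAYLOAD_MAX is 1115 bytes (inferred, not
   taken from this commit). */
#include <assert.h>

#define SHRED_MAX_PER_SLOT     (1UL << 15)  /* 32,768 shreds, as asserted above */
#define SHRED_DATA_PAYLOAD_MAX (1115UL)     /* assumed */

int main( void ) {
  assert( SHRED_MAX_PER_SLOT == 32768UL );
  assert( SHRED_DATA_PAYLOAD_MAX * SHRED_MAX_PER_SLOT == 36536320UL );
  return 0;
}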
src/disco/fd_disco_base.h

Lines changed: 62 additions & 20 deletions
@@ -28,22 +28,32 @@
 /* FD_NET_MTU is the max full packet size, with ethernet, IP, and UDP
    headers that can go in or out of the net tile. 2048 is the maximum
    XSK entry size, so this value follows naturally. */
+
 #define FD_NET_MTU (2048UL)
 
 /* FD_TPU_MTU is the max serialized byte size of a txn sent over TPU.
 
    This is minimum MTU of IPv6 packet - IPv6 header - UDP header
    1280 - 40 - 8 */
+
 #define FD_TPU_MTU (1232UL)
 
 /* FD_GOSSIP_MTU is the max sz of a gossip packet which is the same as
    above. */
+
 #define FD_GOSSIP_MTU (FD_TPU_MTU)
 
 /* FD_SHRED_STORE_MTU is the size of an fd_shred34_t (statically
    asserted in fd_shred_tile.c). */
+
 #define FD_SHRED_STORE_MTU (41792UL)
 
+/* FD_SHRED_REPAIR_MTU is the maximum size of a frag on the shred_repair
+   link. This is the size of a data shred header + merkle root. */
+
+#define FD_SHRED_REPAIR_MTU (FD_SHRED_DATA_HEADER_SZ + FD_SHRED_MERKLE_ROOT_SZ)
+FD_STATIC_ASSERT( FD_SHRED_REPAIR_MTU == 120, update FD_SHRED_REPAIR_MTU );
+
 #define FD_NETMUX_SIG_MIN_HDR_SZ    ( 42UL) /* The default header size, which means no vlan tags and no IP options. */
 #define FD_NETMUX_SIG_IGNORE_HDR_SZ (102UL) /* Outside the allowable range, but still fits in 4 bits when compressed */
 
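The 120-byte value locked in by the new assert is the sum of the two constituent sizes. A standalone check of the arithmetic (a sketch; the 88-byte data shred header size is an assumption here, while the 32-byte merkle root size is pinned by the fd_bmtree.h assert in this same commit):

/* Standalone check of FD_SHRED_REPAIR_MTU == 120.
   ASSUMPTION: the data shred header is 88 bytes; the 32-byte merkle root
   matches the sizeof(fd_bmtree_node_t)==32 assert above. */
#include <assert.h>

#define SHRED_DATA_HEADER_SZ (88UL)  /* assumed */
#define SHRED_MERKLE_ROOT_SZ (32UL)

int main( void ) {
  assert( SHRED_DATA_HEADER_SZ + SHRED_MERKLE_ROOT_SZ == 120UL );
  return 0;
}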
@@ -123,29 +133,61 @@ fd_disco_replay_old_sig( ulong slot,
 FD_FN_CONST static inline ulong fd_disco_replay_old_sig_flags( ulong sig ) { return (sig & 0xFFUL); }
 FD_FN_CONST static inline ulong fd_disco_replay_old_sig_slot ( ulong sig ) { return (sig >> 8); }
 
+/* fd_disco_shred_repair_sig constructs a sig for the shred_repair link.
+   The encoded fields vary depending on the type of the sig. The
+   diagram below describes the encoding.
+
+   skip (1) | slot (32) | fec_set_idx (15) | is_code (1) | shred_idx or data_cnt (15)
+   [63]     | [31, 62]  | [16, 30]         | [15]        | [0, 14]
+
+   The first bit of the sig indicates whether it is ok to skip the frag.
+   This is the case when the frag is a shred header and the necessary
+   information for the link is fully encoded in the remaining bits of
+   the sig. If skip is 0, readers must ignore the rest of the sig and
+   process the frag.
+
+   If skip = 1, the next 32 bits [31, 62] describe the slot number. Note
+   if the slot number saturates 32 bits (ie. slot >= UINT_MAX) then skip
+   would be 0 in the sig, so the slot number is only encoded in 32 bits
+   within the sig when it is correct to do so.
+
+   The following 15 bits [16, 30] describe the fec_set_idx. This is a
+   15-bit value because shreds are bounded to 2^15 per slot, so in the
+   worst case there is an independent FEC set for every shred, which
+   results in at most 2^15 FEC sets per slot.
+
+   The next bit [15] describes whether it is a coding shred (is_code).
+   If is_code = 0, the sig describes a data shred, and the last 15 bits
+   [0, 14] encode the shred_idx. If is_code = 1, the sig describes a
+   coding shred, and the last 15 bits encode the data_cnt.
+
+   When type is 1, the sig describes a completed FEC set. In this case,
+   the second bit describes whether the FEC set completes the entry
+   batch, which will be true if the last data shred in the FEC set is
+   marked with a DATA_COMPLETES flag (FIXME this is not invariant in the
+   protocol yet). As with coding frags, the last 15 bits describe the
+   data_cnt. The frag will contain the full shred header of the last
+   data shred in the FEC set, as well the merkle root and chained merkle
+   root of the FEC set. */
+
+/* TODO this shred_repair_sig can be greatly simplified when FEC sets
+   are uniformly coding shreds and fixed size. */
+
 FD_FN_CONST static inline ulong
-fd_disco_shred_replay_sig( ulong slot,
-                           uint  shred_idx,
-                           uint  fec_set_idx,
-                           int   is_code,
-                           int   completes ) {
-
-  /* | 32 LSB of slot | 15 LSB of shred_idx | 15 LSB of fec_idx | 1 bit of shred data/code type | 1 bit if shred completes the fec set |
-     | slot[32,63]    | shred_idx[17,32]    | fec_idx[2,16]     | is_parity[1]                  | is_complete[0]                       | */
-
-  ulong slot_ul        = fd_ulong_min( (ulong)slot,        (ulong)UINT_MAX );
-  ulong shred_idx_ul   = fd_ulong_min( (ulong)shred_idx,   (ulong)FD_SHRED_MAX_PER_SLOT );
-  ulong fec_set_idx_ul = fd_ulong_min( (ulong)fec_set_idx, (ulong)FD_SHRED_MAX_PER_SLOT );
-  ulong is_code_ul     = (ulong)is_code;
-  ulong completes_ul   = (ulong)completes;
-  return slot_ul << 32 | shred_idx_ul << 17 | fec_set_idx_ul << 2 | is_code_ul << 1 | completes_ul;
+fd_disco_shred_repair_sig( ulong slot, uint fec_set_idx, int is_code, uint shred_idx_or_data_cnt ) {
+  return 1 | slot << 31 | fec_set_idx << 16 | (ulong)is_code << 15 | shred_idx_or_data_cnt;
 }
 
-FD_FN_CONST static inline ulong fd_disco_shred_replay_sig_slot       ( ulong sig ) { return        fd_ulong_extract    ( sig, 32, 63 ); }
-FD_FN_CONST static inline uint  fd_disco_shred_replay_sig_shred_idx  ( ulong sig ) { return (uint) fd_ulong_extract    ( sig, 17, 31 ); }
-FD_FN_CONST static inline uint  fd_disco_shred_replay_sig_fec_set_idx( ulong sig ) { return (uint) fd_ulong_extract    ( sig,  2, 16 ); }
-FD_FN_CONST static inline int   fd_disco_shred_replay_sig_is_code    ( ulong sig ) { return        fd_ulong_extract_bit( sig, 1 ); }
-FD_FN_CONST static inline int   fd_disco_shred_replay_sig_completes  ( ulong sig ) { return        fd_ulong_extract_bit( sig, 0 ); }
+/* fd_disco_shred_repair_sig_{...} are accessors for the fields encoded
+   in the sig described above. */
+
+FD_FN_CONST static inline int   fd_disco_shred_repair_sig_skip       ( ulong sig ) { return        fd_ulong_extract_bit( sig, 63     ); }
+FD_FN_CONST static inline ulong fd_disco_shred_repair_sig_slot       ( ulong sig ) { return        fd_ulong_extract    ( sig, 31, 62 ); }
+FD_FN_CONST static inline uint  fd_disco_shred_repair_sig_fec_set_idx( ulong sig ) { return (uint) fd_ulong_extract    ( sig, 16, 30 ); }
+FD_FN_CONST static inline int   fd_disco_shred_repair_sig_is_code    ( ulong sig ) { return        fd_ulong_extract_bit( sig, 15     ); }
+FD_FN_CONST static inline uint  fd_disco_shred_repair_sig_shred_idx  ( ulong sig ) { return (uint) fd_ulong_extract_lsb( sig, 15     ); } /* only when is_code = 0 */
+FD_FN_CONST static inline uint  fd_disco_shred_repair_sig_data_cnt   ( ulong sig ) { return (uint) fd_ulong_extract_lsb( sig, 15     ); } /* only when is_code = 1 */
+
 
 FD_FN_PURE static inline ulong
 fd_disco_compact_chunk0( void * wksp ) {
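For consumers of the shred_repair link, the documented bit layout can be exercised with plain shifts and masks. The helpers below are a hypothetical, self-contained sketch that follows the comment's diagram (skip at bit 63, slot at [31, 62], fec_set_idx at [16, 30], is_code at bit 15, shred_idx or data_cnt at [0, 14]); they are not the committed fd_disco_shred_repair_sig helpers, and they assume slot < 2^32 so that skip can be set:

/* Hypothetical standalone sketch of the shred_repair sig layout
   documented above. These are NOT the committed fd_disco_* helpers;
   they only mirror the diagram:
   skip[63] | slot[31,62] | fec_set_idx[16,30] | is_code[15] | shred_idx_or_data_cnt[0,14] */
#include <assert.h>

typedef unsigned long ulong;  /* assumes a 64-bit unsigned long */

static ulong sig_pack( ulong slot, ulong fec_set_idx, int is_code, ulong idx_or_cnt ) {
  return (1UL<<63) | (slot<<31) | (fec_set_idx<<16) | ((ulong)is_code<<15) | idx_or_cnt;
}

static int   sig_skip       ( ulong sig ) { return (int)((sig>>63) & 1UL);   }
static ulong sig_slot       ( ulong sig ) { return (sig>>31) & 0xFFFFFFFFUL; }
static ulong sig_fec_set_idx( ulong sig ) { return (sig>>16) & 0x7FFFUL;     }
static int   sig_is_code    ( ulong sig ) { return (int)((sig>>15) & 1UL);   }
static ulong sig_idx_or_cnt ( ulong sig ) { return  sig      & 0x7FFFUL;     }

int main( void ) {
  /* Data shred at idx 5 of the FEC set starting at shred 32, slot 285000000. */
  ulong sig = sig_pack( 285000000UL, 32UL, 0, 5UL );
  assert( sig_skip       ( sig )==1           );
  assert( sig_slot       ( sig )==285000000UL );
  assert( sig_fec_set_idx( sig )==32UL        );
  assert( sig_is_code    ( sig )==0           );
  assert( sig_idx_or_cnt ( sig )==5UL         );
  return 0;
}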

src/disco/shred/fd_shred_tile.c

Lines changed: 69 additions & 56 deletions
@@ -92,7 +92,6 @@
 #define STORE_OUT_IDX  0
 #define NET_OUT_IDX    1
 #define SIGN_OUT_IDX   2
-#define REPLAY_OUT_IDX 3
 
 #define MAX_SLOTS_PER_EPOCH 432000UL
 
@@ -179,10 +178,11 @@ typedef struct {
   ulong       store_out_wmark;
   ulong       store_out_chunk;
 
-  fd_wksp_t * replay_out_mem;
-  ulong       replay_out_chunk0;
-  ulong       replay_out_wmark;
-  ulong       replay_out_chunk;
+  fd_wksp_t * repair_out_mem;
+  ulong       repair_out_chunk0;
+  ulong       repair_out_wmark;
+  ulong       repair_out_chunk;
+  ulong       repair_out_idx;
 
   fd_blockstore_t   blockstore_ljoin;
   fd_blockstore_t * blockstore;
@@ -556,6 +556,7 @@ after_frag( fd_shred_ctx_t * ctx,
   const ulong fanout = 200UL;
   fd_shred_dest_idx_t _dests[ 200*(FD_REEDSOL_DATA_SHREDS_MAX+FD_REEDSOL_PARITY_SHREDS_MAX) ];
 
+  fd_bmtree_node_t out_merkle_root;
   if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
     uchar * shred_buffer    = ctx->shred_buffer;
     ulong   shred_buffer_sz = ctx->shred_buffer_sz;
@@ -571,10 +572,9 @@
 
     fd_fec_set_t const * out_fec_set[1];
     fd_shred_t const *   out_shred[1];
-    fd_bmtree_node_t     out_merkle_root[1];
 
     long add_shred_timing = -fd_tickcount();
-    int rv = fd_fec_resolver_add_shred( ctx->resolver, shred, shred_buffer_sz, slot_leader->uc, out_fec_set, out_shred, out_merkle_root );
+    int rv = fd_fec_resolver_add_shred( ctx->resolver, shred, shred_buffer_sz, slot_leader->uc, out_fec_set, out_shred, &out_merkle_root );
     add_shred_timing += fd_tickcount();
 
     fd_histf_sample( ctx->metrics->add_shred_timing, (ulong)add_shred_timing );
@@ -596,14 +596,22 @@
       for( ulong j=0UL; j<*max_dest_cnt; j++ ) send_shred( ctx, *out_shred, sdest, dests[ j ], ctx->tsorig );
     } while( 0 );
 
-    if( FD_LIKELY( ctx->blockstore && rv==FD_FEC_RESOLVER_SHRED_OKAY ) ) { /* optimize for the compiler - branch predictor will still be correct */
-      uchar * buf = fd_chunk_to_laddr( ctx->replay_out_mem, ctx->replay_out_chunk );
-      ulong   sz  = fd_shred_header_sz( shred->variant );
-      fd_memcpy( buf, shred, sz );
-      ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
-      ulong replay_sig = fd_disco_shred_replay_sig( shred->slot, shred->idx, shred->fec_set_idx, fd_shred_is_code( fd_shred_type( shred->variant ) ), 0 );
-      fd_stem_publish( stem, REPLAY_OUT_IDX, replay_sig, ctx->replay_out_chunk, sz, 0UL, ctx->tsorig, tspub );
-      ctx->replay_out_chunk = fd_dcache_compact_next( ctx->replay_out_chunk, sz, ctx->replay_out_chunk0, ctx->replay_out_wmark );
+    if( FD_LIKELY( ctx->repair_out_idx!=ULONG_MAX ) ) { /* firedancer topo compiler hint */
+
+      /* Construct the sig from the shred. */
+
+      int  is_code               = fd_shred_is_code( fd_shred_type( shred->variant ) );
+      uint shred_idx_or_data_cnt = shred->idx;
+      if( FD_LIKELY( is_code ) ) shred_idx_or_data_cnt = shred->code.data_cnt; /* optimize for code_cnt >= data_cnt */
+      ulong sig = fd_disco_shred_repair_sig( shred->slot, shred->fec_set_idx, is_code, shred_idx_or_data_cnt );
+
+      /* Copy the shred header into the frag and publish. */
+
+      ulong sz = fd_shred_header_sz( shred->variant );
+      fd_memcpy( fd_chunk_to_laddr( ctx->repair_out_mem, ctx->repair_out_chunk ), shred, sz );
+      ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
+      fd_stem_publish( stem, ctx->repair_out_idx, sig, ctx->repair_out_chunk, sz, 0UL, ctx->tsorig, tspub );
+      ctx->repair_out_chunk = fd_dcache_compact_next( ctx->repair_out_chunk, sz, ctx->repair_out_chunk0, ctx->repair_out_wmark );
     }
   }
   if( FD_LIKELY( rv!=FD_FEC_RESOLVER_SHRED_COMPLETES ) ) return;
@@ -643,31 +651,43 @@
     ulong sz2 = sizeof(fd_shred34_t) - (34UL - s34[ 2 ].shred_cnt)*FD_SHRED_MAX_SZ;
     ulong sz3 = sizeof(fd_shred34_t) - (34UL - s34[ 3 ].shred_cnt)*FD_SHRED_MAX_SZ;
 
-    if( FD_LIKELY( ctx->blockstore ) ) {
-      /* If the shred has a completes flag, then in the replay tile it
-         will do immediate polling for shreds in that FEC set, under
-         the assumption that they live in the blockstore. When a shred
-         completes a FEC set, we need to add the shreds to the
-         blockstore before we notify replay of a completed FEC set.
-         Replay does not poll the blockstore for shreds on notifies of
-         a regular non-completing shred. */
+    if( FD_LIKELY( ctx->blockstore ) ) { /* firedancer topo compiler hint */
+
+      /* Insert shreds into the blockstore. Note we do this regardless of
+         whether the shreds are for one of our leader slots or not. Even
+         though there is a separate link that directly connects pack and
+         replay when we are leader, we still need the shreds in the
+         blockstore to, for example, serve repair requests. */
 
       for( ulong i=0UL; i<set->data_shred_cnt; i++ ) {
        fd_shred_t const * data_shred = (fd_shred_t const *)fd_type_pun_const( set->data_shreds[ i ] );
        fd_blockstore_shred_insert( ctx->blockstore, data_shred );
      }
-      if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
-        /* Shred came from block we didn't produce. This is not our leader
-           slot. */
-        fd_shred_t const * shred = (fd_shred_t const *)fd_type_pun_const( ctx->shred_buffer );
-        uchar * buf = fd_chunk_to_laddr( ctx->replay_out_mem, ctx->replay_out_chunk );
-        ulong   sz  = fd_shred_header_sz( shred->variant );
-        fd_memcpy( buf, shred, sz );
-        ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
-        ulong replay_sig = fd_disco_shred_replay_sig( shred->slot, shred->idx, shred->fec_set_idx, fd_shred_is_code( fd_shred_type( shred->variant ) ), 1 );
-        fd_stem_publish( stem, REPLAY_OUT_IDX, replay_sig, ctx->replay_out_chunk, sz, 0UL, ctx->tsorig, tspub );
-        ctx->replay_out_chunk = fd_dcache_compact_next( ctx->replay_out_chunk, sz, ctx->replay_out_chunk0, ctx->replay_out_wmark );
-      }
+    }
+
+    if( FD_LIKELY( ctx->repair_out_idx!=ULONG_MAX ) ) { /* firedancer topo compiler hint */
+
+      /* Additionally, publish a frag to notify repair that the FEC set is
+         complete. Note the ordering wrt blockstore shred insertion above is
+         intentional: shreds are inserted into the blockstore before
+         notifying repair. This is because the replay tile is downstream of
+         repair, and replay assumes the shreds are already in the blockstore
+         when repair notifies it that the FEC set is complete, and we don't
+         know whether shred will finish inserting into blockstore first or
+         repair will finish validating the FEC set first. */
+
+      fd_shred_t const * last = (fd_shred_t const *)fd_type_pun_const( set->data_shreds[ set->data_shred_cnt - 1 ] );
+
+      /* Copy the last shred and merkle root of the FEC set into the frag. */
+
+      ulong sig = ULONG_MAX;
+      uchar * chunk = fd_chunk_to_laddr( ctx->repair_out_mem, ctx->repair_out_chunk );
+      memcpy( chunk, last, FD_SHRED_CODE_HEADER_SZ );
+      memcpy( chunk, out_merkle_root.hash, FD_SHRED_MERKLE_ROOT_SZ );
+      ulong sz    = FD_SHRED_CODE_HEADER_SZ + FD_SHRED_MERKLE_ROOT_SZ;
+      ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
+      fd_stem_publish( stem, ctx->repair_out_idx, sig, ctx->repair_out_chunk, sz, 0UL, ctx->tsorig, tspub );
+      ctx->repair_out_chunk = fd_dcache_compact_next( ctx->repair_out_chunk, sz, ctx->repair_out_chunk0, ctx->repair_out_wmark );
     }
 
     /* Send to the blockstore, skipping any empty shred34_t s. */
@@ -734,20 +754,11 @@ fd_shred_signer( void * signer_ctx,
 static void
 unprivileged_init( fd_topo_t *      topo,
                    fd_topo_tile_t * tile ) {
-  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
 
-  if( FD_LIKELY( tile->out_cnt==3UL ) ) { /* frankendancer */
-    FD_TEST( 0==strcmp( topo->links[tile->out_link_id[STORE_OUT_IDX]].name, "shred_store" ) );
-    FD_TEST( 0==strcmp( topo->links[tile->out_link_id[NET_OUT_IDX]].name, "shred_net" ) );
-    FD_TEST( 0==strcmp( topo->links[tile->out_link_id[SIGN_OUT_IDX]].name, "shred_sign" ) );
-  } else if( FD_LIKELY( tile->out_cnt==4UL ) ) { /* firedancer */
-    FD_TEST( 0==strcmp( topo->links[tile->out_link_id[STORE_OUT_IDX]].name, "shred_storei" ) );
-    FD_TEST( 0==strcmp( topo->links[tile->out_link_id[NET_OUT_IDX]].name, "shred_net" ) );
-    FD_TEST( 0==strcmp( topo->links[tile->out_link_id[SIGN_OUT_IDX]].name, "shred_sign" ) );
-    FD_TEST( 0==strcmp( topo->links[tile->out_link_id[REPLAY_OUT_IDX]].name, "shred_replay" ) );
-  } else {
-    FD_LOG_ERR(( "shred tile has unexpected cnt of output links %lu", tile->out_cnt ));
-  }
+  char * store_out_name = topo->links[tile->out_link_id[ STORE_OUT_IDX ]].name;
+  FD_TEST( 0==strcmp( store_out_name, "shred_store" ) || 0==strcmp( store_out_name, "shred_storei" ) );
+  FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ NET_OUT_IDX  ]].name, "shred_net"  ) );
+  FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ SIGN_OUT_IDX ]].name, "shred_sign" ) );
 
   if( FD_UNLIKELY( !tile->out_cnt ) )
     FD_LOG_ERR(( "shred tile has no primary output link" ));
@@ -757,6 +768,8 @@ unprivileged_init( fd_topo_t * topo,
     FD_LOG_ERR(( "shred tile out depths are not equal %lu %lu",
                  topo->links[ tile->out_link_id[ 0 ] ].depth, shred_store_mcache_depth ));
 
+  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
+
   FD_SCRATCH_ALLOC_INIT( l, scratch );
   fd_shred_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_shred_ctx_t ), sizeof( fd_shred_ctx_t ) );
 
@@ -899,21 +912,21 @@ unprivileged_init( fd_topo_t * topo,
   ctx->store_out_wmark  = fd_dcache_compact_wmark ( ctx->store_out_mem, store_out->dcache, store_out->mtu );
   ctx->store_out_chunk  = ctx->store_out_chunk0;
 
-  if( FD_LIKELY( tile->out_cnt==4UL ) ) { /* firedancer */
-    fd_topo_link_t * replay_out = &topo->links[ tile->out_link_id[ REPLAY_OUT_IDX ] ];
+  ctx->repair_out_idx = fd_topo_find_tile_out_link( topo, tile, "shred_repair", 0 /* one repair tile so always kind_id 0 */ );
+  if( FD_LIKELY( ctx->repair_out_idx!=ULONG_MAX ) ) { /* firedancer topo compiler hint */
+    fd_topo_link_t * repair_out = &topo->links[ ctx->repair_out_idx ];
 
-    ctx->replay_out_mem    = topo->workspaces[ topo->objs[ replay_out->dcache_obj_id ].wksp_id ].wksp;
-    ctx->replay_out_chunk0 = fd_dcache_compact_chunk0( ctx->replay_out_mem, replay_out->dcache );
-    ctx->replay_out_wmark  = fd_dcache_compact_wmark ( ctx->replay_out_mem, replay_out->dcache, replay_out->mtu );
-    ctx->replay_out_chunk  = ctx->replay_out_chunk0;
+    ctx->repair_out_mem    = topo->workspaces[ topo->objs[ repair_out->dcache_obj_id ].wksp_id ].wksp;
+    ctx->repair_out_chunk0 = fd_dcache_compact_chunk0( ctx->repair_out_mem, repair_out->dcache );
+    ctx->repair_out_wmark  = fd_dcache_compact_wmark ( ctx->repair_out_mem, repair_out->dcache, repair_out->mtu );
+    ctx->repair_out_chunk  = ctx->repair_out_chunk0;
   }
 
+  ctx->blockstore = NULL;
   ulong blockstore_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "blockstore" );
   if (FD_LIKELY( blockstore_obj_id!=ULONG_MAX )) {
     ctx->blockstore = fd_blockstore_join( &ctx->blockstore_ljoin, fd_topo_obj_laddr( topo, blockstore_obj_id ) );
     FD_TEST( ctx->blockstore->shmem->magic == FD_BLOCKSTORE_MAGIC );
-  } else {
-    ctx->blockstore = NULL;
   }
 
   ctx->poh_in_expect_seq = 0UL;
