Line data Source code
1 : #include "../tiles.h"
2 :
3 : #include "generated/fd_shred_tile_seccomp.h"
4 : #include "../../util/pod/fd_pod_format.h"
5 : #include "../shred/fd_shredder.h"
6 : #include "../shred/fd_shred_batch.h"
7 : #include "../shred/fd_shred_dest.h"
8 : #include "../shred/fd_fec_resolver.h"
9 : #include "../shred/fd_stake_ci.h"
10 : #include "../store/fd_store.h"
11 : #include "../keyguard/fd_keyload.h"
12 : #include "../keyguard/fd_keyguard.h"
13 : #include "../keyguard/fd_keyswitch.h"
14 : #include "../fd_disco.h"
15 : #include "../net/fd_net_tile.h"
16 : #include "../../flamenco/leaders/fd_leaders.h"
17 : #include "../../util/net/fd_net_headers.h"
18 : #include "../../flamenco/gossip/fd_gossip_types.h"
19 : #include "../../flamenco/types/fd_types.h"
20 : #include "../../flamenco/runtime/sysvar/fd_sysvar_epoch_schedule.h"
21 :
22 : /* The shred tile handles shreds from two data sources: shreds generated
23 : from microblocks from the banking tile, and shreds retransmitted from
24 : the network.
25 :
26 : They have rather different semantics, but at the end of the day, they
27 : both result in a bunch of shreds and FEC sets that need to be sent to
28 : the blockstore and on the network, which is why one tile handles
29 : both.
30 :
31 : We segment the memory for the two types of shreds into two halves of
32 : a dcache because they follow somewhat different flow control
33 : patterns. For flow control, the normal guarantee we want to provide
34 : is that the dcache entry is not overwritten unless the mcache entry
35 : has also been overwritten. The normal way to do this when using both
36 : cyclically and with a 1-to-1 mapping is to make the dcache at least
37 : `burst` entries bigger than the mcache.
38 :
39 : In this tile, we use one output mcache with one output dcache (which
40 : is logically partitioned into two) for the two sources of data. The
41 : worst case for flow control is when we're only sending with one of
42 : the dcache partitions at a time though, so we can consider them
43 : separately.
44 :
45 : From bank: Every FEC set triggers at least two mcache entries (one
46 : for parity and one for data), so at most, we have ceil(mcache
47 : depth/2) FEC sets exposed. This means we need to decompose dcache
48 : into at least ceil(mcache depth/2)+1 FEC sets.
49 :
50 : From the network: The FEC resolver doesn't use a cyclic order, but it
51 : does promise that once it returns an FEC set, it will return at least
52 : complete_depth FEC sets before returning it again. This means we
53 : want at most complete_depth-1 FEC sets exposed, so
54 : complete_depth=ceil(mcache depth/2)+1 FEC sets as above. The FEC
55 : resolver has the ability to keep individual shreds for partial_depth
56 : calls, but because in this version of the shred tile, we send each
57 : shred to all its destinations as soon as we get it, we don't need
58 : that functionality, so we set partial_depth=1.
59 :
60 : Adding these up, we get 2*ceil(mcache_depth/2)+3+fec_resolver_depth
61 : FEC sets, which is no more than mcache_depth+4+fec_resolver_depth.
62 : Each FEC is paired with 4 fd_shred34_t structs, so that means we need
63 : to decompose the dcache into 4*mcache_depth + 4*fec_resolver_depth +
64 : 16 fd_shred34_t structs.
65 :
66 : A note on parallelization. From the network, shreds are distributed
67 : to tiles by their signature, so all the shreds for a given FEC set
68 : are processed by the same tile. From bank, the original
69 : implementation used to parallelize by batch of microblocks (so within
70 : a block, batches were distributed to different tiles). To support
71 : chained merkle shreds, the current implementation processes all the
72 : batches on tile 0 -- this should be a temporary state while Solana
 73 :    moves to a newer shred format that supports better parallelization. */
74 :
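/* A minimal sketch (illustrative helper, not part of this file's API)
   of the FEC set budget derived above; it mirrors the fec_set_cnt
   computation in scratch_footprint below: */

static inline ulong
example_worst_case_fec_set_cnt( ulong mcache_depth, ulong fec_resolver_depth ) {
  ulong ceil_half = (mcache_depth+1UL)/2UL; /* ceil(mcache_depth/2)           */
  ulong from_bank = ceil_half+1UL;          /* FEC sets exposed from bank     */
  ulong from_net  = ceil_half+1UL + 1UL;    /* complete_depth + partial_depth */
  /* Total is 2*ceil(mcache_depth/2)+3+fec_resolver_depth, bounded above by
     mcache_depth+4+fec_resolver_depth; times 4 for fd_shred34_t structs. */
  return from_bank + from_net + fec_resolver_depth;
}
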
75 : /* The memory this tile uses is a bit complicated and has some logical
76 : aliasing to facilitate zero-copy use. We have a dcache containing
77 : fd_shred34_t objects, which are basically 34 fd_shred_t objects
78 : padded to their max size, where 34 is set so that the size of the
79 : fd_shred34_t object (including some metadata) is less than
80 : USHORT_MAX, which facilitates sending it using Tango. Then, for each
81 : set of 4 consecutive fd_shred34_t objects, we have an fd_fec_set_t.
 82 :    The first 34 data shreds point to the payload section of each of
 83 :    the packets in the first fd_shred34_t.  The other
84 : 33 data shreds point into the second fd_shred34_t. Similar for the
85 : parity shreds pointing into the third and fourth fd_shred34_t. */
86 :
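/* A sketch of the aliasing described above.  The pkts[].buffer member
   name on fd_shred34_t is an assumption for illustration; only the
   34/33 data + 34/33 parity split is load-bearing: */

static inline void
example_wire_fec_set( fd_fec_set_t * set,
                      fd_shred34_t * s34 /* 4 consecutive entries */ ) {
  for( ulong j=0UL; j<FD_REEDSOL_DATA_SHREDS_MAX;   j++ )
    set->data_shreds  [ j ] = s34[       j/34UL ].pkts[ j%34UL ].buffer; /* s34[0..1] */
  for( ulong j=0UL; j<FD_REEDSOL_PARITY_SHREDS_MAX; j++ )
    set->parity_shreds[ j ] = s34[ 2UL + j/34UL ].pkts[ j%34UL ].buffer; /* s34[2..3] */
}
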
87 : #define FD_SHRED_TILE_SCRATCH_ALIGN 128UL
88 :
89 0 : #define IN_KIND_CONTACT (0UL)
90 0 : #define IN_KIND_EPOCH (1UL) /* Firedancer */
91 0 : #define IN_KIND_STAKE (2UL) /* Frankendancer */
92 0 : #define IN_KIND_POH (3UL)
93 0 : #define IN_KIND_NET (4UL)
94 0 : #define IN_KIND_SIGN (5UL)
95 0 : #define IN_KIND_REPAIR (6UL)
96 0 : #define IN_KIND_IPECHO (7UL)
97 0 : #define IN_KIND_GOSSIP (8UL)
98 :
99 0 : #define NET_OUT_IDX 1
100 0 : #define SIGN_OUT_IDX 2
101 :
102 0 : #define DCACHE_ENTRIES_PER_FEC_SET (4UL)
103 : FD_STATIC_ASSERT( sizeof(fd_shred34_t) < USHORT_MAX, shred_34 );
104 : FD_STATIC_ASSERT( 34*DCACHE_ENTRIES_PER_FEC_SET >= FD_REEDSOL_DATA_SHREDS_MAX+FD_REEDSOL_PARITY_SHREDS_MAX, shred_34 );
105 : FD_STATIC_ASSERT( sizeof(fd_shred34_t) == FD_SHRED_STORE_MTU, shred_34 );
106 :
107 : FD_STATIC_ASSERT( sizeof(fd_entry_batch_meta_t)==56UL, poh_shred_mtu );
108 :
109 0 : #define FD_SHRED_ADD_SHRED_EXTRA_RETVAL_CNT 2
110 :
 111 : /* Number of entries in the block_ids table.  Each entry is 32 bytes.
 112 :    This table keeps track of the block ids that we create when we're
 113 :    leader, so that we can access them whenever we need a *parent*
 114 :    block id for a new block.  A larger table allows retrieving older
 115 :    parent block ids.  Currently it's sized for the worst-case parent
 116 :    offset of USHORT_MAX (the max allowed in a shred), making the
 117 :    total table 2MiB.
 118 :    See also the comment on chained_merkle_root. */
119 0 : #define BLOCK_IDS_TABLE_CNT USHORT_MAX
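/* Example lookups, mirroring the use sites in during_frag below:
     ctx->block_ids[ target_slot % BLOCK_IDS_TABLE_CNT ]  current block's entry
     ctx->block_ids[ parent_slot % BLOCK_IDS_TABLE_CNT ]  parent block id
   Size check: USHORT_MAX entries * 32 bytes ~= 2 MiB, as noted above. */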
120 :
121 : /* See note on parallelization above. Currently we process all batches in tile 0. */
122 : #if 1
123 : #define SHOULD_PROCESS_THESE_SHREDS ( ctx->round_robin_id==0 )
124 : #else
125 : #define SHOULD_PROCESS_THESE_SHREDS ( ctx->batch_cnt%ctx->round_robin_cnt==ctx->round_robin_id )
126 : #endif
127 :
128 : /* The behavior of the shred tile is slightly different for
129 : Frankendancer vs Firedancer. For example, Frankendancer produces
 130 :    chained merkle shreds, while Firedancer doesn't yet.  We can check
 131 :    the difference at runtime by inspecting the topology.  The simplest
132 : way is to test if ctx->store is initialized.
133 :
134 : FIXME don't assume only frank vs. fire */
135 : #define IS_FIREDANCER ( ctx->store!=NULL )
136 :
137 : typedef union {
138 : struct {
139 : fd_wksp_t * mem;
140 : ulong chunk0;
141 : ulong wmark;
142 : };
143 : fd_net_rx_bounds_t net_rx;
144 : } fd_shred_in_ctx_t;
145 :
146 : typedef struct {
147 : fd_shredder_t * shredder;
148 : fd_fec_resolver_t * resolver;
149 : fd_pubkey_t identity_key[1]; /* Just the public key */
150 :
151 : ulong round_robin_id;
152 : ulong round_robin_cnt;
153 : /* Number of batches shredded from PoH during the current slot.
154 : This should be the same for all the shred tiles. */
155 : ulong batch_cnt;
156 : /* Slot of the most recent microblock we've seen from PoH,
157 : or 0 if we haven't seen one yet */
158 : ulong slot;
159 :
160 : fd_keyswitch_t * keyswitch;
161 : fd_keyguard_client_t keyguard_client[1];
162 :
163 : /* shred34 and fec_sets are very related: fec_sets[i] has pointers
164 : to the shreds in shred34[4*i + k] for k=0,1,2,3. */
165 : fd_shred34_t * shred34;
166 : fd_fec_set_t * fec_sets;
167 :
168 : fd_stake_ci_t * stake_ci;
 169 :   /* These are used between during_frag and after_frag */
170 : fd_shred_dest_weighted_t * new_dest_ptr;
171 : ulong new_dest_cnt;
172 : ulong shredded_txn_cnt;
173 :
174 : ulong poh_in_expect_seq;
175 :
176 : ushort net_id;
177 :
178 : int skip_frag;
179 :
180 : ulong adtl_dests_leader_cnt;
181 : fd_shred_dest_weighted_t adtl_dests_leader [ FD_TOPO_ADTL_DESTS_MAX ];
182 : ulong adtl_dests_retransmit_cnt;
183 : fd_shred_dest_weighted_t adtl_dests_retransmit[ FD_TOPO_ADTL_DESTS_MAX ];
184 :
185 : fd_ip4_udp_hdrs_t data_shred_net_hdr [1];
186 : fd_ip4_udp_hdrs_t parity_shred_net_hdr[1];
187 :
188 : ulong shredder_fec_set_idx; /* In [0, shredder_max_fec_set_idx) */
189 : ulong shredder_max_fec_set_idx; /* exclusive */
190 :
191 : uchar shredder_merkle_root[32];
192 :
193 : ulong send_fec_set_idx[ FD_SHRED_BATCH_FEC_SETS_MAX ];
194 : ulong send_fec_set_cnt;
195 : ulong tsorig; /* timestamp of the last packet in compressed form */
196 :
197 : /* Includes Ethernet, IP, UDP headers */
198 : ulong shred_buffer_sz;
199 : uchar shred_buffer[ FD_NET_MTU ];
200 :
201 : fd_shred_in_ctx_t in[ 32 ];
202 : int in_kind[ 32 ];
203 :
204 : fd_wksp_t * net_out_mem;
205 : ulong net_out_chunk0;
206 : ulong net_out_wmark;
207 : ulong net_out_chunk;
208 :
209 : ulong store_out_idx;
210 : fd_wksp_t * store_out_mem;
211 : ulong store_out_chunk0;
212 : ulong store_out_wmark;
213 : ulong store_out_chunk;
214 :
215 : /* This is the output link for shreds that is currently consumed by
 216 :      the repair and replay tiles. */
217 : ulong shred_out_idx;
218 : fd_wksp_t * shred_out_mem;
219 : ulong shred_out_chunk0;
220 : ulong shred_out_wmark;
221 : ulong shred_out_chunk;
222 :
223 : fd_store_t * store;
224 :
225 : fd_gossip_update_message_t gossip_upd_buf[1];
226 :
227 : struct {
228 : fd_histf_t contact_info_cnt[ 1 ];
229 : fd_histf_t batch_sz[ 1 ];
230 : fd_histf_t batch_microblock_cnt[ 1 ];
231 : fd_histf_t shredding_timing[ 1 ];
232 : fd_histf_t add_shred_timing[ 1 ];
233 : ulong shred_processing_result[ FD_FEC_RESOLVER_ADD_SHRED_RETVAL_CNT+FD_SHRED_ADD_SHRED_EXTRA_RETVAL_CNT ];
234 : ulong invalid_block_id_cnt;
235 : ulong shred_rejected_unchained_cnt;
236 : ulong repair_rcv_cnt;
237 : ulong repair_rcv_bytes;
238 : ulong turbine_rcv_cnt;
239 : ulong turbine_rcv_bytes;
240 : fd_histf_t store_insert_wait[ 1 ];
241 : fd_histf_t store_insert_work[ 1 ];
242 : } metrics[ 1 ];
243 :
244 : struct {
245 : ulong txn_cnt;
246 : ulong pos; /* in payload, range [0, FD_SHRED_BATCH_RAW_BUF_SZ-8UL) */
247 : ulong slot; /* set to 0 when pos==0 */
248 : union {
249 : struct {
250 : ulong microblock_cnt;
251 : uchar payload[ FD_SHRED_BATCH_RAW_BUF_SZ - 8UL ];
252 : };
253 : uchar raw[ FD_SHRED_BATCH_RAW_BUF_SZ ];
254 : };
255 : } pending_batch;
256 :
257 : fd_epoch_schedule_t epoch_schedule[1];
258 : fd_shred_features_activation_t features_activation[1];
 259 :   /* too large to be left on the stack */
260 : fd_shred_dest_idx_t scratchpad_dests[ FD_SHRED_DEST_MAX_FANOUT*(FD_REEDSOL_DATA_SHREDS_MAX+FD_REEDSOL_PARITY_SHREDS_MAX) ];
261 :
262 : uchar * chained_merkle_root;
263 : fd_bmtree_node_t out_merkle_roots[ FD_SHRED_BATCH_FEC_SETS_MAX ];
264 : uchar block_ids[ BLOCK_IDS_TABLE_CNT ][ FD_SHRED_MERKLE_ROOT_SZ ];
265 : } fd_shred_ctx_t;
266 :
267 : /* shred features are generally considered active at the epoch *following*
268 : the epoch in which the feature gate is activated.
269 :
270 : As an optimization, when the activation slot is received, it is converted
271 : into the first slot of the subsequent epoch. This allows for a more
272 : efficient check (shred_slot >= feature_slot) and avoids the overhead of
273 : repeatedly converting slots into epochs for comparison.
274 :
275 : This function is only for Firedancer, while Frankendancer already receives
 276 :    the final activation slot from the PoH tile. */
277 :
278 : In Agave, this is done with check_feature_activation():
279 : https://github.com/anza-xyz/agave/blob/v3.1.4/turbine/src/cluster_nodes.rs#L771
280 : https://github.com/anza-xyz/agave/blob/v3.1.4/core/src/shred_fetch_stage.rs#L456 */
281 : static inline ulong
282 0 : fd_shred_get_feature_activation_slot0( ulong feature_slot, fd_shred_ctx_t * ctx ) {
283 : /* if the feature does not have an activation slot yet, return ULONG_MAX */
284 0 : if( FD_UNLIKELY( feature_slot==ULONG_MAX ) ) {
285 0 : return ULONG_MAX;
286 0 : }
287 : /* if we don't have an epoch schedule yet, return ULONG_MAX */
288 0 : if( FD_UNLIKELY( ctx->epoch_schedule->slots_per_epoch==0 ) ) {
289 0 : return ULONG_MAX;
290 0 : }
291 : /* compute the activation epoch, add one, return the first slot. */
292 0 : ulong feature_epoch = 1 + fd_slot_to_epoch( ctx->epoch_schedule, feature_slot, NULL );
293 0 : return fd_epoch_slot0( ctx->epoch_schedule, feature_epoch );
294 0 : }
295 :
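/* Usage sketch: after the conversion above, per-shred feature checks in
   during_frag/after_frag reduce to a single comparison, e.g.

     int enforce_fixed_fec =
       ( shred->slot >= ctx->features_activation->enforce_fixed_fec_set );

   A feature with no activation slot maps to ULONG_MAX, so the
   comparison is false for every real slot. */
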
296 : FD_FN_CONST static inline ulong
297 0 : scratch_align( void ) {
298 0 : return 128UL;
299 0 : }
300 :
301 : FD_FN_PURE static inline ulong
302 0 : scratch_footprint( fd_topo_tile_t const * tile ) {
303 :
304 0 : ulong fec_resolver_footprint = fd_fec_resolver_footprint( tile->shred.fec_resolver_depth, 1UL, tile->shred.depth,
305 0 : 128UL * tile->shred.fec_resolver_depth );
306 0 : ulong fec_set_cnt = tile->shred.depth + tile->shred.fec_resolver_depth + 4UL;
307 :
308 0 : ulong l = FD_LAYOUT_INIT;
309 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_shred_ctx_t), sizeof(fd_shred_ctx_t) );
310 0 : l = FD_LAYOUT_APPEND( l, fd_stake_ci_align(), fd_stake_ci_footprint() );
311 0 : l = FD_LAYOUT_APPEND( l, fd_fec_resolver_align(), fec_resolver_footprint );
312 0 : l = FD_LAYOUT_APPEND( l, fd_shredder_align(), fd_shredder_footprint() );
313 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_fec_set_t), sizeof(fd_fec_set_t)*fec_set_cnt );
314 0 : return FD_LAYOUT_FINI( l, scratch_align() );
315 0 : }
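
/* Note: the FD_LAYOUT_APPEND order above must match the
   FD_SCRATCH_ALLOC_APPEND order used at tile init (see privileged_init
   below) so each object is recovered at the same scratch offset. */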
316 :
317 : static inline void
318 0 : during_housekeeping( fd_shred_ctx_t * ctx ) {
319 0 : if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
320 0 : ulong seq_must_complete = ctx->keyswitch->param;
321 :
322 0 : if( FD_UNLIKELY( fd_seq_lt( ctx->poh_in_expect_seq, seq_must_complete ) ) ) {
323 : /* See fd_keyswitch.h, we need to flush any in-flight shreds from
324 : the leader pipeline before switching key. */
325 0 : FD_LOG_WARNING(( "Flushing in-flight unpublished shreds, must reach seq %lu, currently at %lu ...", seq_must_complete, ctx->poh_in_expect_seq ));
326 0 : return;
327 0 : }
328 :
329 0 : memcpy( ctx->identity_key->uc, ctx->keyswitch->bytes, 32UL );
330 0 : fd_stake_ci_set_identity( ctx->stake_ci, ctx->identity_key );
331 0 : fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
332 0 : }
333 0 : }
334 :
335 : static inline void
336 0 : metrics_write( fd_shred_ctx_t * ctx ) {
337 0 : FD_MHIST_COPY( SHRED, CLUSTER_CONTACT_INFO_CNT, ctx->metrics->contact_info_cnt );
338 0 : FD_MHIST_COPY( SHRED, BATCH_SZ, ctx->metrics->batch_sz );
339 0 : FD_MHIST_COPY( SHRED, BATCH_MICROBLOCK_CNT, ctx->metrics->batch_microblock_cnt );
340 0 : FD_MHIST_COPY( SHRED, SHREDDING_DURATION_SECONDS, ctx->metrics->shredding_timing );
341 0 : FD_MHIST_COPY( SHRED, ADD_SHRED_DURATION_SECONDS, ctx->metrics->add_shred_timing );
342 0 : FD_MCNT_SET ( SHRED, SHRED_REPAIR_RCV, ctx->metrics->repair_rcv_cnt );
343 0 : FD_MCNT_SET ( SHRED, SHRED_REPAIR_RCV_BYTES, ctx->metrics->repair_rcv_bytes );
344 0 : FD_MCNT_SET ( SHRED, SHRED_TURBINE_RCV, ctx->metrics->turbine_rcv_cnt );
345 0 : FD_MCNT_SET ( SHRED, SHRED_TURBINE_RCV_BYTES, ctx->metrics->turbine_rcv_bytes );
346 :
347 0 : FD_MCNT_SET ( SHRED, INVALID_BLOCK_ID, ctx->metrics->invalid_block_id_cnt );
348 0 : FD_MCNT_SET ( SHRED, SHRED_REJECTED_UNCHAINED, ctx->metrics->shred_rejected_unchained_cnt );
349 0 : FD_MHIST_COPY( SHRED, STORE_INSERT_WAIT, ctx->metrics->store_insert_wait );
350 0 : FD_MHIST_COPY( SHRED, STORE_INSERT_WORK, ctx->metrics->store_insert_work );
351 :
352 0 : FD_MCNT_ENUM_COPY( SHRED, SHRED_PROCESSED, ctx->metrics->shred_processing_result );
353 0 : }
354 :
355 : static inline void
356 : handle_new_cluster_contact_info( fd_shred_ctx_t * ctx,
357 0 : uchar const * buf ) {
358 0 : ulong const * header = (ulong const *)fd_type_pun_const( buf );
359 :
360 0 : ulong dest_cnt = header[ 0 ];
361 0 : fd_histf_sample( ctx->metrics->contact_info_cnt, dest_cnt );
362 :
363 0 : if( dest_cnt >= MAX_SHRED_DESTS )
364 0 : FD_LOG_ERR(( "Cluster nodes had %lu destinations, which was more than the max of %lu", dest_cnt, MAX_SHRED_DESTS ));
365 :
366 0 : fd_shred_dest_wire_t const * in_dests = fd_type_pun_const( header+1UL );
367 0 : fd_shred_dest_weighted_t * dests = fd_stake_ci_dest_add_init( ctx->stake_ci );
368 :
369 0 : ctx->new_dest_ptr = dests;
370 0 : ctx->new_dest_cnt = dest_cnt;
371 :
372 0 : for( ulong i=0UL; i<dest_cnt; i++ ) {
373 0 : memcpy( dests[i].pubkey.uc, in_dests[i].pubkey, 32UL );
374 0 : dests[i].ip4 = in_dests[i].ip4_addr;
375 0 : dests[i].port = in_dests[i].udp_port;
376 0 : }
377 0 : }
378 :
379 : static inline void
380 0 : finalize_new_cluster_contact_info( fd_shred_ctx_t * ctx ) {
381 0 : fd_stake_ci_dest_add_fini( ctx->stake_ci, ctx->new_dest_cnt );
382 0 : }
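
/* Frag layout consumed by the pair above, inferred from the parsing in
   handle_new_cluster_contact_info:

     ulong                dest_cnt;      <- header[ 0 ]
     fd_shred_dest_wire_t dests[ ... ];  <- starting at header+1

   The add_init/add_fini split is a prepare/commit pattern: during_frag
   stages the destination list, and after_frag commits it only once the
   frag is known not to have been overrun. */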
383 :
384 : static inline int
385 : before_frag( fd_shred_ctx_t * ctx,
386 : ulong in_idx,
387 : ulong seq,
388 0 : ulong sig ) {
389 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_IPECHO ) ) {
390 0 : FD_TEST( sig!=0UL && sig<=USHORT_MAX );
391 0 : fd_shredder_set_shred_version ( ctx->shredder, (ushort)sig );
392 0 : fd_fec_resolver_set_shred_version( ctx->resolver, (ushort)sig );
393 0 : return 1;
394 0 : }
395 :
396 0 : if( FD_UNLIKELY( !ctx->shredder->shred_version ) ) return -1;
397 :
398 0 : if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_POH ) ) {
399 0 : ctx->poh_in_expect_seq = seq+1UL;
400 0 : return (int)(fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_MICROBLOCK) & (int)(fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_FEAT_ACT_SLOT);
401 0 : }
402 0 : if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
403 0 : return (int)(fd_disco_netmux_sig_proto( sig )!=DST_PROTO_SHRED) & (int)(fd_disco_netmux_sig_proto( sig )!=DST_PROTO_REPAIR);
404 0 : }
405 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ){
406 0 : return sig!=FD_GOSSIP_UPDATE_TAG_CONTACT_INFO &&
407 0 : sig!=FD_GOSSIP_UPDATE_TAG_CONTACT_INFO_REMOVE;
408 0 : }
409 0 : return 0;
410 0 : }
411 :
412 : static void
413 : during_frag( fd_shred_ctx_t * ctx,
414 : ulong in_idx,
415 : ulong seq FD_PARAM_UNUSED,
416 : ulong sig,
417 : ulong chunk,
418 : ulong sz,
419 0 : ulong ctl ) {
420 :
421 0 : ctx->skip_frag = 0;
422 :
423 0 : ctx->tsorig = fd_frag_meta_ts_comp( fd_tickcount() );
424 :
425 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_REPAIR ) ) {
426 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
427 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
428 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
429 :
430 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
431 0 : fd_memcpy( ctx->shred_buffer, dcache_entry, sz );
432 0 : return;
433 0 : }
434 :
435 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_CONTACT ) ) {
436 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
437 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
438 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
439 :
440 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
441 0 : handle_new_cluster_contact_info( ctx, dcache_entry );
442 0 : return;
443 0 : }
444 :
445 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
446 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
447 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
448 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
449 0 : uchar const * gossip_upd_msg = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
450 0 : fd_memcpy( ctx->gossip_upd_buf, gossip_upd_msg, sz );
451 0 : return;
452 0 : }
453 :
454 : /* Firedancer only */
455 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
456 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
457 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
458 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
459 :
460 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
461 0 : fd_epoch_info_msg_t const * epoch_msg = fd_type_pun_const( dcache_entry );
462 :
463 0 : fd_stake_ci_epoch_msg_init( ctx->stake_ci, epoch_msg );
464 :
465 0 : *ctx->epoch_schedule = epoch_msg->epoch_schedule;
466 0 : ctx->features_activation->enforce_fixed_fec_set = fd_shred_get_feature_activation_slot0(
467 0 : epoch_msg->features.enforce_fixed_fec_set, ctx );
468 0 : ctx->features_activation->switch_to_chacha8_turbine = fd_shred_get_feature_activation_slot0(
469 0 : epoch_msg->features.switch_to_chacha8_turbine, ctx );
470 :
471 0 : return;
472 0 : }
473 :
474 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_STAKE ) ) {
475 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
476 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
477 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
478 :
479 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
480 0 : fd_stake_ci_stake_msg_init( ctx->stake_ci, fd_type_pun_const( dcache_entry ) );
481 0 : return;
482 0 : }
483 :
484 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_POH ) ) {
485 0 : ctx->send_fec_set_cnt = 0UL;
486 :
487 0 : if( FD_UNLIKELY( (fd_disco_poh_sig_pkt_type( sig )==POH_PKT_TYPE_FEAT_ACT_SLOT) ) ) {
488 : /* There is a subset of FD_SHRED_FEATURES_ACTIVATION_... slots that
489 : the shred tile needs to be aware of. Since this requires the
490 : bank, we are forced (so far) to receive them from the poh tile
491 : (as a POH_PKT_TYPE_FEAT_ACT_SLOT). */
492 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
493 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=(sizeof(fd_shred_features_activation_t)) ) )
494 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
495 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
496 :
497 0 : fd_shred_features_activation_t const * act_data = (fd_shred_features_activation_t const *)dcache_entry;
498 0 : memcpy( ctx->features_activation, act_data, sizeof(fd_shred_features_activation_t) );
499 0 : }
500 0 : else { /* (fd_disco_poh_sig_pkt_type( sig )==POH_PKT_TYPE_MICROBLOCK) */
501 : /* This is a frag from the PoH tile. We'll copy it to our pending
502 : microblock batch and shred it if necessary (last in block or
503 : above watermark). We just go ahead and shred it here, even
504 : though we may get overrun. If we do end up getting overrun, we
505 : just won't send these shreds out and we'll reuse the FEC set for
506 : the next one. From a higher level though, if we do get overrun,
507 : a bunch of shreds will never be transmitted, and we'll end up
508 : producing a block that never lands on chain. */
509 :
510 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
511 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_POH_SHRED_MTU ||
512 0 : sz<(sizeof(fd_entry_batch_meta_t)+sizeof(fd_entry_batch_header_t)) ) )
513 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
514 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
515 :
516 0 : fd_entry_batch_meta_t const * entry_meta = (fd_entry_batch_meta_t const *)dcache_entry;
517 0 : uchar const * entry = dcache_entry + sizeof(fd_entry_batch_meta_t);
518 0 : ulong entry_sz = sz - sizeof(fd_entry_batch_meta_t);
519 :
520 0 : fd_entry_batch_header_t const * microblock = (fd_entry_batch_header_t const *)entry;
521 :
522 : /* It should never be possible for this to fail, but we check it
523 : anyway. */
524 0 : FD_TEST( entry_sz + ctx->pending_batch.pos <= sizeof(ctx->pending_batch.payload) );
525 :
526 0 : ulong target_slot = fd_disco_poh_sig_slot( sig );
527 0 : if( FD_UNLIKELY( (ctx->pending_batch.microblock_cnt>0) & (ctx->pending_batch.slot!=target_slot) ) ) {
528 : /* TODO: The Agave client sends a dummy entry batch with only 1
529 : byte and the block-complete bit set. This helps other
530 : validators know that the block is dead and they should not try
531 : to continue building a fork on it. We probably want a similar
532 : approach eventually. */
533 0 : FD_LOG_WARNING(( "Abandoning %lu microblocks for slot %lu and switching to slot %lu",
534 0 : ctx->pending_batch.microblock_cnt, ctx->pending_batch.slot, target_slot ));
535 0 : ctx->pending_batch.slot = 0UL;
536 0 : ctx->pending_batch.pos = 0UL;
537 0 : ctx->pending_batch.microblock_cnt = 0UL;
538 0 : ctx->pending_batch.txn_cnt = 0UL;
539 0 : ctx->batch_cnt = 0UL;
540 :
541 0 : FD_MCNT_INC( SHRED, MICROBLOCKS_ABANDONED, 1UL );
542 0 : }
543 :
544 0 : ctx->pending_batch.slot = target_slot;
545 0 : if( FD_UNLIKELY( target_slot!=ctx->slot )) {
546 : /* Reset batch count if we are in a new slot */
547 0 : ctx->batch_cnt = 0UL;
548 0 : ctx->slot = target_slot;
549 :
550 : /* At the beginning of a new slot, prepare chained_merkle_root.
 551 :          chained_merkle_root is initialized to the block_id of the parent
 552 :          block; there are two cases:
 553 :
 554 :          1. block_id is passed in by the poh tile:
 555 :             - it's always passed when the parent block had a different leader
 556 :             - it may be passed when we were the leader for the parent block
 557 :               (there are race conditions when it's not passed)
558 :
559 : 2. block_id is taken from block_ids table if we were the leader
560 : for the parent block (when we were NOT the leader, because of
561 : equivocation, we can't store block_id in the table)
562 :
563 : chained_merkle_root is stored in block_ids table at target_slot
564 : and it's progressively updated as more microblocks are received.
565 : As a result, when we move to a new slot, the block_ids table at
566 : the old slot will contain the block_id.
567 :
 568 :          The block_ids table is designed to protect against the race-condition
 569 :          case in 1.; therefore the table may not be set in some cases, e.g. if
 570 :          a validator (re)starts, but in those cases we don't expect the race
 571 :          condition to apply. */
572 0 : ctx->chained_merkle_root = ctx->block_ids[ target_slot % BLOCK_IDS_TABLE_CNT ];
573 0 : if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
574 0 : if( FD_LIKELY( entry_meta->parent_block_id_valid ) ) {
575 : /* 1. Initialize chained_merkle_root sent from poh tile */
576 0 : memcpy( ctx->chained_merkle_root, entry_meta->parent_block_id, FD_SHRED_MERKLE_ROOT_SZ );
577 0 : } else {
578 0 : ulong parent_slot = target_slot - entry_meta->parent_offset;
579 0 : fd_epoch_leaders_t const * lsched = fd_stake_ci_get_lsched_for_slot( ctx->stake_ci, parent_slot );
580 0 : fd_pubkey_t const * slot_leader = fd_epoch_leaders_get( lsched, parent_slot );
581 :
582 0 : if( lsched && slot_leader && fd_memeq( slot_leader, ctx->identity_key, sizeof(fd_pubkey_t) ) ) {
583 : /* 2. Initialize chained_merkle_root from block_ids table, if we were the leader */
584 0 : memcpy( ctx->chained_merkle_root, ctx->block_ids[ parent_slot % BLOCK_IDS_TABLE_CNT ], FD_SHRED_MERKLE_ROOT_SZ );
585 0 : } else {
586 : /* This should never happen, log a metric and set chained_merkle_root to 0 */
587 0 : ctx->metrics->invalid_block_id_cnt++;
588 0 : memset( ctx->chained_merkle_root, 0, FD_SHRED_MERKLE_ROOT_SZ );
589 0 : }
590 0 : }
591 0 : }
592 0 : }
593 :
594 0 : if( FD_LIKELY( !SHOULD_PROCESS_THESE_SHREDS ) ) {
595 : /* If we are not processing this batch, filter in after_frag. */
596 0 : ctx->skip_frag = 1;
597 0 : }
598 :
599 0 : ulong pending_batch_wmark = FD_SHRED_BATCH_WMARK_CHAINED;
600 0 : uchar * chained_merkle_root = ctx->chained_merkle_root;
601 0 : ulong load_for_32_shreds = FD_SHREDDER_CHAINED_FEC_SET_PAYLOAD_SZ;
602 : /* All fec sets in the last batch of a block need to be resigned.
603 : This needs to match Agave's behavior - as a reference, see:
604 : https://github.com/anza-xyz/agave/blob/v2.3/ledger/src/shred/merkle.rs#L1040 */
605 0 : if( FD_UNLIKELY( entry_meta->block_complete ) ) {
606 0 : pending_batch_wmark = FD_SHRED_BATCH_WMARK_RESIGNED;
607 : /* chained_merkle_root also applies to resigned FEC sets. */
608 0 : load_for_32_shreds = FD_SHREDDER_RESIGNED_FEC_SET_PAYLOAD_SZ;
609 0 : }
610 :
611 : /* If this microblock completes the block, the batch is then
612 : finalized here. Otherwise, we check whether the new entry
613 : would exceed the pending_batch_wmark. If true, then the
614 : batch is closed now, shredded, and a new batch is started
615 : with the incoming microblock. If false, no shredding takes
616 : place, and the microblock is added to the current batch. */
617 0 : int batch_would_exceed_wmark = ( ctx->pending_batch.pos + entry_sz ) > pending_batch_wmark;
618 0 : int include_in_current_batch = entry_meta->block_complete | ( !batch_would_exceed_wmark );
619 0 : int process_current_batch = entry_meta->block_complete | batch_would_exceed_wmark;
620 0 : int init_new_batch = !include_in_current_batch;
621 :
622 0 : if( FD_LIKELY( include_in_current_batch ) ) {
623 0 : if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
624 : /* Ugh, yet another memcpy */
625 0 : fd_memcpy( ctx->pending_batch.payload + ctx->pending_batch.pos, entry, entry_sz );
626 0 : }
627 0 : ctx->pending_batch.pos += entry_sz;
628 0 : ctx->pending_batch.microblock_cnt += 1UL;
629 0 : ctx->pending_batch.txn_cnt += microblock->txn_cnt;
630 0 : }
631 :
632 0 : if( FD_LIKELY( process_current_batch )) {
633 : /* Batch and padding size calculation. */
634 0 : ulong batch_sz = sizeof(ulong) + ctx->pending_batch.pos; /* without padding */
635 0 : ulong batch_sz_padded = load_for_32_shreds * ( ( batch_sz + load_for_32_shreds - 1UL ) / load_for_32_shreds );
636 0 : ulong padding_sz = batch_sz_padded - batch_sz;
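      /* batch_sz_padded rounds batch_sz up to the next multiple of
         load_for_32_shreds: each fd_shredder_next_fec_set call below
         consumes exactly load_for_32_shreds bytes of the batch, so the
         tail is zero-filled to complete the final FEC set. */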
637 :
638 0 : if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
639 : /* If it's our turn, shred this batch. FD_UNLIKELY because shred
640 : tile cnt generally >= 2 */
641 :
642 0 : long shredding_timing = -fd_tickcount();
643 :
644 0 : fd_memset( ctx->pending_batch.payload + ctx->pending_batch.pos, 0, padding_sz );
645 :
646 0 : ctx->send_fec_set_cnt = 0UL; /* verbose */
647 0 : ctx->shredded_txn_cnt = ctx->pending_batch.txn_cnt;
648 :
649 0 : fd_shredder_init_batch( ctx->shredder, ctx->pending_batch.raw, batch_sz_padded, target_slot, entry_meta );
650 :
651 0 : ulong pend_sz = batch_sz_padded;
652 0 : ulong pend_idx = 0;
653 0 : while( pend_sz > 0UL ) {
654 :
655 0 : fd_fec_set_t * out = ctx->fec_sets + ctx->shredder_fec_set_idx;
656 :
657 0 : FD_TEST( fd_shredder_next_fec_set( ctx->shredder, out, chained_merkle_root, ctx->out_merkle_roots[pend_idx].hash ) );
658 :
659 0 : d_rcvd_join( d_rcvd_new( d_rcvd_delete( d_rcvd_leave( out->data_shred_rcvd ) ) ) );
660 0 : p_rcvd_join( p_rcvd_new( p_rcvd_delete( p_rcvd_leave( out->parity_shred_rcvd ) ) ) );
661 :
662 0 : ctx->send_fec_set_idx[ ctx->send_fec_set_cnt ] = ctx->shredder_fec_set_idx;
663 0 : ctx->send_fec_set_cnt += 1UL;
664 0 : ctx->shredder_fec_set_idx = (ctx->shredder_fec_set_idx+1UL)%ctx->shredder_max_fec_set_idx;
665 :
666 0 : pend_sz -= load_for_32_shreds;
667 0 : pend_idx++;
668 0 : }
669 :
670 0 : fd_shredder_fini_batch( ctx->shredder );
671 0 : shredding_timing += fd_tickcount();
672 :
673 : /* Update metrics */
674 0 : fd_histf_sample( ctx->metrics->batch_sz, batch_sz /* without padding */ );
675 0 : fd_histf_sample( ctx->metrics->batch_microblock_cnt, ctx->pending_batch.microblock_cnt );
676 0 : fd_histf_sample( ctx->metrics->shredding_timing, (ulong)shredding_timing );
677 0 : } else {
678 0 : ctx->send_fec_set_cnt = 0UL; /* verbose */
679 :
680 0 : ulong shred_type = FD_SHRED_TYPE_MERKLE_DATA_CHAINED;
681 0 : if( FD_UNLIKELY( entry_meta->block_complete ) ) {
682 0 : shred_type = FD_SHRED_TYPE_MERKLE_DATA_CHAINED_RESIGNED;
683 0 : }
684 0 : if( FD_LIKELY( IS_FIREDANCER ) ) {
685 0 : shred_type = FD_SHRED_TYPE_MERKLE_DATA;
686 0 : }
687 0 : fd_shredder_skip_batch( ctx->shredder, batch_sz_padded, target_slot, shred_type );
688 0 : }
689 :
690 0 : ctx->pending_batch.slot = 0UL;
691 0 : ctx->pending_batch.pos = 0UL;
692 0 : ctx->pending_batch.microblock_cnt = 0UL;
693 0 : ctx->pending_batch.txn_cnt = 0UL;
694 0 : ctx->batch_cnt++;
695 0 : }
696 :
697 0 : if( FD_UNLIKELY( init_new_batch ) ) {
698 : /* TODO: this assumes that SHOULD_PROCESS_THESE_SHREDS is
699 : constant across batches. Otherwise, the condition may
700 : need to be removed (or adjusted). */
701 0 : if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
702 : /* Ugh, yet another memcpy */
703 0 : fd_memcpy( ctx->pending_batch.payload + 0UL /* verbose */, entry, entry_sz );
704 0 : }
705 0 : ctx->pending_batch.slot = target_slot;
706 0 : ctx->pending_batch.pos = entry_sz;
707 0 : ctx->pending_batch.microblock_cnt = 1UL;
708 0 : ctx->pending_batch.txn_cnt = microblock->txn_cnt;
709 0 : }
710 0 : }
711 0 : } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
712 : /* The common case, from the net tile. The FEC resolver API does
713 : not present a prepare/commit model. If we get overrun between
714 : when the FEC resolver verifies the signature and when it stores
715 : the local copy, we could end up storing and retransmitting
716 : garbage. Instead we copy it locally, sadly, and only give it to
717 : the FEC resolver when we know it won't be overrun anymore. */
718 0 : uchar const * dcache_entry = fd_net_rx_translate_frag( &ctx->in[ in_idx ].net_rx, chunk, ctl, sz );
719 0 : ulong hdr_sz = fd_disco_netmux_sig_hdr_sz( sig );
720 0 : FD_TEST( hdr_sz <= sz ); /* Should be ensured by the net tile */
721 0 : fd_shred_t const * shred = fd_shred_parse( dcache_entry+hdr_sz, sz-hdr_sz );
722 0 : if( FD_UNLIKELY( !shred ) ) {
723 0 : ctx->skip_frag = 1;
724 0 : return;
725 0 : };
726 :
727 0 : if( FD_UNLIKELY( fd_disco_netmux_sig_proto( sig )==DST_PROTO_REPAIR ) ) {
728 0 : ctx->metrics->repair_rcv_cnt++;
729 0 : ctx->metrics->repair_rcv_bytes += sz;
730 0 : } else {
731 0 : ctx->metrics->turbine_rcv_cnt++;
732 0 : ctx->metrics->turbine_rcv_bytes += sz;
733 0 : }
734 :
735 : /* Drop unchained merkle shreds */
736 0 : int is_unchained = !fd_shred_is_chained( fd_shred_type( shred->variant ) );
737 0 : if( FD_UNLIKELY( is_unchained ) ) {
738 0 : ctx->metrics->shred_rejected_unchained_cnt++;
739 0 : ctx->skip_frag = 1;
740 0 : return;
741 0 : };
742 :
743 : /* all shreds in the same FEC set will have the same signature
744 : so we can round-robin shreds between the shred tiles based on
745 : just the signature without splitting individual FEC sets. */
746 0 : ulong sig = fd_ulong_load_8( shred->signature );
747 0 : if( FD_LIKELY( sig%ctx->round_robin_cnt!=ctx->round_robin_id ) ) {
748 0 : ctx->skip_frag = 1;
749 0 : return;
750 0 : }
751 0 : fd_memcpy( ctx->shred_buffer, dcache_entry+hdr_sz, sz-hdr_sz );
752 0 : ctx->shred_buffer_sz = sz-hdr_sz;
753 0 : }
754 0 : }
755 :
756 : static inline void
757 : send_shred( fd_shred_ctx_t * ctx,
758 : fd_stem_context_t * stem,
759 : fd_shred_t const * shred,
760 : fd_shred_dest_weighted_t const * dest,
761 0 : ulong tsorig ) {
762 :
763 0 : if( FD_UNLIKELY( !dest->ip4 ) ) return;
764 :
765 0 : uchar * packet = fd_chunk_to_laddr( ctx->net_out_mem, ctx->net_out_chunk );
766 :
767 0 : int is_data = fd_shred_is_data( fd_shred_type( shred->variant ) );
768 0 : fd_ip4_udp_hdrs_t * hdr = (fd_ip4_udp_hdrs_t *)packet;
769 0 : *hdr = *( is_data ? ctx->data_shred_net_hdr : ctx->parity_shred_net_hdr );
770 :
771 0 : fd_ip4_hdr_t * ip4 = hdr->ip4;
772 0 : ip4->daddr = dest->ip4;
773 0 : ip4->net_id = fd_ushort_bswap( ctx->net_id++ );
774 0 : ip4->check = 0U;
775 0 : ip4->check = fd_ip4_hdr_check_fast( ip4 );
776 :
777 0 : hdr->udp->net_dport = fd_ushort_bswap( dest->port );
778 :
779 0 : ulong shred_sz = fd_ulong_if( is_data, FD_SHRED_MIN_SZ, FD_SHRED_MAX_SZ );
780 0 : #if FD_HAS_AVX
781 : /* We're going to copy this shred potentially a bunch of times without
782 : reading it again, and we'd rather not thrash our cache, so we want
783 : to use non-temporal writes here. We need to make sure we don't
784 : touch the cache line containing the network headers that we just
785 : wrote to though. We know the destination is 64 byte aligned. */
786 0 : FD_STATIC_ASSERT( sizeof(*hdr)<64UL, non_temporal );
 787 :   /* src[0:sizeof(hdrs)] is invalid, but we only copy dest[i]=src[i]
 788 :      for i>=sizeof(hdrs), so defining src this way simplifies the code. */
789 0 : uchar const * src = (uchar const *)((ulong)shred - sizeof(fd_ip4_udp_hdrs_t));
790 0 : memcpy( packet+sizeof(fd_ip4_udp_hdrs_t), src+sizeof(fd_ip4_udp_hdrs_t), 64UL-sizeof(fd_ip4_udp_hdrs_t) );
791 :
792 0 : ulong end_offset = shred_sz + sizeof(fd_ip4_udp_hdrs_t);
793 0 : ulong i;
794 0 : for( i=64UL; end_offset-i<64UL; i+=64UL ) {
795 0 : # if FD_HAS_AVX512
796 0 : _mm512_stream_si512( (void *)(packet+i ), _mm512_loadu_si512( (void const *)(src+i ) ) );
797 : # else
798 0 : _mm256_stream_si256( (void *)(packet+i ), _mm256_loadu_si256( (void const *)(src+i ) ) );
799 0 : _mm256_stream_si256( (void *)(packet+i+32UL), _mm256_loadu_si256( (void const *)(src+i+32UL) ) );
800 0 : # endif
801 0 : }
802 0 : _mm_sfence();
803 0 : fd_memcpy( packet+i, src+i, end_offset-i ); /* Copy the last partial cache line */
804 :
805 : #else
806 : fd_memcpy( packet+sizeof(fd_ip4_udp_hdrs_t), shred, shred_sz );
807 : #endif
808 :
809 0 : ulong pkt_sz = shred_sz + sizeof(fd_ip4_udp_hdrs_t);
810 0 : ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
811 0 : ulong sig = fd_disco_netmux_sig( dest->ip4, dest->port, dest->ip4, DST_PROTO_OUTGOING, sizeof(fd_ip4_udp_hdrs_t) );
812 0 : ulong const chunk = ctx->net_out_chunk;
813 0 : fd_stem_publish( stem, NET_OUT_IDX, sig, chunk, pkt_sz, 0UL, tsorig, tspub );
814 0 : ctx->net_out_chunk = fd_dcache_compact_next( chunk, pkt_sz, ctx->net_out_chunk0, ctx->net_out_wmark );
815 0 : }
816 :
817 : static void
818 : after_frag( fd_shred_ctx_t * ctx,
819 : ulong in_idx,
820 : ulong seq,
821 : ulong sig,
822 : ulong sz,
823 : ulong tsorig,
824 : ulong _tspub,
825 0 : fd_stem_context_t * stem ) {
826 0 : (void)seq;
827 0 : (void)sz;
828 0 : (void)tsorig;
829 0 : (void)_tspub;
830 :
831 0 : if( FD_UNLIKELY( ctx->skip_frag ) ) return;
832 :
833 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_CONTACT ) ) {
834 0 : finalize_new_cluster_contact_info( ctx );
835 0 : return;
836 0 : }
837 :
838 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
839 0 : fd_stake_ci_epoch_msg_fini( ctx->stake_ci );
840 0 : return;
841 0 : }
842 :
843 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_STAKE ) ) {
844 0 : fd_stake_ci_stake_msg_fini( ctx->stake_ci );
845 0 : return;
846 0 : }
847 :
848 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
849 0 : if( ctx->gossip_upd_buf->tag==FD_GOSSIP_UPDATE_TAG_CONTACT_INFO ) {
850 0 : fd_contact_info_t const * ci = ctx->gossip_upd_buf->contact_info.contact_info;
851 0 : fd_ip4_port_t tvu_addr = ci->sockets[ FD_CONTACT_INFO_SOCKET_TVU ];
852 0 : if( !tvu_addr.l ){
853 0 : fd_stake_ci_dest_remove( ctx->stake_ci, &ci->pubkey );
854 0 : } else {
855 0 : fd_stake_ci_dest_update( ctx->stake_ci, &ci->pubkey, tvu_addr.addr, fd_ushort_bswap( tvu_addr.port ) );
856 0 : }
857 0 : } else if( ctx->gossip_upd_buf->tag==FD_GOSSIP_UPDATE_TAG_CONTACT_INFO_REMOVE ) {
858 0 : if( FD_UNLIKELY( !memcmp( ctx->identity_key->uc, ctx->gossip_upd_buf->origin_pubkey, 32UL ) ) ) {
859 : /* If our own contact info was dropped, we update with dummy IP
860 : instead of removing since stake_ci expects our contact info
861 : in the sdests table all the time. fd_stake_ci_new initializes
862 : both ei->sdests with our contact info so this should always
863 : update (and not append). */
864 0 : fd_stake_ci_dest_update( ctx->stake_ci, (fd_pubkey_t *)ctx->gossip_upd_buf->origin_pubkey, 1U, 0U );
865 0 : } else {
866 0 : fd_stake_ci_dest_remove( ctx->stake_ci, (fd_pubkey_t *)ctx->gossip_upd_buf->origin_pubkey );
867 0 : }
868 0 : }
869 0 : return;
870 0 : }
871 :
872 0 : if( FD_UNLIKELY( (ctx->in_kind[ in_idx ]==IN_KIND_POH) & (ctx->send_fec_set_cnt==0UL) ) ) {
873 : /* Entry from PoH that didn't trigger a new FEC set to be made */
874 0 : return;
875 0 : }
876 :
877 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_REPAIR ) ) {
878 0 : FD_MCNT_INC( SHRED, FORCE_COMPLETE_REQUEST, 1UL );
879 0 : fd_ed25519_sig_t const * shred_sig = (fd_ed25519_sig_t const *)fd_type_pun( ctx->shred_buffer );
880 0 : if( FD_UNLIKELY( fd_fec_resolver_done_contains( ctx->resolver, shred_sig ) ) ) {
881 : /* This is a FEC completion message from the repair tile. We need
882 : to make sure that we don't force complete something that's just
883 : been completed. */
884 0 : FD_MCNT_INC( SHRED, FORCE_COMPLETE_FAILURE, 1UL );
885 0 : return;
886 0 : }
887 :
888 0 : uint last_idx = fd_disco_repair_shred_sig_last_shred_idx( sig );
889 0 : uchar buf_last_shred[FD_SHRED_MIN_SZ];
890 0 : int rv = fd_fec_resolver_shred_query( ctx->resolver, shred_sig, last_idx, buf_last_shred );
891 0 : if( FD_UNLIKELY( rv != FD_FEC_RESOLVER_SHRED_OKAY ) ) {
892 :
 893 :     /* We will hit this case if the FEC set is no longer in curr_map, or if
894 : the shred signature is invalid, which is okay.
895 :
896 : There's something of a race condition here. It's possible (but
897 : very unlikely) that between when the repair tile observed the
898 : FEC set needed to be force completed and now, the FEC set was
899 : completed, and then so many additional FEC sets were completed
900 : that it fell off the end of the done list. In that case
901 : fd_fec_resolver_done_contains would have returned false, but
902 : fd_fec_resolver_shred_query will not return OKAY, which means
903 : we'll end up in this block of code. If the FEC set was
904 : completed, then there's nothing we need to do. If it was
905 : spilled, then we'll need to re-repair all the shreds in the FEC
906 : set, but it's not fatal. */
907 :
908 0 : FD_MCNT_INC( SHRED, FORCE_COMPLETE_FAILURE, 1UL );
909 0 : return;
910 0 : }
911 0 : fd_shred_t * out_last_shred = (fd_shred_t *)fd_type_pun( buf_last_shred );
912 :
913 0 : fd_fec_set_t const * out_fec_set[1];
914 0 : rv = fd_fec_resolver_force_complete( ctx->resolver, out_last_shred, out_fec_set, &ctx->out_merkle_roots[0] );
915 0 : if( FD_UNLIKELY( rv != FD_FEC_RESOLVER_SHRED_COMPLETES ) ) {
916 0 : FD_BASE58_ENCODE_32_BYTES( *shred_sig, shred_sig_b58 );
917 0 : FD_LOG_WARNING(( "Shred tile %lu cannot force complete the slot %lu fec_set_idx %u last_idx %u %s", ctx->round_robin_id, out_last_shred->slot, out_last_shred->fec_set_idx, last_idx, shred_sig_b58 ));
918 0 : FD_MCNT_INC( SHRED, FORCE_COMPLETE_FAILURE, 1UL );
919 0 : return;
920 0 : }
921 0 : FD_MCNT_INC( SHRED, FORCE_COMPLETE_SUCCESS, 1UL );
922 0 : FD_TEST( ctx->fec_sets <= *out_fec_set );
923 0 : ctx->send_fec_set_idx[ 0UL ] = (ulong)(*out_fec_set - ctx->fec_sets);
924 0 : ctx->send_fec_set_cnt = 1UL;
925 0 : ctx->shredded_txn_cnt = 0UL;
926 0 : }
927 :
 928           0 :   ulong fanout = 200UL; /* Agave's default DATA_PLANE_FANOUT = 200UL */
929 :
930 0 : if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
931 0 : uchar * shred_buffer = ctx->shred_buffer;
932 0 : ulong shred_buffer_sz = ctx->shred_buffer_sz;
933 :
934 0 : fd_shred_t const * shred = fd_shred_parse( shred_buffer, shred_buffer_sz );
935 :
936 0 : if( FD_UNLIKELY( !shred ) ) { ctx->metrics->shred_processing_result[ 1 ]++; return; }
937 :
938 0 : fd_epoch_leaders_t const * lsched = fd_stake_ci_get_lsched_for_slot( ctx->stake_ci, shred->slot );
939 0 : if( FD_UNLIKELY( !lsched ) ) { ctx->metrics->shred_processing_result[ 0 ]++; return; }
940 :
941 0 : fd_pubkey_t const * slot_leader = fd_epoch_leaders_get( lsched, shred->slot );
942 0 : if( FD_UNLIKELY( !slot_leader ) ) { ctx->metrics->shred_processing_result[ 0 ]++; return; } /* Count this as bad slot too */
943 :
944 0 : uint nonce = fd_disco_netmux_sig_proto( sig ) == DST_PROTO_SHRED ? UINT_MAX : FD_LOAD(uint, shred_buffer + fd_shred_sz( shred ) );
945 :
946 0 : fd_fec_set_t const * out_fec_set[1];
947 0 : fd_shred_t const * out_shred[1];
948 0 : fd_fec_resolver_spilled_t spilled_fec = { 0 };
949 0 : int enforce_fixed_fec = (shred->slot >= ctx->features_activation->enforce_fixed_fec_set);
950 :
951 0 : long add_shred_timing = -fd_tickcount();
952 0 : int rv = fd_fec_resolver_add_shred( ctx->resolver, shred, shred_buffer_sz, slot_leader->uc, out_fec_set, out_shred, &ctx->out_merkle_roots[0], &spilled_fec, enforce_fixed_fec );
953 0 : add_shred_timing += fd_tickcount();
954 :
955 0 : fd_histf_sample( ctx->metrics->add_shred_timing, (ulong)add_shred_timing );
956 0 : ctx->metrics->shred_processing_result[ rv + FD_FEC_RESOLVER_ADD_SHRED_RETVAL_OFF+FD_SHRED_ADD_SHRED_EXTRA_RETVAL_CNT ]++;
957 :
958 0 : if( FD_UNLIKELY( ctx->shred_out_idx!=ULONG_MAX && /* Only send to repair in full Firedancer */
959 0 : spilled_fec.slot!=0 && spilled_fec.max_dshred_idx!=FD_SHRED_BLK_MAX ) ) {
960 : /* We've spilled an in-progress FEC set in the fec_resolver. We
 961 :          need to let repair know to clear out its cached info for that
962 : fec set and re-repair those shreds. */
963 0 : ulong sig_ = fd_disco_shred_out_shred_sig( 0, spilled_fec.slot, spilled_fec.fec_set_idx, 0, spilled_fec.max_dshred_idx );
964 0 : fd_stem_publish( stem, ctx->shred_out_idx, sig_, ctx->shred_out_chunk, 0, 0, ctx->tsorig, ctx->tsorig );
965 0 : }
966 :
967 0 : if( (rv==FD_FEC_RESOLVER_SHRED_OKAY) | (rv==FD_FEC_RESOLVER_SHRED_COMPLETES) ) {
968 0 : if( FD_LIKELY( fd_disco_netmux_sig_proto( sig ) != DST_PROTO_REPAIR ) ) {
969 : /* Relay this shred */
970 0 : ulong max_dest_cnt[1];
971 0 : do {
972 : /* If we've validated the shred and it COMPLETES but we can't
973 : compute the destination for whatever reason, don't forward
974 : the shred, but still send it to the blockstore. */
975 0 : fd_shred_dest_t * sdest = fd_stake_ci_get_sdest_for_slot( ctx->stake_ci, shred->slot );
976 0 : if( FD_UNLIKELY( !sdest ) ) break;
977 0 : int use_chacha8 = ( shred->slot >= ctx->features_activation->switch_to_chacha8_turbine );
978 0 : fd_shred_dest_idx_t * dests = fd_shred_dest_compute_children( sdest, &shred, 1UL, ctx->scratchpad_dests, 1UL, fanout, fanout, max_dest_cnt, use_chacha8 );
979 0 : if( FD_UNLIKELY( !dests ) ) break;
980 :
981 0 : for( ulong i=0UL; i<ctx->adtl_dests_retransmit_cnt; i++ ) send_shred( ctx, stem, *out_shred, ctx->adtl_dests_retransmit+i, ctx->tsorig );
982 0 : for( ulong j=0UL; j<*max_dest_cnt; j++ ) send_shred( ctx, stem, *out_shred, fd_shred_dest_idx_to_dest( sdest, dests[ j ] ), ctx->tsorig );
983 0 : } while( 0 );
984 0 : }
985 :
986 0 : if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* Only send to repair/replay in full Firedancer */
987 :
988 : /* Construct the sig from the shred. */
989 :
990 0 : int is_code = fd_shred_is_code( fd_shred_type( shred->variant ) );
991 0 : uint shred_idx_or_data_cnt = shred->idx;
992 0 : if( FD_LIKELY( is_code ) ) shred_idx_or_data_cnt = shred->code.data_cnt; /* optimize for code_cnt >= data_cnt */
993 0 : ulong _sig = fd_disco_shred_out_shred_sig( fd_disco_netmux_sig_proto(sig)==DST_PROTO_SHRED, shred->slot, shred->fec_set_idx, is_code, shred_idx_or_data_cnt );
994 :
995 : /* Copy the shred header into the frag and publish. */
996 :
997 0 : ulong sz = fd_shred_header_sz( shred->variant );
998 0 : fd_memcpy( fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk ), shred, sz );
999 0 : FD_STORE(uint, fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk ) + sz, nonce );
1000 0 : sz += 4UL;
1001 :
1002 0 : ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
1003 0 : fd_stem_publish( stem, ctx->shred_out_idx, _sig, ctx->shred_out_chunk, sz, 0UL, ctx->tsorig, tspub );
1004 0 : ctx->shred_out_chunk = fd_dcache_compact_next( ctx->shred_out_chunk, sz, ctx->shred_out_chunk0, ctx->shred_out_wmark );
1005 0 : }
1006 0 : }
1007 0 : if( FD_LIKELY( rv!=FD_FEC_RESOLVER_SHRED_COMPLETES ) ) return;
1008 :
1009 0 : FD_TEST( ctx->fec_sets <= *out_fec_set );
1010 0 : ctx->send_fec_set_idx[ 0UL ] = (ulong)(*out_fec_set - ctx->fec_sets);
1011 0 : ctx->send_fec_set_cnt = 1UL;
1012 0 : ctx->shredded_txn_cnt = 0UL;
1013 0 : }
1014 :
1015 0 : if( FD_UNLIKELY( ctx->send_fec_set_cnt==0UL ) ) return;
1016 :
1017 : /* Try to distribute shredded txn count across the fec sets.
1018 : This is an approximation, but it is acceptable. */
1019 0 : ulong shredded_txn_cnt_per_fec_set = ctx->shredded_txn_cnt / ctx->send_fec_set_cnt;
1020 0 : ulong shredded_txn_cnt_remain = ctx->shredded_txn_cnt - shredded_txn_cnt_per_fec_set * ctx->send_fec_set_cnt;
1021 0 : ulong shredded_txn_cnt_last_fec_set = shredded_txn_cnt_per_fec_set + shredded_txn_cnt_remain;
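  /* e.g. (illustrative) 10 shredded txns across 3 FEC sets gives
     per_fec_set=3 and remain=1, so the sets are attributed 3, 3, 4. */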
1022 :
1023 : /* If this shred completes a FEC set or is part of a microblock from
1024 : pack (ie. we're leader), we now have a full FEC set: so we notify
1025 : repair and insert into the blockstore, as well as retransmit. */
1026 :
1027 0 : for( ulong fset_k=0; fset_k<ctx->send_fec_set_cnt; fset_k++ ) {
1028 :
1029 0 : fd_fec_set_t * set = ctx->fec_sets + ctx->send_fec_set_idx[ fset_k ];
1030 0 : fd_shred34_t * s34 = ctx->shred34 + 4UL*ctx->send_fec_set_idx[ fset_k ];
1031 :
1032 0 : s34[ 0 ].shred_cnt = fd_ulong_min( set->data_shred_cnt, 34UL );
1033 0 : s34[ 1 ].shred_cnt = set->data_shred_cnt - fd_ulong_min( set->data_shred_cnt, 34UL );
1034 0 : s34[ 2 ].shred_cnt = fd_ulong_min( set->parity_shred_cnt, 34UL );
1035 0 : s34[ 3 ].shred_cnt = set->parity_shred_cnt - fd_ulong_min( set->parity_shred_cnt, 34UL );
1036 :
1037 0 : ulong s34_cnt = 2UL + !!(s34[ 1 ].shred_cnt) + !!(s34[ 3 ].shred_cnt);
1038 0 : ulong txn_per_s34 = fd_ulong_if( fset_k<( ctx->send_fec_set_cnt - 1UL ), shredded_txn_cnt_per_fec_set, shredded_txn_cnt_last_fec_set ) / s34_cnt;
1039 :
1040 : /* Attribute the transactions evenly to the non-empty shred34s */
1041 0 : for( ulong j=0UL; j<4UL; j++ ) s34[ j ].est_txn_cnt = fd_ulong_if( s34[ j ].shred_cnt>0UL, txn_per_s34, 0UL );
1042 :
 1043 :     /* Add whatever is left to the last non-empty shred34 */
1044 0 : s34[ fd_ulong_if( s34[ 3 ].shred_cnt>0UL, 3, 2 ) ].est_txn_cnt += ctx->shredded_txn_cnt - txn_per_s34*s34_cnt;
1045 :
1046 : /* Set the sz field so that metrics are more accurate. */
1047 0 : ulong sz0 = sizeof(fd_shred34_t) - (34UL - s34[ 0 ].shred_cnt)*FD_SHRED_MAX_SZ;
1048 0 : ulong sz1 = sizeof(fd_shred34_t) - (34UL - s34[ 1 ].shred_cnt)*FD_SHRED_MAX_SZ;
1049 0 : ulong sz2 = sizeof(fd_shred34_t) - (34UL - s34[ 2 ].shred_cnt)*FD_SHRED_MAX_SZ;
1050 0 : ulong sz3 = sizeof(fd_shred34_t) - (34UL - s34[ 3 ].shred_cnt)*FD_SHRED_MAX_SZ;
1051 :
1052 0 : fd_shred_t const * last = (fd_shred_t const *)fd_type_pun_const( set->data_shreds[ set->data_shred_cnt - 1 ] );
1053 :
1054 : /* Compute merkle root and chained merkle root. */
1055 :
1056 0 : if( FD_LIKELY( ctx->store ) ) { /* firedancer-only */
1057 :
1058 : /* Insert shreds into the store. We do this regardless of whether
1059 : we are leader. */
1060 :
1061 : /* See top-level documentation in fd_store.h under CONCURRENCY to
1062 : understand why it is safe to use a Store read vs. write lock in
1063 : Shred tile. */
1064 :
1065 0 : long shacq_start, shacq_end, shrel_end;
1066 0 : fd_store_fec_t * fec = NULL;
1067 0 : FD_STORE_SHARED_LOCK( ctx->store, shacq_start, shacq_end, shrel_end ) {
1068 0 : fec = fd_store_insert( ctx->store, ctx->round_robin_id, (fd_hash_t *)fd_type_pun( &ctx->out_merkle_roots[fset_k] ) );
1069 0 : } FD_STORE_SHARED_LOCK_END;
1070 :
1071 0 : if( FD_UNLIKELY( !fec ) ) {
1072 : /* fec can be null for several reasons, but the most likely case
1073 : that Firedancer can run into during regular operation is when
1074 : it is our leader slot and someone is sending us back our own
1075 : FEC set shreds. We could end up trying to insert our own FEC
1076 : set twice. In development, this can also occur if you run
1077 : with a staked key and switch to another staked key without
1078 : changing the turbine receive port. */
1079 0 : return;
1080 0 : }
1081 :
1082 0 : for( ulong i=0UL; i<set->data_shred_cnt; i++ ) {
1083 0 : fd_shred_t * data_shred = (fd_shred_t *)fd_type_pun( set->data_shreds[i] );
1084 0 : ulong payload_sz = fd_shred_payload_sz( data_shred );
1085 0 : if( FD_UNLIKELY( fec->data_sz + payload_sz > FD_STORE_DATA_MAX ) ) {
1086 :
1087 : /* This code is only reachable if shred tile has completed the
1088 : FEC set, which implies it was able to validate it, yet
1089 : somehow the total payload sz of this FEC set exceeds the
1090 : maximum payload sz. This indicates either a serious bug or
1091 : shred tile is compromised so log_crit. */
1092 :
1093 0 : FD_LOG_CRIT(( "Shred tile %lu: completed FEC set %lu %u data_sz: %lu exceeds FD_STORE_DATA_MAX: %lu. Ignoring FEC set.", ctx->round_robin_id, data_shred->slot, data_shred->fec_set_idx, fec->data_sz + payload_sz, FD_STORE_DATA_MAX ));
1094 0 : }
1095 0 : fd_memcpy( fec->data + fec->data_sz, fd_shred_data_payload( data_shred ), payload_sz );
1096 0 : fec->data_sz += payload_sz;
1097 0 : if( FD_LIKELY( i<32UL ) ) fec->block_offs[ i ] = (uint)payload_sz + fd_uint_if( i==0UL, 0UL, fec->block_offs[ i-1UL ] );
1098 0 : }
1099 :
1100 : /* It's safe to memcpy the FEC payload outside of the shared-lock,
1101 : because the fec object ptr is guaranteed to be valid. It is
1102 : not possible for a store_publish to free/invalidate the fec
1103 : object during the data memcpy, because the free can only happen
1104 : after the fec is linked to its parent, which happens in the
1105 : repair tile, and crucially, only after we call stem publish in
1106 : this tile. Copying outside the shared lock scope also means
1107 : that we can lower the duration for which the shared lock is
1108 : held, and enables replay to acquire the exclusive lock and
1109 : avoid getting starved. */
1110 :
1111 0 : fd_histf_sample( ctx->metrics->store_insert_wait, (ulong)fd_long_max(shacq_end - shacq_start, 0) );
1112 0 : fd_histf_sample( ctx->metrics->store_insert_work, (ulong)fd_long_max(shrel_end - shacq_end, 0) );
1113 0 : }
1114 :
1115 0 : if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* firedancer-only */
1116 :
1117 : /* Additionally, publish a frag to notify repair and replay that
1118 : the FEC set is complete. Note the ordering wrt store shred
1119 : insertion above is intentional: shreds are inserted into the
1120 : store before notifying repair and replay. This is because the
1121 : replay tile assumes the shreds are already in the store when
1122 : replay gets a notification from the shred tile that the FEC is
 1123 :          complete.  We don't know whether shred will finish inserting
1124 : into store first or repair will finish validating the FEC set
1125 : first. The header and merkle root of the last shred in the FEC
1126 : set are sent as part of this frag.
1127 :
1128 : This message, the shred msg, and the FEC evict msg constitute
1129 : the max 3 possible messages to repair/replay per after_frag.
1130 : In reality, it is only possible to publish all 3 in the case
1131 : where we receive a coding shred first for a FEC set where
1132 : (N=1,K=18), which allows for the FEC set to be instantly
1133 : completed by the singular coding shred, and that also happens
1134 : to evict a FEC set from the curr_map. When fix-32 arrives, the
1135 : link burst value can be lowered to 2. */
1136 :
1137 0 : int is_leader_fec = ctx->in_kind[ in_idx ]==IN_KIND_POH;
1138 :
1139 0 : ulong sig = fd_disco_shred_out_fec_sig( last->slot, last->fec_set_idx, (uint)set->data_shred_cnt, last->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE, last->data.flags & FD_SHRED_DATA_FLAG_DATA_COMPLETE );
1140 0 : uchar * chunk = fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk );
1141 0 : memcpy( chunk, last, FD_SHRED_DATA_HEADER_SZ );
1142 0 : memcpy( chunk+FD_SHRED_DATA_HEADER_SZ, ctx->out_merkle_roots[fset_k].hash, FD_SHRED_MERKLE_ROOT_SZ );
1143 0 : memcpy( chunk+FD_SHRED_DATA_HEADER_SZ + FD_SHRED_MERKLE_ROOT_SZ, (uchar *)last + fd_shred_chain_off( last->variant ), FD_SHRED_MERKLE_ROOT_SZ );
1144 0 : memcpy( chunk+FD_SHRED_DATA_HEADER_SZ + (FD_SHRED_MERKLE_ROOT_SZ*2UL), &is_leader_fec, sizeof(int));
1145 :
1146 0 : ulong sz = FD_SHRED_DATA_HEADER_SZ + FD_SHRED_MERKLE_ROOT_SZ * 2 + sizeof(int);
1147 0 : ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
1148 0 : fd_stem_publish( stem, ctx->shred_out_idx, sig, ctx->shred_out_chunk, sz, 0UL, ctx->tsorig, tspub );
1149 0 : ctx->shred_out_chunk = fd_dcache_compact_next( ctx->shred_out_chunk, sz, ctx->shred_out_chunk0, ctx->shred_out_wmark );
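      /* Consumer-side view of the frag just published (offsets follow
         the memcpys above):
           [ 0, FD_SHRED_DATA_HEADER_SZ )  header of the last data shred
           next 32 bytes                   merkle root of this FEC set
           next 32 bytes                   chained (parent) merkle root
           final 4 bytes                   int is_leader_fec */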
1150 :
1151 0 : } else if( FD_UNLIKELY( ctx->store_out_idx != ULONG_MAX ) ) { /* frankendancer-only */
1152 :
 1153 :       /* Send to the blockstore, skipping any empty fd_shred34_t structs. */
1154 :
1155 0 : ulong new_sig = ctx->in_kind[ in_idx ]!=IN_KIND_NET; /* sig==0 means the store tile will do extra checks */
1156 0 : ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
1157 0 : fd_stem_publish( stem, 0UL, new_sig, fd_laddr_to_chunk( ctx->store_out_mem, s34+0UL ), sz0, 0UL, ctx->tsorig, tspub );
1158 0 : if( FD_UNLIKELY( s34[ 1 ].shred_cnt ) )
1159 0 : fd_stem_publish( stem, 0UL, new_sig, fd_laddr_to_chunk( ctx->store_out_mem, s34+1UL ), sz1, 0UL, ctx->tsorig, tspub );
1160 0 : if( FD_UNLIKELY( s34[ 2 ].shred_cnt ) )
1161 0 : fd_stem_publish( stem, 0UL, new_sig, fd_laddr_to_chunk( ctx->store_out_mem, s34+2UL ), sz2, 0UL, ctx->tsorig, tspub );
1162 0 : if( FD_UNLIKELY( s34[ 3 ].shred_cnt ) )
1163 0 : fd_stem_publish( stem, 0UL, new_sig, fd_laddr_to_chunk( ctx->store_out_mem, s34+3UL ), sz3, 0UL, ctx->tsorig, tspub );
1164 0 : }
1165 :
1166 : /* Compute all the destinations for all the new shreds */
1167 :
1168 0 : fd_shred_t const * new_shreds[ FD_REEDSOL_DATA_SHREDS_MAX+FD_REEDSOL_PARITY_SHREDS_MAX ];
1169 0 : ulong k=0UL;
1170 0 : for( ulong i=0UL; i<set->data_shred_cnt; i++ )
1171 0 : if( !d_rcvd_test( set->data_shred_rcvd, i ) ) new_shreds[ k++ ] = (fd_shred_t const *)set->data_shreds [ i ];
1172 0 : for( ulong i=0UL; i<set->parity_shred_cnt; i++ )
1173 0 : if( !p_rcvd_test( set->parity_shred_rcvd, i ) ) new_shreds[ k++ ] = (fd_shred_t const *)set->parity_shreds[ i ];
1174 :
1175 0 : if( FD_UNLIKELY( !k ) ) return;
1176 0 : fd_shred_dest_t * sdest = fd_stake_ci_get_sdest_for_slot( ctx->stake_ci, new_shreds[ 0 ]->slot );
1177 0 : if( FD_UNLIKELY( !sdest ) ) return;
1178 0 : int use_chacha8 = ( new_shreds[ 0 ]->slot >= ctx->features_activation->switch_to_chacha8_turbine );
1179 :
1180 0 : ulong out_stride;
1181 0 : ulong max_dest_cnt[1];
1182 0 : fd_shred_dest_idx_t * dests;
1183 0 : if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
1184 0 : for( ulong i=0UL; i<k; i++ ) {
1185 0 : for( ulong j=0UL; j<ctx->adtl_dests_retransmit_cnt; j++ ) send_shred( ctx, stem, new_shreds[ i ], ctx->adtl_dests_retransmit+j, ctx->tsorig );
1186 0 : }
1187 0 : out_stride = k;
1188 : /* In the case of feature activation, the fanout used below is
1189 : the same as the one calculated/modified previously at the
1190 : beginning of after_frag() for IN_KIND_NET in this slot. */
1191 0 : dests = fd_shred_dest_compute_children( sdest, new_shreds, k, ctx->scratchpad_dests, k, fanout, fanout, max_dest_cnt, use_chacha8 );
1192 0 : } else {
1193 0 : for( ulong i=0UL; i<k; i++ ) {
1194 0 : for( ulong j=0UL; j<ctx->adtl_dests_leader_cnt; j++ ) send_shred( ctx, stem, new_shreds[ i ], ctx->adtl_dests_leader+j, ctx->tsorig );
1195 0 : }
1196 0 : out_stride = 1UL;
1197 0 : *max_dest_cnt = 1UL;
1198 0 : dests = fd_shred_dest_compute_first ( sdest, new_shreds, k, ctx->scratchpad_dests, use_chacha8 );
1199 0 : }
1200 0 : if( FD_UNLIKELY( !dests ) ) return;
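     : /* dests is addressed as dests[ j*out_stride + i ]: in the
     :    retransmit case out_stride==k, so entry (j,i) is the j-th
     :    destination for shred i; in the leader case out_stride==1UL
     :    and *max_dest_cnt==1UL, so dests[ i ] is the single first-hop
     :    destination for shred i. */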
1201 :
1202 : /* Send only the ones we didn't receive. */
1203 0 : for( ulong i=0UL; i<k; i++ ) {
1204 0 : for( ulong j=0UL; j<*max_dest_cnt; j++ ) send_shred( ctx, stem, new_shreds[ i ], fd_shred_dest_idx_to_dest( sdest, dests[ j*out_stride+i ]), ctx->tsorig );
1205 0 : }
1206 0 : }
1207 0 : }
1208 :
1209 : static void
1210 : privileged_init( fd_topo_t * topo,
1211 0 : fd_topo_tile_t * tile ) {
1212 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1213 0 : FD_TEST( scratch!=NULL );
1214 :
1215 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1216 0 : fd_shred_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_shred_ctx_t ), sizeof( fd_shred_ctx_t ) );
1217 :
1218 0 : if( FD_UNLIKELY( !strcmp( tile->shred.identity_key_path, "" ) ) )
1219 0 : FD_LOG_ERR(( "identity_key_path not set" ));
1220 :
1221 0 : ctx->identity_key[ 0 ] = *(fd_pubkey_t const *)fd_type_pun_const( fd_keyload_load( tile->shred.identity_key_path, /* pubkey only: */ 1 ) );
1222 0 : }
1223 :
1224 : static void
1225 : fd_shred_signer( void * signer_ctx,
1226 : uchar signature[ static 64 ],
1227 0 : uchar const merkle_root[ static 32 ] ) {
1228 0 : fd_keyguard_client_sign( signer_ctx, signature, merkle_root, 32UL, FD_KEYGUARD_SIGN_TYPE_ED25519 );
1229 0 : }
1230 :
1231 : static void
1232 : unprivileged_init( fd_topo_t * topo,
1233 0 : fd_topo_tile_t * tile ) {
1234 :
1235 0 : FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ NET_OUT_IDX ]].name, "shred_net" ) );
1236 0 : FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ SIGN_OUT_IDX ]].name, "shred_sign" ) );
1237 :
1238 0 : if( FD_UNLIKELY( !tile->out_cnt ) )
1239 0 : FD_LOG_ERR(( "shred tile has no primary output link" ));
1240 :
1241 0 : ulong shred_store_mcache_depth = tile->shred.depth;
1242 0 : if( topo->links[ tile->out_link_id[ 0 ] ].depth != shred_store_mcache_depth )
1243 0 : FD_LOG_ERR(( "shred tile out depths are not equal %lu %lu",
1244 0 : topo->links[ tile->out_link_id[ 0 ] ].depth, shred_store_mcache_depth ));
1245 :
1246 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1247 0 : FD_TEST( scratch!=NULL );
1248 :
1249 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1250 0 : fd_shred_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_shred_ctx_t ), sizeof( fd_shred_ctx_t ) );
1251 :
1252 0 : ctx->round_robin_cnt = fd_topo_tile_name_cnt( topo, tile->name );
1253 0 : ctx->round_robin_id = tile->kind_id;
1254 0 : ctx->batch_cnt = 0UL;
1255 0 : ctx->slot = ULONG_MAX;
1256 :
1257 : /* If the default partial_depth is ever changed, correspondingly
1258 : change the size of the fd_fec_intra_pool in fd_fec_repair. */
1259 0 : ulong fec_resolver_footprint = fd_fec_resolver_footprint( tile->shred.fec_resolver_depth, 1UL, shred_store_mcache_depth,
1260 0 : 128UL * tile->shred.fec_resolver_depth );
1261 0 : ulong fec_set_cnt = shred_store_mcache_depth + tile->shred.fec_resolver_depth + 4UL;
1262 0 : ulong fec_sets_required_sz = fec_set_cnt*DCACHE_ENTRIES_PER_FEC_SET*sizeof(fd_shred34_t);
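     : /* Worked example (numbers chosen purely for illustration): with
     :    shred_store_mcache_depth=128 and fec_resolver_depth=128,
     :    fec_set_cnt = 128+128+4 = 260, and assuming
     :    DCACHE_ENTRIES_PER_FEC_SET is 4 (as the data/parity wiring
     :    below implies), the dcache must hold 260*4 = 1040
     :    fd_shred34_t structs. Each shred tile later takes its own
     :    fec_sets_required_sz-byte slice of the shared obj, offset by
     :    round_robin_id. */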
1263 :
1264 0 : void * fec_sets_shmem = NULL;
1265 0 : ctx->shred_out_idx = fd_topo_find_tile_out_link( topo, tile, "shred_out", ctx->round_robin_id );
1266 0 : ctx->store_out_idx = fd_topo_find_tile_out_link( topo, tile, "shred_store", ctx->round_robin_id );
1267 0 : if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* firedancer-only */
1274 0 : ulong fec_sets_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "fec_sets" );
1275 0 : if( FD_UNLIKELY( fec_sets_obj_id == ULONG_MAX ) ) FD_LOG_ERR(( "invalid firedancer topo" ));
1276 0 : fd_topo_obj_t const * obj = &topo->objs[ fec_sets_obj_id ];
1277 0 : if( FD_UNLIKELY( obj->footprint<(fec_sets_required_sz*ctx->round_robin_cnt) ) ) {
1278 0 : FD_LOG_ERR(( "fec_sets wksp obj too small. It is %lu bytes but must be at least %lu bytes. ",
1279 0 : obj->footprint,
1280 0 : fec_sets_required_sz*ctx->round_robin_cnt ));
1281 0 : }
1282 0 : fec_sets_shmem = (uchar *)fd_topo_obj_laddr( topo, fec_sets_obj_id ) + (ctx->round_robin_id * fec_sets_required_sz);
1283 0 : } else if( FD_LIKELY( ctx->store_out_idx!=ULONG_MAX ) ) { /* frankendancer-only */
1284 0 : FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ ctx->store_out_idx ]].name, "shred_store" ) );
1285 0 : fec_sets_shmem = topo->links[ tile->out_link_id[ ctx->store_out_idx ] ].dcache;
1286 0 : if( FD_UNLIKELY( fd_dcache_data_sz( fec_sets_shmem )<fec_sets_required_sz ) ) {
1287 0 : FD_LOG_ERR(( "shred_store dcache too small. It is %lu bytes but must be at least %lu bytes. ",
1288 0 : fd_dcache_data_sz( fec_sets_shmem ),
1289 0 : fec_sets_required_sz ));
1290 0 : }
1291 0 : }
1292 :
1293 0 : if( FD_UNLIKELY( !tile->shred.fec_resolver_depth ) ) FD_LOG_ERR(( "fec_resolver_depth not set" ));
1294 0 : if( FD_UNLIKELY( !tile->shred.shred_listen_port ) ) FD_LOG_ERR(( "shred_listen_port not set" ));
1295 :
1296 0 : void * _stake_ci = FD_SCRATCH_ALLOC_APPEND( l, fd_stake_ci_align(), fd_stake_ci_footprint() );
1297 0 : void * _resolver = FD_SCRATCH_ALLOC_APPEND( l, fd_fec_resolver_align(), fec_resolver_footprint );
1298 0 : void * _shredder = FD_SCRATCH_ALLOC_APPEND( l, fd_shredder_align(), fd_shredder_footprint() );
1299 0 : void * _fec_sets = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_fec_set_t), sizeof(fd_fec_set_t)*fec_set_cnt );
1300 :
1301 0 : fd_fec_set_t * fec_sets = (fd_fec_set_t *)_fec_sets;
1302 0 : fd_shred34_t * shred34 = (fd_shred34_t *)fec_sets_shmem;
1303 :
1304 0 : for( ulong i=0UL; i<fec_set_cnt; i++ ) {
1305 0 : fd_shred34_t * p34_base = shred34 + i*DCACHE_ENTRIES_PER_FEC_SET;
1306 0 : for( ulong k=0UL; k<DCACHE_ENTRIES_PER_FEC_SET; k++ ) {
1307 0 : fd_shred34_t * p34 = p34_base + k;
1308 :
1309 0 : p34->stride = (ulong)p34->pkts[1].buffer - (ulong)p34->pkts[0].buffer;
1310 0 : p34->offset = (ulong)p34->pkts[0].buffer - (ulong)p34;
1311 0 : p34->shred_sz = fd_ulong_if( k<2UL, 1203UL, 1228UL );
1312 0 : }
1313 :
1314 0 : uchar ** data_shred = fec_sets[ i ].data_shreds;
1315 0 : uchar ** parity_shred = fec_sets[ i ].parity_shreds;
1316 0 : for( ulong j=0UL; j<FD_REEDSOL_DATA_SHREDS_MAX; j++ ) data_shred [ j ] = p34_base[ j/34UL ].pkts[ j%34UL ].buffer;
1317 0 : for( ulong j=0UL; j<FD_REEDSOL_PARITY_SHREDS_MAX; j++ ) parity_shred[ j ] = p34_base[ 2UL + j/34UL ].pkts[ j%34UL ].buffer;
1318 0 : }
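     : /* The indexing above packs 34 shreds per fd_shred34_t: j/34UL
     :    selects the struct and j%34UL the packet slot, so data shreds
     :    fill p34_base[0..1] (shred_sz 1203) and parity shreds fill
     :    p34_base[2..3] (shred_sz 1228), covering the four entries of
     :    a FEC set. */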
1319 :
1320 0 : #define NONNULL( x ) (__extension__({ \
1321 0 : __typeof__((x)) __x = (x); \
1322 0 : if( FD_UNLIKELY( !__x ) ) FD_LOG_ERR(( #x " was unexpectedly NULL" )); \
1323 0 : __x; }))
1324 :
1325 0 : int has_ipecho_in = fd_topo_find_tile_in_link( topo, tile, "ipecho_out", 0UL )!=ULONG_MAX;
1326 0 : ushort expected_shred_version = tile->shred.expected_shred_version;
1327 0 : if( FD_UNLIKELY( !has_ipecho_in && !expected_shred_version ) ) {
1328 0 : ulong busy_obj_id = fd_pod_query_ulong( topo->props, "pohh_shred", ULONG_MAX );
1329 0 : FD_TEST( busy_obj_id!=ULONG_MAX );
1330 0 : ulong * gossip_shred_version = fd_fseq_join( fd_topo_obj_laddr( topo, busy_obj_id ) );
1331 0 : FD_LOG_INFO(( "Waiting for shred version to be determined via gossip." ));
1332 0 : ulong _expected_shred_version = ULONG_MAX;
1333 0 : do {
1334 0 : _expected_shred_version = FD_VOLATILE_CONST( *gossip_shred_version );
1335 0 : } while( _expected_shred_version==ULONG_MAX );
1336 :
1337 0 : if( FD_UNLIKELY( _expected_shred_version>USHORT_MAX ) ) FD_LOG_ERR(( "invalid shred version %lu", _expected_shred_version ));
1338 0 : FD_LOG_INFO(( "Using shred version %hu", (ushort)_expected_shred_version ));
1339 0 : expected_shred_version = (ushort)_expected_shred_version;
1340 0 : }
1341 :
1342 0 : ctx->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->keyswitch_obj_id ) );
1343 0 : FD_TEST( ctx->keyswitch );
1344 :
1345 : /* populate ctx */
1346 0 : ulong sign_in_idx = fd_topo_find_tile_in_link( topo, tile, "sign_shred", tile->kind_id );
1347 0 : FD_TEST( sign_in_idx!=ULONG_MAX );
1348 0 : fd_topo_link_t * sign_in = &topo->links[ tile->in_link_id[ sign_in_idx ] ];
1349 0 : fd_topo_link_t * sign_out = &topo->links[ tile->out_link_id[ SIGN_OUT_IDX ] ];
1350 0 : NONNULL( fd_keyguard_client_join( fd_keyguard_client_new( ctx->keyguard_client,
1351 0 : sign_out->mcache,
1352 0 : sign_out->dcache,
1353 0 : sign_in->mcache,
1354 0 : sign_in->dcache,
1355 0 : sign_out->mtu ) ) );
1356 :
1357 0 : ulong shred_limit = fd_ulong_if( tile->shred.larger_shred_limits_per_block, 32UL*32UL*1024UL, 32UL*1024UL );
1358 0 : fd_fec_set_t * resolver_sets = fec_sets + (shred_store_mcache_depth+1UL)/2UL + 1UL;
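     : /* The fec_sets array is split between the two producers: the
     :    first (shred_store_mcache_depth+1UL)/2UL + 1UL sets back the
     :    shredder (leader path, cycled via shredder_fec_set_idx /
     :    shredder_max_fec_set_idx below), while resolver_sets points
     :    at the remainder, which is handed to the FEC resolver (net
     :    path). */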
1359 0 : ctx->shredder = NONNULL( fd_shredder_join ( fd_shredder_new ( _shredder, fd_shred_signer, ctx->keyguard_client ) ) );
1360 0 : ctx->resolver = NONNULL( fd_fec_resolver_join ( fd_fec_resolver_new ( _resolver,
1361 0 : fd_shred_signer, ctx->keyguard_client,
1362 0 : tile->shred.fec_resolver_depth, 1UL,
1363 0 : (shred_store_mcache_depth+3UL)/2UL,
1364 0 : 128UL * tile->shred.fec_resolver_depth, resolver_sets,
1365 0 : shred_limit ) ) );
1366 :
1367 0 : if( FD_LIKELY( !!expected_shred_version ) ) {
1368 0 : fd_shredder_set_shred_version ( ctx->shredder, expected_shred_version );
1369 0 : fd_fec_resolver_set_shred_version( ctx->resolver, expected_shred_version );
1370 0 : }
1371 :
1372 0 : ctx->shred34 = shred34;
1373 0 : ctx->fec_sets = fec_sets;
1374 :
1375 0 : ctx->stake_ci = fd_stake_ci_join( fd_stake_ci_new( _stake_ci, ctx->identity_key ) );
1376 :
1377 0 : ctx->net_id = (ushort)0;
1378 :
1379 0 : fd_ip4_udp_hdr_init( ctx->data_shred_net_hdr, FD_SHRED_MIN_SZ, 0, tile->shred.shred_listen_port );
1380 0 : fd_ip4_udp_hdr_init( ctx->parity_shred_net_hdr, FD_SHRED_MAX_SZ, 0, tile->shred.shred_listen_port );
1381 :
1382 0 : ctx->adtl_dests_retransmit_cnt = tile->shred.adtl_dests_retransmit_cnt;
1383 0 : for( ulong i=0UL; i<ctx->adtl_dests_retransmit_cnt; i++ ) {
1384 0 : ctx->adtl_dests_retransmit[ i ].ip4 = tile->shred.adtl_dests_retransmit[ i ].ip;
1385 0 : ctx->adtl_dests_retransmit[ i ].port = tile->shred.adtl_dests_retransmit[ i ].port;
1386 0 : }
1387 0 : ctx->adtl_dests_leader_cnt = tile->shred.adtl_dests_leader_cnt;
1388 0 : for( ulong i=0UL; i<ctx->adtl_dests_leader_cnt; i++ ) {
1389 0 : ctx->adtl_dests_leader[ i ].ip4 = tile->shred.adtl_dests_leader[ i ].ip;
1390 0 : ctx->adtl_dests_leader[ i ].port = tile->shred.adtl_dests_leader[ i ].port;
1391 0 : }
1392 :
1393 0 : uchar has_contact_info_in = 0;
1394 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1395 0 : fd_topo_link_t const * link = &topo->links[ tile->in_link_id[ i ] ];
1396 0 : fd_topo_wksp_t const * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
1397 :
1398 0 : if( FD_LIKELY( !strcmp( link->name, "net_shred" ) ) ) {
1399 0 : ctx->in_kind[ i ] = IN_KIND_NET;
1400 0 : fd_net_rx_bounds_init( &ctx->in[ i ].net_rx, link->dcache );
1401 0 : continue; /* only net_rx needs to be set in this case. */
1402 0 : }
1403 0 : else if( FD_LIKELY( !strcmp( link->name, "poh_shred" ) ) ) ctx->in_kind[ i ] = IN_KIND_POH; /* Firedancer */
1404 0 : else if( FD_LIKELY( !strcmp( link->name, "pohh_shred" ) ) ) ctx->in_kind[ i ] = IN_KIND_POH; /* Frankendancer */
1405 0 : else if( FD_LIKELY( !strcmp( link->name, "stake_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_STAKE; /* Frankendancer */
1406 0 : else if( FD_LIKELY( !strcmp( link->name, "replay_epoch" ) ) ) ctx->in_kind[ i ] = IN_KIND_EPOCH; /* Firedancer */
1407 0 : else if( FD_LIKELY( !strcmp( link->name, "sign_shred" ) ) ) ctx->in_kind[ i ] = IN_KIND_SIGN;
1408 0 : else if( FD_LIKELY( !strcmp( link->name, "repair_shred" ) ) ) ctx->in_kind[ i ] = IN_KIND_REPAIR;
1409 0 : else if( FD_LIKELY( !strcmp( link->name, "ipecho_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_IPECHO;
1410 0 : else if( FD_LIKELY( !strcmp( link->name, "crds_shred" ) ) ) { ctx->in_kind[ i ] = IN_KIND_CONTACT;
1411 0 : if( FD_UNLIKELY( has_contact_info_in ) ) FD_LOG_ERR(( "shred tile has multiple contact info in-links; it must use exactly one of gossip_out or crds_shred" ));
1412 0 : has_contact_info_in = 1;
1413 0 : }
1414 0 : else if( FD_LIKELY( !strcmp( link->name, "gossip_out" ) ) ) { ctx->in_kind[ i ] = IN_KIND_GOSSIP;
1415 0 : if( FD_UNLIKELY( has_contact_info_in ) ) FD_LOG_ERR(( "shred tile has multiple contact info in-links; it must use exactly one of gossip_out or crds_shred" ));
1416 0 : has_contact_info_in = 1;
1417 0 : }
1418 :
1419 0 : else FD_LOG_ERR(( "shred tile has unexpected input link %lu %s", i, link->name ));
1420 :
1421 0 : if( FD_LIKELY( !!link->mtu ) ) {
1422 0 : ctx->in[ i ].mem = link_wksp->wksp;
1423 0 : ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
1424 0 : ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
1425 0 : }
1426 0 : }
1427 :
1428 0 : fd_topo_link_t * net_out = &topo->links[ tile->out_link_id[ NET_OUT_IDX ] ];
1429 :
1430 0 : ctx->net_out_chunk0 = fd_dcache_compact_chunk0( fd_wksp_containing( net_out->dcache ), net_out->dcache );
1431 0 : ctx->net_out_mem = topo->workspaces[ topo->objs[ net_out->dcache_obj_id ].wksp_id ].wksp;
1432 0 : ctx->net_out_wmark = fd_dcache_compact_wmark ( ctx->net_out_mem, net_out->dcache, net_out->mtu );
1433 0 : ctx->net_out_chunk = ctx->net_out_chunk0;
1434 :
1435 0 : ctx->store = NULL;
1436 0 : ulong store_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "store" );
1437 0 : if( FD_LIKELY( store_obj_id!=ULONG_MAX ) ) { /* firedancer-only */
1438 0 : ctx->store = fd_store_join( fd_topo_obj_laddr( topo, store_obj_id ) );
1439 0 : FD_TEST( ctx->store->magic==FD_STORE_MAGIC );
1440 0 : FD_TEST( ctx->store->part_cnt==ctx->round_robin_cnt ); /* single-writer (shred tile) per store part */
1441 0 : }
1442 :
1443 0 : if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* firedancer-only */
1444 0 : fd_topo_link_t * shred_out = &topo->links[ tile->out_link_id[ ctx->shred_out_idx ] ];
1445 0 : ctx->shred_out_mem = topo->workspaces[ topo->objs[ shred_out->dcache_obj_id ].wksp_id ].wksp;
1446 0 : ctx->shred_out_chunk0 = fd_dcache_compact_chunk0( ctx->shred_out_mem, shred_out->dcache );
1447 0 : ctx->shred_out_wmark = fd_dcache_compact_wmark ( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu );
1448 0 : ctx->shred_out_chunk = ctx->shred_out_chunk0;
1449 0 : FD_TEST( fd_dcache_compact_is_safe( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu, shred_out->depth ) );
1450 0 : }
1451 :
1452 0 : if( FD_LIKELY( ctx->store_out_idx!=ULONG_MAX ) ) { /* frankendancer-only */
1453 0 : fd_topo_link_t * store_out = &topo->links[ tile->out_link_id[ ctx->store_out_idx ] ];
1454 0 : ctx->store_out_mem = topo->workspaces[ topo->objs[ store_out->dcache_obj_id ].wksp_id ].wksp;
1455 0 : ctx->store_out_chunk0 = fd_dcache_compact_chunk0( ctx->store_out_mem, store_out->dcache );
1456 0 : ctx->store_out_wmark = fd_dcache_compact_wmark ( ctx->store_out_mem, store_out->dcache, store_out->mtu );
1457 0 : ctx->store_out_chunk = ctx->store_out_chunk0;
1458 0 : FD_TEST( fd_dcache_compact_is_safe( ctx->store_out_mem, store_out->dcache, store_out->mtu, store_out->depth ) );
1459 0 : }
1460 :
1461 0 : ctx->poh_in_expect_seq = 0UL;
1462 :
1463 0 : ctx->shredder_fec_set_idx = 0UL;
1464 0 : ctx->shredder_max_fec_set_idx = (shred_store_mcache_depth+1UL)/2UL + 1UL;
1465 :
1466 0 : ctx->chained_merkle_root = NULL;
1467 0 : memset( ctx->out_merkle_roots, 0, sizeof(ctx->out_merkle_roots) );
1468 :
1469 0 : for( ulong i=0UL; i<FD_SHRED_BATCH_FEC_SETS_MAX; i++ ) { ctx->send_fec_set_idx[ i ] = ULONG_MAX; }
1470 0 : ctx->send_fec_set_cnt = 0UL;
1471 :
1472 0 : ctx->shred_buffer_sz = 0UL;
1473 0 : memset( ctx->shred_buffer, 0xFF, FD_NET_MTU );
1474 :
1475 0 : fd_histf_join( fd_histf_new( ctx->metrics->contact_info_cnt, FD_MHIST_MIN( SHRED, CLUSTER_CONTACT_INFO_CNT ),
1476 0 : FD_MHIST_MAX( SHRED, CLUSTER_CONTACT_INFO_CNT ) ) );
1477 0 : fd_histf_join( fd_histf_new( ctx->metrics->batch_sz, FD_MHIST_MIN( SHRED, BATCH_SZ ),
1478 0 : FD_MHIST_MAX( SHRED, BATCH_SZ ) ) );
1479 0 : fd_histf_join( fd_histf_new( ctx->metrics->batch_microblock_cnt, FD_MHIST_MIN( SHRED, BATCH_MICROBLOCK_CNT ),
1480 0 : FD_MHIST_MAX( SHRED, BATCH_MICROBLOCK_CNT ) ) );
1481 0 : fd_histf_join( fd_histf_new( ctx->metrics->shredding_timing, FD_MHIST_SECONDS_MIN( SHRED, SHREDDING_DURATION_SECONDS ),
1482 0 : FD_MHIST_SECONDS_MAX( SHRED, SHREDDING_DURATION_SECONDS ) ) );
1483 0 : fd_histf_join( fd_histf_new( ctx->metrics->add_shred_timing, FD_MHIST_SECONDS_MIN( SHRED, ADD_SHRED_DURATION_SECONDS ),
1484 0 : FD_MHIST_SECONDS_MAX( SHRED, ADD_SHRED_DURATION_SECONDS ) ) );
1485 0 : fd_histf_join( fd_histf_new( ctx->metrics->store_insert_wait, FD_MHIST_SECONDS_MIN( SHRED, STORE_INSERT_WAIT ),
1486 0 : FD_MHIST_SECONDS_MAX( SHRED, STORE_INSERT_WAIT ) ) );
1487 0 : fd_histf_join( fd_histf_new( ctx->metrics->store_insert_work, FD_MHIST_SECONDS_MIN( SHRED, STORE_INSERT_WORK ),
1488 0 : FD_MHIST_SECONDS_MAX( SHRED, STORE_INSERT_WORK ) ) );
1489 0 : memset( ctx->metrics->shred_processing_result, '\0', sizeof(ctx->metrics->shred_processing_result) );
1490 0 : ctx->metrics->invalid_block_id_cnt = 0UL;
1491 0 : ctx->metrics->shred_rejected_unchained_cnt = 0UL;
1492 0 : ctx->metrics->repair_rcv_cnt = 0UL;
1493 0 : ctx->metrics->repair_rcv_bytes = 0UL;
1494 0 : ctx->metrics->turbine_rcv_cnt = 0UL;
1495 0 : ctx->metrics->turbine_rcv_bytes = 0UL;
1496 :
1497 0 : ctx->pending_batch.microblock_cnt = 0UL;
1498 0 : ctx->pending_batch.txn_cnt = 0UL;
1499 0 : ctx->pending_batch.pos = 0UL;
1500 0 : ctx->pending_batch.slot = 0UL;
1501 0 : memset( ctx->pending_batch.payload, 0, sizeof(ctx->pending_batch.payload) );
1502 :
1503 0 : memset( ctx->epoch_schedule, 0, sizeof(ctx->epoch_schedule) );
1504 0 : for( ulong i=0UL; i<FD_SHRED_FEATURES_ACTIVATION_SLOT_CNT; i++ ) {
1505 0 : ctx->features_activation->slots[i] = FD_SHRED_FEATURES_ACTIVATION_SLOT_DISABLED;
1506 0 : }
1507 :
1508 0 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
1509 0 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
1510 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
1511 :
1512 0 : memset( ctx->block_ids, 0, sizeof(ctx->block_ids) );
1513 0 : }
1514 :
1515 : static ulong
1516 : populate_allowed_seccomp( fd_topo_t const * topo,
1517 : fd_topo_tile_t const * tile,
1518 : ulong out_cnt,
1519 0 : struct sock_filter * out ) {
1520 0 : (void)topo;
1521 0 : (void)tile;
1522 :
1523 0 : populate_sock_filter_policy_fd_shred_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
1524 0 : return sock_filter_policy_fd_shred_tile_instr_cnt;
1525 0 : }
1526 :
1527 : static ulong
1528 : populate_allowed_fds( fd_topo_t const * topo,
1529 : fd_topo_tile_t const * tile,
1530 : ulong out_fds_cnt,
1531 0 : int * out_fds ) {
1532 0 : (void)topo;
1533 0 : (void)tile;
1534 :
1535 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
1536 :
1537 0 : ulong out_cnt = 0UL;
1538 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
1539 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
1540 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
1541 0 : return out_cnt;
1542 0 : }
1543 :
1544 : /* Excluding net_out (where the link is unreliable), STEM_BURST needs
1545 : to guarantee enough credits for the worst case. There are 4 cases
1546 : to consider: (IN_KIND_NET/IN_KIND_POH) x (Frankendancer/Firedancer)
1547 : In the IN_KIND_NET case: (Frankendancer) that can be 4 frags to
1548 : store; (Firedancer) that is one frag for the shred to repair, and
1549 : then another frag to repair for the FEC set.
1550 : In the IN_KIND_POH case: (Frankendancer) there might be
1551 : FD_SHRED_BATCH_FEC_SETS_MAX FEC sets, but we know they are 32:32,
1552 : which means only two shred34s per FEC set; (Firedancer) that is
1553 : FD_SHRED_BATCH_FEC_SETS_MAX frags to repair (one per FEC set).
1554 : Therefore, the worst case is IN_KIND_POH for Frankendancer. */
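     : /* Illustrative check of the bound (assuming, for the sake of the
     :    arithmetic only, FD_SHRED_BATCH_FEC_SETS_MAX were 16): a full
     :    batch of 16 32:32 FEC sets on Frankendancer emits 2 shred34
     :    frags per FEC set, i.e. 32 reliable frags in a single
     :    after_frag, which is exactly FD_SHRED_BATCH_FEC_SETS_MAX*2UL. */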
1555 0 : #define STEM_BURST (FD_SHRED_BATCH_FEC_SETS_MAX*2UL)
1556 :
1557 : /* See explanation in fd_pack */
1558 0 : #define STEM_LAZY (128L*3000L)
1559 :
1560 0 : #define STEM_CALLBACK_CONTEXT_TYPE fd_shred_ctx_t
1561 0 : #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_shred_ctx_t)
1562 :
1563 0 : #define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
1564 0 : #define STEM_CALLBACK_METRICS_WRITE metrics_write
1565 0 : #define STEM_CALLBACK_BEFORE_FRAG before_frag
1566 0 : #define STEM_CALLBACK_DURING_FRAG during_frag
1567 0 : #define STEM_CALLBACK_AFTER_FRAG after_frag
1568 :
1569 : #include "../stem/fd_stem.c"
1570 :
1571 : fd_topo_run_tile_t fd_tile_shred = {
1572 : .name = "shred",
1573 : .populate_allowed_seccomp = populate_allowed_seccomp,
1574 : .populate_allowed_fds = populate_allowed_fds,
1575 : .scratch_align = scratch_align,
1576 : .scratch_footprint = scratch_footprint,
1577 : .privileged_init = privileged_init,
1578 : .unprivileged_init = unprivileged_init,
1579 : .run = stem_run,
1580 : };