Line data Source code
1 : #include "../tiles.h"
2 :
3 : #include "generated/fd_shred_tile_seccomp.h"
4 : #include "../../util/pod/fd_pod_format.h"
5 : #include "fd_shredder.h"
6 : #include "fd_shred_batch.h"
7 : #include "fd_shred_dest.h"
8 : #include "fd_fec_resolver.h"
9 : #include "fd_stake_ci.h"
10 : #include "fd_rnonce_ss.h"
11 : #include "fd_shred_tile.h"
12 : #include "../store/fd_store.h"
13 : #include "../keyguard/fd_keyload.h"
14 : #include "../keyguard/fd_keyguard.h"
15 : #include "../keyguard/fd_keyguard_client.h"
16 : #include "../keyguard/fd_keyswitch.h"
17 : #include "../fd_disco.h"
18 : #include "../net/fd_net_tile.h"
19 : #include "../../flamenco/leaders/fd_leaders.h"
20 : #include "../../util/net/fd_net_headers.h"
21 : #include "../../flamenco/gossip/fd_gossip_message.h"
22 : #include "../../flamenco/runtime/sysvar/fd_sysvar_epoch_schedule.h"
23 : #include "../../discof/tower/fd_tower_slot_rooted.h"
24 :
25 : /* The shred tile handles shreds from two data sources: shreds generated
26 : from microblocks from the leader pipeline, and shreds retransmitted
27 : from the network.
28 :
29 : They have rather different semantics, but at the end of the day, they
30 : both result in a bunch of shreds and FEC sets that need to be sent to
31 : the blockstore and on the network, which is why one tile handles
32 : both.
33 :
34 : We segment the memory for the two types of shreds into two halves of
35 : a dcache because they follow somewhat different flow control
36 : patterns. For flow control, the normal guarantee we want to provide
37 : is that the dcache entry is not overwritten unless the mcache entry
38 : has also been overwritten. The normal way to do this when using both
39 : cyclically and with a 1-to-1 mapping is to make the dcache at least
40 : `burst` entries bigger than the mcache.
41 :
42 : In this tile, we use one output mcache (of depth d) with one output
43 : dcache (which is logically partitioned into two) for the two sources
44 : of data. The worst case for flow control is when we're only sending
45 : with one of the dcache partitions at a time though, so we can
46 : consider them separately.
47 :
48 : Leader pipeline: Every entry triggers s FEC sets to be created, where
49 : s is in [0, FD_SHRED_BATCH_FEC_SETS_MAX]. Each FEC set corresponds
50 : to 1 dcache entry and 1 mcache entry. This means we can have d FEC
51 : sets exposed while producing FD_SHRED_BATCH_FEC_SETS_MAX more FEC
52 : sets, so the leader pipeline section of the dcache needs at least
53 : d+FD_SHRED_BATCH_FEC_SETS_MAX entries.
54 :
55 : From the network: The FEC resolver doesn't use a cyclic order, but it
56 : does promise that once it returns an FEC set, it will return at least
57 : complete_depth FEC sets before returning it again. This means we
58 : want at most complete_depth-1 FEC sets exposed, so
59 : complete_depth=d+1 FEC sets. The FEC resolver has the
60 : ability to keep individual shreds for partial_depth calls, but
61 : because in this version of the shred tile, we send each shred to all
62 : its destinations as soon as we get it, we don't need that
63 : functionality, so we set partial_depth=1.
64 :
65 : Adding these up and plugging in the current value of
66 : BATCH_FEC_SETS_MAX, we get 2*d+6+fec_resolver_depth FEC sets. The
67 : topology code doesn't allow specifying mcache depth and dcache depth
68 : independently. That means we have to lie about the MTU and burst.
69 : We say the MTU is double what it actually is, and then the burst is
70 : 4+fec_resolver_depth/2. That means we get
71 : 2*d+2*(4+fec_resolver_depth/2) >= 2*d+6+fec_resolver_depth FEC sets.
72 :
73 : A note on parallelization. From the network, shreds are distributed
74 : to tiles based on a validator-specific seeded hash of (slot, FEC set
75 : index) so all the shreds for a given FEC set (and any equivocating
76 : FEC set) are processed by the same tile. From the leader pipeline,
77 : the original implementation used to parallelize by batch of
78 : microblocks (so within a block, batches were distributed to different
79 : tiles). To support chained merkle shreds, the current implementation
80 : processes all the batches on tile 0 -- this should be a temporary
81 : state while Solana moves to a newer shred format that supports better
82 : parallelization. */
83 :
84 : #define FD_SHRED_TILE_SCRATCH_ALIGN 128UL
85 :
86 0 : #define IN_KIND_CONTACT ( 0UL)
87 0 : #define IN_KIND_EPOCH ( 1UL) /* Firedancer */
88 0 : #define IN_KIND_STAKE ( 2UL) /* Frankendancer */
89 0 : #define IN_KIND_POH ( 3UL)
90 0 : #define IN_KIND_NET ( 4UL)
91 0 : #define IN_KIND_SIGN ( 5UL)
92 : #define IN_KIND_REPAIR ( 6UL)
93 0 : #define IN_KIND_IPECHO ( 7UL)
94 0 : #define IN_KIND_GOSSIP ( 8UL)
95 0 : #define IN_KIND_ROOTED ( 9UL)
96 0 : #define IN_KIND_ROOTEDH (10UL)
97 :
98 0 : #define NET_OUT_IDX 1
99 0 : #define SIGN_OUT_IDX 2
100 :
101 : FD_STATIC_ASSERT( sizeof(fd_entry_batch_meta_t)==56UL, poh_shred_mtu );
102 : FD_STATIC_ASSERT( sizeof(fd_fec_set_t)==FD_SHRED_STORE_MTU, shred_store_mtu );
103 :
104 0 : #define FD_SHRED_ADD_SHRED_EXTRA_RETVAL_CNT 2
105 :
106 : /* Number of entries in the block_ids table. Each entry is 32 byte.
107 : This table is used to keep track of block ids that we create
108 : when we're leader, so that we can access them whenever we need
109 : a *parent* block id for a new block. Larger table allows to
110 : retrieve older parent block ids. Currently it's set for worst
111 : case parent offset of USHORT_MAX (max allowed in a shred),
112 : making the total table 2MiB.
113 : See also comment on chained_merkle_root. */
114 0 : #define BLOCK_IDS_TABLE_CNT USHORT_MAX
115 :
116 : /* See note on parallelization above. Currently we process all batches in tile 0. */
117 : #if 1
118 : #define SHOULD_PROCESS_THESE_SHREDS ( ctx->round_robin_id==0 )
119 : #else
120 : #define SHOULD_PROCESS_THESE_SHREDS ( ctx->batch_cnt%ctx->round_robin_cnt==ctx->round_robin_id )
121 : #endif
122 :
123 : /* The behavior of the shred tile is slightly different for
124 : Frankendancer vs Firedancer. For example, Frankendancer produces
125 : chained merkle shreds, while Firedancer doesn't yet. We can check
126 : at runtime the difference by inspecting the topology. The simplest
127 : way is to test if ctx->store is initialized.
128 :
129 : FIXME don't assume only frank vs. fire */
130 : #define IS_FIREDANCER ( ctx->store!=NULL )
131 :
/* Per-input-link context.  Most links are consumed from a dcache, for
   which mem/chunk0/wmark bound the valid frag payload region (used with
   fd_chunk_to_laddr_const in during_frag).  Net RX links instead use
   the fd_net_rx bounds object.  The two views are overlaid in a union
   because any given link is only ever one or the other. */
typedef union {
  struct {
    fd_wksp_t * mem;    /* workspace backing the link's dcache */
    ulong chunk0;       /* lowest valid chunk index */
    ulong wmark;        /* highest valid chunk index */
  };
  fd_net_rx_bounds_t net_rx; /* used when the link is a net RX link */
} fd_shred_in_ctx_t;
140 :
/* Tile-local state for the shred tile.  One instance lives in the
   tile's scratch region (see scratch_footprint); it holds the shredder
   and FEC resolver objects, the stake/contact-info bookkeeping, the
   in-progress pending microblock batch from PoH, output link chunk
   state, and metrics. */
typedef struct {
  fd_shredder_t      * shredder;   /* shreds batches produced while we are leader */
  fd_fec_resolver_t  * resolver;   /* reassembles FEC sets from shreds received off the network */
  fd_pubkey_t identity_key[1]; /* Just the public key */

  /* This tile's index and the total count among the shred tiles;
     used by SHOULD_PROCESS_THESE_SHREDS to pick a tile for leader
     batches. */
  ulong round_robin_id;
  ulong round_robin_cnt;
  /* Number of batches shredded from PoH during the current slot.
     This should be the same for all the shred tiles. */
  ulong batch_cnt;
  /* Slot of the most recent microblock we've seen from PoH,
     or 0 if we haven't seen one yet */
  ulong slot;

  /* Repair nonce state (NOTE(review): semantics defined in
     fd_rnonce_ss.h — not visible here). */
  fd_rnonce_ss_t repair_nonce_ss[1];

  fd_keyswitch_t *     keyswitch;       /* hot identity-key swap protocol, see during_housekeeping */
  fd_keyguard_client_t keyguard_client[1];

  fd_fec_set_t * fec_sets; /* backing array of FEC sets, indexed by shredder_fec_set_idx */

  fd_stake_ci_t * stake_ci;
  /* These are used in between during_frag and after_frag */
  fd_shred_dest_weighted_t * new_dest_ptr; /* staged contact-info destinations */
  ulong new_dest_cnt;                      /* count of staged destinations */
  ulong shredded_txn_cnt;                  /* txn count of the batch just shredded */
  ulong new_root;                          /* rooted slot received from tower/replay */

  /* Next expected sequence number on the PoH input; a keyswitch must
     wait until this reaches keyswitch->param (see during_housekeeping). */
  ulong poh_in_expect_seq;

  ushort net_id; /* IP ID counter for outgoing shred packets — TODO confirm */

  /* Set in during_frag when the frag should be dropped in after_frag
     (e.g. a leader batch this tile does not process). */
  int skip_frag;

  /* Additional (topology-configured) destinations shreds are sent to,
     beyond the computed turbine destinations. */
  ulong adtl_dests_leader_cnt;
  fd_shred_dest_weighted_t adtl_dests_leader [ FD_TOPO_ADTL_DESTS_MAX ];
  ulong adtl_dests_retransmit_cnt;
  fd_shred_dest_weighted_t adtl_dests_retransmit[ FD_TOPO_ADTL_DESTS_MAX ];

  /* Template IP4/UDP headers prepended to outgoing data/parity shreds. */
  fd_ip4_udp_hdrs_t data_shred_net_hdr [1];
  fd_ip4_udp_hdrs_t parity_shred_net_hdr[1];

  ulong shredder_fec_set_idx;     /* In [0, shredder_max_fec_set_idx) */
  ulong shredder_max_fec_set_idx; /* exclusive */

  uchar shredder_merkle_root[32];

  /* FEC set indices produced for the current batch, to be published in
     after_frag.  At most FD_SHRED_BATCH_FEC_SETS_MAX per batch. */
  ulong send_fec_set_idx[ FD_SHRED_BATCH_FEC_SETS_MAX ];
  ulong send_fec_set_cnt;
  ulong tsorig; /* timestamp of the last packet in compressed form */

  /* Includes Ethernet, IP, UDP headers */
  ulong shred_buffer_sz;
  uchar shred_buffer[ FD_NET_MTU ];

  /* resolver_seed gets generated in privileged_init but used in
     unprivileged_init, so we store it here in between. */
  ulong resolver_seed;

  /* Per-input-link state; in_kind selects the IN_KIND_* handling. */
  fd_shred_in_ctx_t in[ 32 ];
  int in_kind[ 32 ];

  /* net output link: workspace, chunk bounds, and current chunk. */
  fd_wksp_t * net_out_mem;
  ulong net_out_chunk0;
  ulong net_out_wmark;
  ulong net_out_chunk;

  /* store output link (Frankendancer). */
  ulong store_out_idx;
  fd_wksp_t * store_out_mem;
  ulong store_out_chunk0;
  ulong store_out_wmark;
  ulong store_out_chunk;

  /* This is the output link for shreds that is currently consumed by
     the repair and replay tile. */
  ulong shred_out_idx;
  fd_wksp_t * shred_out_mem;
  ulong shred_out_chunk0;
  ulong shred_out_wmark;
  ulong shred_out_chunk;

  /* Non-NULL only on Firedancer; doubles as the runtime check in
     IS_FIREDANCER. */
  fd_store_t * store;

  /* Buffer holding the most recent gossip update message copied in
     during_frag. */
  fd_gossip_update_message_t gossip_upd_buf[1];

  /* Tile metrics, flushed by metrics_write. */
  struct {
    fd_histf_t contact_info_cnt[ 1 ];
    fd_histf_t batch_sz[ 1 ];
    fd_histf_t batch_microblock_cnt[ 1 ];
    fd_histf_t shredding_timing[ 1 ];
    fd_histf_t add_shred_timing[ 1 ];
    ulong shred_processing_result[ FD_FEC_RESOLVER_ADD_SHRED_RETVAL_CNT+FD_SHRED_ADD_SHRED_EXTRA_RETVAL_CNT ];
    ulong invalid_block_id_cnt;
    ulong shred_rejected_unchained_cnt;
    ulong repair_rcv_cnt;
    ulong repair_rcv_bytes;
    ulong turbine_rcv_cnt;
    ulong turbine_rcv_bytes;
    ulong bad_nonce;
  } metrics[ 1 ];

  /* Accumulator for the microblock batch currently being built from
     PoH frags; flushed (shredded) when the batch watermark is exceeded
     or the block completes. */
  struct {
    ulong txn_cnt;
    ulong pos; /* in payload, range [0, FD_SHRED_BATCH_RAW_BUF_SZ-8UL) */
    ulong slot; /* set to 0 when pos==0 */
    union {
      struct {
        ulong microblock_cnt;
        uchar payload[ FD_SHRED_BATCH_RAW_BUF_SZ - 8UL ];
      };
      uchar raw[ FD_SHRED_BATCH_RAW_BUF_SZ ];
    };
  } pending_batch;

  fd_epoch_schedule_t epoch_schedule[1];
  fd_shred_features_activation_t features_activation[1];
  /* too large to be left in the stack */
  fd_shred_dest_idx_t scratchpad_dests[ FD_SHRED_DEST_MAX_FANOUT*(FD_REEDSOL_DATA_SHREDS_MAX+FD_REEDSOL_PARITY_SHREDS_MAX) ];

  /* Points into block_ids at the slot currently being shredded; holds
     the chained merkle root, progressively updated per FEC set.  See
     the long comment above BLOCK_IDS_TABLE_CNT. */
  uchar * chained_merkle_root;
  fd_bmtree_node_t out_merkle_roots[ FD_SHRED_BATCH_FEC_SETS_MAX ];
  uchar block_ids[ BLOCK_IDS_TABLE_CNT ][ FD_SHRED_MERKLE_ROOT_SZ ];
} fd_shred_ctx_t;
264 :
265 : /* shred features are generally considered active at the epoch *following*
266 : the epoch in which the feature gate is activated.
267 :
268 : As an optimization, when the activation slot is received, it is converted
269 : into the first slot of the subsequent epoch. This allows for a more
270 : efficient check (shred_slot >= feature_slot) and avoids the overhead of
271 : repeatedly converting slots into epochs for comparison.
272 :
273 : This function is only for Firedancer, while Frankendancer already receives
274 : the final activation slot from POH tile.
275 :
276 : In Agave, this is done with check_feature_activation():
277 : https://github.com/anza-xyz/agave/blob/v3.1.4/turbine/src/cluster_nodes.rs#L771
278 : https://github.com/anza-xyz/agave/blob/v3.1.4/core/src/shred_fetch_stage.rs#L456 */
279 : static inline ulong
280 0 : fd_shred_get_feature_activation_slot0( ulong feature_slot, fd_shred_ctx_t * ctx ) {
281 : /* if the feature does not have an activation slot yet, return ULONG_MAX */
282 0 : if( FD_UNLIKELY( feature_slot==ULONG_MAX ) ) {
283 0 : return ULONG_MAX;
284 0 : }
285 : /* if we don't have an epoch schedule yet, return ULONG_MAX */
286 0 : if( FD_UNLIKELY( ctx->epoch_schedule->slots_per_epoch==0 ) ) {
287 0 : return ULONG_MAX;
288 0 : }
289 : /* compute the activation epoch, add one, return the first slot. */
290 0 : ulong feature_epoch = 1 + fd_slot_to_epoch( ctx->epoch_schedule, feature_slot, NULL );
291 0 : return fd_epoch_slot0( ctx->epoch_schedule, feature_epoch );
292 0 : }
293 :
294 : FD_FN_CONST static inline ulong
295 0 : scratch_align( void ) {
296 0 : return 128UL;
297 0 : }
298 :
299 : FD_FN_PURE static inline ulong
300 0 : scratch_footprint( fd_topo_tile_t const * tile ) {
301 :
302 0 : ulong fec_resolver_footprint = fd_fec_resolver_footprint( tile->shred.fec_resolver_depth, 1UL, tile->shred.depth+1UL,
303 0 : 128UL * tile->shred.fec_resolver_depth );
304 0 : ulong l = FD_LAYOUT_INIT;
305 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_shred_ctx_t), sizeof(fd_shred_ctx_t) );
306 0 : l = FD_LAYOUT_APPEND( l, fd_stake_ci_align(), fd_stake_ci_footprint() );
307 0 : l = FD_LAYOUT_APPEND( l, fd_fec_resolver_align(), fec_resolver_footprint );
308 0 : l = FD_LAYOUT_APPEND( l, fd_shredder_align(), fd_shredder_footprint() );
309 0 : return FD_LAYOUT_FINI( l, scratch_align() );
310 0 : }
311 :
312 : static inline void
313 0 : during_housekeeping( fd_shred_ctx_t * ctx ) {
314 0 : if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
315 0 : ulong seq_must_complete = ctx->keyswitch->param;
316 :
317 0 : if( FD_UNLIKELY( fd_seq_lt( ctx->poh_in_expect_seq, seq_must_complete ) ) ) {
318 : /* See fd_keyswitch.h, we need to flush any in-flight shreds from
319 : the leader pipeline before switching key. */
320 0 : FD_LOG_WARNING(( "Flushing in-flight unpublished shreds, must reach seq %lu, currently at %lu ...", seq_must_complete, ctx->poh_in_expect_seq ));
321 0 : return;
322 0 : }
323 :
324 0 : memcpy( ctx->identity_key->uc, ctx->keyswitch->bytes, 32UL );
325 0 : fd_stake_ci_set_identity( ctx->stake_ci, ctx->identity_key );
326 0 : fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
327 0 : }
328 0 : }
329 :
/* metrics_write: flush this tile's accumulated counters and histograms
   into the shared metrics region.  Called periodically by the tile
   runner (stem) — TODO confirm cadence against the stem callbacks. */
static inline void
metrics_write( fd_shred_ctx_t * ctx ) {
  /* Histograms are copied wholesale; counters are absolute values so
     they are written with SET rather than INC. */
  FD_MHIST_COPY( SHRED, CLUSTER_CONTACT_INFO_CNT, ctx->metrics->contact_info_cnt );
  FD_MHIST_COPY( SHRED, BATCH_SZ, ctx->metrics->batch_sz );
  FD_MHIST_COPY( SHRED, BATCH_MICROBLOCK_CNT, ctx->metrics->batch_microblock_cnt );
  FD_MHIST_COPY( SHRED, SHREDDING_DURATION_SECONDS, ctx->metrics->shredding_timing );
  FD_MHIST_COPY( SHRED, ADD_SHRED_DURATION_SECONDS, ctx->metrics->add_shred_timing );
  FD_MCNT_SET  ( SHRED, SHRED_REPAIR_RCV, ctx->metrics->repair_rcv_cnt );
  FD_MCNT_SET  ( SHRED, SHRED_REPAIR_RCV_BYTES, ctx->metrics->repair_rcv_bytes );
  FD_MCNT_SET  ( SHRED, SHRED_TURBINE_RCV, ctx->metrics->turbine_rcv_cnt );
  FD_MCNT_SET  ( SHRED, SHRED_TURBINE_RCV_BYTES, ctx->metrics->turbine_rcv_bytes );
  FD_MCNT_SET  ( SHRED, BAD_NONCE, ctx->metrics->bad_nonce );

  FD_MCNT_SET  ( SHRED, INVALID_BLOCK_ID, ctx->metrics->invalid_block_id_cnt );
  FD_MCNT_SET  ( SHRED, SHRED_REJECTED_UNCHAINED, ctx->metrics->shred_rejected_unchained_cnt );

  FD_MCNT_ENUM_COPY( SHRED, SHRED_PROCESSED, ctx->metrics->shred_processing_result );
}
348 :
349 : static inline void
350 : handle_new_cluster_contact_info( fd_shred_ctx_t * ctx,
351 0 : uchar const * buf ) {
352 0 : ulong const * header = (ulong const *)fd_type_pun_const( buf );
353 :
354 0 : ulong dest_cnt = header[ 0 ];
355 0 : fd_histf_sample( ctx->metrics->contact_info_cnt, dest_cnt );
356 :
357 0 : if( dest_cnt >= MAX_SHRED_DESTS )
358 0 : FD_LOG_ERR(( "Cluster nodes had %lu destinations, which was more than the max of %lu", dest_cnt, MAX_SHRED_DESTS ));
359 :
360 0 : fd_shred_dest_wire_t const * in_dests = fd_type_pun_const( header+1UL );
361 0 : fd_shred_dest_weighted_t * dests = fd_stake_ci_dest_add_init( ctx->stake_ci );
362 :
363 0 : ctx->new_dest_ptr = dests;
364 0 : ctx->new_dest_cnt = dest_cnt;
365 :
366 0 : for( ulong i=0UL; i<dest_cnt; i++ ) {
367 0 : memcpy( dests[i].pubkey.uc, in_dests[i].pubkey, 32UL );
368 0 : dests[i].ip4 = in_dests[i].ip4_addr;
369 0 : dests[i].port = in_dests[i].udp_port;
370 0 : }
371 0 : }
372 :
/* finalize_new_cluster_contact_info: commit the destination list staged
   by handle_new_cluster_contact_info (ctx->new_dest_cnt entries) into
   the stake contact-info object.  Runs after the frag is known not to
   have been overrun. */
static inline void
finalize_new_cluster_contact_info( fd_shred_ctx_t * ctx ) {
  fd_stake_ci_dest_add_fini( ctx->stake_ci, ctx->new_dest_cnt );
}
377 :
378 : static inline int
379 : before_frag( fd_shred_ctx_t * ctx,
380 : ulong in_idx,
381 : ulong seq,
382 0 : ulong sig ) {
383 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_IPECHO ) ) {
384 0 : FD_TEST( sig!=0UL && sig<=USHORT_MAX );
385 0 : fd_shredder_set_shred_version ( ctx->shredder, (ushort)sig );
386 0 : fd_fec_resolver_set_shred_version( ctx->resolver, (ushort)sig );
387 0 : return 1;
388 0 : }
389 :
390 0 : if( FD_UNLIKELY( !ctx->shredder->shred_version ) ) return -1;
391 :
392 0 : if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_POH ) ) {
393 0 : ctx->poh_in_expect_seq = seq+1UL;
394 0 : return (int)(fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_MICROBLOCK) & (int)(fd_disco_poh_sig_pkt_type( sig )!=POH_PKT_TYPE_FEAT_ACT_SLOT);
395 0 : }
396 0 : if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
397 0 : return (int)(fd_disco_netmux_sig_proto( sig )!=DST_PROTO_SHRED) & (int)(fd_disco_netmux_sig_proto( sig )!=DST_PROTO_REPAIR);
398 0 : }
399 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ){
400 0 : return sig!=FD_GOSSIP_UPDATE_TAG_CONTACT_INFO &&
401 0 : sig!=FD_GOSSIP_UPDATE_TAG_CONTACT_INFO_REMOVE;
402 0 : }
403 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_ROOTEDH ) ) {
404 0 : return sig!=0UL; /* only care about rooted banks, not completed blockhash */
405 0 : }
406 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_ROOTED ) ) {
407 0 : return sig!=FD_TOWER_SIG_SLOT_ROOTED; /* only care about slot_confirmed messages */
408 0 : }
409 0 : return 0;
410 0 : }
411 :
412 : static void
413 : during_frag( fd_shred_ctx_t * ctx,
414 : ulong in_idx,
415 : ulong seq FD_PARAM_UNUSED,
416 : ulong sig,
417 : ulong chunk,
418 : ulong sz,
419 0 : ulong ctl ) {
420 :
421 0 : ctx->skip_frag = 0;
422 :
423 0 : ctx->tsorig = fd_frag_meta_ts_comp( fd_tickcount() );
424 :
425 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_REPAIR ) ) {
426 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_NET_MTU ) )
427 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
428 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
429 :
430 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
431 0 : fd_memcpy( ctx->shred_buffer, dcache_entry, sz );
432 0 : return;
433 0 : }
434 :
435 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_CONTACT ) ) {
436 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
437 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
438 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
439 :
440 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
441 0 : handle_new_cluster_contact_info( ctx, dcache_entry );
442 0 : return;
443 0 : }
444 :
445 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
446 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>sizeof(fd_gossip_update_message_t) ) )
447 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
448 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
449 0 : uchar const * gossip_upd_msg = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
450 0 : fd_memcpy( ctx->gossip_upd_buf, gossip_upd_msg, sz );
451 0 : return;
452 0 : }
453 :
454 : /* Firedancer only */
455 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_ROOTED ) ) {
456 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
457 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
458 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
459 0 : fd_tower_slot_rooted_t const * rooted_msg = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
460 0 : ctx->new_root = rooted_msg->slot;
461 0 : return;
462 0 : }
463 :
464 : /* Frankendancer only */
465 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_ROOTEDH ) ) {
466 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
467 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
468 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
469 : /* The message format is a pointer to the bank (which is in the
470 : agave address space, so we couldn't access it even if we wanted
471 : to) followed by the rooted slot. */
472 0 : ulong const * replay_msg = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
473 0 : ctx->new_root = replay_msg[ 1 ];
474 0 : return;
475 0 : }
476 :
477 : /* Firedancer only */
478 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
479 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
480 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
481 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
482 :
483 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
484 0 : fd_epoch_info_msg_t const * epoch_msg = fd_type_pun_const( dcache_entry );
485 :
486 0 : FD_TEST( epoch_msg->staked_vote_cnt<=MAX_COMPRESSED_STAKE_WEIGHTS );
487 0 : FD_TEST( epoch_msg->staked_id_cnt<=MAX_SHRED_DESTS );
488 :
489 0 : fd_stake_ci_epoch_msg_init( ctx->stake_ci, epoch_msg );
490 :
491 0 : *ctx->epoch_schedule = epoch_msg->epoch_schedule;
492 0 : ctx->features_activation->enforce_fixed_fec_set = fd_shred_get_feature_activation_slot0(
493 0 : epoch_msg->features.enforce_fixed_fec_set, ctx );
494 0 : ctx->features_activation->discard_unexpected_data_complete_shreds = fd_shred_get_feature_activation_slot0(
495 0 : epoch_msg->features.discard_unexpected_data_complete_shreds, ctx );
496 :
497 0 : fd_fec_resolver_set_discard_unexpected_data_complete_shreds( ctx->resolver,
498 0 : ctx->features_activation->discard_unexpected_data_complete_shreds );
499 :
500 0 : return;
501 0 : }
502 :
503 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_STAKE ) ) {
504 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
505 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
506 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
507 :
508 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
509 0 : fd_stake_ci_stake_msg_init( ctx->stake_ci, fd_type_pun_const( dcache_entry ) );
510 0 : return;
511 0 : }
512 :
513 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_POH ) ) {
514 0 : ctx->send_fec_set_cnt = 0UL;
515 :
516 0 : if( FD_UNLIKELY( (fd_disco_poh_sig_pkt_type( sig )==POH_PKT_TYPE_FEAT_ACT_SLOT) ) ) {
517 : /* There is a subset of FD_SHRED_FEATURES_ACTIVATION_... slots that
518 : the shred tile needs to be aware of. Since this requires the
519 : bank, we are forced (so far) to receive them from the poh tile
520 : (as a POH_PKT_TYPE_FEAT_ACT_SLOT). */
521 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
522 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=(sizeof(fd_shred_features_activation_t)) ) )
523 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
524 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
525 :
526 0 : fd_shred_features_activation_t const * act_data = (fd_shred_features_activation_t const *)dcache_entry;
527 0 : memcpy( ctx->features_activation, act_data, sizeof(fd_shred_features_activation_t) );
528 :
529 0 : fd_fec_resolver_set_discard_unexpected_data_complete_shreds( ctx->resolver,
530 0 : ctx->features_activation->discard_unexpected_data_complete_shreds );
531 0 : }
532 0 : else { /* (fd_disco_poh_sig_pkt_type( sig )==POH_PKT_TYPE_MICROBLOCK) */
533 : /* This is a frag from the PoH tile. We'll copy it to our pending
534 : microblock batch and shred it if necessary (last in block or
535 : above watermark). We just go ahead and shred it here, even
536 : though we may get overrun. If we do end up getting overrun, we
537 : just won't send these shreds out and we'll reuse the FEC set for
538 : the next one. From a higher level though, if we do get overrun,
539 : a bunch of shreds will never be transmitted, and we'll end up
540 : producing a block that never lands on chain. */
541 :
542 0 : uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
543 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_POH_SHRED_MTU ||
544 0 : sz<(sizeof(fd_entry_batch_meta_t)+sizeof(fd_entry_batch_header_t)) ) )
545 0 : FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
546 0 : ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
547 :
548 0 : fd_entry_batch_meta_t const * entry_meta = (fd_entry_batch_meta_t const *)dcache_entry;
549 0 : uchar const * entry = dcache_entry + sizeof(fd_entry_batch_meta_t);
550 0 : ulong entry_sz = sz - sizeof(fd_entry_batch_meta_t);
551 :
552 0 : fd_entry_batch_header_t const * microblock = (fd_entry_batch_header_t const *)entry;
553 :
554 : /* It should never be possible for this to fail, but we check it
555 : anyway. */
556 0 : FD_TEST( entry_sz + ctx->pending_batch.pos <= sizeof(ctx->pending_batch.payload) );
557 :
558 0 : ulong target_slot = fd_disco_poh_sig_slot( sig );
559 0 : if( FD_UNLIKELY( (ctx->pending_batch.microblock_cnt>0) & (ctx->pending_batch.slot!=target_slot) ) ) {
560 : /* TODO: The Agave client sends a dummy entry batch with only 1
561 : byte and the block-complete bit set. This helps other
562 : validators know that the block is dead and they should not try
563 : to continue building a fork on it. We probably want a similar
564 : approach eventually. */
565 0 : FD_LOG_WARNING(( "Abandoning %lu microblocks for slot %lu and switching to slot %lu",
566 0 : ctx->pending_batch.microblock_cnt, ctx->pending_batch.slot, target_slot ));
567 0 : ctx->pending_batch.slot = 0UL;
568 0 : ctx->pending_batch.pos = 0UL;
569 0 : ctx->pending_batch.microblock_cnt = 0UL;
570 0 : ctx->pending_batch.txn_cnt = 0UL;
571 0 : ctx->batch_cnt = 0UL;
572 :
573 0 : FD_MCNT_INC( SHRED, MICROBLOCKS_ABANDONED, 1UL );
574 0 : }
575 :
576 0 : ctx->pending_batch.slot = target_slot;
577 : /* We want to send out some shreds immediately when we start a new
578 : slot to help with leader targeting. */
579 0 : int new_slot = 0;
580 0 : if( FD_UNLIKELY( target_slot!=ctx->slot )) {
581 : /* Reset batch count if we are in a new slot */
582 0 : ctx->batch_cnt = 0UL;
583 0 : ctx->slot = target_slot;
584 0 : new_slot = 1;
585 :
586 : /* At the beginning of a new slot, prepare chained_merkle_root.
587 : chained_merkle_root is initialized at the block_id of the parent
588 : block, there's two cases:
589 :
590 : 1. block_id is passed in by the poh tile:
591 : - it's always passed when parent block had a different leader
592 : - it may be passed when we were leader for parent block (there
593 : are race conditions when it's not passed)
594 :
595 : 2. block_id is taken from block_ids table if we were the leader
596 : for the parent block (when we were NOT the leader, because of
597 : equivocation, we can't store block_id in the table)
598 :
599 : chained_merkle_root is stored in block_ids table at target_slot
600 : and it's progressively updated as more microblocks are received.
601 : As a result, when we move to a new slot, the block_ids table at
602 : the old slot will contain the block_id.
603 :
604 : The block_ids table is designed to protect against the race condition
605 : case in 1., therefore the table may not be set in some cases, e.g. if
606 : a validator (re)starts, but in those cases we don't expect the race
607 : condition to apply. */
608 0 : ctx->chained_merkle_root = ctx->block_ids[ target_slot % BLOCK_IDS_TABLE_CNT ];
609 0 : if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
610 0 : if( FD_LIKELY( entry_meta->parent_block_id_valid ) ) {
611 : /* 1. Initialize chained_merkle_root sent from poh tile */
612 0 : memcpy( ctx->chained_merkle_root, entry_meta->parent_block_id, FD_SHRED_MERKLE_ROOT_SZ );
613 0 : } else {
614 0 : ulong parent_slot = target_slot - entry_meta->parent_offset;
615 0 : fd_epoch_leaders_t const * lsched = fd_stake_ci_get_lsched_for_slot( ctx->stake_ci, parent_slot );
616 0 : fd_pubkey_t const * slot_leader = fd_epoch_leaders_get( lsched, parent_slot );
617 :
618 0 : if( lsched && slot_leader && fd_memeq( slot_leader, ctx->identity_key, sizeof(fd_pubkey_t) ) ) {
619 : /* 2. Initialize chained_merkle_root from block_ids table, if we were the leader */
620 0 : memcpy( ctx->chained_merkle_root, ctx->block_ids[ parent_slot % BLOCK_IDS_TABLE_CNT ], FD_SHRED_MERKLE_ROOT_SZ );
621 0 : } else {
622 : /* This should never happen, log a metric and set chained_merkle_root to 0 */
623 0 : ctx->metrics->invalid_block_id_cnt++;
624 0 : memset( ctx->chained_merkle_root, 0, FD_SHRED_MERKLE_ROOT_SZ );
625 0 : }
626 0 : }
627 0 : }
628 0 : }
629 :
630 0 : if( FD_LIKELY( !SHOULD_PROCESS_THESE_SHREDS ) ) {
631 : /* If we are not processing this batch, filter in after_frag. */
632 0 : ctx->skip_frag = 1;
633 0 : }
634 :
635 0 : ulong pending_batch_wmark = FD_SHRED_BATCH_WMARK_CHAINED;
636 0 : uchar * chained_merkle_root = ctx->chained_merkle_root;
637 0 : ulong load_for_32_shreds = FD_SHREDDER_CHAINED_FEC_SET_PAYLOAD_SZ;
638 : /* All fec sets in the last batch of a block need to be resigned.
639 : This needs to match Agave's behavior - as a reference, see:
640 : https://github.com/anza-xyz/agave/blob/v2.3/ledger/src/shred/merkle.rs#L1040 */
641 0 : if( FD_UNLIKELY( entry_meta->block_complete ) ) {
642 0 : pending_batch_wmark = FD_SHRED_BATCH_WMARK_RESIGNED;
643 : /* chained_merkle_root also applies to resigned FEC sets. */
644 0 : load_for_32_shreds = FD_SHREDDER_RESIGNED_FEC_SET_PAYLOAD_SZ;
645 0 : }
646 :
647 : /* If this microblock completes the block, the batch is then
648 : finalized here. Otherwise, we check whether the new entry
649 : would exceed the pending_batch_wmark. If true, then the
650 : batch is closed now, shredded, and a new batch is started
651 : with the incoming microblock. If false, no shredding takes
652 : place, and the microblock is added to the current batch. */
653 0 : int forced_end_batch = entry_meta->block_complete | new_slot;
654 0 : int batch_would_exceed_wmark = ( ctx->pending_batch.pos + entry_sz ) > pending_batch_wmark;
655 0 : int include_in_current_batch = forced_end_batch | ( !batch_would_exceed_wmark );
656 0 : int process_current_batch = forced_end_batch | batch_would_exceed_wmark;
657 0 : int init_new_batch = !include_in_current_batch;
658 :
659 0 : if( FD_LIKELY( include_in_current_batch ) ) {
660 0 : if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
661 : /* Ugh, yet another memcpy */
662 0 : fd_memcpy( ctx->pending_batch.payload + ctx->pending_batch.pos, entry, entry_sz );
663 0 : }
664 0 : ctx->pending_batch.pos += entry_sz;
665 0 : ctx->pending_batch.microblock_cnt += 1UL;
666 0 : ctx->pending_batch.txn_cnt += microblock->txn_cnt;
667 0 : }
668 :
669 0 : if( FD_LIKELY( process_current_batch )) {
670 : /* Batch and padding size calculation. */
671 0 : ulong batch_sz = sizeof(ulong) + ctx->pending_batch.pos; /* without padding */
672 0 : ulong batch_sz_padded = load_for_32_shreds * ( ( batch_sz + load_for_32_shreds - 1UL ) / load_for_32_shreds );
673 0 : ulong padding_sz = batch_sz_padded - batch_sz;
674 :
675 0 : if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
676 : /* If it's our turn, shred this batch. FD_UNLIKELY because shred
677 : tile cnt generally >= 2 */
678 :
679 0 : long shredding_timing = -fd_tickcount();
680 :
681 0 : fd_memset( ctx->pending_batch.payload + ctx->pending_batch.pos, 0, padding_sz );
682 :
683 0 : ctx->send_fec_set_cnt = 0UL; /* verbose */
684 0 : ctx->shredded_txn_cnt = ctx->pending_batch.txn_cnt;
685 :
686 0 : fd_shredder_init_batch( ctx->shredder, ctx->pending_batch.raw, batch_sz_padded, target_slot, entry_meta );
687 :
688 0 : ulong pend_sz = batch_sz_padded;
689 0 : ulong pend_idx = 0;
690 0 : while( pend_sz > 0UL ) {
691 :
692 0 : fd_fec_set_t * out = ctx->fec_sets + ctx->shredder_fec_set_idx;
693 :
694 0 : FD_TEST( fd_shredder_next_fec_set( ctx->shredder, out, chained_merkle_root ) );
695 0 : memcpy( ctx->out_merkle_roots[pend_idx].hash, chained_merkle_root, 32UL );
696 :
697 0 : out->data_shred_rcvd = 0U;
698 0 : out->parity_shred_rcvd = 0U;
699 :
700 0 : ctx->send_fec_set_idx[ ctx->send_fec_set_cnt ] = ctx->shredder_fec_set_idx;
701 0 : ctx->send_fec_set_cnt += 1UL;
702 0 : ctx->shredder_fec_set_idx = (ctx->shredder_fec_set_idx+1UL)%ctx->shredder_max_fec_set_idx;
703 :
704 0 : pend_sz -= load_for_32_shreds;
705 0 : pend_idx++;
706 0 : }
707 :
708 0 : fd_shredder_fini_batch( ctx->shredder );
709 0 : shredding_timing += fd_tickcount();
710 :
711 : /* Update metrics */
712 0 : fd_histf_sample( ctx->metrics->batch_sz, batch_sz /* without padding */ );
713 0 : fd_histf_sample( ctx->metrics->batch_microblock_cnt, ctx->pending_batch.microblock_cnt );
714 0 : fd_histf_sample( ctx->metrics->shredding_timing, (ulong)shredding_timing );
715 0 : } else {
716 0 : ctx->send_fec_set_cnt = 0UL; /* verbose */
717 :
718 0 : fd_shredder_skip_batch( ctx->shredder, batch_sz_padded, target_slot, entry_meta->block_complete );
719 0 : }
720 :
721 0 : ctx->pending_batch.slot = 0UL;
722 0 : ctx->pending_batch.pos = 0UL;
723 0 : ctx->pending_batch.microblock_cnt = 0UL;
724 0 : ctx->pending_batch.txn_cnt = 0UL;
725 0 : ctx->batch_cnt++;
726 0 : }
727 :
728 0 : if( FD_UNLIKELY( init_new_batch ) ) {
729 : /* TODO: this assumes that SHOULD_PROCESS_THESE_SHREDS is
730 : constant across batches. Otherwise, the condition may
731 : need to be removed (or adjusted). */
732 0 : if( FD_UNLIKELY( SHOULD_PROCESS_THESE_SHREDS ) ) {
733 : /* Ugh, yet another memcpy */
734 0 : fd_memcpy( ctx->pending_batch.payload + 0UL /* verbose */, entry, entry_sz );
735 0 : }
736 0 : ctx->pending_batch.slot = target_slot;
737 0 : ctx->pending_batch.pos = entry_sz;
738 0 : ctx->pending_batch.microblock_cnt = 1UL;
739 0 : ctx->pending_batch.txn_cnt = microblock->txn_cnt;
740 0 : }
741 0 : }
742 0 : } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
743 : /* The common case, from the net tile. The FEC resolver API does
744 : not present a prepare/commit model. If we get overrun between
745 : when the FEC resolver verifies the signature and when it stores
746 : the local copy, we could end up storing and retransmitting
747 : garbage. Instead we copy it locally, sadly, and only give it to
748 : the FEC resolver when we know it won't be overrun anymore. */
749 0 : uchar const * dcache_entry = fd_net_rx_translate_frag( &ctx->in[ in_idx ].net_rx, chunk, ctl, sz );
750 0 : ulong hdr_sz = fd_disco_netmux_sig_hdr_sz( sig );
751 0 : FD_TEST( hdr_sz <= sz ); /* Should be ensured by the net tile */
752 0 : fd_shred_t const * shred = fd_shred_parse( dcache_entry+hdr_sz, sz-hdr_sz );
753 0 : if( FD_UNLIKELY( !shred ) ) {
754 0 : ctx->skip_frag = 1;
755 0 : return;
756 0 : };
757 :
758 0 : if( FD_UNLIKELY( fd_disco_netmux_sig_proto( sig )==DST_PROTO_REPAIR ) ) {
759 0 : ctx->metrics->repair_rcv_cnt++;
760 0 : ctx->metrics->repair_rcv_bytes += sz;
761 0 : } else {
762 0 : ctx->metrics->turbine_rcv_cnt++;
763 0 : ctx->metrics->turbine_rcv_bytes += sz;
764 0 : }
765 :
766 : /* Drop unchained merkle shreds */
767 0 : int is_unchained = !fd_shred_is_chained( fd_shred_type( shred->variant ) );
768 0 : if( FD_UNLIKELY( is_unchained ) ) {
769 0 : ctx->metrics->shred_rejected_unchained_cnt++;
770 0 : ctx->skip_frag = 1;
771 0 : return;
772 0 : };
773 :
774 : /* all shreds in the same FEC set will have the same signature
775 : so we can round-robin shreds between the shred tiles based on
776 : just the signature without splitting individual FEC sets. */
777 0 : ulong sig = fd_ulong_load_8( shred->signature );
778 0 : if( FD_LIKELY( sig%ctx->round_robin_cnt!=ctx->round_robin_id ) ) {
779 0 : ctx->skip_frag = 1;
780 0 : return;
781 0 : }
782 0 : fd_memcpy( ctx->shred_buffer, dcache_entry+hdr_sz, sz-hdr_sz );
783 0 : ctx->shred_buffer_sz = sz-hdr_sz;
784 0 : }
785 0 : }
786 :
787 : static inline void
788 : send_shred( fd_shred_ctx_t * ctx,
789 : fd_stem_context_t * stem,
790 : fd_shred_t const * shred,
791 : fd_shred_dest_weighted_t const * dest,
792 0 : ulong tsorig ) {
793 :
794 0 : if( FD_UNLIKELY( !dest->ip4 ) ) return;
795 :
796 0 : uchar * packet = fd_chunk_to_laddr( ctx->net_out_mem, ctx->net_out_chunk );
797 :
798 0 : int is_data = fd_shred_is_data( fd_shred_type( shred->variant ) );
799 0 : fd_ip4_udp_hdrs_t * hdr = (fd_ip4_udp_hdrs_t *)packet;
800 0 : *hdr = *( is_data ? ctx->data_shred_net_hdr : ctx->parity_shred_net_hdr );
801 :
802 0 : fd_ip4_hdr_t * ip4 = hdr->ip4;
803 0 : ip4->daddr = dest->ip4;
804 0 : ip4->net_id = fd_ushort_bswap( ctx->net_id++ );
805 0 : ip4->check = 0U;
806 0 : ip4->check = fd_ip4_hdr_check_fast( ip4 );
807 :
808 0 : hdr->udp->net_dport = fd_ushort_bswap( dest->port );
809 :
810 0 : ulong shred_sz = fd_ulong_if( is_data, FD_SHRED_MIN_SZ, FD_SHRED_MAX_SZ );
811 0 : #if FD_HAS_AVX
812 : /* We're going to copy this shred potentially a bunch of times without
813 : reading it again, and we'd rather not thrash our cache, so we want
814 : to use non-temporal writes here. We need to make sure we don't
815 : touch the cache line containing the network headers that we just
816 : wrote to though. We know the destination is 64 byte aligned. */
817 0 : FD_STATIC_ASSERT( sizeof(*hdr)<64UL, non_temporal );
818 : /* src[0:sizeof(hdrs)] is invalid, but now we want to copy
819 : dest[i]=src[i] for i>=sizeof(hdrs), so it simplifies the code. */
820 0 : uchar const * src = (uchar const *)((ulong)shred - sizeof(fd_ip4_udp_hdrs_t));
821 0 : memcpy( packet+sizeof(fd_ip4_udp_hdrs_t), src+sizeof(fd_ip4_udp_hdrs_t), 64UL-sizeof(fd_ip4_udp_hdrs_t) );
822 :
823 0 : ulong end_offset = shred_sz + sizeof(fd_ip4_udp_hdrs_t);
824 0 : ulong i;
825 0 : for( i=64UL; end_offset-i<64UL; i+=64UL ) {
826 0 : # if FD_HAS_AVX512
827 0 : _mm512_stream_si512( (void *)(packet+i ), _mm512_loadu_si512( (void const *)(src+i ) ) );
828 : # else
829 0 : _mm256_stream_si256( (void *)(packet+i ), _mm256_loadu_si256( (void const *)(src+i ) ) );
830 0 : _mm256_stream_si256( (void *)(packet+i+32UL), _mm256_loadu_si256( (void const *)(src+i+32UL) ) );
831 0 : # endif
832 0 : }
833 0 : _mm_sfence();
834 0 : fd_memcpy( packet+i, src+i, end_offset-i ); /* Copy the last partial cache line */
835 :
836 : #else
837 : fd_memcpy( packet+sizeof(fd_ip4_udp_hdrs_t), shred, shred_sz );
838 : #endif
839 :
840 0 : ulong pkt_sz = shred_sz + sizeof(fd_ip4_udp_hdrs_t);
841 0 : ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
842 0 : ulong sig = fd_disco_netmux_sig( dest->ip4, dest->port, dest->ip4, DST_PROTO_OUTGOING, sizeof(fd_ip4_udp_hdrs_t) );
843 0 : ulong const chunk = ctx->net_out_chunk;
844 0 : fd_stem_publish( stem, NET_OUT_IDX, sig, chunk, pkt_sz, 0UL, tsorig, tspub );
845 0 : ctx->net_out_chunk = fd_dcache_compact_next( chunk, pkt_sz, ctx->net_out_chunk0, ctx->net_out_wmark );
846 0 : }
847 :
/* after_frag completes processing of a fragment that during_frag
   accepted.  It dispatches on the kind of the input link: control
   inputs (contact, epoch, stake, root, gossip) update local state and
   return early; net inputs feed the shred into the FEC resolver; and
   any FEC sets completed by this fragment -- whether completed via the
   net path here, or produced by the leader/PoH path and recorded by
   during_frag in ctx->send_fec_set_idx/ctx->send_fec_set_cnt -- are
   inserted into the store (or sent to the store tile), signalled
   downstream, and retransmitted to the destinations computed from the
   stake weights. */
static void
after_frag( fd_shred_ctx_t *    ctx,
            ulong               in_idx,
            ulong               seq,
            ulong               sig,
            ulong               sz,
            ulong               tsorig,
            ulong               _tspub,
            fd_stem_context_t * stem ) {
  (void)seq;
  (void)sz;
  (void)tsorig;
  (void)_tspub;

  /* during_frag already decided this fragment needs no further work
     (parse failure, not ours in the round-robin, etc.). */
  if( FD_UNLIKELY( ctx->skip_frag ) ) return;

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_CONTACT ) ) {
    finalize_new_cluster_contact_info( ctx );
    return;
  }

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_EPOCH ) ) {
    fd_stake_ci_epoch_msg_fini( ctx->stake_ci );
    return;
  }

  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_STAKE ) ) {
    fd_stake_ci_stake_msg_fini( ctx->stake_ci );
    return;
  }

  /* Root advanced: tell the FEC resolver about the new root so it can
     advance its slot tracking.  0 and ULONG_MAX are treated as
     not-yet-valid sentinels. */
  if( FD_UNLIKELY( (ctx->in_kind[ in_idx ]==IN_KIND_ROOTED) | (ctx->in_kind[ in_idx ]==IN_KIND_ROOTEDH) ) ) {
    if( FD_LIKELY( (ctx->new_root > 0UL) & (ctx->new_root<ULONG_MAX) ) ) fd_fec_resolver_advance_slot_old( ctx->resolver, ctx->new_root );
    return;
  }

  /* Gossip contact-info updates maintain the TVU destination table in
     stake_ci.  A contact info whose TVU socket is IPv6 or zero is
     treated as unusable and removed. */
  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP ) ) {
    if( ctx->gossip_upd_buf->tag==FD_GOSSIP_UPDATE_TAG_CONTACT_INFO ) {
      fd_gossip_contact_info_t const * ci = ctx->gossip_upd_buf->contact_info->value;
      fd_ip4_port_t tvu_addr;
      tvu_addr.addr = ci->sockets[ FD_GOSSIP_CONTACT_INFO_SOCKET_TVU ].is_ipv6 ? 0U : ci->sockets[ FD_GOSSIP_CONTACT_INFO_SOCKET_TVU ].ip4;
      tvu_addr.port = ci->sockets[ FD_GOSSIP_CONTACT_INFO_SOCKET_TVU ].port;
      if( !tvu_addr.l ){
        fd_stake_ci_dest_remove( ctx->stake_ci, fd_type_pun_const( ctx->gossip_upd_buf->origin ) );
      } else {
        fd_stake_ci_dest_update( ctx->stake_ci, fd_type_pun_const( ctx->gossip_upd_buf->origin ), tvu_addr.addr, fd_ushort_bswap( tvu_addr.port ) );
      }
    } else if( ctx->gossip_upd_buf->tag==FD_GOSSIP_UPDATE_TAG_CONTACT_INFO_REMOVE ) {
      if( FD_UNLIKELY( !memcmp( ctx->identity_key->uc, ctx->gossip_upd_buf->origin, 32UL ) ) ) {
        /* If our own contact info was dropped, we update with dummy IP
           instead of removing since stake_ci expects our contact info
           in the sdests table all the time. fd_stake_ci_new initializes
           both ei->sdests with our contact info so this should always
           update (and not append). */
        fd_stake_ci_dest_update( ctx->stake_ci, fd_type_pun_const( ctx->gossip_upd_buf->origin ), 1U, 0U );
      } else {
        fd_stake_ci_dest_remove( ctx->stake_ci, fd_type_pun_const( ctx->gossip_upd_buf->origin ) );
      }
    }
    return;
  }

  if( FD_UNLIKELY( (ctx->in_kind[ in_idx ]==IN_KIND_POH) & (ctx->send_fec_set_cnt==0UL) ) ) {
    /* Entry from PoH that didn't trigger a new FEC set to be made */
    return;
  }
  if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_REPAIR ) ) {
    return;
  }

  ulong fanout = 200UL; /* Default Agave's DATA_PLANE_FANOUT = 200UL */

  if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
    /* during_frag copied the shred into ctx->shred_buffer so it cannot
       be overrun; re-parse from the local copy. */
    uchar * shred_buffer    = ctx->shred_buffer;
    ulong   shred_buffer_sz = ctx->shred_buffer_sz;

    fd_shred_t const * shred = fd_shred_parse( shred_buffer, shred_buffer_sz );

    if( FD_UNLIKELY( !shred ) ) { ctx->metrics->shred_processing_result[ 1 ]++; return; }

    /* We need the leader schedule to know the expected signer of this
       shred; without it (slot outside known epochs) we count and drop. */
    fd_epoch_leaders_t const * lsched = fd_stake_ci_get_lsched_for_slot( ctx->stake_ci, shred->slot );
    if( FD_UNLIKELY( !lsched ) ) { ctx->metrics->shred_processing_result[ 0 ]++; return; }

    fd_pubkey_t const * slot_leader = fd_epoch_leaders_get( lsched, shred->slot );
    if( FD_UNLIKELY( !slot_leader ) ) { ctx->metrics->shred_processing_result[ 0 ]++; return; } /* Count this as bad slot too */

    fd_fec_set_t const * out_fec_set[1];
    fd_shred_t const *   out_shred[1];
    fd_fec_resolver_spilled_t spilled_fec = { 0 };
    int from_repair = 0;

    /* Repair responses carry a trailing 4-byte nonce after the shred
       payload; verify it against our nonce secret so an attacker can't
       forge repair responses. */
    uint  nonce    = UINT_MAX;
    ulong shred_sz = fd_shred_sz( shred );
    if( FD_UNLIKELY( (fd_disco_netmux_sig_proto( sig )==DST_PROTO_REPAIR) & (shred_buffer_sz>=shred_sz+sizeof(uint)) ) ) {
      nonce = FD_LOAD(uint, shred_buffer + shred_sz );
      long est_now_ns = fd_log_wallclock(); /* TODO: switch to fd_clock for performance */
      int nonce_okay = fd_rnonce_ss_verify( ctx->repair_nonce_ss, nonce, shred->slot, shred->idx, est_now_ns );
      ctx->metrics->bad_nonce += (ulong)(!nonce_okay);
      from_repair = nonce_okay;
    }

    /* Hand the shred to the FEC resolver (signature check, dedup, FEC
       set accumulation).  rv classifies the outcome; spilled_fec is
       filled in if accepting this shred evicted an in-progress set. */
    long add_shred_timing = -fd_tickcount();
    int rv = fd_fec_resolver_add_shred( ctx->resolver, shred, shred_buffer_sz, from_repair, slot_leader->uc, out_fec_set, out_shred, &ctx->out_merkle_roots[0], &spilled_fec );
    add_shred_timing += fd_tickcount();

    fd_histf_sample( ctx->metrics->add_shred_timing, (ulong)add_shred_timing );
    ctx->metrics->shred_processing_result[ rv + FD_FEC_RESOLVER_ADD_SHRED_RETVAL_OFF+FD_SHRED_ADD_SHRED_EXTRA_RETVAL_CNT ]++;

    if( FD_UNLIKELY( ctx->shred_out_idx!=ULONG_MAX && /* Only send to repair in full Firedancer */
                     spilled_fec.slot!=0 ) ) {
      /* We've spilled an in-progress FEC set in the fec_resolver. We
         need to let repair know to clear out it's cached info for that
         fec set and re-repair those shreds. */
      fd_fec_evicted_t * evicted_msg = (fd_fec_evicted_t *)fd_type_pun( fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk ) );
      evicted_msg->slot        = spilled_fec.slot;
      evicted_msg->fec_set_idx = spilled_fec.fec_set_idx;

      fd_stem_publish( stem, ctx->shred_out_idx, SHRED_SIG_FEC_EVICTED, ctx->shred_out_chunk, sizeof(fd_fec_evicted_t), 0, ctx->tsorig, fd_frag_meta_ts_comp( fd_tickcount() ) );
      ctx->shred_out_chunk = fd_dcache_compact_next( ctx->shred_out_chunk, sizeof(fd_fec_evicted_t), ctx->shred_out_chunk0, ctx->shred_out_wmark );
    }

    if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX /* Only send to repair/replay in full Firedancer */
                   && ( ( rv==FD_FEC_RESOLVER_SHRED_OKAY      )
                      | ( rv==FD_FEC_RESOLVER_SHRED_COMPLETES )
                      | ( rv==FD_FEC_RESOLVER_SHRED_DUPLICATE )
                      | ( rv==FD_FEC_RESOLVER_SHRED_EQUIVOC   ) ) ) ) {

      /* Construct the sig from fec_resolver result and shred source. */

      ulong _sig = fd_disco_netmux_sig_proto( sig )==DST_PROTO_REPAIR
                   ? ( from_repair /*nonce_okay*/ ? SHRED_SIG_SRC_REPAIR : SHRED_SIG_SRC_BAD_REPAIR )
                   : ( SHRED_SIG_SRC_TURBINE );
      _sig = ((ulong)rv << 32UL) | _sig;

      /* Copy the full shred into the frag and publish. */

      fd_shred_base_t * shred_msg = (fd_shred_base_t *)fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk );
      memcpy( shred_msg->shred_, shred, fd_shred_sz( shred ) );
      memcpy( &shred_msg->merkle_root, ctx->out_merkle_roots[0].hash, sizeof(fd_hash_t) );
      if( FD_UNLIKELY( fd_disco_netmux_sig_proto( sig )==DST_PROTO_REPAIR ) ) { shred_msg->rnonce = nonce; }

      ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
      fd_stem_publish( stem, ctx->shred_out_idx, _sig, ctx->shred_out_chunk, sizeof(fd_shred_base_t), 0UL, ctx->tsorig, tspub );
      ctx->shred_out_chunk = fd_dcache_compact_next( ctx->shred_out_chunk, sizeof(fd_shred_base_t), ctx->shred_out_chunk0, ctx->shred_out_wmark );
    }

    /* Retransmit newly validated turbine shreds (repair responses are
       never retransmitted). */
    if( FD_LIKELY( fd_disco_netmux_sig_proto( sig ) != DST_PROTO_REPAIR &&
                   ( (rv==FD_FEC_RESOLVER_SHRED_OKAY) | (rv==FD_FEC_RESOLVER_SHRED_COMPLETES) ) ) ) {
      /* Relay this shred */
      ulong max_dest_cnt[1];
      do {
        /* If we've validated the shred and it COMPLETES but we can't
           compute the destination for whatever reason, don't forward
           the shred, but still send it to the blockstore. */
        fd_shred_dest_t * sdest = fd_stake_ci_get_sdest_for_slot( ctx->stake_ci, shred->slot );
        if( FD_UNLIKELY( !sdest ) ) break;
        fd_shred_dest_idx_t * dests = fd_shred_dest_compute_children( sdest, &shred, 1UL, ctx->scratchpad_dests, 1UL, fanout, fanout, max_dest_cnt );
        if( FD_UNLIKELY( !dests ) ) break;

        for( ulong i=0UL; i<ctx->adtl_dests_retransmit_cnt; i++ ) send_shred( ctx, stem, *out_shred, ctx->adtl_dests_retransmit+i, ctx->tsorig );
        for( ulong j=0UL; j<*max_dest_cnt; j++ ) send_shred( ctx, stem, *out_shred, fd_shred_dest_idx_to_dest( sdest, dests[ j ] ), ctx->tsorig );
      } while( 0 );
    }

    if( FD_LIKELY( rv!=FD_FEC_RESOLVER_SHRED_COMPLETES ) ) return;

    /* The shred completed a FEC set; record it so the common tail
       below stores and fans it out. */
    FD_TEST( ctx->fec_sets <= *out_fec_set );
    ctx->send_fec_set_idx[ 0UL ] = (ulong)(*out_fec_set - ctx->fec_sets);
    ctx->send_fec_set_cnt = 1UL;
    ctx->shredded_txn_cnt = 0UL;
  }

  if( FD_UNLIKELY( ctx->send_fec_set_cnt==0UL ) ) return;

  /* Try to distribute shredded txn count across the fec sets.
     This is an approximation, but it is acceptable. */
  ulong shredded_txn_cnt_per_fec_set  = ctx->shredded_txn_cnt / ctx->send_fec_set_cnt;
  ulong shredded_txn_cnt_remain       = ctx->shredded_txn_cnt - shredded_txn_cnt_per_fec_set * ctx->send_fec_set_cnt;
  ulong shredded_txn_cnt_last_fec_set = shredded_txn_cnt_per_fec_set + shredded_txn_cnt_remain;

  /* If this shred completes a FEC set or is part of a microblock from
     pack (ie. we're leader), we now have a full FEC set: so we notify
     repair and insert into the blockstore, as well as retransmit. */

  for( ulong fset_k=0; fset_k<ctx->send_fec_set_cnt; fset_k++ ) {

    fd_fec_set_t * set = ctx->fec_sets + ctx->send_fec_set_idx[ fset_k ];

    fd_shred_t const * last = set->data_shreds[ FD_FEC_SHRED_CNT - 1 ].s;

    /* Compute merkle root and chained merkle root. */

    if( FD_LIKELY( ctx->store ) ) { /* firedancer-only */

      /* Insert shreds into the store. We do this regardless of whether
         we are leader. */

      fd_store_fec_t * fec = fd_store_insert( ctx->store, ctx->round_robin_id, (fd_hash_t *)fd_type_pun( &ctx->out_merkle_roots[fset_k] ) );

      /* Firedancer is configured such that the store never fills up, as
         the reasm is responsible for also evicting from store (based on
         its eviction policy, see fd_reasm.h). fec is only NULL when the
         store is full, so this is either a bug or misconfiguration. */

      if( FD_UNLIKELY( !fec ) ) FD_LOG_CRIT(( "store full" ));

      /* It's safe to memcpy the FEC payload outside of the shared lock,
         because the store ele is guaranteed to remain valid here. It
         is not possible for a fd_store_remove to interleave, because
         remove is only called by replay_tile, which (crucially) is only
         sent this FEC via stem publish after we have finished copying.

         Copying outside the shared lock scope also means that we can
         lower the duration for which the shared lock is held, and
         enables replay to acquire the exclusive lock for removes
         without getting starved. */

      if( FD_LIKELY( !fec->data_sz ) ) {
        /* if data_sz is non-zero, we've already inserted this FEC set into the store */
        for( ulong i=0UL; i<FD_FEC_SHRED_CNT; i++ ) {
          fd_shred_t * data_shred = set->data_shreds[i].s;
          ulong payload_sz = fd_shred_payload_sz( data_shred );
          if( FD_UNLIKELY( fec->data_sz + payload_sz > ctx->store->fec_data_max ) ) {

            /* This code is only reachable if shred tile has completed the
               FEC set, which implies it was able to validate it, yet
               somehow the total payload sz of this FEC set exceeds the
               maximum payload sz. This indicates either a serious bug or
               shred tile is compromised so FD_LOG_CRIT. */

            FD_LOG_CRIT(( "Shred tile %lu: completed FEC set %lu %u data_sz: %lu exceeds data_max: %lu. Ignoring FEC set.", ctx->round_robin_id, data_shred->slot, data_shred->fec_set_idx, fec->data_sz + payload_sz, ctx->store->fec_data_max ));
          }
          fd_memcpy( fd_store_fec_data( ctx->store, fec ) + fec->data_sz, fd_shred_data_payload( data_shred ), payload_sz );
          fec->data_sz += payload_sz;
          /* Running prefix sum of the first 32 payload sizes --
             presumably lets consumers locate individual shred payloads
             within the concatenated FEC data; TODO confirm against the
             store's readers. */
          if( FD_LIKELY( i<32UL ) ) fec->shred_offs[ i ] = (uint)payload_sz + (i==0UL ? 0U : fec->shred_offs[ i-1UL ]);
        }
      }
    }

    if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* firedancer-only */

      /* Additionally, publish a frag to notify repair and replay that
         the FEC set is complete. Note the ordering wrt store shred
         insertion above is intentional: shreds are inserted into the
         store before notifying repair and replay. This is because the
         replay tile assumes the shreds are already in the store when
         replay gets a notification from the shred tile that the FEC is
         complete. We don't know whether shred will finish inserting
         into store first or repair will finish validating the FEC set
         first. The header and merkle root of the last shred in the FEC
         set are sent as part of this frag.

         This message, the shred msg, and the FEC evict msg constitute
         the max 3 possible messages to repair/replay per after_frag.
         In reality, it is only possible to publish all 3 in the case
         where we receive a coding shred first for a FEC set where
         (N=1,K=18), which allows for the FEC set to be instantly
         completed by the singular coding shred, and that also happens
         to evict a FEC set from the curr_map. When fix-32 arrives, the
         link burst value can be lowered to 2. */

      /* Note: this local sig intentionally shadows the parameter sig,
         which is not needed past this point. */
      ulong sig = ctx->in_kind[ in_idx ]==IN_KIND_POH ? SHRED_SIG_FEC_COMPLETE_LEADER : SHRED_SIG_FEC_COMPLETE;

      fd_fec_complete_t * complete_msg = fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk );
      complete_msg->last_shred_hdr = *last;
      memcpy( &complete_msg->merkle_root, ctx->out_merkle_roots[fset_k].hash, sizeof(fd_hash_t) );
      complete_msg->chained_merkle_root = *(fd_hash_t *)fd_type_pun((uchar *)last + fd_shred_chain_off( last->variant ));

      fd_stem_publish( stem, ctx->shred_out_idx, sig, ctx->shred_out_chunk, sizeof(fd_fec_complete_t), 0UL, ctx->tsorig, fd_frag_meta_ts_comp( fd_tickcount() ) );
      ctx->shred_out_chunk = fd_dcache_compact_next( ctx->shred_out_chunk, sizeof(fd_fec_complete_t), ctx->shred_out_chunk0, ctx->shred_out_wmark );

    } else if( FD_UNLIKELY( ctx->store_out_idx != ULONG_MAX ) ) { /* frankendancer-only */

      /* Send to the blockstore */

      ulong txn_cnt = fd_ulong_if( fset_k==ctx->send_fec_set_cnt-1UL, shredded_txn_cnt_last_fec_set, shredded_txn_cnt_per_fec_set );
      /* If the low 32 bits of sig are 0, the store tile will do extra
         checks */
      ulong new_sig = txn_cnt<<32 | (ulong)(ctx->in_kind[ in_idx ]!=IN_KIND_NET);
      ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
      /* The size is actually slightly larger than USHORT_MAX, but the store tile
         knows to use sizeof(fd_fec_set_t) instead of the sz field. Put
         USHORT_MAX so that monitoring tools are at least close. */
      ulong sz = fd_ulong_min( sizeof(fd_fec_set_t), USHORT_MAX );
      fd_stem_publish( stem, 0UL, new_sig, fd_laddr_to_chunk( ctx->store_out_mem, set ), sz, 0UL, ctx->tsorig, tspub );
    }

    /* Compute all the destinations for all the new shreds */

    /* "New" = shreds we did not receive off the wire (rcvd bits clear),
       i.e. shreds recovered by erasure coding or generated as leader;
       only those need to be transmitted. */
    fd_shred_t const * new_shreds[ FD_REEDSOL_DATA_SHREDS_MAX+FD_REEDSOL_PARITY_SHREDS_MAX ];
    ulong k=0UL;
    for( ulong i=0UL; i<FD_FEC_SHRED_CNT; i++ )
      if( !(set->data_shred_rcvd   & (1U<<i)) ) new_shreds[ k++ ] = set->data_shreds  [ i ].s;
    for( ulong i=0UL; i<FD_FEC_SHRED_CNT; i++ )
      if( !(set->parity_shred_rcvd & (1U<<i)) ) new_shreds[ k++ ] = set->parity_shreds[ i ].s;

    if( FD_UNLIKELY( !k ) ) return;
    fd_shred_dest_t * sdest = fd_stake_ci_get_sdest_for_slot( ctx->stake_ci, new_shreds[ 0 ]->slot );
    if( FD_UNLIKELY( !sdest ) ) return;

    /* Non-leader (net) path retransmits to our turbine children;
       leader path sends each shred to the first (root) destination. */
    ulong out_stride;
    ulong max_dest_cnt[1];
    fd_shred_dest_idx_t * dests;
    if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET ) ) {
      for( ulong i=0UL; i<k; i++ ) {
        for( ulong j=0UL; j<ctx->adtl_dests_retransmit_cnt; j++ ) send_shred( ctx, stem, new_shreds[ i ], ctx->adtl_dests_retransmit+j, ctx->tsorig );
      }
      out_stride = k;
      /* In the case of feature activation, the fanout used below is
         the same as the one calculated/modified previously at the
         beginning of after_frag() for IN_KIND_NET in this slot. */
      dests = fd_shred_dest_compute_children( sdest, new_shreds, k, ctx->scratchpad_dests, k, fanout, fanout, max_dest_cnt );
    } else {
      for( ulong i=0UL; i<k; i++ ) {
        for( ulong j=0UL; j<ctx->adtl_dests_leader_cnt; j++ ) send_shred( ctx, stem, new_shreds[ i ], ctx->adtl_dests_leader+j, ctx->tsorig );
      }
      out_stride = 1UL;
      *max_dest_cnt = 1UL;
      dests = fd_shred_dest_compute_first( sdest, new_shreds, k, ctx->scratchpad_dests );
    }
    if( FD_UNLIKELY( !dests ) ) return;

    /* Send only the ones we didn't receive. */
    for( ulong i=0UL; i<k; i++ ) {
      for( ulong j=0UL; j<*max_dest_cnt; j++ ) send_shred( ctx, stem, new_shreds[ i ], fd_shred_dest_idx_to_dest( sdest, dests[ j*out_stride+i ]), ctx->tsorig );
    }
  }
}
1175 :
1176 : static void
1177 : privileged_init( fd_topo_t * topo,
1178 0 : fd_topo_tile_t * tile ) {
1179 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1180 0 : FD_TEST( scratch!=NULL );
1181 :
1182 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1183 0 : fd_shred_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_shred_ctx_t ), sizeof( fd_shred_ctx_t ) );
1184 :
1185 0 : if( FD_UNLIKELY( !strcmp( tile->shred.identity_key_path, "" ) ) )
1186 0 : FD_LOG_ERR(( "identity_key_path not set" ));
1187 :
1188 0 : ctx->identity_key[ 0 ] = *(fd_pubkey_t const *)fd_type_pun_const( fd_keyload_load( tile->shred.identity_key_path, /* pubkey only: */ 1 ) );
1189 :
1190 0 : if( FD_UNLIKELY( !fd_rng_secure( &(ctx->resolver_seed), sizeof(ulong) ) ) ) {
1191 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
1192 0 : }
1193 : /* This is only needed in frankendancer, but we'll overwrite it with
1194 : the value the repair tile generated in full firedancer. */
1195 0 : if( FD_UNLIKELY( !fd_rng_secure( ctx->repair_nonce_ss->bytes, sizeof(fd_rnonce_ss_t) ) ) ) {
1196 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
1197 0 : }
1198 0 : }
1199 :
1200 : static void
1201 : fd_shred_signer( void * signer_ctx,
1202 : uchar signature[ static 64 ],
1203 0 : uchar const merkle_root[ static 32 ] ) {
1204 0 : fd_keyguard_client_sign( signer_ctx, signature, merkle_root, 32UL, FD_KEYGUARD_SIGN_TYPE_ED25519 );
1205 0 : }
1206 :
1207 : static void
1208 : unprivileged_init( fd_topo_t * topo,
1209 0 : fd_topo_tile_t * tile ) {
1210 :
1211 0 : FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ NET_OUT_IDX ]].name, "shred_net" ) );
1212 0 : FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ SIGN_OUT_IDX ]].name, "shred_sign" ) );
1213 :
1214 0 : if( FD_UNLIKELY( !tile->out_cnt ) )
1215 0 : FD_LOG_ERR(( "shred tile has no primary output link" ));
1216 :
1217 0 : ulong shred_store_mcache_depth = tile->shred.depth;
1218 0 : if( topo->links[ tile->out_link_id[ 0 ] ].depth != shred_store_mcache_depth )
1219 0 : FD_LOG_ERR(( "shred tile out depths are not equal %lu %lu",
1220 0 : topo->links[ tile->out_link_id[ 0 ] ].depth, shred_store_mcache_depth ));
1221 :
1222 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
1223 0 : FD_TEST( scratch!=NULL );
1224 :
1225 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
1226 0 : fd_shred_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_shred_ctx_t ), sizeof( fd_shred_ctx_t ) );
1227 :
1228 0 : ctx->round_robin_cnt = fd_topo_tile_name_cnt( topo, tile->name );
1229 0 : ctx->round_robin_id = tile->kind_id;
1230 0 : ctx->batch_cnt = 0UL;
1231 0 : ctx->slot = ULONG_MAX;
1232 :
1233 : /* If the default partial_depth is ever changed, correspondingly
1234 : change the size of the fd_fec_intra_pool in fd_fec_repair. */
1235 0 : ulong fec_resolver_footprint = fd_fec_resolver_footprint( tile->shred.fec_resolver_depth, 1UL, shred_store_mcache_depth + 1UL,
1236 0 : 128UL * tile->shred.fec_resolver_depth );
1237 : /* See long comment at the top of this file for the computation of
1238 : fec_set_cnt. */
1239 0 : ulong fec_set_cnt = 2UL*shred_store_mcache_depth + tile->shred.fec_resolver_depth + FD_SHRED_BATCH_FEC_SETS_MAX + 2UL;
1240 0 : ulong fec_sets_required_sz = fec_set_cnt*sizeof(fd_fec_set_t);
1241 :
1242 0 : void * fec_sets_shmem = NULL;
1243 0 : ctx->shred_out_idx = fd_topo_find_tile_out_link( topo, tile, "shred_out", ctx->round_robin_id );
1244 0 : ctx->store_out_idx = fd_topo_find_tile_out_link( topo, tile, "shred_store", ctx->round_robin_id );
1245 0 : if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* firedancer-only */
1246 0 : fd_topo_link_t * shred_out = &topo->links[ tile->out_link_id[ ctx->shred_out_idx ] ];
1247 0 : ctx->shred_out_mem = topo->workspaces[ topo->objs[ shred_out->dcache_obj_id ].wksp_id ].wksp;
1248 0 : ctx->shred_out_chunk0 = fd_dcache_compact_chunk0( ctx->shred_out_mem, shred_out->dcache );
1249 0 : ctx->shred_out_wmark = fd_dcache_compact_wmark ( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu );
1250 0 : ctx->shred_out_chunk = ctx->shred_out_chunk0;
1251 0 : FD_TEST( fd_dcache_compact_is_safe( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu, shred_out->depth ) );
1252 0 : ulong fec_sets_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "fec_sets" );
1253 0 : if( FD_UNLIKELY( fec_sets_obj_id == ULONG_MAX ) ) FD_LOG_ERR(( "invalid firedancer topo" ));
1254 0 : fd_topo_obj_t const * obj = &topo->objs[ fec_sets_obj_id ];
1255 0 : if( FD_UNLIKELY( obj->footprint<(fec_sets_required_sz*ctx->round_robin_cnt) ) ) {
1256 0 : FD_LOG_ERR(( "fec_sets wksp obj too small. It is %lu bytes but must be at least %lu bytes. ",
1257 0 : obj->footprint,
1258 0 : fec_sets_required_sz ));
1259 0 : }
1260 0 : fec_sets_shmem = (uchar *)fd_topo_obj_laddr( topo, fec_sets_obj_id ) + (ctx->round_robin_id * fec_sets_required_sz);
1261 :
1262 : /* Initialize the rnonce. The repair tile sets it, so we can only
1263 : do this in firedancer mode. In frankendancer mode, we initialize
1264 : it randomly in privileged_init just so that an attacker can't
1265 : guess it. */
1266 0 : FD_LOG_DEBUG(( "Loading rnonce_ss" ));
1267 0 : ulong rnonce_ss_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "rnonce_ss" );
1268 0 : FD_TEST( rnonce_ss_id!=ULONG_MAX );
1269 0 : void const * shared_rnonce = fd_topo_obj_laddr( topo, rnonce_ss_id );
1270 0 : ulong * nonce_initialized = (ulong *)(sizeof(fd_rnonce_ss_t)+(uchar const *)shared_rnonce);
1271 0 : while( !FD_VOLATILE_CONST( *nonce_initialized ) ) FD_SPIN_PAUSE();
1272 0 : FD_COMPILER_MFENCE();
1273 0 : memcpy( ctx->repair_nonce_ss, shared_rnonce, sizeof(fd_rnonce_ss_t) );
1274 0 : FD_LOG_DEBUG(( "Loaded rnonce_ss" ));
1275 :
1276 0 : } else if ( FD_LIKELY( ctx->store_out_idx!=ULONG_MAX ) ) { /* frankendancer-only */
1277 0 : FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ ctx->store_out_idx ]].name, "shred_store" ) );
1278 0 : fec_sets_shmem = topo->links[ tile->out_link_id[ ctx->store_out_idx ] ].dcache;
1279 0 : if( FD_UNLIKELY( fd_dcache_data_sz( fec_sets_shmem )<fec_sets_required_sz ) ) {
1280 0 : FD_LOG_ERR(( "shred_store dcache too small. It is %lu bytes but must be at least %lu bytes. ",
1281 0 : fd_dcache_data_sz( fec_sets_shmem ),
1282 0 : fec_sets_required_sz ));
1283 0 : }
1284 0 : }
1285 :
1286 0 : if( FD_UNLIKELY( !tile->shred.fec_resolver_depth ) ) FD_LOG_ERR(( "fec_resolver_depth not set" ));
1287 0 : if( FD_UNLIKELY( !tile->shred.shred_listen_port ) ) FD_LOG_ERR(( "shred_listen_port not set" ));
1288 :
1289 0 : void * _stake_ci = FD_SCRATCH_ALLOC_APPEND( l, fd_stake_ci_align(), fd_stake_ci_footprint() );
1290 0 : void * _resolver = FD_SCRATCH_ALLOC_APPEND( l, fd_fec_resolver_align(), fec_resolver_footprint );
1291 0 : void * _shredder = FD_SCRATCH_ALLOC_APPEND( l, fd_shredder_align(), fd_shredder_footprint() );
1292 :
1293 0 : fd_fec_set_t * fec_sets = (fd_fec_set_t *)fec_sets_shmem;
1294 :
1295 0 : #define NONNULL( x ) (__extension__({ \
1296 0 : __typeof__((x)) __x = (x); \
1297 0 : if( FD_UNLIKELY( !__x ) ) FD_LOG_ERR(( #x " was unexpectedly NULL" )); \
1298 0 : __x; }))
1299 :
1300 0 : int has_ipecho_in = fd_topo_find_tile_in_link( topo, tile, "ipecho_out", 0UL )!=ULONG_MAX;
1301 0 : ushort expected_shred_version = tile->shred.expected_shred_version;
1302 0 : if( FD_UNLIKELY( !has_ipecho_in && !expected_shred_version ) ) {
1303 0 : ulong busy_obj_id = fd_pod_query_ulong( topo->props, "pohh_shred", ULONG_MAX );
1304 0 : FD_TEST( busy_obj_id!=ULONG_MAX );
1305 0 : ulong * gossip_shred_version = fd_fseq_join( fd_topo_obj_laddr( topo, busy_obj_id ) );
1306 0 : FD_LOG_INFO(( "Waiting for shred version to be determined via gossip." ));
1307 0 : ulong _expected_shred_version = ULONG_MAX;
1308 0 : do {
1309 0 : _expected_shred_version = FD_VOLATILE_CONST( *gossip_shred_version );
1310 0 : } while( _expected_shred_version==ULONG_MAX );
1311 :
1312 0 : if( FD_UNLIKELY( _expected_shred_version>USHORT_MAX ) ) FD_LOG_ERR(( "invalid shred version %lu", _expected_shred_version ));
1313 0 : FD_LOG_INFO(( "Using shred version %hu", (ushort)_expected_shred_version ));
1314 0 : expected_shred_version = (ushort)_expected_shred_version;
1315 0 : }
1316 :
1317 0 : ctx->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->id_keyswitch_obj_id ) );
1318 0 : FD_TEST( ctx->keyswitch );
1319 :
1320 : /* populate ctx */
1321 0 : ulong sign_in_idx = fd_topo_find_tile_in_link( topo, tile, "sign_shred", tile->kind_id );
1322 0 : FD_TEST( sign_in_idx!=ULONG_MAX );
1323 0 : fd_topo_link_t * sign_in = &topo->links[ tile->in_link_id[ sign_in_idx ] ];
1324 0 : fd_topo_link_t * sign_out = &topo->links[ tile->out_link_id[ SIGN_OUT_IDX ] ];
1325 0 : NONNULL( fd_keyguard_client_join( fd_keyguard_client_new( ctx->keyguard_client,
1326 0 : sign_out->mcache,
1327 0 : sign_out->dcache,
1328 0 : sign_in->mcache,
1329 0 : sign_in->dcache,
1330 0 : sign_out->mtu ) ) );
1331 :
1332 0 : ulong shred_limit = fd_ulong_if( tile->shred.larger_shred_limits_per_block, 32UL*32UL*1024UL, 32UL*1024UL );
1333 0 : fd_fec_set_t * resolver_sets = fec_sets + shred_store_mcache_depth + FD_SHRED_BATCH_FEC_SETS_MAX;
1334 0 : ctx->shredder = NONNULL( fd_shredder_join ( fd_shredder_new ( _shredder, fd_shred_signer, ctx->keyguard_client ) ) );
1335 0 : ctx->resolver = NONNULL( fd_fec_resolver_join ( fd_fec_resolver_new ( _resolver,
1336 0 : fd_shred_signer, ctx->keyguard_client,
1337 0 : tile->shred.fec_resolver_depth, 1UL,
1338 0 : shred_store_mcache_depth+1UL,
1339 0 : 128UL * tile->shred.fec_resolver_depth, resolver_sets,
1340 0 : shred_limit,
1341 0 : ctx->resolver_seed ) ) );
1342 :
1343 0 : if( FD_LIKELY( !!expected_shred_version ) ) {
1344 0 : fd_shredder_set_shred_version ( ctx->shredder, expected_shred_version );
1345 0 : fd_fec_resolver_set_shred_version( ctx->resolver, expected_shred_version );
1346 0 : }
1347 :
1348 0 : ctx->fec_sets = fec_sets;
1349 :
1350 0 : ctx->stake_ci = fd_stake_ci_join( fd_stake_ci_new( _stake_ci, ctx->identity_key ) );
1351 :
1352 0 : ctx->net_id = (ushort)0;
1353 :
1354 0 : fd_ip4_udp_hdr_init( ctx->data_shred_net_hdr, FD_SHRED_MIN_SZ, 0, tile->shred.shred_listen_port );
1355 0 : fd_ip4_udp_hdr_init( ctx->parity_shred_net_hdr, FD_SHRED_MAX_SZ, 0, tile->shred.shred_listen_port );
1356 :
1357 0 : ctx->adtl_dests_retransmit_cnt = tile->shred.adtl_dests_retransmit_cnt;
1358 0 : for( ulong i=0UL; i<ctx->adtl_dests_retransmit_cnt; i++) {
1359 0 : ctx->adtl_dests_retransmit[ i ].ip4 = tile->shred.adtl_dests_retransmit[ i ].ip;
1360 0 : ctx->adtl_dests_retransmit[ i ].port = tile->shred.adtl_dests_retransmit[ i ].port;
1361 0 : }
1362 0 : ctx->adtl_dests_leader_cnt = tile->shred.adtl_dests_leader_cnt;
1363 0 : for( ulong i=0UL; i<ctx->adtl_dests_leader_cnt; i++) {
1364 0 : ctx->adtl_dests_leader[i].ip4 = tile->shred.adtl_dests_leader[i].ip;
1365 0 : ctx->adtl_dests_leader[i].port = tile->shred.adtl_dests_leader[i].port;
1366 0 : }
1367 :
1368 0 : uchar has_contact_info_in = 0;
1369 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
1370 0 : fd_topo_link_t const * link = &topo->links[ tile->in_link_id[ i ] ];
1371 0 : fd_topo_wksp_t const * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
1372 :
1373 0 : if( FD_LIKELY( !strcmp( link->name, "net_shred" ) ) ) {
1374 0 : ctx->in_kind[ i ] = IN_KIND_NET;
1375 0 : fd_net_rx_bounds_init( &ctx->in[ i ].net_rx, link->dcache );
1376 0 : continue; /* only net_rx needs to be set in this case. */
1377 0 : }
1378 0 : else if( FD_LIKELY( !strcmp( link->name, "poh_shred" ) ) ) ctx->in_kind[ i ] = IN_KIND_POH; /* Firedancer */
1379 0 : else if( FD_LIKELY( !strcmp( link->name, "pohh_shred" ) ) ) ctx->in_kind[ i ] = IN_KIND_POH; /* Frankendancer */
1380 0 : else if( FD_LIKELY( !strcmp( link->name, "stake_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_STAKE; /* Frankendancer */
1381 0 : else if( FD_LIKELY( !strcmp( link->name, "replay_epoch" ) ) ) ctx->in_kind[ i ] = IN_KIND_EPOCH; /* Firedancer */
1382 0 : else if( FD_LIKELY( !strcmp( link->name, "sign_shred" ) ) ) ctx->in_kind[ i ] = IN_KIND_SIGN;
1383 0 : else if( FD_LIKELY( !strcmp( link->name, "ipecho_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_IPECHO;
1384 0 : else if( FD_LIKELY( !strcmp( link->name, "tower_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_ROOTED;
1385 0 : else if( FD_LIKELY( !strcmp( link->name, "replay_resol" ) ) ) ctx->in_kind[ i ] = IN_KIND_ROOTEDH;
1386 0 : else if( FD_LIKELY( !strcmp( link->name, "crds_shred" ) ) ) { ctx->in_kind[ i ] = IN_KIND_CONTACT;
1387 0 : if( FD_UNLIKELY( has_contact_info_in ) ) FD_LOG_ERR(( "shred tile has multiple contact info in link types, can only be either gossip_out or crds_shred" ));
1388 0 : has_contact_info_in = 1;
1389 0 : }
1390 0 : else if( FD_LIKELY( !strcmp( link->name, "gossip_out" ) ) ) { ctx->in_kind[ i ] = IN_KIND_GOSSIP;
1391 0 : if( FD_UNLIKELY( has_contact_info_in ) ) FD_LOG_ERR(( "shred tile has multiple contact info in link types, can only be either gossip_out or crds_shred" ));
1392 0 : has_contact_info_in = 1;
1393 0 : }
1394 :
1395 0 : else FD_LOG_ERR(( "shred tile has unexpected input link %lu %s", i, link->name ));
1396 :
1397 0 : if( FD_LIKELY( !!link->mtu ) ) {
1398 0 : ctx->in[ i ].mem = link_wksp->wksp;
1399 0 : ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
1400 0 : ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
1401 0 : }
1402 0 : }
1403 :
1404 0 : fd_topo_link_t * net_out = &topo->links[ tile->out_link_id[ NET_OUT_IDX ] ];
1405 :
1406 0 : ctx->net_out_chunk0 = fd_dcache_compact_chunk0( fd_wksp_containing( net_out->dcache ), net_out->dcache );
1407 0 : ctx->net_out_mem = topo->workspaces[ topo->objs[ net_out->dcache_obj_id ].wksp_id ].wksp;
1408 0 : ctx->net_out_wmark = fd_dcache_compact_wmark ( ctx->net_out_mem, net_out->dcache, net_out->mtu );
1409 0 : ctx->net_out_chunk = ctx->net_out_chunk0;
1410 :
1411 0 : ctx->store = NULL;
1412 0 : ulong store_obj_id = fd_pod_queryf_ulong( topo->props, ULONG_MAX, "store" );
1413 0 : if( FD_LIKELY( store_obj_id!=ULONG_MAX ) ) { /* firedancer-only */
1414 0 : ctx->store = fd_store_join( fd_topo_obj_laddr( topo, store_obj_id ) );
1415 0 : FD_TEST( ctx->store->magic==FD_STORE_MAGIC );
1416 0 : FD_TEST( ctx->store->part_cnt==ctx->round_robin_cnt ); /* single-writer (shred tile) per store part */
1417 0 : FD_TEST( !fd_store_verify( ctx->store ) );
1418 0 : }
1419 :
1420 0 : if( FD_LIKELY( ctx->shred_out_idx!=ULONG_MAX ) ) { /* firedancer-only */
1421 0 : fd_topo_link_t * shred_out = &topo->links[ tile->out_link_id[ ctx->shred_out_idx ] ];
1422 0 : ctx->shred_out_mem = topo->workspaces[ topo->objs[ shred_out->dcache_obj_id ].wksp_id ].wksp;
1423 0 : ctx->shred_out_chunk0 = fd_dcache_compact_chunk0( ctx->shred_out_mem, shred_out->dcache );
1424 0 : ctx->shred_out_wmark = fd_dcache_compact_wmark ( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu );
1425 0 : ctx->shred_out_chunk = ctx->shred_out_chunk0;
1426 0 : FD_TEST( fd_dcache_compact_is_safe( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu, shred_out->depth ) );
1427 0 : }
1428 :
1429 0 : if( FD_LIKELY( ctx->store_out_idx!=ULONG_MAX ) ) { /* frankendancer-only */
1430 0 : fd_topo_link_t * store_out = &topo->links[ tile->out_link_id[ ctx->store_out_idx ] ];
1431 0 : ctx->store_out_mem = topo->workspaces[ topo->objs[ store_out->dcache_obj_id ].wksp_id ].wksp;
1432 0 : ctx->store_out_chunk0 = fd_dcache_compact_chunk0( ctx->store_out_mem, store_out->dcache );
1433 0 : ctx->store_out_wmark = fd_dcache_compact_wmark ( ctx->store_out_mem, store_out->dcache, store_out->mtu );
1434 0 : ctx->store_out_chunk = ctx->store_out_chunk0;
1435 0 : FD_TEST( fd_dcache_compact_is_safe( ctx->store_out_mem, store_out->dcache, store_out->mtu, store_out->depth ) );
1436 0 : }
1437 :
1438 0 : ctx->poh_in_expect_seq = 0UL;
1439 :
1440 0 : ctx->shredder_fec_set_idx = 0UL;
1441 0 : ctx->shredder_max_fec_set_idx = shred_store_mcache_depth + FD_SHRED_BATCH_FEC_SETS_MAX;
1442 :
1443 0 : ctx->chained_merkle_root = NULL;
1444 0 : memset( ctx->out_merkle_roots, 0, sizeof(ctx->out_merkle_roots) );
1445 :
1446 0 : for( ulong i=0UL; i<FD_SHRED_BATCH_FEC_SETS_MAX; i++ ) { ctx->send_fec_set_idx[ i ] = ULONG_MAX; }
1447 0 : ctx->send_fec_set_cnt = 0UL;
1448 :
1449 0 : ctx->shred_buffer_sz = 0UL;
1450 0 : memset( ctx->shred_buffer, 0xFF, FD_NET_MTU );
1451 :
1452 0 : fd_histf_join( fd_histf_new( ctx->metrics->contact_info_cnt, FD_MHIST_MIN( SHRED, CLUSTER_CONTACT_INFO_CNT ),
1453 0 : FD_MHIST_MAX( SHRED, CLUSTER_CONTACT_INFO_CNT ) ) );
1454 0 : fd_histf_join( fd_histf_new( ctx->metrics->batch_sz, FD_MHIST_MIN( SHRED, BATCH_SZ ),
1455 0 : FD_MHIST_MAX( SHRED, BATCH_SZ ) ) );
1456 0 : fd_histf_join( fd_histf_new( ctx->metrics->batch_microblock_cnt, FD_MHIST_MIN( SHRED, BATCH_MICROBLOCK_CNT ),
1457 0 : FD_MHIST_MAX( SHRED, BATCH_MICROBLOCK_CNT ) ) );
1458 0 : fd_histf_join( fd_histf_new( ctx->metrics->shredding_timing, FD_MHIST_SECONDS_MIN( SHRED, SHREDDING_DURATION_SECONDS ),
1459 0 : FD_MHIST_SECONDS_MAX( SHRED, SHREDDING_DURATION_SECONDS ) ) );
1460 0 : fd_histf_join( fd_histf_new( ctx->metrics->add_shred_timing, FD_MHIST_SECONDS_MIN( SHRED, ADD_SHRED_DURATION_SECONDS ),
1461 0 : FD_MHIST_SECONDS_MAX( SHRED, ADD_SHRED_DURATION_SECONDS ) ) );
1462 0 : memset( ctx->metrics->shred_processing_result, '\0', sizeof(ctx->metrics->shred_processing_result) );
1463 0 : ctx->metrics->invalid_block_id_cnt = 0UL;
1464 0 : ctx->metrics->shred_rejected_unchained_cnt = 0UL;
1465 0 : ctx->metrics->repair_rcv_cnt = 0UL;
1466 0 : ctx->metrics->repair_rcv_bytes = 0UL;
1467 0 : ctx->metrics->turbine_rcv_cnt = 0UL;
1468 0 : ctx->metrics->turbine_rcv_bytes = 0UL;
1469 0 : ctx->metrics->bad_nonce = 0UL;
1470 :
1471 0 : ctx->pending_batch.microblock_cnt = 0UL;
1472 0 : ctx->pending_batch.txn_cnt = 0UL;
1473 0 : ctx->pending_batch.pos = 0UL;
1474 0 : ctx->pending_batch.slot = 0UL;
1475 0 : memset( ctx->pending_batch.payload, 0, sizeof(ctx->pending_batch.payload) );
1476 :
1477 0 : memset( ctx->epoch_schedule, 0, sizeof(ctx->epoch_schedule) );
1478 0 : for( ulong i=0UL; i<FD_SHRED_FEATURES_ACTIVATION_SLOT_CNT; i++ ) {
1479 0 : ctx->features_activation->slots[i] = FD_SHRED_FEATURES_ACTIVATION_SLOT_DISABLED;
1480 0 : }
1481 :
1482 0 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
1483 0 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
1484 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
1485 :
1486 0 : memset( ctx->block_ids, 0, sizeof(ctx->block_ids) );
1487 0 : }
1488 :
1489 : static ulong
1490 : populate_allowed_seccomp( fd_topo_t const * topo,
1491 : fd_topo_tile_t const * tile,
1492 : ulong out_cnt,
1493 0 : struct sock_filter * out ) {
1494 0 : (void)topo;
1495 0 : (void)tile;
1496 :
1497 0 : populate_sock_filter_policy_fd_shred_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
1498 0 : return sock_filter_policy_fd_shred_tile_instr_cnt;
1499 0 : }
1500 :
1501 : static ulong
1502 : populate_allowed_fds( fd_topo_t const * topo,
1503 : fd_topo_tile_t const * tile,
1504 : ulong out_fds_cnt,
1505 0 : int * out_fds ) {
1506 0 : (void)topo;
1507 0 : (void)tile;
1508 :
1509 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
1510 :
1511 0 : ulong out_cnt = 0UL;
1512 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
1513 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
1514 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
1515 0 : return out_cnt;
1516 0 : }
1517 :
/* Excluding net_out (where the link is unreliable), STEM_BURST needs
   to guarantee enough credits for the worst case. There are 4 cases
   to consider: (IN_KIND_NET/IN_KIND_POH) x (Frankendancer/Firedancer)
   In the IN_KIND_NET case: (Frankendancer) sends 1 frag to
   store; (Firedancer) that is one frag for the shred to repair, and
   then another frag to repair for the FEC set.
   In the IN_KIND_POH case: (Frankendancer) there might be
   FD_SHRED_BATCH_FEC_SETS_MAX FEC sets; (Firedancer) that is
   FD_SHRED_BATCH_FEC_SETS_MAX frags to repair (one per FEC set).
   Therefore, the worst case is IN_KIND_POH for Frankendancer. */
#define STEM_BURST (FD_SHRED_BATCH_FEC_SETS_MAX)

/* See explanation in fd_pack */
#define STEM_LAZY (128L*3000L)

/* Stem harness configuration: bind the shred tile's context type and
   the lifecycle callbacks defined earlier in this file before
   instantiating the generic stem run loop below. */
#define STEM_CALLBACK_CONTEXT_TYPE fd_shred_ctx_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_shred_ctx_t)

#define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
#define STEM_CALLBACK_METRICS_WRITE metrics_write
#define STEM_CALLBACK_BEFORE_FRAG before_frag
#define STEM_CALLBACK_DURING_FRAG during_frag
#define STEM_CALLBACK_AFTER_FRAG after_frag

/* Instantiates stem_run (and the rest of the stem machinery) with the
   STEM_* configuration above. */
#include "../stem/fd_stem.c"
1543 :
/* Tile descriptor registered with the topology runtime: wires the
   shred tile's sandboxing hooks, scratch sizing, init phases, and the
   stem-generated run loop into the generic tile runner. */
fd_topo_run_tile_t fd_tile_shred = {
  .name = "shred",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds = populate_allowed_fds,
  .scratch_align = scratch_align,
  .scratch_footprint = scratch_footprint,
  .privileged_init = privileged_init,
  .unprivileged_init = unprivileged_init,
  .run = stem_run,
};
|