Line data Source code
1 : #include "fd_replay_tile.h"
2 : #include "fd_sched.h"
3 : #include "fd_execrp.h"
4 : #include "fd_vote_tracker.h"
5 : #include "generated/fd_replay_tile_seccomp.h"
6 :
7 : #include "../genesis/fd_genesi_tile.h"
8 : #include "../poh/fd_poh.h"
9 : #include "../poh/fd_poh_tile.h"
10 : #include "../tower/fd_tower_tile.h"
11 : #include "../resolv/fd_resolv_tile.h"
12 : #include "../restore/utils/fd_ssload.h"
13 :
14 : #include "../../disco/tiles.h"
15 : #include "../../disco/fd_txn_m.h"
16 : #include "../../disco/store/fd_store.h"
17 : #include "../../disco/pack/fd_pack.h"
18 : #include "../../discof/fd_accdb_topo.h"
19 : #include "../../discof/reasm/fd_reasm.h"
20 : #include "../../disco/keyguard/fd_keyload.h"
21 : #include "../../disco/genesis/fd_genesis_cluster.h"
22 : #include "../../util/pod/fd_pod.h"
23 : #include "../../flamenco/accdb/fd_accdb_admin_v1.h"
24 : #include "../../flamenco/accdb/fd_accdb_admin_v2.h"
25 : #include "../../flamenco/accdb/fd_accdb_impl_v1.h"
26 : #include "../../flamenco/accdb/fd_accdb_sync.h"
27 : #include "../../flamenco/accdb/fd_vinyl_req_pool.h"
28 : #include "../../flamenco/rewards/fd_rewards.h"
29 : #include "../../flamenco/leaders/fd_multi_epoch_leaders.h"
30 : #include "../../flamenco/progcache/fd_progcache_admin.h"
31 : #include "../../disco/metrics/fd_metrics.h"
32 :
33 : #include "../../flamenco/runtime/fd_runtime.h"
34 : #include "../../flamenco/runtime/fd_runtime_stack.h"
35 : #include "../../flamenco/runtime/fd_genesis_parse.h"
36 : #include "../../flamenco/fd_flamenco_base.h"
37 : #include "../../flamenco/runtime/sysvar/fd_sysvar_epoch_schedule.h"
38 :
39 : #include "../../flamenco/runtime/tests/fd_dump_pb.h"
40 :
41 : #include <errno.h>
42 : #include <stdio.h>
43 :
44 : /* Replay concepts:
45 :
      46 :    - Blocks are aggregations of entries, a.k.a. microblocks, which
      47 :      are groupings of txns and are constructed by the block producer
      48 :      (see fd_pack).
49 :
50 : - Entries are grouped into entry batches by the block producer (see
51 : fd_pack / fd_shredder).
52 :
53 : - Entry batches are divided into chunks known as shreds by the block
54 : producer (see fd_shredder).
55 :
56 : - Shreds are grouped into forward-error-correction sets (FEC sets) by
57 : the block producer (see fd_shredder).
58 :
59 : - Shreds are transmitted to the rest of the cluster via the Turbine
60 : protocol (see fd_shredder / fd_shred).
61 :
62 : - Once enough shreds within a FEC set are received to recover the
63 : entirety of the shred data encoded by that FEC set, the receiver
64 : can "complete" the FEC set (see fd_fec_resolver).
65 :
66 : - If shreds in the FEC set are missing such that it can't complete,
      67 :      the receiver can use the Repair protocol to request the missing
      68 :      shreds in the FEC set (see fd_repair).
69 :
70 : - The current Repair protocol does not support requesting coding
      71 :      shreds.  As a result, some FEC sets might actually be complete
      72 :      (contain all data shreds).  Repair currently hacks around this by
      73 :      forcing completion, but the long-term solution is to add support
      74 :      for repairing coding shreds via Repair.
75 :
      76 :    - FEC sets are delivered in partial order to the Replay tile by the
      77 :      Repair tile.  Currently Replay only supports replaying entry
      78 :      batches, so FEC sets need to be reassembled into an entry batch
      79 :      before they can be replayed.  The new Dispatcher will change this
      80 :      by taking a FEC set as input instead. */
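
/* Editor's aside, an illustrative sketch (not part of this file): with
   Reed-Solomon erasure coding, a FEC set containing data_cnt data
   shreds and code_cnt coding shreds is recoverable once any data_cnt
   of its data_cnt+code_cnt shreds have arrived.  A toy completion
   predicate (hypothetical names, ignoring any validation the real
   resolver performs) might look like:

     static inline int
     fec_set_can_complete( ulong data_rx,     // data shreds received so far
                           ulong code_rx,     // coding shreds received so far
                           ulong data_cnt ) { // data shreds in the set
       return (data_rx+code_rx)>=data_cnt;
     }

   This also shows why the Repair limitation above matters: a receiver
   that can only request data shreds may sit at data_rx<data_cnt even
   though data_rx+code_rx>=data_cnt would have been reachable.  The
   production logic lives in fd_fec_resolver (see above). */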
81 :
82 0 : #define IN_KIND_SNAP ( 0)
83 0 : #define IN_KIND_GENESIS ( 1)
84 0 : #define IN_KIND_IPECHO ( 2)
85 0 : #define IN_KIND_TOWER ( 3)
86 0 : #define IN_KIND_RESOLV ( 4)
87 0 : #define IN_KIND_POH ( 5)
88 0 : #define IN_KIND_EXECRP ( 6)
89 0 : #define IN_KIND_SHRED ( 7)
90 0 : #define IN_KIND_TXSEND ( 8)
91 0 : #define IN_KIND_GUI ( 9)
92 0 : #define IN_KIND_RPC (10)
93 :
94 : #define DEBUG_LOGGING 0
95 :
      96 : /* The first bank that the replay tile produces, whether for genesis
      97 :    or for the snapshot boot, will always be at bank index 0. */
98 0 : #define FD_REPLAY_BOOT_BANK_IDX (0UL)
99 :
100 : struct fd_replay_in_link {
101 : fd_wksp_t * mem;
102 : ulong chunk0;
103 : ulong wmark;
104 : ulong mtu;
105 : };
106 :
107 : typedef struct fd_replay_in_link fd_replay_in_link_t;
108 :
109 : struct fd_replay_out_link {
110 : ulong idx;
111 : fd_wksp_t * mem;
112 : ulong chunk0;
113 : ulong wmark;
114 : ulong chunk;
115 : };
116 :
117 : typedef struct fd_replay_out_link fd_replay_out_link_t;
118 :
119 : /* fd_block_id_map is a simple map of block-ids to bank indices. The
120 : map sits on top of an array of fd_block_id_ele_t. This serves as a
     121 :    translation layer between block ids and bank indices. */
122 :
123 : struct fd_block_id_ele {
124 : fd_hash_t block_id;
125 : int block_id_seen;
126 : ulong slot;
127 : ulong next_;
128 : };
129 : typedef struct fd_block_id_ele fd_block_id_ele_t;
130 :
131 : #define MAP_NAME fd_block_id_map
132 : #define MAP_ELE_T fd_block_id_ele_t
133 : #define MAP_KEY_T fd_hash_t
134 0 : #define MAP_KEY block_id
135 0 : #define MAP_NEXT next_
136 0 : #define MAP_KEY_EQ(k0,k1) (!memcmp((k0),(k1), sizeof(fd_hash_t)))
137 0 : #define MAP_KEY_HASH(key,seed) (fd_hash((seed),(key),sizeof(fd_hash_t)))
138 : #include "../../util/tmpl/fd_map_chain.c"
139 :
140 : static inline ulong
141 0 : fd_block_id_ele_get_idx( fd_block_id_ele_t * ele_arr, fd_block_id_ele_t * ele ) {
142 0 : return (ulong)(ele - ele_arr);
143 0 : }
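
/* Editor's aside, an illustrative sketch (not part of this file): the
   block-id -> bank-index direction of the translation layer mirrors
   how the tile resolves a parent block id further below:

     static inline ulong
     lookup_bank_idx( fd_block_id_map_t * map,
                      fd_block_id_ele_t * arr,
                      fd_hash_t const *   block_id ) {
       fd_block_id_ele_t * ele = fd_block_id_map_ele_query( map, block_id, NULL, arr );
       if( !ele ) return ULONG_MAX;                // unknown block id
       return fd_block_id_ele_get_idx( arr, ele ); // offset in arr is the bank index
     }

   The reverse direction (bank index -> block id) is a plain array
   index: block_id_arr[ bank_idx ].block_id. */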
144 :
145 : struct fd_replay_tile {
146 : fd_wksp_t * wksp;
147 :
148 : fd_accdb_admin_t accdb_admin[1];
149 : fd_accdb_user_t accdb[1];
150 : fd_progcache_admin_t progcache_admin[1];
151 :
152 : fd_txncache_t * txncache;
153 : fd_store_t * store;
154 : fd_banks_t banks[1];
155 :
     156 :   /* This flag is 1 if we have seen a vote signature that our node
     157 :      sent out get rooted at least once.  The value is 0 otherwise.
158 : We can't become leader and pack blocks until this flag has been
159 : set. This parallels the Agave 'has_new_vote_been_rooted'. */
160 : int has_identity_vote_rooted;
161 : int wait_for_vote_to_start_leader;
162 :
163 : ulong reasm_seed;
164 : fd_reasm_t * reasm;
165 :
166 : /* Replay state machine. */
167 : fd_sched_t * sched;
168 : ulong exec_cnt;
169 : fd_replay_out_link_t exec_out[ 1 ]; /* Sending work down to exec tiles */
170 :
171 : ulong vote_tracker_seed;
172 : fd_vote_tracker_t * vote_tracker;
173 :
174 : int has_genesis_hash;
175 : char genesis_path[ PATH_MAX ];
176 : uchar genesis_hash[ 32UL ];
177 : ulong cluster_type;
178 :
179 : #define FD_REPLAY_HARD_FORKS_MAX (64UL)
180 : ulong hard_forks_cnt;
181 : ulong hard_forks[ FD_REPLAY_HARD_FORKS_MAX ];
182 :
183 : ushort expected_shred_version;
184 : ushort ipecho_shred_version;
185 :
186 : /* A note on publishing ...
187 :
188 : The watermarks are used to publish our fork-aware structures. For
189 : example, store, banks, and txncache need to be published to release
190 : resources occupied by rooted or dead blocks. In general,
191 : publishing has the effect of pruning forks in those structures,
192 : indicating that it is ok to release the memory being occupied by
193 : the blocks on said forks. Tower is responsible for informing us of
194 : the latest block on the consensus rooted fork. As soon as we can,
195 : we should move the published root as close as possible to the
196 : latest consensus root, publishing/pruning everything on the fork
197 : tree along the way. That is, all the blocks that directly descend
198 : from the current published root (inclusive) to the new published
199 : root (exclusive) on the rooted fork, as well as all the minority
200 : forks that branch from said blocks.
201 :
202 : Ideally, we'd move the published root to the consensus root
203 : immediately upon receiving a new consensus root. However, that's
204 : not always safe to do. One thing we need to be careful about is
205 : making sure that there are no more users/consumers of
     206 :      soon-to-be-pruned blocks, lest a use-after-free occur.  This can
207 : be done by using a reference counter for each block. Any
208 : concurrent activity, such as transaction execution in the exec
209 : tiles, should retain a refcnt on the block for as
210 : long as it needs access to the shared fork-aware structures related
211 : to that block. Eventually, refcnt on a given block will drop down
212 : to 0 as the block either finishes replaying or gets marked as dead,
213 : and any other tile that has retained a refcnt on the block releases
214 : it. At that point, it becomes a candidate for pruning. The key to
215 : safe publishing then becomes figuring out how far we could advance
216 : the published root, such that every minority fork branching off of
217 : blocks in between the current published root (inclusive) and the
218 : new published root (exclusive) is safe to be pruned. This is a
219 : straightforward tree traversal, where if a block B on the rooted
220 : fork has refcnt 0, and all minority forks branching off of B also
221 : have refcnt 0, then B is safe to be pruned. We advance the
222 : published root to the farthest consecutively prunable block on the
223 : rooted fork. Note that reasm presents the replay tile with a clean
224 : view of the world where every block is chained off of a parent
     225 :      block.  So there are no orphaned/dangling tree nodes to worry
226 : about. The world is a nice single tree as far as replay is
227 : concerned.
228 :
229 : In the following fork tree, every node is a block and the number in
230 : parentheses is the refcnt on the block. The chain marked with
231 : double slashes is the rooted fork. Suppose the published root is
232 : at block P, and consensus root is at block T. We can't publish
233 : past block P because Q has refcnt 1.
234 :
235 :
236 : P(0)
237 : / \\
238 : Q(1) A(0)
239 : / || \
240 : X(0) B(0) C(0)
241 : / || \
242 : Y(0) M(0) R(0)
243 : / || / \
244 : D(2) T(0) J(0) L(0)
245 : ||
246 : ..
247 : ..
248 : ..
249 : ||
250 : blocks we might be actively replaying
251 :
252 :
253 : When refcnt on Q drops to 0, we would be able to advance the
254 : published root to block M, because blocks P, A, and B, as well as
255 : all subtrees branching off of them, have refcnt 0, and therefore
256 : can be pruned. Block M itself cannot be pruned yet because its
257 : child block D has refcnt 2. After publishing/pruning, the fork
258 : tree would be:
259 :
260 :
261 : M(0)
262 : / ||
263 : D(2) T(0)
264 : ||
265 : ..
266 : ..
267 : ..
268 : ||
269 : blocks we might be actively replaying
270 :
271 :
272 : As a result, the shared fork-aware structures can free resources
273 : for blocks P, A, B, and all subtrees branching off of them.
274 :
275 : For the reference counting part, the replay tile is the sole entity
276 : that can update the refcnt. This ensures that all refcnt increment
277 : and decrement attempts are serialized at the replay tile, and that
     278 :      there is no racy resurrection of a soon-to-be-pruned block.  If a
279 : refcnt increment request arrives after a block has been pruned,
280 : replay simply rejects the request.
281 :
282 : A note on the implementation of the above ...
283 :
284 : Upon receiving a new consensus root, we descend down the rooted
285 : fork from the current published root to the new consensus root. On
286 : each node/block of the rooted fork, we do a summation of the refcnt
287 : on the block and all the minority fork blocks branching from the
288 : block. If the summation is 0, the block is safe for pruning. We
289 : advance the published root to the far end of the consecutive run of
290 : 0 refcnt sums originating from the current published root. On our
291 : descent down the minority forks, we also mark any block that hasn't
292 : finished replaying as dead, so we don't waste time executing them.
     293 :      No more transactions shall be dispatched for execution from dead
     294 :      blocks.  (A sketch of this traversal follows the struct below.)
295 :
296 : Blocks start out with a refcnt of 0. Other tiles may send a
297 : request to the replay tile for a reference on a block. The
298 : transaction dispatcher is another source of refcnt updates. On
299 : every dispatch of a transaction for block B, we increment the
300 : refcnt for B. And on every transaction finalization, we decrement
301 : the refcnt for B. This means that whenever the refcnt on a block
302 : is 0, there is no more reference on that block from the execution
303 : pipeline. While it might be tempting to simply increment the
304 : refcnt once when we start replaying a block, and decrement the
305 : refcnt once when we finish a block, this more fine-grained refcnt
306 : update strategy allows for aborting and potentially immediate
307 : pruning of blocks under interleaved block replay. Upon receiving a
308 : new consensus root, we can simply look at the refcnt on minority
309 : fork blocks, and a refcnt of 0 would imply that the block is safe
310 : for pruning, even if we haven't finished replaying it. Without the
311 : fine-grained refcnt, we would need to first stop dispatching from
312 : the aborted block, and then wait for a full drain of the execution
313 : pipeline to know for sure that there are no more in-flight
314 : transactions executing on the aborted block. Note that this will
315 : allow the refcnt on any block to transiently drop down to 0. We
316 : will not mistakenly prune an actively replaying block, aka a leaf
317 : node, that is chaining off of the rooted fork, because the
318 : consensus root is always an ancestor of the actively replaying tip.
319 : */
320 : fd_hash_t consensus_root; /* The most recent block to have reached max lockout in the tower. */
321 : ulong consensus_root_slot; /* slot number of the above. */
322 : ulong consensus_root_bank_idx; /* bank index of the above. */
323 : ulong published_root_slot; /* slot number of the published root. */
324 : ulong published_root_bank_idx; /* bank index of the published root. */
325 :
326 : /* We need to maintain a tile-local mapping of block-ids to bank index
327 : and vice versa. This translation layer is needed for conversion
328 : since tower operates on block-ids and downstream consumers of FEC
329 : sets operate on bank indices. This mapping must happen both ways:
330 : 1. tower sends us block ids and we must map them to bank indices.
331 : 2. when a block is completed, we must map the bank index to a block
332 : id to send a slot complete message to tower. */
333 : ulong block_id_len;
334 : fd_block_id_ele_t * block_id_arr;
335 : ulong block_id_map_seed;
336 : fd_block_id_map_t * block_id_map;
337 :
338 : /* Capture-related configs */
339 : fd_capture_ctx_t * capture_ctx;
340 : FILE * capture_file;
341 : fd_capture_link_buf_t cap_repl_out[1];
342 :
343 : /* Protobuf dumping context for debugging runtime execution and
344 : collecting seed corpora. */
345 : fd_dump_proto_ctx_t * dump_proto_ctx;
346 :
347 : /* Whether the runtime has been booted either from snapshot loading
348 : or from genesis. */
349 : int is_booted;
350 :
351 : /* Buffer to store vote towers that need to be published to the Tower
352 : tile. */
353 :
354 : fd_multi_epoch_leaders_t * mleaders;
355 :
356 : int larger_max_cost_per_block;
357 :
358 : fd_pubkey_t identity_pubkey[1]; /* TODO: Keyswitch */
359 :
360 : /* When we transition to becoming leader, we can only unbecome the
361 : leader if we have received a block id from the FEC reassembler, and
     362 :      a message from PoH that the leader slot has ended.  Once both of
     363 :      these conditions are met, we are free to unbecome the leader.
364 : */
365 : int is_leader;
366 : int recv_poh;
367 : ulong next_leader_slot;
368 : long next_leader_tickcount;
369 : ulong highwater_leader_slot;
370 : ulong reset_slot;
371 : fd_bank_t reset_bank[1];
372 : fd_hash_t reset_block_id;
373 : long reset_timestamp_nanos;
374 : double slot_duration_nanos;
375 : double slot_duration_ticks;
376 : fd_bank_t leader_bank[1];
377 :
378 : ulong resolv_tile_cnt;
379 :
380 : int in_kind[ 128 ];
381 : fd_replay_in_link_t in[ 128 ];
382 :
383 : fd_replay_out_link_t replay_out[1];
384 :
385 : fd_replay_out_link_t epoch_out[1];
386 :
     387 :   /* The gui tile needs to reliably own a reference to the most
     388 :      recently completed active bank.  Replay needs to know whether the
     389 :      gui consumer is enabled so it can increment the bank's refcnt before
390 : publishing the bank_idx to the gui. */
391 : int gui_enabled;
392 : int rpc_enabled;
393 :
394 : # if FD_HAS_FLATCC
395 : /* For dumping blocks to protobuf. For backtest only. */
396 : fd_block_dump_ctx_t * block_dump_ctx;
397 : # endif
398 :
399 : /* We need a few pieces of information to compute the right addresses
     400 :      for the bundle crank information that we send to pack. */
401 : struct {
402 : int enabled;
403 : fd_pubkey_t vote_account;
404 : fd_bundle_crank_gen_t gen[1];
405 : } bundle;
406 :
407 : struct {
408 : fd_histf_t store_read_wait[ 1 ];
409 : fd_histf_t store_read_work[ 1 ];
410 : fd_histf_t store_publish_wait[ 1 ];
411 : fd_histf_t store_publish_work[ 1 ];
412 : fd_histf_t store_link_wait[ 1 ];
413 : fd_histf_t store_link_work[ 1 ];
414 :
415 : ulong slots_total;
416 : ulong transactions_total;
417 :
418 : ulong reasm_latest_slot;
419 : ulong reasm_latest_fec_idx;
420 :
421 : ulong sched_full;
422 : ulong reasm_empty;
423 : ulong leader_bid_wait;
424 : ulong banks_full;
425 : ulong storage_root_behind;
426 :
427 : fd_histf_t root_slot_dur[1];
428 : fd_histf_t root_account_dur[1];
429 : } metrics;
430 :
431 : uchar __attribute__((aligned(FD_MULTI_EPOCH_LEADERS_ALIGN))) mleaders_mem[ FD_MULTI_EPOCH_LEADERS_FOOTPRINT ];
432 :
433 : fd_runtime_stack_t runtime_stack;
434 : };
435 :
436 : typedef struct fd_replay_tile fd_replay_tile_t;
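
/* Editor's aside, an illustrative sketch (not part of this file) of
   the published-root advancement described in the publishing note
   above.  next_on_rooted_fork() and fork_refcnt_sum() are hypothetical
   stand-ins for the tile's actual fork-tree traversal:

     static ulong
     max_publishable_root( fd_banks_t * banks,
                           ulong        published_root_idx,
                           ulong        consensus_root_idx ) {
       ulong new_root = published_root_idx;
       ulong idx      = published_root_idx;
       while( idx!=consensus_root_idx ) {
         // A block is prunable iff its own refcnt plus the refcnts of
         // all minority fork blocks branching off of it sum to zero.
         if( fork_refcnt_sum( banks, idx ) ) break;
         idx      = next_on_rooted_fork( banks, idx ); // descend the rooted fork
         new_root = idx; // farthest consecutively prunable point so far
       }
       return new_root;
     }

   In the worked example above, P, A, and B sum to zero once Q is
   released, while M's sum is nonzero because of D(2), so the published
   root advances exactly to M. */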
437 :
438 : FD_FN_CONST static inline ulong
439 0 : scratch_align( void ) {
440 0 : return 128UL;
441 0 : }
442 : FD_FN_PURE static inline ulong
443 0 : scratch_footprint( fd_topo_tile_t const * tile ) {
444 0 : ulong chain_cnt = fd_block_id_map_chain_cnt_est( tile->replay.max_live_slots );
445 :
446 0 : ulong l = FD_LAYOUT_INIT;
447 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_replay_tile_t), sizeof(fd_replay_tile_t) );
448 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_block_id_ele_t), sizeof(fd_block_id_ele_t) * tile->replay.max_live_slots );
449 0 : l = FD_LAYOUT_APPEND( l, fd_block_id_map_align(), fd_block_id_map_footprint( chain_cnt ) );
450 0 : l = FD_LAYOUT_APPEND( l, fd_txncache_align(), fd_txncache_footprint( tile->replay.max_live_slots ) );
451 0 : l = FD_LAYOUT_APPEND( l, fd_reasm_align(), fd_reasm_footprint( tile->replay.fec_max ) );
452 0 : l = FD_LAYOUT_APPEND( l, fd_sched_align(), fd_sched_footprint( tile->replay.max_live_slots ) );
453 0 : l = FD_LAYOUT_APPEND( l, fd_vinyl_req_pool_align(), fd_vinyl_req_pool_footprint( 1UL, 1UL ) );
454 0 : l = FD_LAYOUT_APPEND( l, fd_vote_tracker_align(), fd_vote_tracker_footprint() );
455 0 : l = FD_LAYOUT_APPEND( l, fd_capture_ctx_align(), fd_capture_ctx_footprint() );
456 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_dump_proto_ctx_t), sizeof(fd_dump_proto_ctx_t) );
457 :
458 0 : # if FD_HAS_FLATCC
459 0 : if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
460 0 : l = FD_LAYOUT_APPEND( l, fd_block_dump_context_align(), fd_block_dump_context_footprint() );
461 0 : }
462 0 : # endif
463 :
464 0 : l = FD_LAYOUT_FINI( l, scratch_align() );
465 :
466 0 : return l;
467 0 : }
468 :
469 : static inline void
470 0 : metrics_write( fd_replay_tile_t * ctx ) {
471 0 : FD_MHIST_COPY( REPLAY, STORE_LINK_WAIT, ctx->metrics.store_link_wait );
472 0 : FD_MHIST_COPY( REPLAY, STORE_LINK_WORK, ctx->metrics.store_link_work );
473 0 : FD_MHIST_COPY( REPLAY, STORE_READ_WAIT, ctx->metrics.store_read_wait );
474 0 : FD_MHIST_COPY( REPLAY, STORE_READ_WORK, ctx->metrics.store_read_work );
475 0 : FD_MHIST_COPY( REPLAY, STORE_PUBLISH_WAIT, ctx->metrics.store_publish_wait );
476 0 : FD_MHIST_COPY( REPLAY, STORE_PUBLISH_WORK, ctx->metrics.store_publish_work );
477 :
478 0 : FD_MGAUGE_SET( REPLAY, ROOT_SLOT, ctx->consensus_root_slot==ULONG_MAX ? 0UL : ctx->consensus_root_slot );
479 0 : ulong leader_slot = ctx->leader_bank->data ? fd_bank_slot_get( ctx->leader_bank ) : 0UL;
480 0 : FD_MGAUGE_SET( REPLAY, LEADER_SLOT, leader_slot );
481 :
482 0 : if( FD_LIKELY( ctx->leader_bank->data ) ) {
483 0 : FD_MGAUGE_SET( REPLAY, NEXT_LEADER_SLOT, leader_slot );
484 0 : FD_MGAUGE_SET( REPLAY, LEADER_SLOT, leader_slot );
485 0 : } else {
486 0 : FD_MGAUGE_SET( REPLAY, NEXT_LEADER_SLOT, ctx->next_leader_slot==ULONG_MAX ? 0UL : ctx->next_leader_slot );
487 0 : FD_MGAUGE_SET( REPLAY, LEADER_SLOT, 0UL );
488 0 : }
489 0 : FD_MGAUGE_SET( REPLAY, RESET_SLOT, ctx->reset_slot==ULONG_MAX ? 0UL : ctx->reset_slot );
490 :
491 0 : fd_bank_data_t * bank_pool = fd_banks_get_bank_pool( ctx->banks->data );
492 0 : ulong live_banks = fd_banks_pool_max( bank_pool ) - fd_banks_pool_free( bank_pool );
493 0 : FD_MGAUGE_SET( REPLAY, LIVE_BANKS, live_banks );
494 :
495 0 : ulong reasm_free = fd_reasm_free( ctx->reasm );
496 0 : FD_MGAUGE_SET( REPLAY, REASM_FREE, reasm_free );
497 :
498 0 : FD_MCNT_SET( REPLAY, SLOTS_TOTAL, ctx->metrics.slots_total );
499 0 : FD_MCNT_SET( REPLAY, TRANSACTIONS_TOTAL, ctx->metrics.transactions_total );
500 :
501 0 : FD_MGAUGE_SET( REPLAY, REASM_LATEST_SLOT, ctx->metrics.reasm_latest_slot );
502 0 : FD_MGAUGE_SET( REPLAY, REASM_LATEST_FEC_IDX, ctx->metrics.reasm_latest_fec_idx );
503 :
504 0 : FD_MCNT_SET( REPLAY, SCHED_FULL, ctx->metrics.sched_full );
505 0 : FD_MCNT_SET( REPLAY, REASM_EMPTY, ctx->metrics.reasm_empty );
506 0 : FD_MCNT_SET( REPLAY, LEADER_BID_WAIT, ctx->metrics.leader_bid_wait );
507 0 : FD_MCNT_SET( REPLAY, BANKS_FULL, ctx->metrics.banks_full );
508 0 : FD_MCNT_SET( REPLAY, STORAGE_ROOT_BEHIND, ctx->metrics.storage_root_behind );
509 :
510 0 : FD_MCNT_SET( REPLAY, PROGCACHE_ROOTED, ctx->progcache_admin->metrics.root_cnt );
511 0 : FD_MCNT_SET( REPLAY, PROGCACHE_GC_ROOT, ctx->progcache_admin->metrics.gc_root_cnt );
512 :
513 0 : FD_MCNT_SET( REPLAY, ACCDB_CREATED, ctx->accdb->base.created_cnt );
514 0 : FD_MCNT_SET( REPLAY, ACCDB_REVERTED, ctx->accdb_admin->base.revert_cnt );
515 0 : FD_MCNT_SET( REPLAY, ACCDB_ROOTED, ctx->accdb_admin->base.root_cnt );
516 0 : FD_MCNT_SET( REPLAY, ACCDB_ROOTED_BYTES, ctx->accdb_admin->base.root_tot_sz );
517 0 : FD_MCNT_SET( REPLAY, ACCDB_GC_ROOT, ctx->accdb_admin->base.gc_root_cnt );
518 0 : FD_MCNT_SET( REPLAY, ACCDB_RECLAIMED, ctx->accdb_admin->base.reclaim_cnt );
519 0 : FD_MHIST_COPY( REPLAY, ROOT_SLOT_DURATION_SECONDS, ctx->metrics.root_slot_dur );
520 0 : FD_MHIST_COPY( REPLAY, ROOT_ACCOUNT_DURATION_SECONDS, ctx->metrics.root_account_dur );
521 0 : FD_MCNT_SET( REPLAY, ROOT_ELAPSED_SECONDS_DB, (ulong)ctx->accdb_admin->base.dt_vinyl );
522 0 : FD_MCNT_SET( REPLAY, ROOT_ELAPSED_SECONDS_COPY, (ulong)ctx->accdb_admin->base.dt_copy );
523 0 : FD_MCNT_SET( REPLAY, ROOT_ELAPSED_SECONDS_GC, (ulong)ctx->accdb_admin->base.dt_gc );
524 0 : }
525 :
526 : static inline ulong
527 : generate_epoch_info_msg( ulong epoch,
528 : fd_epoch_schedule_t const * epoch_schedule,
529 : fd_vote_states_t const * epoch_stakes,
530 : fd_features_t const * features,
531 : fd_epoch_info_msg_t * epoch_info_msg,
532 0 : int current_epoch ) {
533 0 : fd_vote_stake_weight_t * stake_weights = epoch_info_msg->weights;
534 :
535 0 : epoch_info_msg->epoch = epoch;
536 0 : epoch_info_msg->start_slot = fd_epoch_slot0( epoch_schedule, epoch );
537 0 : epoch_info_msg->slot_cnt = epoch_schedule->slots_per_epoch;
538 0 : epoch_info_msg->excluded_stake = 0UL;
539 0 : epoch_info_msg->vote_keyed_lsched = 1UL;
540 :
541 : /* FIXME: SIMD-0180 - hack to (de)activate in testnet vs mainnet.
542 : This code can be removed once the feature is active. */
543 0 : if( (1==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_TESTNET) ||
544 0 : (0==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_MAINNET) ) {
545 0 : epoch_info_msg->vote_keyed_lsched = 0UL;
546 0 : }
547 :
548 : /* epoch_stakes from manifest are already filtered (stake>0), but not sorted */
549 0 : fd_vote_states_iter_t iter_[1];
550 0 : ulong idx = 0UL;
551 0 : for( fd_vote_states_iter_t * iter = fd_vote_states_iter_init( iter_, epoch_stakes ); !fd_vote_states_iter_done( iter ); fd_vote_states_iter_next( iter ) ) {
552 0 : fd_vote_state_ele_t * vote_state = fd_vote_states_iter_ele( iter );
553 :
554 0 : ulong stake = current_epoch ? vote_state->stake_t_1 : vote_state->stake_t_2;
555 0 : if( FD_UNLIKELY( !stake ) ) continue;
556 :
557 0 : stake_weights[ idx ].stake = stake;
558 0 : memcpy( stake_weights[ idx ].id_key.uc, &vote_state->node_account, sizeof(fd_pubkey_t) );
559 0 : memcpy( stake_weights[ idx ].vote_key.uc, &vote_state->vote_account, sizeof(fd_pubkey_t) );
560 0 : idx++;
561 0 : }
562 0 : epoch_info_msg->staked_cnt = idx;
563 0 : sort_vote_weights_by_stake_vote_inplace( stake_weights, idx );
564 :
565 0 : epoch_info_msg->features = *features;
566 :
567 0 : return fd_epoch_info_msg_sz( epoch_info_msg->staked_cnt );
568 0 : }
569 :
570 : static void
571 : publish_epoch_info( fd_replay_tile_t * ctx,
572 : fd_stem_context_t * stem,
573 : fd_bank_t * bank,
574 0 : int current_epoch ) {
575 0 : fd_epoch_schedule_t const * schedule = fd_bank_epoch_schedule_query( bank );
576 0 : ulong epoch = fd_slot_to_epoch( schedule, fd_bank_slot_get( bank ), NULL );
577 :
578 0 : fd_vote_states_t const * vote_states = fd_bank_vote_states_locking_query( bank );
579 :
580 0 : fd_features_t const * features = fd_bank_features_query( bank );
581 :
582 0 : fd_epoch_info_msg_t * epoch_info_msg = fd_chunk_to_laddr( ctx->epoch_out->mem, ctx->epoch_out->chunk );
583 0 : ulong epoch_info_sz = generate_epoch_info_msg( epoch+fd_ulong_if( current_epoch, 1UL, 0UL), schedule, vote_states, features, epoch_info_msg, current_epoch );
584 0 : ulong epoch_info_sig = 4UL;
585 0 : fd_stem_publish( stem, ctx->epoch_out->idx, epoch_info_sig, ctx->epoch_out->chunk, epoch_info_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
586 0 : ctx->epoch_out->chunk = fd_dcache_compact_next( ctx->epoch_out->chunk, epoch_info_sz, ctx->epoch_out->chunk0, ctx->epoch_out->wmark );
587 :
588 0 : fd_multi_epoch_leaders_epoch_msg_init( ctx->mleaders, epoch_info_msg );
589 0 : fd_multi_epoch_leaders_epoch_msg_fini( ctx->mleaders );
590 :
591 0 : fd_bank_vote_states_end_locking_query( bank );
592 0 : }
593 :
594 : /**********************************************************************/
595 : /* Transaction execution state machine helpers */
596 : /**********************************************************************/
597 :
598 : static void
599 : replay_block_start( fd_replay_tile_t * ctx,
600 : fd_stem_context_t * stem,
601 : ulong bank_idx,
602 : ulong parent_bank_idx,
603 0 : ulong slot ) {
604 0 : long before = fd_log_wallclock();
605 :
606 0 : fd_bank_t bank[1];
607 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, bank_idx ) ) ) {
608 0 : FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", bank_idx ));
609 0 : }
610 0 : if( FD_UNLIKELY( bank->data->flags!=FD_BANK_FLAGS_INIT ) ) {
611 0 : FD_LOG_CRIT(( "invariant violation: bank is not in correct state for bank index %lu", bank_idx ));
612 0 : }
613 :
614 0 : bank->data->preparation_begin_nanos = before;
615 :
616 0 : fd_bank_t parent_bank[1];
617 0 : if( FD_UNLIKELY( !fd_banks_bank_query( parent_bank, ctx->banks, parent_bank_idx ) ) ) {
618 0 : FD_LOG_CRIT(( "invariant violation: parent bank is NULL for bank index %lu", parent_bank_idx ));
619 0 : }
620 0 : ulong parent_slot = fd_bank_slot_get( parent_bank );
621 :
622 : /* Clone the bank from the parent. We must special case the first
623 : slot that is executed as the snapshot does not provide a parent
624 : block id. */
625 :
626 0 : if( FD_UNLIKELY( !fd_banks_clone_from_parent( bank, ctx->banks, bank_idx ) ) ) {
627 0 : FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", bank_idx ));
628 0 : }
629 0 : fd_bank_slot_set( bank, slot );
630 0 : fd_bank_parent_slot_set( bank, parent_slot );
631 0 : bank->data->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, parent_bank->data->txncache_fork_id );
632 :
633 : /* Create a new funk txn for the block. */
634 :
635 0 : fd_funk_txn_xid_t xid = { .ul = { slot, bank_idx } };
636 0 : fd_funk_txn_xid_t parent_xid = { .ul = { parent_slot, parent_bank_idx } };
637 0 : fd_accdb_attach_child( ctx->accdb_admin, &parent_xid, &xid );
638 0 : fd_progcache_txn_attach_child( ctx->progcache_admin, &parent_xid, &xid );
639 :
640 : /* Update any required runtime state and handle any potential epoch
641 : boundary change. */
642 :
643 0 : fd_bank_shred_cnt_set( bank, 0UL );
644 0 : fd_bank_execution_fees_set( bank, 0UL );
645 0 : fd_bank_priority_fees_set( bank, 0UL );
646 0 : fd_bank_tips_set( bank, 0UL );
647 :
648 0 : fd_bank_has_identity_vote_set( bank, 0 );
649 :
650 : /* Update block height. */
651 0 : fd_bank_block_height_set( bank, fd_bank_block_height_get( bank ) + 1UL );
652 :
653 0 : int is_epoch_boundary = 0;
654 0 : fd_runtime_block_execute_prepare( ctx->banks, bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
655 0 : if( FD_UNLIKELY( is_epoch_boundary ) ) publish_epoch_info( ctx, stem, bank, 1 );
656 :
657 0 : ulong max_tick_height;
658 0 : if( FD_UNLIKELY( FD_RUNTIME_EXECUTE_SUCCESS!=fd_runtime_compute_max_tick_height( fd_bank_ticks_per_slot_get( parent_bank ), slot, &max_tick_height ) ) ) {
659 0 : FD_LOG_CRIT(( "couldn't compute tick height/max tick height slot %lu ticks_per_slot %lu", slot, fd_bank_ticks_per_slot_get( parent_bank ) ));
660 0 : }
661 0 : fd_bank_max_tick_height_set( bank, max_tick_height );
662 0 : fd_bank_tick_height_set( bank, fd_bank_max_tick_height_get( parent_bank ) ); /* The parent's max tick height is our starting tick height. */
663 0 : fd_sched_set_poh_params( ctx->sched, bank->data->idx, fd_bank_tick_height_get( bank ), fd_bank_max_tick_height_get( bank ), fd_bank_hashes_per_tick_get( bank ), fd_bank_poh_query( parent_bank ) );
664 :
665 0 : FD_LOG_DEBUG(( "replay_block_start: bank_idx=%lu slot=%lu parent_bank_idx=%lu", bank_idx, slot, parent_bank_idx ));
666 0 : }
667 :
668 : static void
669 0 : cost_tracker_snap( fd_bank_t * bank, fd_replay_slot_completed_t * slot_info ) {
670 0 : if( bank->data->cost_tracker_pool_idx!=fd_bank_cost_tracker_pool_idx_null( fd_bank_get_cost_tracker_pool( bank->data ) ) ) {
671 0 : fd_cost_tracker_t const * cost_tracker = fd_bank_cost_tracker_locking_query( bank );
672 0 : slot_info->cost_tracker.block_cost = cost_tracker->block_cost;
673 0 : slot_info->cost_tracker.vote_cost = cost_tracker->vote_cost;
674 0 : slot_info->cost_tracker.allocated_accounts_data_size = cost_tracker->allocated_accounts_data_size;
675 0 : slot_info->cost_tracker.block_cost_limit = cost_tracker->block_cost_limit;
676 0 : slot_info->cost_tracker.vote_cost_limit = cost_tracker->vote_cost_limit;
677 0 : slot_info->cost_tracker.account_cost_limit = cost_tracker->account_cost_limit;
678 0 : fd_bank_cost_tracker_end_locking_query( bank );
679 0 : } else {
680 0 : memset( &slot_info->cost_tracker, 0, sizeof(slot_info->cost_tracker) );
681 0 : }
682 0 : }
683 :
684 : static ulong
685 0 : get_identity_balance( fd_replay_tile_t * ctx, fd_funk_txn_xid_t xid ) {
686 0 : ulong identity_balance = ULONG_MAX;
687 0 : fd_accdb_ro_t identity_acc[1];
688 0 : if( FD_LIKELY( fd_accdb_open_ro( ctx->accdb, identity_acc, &xid, ctx->identity_pubkey ) ) ) {
689 0 : identity_balance = identity_acc->meta->lamports;
690 0 : fd_accdb_close_ro( ctx->accdb, identity_acc );
691 0 : }
692 0 : return identity_balance;
693 0 : }
694 :
695 : static void
696 : publish_slot_completed( fd_replay_tile_t * ctx,
697 : fd_stem_context_t * stem,
698 : fd_bank_t * bank,
699 : int is_initial,
700 0 : int is_leader ) {
701 :
702 0 : ulong slot = fd_bank_slot_get( bank );
703 :
704 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ bank->data->idx ];
705 :
     706 :   /* HACKY: crude way of checking whether we should send a null
     707 :      parent block id. */
708 0 : fd_hash_t parent_block_id = {0};
709 0 : if( FD_UNLIKELY( !is_initial ) ) {
710 0 : parent_block_id = ctx->block_id_arr[ bank->data->parent_idx ].block_id;
711 0 : }
712 :
713 0 : fd_hash_t const * bank_hash = fd_bank_bank_hash_query( bank );
714 0 : fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
715 0 : FD_TEST( bank_hash );
716 0 : FD_TEST( block_hash );
717 :
718 0 : if( FD_LIKELY( !is_initial ) ) fd_txncache_finalize_fork( ctx->txncache, bank->data->txncache_fork_id, 0UL, block_hash->uc );
719 :
720 0 : fd_epoch_schedule_t const * epoch_schedule = fd_bank_epoch_schedule_query( bank );
721 0 : ulong slot_idx;
722 0 : ulong epoch = fd_slot_to_epoch( epoch_schedule, slot, &slot_idx );
723 :
724 0 : ctx->metrics.slots_total++;
725 0 : ctx->metrics.transactions_total = fd_bank_txn_count_get( bank );
726 :
727 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
728 0 : slot_info->slot = slot;
729 0 : slot_info->root_slot = ctx->consensus_root_slot;
730 0 : slot_info->storage_slot = ctx->published_root_slot;
731 0 : slot_info->epoch = epoch;
732 0 : slot_info->slot_in_epoch = slot_idx;
733 0 : slot_info->block_height = fd_bank_block_height_get( bank );
734 0 : slot_info->parent_slot = fd_bank_parent_slot_get( bank );
735 0 : slot_info->block_id = block_id_ele->block_id;
736 0 : slot_info->parent_block_id = parent_block_id;
737 0 : slot_info->bank_hash = *bank_hash;
738 0 : slot_info->block_hash = *block_hash;
739 0 : slot_info->transaction_count = fd_bank_txn_count_get( bank );
740 :
741 0 : fd_inflation_t inflation = fd_bank_inflation_get( bank );
742 0 : slot_info->inflation.foundation = inflation.foundation;
743 0 : slot_info->inflation.foundation_term = inflation.foundation_term;
744 0 : slot_info->inflation.terminal = inflation.terminal;
745 0 : slot_info->inflation.initial = inflation.initial;
746 0 : slot_info->inflation.taper = inflation.taper;
747 :
748 0 : fd_rent_t rent = fd_bank_rent_get( bank );
749 0 : slot_info->rent.burn_percent = rent.burn_percent;
750 0 : slot_info->rent.lamports_per_uint8_year = rent.lamports_per_uint8_year;
751 0 : slot_info->rent.exemption_threshold = rent.exemption_threshold;
752 :
753 0 : slot_info->first_fec_set_received_nanos = bank->data->first_fec_set_received_nanos;
754 0 : slot_info->preparation_begin_nanos = bank->data->preparation_begin_nanos;
755 0 : slot_info->first_transaction_scheduled_nanos = bank->data->first_transaction_scheduled_nanos;
756 0 : slot_info->last_transaction_finished_nanos = bank->data->last_transaction_finished_nanos;
757 0 : slot_info->completion_time_nanos = fd_log_wallclock();
758 :
759 : /* refcnt should be incremented by 1 for each consumer that uses
760 : `bank_idx`. Each consumer should decrement the bank's refcnt once
761 : they are done using the bank. */
762 0 : bank->data->refcnt++; /* tower_tile */
763 0 : if( FD_LIKELY( ctx->rpc_enabled ) ) bank->data->refcnt++; /* rpc tile */
764 0 : if( FD_LIKELY( ctx->gui_enabled ) ) bank->data->refcnt++; /* gui tile */
765 0 : slot_info->bank_idx = bank->data->idx;
766 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for tower, rpc, gui", bank->data->idx, slot, bank->data->refcnt ));
767 :
768 0 : slot_info->parent_bank_idx = ULONG_MAX;
769 0 : fd_bank_t parent_bank[1];
770 0 : if( FD_LIKELY( fd_banks_get_parent( parent_bank, ctx->banks, bank ) && ctx->gui_enabled ) ) {
771 0 : parent_bank->data->refcnt++;
772 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for gui", parent_bank->data->idx, fd_bank_slot_get( parent_bank ), parent_bank->data->refcnt ));
773 0 : slot_info->parent_bank_idx = parent_bank->data->idx;
774 0 : }
775 :
776 0 : slot_info->is_leader = is_leader;
777 :
778 0 : FD_BASE58_ENCODE_32_BYTES( ctx->block_id_arr[ bank->data->idx ].block_id.uc, block_id_cstr );
779 0 : FD_BASE58_ENCODE_32_BYTES( fd_bank_bank_hash_query( bank )->uc, bank_hash_cstr );
780 0 : FD_LOG_DEBUG(( "publish_slot_completed: bank_idx=%lu slot=%lu bank_hash=%s block_id=%s", bank->data->idx, slot, bank_hash_cstr, block_id_cstr ));
781 :
782 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_SLOT_COMPLETED, ctx->replay_out->chunk, sizeof(fd_replay_slot_completed_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
783 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_slot_completed_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
784 0 : }
785 :
786 : static void
787 : publish_slot_dead( fd_replay_tile_t * ctx,
788 : fd_stem_context_t * stem,
789 0 : fd_bank_t * bank ) {
790 0 : FD_TEST( ctx->block_id_arr[ bank->data->idx ].block_id_seen );
791 0 : fd_replay_slot_dead_t * slot_dead = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
792 0 : slot_dead->slot = fd_bank_slot_get( bank );
793 0 : slot_dead->block_id = ctx->block_id_arr[ bank->data->idx ].block_id;
794 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_SLOT_DEAD, ctx->replay_out->chunk, sizeof(fd_replay_slot_dead_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
795 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_slot_dead_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
796 0 : }
797 :
798 : static void
799 : replay_block_finalize( fd_replay_tile_t * ctx,
800 : fd_stem_context_t * stem,
801 0 : fd_bank_t * bank ) {
802 0 : bank->data->last_transaction_finished_nanos = fd_log_wallclock();
803 :
804 0 : FD_TEST( !(bank->data->flags&FD_BANK_FLAGS_FROZEN) );
805 :
806 : /* Set poh hash in bank. */
807 0 : fd_hash_t * poh = fd_sched_get_poh( ctx->sched, bank->data->idx );
808 0 : fd_bank_poh_set( bank, *poh );
809 :
810 : /* Set shred count in bank. */
811 0 : fd_bank_shred_cnt_set( bank, fd_sched_get_shred_cnt( ctx->sched, bank->data->idx ) );
812 :
813 : /* Do hashing and other end-of-block processing. */
814 0 : fd_runtime_block_execute_finalize( bank, ctx->accdb, ctx->capture_ctx );
815 :
816 : /* Copy out cost tracker fields before freezing */
817 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
818 0 : cost_tracker_snap( bank, slot_info );
819 :
820 : /* fetch identity / vote balance updates infrequently */
821 0 : ulong slot = fd_bank_slot_get( bank );
822 0 : fd_funk_txn_xid_t xid = { .ul = { slot, bank->data->idx } };
823 0 : slot_info->identity_balance = FD_UNLIKELY( slot%4096==0UL ) ? get_identity_balance( ctx, xid ) : ULONG_MAX;
824 :
825 : /* Mark the bank as frozen. */
826 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
827 :
828 : /**********************************************************************/
829 : /* Bank hash comparison, and halt if there's a mismatch after replay */
830 : /**********************************************************************/
831 :
832 0 : fd_hash_t const * bank_hash = fd_bank_bank_hash_query( bank );
833 0 : FD_TEST( bank_hash );
834 :
835 : /* Must be last so we can measure completion time correctly, even
836 : though we could technically do this before the hash cmp and vote
837 : tower stuff. */
838 0 : publish_slot_completed( ctx, stem, bank, 0, 0 /* is_leader */ );
839 :
840 0 : # if FD_HAS_FLATCC
841 : /* If enabled, dump the block to a file and reset the dumping
842 : context state */
843 0 : if( FD_UNLIKELY( ctx->dump_proto_ctx && ctx->dump_proto_ctx->dump_block_to_pb ) ) {
844 0 : fd_dump_block_to_protobuf( ctx->block_dump_ctx, ctx->banks, bank, ctx->accdb, ctx->dump_proto_ctx );
845 0 : fd_block_dump_context_reset( ctx->block_dump_ctx );
846 0 : }
847 0 : # endif
848 0 : }
849 :
850 : /**********************************************************************/
851 : /* Leader bank management */
852 : /**********************************************************************/
853 :
854 : static fd_bank_t *
855 : prepare_leader_bank( fd_replay_tile_t * ctx,
856 : ulong slot,
857 : long now,
858 : fd_hash_t const * parent_block_id,
859 0 : fd_stem_context_t * stem ) {
860 0 : long before = fd_log_wallclock();
861 :
862 : /* Make sure that we are not already leader. */
863 0 : FD_TEST( ctx->leader_bank->data==NULL );
864 :
865 0 : fd_block_id_ele_t * parent_ele = fd_block_id_map_ele_query( ctx->block_id_map, parent_block_id, NULL, ctx->block_id_arr );
866 0 : if( FD_UNLIKELY( !parent_ele ) ) {
867 0 : FD_BASE58_ENCODE_32_BYTES( parent_block_id->key, parent_block_id_b58 );
868 0 : FD_LOG_CRIT(( "invariant violation: parent bank index not found for merkle root %s", parent_block_id_b58 ));
869 0 : }
870 0 : ulong parent_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, parent_ele );
871 :
872 0 : fd_bank_t parent_bank[1];
873 0 : if( FD_UNLIKELY( !fd_banks_bank_query( parent_bank, ctx->banks, parent_bank_idx ) ) ) {
874 0 : FD_LOG_CRIT(( "invariant violation: parent bank not found for bank index %lu", parent_bank_idx ));
875 0 : }
876 0 : ulong parent_slot = fd_bank_slot_get( parent_bank );
877 :
878 0 : if( FD_UNLIKELY( !fd_banks_new_bank( ctx->leader_bank, ctx->banks, parent_bank_idx, now ) ) ) {
879 0 : FD_LOG_CRIT(( "invariant violation: leader bank is NULL for slot %lu", slot ));
880 0 : }
881 :
882 0 : if( FD_UNLIKELY( !fd_banks_clone_from_parent( ctx->leader_bank, ctx->banks, ctx->leader_bank->data->idx ) ) ) {
883 0 : FD_LOG_CRIT(( "invariant violation: bank is NULL for slot %lu", slot ));
884 0 : }
885 :
886 : /* At this point we want to remove any stale block id map entry that
887 : corresponds to this bank. */
888 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ ctx->leader_bank->data->idx ];
889 0 : if( FD_LIKELY( fd_block_id_map_ele_query( ctx->block_id_map, &block_id_ele->block_id, NULL, ctx->block_id_arr )==block_id_ele ) ) {
890 0 : FD_TEST( fd_block_id_map_ele_remove( ctx->block_id_map, &block_id_ele->block_id, NULL, ctx->block_id_arr ) );
891 0 : }
892 0 : block_id_ele->block_id_seen = 0;
893 0 : block_id_ele->slot = slot;
894 :
895 0 : ctx->leader_bank->data->preparation_begin_nanos = before;
896 :
897 0 : fd_bank_slot_set( ctx->leader_bank, slot );
898 0 : fd_bank_parent_slot_set( ctx->leader_bank, parent_slot );
899 0 : ctx->leader_bank->data->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, parent_bank->data->txncache_fork_id );
900 : /* prepare the funk transaction for the leader bank */
901 0 : fd_funk_txn_xid_t xid = { .ul = { slot, ctx->leader_bank->data->idx } };
902 0 : fd_funk_txn_xid_t parent_xid = { .ul = { parent_slot, parent_bank_idx } };
903 0 : fd_accdb_attach_child( ctx->accdb_admin, &parent_xid, &xid );
904 0 : fd_progcache_txn_attach_child( ctx->progcache_admin, &parent_xid, &xid );
905 :
906 0 : fd_bank_execution_fees_set( ctx->leader_bank, 0UL );
907 0 : fd_bank_priority_fees_set( ctx->leader_bank, 0UL );
908 0 : fd_bank_shred_cnt_set( ctx->leader_bank, 0UL );
909 0 : fd_bank_tips_set( ctx->leader_bank, 0UL );
910 :
911 : /* Update block height. */
912 0 : fd_bank_block_height_set( ctx->leader_bank, fd_bank_block_height_get( ctx->leader_bank ) + 1UL );
913 :
914 0 : int is_epoch_boundary = 0;
915 0 : fd_runtime_block_execute_prepare( ctx->banks, ctx->leader_bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
916 0 : if( FD_UNLIKELY( is_epoch_boundary ) ) publish_epoch_info( ctx, stem, ctx->leader_bank, 1 );
917 :
918 0 : ulong max_tick_height;
919 0 : if( FD_UNLIKELY( FD_RUNTIME_EXECUTE_SUCCESS!=fd_runtime_compute_max_tick_height( fd_bank_ticks_per_slot_get( parent_bank ), slot, &max_tick_height ) ) ) {
920 0 : FD_LOG_CRIT(( "couldn't compute tick height/max tick height slot %lu ticks_per_slot %lu", slot, fd_bank_ticks_per_slot_get( parent_bank ) ));
921 0 : }
922 0 : fd_bank_max_tick_height_set( ctx->leader_bank, max_tick_height );
923 0 : fd_bank_tick_height_set( ctx->leader_bank, fd_bank_max_tick_height_get( parent_bank ) ); /* The parent's max tick height is our starting tick height. */
924 :
925 : /* Now that a bank has been created for the leader slot, increment the
926 : reference count until we are done with the leader slot. */
927 0 : ctx->leader_bank->data->refcnt++;
928 :
929 0 : return ctx->leader_bank;
930 0 : }
931 :
932 : static void
933 : fini_leader_bank( fd_replay_tile_t * ctx,
934 0 : fd_stem_context_t * stem ) {
935 :
936 0 : FD_TEST( ctx->leader_bank->data!=NULL );
937 0 : FD_TEST( ctx->is_leader );
938 0 : FD_TEST( ctx->block_id_arr[ ctx->leader_bank->data->idx ].block_id_seen );
939 0 : FD_TEST( ctx->recv_poh );
940 :
941 0 : ctx->leader_bank->data->last_transaction_finished_nanos = fd_log_wallclock();
942 :
943 0 : ulong curr_slot = fd_bank_slot_get( ctx->leader_bank );
944 :
945 0 : fd_sched_block_add_done( ctx->sched, ctx->leader_bank->data->idx, ctx->leader_bank->data->parent_idx, curr_slot );
946 :
947 0 : fd_runtime_block_execute_finalize( ctx->leader_bank, ctx->accdb, ctx->capture_ctx );
948 :
949 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
950 0 : cost_tracker_snap( ctx->leader_bank, slot_info );
951 0 : fd_funk_txn_xid_t xid = { .ul = { curr_slot, ctx->leader_bank->data->idx } };
952 0 : slot_info->identity_balance = FD_UNLIKELY( curr_slot%4096==0UL ) ? get_identity_balance( ctx, xid ) : ULONG_MAX;
953 :
954 0 : fd_banks_mark_bank_frozen( ctx->banks, ctx->leader_bank );
955 :
956 0 : fd_hash_t const * bank_hash = fd_bank_bank_hash_query( ctx->leader_bank );
957 0 : FD_TEST( bank_hash );
958 :
959 0 : publish_slot_completed( ctx, stem, ctx->leader_bank, 0, 1 /* is_leader */ );
960 :
961 : /* The reference on the bank is finally no longer needed. */
962 0 : ctx->leader_bank->data->refcnt--;
963 :
964 : /* We are no longer leader so we can clear the bank index we use for
965 : being the leader. */
966 0 : ctx->leader_bank->data = NULL;
967 0 : ctx->recv_poh = 0;
968 0 : ctx->is_leader = 0;
969 0 : }
970 :
971 : static void
972 : publish_root_advanced( fd_replay_tile_t * ctx,
973 0 : fd_stem_context_t * stem ) {
974 :
975 0 : fd_bank_t bank[1];
976 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, ctx->consensus_root_bank_idx ) ) ) {
977 0 : FD_LOG_CRIT(( "invariant violation: consensus root bank is NULL at bank index %lu", ctx->consensus_root_bank_idx ));
978 0 : }
979 :
980 0 : if( ctx->rpc_enabled ) {
981 0 : bank->data->refcnt++;
     982 0 :     FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for rpc", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
983 0 : }
984 :
985 : /* Increment the reference count on the consensus root bank to account
986 : for the number of exec tiles that are waiting on it. */
987 0 : bank->data->refcnt += ctx->resolv_tile_cnt;
988 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for resolv", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
989 :
990 0 : fd_replay_root_advanced_t * msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
991 0 : msg->bank_idx = bank->data->idx;
992 :
993 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_ROOT_ADVANCED, ctx->replay_out->chunk, sizeof(fd_replay_root_advanced_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
994 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_root_advanced_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
995 0 : }
996 :
997 : /* init_funk performs pre-flight checks for the account database and
998 : program cache. Ensures that the account database was set up
999 : correctly by bootstrap components (e.g. genesis or snapshot loader).
1000 : Mirrors the account database's fork tree down to the program cache. */
1001 :
1002 : static void
1003 : init_funk( fd_replay_tile_t * ctx,
1004 0 : ulong bank_slot ) {
1005 : /* Ensure that the loaded bank root corresponds to the account
1006 : database's root. */
1007 0 : fd_funk_t * funk = fd_accdb_user_v1_funk( ctx->accdb );
1008 0 : if( FD_UNLIKELY( !funk->shmem ) ) {
1009 0 : FD_LOG_CRIT(( "failed to initialize account database: replay tile is not joined to database shared memory objects" ));
1010 0 : }
1011 0 : fd_funk_txn_xid_t const * accdb_pub = fd_funk_last_publish( funk );
1012 0 : if( FD_UNLIKELY( accdb_pub->ul[0]!=bank_slot ) ) {
1013 0 : FD_LOG_CRIT(( "failed to initialize account database: accdb is at slot %lu, but chain state is at slot %lu\n"
1014 0 : "This is a bug in startup components.",
1015 0 : accdb_pub->ul[0], bank_slot ));
1016 0 : }
1017 0 : if( FD_UNLIKELY( fd_funk_last_publish_is_frozen( funk ) ) ) {
1018 0 : FD_LOG_CRIT(( "failed to initialize account database: accdb fork graph is not clean.\n"
1019 0 : "The account database should only contain state for the root slot at this point,\n"
1020 0 : "but there are incomplete database transactions leftover.\n"
1021 0 : "This is a bug in startup components." ));
1022 0 : }
1023 :
1024 : /* The program cache tracks the account database's fork graph at all
1025 : times. Perform initial synchronization: pivot from funk 'root' (a
1026 : sentinel XID) to 'last publish' (the bootstrap root slot). */
1027 0 : if( FD_UNLIKELY( !ctx->progcache_admin->funk->shmem ) ) {
1028 0 : FD_LOG_CRIT(( "failed to initialize account database: replay tile is not joined to program cache" ));
1029 0 : }
1030 0 : fd_progcache_clear( ctx->progcache_admin );
1031 :
1032 0 : fd_funk_txn_xid_t last_publish = fd_accdb_root_get( ctx->accdb_admin );
1033 0 : fd_progcache_txn_attach_child( ctx->progcache_admin, fd_funk_root( ctx->progcache_admin->funk ), &last_publish );
1034 0 : fd_progcache_txn_advance_root( ctx->progcache_admin, &last_publish );
1035 0 : }
1036 :
1037 : static void
1038 0 : init_after_snapshot( fd_replay_tile_t * ctx ) {
1039 : /* Now that the snapshot has been loaded in, we have to refresh the
1040 : stake delegations since the manifest does not contain the full set
1041 : of data required for the stake delegations. See
1042 : fd_stake_delegations.h for why this is required. */
1043 :
1044 0 : fd_bank_t bank[1];
1045 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ) ) ) {
1046 0 : FD_LOG_CRIT(( "invariant violation: replay bank is NULL at bank index %lu", FD_REPLAY_BOOT_BANK_IDX ));
1047 0 : }
1048 :
1049 0 : fd_funk_txn_xid_t xid = { .ul = { fd_bank_slot_get( bank ), bank->data->idx } };
1050 0 : init_funk( ctx, fd_bank_slot_get( bank ) );
1051 :
1052 0 : fd_stake_delegations_t * root_delegations = fd_banks_stake_delegations_root_query( ctx->banks );
1053 :
1054 0 : fd_stake_delegations_refresh( root_delegations, ctx->accdb, &xid );
1055 :
1056 : /* We want to clear out any stale vote states that may have been
    1057 :      included in the snapshot manifest.  If an Agave snapshot is created
1058 : shortly after a vote state is removed from the stakes cache (Agave
1059 : equivalent of vote states), then the vote state will be included in
1060 : the snapshot manifest. */
1061 :
1062 0 : fd_vote_states_t * vote_states = fd_bank_vote_states_locking_modify( bank );
1063 0 : fd_vote_states_iter_t iter_[1];
1064 :
1065 0 : ulong stale_vote_acc_cnt = 0UL;
1066 0 : for( fd_vote_states_iter_t * iter = fd_vote_states_iter_init( iter_, vote_states );
1067 0 : !fd_vote_states_iter_done( iter );
1068 0 : fd_vote_states_iter_next( iter ) ) {
1069 0 : fd_vote_state_ele_t * vote_state = fd_vote_states_iter_ele( iter );
1070 0 : fd_accdb_ro_t ro[1];
1071 0 : if( FD_UNLIKELY( !fd_accdb_open_ro( ctx->accdb, ro, &xid, &vote_state->vote_account ) ) ) {
1072 0 : ctx->runtime_stack.vote_accounts.stale_accs[ stale_vote_acc_cnt++ ] = vote_state->vote_account;
1073 0 : FD_BASE58_ENCODE_32_BYTES( vote_state->vote_account.uc, acc_cstr );
1074 0 : FD_LOG_DEBUG(( "vote account %s from manifest is stale", acc_cstr ));
1075 0 : } else {
1076 0 : fd_accdb_close_ro( ctx->accdb, ro );
1077 0 : }
1078 0 : }
1079 :
1080 0 : for( ulong i=0UL; i<stale_vote_acc_cnt; i++ ) {
1081 0 : fd_vote_states_remove( vote_states, &ctx->runtime_stack.vote_accounts.stale_accs[ i ] );
1082 0 : }
1083 0 : fd_bank_vote_states_end_locking_modify( bank );
1084 :
1085 : /* After both snapshots have been loaded in, we can determine if we should
1086 : start distributing rewards. */
1087 :
1088 0 : fd_rewards_recalculate_partitioned_rewards( ctx->banks, bank, ctx->accdb, &xid, &ctx->runtime_stack, ctx->capture_ctx );
1089 :
1090 0 : ulong snapshot_slot = fd_bank_slot_get( bank );
1091 0 : if( FD_UNLIKELY( !snapshot_slot ) ) {
1092 : /* Genesis-specific setup. */
1093 : /* FIXME: This branch does not set up a new block exec ctx
1094 : properly. Needs to do whatever prepare_new_block_execution
1095 : does, but just hacking that in breaks stuff. */
1096 0 : fd_runtime_update_leaders( bank, &ctx->runtime_stack );
1097 :
1098 0 : ulong hashcnt_per_slot = fd_bank_hashes_per_tick_get( bank ) * fd_bank_ticks_per_slot_get( bank );
1099 0 : fd_hash_t * poh = fd_bank_poh_modify( bank );
1100 0 : while( hashcnt_per_slot-- ) {
1101 0 : fd_sha256_hash( poh->hash, 32UL, poh->hash );
1102 0 : }
1103 :
1104 0 : int is_epoch_boundary = 0;
1105 0 : fd_runtime_block_execute_prepare( ctx->banks, bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
1106 0 : FD_TEST( !is_epoch_boundary );
1107 0 : fd_runtime_block_execute_finalize( bank, ctx->accdb, ctx->capture_ctx );
1108 :
1109 0 : snapshot_slot = 0UL;
1110 0 : }
1111 :
1112 0 : }
1113 :
1114 : static inline int
1115 : maybe_become_leader( fd_replay_tile_t * ctx,
1116 0 : fd_stem_context_t * stem ) {
1117 0 : FD_TEST( ctx->is_booted );
1118 0 : if( FD_LIKELY( ctx->next_leader_slot==ULONG_MAX || ctx->is_leader || (!ctx->has_identity_vote_rooted && ctx->wait_for_vote_to_start_leader) || ctx->replay_out->idx==ULONG_MAX ) ) return 0;
1119 :
1120 0 : FD_TEST( ctx->next_leader_slot>ctx->reset_slot );
1121 0 : long now = fd_tickcount();
1122 0 : if( FD_LIKELY( now<ctx->next_leader_tickcount ) ) return 0;
1123 :
1124 : /* TODO:
1125 : if( FD_UNLIKELY( ctx->halted_switching_key ) ) return 0; */
1126 :
1127 : /* If a prior leader is still in the process of publishing their slot,
1128 : delay ours to let them finish ... unless they are so delayed that
1129 : we risk getting skipped by the leader following us. 1.2 seconds
1130 : is a reasonable default here, although any value between 0 and 1.6
1131 : seconds could be considered reasonable. This is arbitrary and
1132 : chosen due to intuition. */
1133 0 : if( FD_UNLIKELY( now<ctx->next_leader_tickcount+(long)(3.0*ctx->slot_duration_ticks) ) ) {
1134 0 : FD_TEST( ctx->reset_bank->data );
1135 :
1136 : /* TODO: Make the max_active_descendant calculation more efficient
1137 : by caching it in the bank structure and updating it as banks are
1138 : created and completed. */
1139 0 : ulong max_active_descendant = 0UL;
1140 0 : ulong child_idx = ctx->reset_bank->data->child_idx;
1141 0 : while( child_idx!=ULONG_MAX ) {
1142 0 : fd_bank_t child_bank[1];
1143 0 : fd_banks_bank_query( child_bank, ctx->banks, child_idx );
1144 0 : max_active_descendant = fd_ulong_max( max_active_descendant, fd_bank_slot_get( child_bank ) );
1145 0 : child_idx = child_bank->data->sibling_idx;
1146 0 : }
1147 :
1148 : /* If the max_active_descendant is >= next_leader_slot, we waited
1149 : too long and a leader after us started publishing to try and skip
1150 : us. Just start our leader slot immediately, we might win ... */
1151 0 : if( FD_LIKELY( max_active_descendant>=ctx->reset_slot && max_active_descendant<ctx->next_leader_slot ) ) {
1152 : /* If one of the leaders between the reset slot and our leader
1153 : slot is in the process of publishing (they have a descendant
1154 : bank that is in progress of being replayed), then keep waiting.
1155 : We probably wouldn't get a leader slot out before they
1156 : finished.
1157 :
1158 : Unless... we are past the deadline to start our slot by more
1159 : than 1.2 seconds, in which case we should probably start it to
1160 : avoid getting skipped by the leader behind us. */
1161 0 : return 0;
1162 0 : }
1163 0 : }
1164 :
1165 0 : long now_nanos = fd_log_wallclock();
1166 :
1167 0 : ctx->is_leader = 1;
1168 0 : ctx->recv_poh = 0;
1169 :
1170 0 : FD_TEST( ctx->highwater_leader_slot==ULONG_MAX || ctx->highwater_leader_slot<ctx->next_leader_slot );
1171 0 : ctx->highwater_leader_slot = ctx->next_leader_slot;
1172 :
1173 0 : FD_LOG_INFO(( "becoming leader for slot %lu, parent slot is %lu", ctx->next_leader_slot, ctx->reset_slot ));
1174 :
1175 : /* Acquires bank, sets up initial state, and refcnts it. */
1176 0 : fd_bank_t * bank = prepare_leader_bank( ctx, ctx->next_leader_slot, now_nanos, &ctx->reset_block_id, stem );
1177 0 : fd_funk_txn_xid_t xid = { .ul = { ctx->next_leader_slot, ctx->leader_bank->data->idx } };
1178 :
1179 0 : fd_bundle_crank_tip_payment_config_t config[1] = { 0 };
1180 0 : fd_pubkey_t tip_receiver_owner = {0};
1181 :
1182 0 : if( FD_UNLIKELY( ctx->bundle.enabled ) ) {
1183 0 : fd_acct_addr_t tip_payment_config[1];
1184 0 : fd_acct_addr_t tip_receiver[1];
1185 0 : fd_bundle_crank_get_addresses( ctx->bundle.gen, fd_bank_epoch_get( bank ), tip_payment_config, tip_receiver );
1186 :
1187 0 : fd_accdb_ro_t tip_config_acc[1];
1188 0 : if( FD_UNLIKELY( !fd_accdb_open_ro( ctx->accdb, tip_config_acc, &xid, tip_payment_config ) ) ) {
1189 : /* FIXME This should not crash the validator */
1190 0 : FD_BASE58_ENCODE_32_BYTES( tip_payment_config->b, tip_config_acc_b58 );
1191 0 : FD_LOG_CRIT(( "tip payment config account %s does not exist", tip_config_acc_b58 ));
1192 0 : }
1193 0 : ulong tip_cfg_sz = fd_accdb_ref_data_sz( tip_config_acc );
1194 0 : if( FD_UNLIKELY( tip_cfg_sz < sizeof(fd_bundle_crank_tip_payment_config_t) ) ) {
1195 : /* FIXME This should not crash the validator */
1196 0 : FD_LOG_HEXDUMP_CRIT(( "invalid tip payment config account data", fd_accdb_ref_data_const( tip_config_acc ), tip_cfg_sz ));
1197 0 : }
1198 0 : memcpy( config, fd_accdb_ref_data_const( tip_config_acc ), sizeof(fd_bundle_crank_tip_payment_config_t) );
1199 0 : fd_accdb_close_ro( ctx->accdb, tip_config_acc );
1200 :
1201 : /* It is possible that the tip receiver account does not exist yet,
1202 : e.g. if this is the first time it is used in an epoch. */
1203 0 : fd_accdb_ro_t tip_receiver_acc[1];
1204 0 : if( FD_LIKELY( fd_accdb_open_ro( ctx->accdb, tip_receiver_acc, &xid, tip_receiver ) ) ) {
1205 0 : tip_receiver_owner = FD_LOAD( fd_pubkey_t, fd_accdb_ref_owner( tip_receiver_acc ) );
1206 0 : fd_accdb_close_ro( ctx->accdb, tip_receiver_acc );
1207 0 : }
1208 0 : }
1209 :
1210 :
1211 0 : fd_became_leader_t * msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1212 0 : msg->slot = ctx->next_leader_slot;
1213 0 : msg->slot_start_ns = now_nanos;
1214 0 : msg->slot_end_ns = now_nanos+(long)ctx->slot_duration_nanos;
1215 0 : msg->bank = NULL;
1216 0 : msg->bank_idx = bank->data->idx;
1217 0 : msg->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
1218 0 : msg->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
1219 0 : msg->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)msg->ticks_per_slot);
1220 0 : msg->bundle->config[0] = config[0];
1221 0 : memcpy( msg->bundle->last_blockhash, fd_bank_poh_query( bank )->hash, sizeof(fd_hash_t) );
1222 0 : memcpy( msg->bundle->tip_receiver_owner, tip_receiver_owner.uc, sizeof(fd_pubkey_t) );
1223 :
1224 0 : if( FD_UNLIKELY( msg->hashcnt_per_tick==1UL ) ) {
1225 : /* Low power producer, maximum of one microblock per tick in the slot */
1226 0 : msg->max_microblocks_in_slot = msg->ticks_per_slot;
1227 0 : } else {
1228 : /* See the long comment in after_credit for this limit */
1229 0 : msg->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, msg->ticks_per_slot*(msg->hashcnt_per_tick-1UL) );
1230 0 : }
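 : /* A quick worked example, assuming mainnet-like parameters of 64
 : ticks per slot and 12500 hashes per tick: the second branch gives
 : min( MAX_MICROBLOCKS_PER_SLOT, 64*12499 ), so the static cap binds
 : whenever it is below 799,936. A low power producer with
 : hashcnt_per_tick==1UL is instead limited to 64 microblocks. */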
1231 :
1232 0 : msg->total_skipped_ticks = msg->ticks_per_slot*(ctx->next_leader_slot-ctx->reset_slot);
1233 0 : msg->epoch = fd_slot_to_epoch( fd_bank_epoch_schedule_query( bank ), ctx->next_leader_slot, NULL );
1234 :
1235 0 : fd_cost_tracker_t const * cost_tracker = fd_bank_cost_tracker_locking_query( bank );
1236 :
1237 0 : msg->limits.slot_max_cost = ctx->larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : cost_tracker->block_cost_limit;
1238 0 : msg->limits.slot_max_vote_cost = cost_tracker->vote_cost_limit;
1239 0 : msg->limits.slot_max_write_cost_per_acct = cost_tracker->account_cost_limit;
1240 :
1241 0 : fd_bank_cost_tracker_end_locking_query( bank );
1242 :
1243 0 : if( FD_UNLIKELY( msg->ticks_per_slot+msg->total_skipped_ticks>USHORT_MAX ) ) {
1244 : /* There can be at most USHORT_MAX skipped ticks, because the
1245 : parent_offset field in the shred data is only 2 bytes wide. */
1246 0 : FD_LOG_ERR(( "too many skipped ticks %lu for slot %lu, chain must halt", msg->ticks_per_slot+msg->total_skipped_ticks, ctx->next_leader_slot ));
1247 0 : }
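 : /* E.g. with 64 ticks per slot, ticks_per_slot+total_skipped_ticks
 : works out to 64*(gap+1) where gap = next_leader_slot-reset_slot,
 : so the check above trips once the gap reaches 1023 consecutive
 : skipped slots (64*1024 = 65536 > USHORT_MAX). */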
1248 :
1249 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_BECAME_LEADER, ctx->replay_out->chunk, sizeof(fd_became_leader_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
1250 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_became_leader_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
1251 :
1252 0 : ctx->next_leader_slot = ULONG_MAX;
1253 0 : ctx->next_leader_tickcount = LONG_MAX;
1254 :
1255 0 : return 1;
1256 0 : }
1257 :
1258 : static void
1259 : process_poh_message( fd_replay_tile_t * ctx,
1260 0 : fd_poh_leader_slot_ended_t const * slot_ended ) {
1261 :
1262 0 : FD_TEST( ctx->is_booted );
1263 0 : FD_TEST( ctx->is_leader );
1264 0 : FD_TEST( ctx->leader_bank->data!=NULL );
1265 :
1266 0 : FD_TEST( ctx->highwater_leader_slot>=slot_ended->slot );
1267 0 : FD_TEST( ctx->next_leader_slot>ctx->highwater_leader_slot );
1268 :
1269 : /* Update the poh hash in the bank. We will want to maintain a refcnt
1270 : on the bank until we have received the block id for the block after
1271 : it has been shredded. */
1272 :
1273 0 : memcpy( fd_bank_poh_modify( ctx->leader_bank ), slot_ended->blockhash, sizeof(fd_hash_t) );
1274 :
1275 0 : ctx->recv_poh = 1;
1276 0 : }
1277 :
1278 : static void
1279 : publish_reset( fd_replay_tile_t * ctx,
1280 : fd_stem_context_t * stem,
1281 0 : fd_bank_t * bank ) {
1282 0 : if( FD_UNLIKELY( ctx->replay_out->idx==ULONG_MAX ) ) return;
1283 :
1284 0 : fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
1285 0 : FD_TEST( block_hash );
1286 :
1287 0 : fd_poh_reset_t * reset = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1288 :
1289 0 : reset->bank_idx = bank->data->idx;
1290 0 : reset->timestamp = fd_log_wallclock();
1291 0 : reset->completed_slot = fd_bank_slot_get( bank );
1292 0 : reset->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
1293 0 : reset->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
1294 0 : reset->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)reset->ticks_per_slot);
1295 0 : fd_memcpy( reset->completed_blockhash, block_hash->uc, sizeof(fd_hash_t) );
1296 :
1297 0 : ulong ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
1298 0 : if( FD_UNLIKELY( reset->hashcnt_per_tick==1UL ) ) {
1299 : /* Low power producer, maximum of one microblock per tick in the slot */
1300 0 : reset->max_microblocks_in_slot = ticks_per_slot;
1301 0 : } else {
1302 : /* See the long comment in after_credit for this limit */
1303 0 : reset->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ticks_per_slot*(reset->hashcnt_per_tick-1UL) );
1304 0 : }
1305 0 : reset->next_leader_slot = ctx->next_leader_slot;
1306 :
1307 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_RESET, ctx->replay_out->chunk, sizeof(fd_poh_reset_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
1308 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_poh_reset_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
1309 0 : }
1310 :
1311 : static void
1312 : boot_genesis( fd_replay_tile_t * ctx,
1313 : fd_stem_context_t * stem,
1314 : ulong in_idx,
1315 0 : ulong chunk ) {
1316 : /* If we are bootstrapping, we can't wait for our identity vote to
1317 : be rooted, as this creates a circular dependency. */
1318 0 : ctx->has_identity_vote_rooted = 1;
1319 :
1320 0 : uchar const * lthash = (uchar*)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
1321 0 : uchar const * genesis_hash = (uchar*)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk )+sizeof(fd_lthash_value_t);
1322 :
1323 0 : fd_genesis_t const * genesis = fd_type_pun( (uchar*)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk )+sizeof(fd_hash_t)+sizeof(fd_lthash_value_t) );
1324 :
1325 0 : fd_bank_t bank[1];
1326 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ) );
1327 0 : fd_funk_txn_xid_t xid = { .ul = { 0UL, FD_REPLAY_BOOT_BANK_IDX } };
1328 :
1329 : /* Do genesis-related processing in a non-rooted transaction */
1330 0 : fd_funk_txn_xid_t root_xid = { .ul = { LONG_MAX, LONG_MAX } };
1331 0 : fd_funk_txn_xid_t target_xid = { .ul = { 0UL, 0UL } };
1332 0 : fd_accdb_attach_child( ctx->accdb_admin, &root_xid, &target_xid );
1333 0 : fd_runtime_read_genesis( ctx->banks, bank, ctx->accdb, &xid, NULL, fd_type_pun_const( genesis_hash ), fd_type_pun_const( lthash ), genesis, &ctx->runtime_stack );
1334 0 : fd_accdb_advance_root( ctx->accdb_admin, &target_xid );
1335 :
1336 0 : static const fd_txncache_fork_id_t txncache_root = { .val = USHORT_MAX };
1337 0 : bank->data->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, txncache_root );
1338 :
1339 0 : fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
1340 0 : fd_txncache_finalize_fork( ctx->txncache, bank->data->txncache_fork_id, 0UL, block_hash->uc );
1341 :
1342 0 : publish_epoch_info( ctx, stem, bank, 0 );
1343 0 : publish_epoch_info( ctx, stem, bank, 1 );
1344 :
1345 : /* We call this after fd_runtime_read_genesis, which sets up the
1346 : slot_bank needed in blockstore_init. */
1347 0 : init_after_snapshot( ctx );
1348 :
1349 : /* Initialize store for genesis case, similar to snapshot case */
1350 0 : fd_hash_t genesis_block_id = { .ul[0] = FD_RUNTIME_INITIAL_BLOCK_ID };
1351 0 : fd_store_exacq( ctx->store );
1352 0 : if( FD_UNLIKELY( fd_store_root( ctx->store ) ) ) {
1353 0 : FD_LOG_CRIT(( "invariant violation: store root is not 0 for genesis" ));
1354 0 : }
1355 0 : fd_store_insert( ctx->store, 0, &genesis_block_id );
1356 0 : ctx->store->slot0 = 0UL; /* Genesis slot */
1357 0 : fd_store_exrel( ctx->store );
1358 :
1359 0 : ctx->published_root_slot = 0UL;
1360 0 : fd_sched_block_add_done( ctx->sched, bank->data->idx, ULONG_MAX, 0UL );
1361 :
1362 0 : fd_bank_block_height_set( bank, 1UL );
1363 :
1364 0 : ctx->consensus_root = (fd_hash_t){ .ul[0] = FD_RUNTIME_INITIAL_BLOCK_ID };
1365 0 : ctx->consensus_root_slot = 0UL;
1366 0 : ctx->consensus_root_bank_idx = 0UL;
1367 0 : ctx->published_root_slot = 0UL;
1368 0 : ctx->published_root_bank_idx = 0UL;
1369 :
1370 0 : ctx->reset_slot = 0UL;
1371 0 : fd_memcpy( ctx->reset_bank, bank, sizeof(fd_bank_t) );
1372 0 : ctx->reset_timestamp_nanos = fd_log_wallclock();
1373 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, 1UL, ctx->identity_pubkey );
1374 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
1375 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
1376 0 : } else {
1377 0 : ctx->next_leader_tickcount = LONG_MAX;
1378 0 : }
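 : /* A sketch of the deadline arithmetic above, with assumed values:
 : booting at reset_slot=0 with next_leader_slot=4 and a 400 ms slot
 : duration, the -1UL reflects that reset_slot itself is already
 : complete, so slots 1..3 remain and the deadline lands 3 slot
 : durations (~1.2 s) past the current tickcount. */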
1379 :
1380 0 : ctx->is_booted = 1;
1381 0 : maybe_become_leader( ctx, stem );
1382 :
1383 0 : fd_hash_t initial_block_id = { .ul = { FD_RUNTIME_INITIAL_BLOCK_ID } };
1384 0 : fd_reasm_fec_t * fec = fd_reasm_insert( ctx->reasm, &initial_block_id, NULL, 0 /* genesis slot */, 0, 0, 0, 0, 1, 0 ); /* FIXME manifest block_id */
1385 0 : fec->bank_idx = 0UL;
1386 :
1387 :
1388 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ 0 ];
1389 0 : FD_TEST( block_id_ele );
1390 0 : block_id_ele->block_id = initial_block_id;
1391 0 : block_id_ele->slot = 0UL;
1392 :
1393 0 : FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
1394 :
1395 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1396 0 : slot_info->identity_balance = get_identity_balance( ctx, xid );
1397 :
1398 0 : publish_slot_completed( ctx, stem, bank, 1, 0 /* is_leader */ );
1399 0 : publish_root_advanced( ctx, stem );
1400 0 : publish_reset( ctx, stem, bank );
1401 0 : }
1402 :
1403 : static inline void
1404 0 : maybe_verify_cluster_type( fd_replay_tile_t * ctx ) {
1405 0 : if( FD_UNLIKELY( !ctx->is_booted || !ctx->has_genesis_hash ) ) {
1406 0 : return;
1407 0 : }
1408 :
1409 0 : FD_BASE58_ENCODE_32_BYTES( ctx->genesis_hash, hash_cstr );
1410 0 : ulong cluster = fd_genesis_cluster_identify( hash_cstr );
1411 : /* Map pyth-related clusters to unknown. */
1412 0 : switch( cluster ) {
1413 0 : case FD_CLUSTER_PYTHNET:
1414 0 : case FD_CLUSTER_PYTHTEST:
1415 0 : cluster = FD_CLUSTER_UNKNOWN;
1416 0 : }
1417 :
1418 0 : if( FD_UNLIKELY( cluster!=ctx->cluster_type ) ) {
1419 0 : FD_LOG_ERR(( "Your genesis.bin file at `%s` has a genesis hash of `%s` which means the cluster is %s "
1420 0 : "but the snapshot you loaded is for a different cluster %s. If you are trying to join the "
1421 0 : "%s cluster, you can delete the genesis.bin file and restart the node to download the correct "
1422 0 : "genesis file automatically.",
1423 0 : ctx->genesis_path,
1424 0 : hash_cstr,
1425 0 : fd_genesis_cluster_name( cluster ),
1426 0 : fd_genesis_cluster_name( ctx->cluster_type ),
1427 0 : fd_genesis_cluster_name( cluster ) ));
1428 0 : }
1429 0 : }
1430 :
1431 : static void
1432 : on_snapshot_message( fd_replay_tile_t * ctx,
1433 : fd_stem_context_t * stem,
1434 : ulong in_idx,
1435 : ulong chunk,
1436 0 : ulong sig ) {
1437 0 : ulong msg = fd_ssmsg_sig_message( sig );
1438 0 : if( FD_LIKELY( msg==FD_SSMSG_DONE ) ) {
1439 : /* An end of message notification indicates the snapshot is loaded.
1440 : Replay is able to start executing from this point onwards. */
1441 : /* TODO: replay should finish booting. Could make replay a
1442 : state machine and set the state here accordingly. */
1443 0 : ctx->is_booted = 1;
1444 :
1445 0 : fd_bank_t bank[1];
1446 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ) ) ) {
1447 0 : FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", FD_REPLAY_BOOT_BANK_IDX ));
1448 0 : }
1449 :
1450 0 : ulong snapshot_slot = fd_bank_slot_get( bank );
1451 : /* FIXME: This is a hack because the block id of the snapshot slot
1452 : is not provided in the snapshot. A possible solution is to get
1453 : the block id of the snapshot slot from repair. */
1454 0 : fd_hash_t manifest_block_id = { .ul = { FD_RUNTIME_INITIAL_BLOCK_ID } };
1455 :
1456 0 : fd_store_exacq( ctx->store );
1457 0 : FD_TEST( !fd_store_root( ctx->store ) );
1458 0 : fd_store_insert( ctx->store, 0, &manifest_block_id );
1459 0 : ctx->store->slot0 = snapshot_slot; /* FIXME manifest_block_id */
1460 0 : fd_store_exrel( ctx->store );
1461 :
1462 0 : fd_funk_txn_xid_t xid = { .ul = { snapshot_slot, FD_REPLAY_BOOT_BANK_IDX } };
1463 0 : fd_features_restore( bank, ctx->accdb, &xid );
1464 :
1465 : /* Typically, when we cross an epoch boundary during normal
1466 : operation, we publish the stake weights for the new epoch. But
1467 : since we are starting from a snapshot, we need to publish two
1468 : epochs worth of stake weights: the previous epoch (which is
1469 : needed for voting on the current epoch), and the current epoch
1470 : (which is needed for voting on the next epoch). */
1471 0 : publish_epoch_info( ctx, stem, bank, 0 );
1472 0 : publish_epoch_info( ctx, stem, bank, 1 );
1473 :
1474 0 : ctx->consensus_root = manifest_block_id;
1475 0 : ctx->consensus_root_slot = snapshot_slot;
1476 0 : ctx->consensus_root_bank_idx = 0UL;
1477 0 : ctx->published_root_slot = ctx->consensus_root_slot;
1478 0 : ctx->published_root_bank_idx = 0UL;
1479 :
1480 0 : ctx->reset_slot = snapshot_slot;
1481 0 : fd_memcpy( ctx->reset_bank, bank, sizeof(fd_bank_t) );
1482 0 : ctx->reset_timestamp_nanos = fd_log_wallclock();
1483 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, 1UL, ctx->identity_pubkey );
1484 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
1485 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
1486 0 : } else {
1487 0 : ctx->next_leader_tickcount = LONG_MAX;
1488 0 : }
1489 :
1490 0 : fd_sched_block_add_done( ctx->sched, bank->data->idx, ULONG_MAX, snapshot_slot );
1491 0 : FD_TEST( bank->data->idx==0UL );
1492 :
1493 0 : fd_runtime_update_leaders( bank, &ctx->runtime_stack );
1494 :
1495 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ 0 ];
1496 0 : FD_TEST( block_id_ele );
1497 0 : block_id_ele->block_id = manifest_block_id;
1498 0 : block_id_ele->slot = snapshot_slot;
1499 0 : FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
1500 :
1501 : /* We call this after the snapshot manifest has been recovered into
1502 : the boot bank, which sets up the slot_bank needed in blockstore_init. */
1503 0 : init_after_snapshot( ctx );
1504 :
1505 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1506 0 : slot_info->identity_balance = get_identity_balance( ctx, xid );
1507 :
1508 0 : publish_slot_completed( ctx, stem, bank, 1, 0 /* is_leader */ );
1509 0 : publish_root_advanced( ctx, stem );
1510 :
1511 0 : fd_reasm_fec_t * fec = fd_reasm_insert( ctx->reasm, &manifest_block_id, NULL, snapshot_slot, 0, 0, 0, 0, 1, 0 ); /* FIXME manifest block_id */
1512 0 : fec->bank_idx = 0UL;
1513 :
1514 0 : ctx->cluster_type = fd_bank_cluster_type_get( bank );
1515 :
1516 0 : maybe_verify_cluster_type( ctx );
1517 :
1518 0 : return;
1519 0 : }
1520 :
1521 0 : switch( msg ) {
1522 0 : case FD_SSMSG_MANIFEST_FULL:
1523 0 : case FD_SSMSG_MANIFEST_INCREMENTAL: {
1524 : /* We may either receive a full snapshot manifest or an
1525 : incremental snapshot manifest. Note that this external message
1526 : id is only used temporarily because replay cannot yet receive
1527 : the firedancer-internal snapshot manifest message. */
1528 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
1529 0 : FD_LOG_ERR(( "chunk %lu from in %d corrupt, not in range [%lu,%lu]", chunk, ctx->in_kind[ in_idx ], ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
1530 :
1531 0 : fd_bank_t bank[1];
1532 0 : fd_ssload_recover( fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ),
1533 0 : ctx->banks,
1534 0 : fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ),
1535 0 : ctx->runtime_stack.stakes.vote_credits,
1536 0 : msg==FD_SSMSG_MANIFEST_INCREMENTAL );
1537 :
1538 0 : fd_snapshot_manifest_t const * manifest = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
1539 0 : ctx->hard_forks_cnt = manifest->hard_forks_len;
1540 0 : for( ulong i=0UL; i<manifest->hard_forks_len; i++ ) ctx->hard_forks[ i ] = manifest->hard_forks[ i ];
1541 0 : break;
1542 0 : }
1543 0 : default: {
1544 0 : FD_LOG_ERR(( "Received unknown snapshot message with msg %lu", msg ));
1545 0 : return;
1546 0 : }
1547 0 : }
1548 :
1549 0 : return;
1550 0 : }
1551 :
1552 : static void
1553 : dispatch_task( fd_replay_tile_t * ctx,
1554 : fd_stem_context_t * stem,
1555 0 : fd_sched_task_t * task ) {
1556 :
1557 0 : switch( task->task_type ) {
1558 0 : case FD_SCHED_TT_TXN_EXEC: {
1559 0 : fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, task->txn_exec->txn_idx );
1560 :
1561 0 : fd_bank_t bank[1];
1562 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->txn_exec->bank_idx ) );
1563 :
1564 0 : # if FD_HAS_FLATCC
1565 : /* Add the transaction to the block dumper if necessary. This
1566 : logic doesn't need to be fork-aware since it's only meant to
1567 : be used in backtest. */
1568 0 : if( FD_UNLIKELY( ctx->dump_proto_ctx && ctx->dump_proto_ctx->dump_block_to_pb ) ) {
1569 0 : fd_dump_block_to_protobuf_collect_tx( ctx->block_dump_ctx, txn_p );
1570 0 : }
1571 0 : # endif
1572 :
1573 0 : bank->data->refcnt++;
1574 :
1575 0 : if( FD_UNLIKELY( !bank->data->first_transaction_scheduled_nanos ) ) bank->data->first_transaction_scheduled_nanos = fd_log_wallclock();
1576 :
1577 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
1578 0 : fd_execrp_txn_exec_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
1579 0 : memcpy( exec_msg->txn, txn_p, sizeof(fd_txn_p_t) );
1580 0 : exec_msg->bank_idx = task->txn_exec->bank_idx;
1581 0 : exec_msg->txn_idx = task->txn_exec->txn_idx;
1582 0 : if( FD_UNLIKELY( ctx->capture_ctx ) ) {
1583 0 : exec_msg->capture_txn_idx = ctx->capture_ctx->current_txn_idx++;
1584 0 : }
1585 0 : fd_stem_publish( stem, exec_out->idx, (FD_EXECRP_TT_TXN_EXEC<<32) | task->txn_exec->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
1586 0 : exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
1587 0 : break;
1588 0 : }
1589 0 : case FD_SCHED_TT_TXN_SIGVERIFY: {
1590 0 : fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, task->txn_sigverify->txn_idx );
1591 :
1592 0 : fd_bank_t bank[1];
1593 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->txn_sigverify->bank_idx ) );
1594 0 : bank->data->refcnt++;
1595 :
1596 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
1597 0 : fd_execrp_txn_sigverify_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
1598 0 : memcpy( exec_msg->txn, txn_p, sizeof(fd_txn_p_t) );
1599 0 : exec_msg->bank_idx = task->txn_sigverify->bank_idx;
1600 0 : exec_msg->txn_idx = task->txn_sigverify->txn_idx;
1601 0 : fd_stem_publish( stem, exec_out->idx, (FD_EXECRP_TT_TXN_SIGVERIFY<<32) | task->txn_sigverify->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, 0UL );
1602 0 : exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
1603 0 : break;
1604 0 : }
1605 0 : case FD_SCHED_TT_POH_HASH: {
1606 0 : fd_bank_t bank[ 1 ];
1607 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->poh_hash->bank_idx ) );
1608 0 : bank->data->refcnt++;
1609 :
1610 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
1611 0 : fd_execrp_poh_hash_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
1612 0 : exec_msg->bank_idx = task->poh_hash->bank_idx;
1613 0 : exec_msg->mblk_idx = task->poh_hash->mblk_idx;
1614 0 : exec_msg->hashcnt = task->poh_hash->hashcnt;
1615 0 : memcpy( exec_msg->hash, task->poh_hash->hash, sizeof(fd_hash_t) );
1616 0 : fd_stem_publish( stem, exec_out->idx, (FD_EXECRP_TT_POH_HASH<<32) | task->poh_hash->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, 0UL );
1617 0 : exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
1618 0 : break;
1619 0 : }
1620 0 : default: {
1621 0 : FD_LOG_CRIT(( "unexpected task type %lu", task->task_type ));
1622 0 : }
1623 0 : }
1624 0 : }
1625 :
1626 : /* Returns 1 if charge_busy. */
1627 : static int
1628 : replay( fd_replay_tile_t * ctx,
1629 0 : fd_stem_context_t * stem ) {
1630 :
1631 0 : if( FD_UNLIKELY( !ctx->is_booted ) ) return 0;
1632 :
1633 0 : int charge_busy = 0;
1634 0 : fd_sched_task_t task[ 1 ];
1635 0 : if( FD_UNLIKELY( !fd_sched_task_next_ready( ctx->sched, task ) ) ) {
1636 0 : return charge_busy; /* Nothing to execute or do. */
1637 0 : }
1638 :
1639 0 : charge_busy = 1;
1640 :
1641 0 : switch( task->task_type ) {
1642 0 : case FD_SCHED_TT_BLOCK_START: {
1643 0 : replay_block_start( ctx, stem, task->block_start->bank_idx, task->block_start->parent_bank_idx, task->block_start->slot );
1644 0 : fd_sched_task_done( ctx->sched, FD_SCHED_TT_BLOCK_START, ULONG_MAX, ULONG_MAX, NULL );
1645 0 : break;
1646 0 : }
1647 0 : case FD_SCHED_TT_BLOCK_END: {
1648 0 : fd_bank_t bank[1];
1649 0 : fd_banks_bank_query( bank, ctx->banks, task->block_end->bank_idx );
1650 0 : if( FD_LIKELY( !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) replay_block_finalize( ctx, stem, bank );
1651 0 : fd_sched_task_done( ctx->sched, FD_SCHED_TT_BLOCK_END, ULONG_MAX, ULONG_MAX, NULL );
1652 0 : break;
1653 0 : }
1654 0 : case FD_SCHED_TT_TXN_EXEC:
1655 0 : case FD_SCHED_TT_TXN_SIGVERIFY:
1656 0 : case FD_SCHED_TT_POH_HASH: {
1657 : /* Likely/common case: we have a transaction we actually need to
1658 : execute. */
1659 0 : dispatch_task( ctx, stem, task );
1660 0 : break;
1661 0 : }
1662 0 : case FD_SCHED_TT_MARK_DEAD: {
1663 0 : fd_bank_t bank[ 1 ];
1664 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->mark_dead->bank_idx ) );
1665 0 : publish_slot_dead( ctx, stem, bank );
1666 0 : fd_banks_mark_bank_dead( ctx->banks, bank );
1667 0 : break;
1668 0 : }
1669 0 : default: {
1670 0 : FD_LOG_CRIT(( "unexpected task type %lu", task->task_type ));
1671 0 : }
1672 0 : }
1673 :
1674 0 : return charge_busy;
1675 0 : }
1676 :
1677 : static int
1678 0 : can_process_fec( fd_replay_tile_t * ctx ) {
1679 0 : fd_reasm_fec_t * fec;
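 : /* Gate FEC ingestion on four conditions, checked in order: the
 : scheduler can accept more work, reasm actually has a FEC set
 : pending, we are not racing the freeze of our own leader bank, and
 : a bank is available if this FEC set would start a new slot. */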
1680 0 : if( FD_UNLIKELY( !fd_sched_can_ingest( ctx->sched, 1UL ) ) ) {
1681 0 : ctx->metrics.sched_full++;
1682 0 : return 0;
1683 0 : }
1684 :
1685 0 : if( FD_UNLIKELY( (fec = fd_reasm_peek( ctx->reasm ))==NULL ) ) {
1686 0 : ctx->metrics.reasm_empty++;
1687 0 : return 0;
1688 0 : }
1689 :
1690 0 : ctx->metrics.reasm_latest_slot = fec->slot;
1691 0 : ctx->metrics.reasm_latest_fec_idx = fec->fec_set_idx;
1692 :
1693 0 : if( FD_UNLIKELY( ctx->is_leader && fec->fec_set_idx==0U && fd_reasm_parent( ctx->reasm, fec )->bank_idx==ctx->leader_bank->data->idx ) ) {
1694 : /* There's a race that's exceedingly rare, where we receive the
1695 : FEC set for the slot right after our leader rotation before we
1696 : freeze the bank for the last slot in our leader rotation.
1697 : Leader slot freezing happens only after we've received the
1698 : final PoH hash from the poh tile as well as the final FEC set
1699 : for the leader slot. So the race happens when FEC sets are
1700 : delivered and processed sooner than the PoH hash, aka when the
1701 : poh=>shred=>replay path for the block id somehow beats the
1702 : poh=>replay path for the poh hash. To mitigate this race,
1703 : we must block on ingesting the FEC set for the ensuing slot
1704 : before the leader bank freezes, because that would violate
1705 : ordering invariants in banks and sched. */
1706 0 : FD_TEST( ctx->block_id_arr[ ctx->leader_bank->data->idx ].block_id_seen );
1707 0 : FD_TEST( !ctx->recv_poh );
1708 0 : ctx->metrics.leader_bid_wait++;
1709 0 : return 0;
1710 0 : }
1711 :
1712 : /* If fec_set_idx is 0, we need a new bank for a new slot. Banks must
1713 : not be full in this case. */
1714 0 : if( FD_UNLIKELY( fd_banks_is_full( ctx->banks ) && fec->fec_set_idx==0 ) ) {
1715 0 : ctx->metrics.banks_full++;
1716 0 : return 0;
1717 0 : }
1718 :
1719 : /* Otherwise, either banks are not full, so we can create a new
1720 : bank if needed; or banks are full, but the FEC set at idx 0 for
1721 : this slot already created the bank we will reuse. */
1722 0 : return 1;
1723 0 : }
1724 :
1725 : static void
1726 : process_fec_set( fd_replay_tile_t * ctx,
1727 : fd_stem_context_t * stem,
1728 0 : fd_reasm_fec_t * reasm_fec ) {
1729 0 : long now = fd_log_wallclock();
1730 :
1731 : /* Linking only requires a shared lock because the fields that are
1732 : modified are only read on publish which uses exclusive lock. */
1733 :
1734 0 : long shacq_start, shacq_end, shrel_end;
1735 :
1736 0 : FD_STORE_SHARED_LOCK( ctx->store, shacq_start, shacq_end, shrel_end ) {
1737 0 : if( FD_UNLIKELY( !fd_store_link( ctx->store, &reasm_fec->key, &reasm_fec->cmr ) ) ) {
1738 0 : FD_BASE58_ENCODE_32_BYTES( reasm_fec->key.key, key_b58 );
1739 0 : FD_BASE58_ENCODE_32_BYTES( reasm_fec->cmr.key, cmr_b58 );
1740 0 : FD_LOG_WARNING(( "failed to link %s %s. slot %lu fec_set_idx %u", key_b58, cmr_b58, reasm_fec->slot, reasm_fec->fec_set_idx ));
1741 0 : }
1742 0 : } FD_STORE_SHARED_LOCK_END;
1743 0 : fd_histf_sample( ctx->metrics.store_link_wait, (ulong)fd_long_max( shacq_end - shacq_start, 0L ) );
1744 0 : fd_histf_sample( ctx->metrics.store_link_work, (ulong)fd_long_max( shrel_end - shacq_end, 0L ) );
1745 :
1746 : /* Update the reasm_fec with the correct bank index and parent bank
1747 : index. If the FEC belongs to a leader, we have already allocated
1748 : a bank index for the FEC and it just needs to be propagated to the
1749 : reasm_fec. */
1750 :
1751 0 : reasm_fec->parent_bank_idx = fd_reasm_parent( ctx->reasm, reasm_fec )->bank_idx;
1752 :
1753 0 : if( FD_UNLIKELY( reasm_fec->is_leader ) ) {
1754 : /* If we are the leader we just need to copy in the bank index that
1755 : the leader slot is using. */
1756 0 : FD_TEST( ctx->leader_bank->data!=NULL );
1757 0 : reasm_fec->bank_idx = ctx->leader_bank->data->idx;
1758 0 : } else if( FD_UNLIKELY( reasm_fec->fec_set_idx==0U ) ) {
1759 : /* If we are seeing a FEC with fec set idx 0, this means that we are
1760 : starting a new slot, and we need a new bank index. */
1761 0 : fd_bank_t bank[1];
1762 0 : reasm_fec->bank_idx = fd_banks_new_bank( bank, ctx->banks, reasm_fec->parent_bank_idx, now )->data->idx;
1763 0 : FD_LOG_DEBUG(( "reserving bank_idx=%lu for slot=%lu", reasm_fec->bank_idx, reasm_fec->slot ));
1764 : /* At this point remove any stale entry in the block id map if it
1765 : exists and set the block id as not having been seen yet. This is
1766 : safe because we know that the old entry for this bank index has
1767 : already been pruned away. */
1768 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ reasm_fec->bank_idx ];
1769 0 : if( FD_LIKELY( fd_block_id_map_ele_query( ctx->block_id_map, &block_id_ele->block_id, NULL, ctx->block_id_arr )==block_id_ele ) ) {
1770 0 : FD_TEST( fd_block_id_map_ele_remove( ctx->block_id_map, &block_id_ele->block_id, NULL, ctx->block_id_arr ) );
1771 0 : }
1772 0 : block_id_ele->block_id_seen = 0;
1773 0 : block_id_ele->slot = reasm_fec->slot;
1774 0 : } else {
1775 : /* We are continuing to execute through a slot that we already have
1776 : a bank index for. */
1777 0 : reasm_fec->bank_idx = reasm_fec->parent_bank_idx;
1778 0 : }
1779 :
1780 0 : if( FD_UNLIKELY( reasm_fec->slot_complete ) ) {
1781 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ reasm_fec->bank_idx ];
1782 0 : FD_TEST( block_id_ele );
1783 :
1784 0 : block_id_ele->block_id_seen = 1;
1785 0 : block_id_ele->block_id = reasm_fec->key;
1786 0 : FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
1787 0 : }
1788 :
1789 0 : if( FD_UNLIKELY( reasm_fec->is_leader ) ) {
1790 : /* If we are the leader, we don't need to process the FEC set any
1791 : further. */
1792 0 : return;
1793 0 : }
1794 :
1795 : /* Forks form a partial ordering over FEC sets. The Repair tile
1796 : delivers FEC sets in-order per fork, but FEC set ordering across
1797 : forks is arbitrary. */
1798 0 : fd_sched_fec_t sched_fec[ 1 ];
1799 :
1800 : # if DEBUG_LOGGING
1801 : FD_BASE58_ENCODE_32_BYTES( reasm_fec->key.key, key_b58 );
1802 : FD_BASE58_ENCODE_32_BYTES( reasm_fec->cmr.key, cmr_b58 );
1803 : FD_LOG_INFO(( "replay processing FEC set for slot %lu fec_set_idx %u, mr %s cmr %s", reasm_fec->slot, reasm_fec->fec_set_idx, key_b58, cmr_b58 ));
1804 : # endif
1805 :
1806 : /* Read FEC set from the store. This should happen before we try to
1807 : ingest the FEC set. This allows us to filter out frags that were
1808 : in-flight when we published away the minority forks those frags
1809 : land on. These frags would have no bank to execute against, because
1810 : their corresponding banks, or parent banks, have also been pruned
1811 : during publishing. A query against store will rightfully tell us
1812 : that the underlying data is not found, implying that this is for a
1813 : minority fork that we can safely ignore. */
1814 0 : FD_STORE_SHARED_LOCK( ctx->store, shacq_start, shacq_end, shrel_end ) {
1815 0 : fd_store_fec_t * store_fec = fd_store_query( ctx->store, &reasm_fec->key );
1816 0 : if( FD_UNLIKELY( !store_fec ) ) {
1817 : /* The only case in which a FEC is not found in the store after
1818 : repair has notified is if the FEC was on a minority fork that
1819 : has already been published away. In this case we abandon the
1820 : entire slice because it is no longer relevant. */
1821 0 : FD_BASE58_ENCODE_32_BYTES( reasm_fec->key.key, key_b58 );
1822 0 : FD_LOG_WARNING(( "store fec for slot: %lu is on minority fork already pruned by publish. abandoning slice. root: %lu. pruned merkle: %s", reasm_fec->slot, ctx->consensus_root_slot, key_b58 ));
1823 0 : return;
1824 0 : }
1825 0 : FD_TEST( store_fec );
1826 0 : sched_fec->fec = store_fec;
1827 0 : sched_fec->shred_cnt = reasm_fec->data_cnt;
1828 0 : } FD_STORE_SHARED_LOCK_END;
1829 :
1830 0 : fd_histf_sample( ctx->metrics.store_read_wait, (ulong)fd_long_max( shacq_end - shacq_start, 0L ) );
1831 0 : fd_histf_sample( ctx->metrics.store_read_work, (ulong)fd_long_max( shrel_end - shacq_end, 0L ) );
1832 :
1833 0 : sched_fec->is_last_in_batch = !!reasm_fec->data_complete;
1834 0 : sched_fec->is_last_in_block = !!reasm_fec->slot_complete;
1835 0 : sched_fec->bank_idx = reasm_fec->bank_idx;
1836 0 : sched_fec->parent_bank_idx = reasm_fec->parent_bank_idx;
1837 0 : sched_fec->slot = reasm_fec->slot;
1838 0 : sched_fec->parent_slot = reasm_fec->slot - reasm_fec->parent_off;
1839 0 : sched_fec->is_first_in_block = reasm_fec->fec_set_idx==0U;
1840 0 : fd_funk_txn_xid_t const root = fd_accdb_root_get( ctx->accdb_admin );
1841 0 : fd_funk_txn_xid_copy( sched_fec->alut_ctx->xid, &root );
1842 0 : sched_fec->alut_ctx->accdb[0] = ctx->accdb[0];
1843 0 : sched_fec->alut_ctx->els = ctx->published_root_slot;
1844 :
1845 0 : fd_bank_t bank[1];
1846 0 : fd_banks_bank_query( bank, ctx->banks, sched_fec->bank_idx );
1847 0 : if( FD_UNLIKELY( bank->data->flags&FD_BANK_FLAGS_DEAD ) ) {
1848 0 : if( FD_UNLIKELY( reasm_fec->slot_complete ) ) publish_slot_dead( ctx, stem, bank );
1849 0 : return;
1850 0 : }
1851 :
1852 0 : if( sched_fec->is_first_in_block ) {
1853 0 : bank->data->refcnt++;
1854 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for sched", bank->data->idx, sched_fec->slot, bank->data->refcnt ));
1855 0 : }
1856 :
1857 0 : if( FD_UNLIKELY( !fd_sched_fec_ingest( ctx->sched, sched_fec ) ) ) {
1858 0 : fd_banks_mark_bank_dead( ctx->banks, bank );
1859 0 : }
1860 0 : }
1861 :
1862 : /* accdb_advance_root moves account records from the unrooted to the
1863 : rooted database. */
1864 :
1865 : static inline ulong
1866 0 : accdb_root_op_total( fd_replay_tile_t const * ctx ) {
1867 0 : return ctx->accdb_admin->base.root_cnt +
1868 0 : ctx->accdb_admin->base.reclaim_cnt;
1869 0 : }
1870 :
1871 : static void
1872 : accdb_advance_root( fd_replay_tile_t * ctx,
1873 : ulong slot,
1874 0 : ulong bank_idx ) {
1875 0 : fd_funk_txn_xid_t xid = { .ul[0] = slot, .ul[1] = bank_idx };
1876 0 : FD_LOG_DEBUG(( "advancing root to slot=%lu", slot ));
1877 :
1878 0 : long rooted_accounts = -(long)accdb_root_op_total( ctx );
1879 0 : long root_accounts_dt = -fd_tickcount();
1880 0 : fd_accdb_advance_root( ctx->accdb_admin, &xid );
1881 0 : rooted_accounts += (long)accdb_root_op_total( ctx );
1882 0 : root_accounts_dt += fd_tickcount();
1883 0 : fd_histf_sample( ctx->metrics.root_slot_dur, (ulong)root_accounts_dt );
1884 0 : fd_histf_sample( ctx->metrics.root_account_dur, (ulong)root_accounts_dt / (ulong)fd_long_max( rooted_accounts, 1L ) );
1885 :
1886 0 : fd_progcache_txn_advance_root( ctx->progcache_admin, &xid );
1887 0 : }
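 : /* Note the xid convention used above and throughout this tile:
 : word 0 is the slot and word 1 is the bank index, i.e.
 :
 : fd_funk_txn_xid_t xid = { .ul = { slot, bank_idx } };
 :
 : Keying by (slot, bank index) rather than slot alone presumably
 : keeps xids unique when multiple banks exist for the same slot
 : (e.g. across equivocating forks). */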
1888 :
1889 : static int
1890 0 : advance_published_root( fd_replay_tile_t * ctx ) {
1891 :
1892 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &ctx->consensus_root, NULL, ctx->block_id_arr );
1893 0 : if( FD_UNLIKELY( !block_id_ele ) ) {
1894 0 : FD_BASE58_ENCODE_32_BYTES( ctx->consensus_root.key, consensus_root_b58 );
1895 0 : FD_LOG_CRIT(( "invariant violation: block id ele not found for consensus root %s", consensus_root_b58 ));
1896 0 : }
1897 0 : ulong target_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
1898 :
1899 : /* If the identity vote has been seen on a bank that should be rooted,
1900 : then we are now ready to produce blocks. */
1901 0 : if( FD_UNLIKELY( !ctx->has_identity_vote_rooted ) ) {
1902 0 : fd_bank_t root_bank[1];
1903 0 : if( FD_UNLIKELY( !fd_banks_bank_query( root_bank, ctx->banks, target_bank_idx ) ) ) FD_LOG_CRIT(( "invariant violation: root bank not found for bank index %lu", target_bank_idx ));
1904 0 : if( FD_LIKELY( fd_bank_has_identity_vote_get( root_bank ) ) ) ctx->has_identity_vote_rooted = 1;
1905 0 : }
1906 :
1907 0 : ulong advanceable_root_idx = ULONG_MAX;
1908 0 : if( FD_UNLIKELY( !fd_banks_advance_root_prepare( ctx->banks, target_bank_idx, &advanceable_root_idx ) ) ) {
1909 0 : ctx->metrics.storage_root_behind++;
1910 0 : return 0;
1911 0 : }
1912 :
1913 0 : fd_bank_t bank[1];
1914 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, advanceable_root_idx ) );
1915 :
1916 0 : fd_block_id_ele_t * advanceable_root_ele = &ctx->block_id_arr[ advanceable_root_idx ];
1917 0 : if( FD_UNLIKELY( !advanceable_root_ele ) ) {
1918 0 : FD_LOG_CRIT(( "invariant violation: advanceable root ele not found for bank index %lu", advanceable_root_idx ));
1919 0 : }
1920 :
1921 0 : long exacq_start, exacq_end, exrel_end;
1922 0 : FD_STORE_EXCLUSIVE_LOCK( ctx->store, exacq_start, exacq_end, exrel_end ) {
1923 0 : fd_store_publish( ctx->store, &advanceable_root_ele->block_id );
1924 0 : } FD_STORE_EXCLUSIVE_LOCK_END;
1925 :
1926 0 : fd_histf_sample( ctx->metrics.store_publish_wait, (ulong)fd_long_max( exacq_end-exacq_start, 0L ) );
1927 0 : fd_histf_sample( ctx->metrics.store_publish_work, (ulong)fd_long_max( exrel_end-exacq_end, 0L ) );
1928 :
1929 0 : ulong advanceable_root_slot = fd_bank_slot_get( bank );
1930 0 : accdb_advance_root( ctx, advanceable_root_slot, bank->data->idx );
1931 :
1932 0 : fd_txncache_advance_root( ctx->txncache, bank->data->txncache_fork_id );
1933 0 : fd_sched_advance_root( ctx->sched, advanceable_root_idx );
1934 0 : fd_banks_advance_root( ctx->banks, advanceable_root_idx );
1935 0 : fd_reasm_publish( ctx->reasm, &advanceable_root_ele->block_id );
1936 :
1937 0 : ctx->published_root_slot = advanceable_root_slot;
1938 0 : ctx->published_root_bank_idx = advanceable_root_idx;
1939 :
1940 0 : return 1;
1941 0 : }
1942 :
1943 : static void
1944 : after_credit( fd_replay_tile_t * ctx,
1945 : fd_stem_context_t * stem,
1946 : int * opt_poll_in,
1947 0 : int * charge_busy ) {
1948 0 : if( FD_UNLIKELY( !ctx->is_booted ) ) return;
1949 :
1950 0 : if( FD_UNLIKELY( maybe_become_leader( ctx, stem ) ) ) {
1951 0 : *charge_busy = 1;
1952 0 : *opt_poll_in = 0;
1953 0 : return;
1954 0 : }
1955 :
1956 : /* If we are leader, we can only unbecome the leader once we have
1957 : received both the poh hash from the poh tile and the block id from reasm. */
1958 0 : if( FD_UNLIKELY( ctx->is_leader && ctx->recv_poh && ctx->block_id_arr[ ctx->leader_bank->data->idx ].block_id_seen ) ) {
1959 0 : fini_leader_bank( ctx, stem );
1960 0 : *charge_busy = 1;
1961 0 : *opt_poll_in = 0;
1962 0 : return;
1963 0 : }
1964 :
1965 0 : ulong bank_idx;
1966 0 : while( (bank_idx=fd_sched_pruned_block_next( ctx->sched ))!=ULONG_MAX ) {
1967 0 : fd_bank_t bank[1];
1968 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, bank_idx ) );
1969 0 : bank->data->refcnt--;
1970 0 : FD_LOG_DEBUG(( "bank (idx=%lu) refcnt decremented to %lu for sched", bank->data->idx, bank->data->refcnt ));
1971 0 : }
1972 :
1973 : /* If the published_root is not caught up to the consensus root, then
1974 : we should try to advance the published root. */
1975 0 : if( FD_UNLIKELY( ctx->consensus_root_bank_idx!=ctx->published_root_bank_idx && advance_published_root( ctx ) ) ) {
1976 0 : *charge_busy = 1;
1977 0 : *opt_poll_in = 0;
1978 0 : return;
1979 0 : }
1980 :
1981 : /* If the reassembler has a fec that is ready, we should process it
1982 : and pass it to the scheduler. */
1983 0 : if( FD_LIKELY( can_process_fec( ctx ) ) ) {
1984 0 : fd_reasm_fec_t * fec = fd_reasm_pop( ctx->reasm );
1985 0 : process_fec_set( ctx, stem, fec );
1986 0 : *charge_busy = 1;
1987 0 : *opt_poll_in = 0;
1988 0 : return;
1989 0 : }
1990 :
1991 0 : *charge_busy = replay( ctx, stem );
1992 0 : *opt_poll_in = !*charge_busy;
1993 0 : }
1994 :
1995 : static int
1996 : before_frag( fd_replay_tile_t * ctx,
1997 : ulong in_idx,
1998 : ulong seq FD_PARAM_UNUSED,
1999 0 : ulong sig FD_PARAM_UNUSED ) {
2000 :
2001 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_SHRED ) ) {
2002 : /* If reasm is full, we cannot insert any more FEC sets. We must
2003 : not consume any frags from shred_out until reasm can process more
2004 : FEC sets. */
2005 :
2006 0 : if( FD_UNLIKELY( !fd_reasm_free( ctx->reasm ) ) ) {
2007 0 : return -1;
2008 0 : }
2009 0 : }
2010 :
2011 0 : return 0;
2012 0 : }
2013 :
2014 : static void
2015 : process_exec_task_done( fd_replay_tile_t * ctx,
2016 : fd_stem_context_t * stem,
2017 : fd_execrp_task_done_msg_t * msg,
2018 0 : ulong sig ) {
2019 :
2020 0 : ulong exec_tile_idx = sig&0xFFFFFFFFUL;
2021 :
2022 0 : fd_bank_t bank[1];
2023 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, msg->bank_idx ) );
2024 0 : FD_TEST( bank->data );
2025 0 : bank->data->refcnt--;
2026 :
2027 0 : switch( sig>>32 ) {
2028 0 : case FD_EXECRP_TT_TXN_EXEC: {
2029 0 : if( FD_UNLIKELY( !ctx->has_identity_vote_rooted ) ) {
2030 : /* Query the txn signature against our recently generated vote
2031 : txn signatures. If the query is successful, then we have
2032 : seen our own vote transaction land and this should be marked
2033 : in the bank. We go through this exercise until we've seen
2034 : our vote rooted. */
2035 0 : fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, msg->txn_exec->txn_idx );
2036 0 : if( fd_vote_tracker_query_sig( ctx->vote_tracker, fd_type_pun_const( txn_p->payload+TXN( txn_p )->signature_off ) ) ) {
2037 0 : *fd_bank_has_identity_vote_modify( bank ) += 1;
2038 0 : }
2039 0 : }
2040 0 : if( FD_UNLIKELY( msg->txn_exec->err && !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) {
2041 : /* Every transaction in a valid block has to execute.
2042 : Otherwise, we should mark the block as dead. */
2043 0 : fd_banks_mark_bank_dead( ctx->banks, bank );
2044 0 : fd_sched_block_abandon( ctx->sched, bank->data->idx );
2045 :
2046 : /* We can only publish the slot as dead if we have seen the
2047 : block id for this slot. */
2048 0 : if( ctx->block_id_arr[ bank->data->idx ].block_id_seen ) {
2049 0 : publish_slot_dead( ctx, stem, bank );
2050 0 : }
2051 0 : }
2052 0 : if( FD_UNLIKELY( (bank->data->flags&FD_BANK_FLAGS_DEAD) && bank->data->refcnt==0UL ) ) {
2053 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
2054 0 : }
2055 0 : int res = fd_sched_task_done( ctx->sched, FD_SCHED_TT_TXN_EXEC, msg->txn_exec->txn_idx, exec_tile_idx, NULL );
2056 0 : FD_TEST( res==0 );
2057 0 : break;
2058 0 : }
2059 0 : case FD_EXECRP_TT_TXN_SIGVERIFY: {
2060 0 : if( FD_UNLIKELY( msg->txn_sigverify->err && !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) {
2061 : /* Every transaction in a valid block has to sigverify.
2062 : Otherwise, we should mark the block as dead. Also freeze the
2063 : bank if possible. */
2064 0 : fd_banks_mark_bank_dead( ctx->banks, bank );
2065 0 : fd_sched_block_abandon( ctx->sched, bank->data->idx );
2066 :
2067 : /* We can only publish the slot as dead if we have seen the
2068 : block id for this slot. */
2069 0 : if( ctx->block_id_arr[ bank->data->idx ].block_id_seen ) {
2070 0 : publish_slot_dead( ctx, stem, bank );
2071 0 : }
2072 0 : }
2073 0 : if( FD_UNLIKELY( (bank->data->flags&FD_BANK_FLAGS_DEAD) && bank->data->refcnt==0UL ) ) {
2074 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
2075 0 : }
2076 0 : int res = fd_sched_task_done( ctx->sched, FD_SCHED_TT_TXN_SIGVERIFY, msg->txn_sigverify->txn_idx, exec_tile_idx, NULL );
2077 0 : FD_TEST( res==0 );
2078 0 : break;
2079 0 : }
2080 0 : case FD_EXECRP_TT_POH_HASH: {
2081 0 : int res = fd_sched_task_done( ctx->sched, FD_SCHED_TT_POH_HASH, ULONG_MAX, exec_tile_idx, msg->poh_hash );
2082 0 : if( FD_UNLIKELY( res<0 && !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) {
2083 0 : fd_banks_mark_bank_dead( ctx->banks, bank );
2084 :
2085 0 : if( ctx->block_id_arr[ bank->data->idx ].block_id_seen ) {
2086 0 : publish_slot_dead( ctx, stem, bank );
2087 0 : }
2088 0 : }
2089 0 : if( FD_UNLIKELY( (bank->data->flags&FD_BANK_FLAGS_DEAD) && bank->data->refcnt==0UL ) ) {
2090 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
2091 0 : }
2092 0 : break;
2093 0 : }
2094 0 : default: FD_LOG_CRIT(( "unexpected sig 0x%lx", sig ));
2095 0 : }
2096 :
2097 : /* Reference counter just decreased, and an exec tile just got freed
2098 : up. If there's a need to prune more aggressively, we could
2099 : check here if more slots just became publishable and publish. Not
2100 : publishing here shouldn't bloat the fork tree too much though. We
2101 : mark minority forks dead as soon as we can, and execution dispatch
2102 : stops on dead blocks. So shortly afterwards, dead blocks should be
2103 : eligible for pruning as in-flight transactions retire from the
2104 : execution pipeline. */
2105 :
2106 0 : }
2107 :
2108 : static void
2109 : process_tower_slot_done( fd_replay_tile_t * ctx,
2110 : fd_stem_context_t * stem,
2111 : fd_tower_slot_done_t const * msg,
2112 0 : ulong seq ) {
2113 0 : fd_bank_t replay_bank[1];
2114 0 : if( FD_UNLIKELY( !fd_banks_bank_query( replay_bank, ctx->banks, msg->replay_bank_idx ) ) ) FD_LOG_CRIT(( "invariant violation: bank not found for bank index %lu", msg->replay_bank_idx ));
2115 0 : replay_bank->data->refcnt--;
2116 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt decremented to %lu for tower", replay_bank->data->idx, msg->replay_slot, replay_bank->data->refcnt ));
2117 :
2118 0 : ctx->reset_block_id = msg->reset_block_id;
2119 0 : ctx->reset_slot = msg->reset_slot;
2120 0 : ctx->reset_timestamp_nanos = fd_log_wallclock();
2121 0 : ulong min_leader_slot = fd_ulong_max( msg->reset_slot+1UL, fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot+1UL ) );
2122 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, min_leader_slot, ctx->identity_pubkey );
2123 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
2124 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
2125 0 : } else {
2126 0 : ctx->next_leader_tickcount = LONG_MAX;
2127 0 : }
2128 :
2129 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->reset_block_id, NULL, ctx->block_id_arr );
2130 0 : if( FD_UNLIKELY( !block_id_ele ) ) {
2131 0 : FD_BASE58_ENCODE_32_BYTES( msg->reset_block_id.key, reset_block_id_b58 );
2132 0 : FD_LOG_CRIT(( "invariant violation: block id ele doesn't exist for reset block id: %s, slot: %lu", reset_block_id_b58, msg->reset_slot ));
2133 0 : }
2134 0 : ulong reset_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
2135 :
2136 0 : fd_bank_t bank[1];
2137 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, reset_bank_idx ) ) ) {
2138 0 : FD_LOG_CRIT(( "invariant violation: bank not found for bank index %lu", reset_bank_idx ));
2139 0 : }
2140 :
2141 0 : if( FD_LIKELY( msg->root_slot!=ULONG_MAX ) ) FD_TEST( msg->root_slot<=msg->reset_slot );
2142 0 : fd_memcpy( ctx->reset_bank, bank, sizeof(fd_bank_t) );
2143 :
2144 0 : if( FD_LIKELY( ctx->replay_out->idx!=ULONG_MAX ) ) {
2145 0 : fd_poh_reset_t * reset = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
2146 :
2147 0 : reset->bank_idx = bank->data->idx;
2148 0 : reset->timestamp = ctx->reset_timestamp_nanos;
2149 0 : reset->completed_slot = ctx->reset_slot;
2150 0 : reset->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
2151 0 : reset->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
2152 0 : reset->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)reset->ticks_per_slot);
2153 :
2154 0 : fd_memcpy( reset->completed_block_id, &block_id_ele->block_id, sizeof(fd_hash_t) );
2155 :
2156 0 : fd_blockhashes_t const * block_hash_queue = fd_bank_block_hash_queue_query( bank );
2157 0 : fd_hash_t const * last_hash = fd_blockhashes_peek_last_hash( block_hash_queue );
2158 0 : FD_TEST( last_hash );
2159 0 : fd_memcpy( reset->completed_blockhash, last_hash->uc, sizeof(fd_hash_t) );
2160 :
2161 0 : ulong ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
2162 0 : if( FD_UNLIKELY( reset->hashcnt_per_tick==1UL ) ) {
2163 : /* Low power producer, maximum of one microblock per tick in the slot */
2164 0 : reset->max_microblocks_in_slot = ticks_per_slot;
2165 0 : } else {
2166 : /* See the long comment in after_credit for this limit */
2167 0 : reset->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ticks_per_slot*(reset->hashcnt_per_tick-1UL) );
2168 0 : }
2169 0 : reset->next_leader_slot = ctx->next_leader_slot;
2170 :
2171 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_RESET, ctx->replay_out->chunk, sizeof(fd_poh_reset_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
2172 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_poh_reset_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
2173 0 : }
2174 :
2175 0 : FD_LOG_INFO(( "tower_slot_done(reset_slot=%lu, next_leader_slot=%lu, vote_slot=%lu, replay_slot=%lu, root_slot=%lu, seqno=%lu)", msg->reset_slot, ctx->next_leader_slot, msg->vote_slot, msg->replay_slot, msg->root_slot, seq ));
2176 0 : maybe_become_leader( ctx, stem );
2177 :
2178 0 : if( FD_LIKELY( msg->root_slot!=ULONG_MAX ) ) {
2179 :
2180 0 : FD_TEST( msg->root_slot>=ctx->consensus_root_slot );
2181 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->root_block_id, NULL, ctx->block_id_arr );
2182 0 : FD_TEST( block_id_ele );
2183 0 : ctx->consensus_root_slot = msg->root_slot;
2184 0 : ctx->consensus_root = msg->root_block_id;
2185 0 : ctx->consensus_root_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
2186 :
2187 0 : publish_root_advanced( ctx, stem );
2188 :
2189 0 : fd_sched_root_notify( ctx->sched, ctx->consensus_root_bank_idx );
2190 0 : }
2191 :
2192 0 : ulong distance = 0UL;
2193 0 : fd_bank_t * parent = bank;
2194 0 : while( parent ) {
2195 0 : if( FD_UNLIKELY( parent->data->idx==ctx->consensus_root_bank_idx ) ) break;
2196 0 : parent = fd_banks_get_parent( bank, ctx->banks, parent );
2197 0 : distance++;
2198 0 : }
2199 :
2200 0 : FD_MGAUGE_SET( REPLAY, ROOT_DISTANCE, distance );
2201 0 : }
2202 :
2203 : static void
2204 : process_fec_complete( fd_replay_tile_t * ctx,
2205 0 : uchar const * shred_buf ) {
2206 0 : fd_shred_t const * shred = (fd_shred_t const *)fd_type_pun_const( shred_buf );
2207 :
2208 0 : fd_hash_t const * merkle_root = (fd_hash_t const *)fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ );
2209 0 : fd_hash_t const * chained_merkle_root = (fd_hash_t const *)fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) );
2210 0 : int is_leader_fec = *(int const *) fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) + sizeof(fd_hash_t) );
2211 :
2212 0 : int data_complete = !!( shred->data.flags & FD_SHRED_DATA_FLAG_DATA_COMPLETE );
2213 0 : int slot_complete = !!( shred->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE );
2214 :
2215 0 : FD_TEST( !fd_reasm_query( ctx->reasm, merkle_root ) );
2216 0 : if( FD_UNLIKELY( shred->slot - shred->data.parent_off == fd_reasm_slot0( ctx->reasm ) && shred->fec_set_idx == 0 ) ) {
2217 0 : chained_merkle_root = &fd_reasm_root( ctx->reasm )->key;
2218 0 : }
2219 :
2220 0 : FD_TEST( fd_reasm_free( ctx->reasm ) );
2221 :
2222 0 : FD_TEST( fd_reasm_insert( ctx->reasm, merkle_root, chained_merkle_root, shred->slot, shred->fec_set_idx, shred->data.parent_off, (ushort)(shred->idx - shred->fec_set_idx + 1), data_complete, slot_complete, is_leader_fec ) );
2223 0 : }
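 : /* For reference, the FEC-complete message parsed above is a data
 : shred header followed by a fixed trailer. A hypothetical overlay
 : (not an actual struct in the codebase, shown only to document the
 : offsets used above):
 :
 : struct fec_complete_msg {
 : uchar shred[ FD_SHRED_DATA_HEADER_SZ ]; // data shred header
 : fd_hash_t merkle_root; // at FD_SHRED_DATA_HEADER_SZ
 : fd_hash_t chained_merkle_root; // +sizeof(fd_hash_t)
 : int is_leader_fec; // +2*sizeof(fd_hash_t)
 : };
 :
 : The sz check for IN_KIND_SHRED in returnable_frag matches this
 : layout exactly. */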
2224 :
2225 : static void
2226 0 : process_resolv_slot_completed( fd_replay_tile_t * ctx, ulong bank_idx ) {
2227 0 : fd_bank_t bank[1];
2228 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, bank_idx ) );
2229 0 : bank->data->refcnt--;
2230 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt decremented to %lu for resolv", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
2231 0 : }
2232 :
2233 : static void
2234 : process_vote_txn_sent( fd_replay_tile_t * ctx,
2235 0 : fd_txn_m_t * txnm ) {
2236 : /* The send tile has signed and sent a vote. Add this vote to the
2237 : vote tracker. We go through this exercise until we've seen our
2238 : vote rooted. */
2239 0 : if( FD_UNLIKELY( !ctx->has_identity_vote_rooted ) ) {
2240 0 : uchar * payload = ((uchar *)txnm) + sizeof(fd_txn_m_t);
2241 0 : uchar txn_mem[ FD_TXN_MAX_SZ ] __attribute__((aligned(alignof(fd_txn_t))));
2242 0 : fd_txn_t * txn = (fd_txn_t *)txn_mem;
2243 0 : if( FD_UNLIKELY( !fd_txn_parse( payload, txnm->payload_sz, txn_mem, NULL ) ) ) {
2244 0 : FD_LOG_CRIT(( "Could not parse txn from send tile" ));
2245 0 : }
2246 0 : fd_vote_tracker_insert( ctx->vote_tracker, fd_type_pun_const( payload+txn->signature_off ) );
2247 0 : }
2248 0 : }
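 : /* The tracker keys on the transaction's first signature:
 : fd_txn_parse fills txn->signature_off with the byte offset of the
 : signature array within the serialized payload, so
 : payload+txn->signature_off points at the 64-byte signature that
 : process_exec_task_done later matches via fd_vote_tracker_query_sig. */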
2249 :
2250 : static inline void
2251 0 : maybe_verify_shred_version( fd_replay_tile_t * ctx ) {
2252 0 : if( FD_LIKELY( ctx->expected_shred_version && ctx->ipecho_shred_version ) ) {
2253 0 : if( FD_UNLIKELY( ctx->expected_shred_version!=ctx->ipecho_shred_version ) ) {
2254 0 : FD_LOG_ERR(( "shred version mismatch: expected %u but got %u from ipecho", ctx->expected_shred_version, ctx->ipecho_shred_version ));
2255 0 : }
2256 0 : }
2257 :
2258 0 : if( FD_LIKELY( ctx->has_genesis_hash && ctx->hard_forks_cnt!=ULONG_MAX && (ctx->expected_shred_version || ctx->ipecho_shred_version) ) ) {
2259 0 : ushort expected_shred_version = ctx->expected_shred_version ? ctx->expected_shred_version : ctx->ipecho_shred_version;
2260 :
2261 0 : union {
2262 0 : uchar c[ 32 ];
2263 0 : ushort s[ 16 ];
2264 0 : } running_hash;
2265 0 : fd_memcpy( running_hash.c, ctx->genesis_hash, sizeof(fd_hash_t) );
2266 :
2267 0 : ulong processed = 0UL;
2268 0 : ulong min_value = 0UL;
2269 0 : while( processed<ctx->hard_forks_cnt ) {
2270 0 : ulong min_index = ULONG_MAX;
2271 0 : for( ulong i=0UL; i<ctx->hard_forks_cnt; i++ ) {
2272 0 : if( ctx->hard_forks[ i ]>=min_value && (min_index==ULONG_MAX || ctx->hard_forks[ i ]<ctx->hard_forks[ min_index ] ) ) {
2273 0 : min_index = i;
2274 0 : }
2275 0 : }
2276 :
2277 0 : FD_TEST( min_index!=ULONG_MAX );
2278 0 : min_value = ctx->hard_forks[ min_index ];
2279 0 : ulong min_count = 0UL;
2280 0 : for( ulong i=0UL; i<ctx->hard_forks_cnt; i++ ) {
2281 0 : if( ctx->hard_forks[ i ]==min_value ) min_count++;
2282 0 : }
2283 :
2284 0 : uchar data[ 48UL ];
2285 0 : fd_memcpy( data, running_hash.c, sizeof(fd_hash_t) );
2286 0 : fd_memcpy( data+32UL, &min_value, sizeof(ulong) );
2287 0 : fd_memcpy( data+40UL, &min_count, sizeof(ulong) );
2288 :
2289 0 : FD_TEST( fd_sha256_hash( data, 48UL, running_hash.c ) );
2290 0 : processed += min_count;
2291 0 : min_value += 1UL;
2292 0 : }
2293 :
2294 0 : ushort xor = 0;
2295 0 : for( ulong i=0UL; i<16UL; i++ ) xor ^= running_hash.s[ i ];
2296 :
2297 0 : xor = fd_ushort_bswap( xor );
2298 0 : xor = fd_ushort_if( xor<USHORT_MAX, (ushort)(xor + 1), USHORT_MAX );
2299 :
2300 0 : if( FD_UNLIKELY( expected_shred_version!=xor ) ) {
2301 0 : FD_BASE58_ENCODE_32_BYTES( ctx->genesis_hash, genesis_hash_b58 );
2302 0 : FD_LOG_ERR(( "shred version mismatch: expected %u but got %u from genesis hash %s and hard forks", expected_shred_version, xor, genesis_hash_b58 ));
2303 0 : }
2304 0 : }
2305 0 : }
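 : /* A self-contained sketch of the folding step above (an
 : illustration, not an additional API; assumes a little-endian
 : target, matching the union access in maybe_verify_shred_version):
 :
 : static ushort
 : shred_version_from_hash( uchar const h[ 32 ] ) {
 : ushort x = 0;
 : for( ulong i=0UL; i<32UL; i+=2UL ) x ^= (ushort)( h[i] | (h[i+1]<<8) );
 : x = fd_ushort_bswap( x );
 : return fd_ushort_if( x<USHORT_MAX, (ushort)(x+1), USHORT_MAX );
 : }
 :
 : The 32-byte running hash (genesis hash folded with all hard forks)
 : is xor-folded into 16 bits two bytes at a time, byte-swapped, and
 : incremented with saturation so a shred version of 0 is never
 : produced. */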
2306 :
2307 : static void
2308 : process_tower_optimistic_confirmed( fd_replay_tile_t * ctx,
2309 : fd_stem_context_t * stem,
2310 0 : fd_tower_slot_confirmed_t const * msg ) {
2311 0 : FD_TEST( msg->bank_idx!=ULONG_MAX );
2312 :
2313 0 : if( ctx->rpc_enabled ) {
2314 0 : fd_bank_t bank[1];
2315 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, msg->bank_idx ) );
2316 0 : bank->data->refcnt++;
2317 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for rpc", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
2318 0 : }
2319 :
2320 0 : fd_replay_oc_advanced_t * replay_msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
2321 0 : replay_msg->bank_idx = msg->bank_idx;
2322 0 : replay_msg->slot = msg->slot;
2323 :
2324 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_OC_ADVANCED, ctx->replay_out->chunk, sizeof(fd_replay_oc_advanced_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
2325 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_oc_advanced_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
2326 0 : }
2327 :
2328 : static inline int
2329 : returnable_frag( fd_replay_tile_t * ctx,
2330 : ulong in_idx,
2331 : ulong seq,
2332 : ulong sig,
2333 : ulong chunk,
2334 : ulong sz,
2335 : ulong ctl,
2336 : ulong tsorig,
2337 : ulong tspub,
2338 0 : fd_stem_context_t * stem ) {
2339 0 : (void)seq;
2340 0 : (void)ctl;
2341 0 : (void)tsorig;
2342 0 : (void)tspub;
2343 :
2344 0 : if( FD_UNLIKELY( sz!=0UL && (chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>ctx->in[ in_idx ].mtu ) ) )
2345 0 : FD_LOG_ERR(( "chunk %lu %lu from in %d corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in_kind[ in_idx ], ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
2346 :
2347 0 : switch( ctx->in_kind[in_idx] ) {
2348 0 : case IN_KIND_GENESIS: {
2349 0 : uchar const * src = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2350 0 : ctx->has_genesis_hash = 1;
2351 0 : if( FD_LIKELY( sig==GENESI_SIG_BOOTSTRAP_COMPLETED ) ) {
2352 0 : boot_genesis( ctx, stem, in_idx, chunk );
2353 0 : fd_memcpy( ctx->genesis_hash, src+sizeof(fd_lthash_value_t), sizeof(fd_hash_t) );
2354 0 : } else {
2355 0 : fd_memcpy( ctx->genesis_hash, src, sizeof(fd_hash_t) );
2356 0 : }
2357 :
2358 0 : maybe_verify_cluster_type( ctx );
2359 0 : maybe_verify_shred_version( ctx );
2360 0 : break;
2361 0 : }
2362 0 : case IN_KIND_IPECHO: {
2363 0 : FD_TEST( sig && sig<=USHORT_MAX );
2364 0 : ctx->ipecho_shred_version = (ushort)sig;
2365 0 : maybe_verify_shred_version( ctx );
2366 0 : break;
2367 0 : }
2368 0 : case IN_KIND_SNAP:
2369 0 : on_snapshot_message( ctx, stem, in_idx, chunk, sig );
2370 0 : maybe_verify_shred_version( ctx );
2371 0 : break;
2372 0 : case IN_KIND_EXECRP: {
2373 0 : process_exec_task_done( ctx, stem, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ), sig );
2374 0 : break;
2375 0 : }
2376 0 : case IN_KIND_POH: {
2377 0 : process_poh_message( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2378 0 : break;
2379 0 : }
2380 0 : case IN_KIND_RESOLV: {
2381 0 : fd_resolv_slot_exchanged_t * exchanged_slot = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2382 0 : process_resolv_slot_completed( ctx, exchanged_slot->bank_idx );
2383 0 : break;
2384 0 : }
2385 0 : case IN_KIND_TOWER: {
2386 0 : if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_DONE ) ) {
2387 0 : process_tower_slot_done( ctx, stem, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ), seq );
2388 0 : }
2389 0 : else if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_CONFIRMED ) ) {
2390 0 : fd_tower_slot_confirmed_t const * msg = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2391 0 : if( msg->kind==FD_TOWER_SLOT_CONFIRMED_OPTIMISTIC ) process_tower_optimistic_confirmed( ctx, stem, msg );
2392 0 : }
2393 0 : else if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_IGNORED ) ) {
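     :       /* A slot the tower ignored still needs to flow through the
     :          slot-done path so replay state advances: synthesize a
     :          slot_done with no vote slot and no new root. */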
2394 0 : fd_tower_slot_ignored_t const * msg = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2395 0 : fd_tower_slot_done_t ignored = {
2396 0 : .replay_slot = msg->slot,
2397 0 : .replay_bank_idx = msg->bank_idx,
2398 0 : .vote_slot = ULONG_MAX,
2399 0 : .reset_slot = ctx->reset_slot, /* Use most recent reset slot */
2400 0 : .reset_block_id = ctx->reset_block_id,
2401 0 : .root_slot = ULONG_MAX
2402 0 : };
2403 0 : process_tower_slot_done( ctx, stem, &ignored, seq );
2404 0 : }
2405 0 : break;
2406 0 : }
2407 0 : case IN_KIND_SHRED: {
2408 :       /* TODO: The layout and size of this message should be properly defined. */
2409 0 :       if( sz==FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) + sizeof(fd_hash_t) + sizeof(int) ) {
2410 :         /* Received a message indicating a FEC set completed. */
2411 0 : process_fec_complete( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2412 0 : }
2413 0 : break;
2414 0 : }
2415 0 : case IN_KIND_TXSEND: {
2416 0 : process_vote_txn_sent( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2417 0 : break;
2418 0 : }
2419 0 : case IN_KIND_RPC:
2420 0 : case IN_KIND_GUI: {
2421 0 : fd_bank_t bank[1];
2422 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, sig ) );
2423 0 : bank->data->refcnt--;
2424 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt decremented to %lu for %s", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt, ctx->in_kind[in_idx]==IN_KIND_RPC ? "rpc" : "gui" ));
2425 0 : break;
2426 0 : }
2427 0 : default:
2428 0 : FD_LOG_ERR(( "unhandled kind %d", ctx->in_kind[ in_idx ] ));
2429 0 : }
2430 :
2431 0 : return 0;
2432 0 : }
2433 :
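     : /* out1 looks up the unique output link of the tile with the given
     :    name and returns its index along with its dcache chunk bounds,
     :    e.g. out1( topo, tile, "replay_out" ) as used below.  If no such
     :    link exists, idx is ULONG_MAX; it is an error for more than one
     :    output link to share the name. */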
2434 : static inline fd_replay_out_link_t
2435 : out1( fd_topo_t const * topo,
2436 : fd_topo_tile_t const * tile,
2437 0 : char const * name ) {
2438 0 : ulong idx = ULONG_MAX;
2439 :
2440 0 : for( ulong i=0UL; i<tile->out_cnt; i++ ) {
2441 0 : fd_topo_link_t const * link = &topo->links[ tile->out_link_id[ i ] ];
2442 0 : if( !strcmp( link->name, name ) ) {
2443 0 : if( FD_UNLIKELY( idx!=ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had multiple output links named %s but expected one", tile->name, tile->kind_id, name ));
2444 0 : idx = i;
2445 0 : }
2446 0 : }
2447 :
2448 0 : if( FD_UNLIKELY( idx==ULONG_MAX ) ) return (fd_replay_out_link_t){ .idx = ULONG_MAX, .mem = NULL, .chunk0 = 0, .wmark = 0, .chunk = 0 };
2449 :
2450 0 : void * mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ idx ] ].dcache_obj_id ].wksp_id ].wksp;
2451 0 : ulong chunk0 = fd_dcache_compact_chunk0( mem, topo->links[ tile->out_link_id[ idx ] ].dcache );
2452 0 : ulong wmark = fd_dcache_compact_wmark ( mem, topo->links[ tile->out_link_id[ idx ] ].dcache, topo->links[ tile->out_link_id[ idx ] ].mtu );
2453 :
2454 0 : return (fd_replay_out_link_t){ .idx = idx, .mem = mem, .chunk0 = chunk0, .wmark = wmark, .chunk = chunk0 };
2455 0 : }
2456 :
2457 : static void
2458 : privileged_init( fd_topo_t * topo,
2459 0 : fd_topo_tile_t * tile ) {
2460 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
2461 :
2462 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
2463 0 : fd_replay_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_replay_tile_t), sizeof(fd_replay_tile_t) );
2464 :
2465 0 : if( FD_UNLIKELY( !strcmp( tile->replay.identity_key_path, "" ) ) ) FD_LOG_ERR(( "identity_key_path not set" ));
2466 :
2467 0 : ctx->identity_pubkey[ 0 ] = *(fd_pubkey_t const *)fd_type_pun_const( fd_keyload_load( tile->replay.identity_key_path, /* pubkey only: */ 1 ) );
2468 :
2469 0 : if( FD_UNLIKELY( !tile->replay.bundle.vote_account_path[0] ) ) {
2470 0 : tile->replay.bundle.enabled = 0;
2471 0 : }
2472 :
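     :   /* The configured vote account may be given either as a base58
     :      encoded pubkey or as a path to a keypair file.  Try decoding it
     :      in place first, and fall back to loading the pubkey from disk. */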
2473 0 : if( FD_UNLIKELY( tile->replay.bundle.enabled ) ) {
2474 0 : if( FD_UNLIKELY( !fd_base58_decode_32( tile->replay.bundle.vote_account_path, ctx->bundle.vote_account.uc ) ) ) {
2475 0 : const uchar * vote_key = fd_keyload_load( tile->replay.bundle.vote_account_path, /* pubkey only: */ 1 );
2476 0 : fd_memcpy( ctx->bundle.vote_account.uc, vote_key, 32UL );
2477 0 : }
2478 0 : }
2479 :
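     :   /* Draw seeds for the reassembler, vote tracker, and block id map
     :      from a secure RNG while still privileged, so the hash structures
     :      created later in unprivileged_init are not seeded predictably. */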
2480 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->reasm_seed, sizeof(ulong) ) ) ) {
2481 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2482 0 : }
2483 :
2484 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->vote_tracker_seed, sizeof(ulong) ) ) ) {
2485 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2486 0 : }
2487 :
2488 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->block_id_map_seed, sizeof(ulong) ) ) ) {
2489 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2490 0 : }
2491 0 : }
2492 :
2493 : static void
2494 : unprivileged_init( fd_topo_t * topo,
2495 0 : fd_topo_tile_t * tile ) {
2496 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
2497 :
2498 0 : ulong chain_cnt = fd_block_id_map_chain_cnt_est( tile->replay.max_live_slots );
2499 :
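     :   /* Carve the tile's scratch region into its sub-objects.  This
     :      layout must stay in sync with scratch_footprint; the
     :      FD_SCRATCH_ALLOC_FINI check at the end of this function catches
     :      any overflow. */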
2500 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
2501 0 : fd_replay_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_replay_tile_t), sizeof(fd_replay_tile_t) );
2502 0 : void * block_id_arr_mem = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_block_id_ele_t), sizeof(fd_block_id_ele_t) * tile->replay.max_live_slots );
2503 0 : void * block_id_map_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_block_id_map_align(), fd_block_id_map_footprint( chain_cnt ) );
2504 0 : void * _txncache = FD_SCRATCH_ALLOC_APPEND( l, fd_txncache_align(), fd_txncache_footprint( tile->replay.max_live_slots ) );
2505 0 : void * reasm_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_reasm_align(), fd_reasm_footprint( tile->replay.fec_max ) );
2506 0 : void * sched_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_sched_align(), fd_sched_footprint( tile->replay.max_live_slots ) );
2507 0 : void * vinyl_req_pool_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_vinyl_req_pool_align(), fd_vinyl_req_pool_footprint( 1UL, 1UL ) );
2508 0 : void * vote_tracker_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_vote_tracker_align(), fd_vote_tracker_footprint() );
2509 0 : void * _capture_ctx = FD_SCRATCH_ALLOC_APPEND( l, fd_capture_ctx_align(), fd_capture_ctx_footprint() );
2510 0 : void * dump_proto_ctx_mem = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_dump_proto_ctx_t), sizeof(fd_dump_proto_ctx_t) );
2511 0 : # if FD_HAS_FLATCC
2512 0 : void * block_dump_ctx = NULL;
2513 0 : if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
2514 0 : block_dump_ctx = FD_SCRATCH_ALLOC_APPEND( l, fd_block_dump_context_align(), fd_block_dump_context_footprint() );
2515 0 : }
2516 0 : # endif
2517 :
2518 0 : ulong store_obj_id = fd_pod_query_ulong( topo->props, "store", ULONG_MAX );
2519 0 : FD_TEST( store_obj_id!=ULONG_MAX );
2520 0 : ctx->store = fd_store_join( fd_topo_obj_laddr( topo, store_obj_id ) );
2521 0 : FD_TEST( ctx->store );
2522 :
2523 0 : ulong banks_obj_id = fd_pod_query_ulong( topo->props, "banks", ULONG_MAX );
2524 0 : FD_TEST( banks_obj_id!=ULONG_MAX );
2525 0 : ulong banks_locks_obj_id = fd_pod_query_ulong( topo->props, "banks_locks", ULONG_MAX );
2526 0 : FD_TEST( banks_locks_obj_id!=ULONG_MAX );
2527 :
2528 0 : FD_TEST( fd_banks_join( ctx->banks, fd_topo_obj_laddr( topo, banks_obj_id ), fd_topo_obj_laddr( topo, banks_locks_obj_id ) ) );
2529 :
2530 0 : fd_bank_data_t * bank_pool = fd_banks_get_bank_pool( ctx->banks->data );
2531 0 : FD_MGAUGE_SET( REPLAY, MAX_LIVE_BANKS, fd_banks_pool_max( bank_pool ) );
2532 :
2533 0 : fd_bank_t bank[1];
2534 0 : FD_TEST( fd_banks_init_bank( bank, ctx->banks ) );
2535 0 : fd_bank_slot_set( bank, 0UL );
2536 0 : FD_TEST( bank->data->idx==FD_REPLAY_BOOT_BANK_IDX );
2537 :
2538 0 : ctx->consensus_root_slot = ULONG_MAX;
2539 0 : ctx->consensus_root = (fd_hash_t){ .ul[0] = FD_RUNTIME_INITIAL_BLOCK_ID };
2540 0 : ctx->published_root_slot = ULONG_MAX;
2541 :
2542 0 : ctx->expected_shred_version = tile->replay.expected_shred_version;
2543 0 : ctx->ipecho_shred_version = 0;
2544 0 : fd_memcpy( ctx->genesis_path, tile->replay.genesis_path, sizeof(ctx->genesis_path) );
2545 0 : ctx->has_genesis_hash = 0;
2546 0 : ctx->cluster_type = FD_CLUSTER_UNKNOWN;
2547 0 : ctx->hard_forks_cnt = ULONG_MAX;
2548 :
2549 0 : if( FD_UNLIKELY( tile->replay.bundle.enabled ) ) {
2550 0 : ctx->bundle.enabled = 1;
2551 0 : if( FD_UNLIKELY( !fd_bundle_crank_gen_init( ctx->bundle.gen,
2552 0 : (fd_acct_addr_t const *)tile->replay.bundle.tip_distribution_program_addr,
2553 0 : (fd_acct_addr_t const *)tile->replay.bundle.tip_payment_program_addr,
2554 0 : (fd_acct_addr_t const *)ctx->bundle.vote_account.uc,
2555 0 : (fd_acct_addr_t const *)ctx->bundle.vote_account.uc, "NAN", 0UL ) ) ) {
2556 0 : FD_LOG_ERR(( "failed to initialize bundle crank gen" ));
2557 0 : }
2558 0 : } else {
2559 0 : ctx->bundle.enabled = 0;
2560 0 : }
2561 :
2562 0 : fd_features_t * features = fd_bank_features_modify( bank );
2563 0 : fd_features_enable_cleaned_up( features, &FD_RUNTIME_CLUSTER_VERSION );
2564 :
2565 0 : char const * one_off_features[ 16UL ];
2566 0 : FD_TEST( tile->replay.enable_features_cnt<=sizeof(one_off_features)/sizeof(one_off_features[0]) );
2567 0 : for( ulong i=0UL; i<tile->replay.enable_features_cnt; i++ ) one_off_features[ i ] = tile->replay.enable_features[i];
2568 0 : fd_features_enable_one_offs( features, one_off_features, (uint)tile->replay.enable_features_cnt, 0UL );
2569 :
2570 0 : fd_topo_obj_t const * vinyl_data = fd_topo_find_tile_obj( topo, tile, "vinyl_data" );
2571 :
2572 0 : FD_TEST( fd_progcache_admin_join( ctx->progcache_admin, fd_topo_obj_laddr( topo, tile->replay.progcache_obj_id ) ) );
2573 :
2574 0 : ulong funk_obj_id;
2575 0 : FD_TEST( (funk_obj_id = fd_pod_query_ulong( topo->props, "funk", ULONG_MAX ) )!=ULONG_MAX );
2576 0 : if( !vinyl_data ) {
2577 0 : FD_TEST( fd_accdb_admin_v1_init( ctx->accdb_admin, fd_topo_obj_laddr( topo, funk_obj_id ) ) );
2578 0 : } else {
2579 0 : fd_topo_obj_t const * vinyl_rq = fd_topo_find_tile_obj( topo, tile, "vinyl_rq" );
2580 0 : fd_topo_obj_t const * vinyl_req_pool = fd_topo_find_tile_obj( topo, tile, "vinyl_rpool" );
2581 0 : FD_TEST( fd_accdb_admin_v2_init( ctx->accdb_admin,
2582 0 : fd_topo_obj_laddr( topo, funk_obj_id ),
2583 0 : fd_topo_obj_laddr( topo, vinyl_rq->id ),
2584 0 : topo->workspaces[ vinyl_data->wksp_id ].wksp,
2585 0 : fd_topo_obj_laddr( topo, vinyl_req_pool->id ),
2586 0 : vinyl_rq->id ) );
2587 0 : fd_accdb_admin_v2_delay_set( ctx->accdb_admin, tile->replay.write_delay_slots );
2588 0 : }
2589 0 : fd_accdb_init_from_topo( ctx->accdb, topo, tile );
2590 :
2591 0 : void * _txncache_shmem = fd_topo_obj_laddr( topo, tile->replay.txncache_obj_id );
2592 0 : fd_txncache_shmem_t * txncache_shmem = fd_txncache_shmem_join( _txncache_shmem );
2593 0 : FD_TEST( txncache_shmem );
2594 0 : ctx->txncache = fd_txncache_join( fd_txncache_new( _txncache, txncache_shmem ) );
2595 0 : FD_TEST( ctx->txncache );
2596 :
2597 0 : ctx->capture_ctx = NULL;
2598 0 : if( FD_UNLIKELY( strcmp( "", tile->replay.solcap_capture ) ) ) {
2599 0 : ctx->capture_ctx = fd_capture_ctx_join( fd_capture_ctx_new( _capture_ctx ) );
2600 0 : ctx->capture_ctx->solcap_start_slot = tile->replay.capture_start_slot;
2601 0 : ctx->capture_ctx->capture_solcap = 1;
2602 0 : }
2603 :
2604 0 : ctx->dump_proto_ctx = NULL;
2605 0 : if( FD_UNLIKELY( strcmp( "", tile->replay.dump_proto_dir ) ) ) {
2606 0 : ctx->dump_proto_ctx = dump_proto_ctx_mem;
2607 0 : ctx->dump_proto_ctx->dump_proto_output_dir = tile->replay.dump_proto_dir;
2608 0 : ctx->dump_proto_ctx->dump_block_to_pb = !!tile->replay.dump_block_to_pb; /* unconditional so the field is never left uninitialized */
2611 0 : }
2612 :
2613 0 : # if FD_HAS_FLATCC
2614 0 : if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
2615 0 : ctx->block_dump_ctx = fd_block_dump_context_join( fd_block_dump_context_new( block_dump_ctx ) );
2616 0 : } else {
2617 0 : ctx->block_dump_ctx = NULL;
2618 0 : }
2619 0 : # endif
2620 :
2621 0 : ctx->exec_cnt = fd_topo_tile_name_cnt( topo, "execrp" );
2622 :
2623 0 : ctx->is_booted = 0;
2624 :
2625 0 : ctx->larger_max_cost_per_block = tile->replay.larger_max_cost_per_block;
2626 :
2627 0 : ctx->reasm = fd_reasm_join( fd_reasm_new( reasm_mem, tile->replay.fec_max, ctx->reasm_seed ) );
2628 0 : FD_TEST( ctx->reasm );
2629 :
2630 0 : ctx->sched = fd_sched_join( fd_sched_new( sched_mem, tile->replay.max_live_slots, ctx->exec_cnt ), tile->replay.max_live_slots );
2631 0 : FD_TEST( ctx->sched );
2632 :
2633 0 : FD_TEST( fd_vinyl_req_pool_new( vinyl_req_pool_mem, 1UL, 1UL ) );
2634 :
2635 0 : ctx->vote_tracker = fd_vote_tracker_join( fd_vote_tracker_new( vote_tracker_mem, ctx->vote_tracker_seed ) );
2636 0 : FD_TEST( ctx->vote_tracker );
2637 :
2638 0 : ctx->has_identity_vote_rooted = 0;
2639 0 : ctx->wait_for_vote_to_start_leader = tile->replay.wait_for_vote_to_start_leader;
2640 :
2641 0 : ctx->mleaders = fd_multi_epoch_leaders_join( fd_multi_epoch_leaders_new( ctx->mleaders_mem ) );
2642 0 : FD_TEST( ctx->mleaders );
2643 :
2644 0 : ctx->is_leader = 0;
2645 0 : ctx->reset_slot = 0UL;
2646 0 : fd_memset( ctx->reset_bank, 0, sizeof(fd_bank_t) );
2647 0 : ctx->reset_block_id = (fd_hash_t){ .ul[0] = FD_RUNTIME_INITIAL_BLOCK_ID };
2648 0 : ctx->reset_timestamp_nanos = 0UL;
2649 0 : ctx->next_leader_slot = ULONG_MAX;
2650 0 : ctx->next_leader_tickcount = LONG_MAX;
2651 0 : ctx->highwater_leader_slot = ULONG_MAX;
2652 0 : ctx->slot_duration_nanos = 350L*1000L*1000L; /* TODO: Not fixed ... not always 350ms ... */
2653 0 : ctx->slot_duration_ticks = (double)ctx->slot_duration_nanos*fd_tempo_tick_per_ns( NULL );
2654 0 : ctx->leader_bank->data = NULL;
2655 :
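     :   /* The block id array holds one element per live slot, with the map
     :      chaining over it using the secure seed drawn in privileged_init. */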
2656 0 : ctx->block_id_len = tile->replay.max_live_slots;
2657 0 : ctx->block_id_arr = (fd_block_id_ele_t *)block_id_arr_mem;
2658 0 : ctx->block_id_map = fd_block_id_map_join( fd_block_id_map_new( block_id_map_mem, chain_cnt, ctx->block_id_map_seed ) );
2659 0 : FD_TEST( ctx->block_id_map );
2660 :
2661 0 : ctx->resolv_tile_cnt = fd_topo_tile_name_cnt( topo, "resolv" );
2662 :
2663 0 : FD_TEST( tile->in_cnt<=sizeof(ctx->in)/sizeof(ctx->in[0]) );
2664 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
2665 0 : fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
2666 0 : fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
2667 :
2668 0 : if( FD_LIKELY( link->dcache ) ) {
2669 0 : ctx->in[ i ].mem = link_wksp->wksp;
2670 0 : ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
2671 0 : ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
2672 0 : ctx->in[ i ].mtu = link->mtu;
2673 0 : }
2674 :
2675 0 : if( !strcmp( link->name, "genesi_out" ) ) ctx->in_kind[ i ] = IN_KIND_GENESIS;
2676 0 : else if( !strcmp( link->name, "ipecho_out" ) ) ctx->in_kind[ i ] = IN_KIND_IPECHO;
2677 0 : else if( !strcmp( link->name, "snapin_manif" ) ) ctx->in_kind[ i ] = IN_KIND_SNAP;
2678 0 : else if( !strcmp( link->name, "execrp_replay" ) ) ctx->in_kind[ i ] = IN_KIND_EXECRP;
2679 0 : else if( !strcmp( link->name, "tower_out" ) ) ctx->in_kind[ i ] = IN_KIND_TOWER;
2680 0 : else if( !strcmp( link->name, "poh_replay" ) ) ctx->in_kind[ i ] = IN_KIND_POH;
2681 0 : else if( !strcmp( link->name, "resolv_replay" ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
2682 0 : else if( !strcmp( link->name, "shred_out" ) ) ctx->in_kind[ i ] = IN_KIND_SHRED;
2683 0 : else if( !strcmp( link->name, "txsend_out" ) ) ctx->in_kind[ i ] = IN_KIND_TXSEND;
2684 0 : else if( !strcmp( link->name, "gui_replay" ) ) ctx->in_kind[ i ] = IN_KIND_GUI;
2685 0 : else if( !strcmp( link->name, "rpc_replay" ) ) ctx->in_kind[ i ] = IN_KIND_RPC;
2686 0 : else FD_LOG_ERR(( "unexpected input link name %s", link->name ));
2687 0 : }
2688 :
2689 0 : *ctx->epoch_out = out1( topo, tile, "replay_epoch" ); FD_TEST( ctx->epoch_out->idx!=ULONG_MAX );
2690 0 : *ctx->replay_out = out1( topo, tile, "replay_out" ); FD_TEST( ctx->replay_out->idx!=ULONG_MAX );
2691 :
2692 0 : ulong idx = fd_topo_find_tile_out_link( topo, tile, "replay_execrp", 0UL );
2693 0 : FD_TEST( idx!=ULONG_MAX );
2694 0 : fd_topo_link_t * link = &topo->links[ tile->out_link_id[ idx ] ];
2695 :
2696 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
2697 0 : exec_out->idx = idx;
2698 0 : exec_out->mem = topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ].wksp;
2699 0 : exec_out->chunk0 = fd_dcache_compact_chunk0( exec_out->mem, link->dcache );
2700 0 : exec_out->wmark = fd_dcache_compact_wmark( exec_out->mem, link->dcache, link->mtu );
2701 0 : exec_out->chunk = exec_out->chunk0;
2702 :
2703 0 : ctx->gui_enabled = fd_topo_find_tile( topo, "gui", 0UL )!=ULONG_MAX;
2704 0 : ctx->rpc_enabled = fd_topo_find_tile( topo, "rpc", 0UL )!=ULONG_MAX;
2705 :
2706 0 : if( FD_UNLIKELY( strcmp( "", tile->replay.solcap_capture ) ) ) {
2707 0 : idx = fd_topo_find_tile_out_link( topo, tile, "cap_repl", 0UL );
2708 0 : FD_TEST( idx!=ULONG_MAX );
2709 0 : link = &topo->links[ tile->out_link_id[ idx ] ];
2710 :
2712 0 : fd_capture_link_buf_t * cap_repl_out = ctx->cap_repl_out;
2713 0 : cap_repl_out->base.vt = &fd_capture_link_buf_vt;
2714 0 : cap_repl_out->idx = idx;
2715 0 : cap_repl_out->mem = topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ].wksp;
2716 0 : cap_repl_out->chunk0 = fd_dcache_compact_chunk0( cap_repl_out->mem, link->dcache );
2717 0 : cap_repl_out->wmark = fd_dcache_compact_wmark( cap_repl_out->mem, link->dcache, link->mtu );
2718 0 : cap_repl_out->chunk = cap_repl_out->chunk0;
2719 0 : cap_repl_out->mcache = link->mcache;
2720 0 : cap_repl_out->depth = fd_mcache_depth( link->mcache );
2721 0 : cap_repl_out->seq = 0UL;
2722 :
2723 0 : ctx->capture_ctx->capctx_type.buf = cap_repl_out;
2724 0 : ctx->capture_ctx->capture_link = &cap_repl_out->base;
2725 0 : ctx->capture_ctx->current_txn_idx = 0UL;
2726 :
2728 0 : ulong consumer_tile_idx = fd_topo_find_tile( topo, "solcap", 0UL );
     : FD_TEST( consumer_tile_idx!=ULONG_MAX );
2729 0 : fd_topo_tile_t * consumer_tile = &topo->tiles[ consumer_tile_idx ];
2730 0 : cap_repl_out->fseq = NULL;
2731 0 : for( ulong j=0UL; j<consumer_tile->in_cnt; j++ ) {
2732 0 : if( FD_UNLIKELY( consumer_tile->in_link_id[ j ]==link->id ) ) {
2733 0 : cap_repl_out->fseq = fd_fseq_join( fd_topo_obj_laddr( topo, consumer_tile->in_link_fseq_obj_id[ j ] ) );
2734 0 : FD_TEST( cap_repl_out->fseq );
2735 0 : break;
2736 0 : }
2737 0 : }
2738 0 : }
2739 :
2740 0 : fd_memset( &ctx->metrics, 0, sizeof(ctx->metrics) );
2741 :
2742 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_link_wait, FD_MHIST_SECONDS_MIN( REPLAY, STORE_LINK_WAIT ),
2743 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_LINK_WAIT ) ) );
2744 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_link_work, FD_MHIST_SECONDS_MIN( REPLAY, STORE_LINK_WORK ),
2745 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_LINK_WORK ) ) );
2746 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_read_wait, FD_MHIST_SECONDS_MIN( REPLAY, STORE_READ_WAIT ),
2747 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_READ_WAIT ) ) );
2748 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_read_work, FD_MHIST_SECONDS_MIN( REPLAY, STORE_READ_WORK ),
2749 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_READ_WORK ) ) );
2750 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_publish_wait, FD_MHIST_SECONDS_MIN( REPLAY, STORE_PUBLISH_WAIT ),
2751 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_PUBLISH_WAIT ) ) );
2752 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_publish_work, FD_MHIST_SECONDS_MIN( REPLAY, STORE_PUBLISH_WORK ),
2753 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_PUBLISH_WORK ) ) );
2754 0 : fd_histf_join( fd_histf_new( ctx->metrics.root_slot_dur, FD_MHIST_SECONDS_MIN( REPLAY, ROOT_SLOT_DURATION_SECONDS ),
2755 0 : FD_MHIST_SECONDS_MAX( REPLAY, ROOT_SLOT_DURATION_SECONDS ) ) );
2756 0 : fd_histf_join( fd_histf_new( ctx->metrics.root_account_dur, FD_MHIST_SECONDS_MIN( REPLAY, ROOT_ACCOUNT_DURATION_SECONDS ),
2757 0 : FD_MHIST_SECONDS_MAX( REPLAY, ROOT_ACCOUNT_DURATION_SECONDS ) ) );
2758 :
2759 0 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
2760 0 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
2761 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
2762 0 : }
2763 :
2764 : static ulong
2765 : populate_allowed_seccomp( fd_topo_t const * topo FD_FN_UNUSED,
2766 : fd_topo_tile_t const * tile FD_FN_UNUSED,
2767 : ulong out_cnt,
2768 0 : struct sock_filter * out ) {
2769 :
2770 0 : populate_sock_filter_policy_fd_replay_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
2771 0 : return sock_filter_policy_fd_replay_tile_instr_cnt;
2772 0 : }
2773 :
2774 : static ulong
2775 : populate_allowed_fds( fd_topo_t const * topo FD_FN_UNUSED,
2776 : fd_topo_tile_t const * tile FD_FN_UNUSED,
2777 : ulong out_fds_cnt,
2778 0 : int * out_fds ) {
2779 :
2780 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
2781 :
2782 0 : ulong out_cnt = 0UL;
2783 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
2784 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
2785 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
2786 0 : return out_cnt;
2787 0 : }
2788 :
2789 : #undef DEBUG_LOGGING
2790 :
2791 : /* Counting carefully, after_credit can generate at most 7 frags, and
2792 : returnable_frag (via boot_genesis) can also generate at most 7
2793 : frags, so 14 is a conservative bound. */
2794 0 : #define STEM_BURST (14UL)
2795 :
2796 : /* TODO: calculate this properly / fix stem to work with larger
2797 : numbers of links.  The current value was chosen empirically:
2798 : anything larger slowed down replay times. */
2799 0 : #define STEM_LAZY ((long)10e3)
2800 :
2801 0 : #define STEM_CALLBACK_CONTEXT_TYPE fd_replay_tile_t
2802 0 : #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_replay_tile_t)
2803 :
2804 0 : #define STEM_CALLBACK_METRICS_WRITE metrics_write
2805 0 : #define STEM_CALLBACK_AFTER_CREDIT after_credit
2806 0 : #define STEM_CALLBACK_BEFORE_FRAG before_frag
2807 0 : #define STEM_CALLBACK_RETURNABLE_FRAG returnable_frag
2808 :
2809 : #include "../../disco/stem/fd_stem.c"
2810 :
2811 : fd_topo_run_tile_t fd_tile_replay = {
2812 : .name = "replay",
2813 : .populate_allowed_seccomp = populate_allowed_seccomp,
2814 : .populate_allowed_fds = populate_allowed_fds,
2815 : .scratch_align = scratch_align,
2816 : .scratch_footprint = scratch_footprint,
2817 : .privileged_init = privileged_init,
2818 : .unprivileged_init = unprivileged_init,
2819 : .run = stem_run,
2820 : };
|