#include "fd_replay_tile.h"
#include "fd_sched.h"
#include "fd_exec.h"
#include "fd_vote_tracker.h"
#include "generated/fd_replay_tile_seccomp.h"

#include "../genesis/fd_genesi_tile.h"
#include "../poh/fd_poh.h"
#include "../poh/fd_poh_tile.h"
#include "../tower/fd_tower_tile.h"
#include "../resolv/fd_resolv_tile.h"
#include "../restore/utils/fd_ssload.h"

#include "../../disco/tiles.h"
#include "../../disco/fd_txn_m.h"
#include "../../disco/store/fd_store.h"
#include "../../disco/pack/fd_pack.h"
#include "../../discof/reasm/fd_reasm.h"
#include "../../disco/keyguard/fd_keyload.h"
#include "../../disco/genesis/fd_genesis_cluster.h"
#include "../../util/pod/fd_pod.h"
#include "../../flamenco/accdb/fd_accdb_admin.h"
#include "../../flamenco/accdb/fd_accdb_impl_v1.h"
#include "../../flamenco/rewards/fd_rewards.h"
#include "../../flamenco/leaders/fd_multi_epoch_leaders.h"
#include "../../flamenco/progcache/fd_progcache_admin.h"
#include "../../disco/metrics/fd_metrics.h"

#include "../../flamenco/runtime/fd_runtime.h"
#include "../../flamenco/runtime/fd_runtime_stack.h"
#include "../../flamenco/fd_flamenco_base.h"
#include "../../flamenco/runtime/sysvar/fd_sysvar_epoch_schedule.h"

#include "../../flamenco/runtime/tests/fd_dump_pb.h"

#include <errno.h>
#include <stdio.h>
/* Replay concepts:

   - Blocks are aggregations of entries, aka microblocks, which are
     groupings of txns and are constructed by the block producer (see
     fd_pack).

   - Entries are grouped into entry batches by the block producer (see
     fd_pack / fd_shredder).

   - Entry batches are divided into chunks known as shreds by the
     block producer (see fd_shredder).

   - Shreds are grouped into forward-error-correction sets (FEC sets)
     by the block producer (see fd_shredder).

   - Shreds are transmitted to the rest of the cluster via the Turbine
     protocol (see fd_shredder / fd_shred).

   - Once enough shreds within a FEC set are received to recover the
     entirety of the shred data encoded by that FEC set, the receiver
     can "complete" the FEC set (see fd_fec_resolver).

   - If shreds in the FEC set are missing such that it can't complete,
     the receiver can use the Repair protocol to request missing
     shreds in the FEC set (see fd_repair).

   - The current Repair protocol does not support requesting coding
     shreds.  As a result, some FEC sets might actually be complete
     (contain all data shreds).  Repair currently hacks around this by
     forcing completion, but the long-term solution is to add support
     for repairing coding shreds via Repair.

   - FEC sets are delivered in partial order to the Replay tile by the
     Repair tile.  Currently Replay only supports replaying entry
     batches, so FEC sets need to be reassembled into an entry batch
     before they can be replayed.  The new Dispatcher will change this
     by taking a FEC set as input instead. */

#define IN_KIND_SNAP    ( 0)
#define IN_KIND_GENESIS ( 1)
#define IN_KIND_IPECHO  ( 2)
#define IN_KIND_TOWER   ( 3)
#define IN_KIND_RESOLV  ( 4)
#define IN_KIND_POH     ( 5)
#define IN_KIND_EXEC    ( 6)
#define IN_KIND_SHRED   ( 7)
#define IN_KIND_VTXN    ( 8)
#define IN_KIND_GUI     ( 9)
#define IN_KIND_RPC     (10)

#define DEBUG_LOGGING 0

/* The first bank that the replay tile produces, either for genesis or
   for the snapshot boot, will always be at bank index 0. */
#define FD_REPLAY_BOOT_BANK_IDX (0UL)

struct fd_replay_in_link {
  fd_wksp_t * mem;
  ulong       chunk0;
  ulong       wmark;
  ulong       mtu;
};

typedef struct fd_replay_in_link fd_replay_in_link_t;

struct fd_replay_out_link {
  ulong       idx;
  fd_wksp_t * mem;
  ulong       chunk0;
  ulong       wmark;
  ulong       chunk;
};

typedef struct fd_replay_out_link fd_replay_out_link_t;

/* fd_block_id_map is a simple map of block ids to bank indices.  The
   map sits on top of an array of fd_block_id_ele_t.  This serves as a
   translation layer between block ids and bank indices. */

struct fd_block_id_ele {
  fd_hash_t block_id;
  ulong     slot; /* = FD_SLOT_NULL if not initialized */
  ulong     next_;
};
typedef struct fd_block_id_ele fd_block_id_ele_t;

#define MAP_NAME               fd_block_id_map
#define MAP_ELE_T              fd_block_id_ele_t
#define MAP_KEY_T              fd_hash_t
#define MAP_KEY                block_id
#define MAP_NEXT               next_
#define MAP_KEY_EQ(k0,k1)      (!memcmp((k0),(k1), sizeof(fd_hash_t)))
#define MAP_KEY_HASH(key,seed) (fd_hash((seed),(key),sizeof(fd_hash_t)))
#include "../../util/tmpl/fd_map_chain.c"

static inline ulong
fd_block_id_ele_get_idx( fd_block_id_ele_t * ele_arr, fd_block_id_ele_t * ele ) {
  return (ulong)(ele - ele_arr);
}
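
/* A minimal usage sketch (illustrative only) of the two-way mapping
   described above: block id -> bank index goes through the map, while
   bank index -> block id is just an array lookup into the element
   array (see publish_slot_dead below).  This helper and its ULONG_MAX
   miss sentinel are hypothetical; the real call sites in this file
   treat a miss as an invariant violation instead (see
   prepare_leader_bank). */

FD_FN_UNUSED static inline ulong
fd_block_id_query_bank_idx( fd_block_id_map_t * map,
                            fd_block_id_ele_t * ele_arr,
                            fd_hash_t const *   block_id ) {
  fd_block_id_ele_t * ele = fd_block_id_map_ele_query( map, block_id, NULL, ele_arr );
  if( FD_UNLIKELY( !ele ) ) return ULONG_MAX; /* block id not tracked */
  return fd_block_id_ele_get_idx( ele_arr, ele );
}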

struct fd_replay_tile {
  fd_wksp_t * wksp;

  fd_accdb_admin_t     accdb_admin[1];
  fd_accdb_user_t      accdb[1];
  fd_progcache_admin_t progcache_admin[1];

  fd_txncache_t * txncache;
  fd_store_t *    store;
  fd_banks_t *    banks;

  /* This flag is 1 if we have seen a vote signature that our node has
     sent out get rooted at least one time, and 0 otherwise.  We can't
     become leader and pack blocks until this flag has been set.  This
     parallels the Agave 'has_new_vote_been_rooted'.

     TODO: Add a flag to the toml to make this optional. */
  int has_identity_vote_rooted;

  ulong        reasm_seed;
  fd_reasm_t * reasm;

  /* Replay state machine. */
  fd_sched_t *         sched;
  ulong                exec_cnt;
  fd_replay_out_link_t exec_out[ 1 ]; /* Sending work down to exec tiles */

  ulong               vote_tracker_seed;
  fd_vote_tracker_t * vote_tracker;

  int   has_genesis_hash;
  char  genesis_path[ PATH_MAX ];
  uchar genesis_hash[ 32UL ];
  ulong cluster_type;

#define FD_REPLAY_HARD_FORKS_MAX (64UL)
  ulong hard_forks_cnt;
  ulong hard_forks[ FD_REPLAY_HARD_FORKS_MAX ];

  ushort expected_shred_version;
  ushort ipecho_shred_version;

  /* A note on publishing ...

     The watermarks are used to publish our fork-aware structures.
     For example, store, banks, and txncache need to be published to
     release resources occupied by rooted or dead blocks.  In general,
     publishing has the effect of pruning forks in those structures,
     indicating that it is ok to release the memory being occupied by
     the blocks on said forks.  Tower is responsible for informing us
     of the latest block on the consensus rooted fork.  As soon as we
     can, we should move the published root as close as possible to
     the latest consensus root, publishing/pruning everything on the
     fork tree along the way.  That is, all the blocks that directly
     descend from the current published root (inclusive) to the new
     published root (exclusive) on the rooted fork, as well as all the
     minority forks that branch from said blocks.

     Ideally, we'd move the published root to the consensus root
     immediately upon receiving a new consensus root.  However, that's
     not always safe to do.  One thing we need to be careful about is
     making sure that there are no more users/consumers of
     soon-to-be-pruned blocks, lest a use-after-free occur.  This can
     be done by using a reference counter for each block.  Any
     concurrent activity, such as transaction execution in the exec
     tiles, should retain a refcnt on the block for as long as it
     needs access to the shared fork-aware structures related to that
     block.  Eventually, the refcnt on a given block will drop down to
     0 as the block either finishes replaying or gets marked as dead,
     and any other tile that has retained a refcnt on the block
     releases it.  At that point, it becomes a candidate for pruning.
     The key to safe publishing then becomes figuring out how far we
     could advance the published root, such that every minority fork
     branching off of blocks in between the current published root
     (inclusive) and the new published root (exclusive) is safe to be
     pruned.  This is a straightforward tree traversal, where if a
     block B on the rooted fork has refcnt 0, and all minority forks
     branching off of B also have refcnt 0, then B is safe to be
     pruned.  We advance the published root to the farthest
     consecutively prunable block on the rooted fork.  (A sketch of
     this rule follows the struct definition below.)  Note that reasm
     presents the replay tile with a clean view of the world where
     every block is chained off of a parent block.  So there are no
     orphaned/dangling tree nodes to worry about.  The world is a nice
     single tree as far as replay is concerned.

     In the following fork tree, every node is a block and the number
     in parentheses is the refcnt on the block.  The chain marked with
     double slashes is the rooted fork.  Suppose the published root is
     at block P, and the consensus root is at block T.  We can't
     publish past block P because Q has refcnt 1.


                       P(0)
                      /    \\
                   Q(1)     A(0)
                   /        ||  \
                X(0)       B(0)  C(0)
                /          ||  \
             Y(0)         M(0)  R(0)
                         /  ||   /  \
                     D(2)  T(0) J(0) L(0)
                            ||
                            ..
                            ..
                            ..
                            ||
             blocks we might be actively replaying


     When the refcnt on Q drops to 0, we would be able to advance the
     published root to block M, because blocks P, A, and B, as well as
     all subtrees branching off of them, have refcnt 0, and therefore
     can be pruned.  Block M itself cannot be pruned yet because its
     child block D has refcnt 2.  After publishing/pruning, the fork
     tree would be:


                  M(0)
                 /  ||
             D(2)  T(0)
                    ||
                    ..
                    ..
                    ..
                    ||
      blocks we might be actively replaying


     As a result, the shared fork-aware structures can free resources
     for blocks P, A, B, and all subtrees branching off of them.

     For the reference counting part, the replay tile is the sole
     entity that can update the refcnt.  This ensures that all refcnt
     increment and decrement attempts are serialized at the replay
     tile, and that there are no racy resurrections of a
     soon-to-be-pruned block.  If a refcnt increment request arrives
     after a block has been pruned, replay simply rejects the request.

     A note on the implementation of the above ...

     Upon receiving a new consensus root, we descend down the rooted
     fork from the current published root to the new consensus root.
     On each node/block of the rooted fork, we do a summation of the
     refcnt on the block and all the minority fork blocks branching
     from the block.  If the summation is 0, the block is safe for
     pruning.  We advance the published root to the far end of the
     consecutive run of 0 refcnt sums originating from the current
     published root.  On our descent down the minority forks, we also
     mark any block that hasn't finished replaying as dead, so we
     don't waste time executing them.  No more transactions shall be
     dispatched for execution from dead blocks.

     Blocks start out with a refcnt of 0.  Other tiles may send a
     request to the replay tile for a reference on a block.  The
     transaction dispatcher is another source of refcnt updates.  On
     every dispatch of a transaction for block B, we increment the
     refcnt for B.  And on every transaction finalization, we
     decrement the refcnt for B.  This means that whenever the refcnt
     on a block is 0, there is no more reference on that block from
     the execution pipeline.  While it might be tempting to simply
     increment the refcnt once when we start replaying a block, and
     decrement the refcnt once when we finish a block, this more
     fine-grained refcnt update strategy allows for aborting and
     potentially immediately pruning blocks under interleaved block
     replay.  Upon receiving a new consensus root, we can simply look
     at the refcnt on minority fork blocks, and a refcnt of 0 would
     imply that the block is safe for pruning, even if we haven't
     finished replaying it.  Without the fine-grained refcnt, we would
     need to first stop dispatching from the aborted block, and then
     wait for a full drain of the execution pipeline to know for sure
     that there are no more in-flight transactions executing on the
     aborted block.  Note that this will allow the refcnt on any block
     to transiently drop down to 0.  We will not mistakenly prune an
     actively replaying block, aka a leaf node, that is chaining off
     of the rooted fork, because the consensus root is always an
     ancestor of the actively replaying tip. */
  fd_hash_t consensus_root;          /* The most recent block to have reached max lockout in the tower. */
  ulong     consensus_root_slot;     /* slot number of the above. */
  ulong     consensus_root_bank_idx; /* bank index of the above. */
  ulong     published_root_slot;     /* slot number of the published root. */
  ulong     published_root_bank_idx; /* bank index of the published root. */

  /* We need to maintain a tile-local mapping of block ids to bank
     indices and vice versa.  This translation layer is needed for
     conversion since tower operates on block ids and downstream
     consumers of FEC sets operate on bank indices.  This mapping must
     happen both ways:
     1. tower sends us block ids and we must map them to bank indices.
     2. when a block is completed, we must map the bank index to a
        block id to send a slot complete message to tower. */
  ulong               block_id_len;
  fd_block_id_ele_t * block_id_arr;
  ulong               block_id_map_seed;
  fd_block_id_map_t * block_id_map;

  /* Capture-related configs */
  fd_capture_ctx_t *    capture_ctx;
  FILE *                capture_file;
  fd_capture_link_buf_t cap_repl_out[1];

  /* Whether the runtime has been booted, either from snapshot loading
     or from genesis. */
  int is_booted;

  /* Buffer to store vote towers that need to be published to the
     Tower tile. */

  fd_multi_epoch_leaders_t * mleaders;

  int larger_max_cost_per_block;

  fd_pubkey_t identity_pubkey[1]; /* TODO: Keyswitch */

  /* When we transition to becoming leader, we can only unbecome the
     leader once we have received a block id from the FEC reassembler
     and a message from PoH that the leader slot has ended.  After
     both of these conditions are met, we are free to unbecome the
     leader. */
  int         is_leader;
  int         recv_poh;
  int         recv_block_id;
  ulong       next_leader_slot;
  long        next_leader_tickcount;
  ulong       highwater_leader_slot;
  ulong       reset_slot;
  fd_bank_t * reset_bank;
  fd_hash_t   reset_block_id;
  long        reset_timestamp_nanos;
  double      slot_duration_nanos;
  double      slot_duration_ticks;
  fd_bank_t * leader_bank; /* ==NULL if not currently the leader */

  ulong resolv_tile_cnt;

  int                 in_kind[ 64 ];
  fd_replay_in_link_t in[ 64 ];

  fd_replay_out_link_t replay_out[1];

  fd_replay_out_link_t stake_out[1];

  /* The gui tile needs to reliably own a reference to the most recent
     completed active bank.  Replay needs to know if the gui as a
     consumer is enabled so it can increment the bank's refcnt before
     publishing the bank_idx to the gui. */
  int gui_enabled;
  int rpc_enabled;

# if FD_HAS_FLATCC
  /* For dumping blocks to protobuf.  For backtest only. */
  fd_block_dump_ctx_t * block_dump_ctx;
# endif

  /* We need a few pieces of information to compute the right
     addresses for bundle crank information that we need to send to
     pack. */
  struct {
    int                   enabled;
    fd_pubkey_t           vote_account;
    fd_bundle_crank_gen_t gen[1];
  } bundle;

  struct {
    fd_histf_t store_read_wait[ 1 ];
    fd_histf_t store_read_work[ 1 ];
    fd_histf_t store_publish_wait[ 1 ];
    fd_histf_t store_publish_work[ 1 ];
    fd_histf_t store_link_wait[ 1 ];
    fd_histf_t store_link_work[ 1 ];

    ulong slots_total;
    ulong transactions_total;

    ulong reasm_latest_slot;
    ulong reasm_latest_fec_idx;

    ulong sched_full;
    ulong reasm_empty;
    ulong leader_bid_wait;
    ulong banks_full;
  } metrics;

  uchar __attribute__((aligned(FD_MULTI_EPOCH_LEADERS_ALIGN))) mleaders_mem[ FD_MULTI_EPOCH_LEADERS_FOOTPRINT ];

  fd_runtime_stack_t runtime_stack;
};

typedef struct fd_replay_tile fd_replay_tile_t;
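
/* A minimal sketch (illustrative only, not the production pruning
   logic, which lives in fd_banks) of the publishing rule described in
   the struct comment above: the refcnt sum of a block's subtree is
   its own refcnt plus that of all of its descendants, and a block B
   on the rooted fork is prunable iff subtree_refcnt( B ), excluding
   the contribution of B's rooted-fork child's subtree, is zero.  The
   helper below assumes only the fd_bank_t refcnt/child_idx/
   sibling_idx fields already used elsewhere in this file. */

FD_FN_UNUSED static ulong
subtree_refcnt( fd_banks_t * banks,
                fd_bank_t *  bank ) {
  ulong sum = bank->refcnt;
  for( ulong child_idx=bank->child_idx; child_idx!=ULONG_MAX; ) {
    fd_bank_t * child = fd_banks_bank_query( banks, child_idx );
    sum += subtree_refcnt( banks, child );
    child_idx = child->sibling_idx;
  }
  return sum;
}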

FD_FN_CONST static inline ulong
scratch_align( void ) {
  return 128UL;
}
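
/* scratch_footprint lays out the tile's scratch region: the ctx
   struct, the block id array and map, the txncache, reasm, sched, the
   vote tracker, the capture ctx, and (if enabled) the block dump
   context, accumulated with the FD_LAYOUT helpers below. */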
FD_FN_PURE static inline ulong
scratch_footprint( fd_topo_tile_t const * tile ) {
  ulong chain_cnt = fd_block_id_map_chain_cnt_est( tile->replay.max_live_slots );

  ulong l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, alignof(fd_replay_tile_t),  sizeof(fd_replay_tile_t) );
  l = FD_LAYOUT_APPEND( l, alignof(fd_block_id_ele_t), sizeof(fd_block_id_ele_t) * tile->replay.max_live_slots );
  l = FD_LAYOUT_APPEND( l, fd_block_id_map_align(),    fd_block_id_map_footprint( chain_cnt ) );
  l = FD_LAYOUT_APPEND( l, fd_txncache_align(),        fd_txncache_footprint( tile->replay.max_live_slots ) );
  l = FD_LAYOUT_APPEND( l, fd_reasm_align(),           fd_reasm_footprint( 1 << 20 ) );
  l = FD_LAYOUT_APPEND( l, fd_sched_align(),           fd_sched_footprint( tile->replay.max_live_slots ) );
  l = FD_LAYOUT_APPEND( l, fd_vote_tracker_align(),    fd_vote_tracker_footprint() );
  l = FD_LAYOUT_APPEND( l, fd_capture_ctx_align(),     fd_capture_ctx_footprint() );

# if FD_HAS_FLATCC
  if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
    l = FD_LAYOUT_APPEND( l, fd_block_dump_context_align(), fd_block_dump_context_footprint() );
  }
# endif

  l = FD_LAYOUT_FINI( l, scratch_align() );

  return l;
}

static inline void
metrics_write( fd_replay_tile_t * ctx ) {
  FD_MHIST_COPY( REPLAY, STORE_LINK_WAIT,    ctx->metrics.store_link_wait );
  FD_MHIST_COPY( REPLAY, STORE_LINK_WORK,    ctx->metrics.store_link_work );
  FD_MHIST_COPY( REPLAY, STORE_READ_WAIT,    ctx->metrics.store_read_wait );
  FD_MHIST_COPY( REPLAY, STORE_READ_WORK,    ctx->metrics.store_read_work );
  FD_MHIST_COPY( REPLAY, STORE_PUBLISH_WAIT, ctx->metrics.store_publish_wait );
  FD_MHIST_COPY( REPLAY, STORE_PUBLISH_WORK, ctx->metrics.store_publish_work );

  FD_MGAUGE_SET( REPLAY, ROOT_SLOT, ctx->consensus_root_slot==ULONG_MAX ? 0UL : ctx->consensus_root_slot );
  ulong leader_slot = ctx->leader_bank ? fd_bank_slot_get( ctx->leader_bank ) : 0UL;
  FD_MGAUGE_SET( REPLAY, LEADER_SLOT, leader_slot );

  if( FD_LIKELY( ctx->leader_bank ) ) {
    FD_MGAUGE_SET( REPLAY, NEXT_LEADER_SLOT, leader_slot );
    FD_MGAUGE_SET( REPLAY, LEADER_SLOT,      leader_slot );
  } else {
    FD_MGAUGE_SET( REPLAY, NEXT_LEADER_SLOT, ctx->next_leader_slot==ULONG_MAX ? 0UL : ctx->next_leader_slot );
    FD_MGAUGE_SET( REPLAY, LEADER_SLOT,      0UL );
  }
  FD_MGAUGE_SET( REPLAY, RESET_SLOT, ctx->reset_slot==ULONG_MAX ? 0UL : ctx->reset_slot );

  fd_bank_t * bank_pool = fd_banks_get_bank_pool( ctx->banks );
  ulong live_banks = fd_banks_pool_max( bank_pool ) - fd_banks_pool_free( bank_pool );
  FD_MGAUGE_SET( REPLAY, LIVE_BANKS, live_banks );

  ulong reasm_free = fd_reasm_free( ctx->reasm );
  FD_MGAUGE_SET( REPLAY, REASM_FREE, reasm_free );

  FD_MCNT_SET( REPLAY, SLOTS_TOTAL,        ctx->metrics.slots_total );
  FD_MCNT_SET( REPLAY, TRANSACTIONS_TOTAL, ctx->metrics.transactions_total );

  FD_MGAUGE_SET( REPLAY, REASM_LATEST_SLOT,    ctx->metrics.reasm_latest_slot );
  FD_MGAUGE_SET( REPLAY, REASM_LATEST_FEC_IDX, ctx->metrics.reasm_latest_fec_idx );

  FD_MCNT_SET( REPLAY, SCHED_FULL,      ctx->metrics.sched_full );
  FD_MCNT_SET( REPLAY, REASM_EMPTY,     ctx->metrics.reasm_empty );
  FD_MCNT_SET( REPLAY, LEADER_BID_WAIT, ctx->metrics.leader_bid_wait );
  FD_MCNT_SET( REPLAY, BANKS_FULL,      ctx->metrics.banks_full );

  FD_MCNT_SET( REPLAY, PROGCACHE_ROOTED,  ctx->progcache_admin->metrics.root_cnt );
  FD_MCNT_SET( REPLAY, PROGCACHE_GC_ROOT, ctx->progcache_admin->metrics.gc_root_cnt );

  FD_MCNT_SET( REPLAY, ACCDB_CREATED,  ctx->accdb->base.created_cnt );
  FD_MCNT_SET( REPLAY, ACCDB_REVERTED, ctx->accdb_admin->metrics.revert_cnt );
  FD_MCNT_SET( REPLAY, ACCDB_ROOTED,   ctx->accdb_admin->metrics.root_cnt );
  FD_MCNT_SET( REPLAY, ACCDB_GC_ROOT,  ctx->accdb_admin->metrics.gc_root_cnt );
}

static inline ulong
generate_stake_weight_msg( ulong                       epoch,
                           fd_epoch_schedule_t const * epoch_schedule,
                           fd_vote_states_t const *    epoch_stakes,
                           ulong *                     stake_weight_msg_out ) {
  fd_stake_weight_msg_t *  stake_weight_msg = (fd_stake_weight_msg_t *)fd_type_pun( stake_weight_msg_out );
  fd_vote_stake_weight_t * stake_weights    = stake_weight_msg->weights;

  stake_weight_msg->epoch             = epoch;
  stake_weight_msg->start_slot        = fd_epoch_slot0( epoch_schedule, epoch );
  stake_weight_msg->slot_cnt          = epoch_schedule->slots_per_epoch;
  stake_weight_msg->excluded_stake    = 0UL;
  stake_weight_msg->vote_keyed_lsched = 1UL;

  /* FIXME: SIMD-0180 - hack to (de)activate in testnet vs mainnet.
     This code can be removed once the feature is active. */
  if( (1==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_TESTNET) ||
      (0==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_MAINNET) ) {
    stake_weight_msg->vote_keyed_lsched = 0UL;
  }

  /* epoch_stakes from the manifest are already filtered (stake>0),
     but not sorted */
  fd_vote_states_iter_t iter_[1];
  ulong idx = 0UL;
  for( fd_vote_states_iter_t * iter = fd_vote_states_iter_init( iter_, epoch_stakes ); !fd_vote_states_iter_done( iter ); fd_vote_states_iter_next( iter ) ) {
    fd_vote_state_ele_t * vote_state = fd_vote_states_iter_ele( iter );
    if( FD_UNLIKELY( !vote_state->stake ) ) continue;

    stake_weights[ idx ].stake = vote_state->stake;
    memcpy( stake_weights[ idx ].id_key.uc,   &vote_state->node_account, sizeof(fd_pubkey_t) );
    memcpy( stake_weights[ idx ].vote_key.uc, &vote_state->vote_account, sizeof(fd_pubkey_t) );
    idx++;
  }
  stake_weight_msg->staked_cnt = idx;
  sort_vote_weights_by_stake_vote_inplace( stake_weights, idx );

  return fd_stake_weight_msg_sz( idx );
}

static void
publish_stake_weights( fd_replay_tile_t *  ctx,
                       fd_stem_context_t * stem,
                       fd_bank_t *         bank,
                       int                 current_epoch ) {
  fd_epoch_schedule_t const * schedule = fd_bank_epoch_schedule_query( bank );
  ulong epoch = fd_slot_to_epoch( schedule, fd_bank_slot_get( bank ), NULL );

  fd_vote_states_t const * vote_states_prev;
  if( FD_LIKELY( current_epoch ) ) vote_states_prev = fd_bank_vote_states_prev_locking_query( bank );
  else                             vote_states_prev = fd_bank_vote_states_prev_prev_locking_query( bank );

  ulong * stake_weights_msg = fd_chunk_to_laddr( ctx->stake_out->mem, ctx->stake_out->chunk );
  ulong stake_weights_sz  = generate_stake_weight_msg( epoch+fd_ulong_if( current_epoch, 1UL, 0UL ), schedule, vote_states_prev, stake_weights_msg );
  ulong stake_weights_sig = 4UL;
  fd_stem_publish( stem, ctx->stake_out->idx, stake_weights_sig, ctx->stake_out->chunk, stake_weights_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
  ctx->stake_out->chunk = fd_dcache_compact_next( ctx->stake_out->chunk, stake_weights_sz, ctx->stake_out->chunk0, ctx->stake_out->wmark );

  if( FD_LIKELY( current_epoch ) ) fd_bank_vote_states_prev_end_locking_query( bank );
  else                             fd_bank_vote_states_prev_prev_end_locking_query( bank );

  fd_multi_epoch_leaders_stake_msg_init( ctx->mleaders, fd_type_pun_const( stake_weights_msg ) );
  fd_multi_epoch_leaders_stake_msg_fini( ctx->mleaders );
}

/**********************************************************************/
/* Transaction execution state machine helpers                        */
/**********************************************************************/

static fd_bank_t *
replay_block_start( fd_replay_tile_t *  ctx,
                    fd_stem_context_t * stem,
                    ulong               bank_idx,
                    ulong               parent_bank_idx,
                    ulong               slot ) {
  long before = fd_log_wallclock();

  /* Switch to a new block that we don't have a bank for. */

  fd_bank_t * bank = fd_banks_bank_query( ctx->banks, bank_idx );
  if( FD_UNLIKELY( !bank ) ) {
    FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", bank_idx ));
  }
  if( FD_UNLIKELY( bank->flags!=FD_BANK_FLAGS_INIT ) ) {
    FD_LOG_CRIT(( "invariant violation: bank is not in correct state for bank index %lu", bank_idx ));
  }

  bank->preparation_begin_nanos = before;

  fd_bank_t * parent_bank = fd_banks_bank_query( ctx->banks, parent_bank_idx );
  if( FD_UNLIKELY( !parent_bank ) ) {
    FD_LOG_CRIT(( "invariant violation: parent bank is NULL for bank index %lu", parent_bank_idx ));
  }
  if( FD_UNLIKELY( !(parent_bank->flags&FD_BANK_FLAGS_FROZEN) ) ) {
    FD_LOG_CRIT(( "invariant violation: parent bank is not frozen for bank index %lu", parent_bank_idx ));
  }
  ulong parent_slot = fd_bank_slot_get( parent_bank );

  /* Clone the bank from the parent.  We must special case the first
     slot that is executed as the snapshot does not provide a parent
     block id. */

  bank = fd_banks_clone_from_parent( ctx->banks, bank_idx, parent_bank_idx );
  if( FD_UNLIKELY( !bank ) ) {
    FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", bank_idx ));
  }
  fd_bank_slot_set( bank, slot );
  fd_bank_parent_slot_set( bank, parent_slot );
  bank->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, parent_bank->txncache_fork_id );

  /* Create a new funk txn for the block. */

  fd_funk_txn_xid_t xid        = { .ul = { slot, bank_idx } };
  fd_funk_txn_xid_t parent_xid = { .ul = { parent_slot, parent_bank_idx } };
  fd_accdb_attach_child( ctx->accdb_admin, &parent_xid, &xid );
  fd_progcache_txn_attach_child( ctx->progcache_admin, &parent_xid, &xid );

  /* Update any required runtime state and handle any potential epoch
     boundary change. */

  fd_bank_shred_cnt_set( bank, 0UL );
  fd_bank_execution_fees_set( bank, 0UL );
  fd_bank_priority_fees_set( bank, 0UL );
  fd_bank_tips_set( bank, 0UL );

  fd_bank_has_identity_vote_set( bank, 0 );

  /* Set the tick height. */
  fd_bank_tick_height_set( bank, fd_bank_max_tick_height_get( bank ) );

  /* Update block height. */
  fd_bank_block_height_set( bank, fd_bank_block_height_get( bank ) + 1UL );

  ulong * max_tick_height = fd_bank_max_tick_height_modify( bank );
  ulong   ticks_per_slot  = fd_bank_ticks_per_slot_get( bank );
  if( FD_UNLIKELY( FD_RUNTIME_EXECUTE_SUCCESS != fd_runtime_compute_max_tick_height( ticks_per_slot, slot, max_tick_height ) ) ) {
    FD_LOG_CRIT(( "couldn't compute tick height/max tick height slot %lu ticks_per_slot %lu", slot, ticks_per_slot ));
  }

  int is_epoch_boundary = 0;
  fd_runtime_block_execute_prepare( ctx->banks, bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
  if( FD_UNLIKELY( is_epoch_boundary ) ) publish_stake_weights( ctx, stem, bank, 1 );

  return bank;
}

static void
cost_tracker_snap( fd_bank_t * bank, fd_replay_slot_completed_t * slot_info ) {
  if( bank->cost_tracker_pool_idx!=fd_bank_cost_tracker_pool_idx_null( fd_bank_get_cost_tracker_pool( bank ) ) ) {
    fd_cost_tracker_t const * cost_tracker = fd_bank_cost_tracker_locking_query( bank );
    slot_info->cost_tracker.block_cost                   = cost_tracker->block_cost;
    slot_info->cost_tracker.vote_cost                    = cost_tracker->vote_cost;
    slot_info->cost_tracker.allocated_accounts_data_size = cost_tracker->allocated_accounts_data_size;
    slot_info->cost_tracker.block_cost_limit             = cost_tracker->block_cost_limit;
    slot_info->cost_tracker.vote_cost_limit              = cost_tracker->vote_cost_limit;
    slot_info->cost_tracker.account_cost_limit           = cost_tracker->account_cost_limit;
    fd_bank_cost_tracker_end_locking_query( bank );
  } else {
    memset( &slot_info->cost_tracker, 0, sizeof(slot_info->cost_tracker) );
  }
}

static ulong
get_identity_balance( fd_replay_tile_t * ctx, fd_funk_txn_xid_t xid ) {
  ulong identity_balance = ULONG_MAX;
  fd_txn_account_t identity_acc[1];
  fd_funk_t * funk = fd_accdb_user_v1_funk( ctx->accdb );
  int err = fd_txn_account_init_from_funk_readonly( identity_acc,
                                                    ctx->identity_pubkey,
                                                    funk,
                                                    &xid );
  if( FD_LIKELY( !err && identity_acc->meta ) ) identity_balance = identity_acc->meta->lamports;

  return identity_balance;
}
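
/* Note: call sites below sample get_identity_balance at most once
   every 4096 slots (the slot%4096==0UL checks) and report ULONG_MAX
   otherwise, keeping this funk read off the per-slot hot path. */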

static void
publish_slot_completed( fd_replay_tile_t *  ctx,
                        fd_stem_context_t * stem,
                        fd_bank_t *         bank,
                        int                 is_initial,
                        int                 is_leader ) {

  ulong slot = fd_bank_slot_get( bank );

  fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ bank->idx ];

  /* HACKY: hacky way of checking if we should send a null parent
     block id */
  fd_hash_t parent_block_id = {0};
  if( FD_UNLIKELY( !is_initial ) ) {
    parent_block_id = ctx->block_id_arr[ bank->parent_idx ].block_id;
  }

  fd_hash_t const * bank_hash  = fd_bank_bank_hash_query( bank );
  fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
  FD_TEST( bank_hash );
  FD_TEST( block_hash );

  if( FD_LIKELY( !is_initial ) ) fd_txncache_finalize_fork( ctx->txncache, bank->txncache_fork_id, 0UL, block_hash->uc );

  fd_epoch_schedule_t const * epoch_schedule = fd_bank_epoch_schedule_query( bank );
  ulong slot_idx;
  ulong epoch = fd_slot_to_epoch( epoch_schedule, slot, &slot_idx );

  ctx->metrics.slots_total++;
  ctx->metrics.transactions_total = fd_bank_txn_count_get( bank );

  fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  slot_info->slot              = slot;
  slot_info->root_slot         = ctx->consensus_root_slot;
  slot_info->storage_slot      = ctx->published_root_slot;
  slot_info->epoch             = epoch;
  slot_info->slot_in_epoch     = slot_idx;
  slot_info->block_height      = fd_bank_block_height_get( bank );
  slot_info->parent_slot       = fd_bank_parent_slot_get( bank );
  slot_info->block_id          = block_id_ele->block_id;
  slot_info->parent_block_id   = parent_block_id;
  slot_info->bank_hash         = *bank_hash;
  slot_info->block_hash        = *block_hash;
  slot_info->transaction_count = fd_bank_txn_count_get( bank );

  fd_inflation_t inflation = fd_bank_inflation_get( bank );
  slot_info->inflation.foundation      = inflation.foundation;
  slot_info->inflation.foundation_term = inflation.foundation_term;
  slot_info->inflation.terminal        = inflation.terminal;
  slot_info->inflation.initial         = inflation.initial;
  slot_info->inflation.taper           = inflation.taper;

  fd_rent_t rent = fd_bank_rent_get( bank );
  slot_info->rent.burn_percent            = rent.burn_percent;
  slot_info->rent.lamports_per_uint8_year = rent.lamports_per_uint8_year;
  slot_info->rent.exemption_threshold     = rent.exemption_threshold;

  slot_info->first_fec_set_received_nanos      = bank->first_fec_set_received_nanos;
  slot_info->preparation_begin_nanos           = bank->preparation_begin_nanos;
  slot_info->first_transaction_scheduled_nanos = bank->first_transaction_scheduled_nanos;
  slot_info->last_transaction_finished_nanos   = bank->last_transaction_finished_nanos;
  slot_info->completion_time_nanos             = fd_log_wallclock();

  /* refcnt should be incremented by 1 for each consumer that uses
     `bank_idx`.  Each consumer should decrement the bank's refcnt
     once they are done using the bank. */
  bank->refcnt++;                                     /* tower tile */
  if( FD_LIKELY( ctx->gui_enabled ) ) bank->refcnt++; /* gui tile */
  slot_info->bank_idx = bank->idx;

  slot_info->parent_bank_idx = ULONG_MAX;
  fd_bank_t * parent_bank = fd_banks_get_parent( ctx->banks, bank );
  if( FD_LIKELY( parent_bank && ctx->gui_enabled ) ) {
    parent_bank->refcnt++;
    slot_info->parent_bank_idx = parent_bank->idx;
  }

  slot_info->is_leader = is_leader;

  fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_SLOT_COMPLETED, ctx->replay_out->chunk, sizeof(fd_replay_slot_completed_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
  ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_slot_completed_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
}

static void
publish_slot_dead( fd_replay_tile_t *  ctx,
                   fd_stem_context_t * stem,
                   fd_bank_t *         bank ) {
  fd_replay_slot_dead_t * slot_dead = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  slot_dead->slot     = fd_bank_slot_get( bank );
  slot_dead->block_id = ctx->block_id_arr[ bank->idx ].block_id;
  fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_SLOT_DEAD, ctx->replay_out->chunk, sizeof(fd_replay_slot_dead_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
  ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_slot_dead_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
}

static void
replay_block_finalize( fd_replay_tile_t *  ctx,
                       fd_stem_context_t * stem,
                       fd_bank_t *         bank ) {
  bank->last_transaction_finished_nanos = fd_log_wallclock();

  FD_TEST( !(bank->flags&FD_BANK_FLAGS_FROZEN) );

  /* Set poh hash in bank. */
  fd_hash_t * poh = fd_sched_get_poh( ctx->sched, bank->idx );
  fd_bank_poh_set( bank, *poh );

  /* Set shred count in bank. */
  fd_bank_shred_cnt_set( bank, fd_sched_get_shred_cnt( ctx->sched, bank->idx ) );

  /* Do hashing and other end-of-block processing. */
  fd_runtime_block_execute_finalize( bank, ctx->accdb, ctx->capture_ctx );

  /* Copy out cost tracker fields before freezing. */
  fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  cost_tracker_snap( bank, slot_info );

  /* Fetch identity / vote balance updates infrequently. */
  ulong slot = fd_bank_slot_get( bank );
  fd_funk_txn_xid_t xid = { .ul = { slot, bank->idx } };
  slot_info->identity_balance = FD_UNLIKELY( slot%4096==0UL ) ? get_identity_balance( ctx, xid ) : ULONG_MAX;

  /* Mark the bank as frozen. */
  fd_banks_mark_bank_frozen( ctx->banks, bank );

  /********************************************************************/
  /* Bank hash comparison, and halt if there's a mismatch after replay */
  /********************************************************************/

  fd_hash_t const * bank_hash = fd_bank_bank_hash_query( bank );
  FD_TEST( bank_hash );

  /* Must be last so we can measure completion time correctly, even
     though we could technically do this before the hash cmp and vote
     tower stuff. */
  publish_slot_completed( ctx, stem, bank, 0, 0 /* is_leader */ );

# if FD_HAS_FLATCC
  /* If enabled, dump the block to a file and reset the dumping
     context state. */
  if( FD_UNLIKELY( ctx->capture_ctx && ctx->capture_ctx->dump_block_to_pb ) ) {
    fd_funk_t * funk = fd_accdb_user_v1_funk( ctx->accdb );
    fd_dump_block_to_protobuf( ctx->block_dump_ctx, ctx->banks, bank, funk, ctx->capture_ctx );
    fd_block_dump_context_reset( ctx->block_dump_ctx );
  }
# endif
}

/**********************************************************************/
/* Leader bank management                                              */
/**********************************************************************/

static fd_bank_t *
prepare_leader_bank( fd_replay_tile_t *  ctx,
                     ulong               slot,
                     long                now,
                     fd_hash_t const *   parent_block_id,
                     fd_stem_context_t * stem ) {
  long before = fd_log_wallclock();

  /* Make sure that we are not already leader. */
  FD_TEST( ctx->leader_bank==NULL );

  fd_block_id_ele_t * parent_ele = fd_block_id_map_ele_query( ctx->block_id_map, parent_block_id, NULL, ctx->block_id_arr );
  if( FD_UNLIKELY( !parent_ele ) ) {
    FD_LOG_CRIT(( "invariant violation: parent bank index not found for merkle root %s", FD_BASE58_ENC_32_ALLOCA( parent_block_id->uc ) ));
  }
  ulong parent_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, parent_ele );

  fd_bank_t * parent_bank = fd_banks_bank_query( ctx->banks, parent_bank_idx );
  if( FD_UNLIKELY( !parent_bank ) ) {
    FD_LOG_CRIT(( "invariant violation: parent bank not found for bank index %lu", parent_bank_idx ));
  }
  ulong parent_slot = fd_bank_slot_get( parent_bank );

  ctx->leader_bank = fd_banks_new_bank( ctx->banks, parent_bank_idx, now );
  if( FD_UNLIKELY( !ctx->leader_bank ) ) {
    FD_LOG_CRIT(( "invariant violation: leader bank is NULL for slot %lu", slot ));
  }

  if( FD_UNLIKELY( !fd_banks_clone_from_parent( ctx->banks, ctx->leader_bank->idx, parent_bank_idx ) ) ) {
    FD_LOG_CRIT(( "invariant violation: bank is NULL for slot %lu", slot ));
  }

  ctx->leader_bank->preparation_begin_nanos = before;

  fd_bank_slot_set( ctx->leader_bank, slot );
  fd_bank_parent_slot_set( ctx->leader_bank, parent_slot );
  ctx->leader_bank->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, parent_bank->txncache_fork_id );

  /* Prepare the funk transaction for the leader bank. */
  fd_funk_txn_xid_t xid        = { .ul = { slot, ctx->leader_bank->idx } };
  fd_funk_txn_xid_t parent_xid = { .ul = { parent_slot, parent_bank_idx } };
  fd_accdb_attach_child( ctx->accdb_admin, &parent_xid, &xid );
  fd_progcache_txn_attach_child( ctx->progcache_admin, &parent_xid, &xid );

  fd_bank_execution_fees_set( ctx->leader_bank, 0UL );
  fd_bank_priority_fees_set( ctx->leader_bank, 0UL );
  fd_bank_shred_cnt_set( ctx->leader_bank, 0UL );
  fd_bank_tips_set( ctx->leader_bank, 0UL );

  /* Set the tick height. */
  fd_bank_tick_height_set( ctx->leader_bank, fd_bank_max_tick_height_get( ctx->leader_bank ) );

  /* Update block height. */
  fd_bank_block_height_set( ctx->leader_bank, fd_bank_block_height_get( ctx->leader_bank ) + 1UL );

  ulong * max_tick_height = fd_bank_max_tick_height_modify( ctx->leader_bank );
  ulong   ticks_per_slot  = fd_bank_ticks_per_slot_get( ctx->leader_bank );
  if( FD_UNLIKELY( FD_RUNTIME_EXECUTE_SUCCESS != fd_runtime_compute_max_tick_height( ticks_per_slot, slot, max_tick_height ) ) ) {
    FD_LOG_CRIT(( "couldn't compute tick height/max tick height slot %lu ticks_per_slot %lu", slot, ticks_per_slot ));
  }

  int is_epoch_boundary = 0;
  fd_runtime_block_execute_prepare( ctx->banks, ctx->leader_bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
  if( FD_UNLIKELY( is_epoch_boundary ) ) publish_stake_weights( ctx, stem, ctx->leader_bank, 1 );

  /* Now that a bank has been created for the leader slot, increment
     the reference count until we are done with the leader slot. */
  ctx->leader_bank->refcnt++;

  return ctx->leader_bank;
}

static void
fini_leader_bank( fd_replay_tile_t *  ctx,
                  fd_stem_context_t * stem ) {

  FD_TEST( ctx->leader_bank!=NULL );
  FD_TEST( ctx->is_leader );
  FD_TEST( ctx->recv_block_id );
  FD_TEST( ctx->recv_poh );

  ctx->leader_bank->last_transaction_finished_nanos = fd_log_wallclock();

  ulong curr_slot = fd_bank_slot_get( ctx->leader_bank );

  fd_sched_block_add_done( ctx->sched, ctx->leader_bank->idx, ctx->leader_bank->parent_idx, curr_slot );

  /* Do hashing and other end-of-block processing. */
  fd_funk_t * funk = fd_accdb_user_v1_funk( ctx->accdb );
  fd_funk_txn_map_t * txn_map = fd_funk_txn_map( funk );
  if( FD_UNLIKELY( !txn_map->map ) ) {
    FD_LOG_ERR(( "Could not find valid funk transaction map" ));
  }

  fd_runtime_block_execute_finalize( ctx->leader_bank, ctx->accdb, ctx->capture_ctx );

  fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  cost_tracker_snap( ctx->leader_bank, slot_info );
  fd_funk_txn_xid_t xid = { .ul = { curr_slot, ctx->leader_bank->idx } };
  slot_info->identity_balance = FD_UNLIKELY( curr_slot%4096==0UL ) ? get_identity_balance( ctx, xid ) : ULONG_MAX;

  fd_banks_mark_bank_frozen( ctx->banks, ctx->leader_bank );

  fd_hash_t const * bank_hash = fd_bank_bank_hash_query( ctx->leader_bank );
  FD_TEST( bank_hash );

  publish_slot_completed( ctx, stem, ctx->leader_bank, 0, 1 /* is_leader */ );

  /* The reference on the bank is finally no longer needed. */
  ctx->leader_bank->refcnt--;

  /* We are no longer leader, so we can clear the bank we use for
     being the leader. */
  ctx->leader_bank   = NULL;
  ctx->recv_block_id = 0;
  ctx->recv_poh      = 0;
  ctx->is_leader     = 0;
}

static void
publish_root_advanced( fd_replay_tile_t *  ctx,
                       fd_stem_context_t * stem ) {

  /* FIXME: for now we want to send the child of the consensus root to
     avoid data races with funk root advancing.  This is a temporary
     hack because currently it is not safe to query against the xid
     for the root that is being advanced in funk.  This doesn't
     eliminate the data race that exists in funk, but reduces how
     often it occurs.

     Case that causes a data race:
     replay: we are advancing the root from slot A->B
     resolv: we are resolving ALUTs against slot B */

  fd_bank_t * consensus_root_bank = fd_banks_bank_query( ctx->banks, ctx->consensus_root_bank_idx );
  if( FD_UNLIKELY( !consensus_root_bank ) ) {
    FD_LOG_CRIT(( "invariant violation: consensus root bank is NULL at bank index %lu", ctx->consensus_root_bank_idx ));
  }

  if( FD_UNLIKELY( consensus_root_bank->child_idx==ULONG_MAX ) ) {
    return;
  }

  fd_bank_t * bank = fd_banks_bank_query( ctx->banks, consensus_root_bank->child_idx );
  if( FD_UNLIKELY( !bank ) ) {
    FD_LOG_CRIT(( "invariant violation: consensus root bank child is NULL at bank index %lu", consensus_root_bank->child_idx ));
  }

  /* Increment the reference count on the bank we are about to publish
     (the consensus root's child) to account for the number of resolv
     tiles that will hold a reference to it. */
  bank->refcnt += ctx->resolv_tile_cnt;

  fd_replay_root_advanced_t * msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  msg->bank_idx = bank->idx;

  fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_ROOT_ADVANCED, ctx->replay_out->chunk, sizeof(fd_replay_root_advanced_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
  ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_root_advanced_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
}

/* init_funk performs pre-flight checks for the account database and
   program cache.  Ensures that the account database was set up
   correctly by bootstrap components (e.g. genesis or snapshot
   loader).  Mirrors the account database's fork tree down to the
   program cache. */

static void
init_funk( fd_replay_tile_t * ctx,
           ulong              bank_slot ) {
  /* Ensure that the loaded bank root corresponds to the account
     database's root. */
  fd_funk_t * funk = ctx->accdb_admin->funk;
  if( FD_UNLIKELY( !funk->shmem ) ) {
    FD_LOG_CRIT(( "failed to initialize account database: replay tile is not joined to database shared memory objects" ));
  }
  fd_funk_txn_xid_t const * accdb_pub = fd_funk_last_publish( funk );
  if( FD_UNLIKELY( accdb_pub->ul[0]!=bank_slot ) ) {
    FD_LOG_CRIT(( "failed to initialize account database: accdb is at slot %lu, but chain state is at slot %lu\n"
                  "This is a bug in startup components.",
                  accdb_pub->ul[0], bank_slot ));
  }
  if( FD_UNLIKELY( fd_funk_last_publish_is_frozen( funk ) ) ) {
    FD_LOG_CRIT(( "failed to initialize account database: accdb fork graph is not clean.\n"
                  "The account database should only contain state for the root slot at this point,\n"
                  "but there are incomplete database transactions leftover.\n"
                  "This is a bug in startup components." ));
  }

  /* The program cache tracks the account database's fork graph at all
     times.  Perform initial synchronization: pivot from funk 'root'
     (a sentinel XID) to 'last publish' (the bootstrap root slot). */
  if( FD_UNLIKELY( !ctx->progcache_admin->funk->shmem ) ) {
    FD_LOG_CRIT(( "failed to initialize account database: replay tile is not joined to program cache" ));
  }
  fd_progcache_clear( ctx->progcache_admin );
  fd_progcache_txn_attach_child( ctx->progcache_admin, fd_funk_root( ctx->progcache_admin->funk ), fd_funk_last_publish( ctx->accdb_admin->funk ) );
  fd_progcache_txn_advance_root( ctx->progcache_admin, fd_funk_last_publish( ctx->accdb_admin->funk ) );
}

static void
init_after_snapshot( fd_replay_tile_t * ctx ) {
  /* Now that the snapshot has been loaded in, we have to refresh the
     stake delegations since the manifest does not contain the full
     set of data required for the stake delegations.  See
     fd_stake_delegations.h for why this is required. */

  fd_bank_t * bank = fd_banks_bank_query( ctx->banks, FD_REPLAY_BOOT_BANK_IDX );
  if( FD_UNLIKELY( !bank ) ) {
    FD_LOG_CRIT(( "invariant violation: replay bank is NULL at bank index %lu", FD_REPLAY_BOOT_BANK_IDX ));
  }

  fd_funk_t * funk = fd_accdb_user_v1_funk( ctx->accdb );
  fd_funk_txn_xid_t xid = { .ul = { fd_bank_slot_get( bank ), bank->idx } };
  init_funk( ctx, fd_bank_slot_get( bank ) );

  fd_stake_delegations_t * root_delegations = fd_banks_stake_delegations_root_query( ctx->banks );

  fd_stake_delegations_refresh( root_delegations, funk, &xid );

  /* After both snapshots have been loaded in, we can determine if we
     should start distributing rewards. */

  fd_rewards_recalculate_partitioned_rewards( ctx->banks, bank, funk, &xid, &ctx->runtime_stack, ctx->capture_ctx );

  ulong snapshot_slot = fd_bank_slot_get( bank );
  if( FD_UNLIKELY( !snapshot_slot ) ) {
    /* Genesis-specific setup. */
    /* FIXME: This branch does not set up a new block exec ctx
       properly.  Needs to do whatever prepare_new_block_execution
       does, but just hacking that in breaks stuff. */
    fd_runtime_update_leaders( bank, &ctx->runtime_stack );

    ulong hashcnt_per_slot = fd_bank_hashes_per_tick_get( bank ) * fd_bank_ticks_per_slot_get( bank );
    fd_hash_t * poh = fd_bank_poh_modify( bank );
    while( hashcnt_per_slot-- ) {
      fd_sha256_hash( poh->hash, 32UL, poh->hash );
    }

    int is_epoch_boundary = 0;
    fd_runtime_block_execute_prepare( ctx->banks, bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
    FD_TEST( !is_epoch_boundary );
    fd_runtime_block_execute_finalize( bank, ctx->accdb, ctx->capture_ctx );

    snapshot_slot = 0UL;
  }
}

static inline int
maybe_become_leader( fd_replay_tile_t *  ctx,
                     fd_stem_context_t * stem ) {
  FD_TEST( ctx->is_booted );
  if( FD_LIKELY( ctx->next_leader_slot==ULONG_MAX || ctx->is_leader || !ctx->has_identity_vote_rooted || ctx->replay_out->idx==ULONG_MAX ) ) return 0;

  FD_TEST( ctx->next_leader_slot>ctx->reset_slot );
  long now = fd_tickcount();
  if( FD_LIKELY( now<ctx->next_leader_tickcount ) ) return 0;

  /* TODO:
     if( FD_UNLIKELY( ctx->halted_switching_key ) ) return 0; */

  /* If a prior leader is still in the process of publishing their
     slot, delay ours to let them finish ... unless they are so
     delayed that we risk getting skipped by the leader following us.
     1.2 seconds is a reasonable default here, although any value
     between 0 and 1.6 seconds could be considered reasonable.  This
     is arbitrary and chosen due to intuition. */
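  /* Note: the 3.0*ctx->slot_duration_ticks threshold below works out
     to the "1.2 seconds" mentioned above, assuming the default 400 ms
     slot duration (3 x 400 ms = 1.2 s). */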
  if( FD_UNLIKELY( now<ctx->next_leader_tickcount+(long)(3.0*ctx->slot_duration_ticks) ) ) {
    FD_TEST( ctx->reset_bank );

    /* TODO: Make the max_active_descendant calculation more efficient
       by caching it in the bank structure and updating it as banks
       are created and completed. */
    ulong max_active_descendant = 0UL;
    ulong child_idx = ctx->reset_bank->child_idx;
    while( child_idx!=ULONG_MAX ) {
      fd_bank_t const * child_bank = fd_banks_bank_query( ctx->banks, child_idx );
      max_active_descendant = fd_ulong_max( max_active_descendant, fd_bank_slot_get( child_bank ) );
      child_idx = child_bank->sibling_idx;
    }

    /* If the max_active_descendant is >= next_leader_slot, we waited
       too long and a leader after us started publishing to try and
       skip us.  Just start our leader slot immediately, we might
       win ... */
    if( FD_LIKELY( max_active_descendant>=ctx->reset_slot && max_active_descendant<ctx->next_leader_slot ) ) {
      /* If one of the leaders between the reset slot and our leader
         slot is in the process of publishing (they have a descendant
         bank that is in progress of being replayed), then keep
         waiting.  We probably wouldn't get a leader slot out before
         they finished.

         Unless... we are past the deadline to start our slot by more
         than 1.2 seconds, in which case we should probably start it
         to avoid getting skipped by the leader behind us. */
      return 0;
    }
  }

  long now_nanos = fd_log_wallclock();

  ctx->is_leader     = 1;
  ctx->recv_poh      = 0;
  ctx->recv_block_id = 0;

  FD_TEST( ctx->highwater_leader_slot==ULONG_MAX || ctx->highwater_leader_slot<ctx->next_leader_slot );
  ctx->highwater_leader_slot = ctx->next_leader_slot;

  FD_LOG_INFO(( "becoming leader for slot %lu, parent slot is %lu", ctx->next_leader_slot, ctx->reset_slot ));

  /* Acquires a bank, sets up initial state, and refcnts it. */
  fd_bank_t * bank = prepare_leader_bank( ctx, ctx->next_leader_slot, now_nanos, &ctx->reset_block_id, stem );
  fd_funk_txn_xid_t xid = { .ul = { ctx->next_leader_slot, ctx->leader_bank->idx } };

  fd_bundle_crank_tip_payment_config_t config[1]             = { 0 };
  fd_acct_addr_t                       tip_receiver_owner[1] = { 0 };

  if( FD_UNLIKELY( ctx->bundle.enabled ) ) {
    fd_acct_addr_t tip_payment_config[1];
    fd_acct_addr_t tip_receiver[1];
    fd_bundle_crank_get_addresses( ctx->bundle.gen, fd_bank_epoch_get( bank ), tip_payment_config, tip_receiver );

    fd_funk_t * funk = fd_accdb_user_v1_funk( ctx->accdb );
    fd_txn_account_t tip_config_acc[1];
    int err = fd_txn_account_init_from_funk_readonly( tip_config_acc,
                                                      (fd_hash_t *)tip_payment_config->b,
                                                      funk,
                                                      &xid );
    if( FD_UNLIKELY( err ) ) {
      FD_LOG_CRIT(( "failed to initialize tip payment config account: err=%d", err ));
    }
    memcpy( config, fd_txn_account_get_data( tip_config_acc ), sizeof(fd_bundle_crank_tip_payment_config_t) );

    /* It is possible that the tip receiver account does not exist yet
       if it is the first time in an epoch. */
    fd_txn_account_t tip_receiver_acc[1];
    err = fd_txn_account_init_from_funk_readonly( tip_receiver_acc,
                                                  (fd_hash_t *)tip_receiver->b,
                                                  funk,
                                                  &xid );
    if( FD_LIKELY( !err ) ) {
      memcpy( tip_receiver_owner, tip_receiver_acc->meta->owner, sizeof(fd_acct_addr_t) );
    }
  }

  fd_became_leader_t * msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  msg->slot              = ctx->next_leader_slot;
  msg->slot_start_ns     = now_nanos;
  msg->slot_end_ns       = now_nanos+(long)ctx->slot_duration_nanos;
  msg->bank              = NULL;
  msg->bank_idx          = bank->idx;
  msg->ticks_per_slot    = fd_bank_ticks_per_slot_get( bank );
  msg->hashcnt_per_tick  = fd_bank_hashes_per_tick_get( bank );
  msg->tick_duration_ns  = (ulong)(ctx->slot_duration_nanos/(double)msg->ticks_per_slot);
  msg->bundle->config[0] = config[0];
  memcpy( msg->bundle->last_blockhash,     (fd_hash_t *)fd_bank_poh_query( bank )->hash, 32UL );
  memcpy( msg->bundle->tip_receiver_owner, tip_receiver_owner,                           32UL );

  if( FD_UNLIKELY( msg->hashcnt_per_tick==1UL ) ) {
    /* Low power producer, maximum of one microblock per tick in the slot */
    msg->max_microblocks_in_slot = msg->ticks_per_slot;
  } else {
    /* See the long comment in after_credit for this limit */
    msg->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, msg->ticks_per_slot*(msg->hashcnt_per_tick-1UL) );
  }

  msg->total_skipped_ticks = msg->ticks_per_slot*(ctx->next_leader_slot-ctx->reset_slot);
  msg->epoch               = fd_slot_to_epoch( fd_bank_epoch_schedule_query( bank ), ctx->next_leader_slot, NULL );

  fd_cost_tracker_t const * cost_tracker = fd_bank_cost_tracker_locking_query( bank );

  msg->limits.slot_max_cost                = ctx->larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : cost_tracker->block_cost_limit;
  msg->limits.slot_max_vote_cost           = cost_tracker->vote_cost_limit;
  msg->limits.slot_max_write_cost_per_acct = cost_tracker->account_cost_limit;

  fd_bank_cost_tracker_end_locking_query( bank );

  if( FD_UNLIKELY( msg->ticks_per_slot+msg->total_skipped_ticks>USHORT_MAX ) ) {
    /* There can be at most USHORT_MAX skipped ticks, because the
       parent_offset field in the shred data is only 2 bytes wide. */
    FD_LOG_ERR(( "too many skipped ticks %lu for slot %lu, chain must halt", msg->ticks_per_slot+msg->total_skipped_ticks, ctx->next_leader_slot ));
  }

  fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_BECAME_LEADER, ctx->replay_out->chunk, sizeof(fd_became_leader_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
  ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_became_leader_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );

  ctx->next_leader_slot      = ULONG_MAX;
  ctx->next_leader_tickcount = LONG_MAX;

  return 1;
}
1219 :
1220 : static void
1221 : process_poh_message( fd_replay_tile_t * ctx,
1222 0 : fd_poh_leader_slot_ended_t const * slot_ended ) {
1223 :
1224 0 : FD_TEST( ctx->is_booted );
1225 0 : FD_TEST( ctx->is_leader );
1226 0 : FD_TEST( ctx->leader_bank!=NULL );
1227 :
1228 0 : FD_TEST( ctx->highwater_leader_slot>=slot_ended->slot );
1229 0 : FD_TEST( ctx->next_leader_slot>ctx->highwater_leader_slot );
1230 :
1231 : /* Update the poh hash in the bank. We will want to maintain a refcnt
1232 :      on the bank until we have received the block id for the block after
1233 : it has been shredded. */
1234 :
1235 0 : memcpy( fd_bank_poh_modify( ctx->leader_bank ), slot_ended->blockhash, sizeof(fd_hash_t) );
1236 :
1237 0 : ctx->recv_poh = 1;
1238 0 : }
1239 :
1240 : static void
1241 : publish_reset( fd_replay_tile_t * ctx,
1242 : fd_stem_context_t * stem,
1243 0 : fd_bank_t * bank ) {
1244 0 : if( FD_UNLIKELY( ctx->replay_out->idx==ULONG_MAX ) ) return;
1245 :
1246 0 : fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
1247 0 : FD_TEST( block_hash );
1248 :
1249 0 : fd_poh_reset_t * reset = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1250 :
1251 0 : reset->bank_idx = bank->idx;
1252 0 : reset->timestamp = fd_log_wallclock();
1253 0 : reset->completed_slot = fd_bank_slot_get( bank );
1254 0 : reset->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
1255 0 : reset->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
1256 0 : reset->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)reset->ticks_per_slot);
1257 0 : fd_memcpy( reset->completed_blockhash, block_hash->uc, sizeof(fd_hash_t) );
1258 :
1259 0 : ulong ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
1260 0 : if( FD_UNLIKELY( reset->hashcnt_per_tick==1UL ) ) {
1261 : /* Low power producer, maximum of one microblock per tick in the slot */
1262 0 : reset->max_microblocks_in_slot = ticks_per_slot;
1263 0 : } else {
1264 : /* See the long comment in after_credit for this limit */
1265 0 : reset->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ticks_per_slot*(reset->hashcnt_per_tick-1UL) );
1266 0 : }
1267 0 : reset->next_leader_slot = ctx->next_leader_slot;
1268 :
1269 0 : if( FD_LIKELY( ctx->rpc_enabled ) ) bank->refcnt++;
1270 :
1271 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_RESET, ctx->replay_out->chunk, sizeof(fd_poh_reset_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
1272 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_poh_reset_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
1273 0 : }
1274 :
1275 : static void
1276 : boot_genesis( fd_replay_tile_t * ctx,
1277 : fd_stem_context_t * stem,
1278 : ulong in_idx,
1279 0 : ulong chunk ) {
1280 :   /* If we are bootstrapping, we can't wait for our identity
1281 : vote to be rooted as this creates a circular dependency. */
1282 0 : ctx->has_identity_vote_rooted = 1;
1283 :
1284 0 : uchar const * lthash = (uchar*)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
1285 0 : uchar const * genesis_hash = (uchar*)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk )+sizeof(fd_lthash_value_t);
1286 :
1287 :   // TODO: Do not pass the fd_types type between tiles, it has offsets
1288 : // that are unsafe and can't be validated as being in-bounds. Need to
1289 : // pass an actual owned genesis type.
1290 0 : fd_genesis_solana_global_t const * genesis = fd_type_pun( (uchar*)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk )+sizeof(fd_hash_t)+sizeof(fd_lthash_value_t) );
1291 :
1292 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, FD_REPLAY_BOOT_BANK_IDX );
1293 0 : FD_TEST( bank );
1294 0 : fd_funk_txn_xid_t xid = { .ul = { 0UL, FD_REPLAY_BOOT_BANK_IDX } };
1295 :
1296 : /* Do genesis-related processing in a non-rooted transaction */
1297 0 : fd_funk_txn_xid_t root_xid; fd_funk_txn_xid_set_root( &root_xid );
1298 0 : fd_funk_txn_xid_t target_xid = { .ul = { 0UL, 0UL } };
1299 0 : fd_accdb_attach_child( ctx->accdb_admin, &root_xid, &target_xid );
1300 0 : fd_runtime_read_genesis( ctx->banks, bank, ctx->accdb, &xid, NULL, fd_type_pun_const( genesis_hash ), fd_type_pun_const( lthash ), genesis, &ctx->runtime_stack );
1301 0 : fd_accdb_advance_root( ctx->accdb_admin, &target_xid );
1302 :
1303 0 : static const fd_txncache_fork_id_t txncache_root = { .val = USHORT_MAX };
1304 0 : bank->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, txncache_root );
1305 :
1306 0 : fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
1307 0 : fd_txncache_finalize_fork( ctx->txncache, bank->txncache_fork_id, 0UL, block_hash->uc );
1308 :
1309 0 : publish_stake_weights( ctx, stem, bank, 0 );
1310 0 : publish_stake_weights( ctx, stem, bank, 1 );
1311 :
1312 : /* We call this after fd_runtime_read_genesis, which sets up the
1313 : slot_bank needed in blockstore_init. */
1314 0 : init_after_snapshot( ctx );
1315 :
1316 : /* Initialize store for genesis case, similar to snapshot case */
1317 0 : fd_hash_t genesis_block_id = { .ul[0] = FD_RUNTIME_INITIAL_BLOCK_ID };
1318 0 : fd_store_exacq( ctx->store );
1319 0 : if( FD_UNLIKELY( fd_store_root( ctx->store ) ) ) {
1320 0 : FD_LOG_CRIT(( "invariant violation: store root is not 0 for genesis" ));
1321 0 : }
1322 0 : fd_store_insert( ctx->store, 0, &genesis_block_id );
1323 0 : ctx->store->slot0 = 0UL; /* Genesis slot */
1324 0 : fd_store_exrel( ctx->store );
1325 :
1326 0 : ctx->published_root_slot = 0UL;
1327 0 : fd_sched_block_add_done( ctx->sched, bank->idx, ULONG_MAX, 0UL );
1328 :
1329 0 : fd_bank_block_height_set( bank, 1UL );
1330 :
1331 0 : ctx->consensus_root = (fd_hash_t){ .ul[0] = FD_RUNTIME_INITIAL_BLOCK_ID };
1332 0 : ctx->consensus_root_slot = 0UL;
1333 0 : ctx->consensus_root_bank_idx = 0UL;
1334 0 : ctx->published_root_slot = 0UL;
1335 0 : ctx->published_root_bank_idx = 0UL;
1336 :
1337 0 : ctx->reset_slot = 0UL;
1338 0 : ctx->reset_bank = bank;
1339 0 : ctx->reset_timestamp_nanos = fd_log_wallclock();
1340 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, 1UL, ctx->identity_pubkey );
1341 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
1342 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
1343 0 : } else {
1344 0 : ctx->next_leader_tickcount = LONG_MAX;
1345 0 : }
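  /* Added reading of the formula above: reset_slot has just completed,
     so (next_leader_slot - reset_slot - 1UL) full slot durations remain
     until next_leader_slot should begin; the target tickcount is that
     many slot_duration_ticks past the current fd_tickcount().  E.g. if
     next_leader_slot is the slot immediately after reset_slot, the
     delta is zero and we may become leader right away. */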
1346 :
1347 0 : ctx->is_booted = 1;
1348 0 : maybe_become_leader( ctx, stem );
1349 :
1350 0 : fd_hash_t initial_block_id = { .ul = { FD_RUNTIME_INITIAL_BLOCK_ID } };
1351 0 : fd_reasm_fec_t * fec = fd_reasm_insert( ctx->reasm, &initial_block_id, NULL, 0 /* genesis slot */, 0, 0, 0, 0, 1, 0 ); /* FIXME manifest block_id */
1352 0 : fec->bank_idx = 0UL;
1353 :
1354 :
1355 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ 0 ];
1356 0 : FD_TEST( block_id_ele );
1357 0 : block_id_ele->block_id = initial_block_id;
1358 0 : block_id_ele->slot = 0UL;
1359 :
1360 0 : FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
1361 :
1362 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1363 0 : slot_info->identity_balance = get_identity_balance( ctx, xid );
1364 :
1365 0 : publish_slot_completed( ctx, stem, bank, 1, 0 /* is_leader */ );
1366 0 : publish_root_advanced( ctx, stem );
1367 0 : publish_reset( ctx, stem, bank );
1368 0 : }
1369 :
1370 : static inline void
1371 0 : maybe_verify_cluster_type( fd_replay_tile_t * ctx ) {
1372 0 : if( FD_UNLIKELY( !ctx->is_booted || !ctx->has_genesis_hash ) ) {
1373 0 : return;
1374 0 : }
1375 :
1376 0 : FD_BASE58_ENCODE_32_BYTES( ctx->genesis_hash, hash_cstr );
1377 0 : ulong cluster = fd_genesis_cluster_identify( hash_cstr );
1378 :   /* Map pyth-related clusters to unknown. */
1379 0 : switch( cluster ) {
1380 0 : case FD_CLUSTER_PYTHNET:
1381 0 : case FD_CLUSTER_PYTHTEST:
1382 0 : cluster = FD_CLUSTER_UNKNOWN;
1383 0 : }
1384 :
1385 0 : if( FD_UNLIKELY( cluster!=ctx->cluster_type ) ) {
1386 0 : FD_LOG_ERR(( "Your genesis.bin file at `%s` has a genesis hash of `%s` which means the cluster is %s "
1387 0 : "but the snapshot you loaded is for a different cluster %s. If you are trying to join the "
1388 0 : "%s cluster, you can delete the genesis.bin file and restart the node to download the correct "
1389 0 : "genesis file automatically.",
1390 0 : ctx->genesis_path,
1391 0 : hash_cstr,
1392 0 : fd_genesis_cluster_name( cluster ),
1393 0 : fd_genesis_cluster_name( ctx->cluster_type ),
1394 0 : fd_genesis_cluster_name( cluster ) ));
1395 0 : }
1396 0 : }
1397 :
1398 : static void
1399 : on_snapshot_message( fd_replay_tile_t * ctx,
1400 : fd_stem_context_t * stem,
1401 : ulong in_idx,
1402 : ulong chunk,
1403 0 : ulong sig ) {
1404 0 : ulong msg = fd_ssmsg_sig_message( sig );
1405 0 : if( FD_LIKELY( msg==FD_SSMSG_DONE ) ) {
1406 : /* An end of message notification indicates the snapshot is loaded.
1407 : Replay is able to start executing from this point onwards. */
1408 : /* TODO: replay should finish booting. Could make replay a
1409 : state machine and set the state here accordingly. */
1410 0 : ctx->is_booted = 1;
1411 :
1412 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, FD_REPLAY_BOOT_BANK_IDX );
1413 0 : if( FD_UNLIKELY( !bank ) ) {
1414 0 : FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", FD_REPLAY_BOOT_BANK_IDX ));
1415 0 : }
1416 :
1417 0 : ulong snapshot_slot = fd_bank_slot_get( bank );
1418 : /* FIXME: This is a hack because the block id of the snapshot slot
1419 : is not provided in the snapshot. A possible solution is to get
1420 : the block id of the snapshot slot from repair. */
1421 0 : fd_hash_t manifest_block_id = { .ul = { FD_RUNTIME_INITIAL_BLOCK_ID } };
1422 :
1423 0 : fd_store_exacq( ctx->store );
1424 0 : FD_TEST( !fd_store_root( ctx->store ) );
1425 0 : fd_store_insert( ctx->store, 0, &manifest_block_id );
1426 0 : ctx->store->slot0 = snapshot_slot; /* FIXME manifest_block_id */
1427 0 : fd_store_exrel( ctx->store );
1428 :
1429 : /* Typically, when we cross an epoch boundary during normal
1430 : operation, we publish the stake weights for the new epoch. But
1431 : since we are starting from a snapshot, we need to publish two
1432 : epochs worth of stake weights: the previous epoch (which is
1433 : needed for voting on the current epoch), and the current epoch
1434 : (which is needed for voting on the next epoch). */
1435 0 : publish_stake_weights( ctx, stem, bank, 0 );
1436 0 : publish_stake_weights( ctx, stem, bank, 1 );
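    /* Added note: the trailing 0/1 argument presumably selects the
       previous vs the current epoch's stakes, per the comment above.
       Booting (from genesis or a snapshot) is the one case where both
       epochs must be published at once, since downstream consumers have
       seen neither yet. */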
1437 :
1438 0 : ctx->consensus_root = manifest_block_id;
1439 0 : ctx->consensus_root_slot = snapshot_slot;
1440 0 : ctx->consensus_root_bank_idx = 0UL;
1441 0 : ctx->published_root_slot = ctx->consensus_root_slot;
1442 0 : ctx->published_root_bank_idx = 0UL;
1443 :
1444 0 : ctx->reset_slot = snapshot_slot;
1445 0 : ctx->reset_bank = bank;
1446 0 : ctx->reset_timestamp_nanos = fd_log_wallclock();
1447 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, 1UL, ctx->identity_pubkey );
1448 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
1449 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
1450 0 : } else {
1451 0 : ctx->next_leader_tickcount = LONG_MAX;
1452 0 : }
1453 :
1454 0 : fd_sched_block_add_done( ctx->sched, bank->idx, ULONG_MAX, snapshot_slot );
1455 0 : FD_TEST( bank->idx==0UL );
1456 :
1457 0 : fd_funk_txn_xid_t xid = { .ul = { snapshot_slot, FD_REPLAY_BOOT_BANK_IDX } };
1458 :
1459 0 : fd_funk_t * funk = fd_accdb_user_v1_funk( ctx->accdb );
1460 0 : fd_features_restore( bank, funk, &xid );
1461 :
1462 0 : fd_runtime_update_leaders( bank, &ctx->runtime_stack );
1463 :
1464 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ 0 ];
1465 0 : FD_TEST( block_id_ele );
1466 0 : block_id_ele->block_id = manifest_block_id;
1467 0 : block_id_ele->slot = snapshot_slot;
1468 0 : FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
1469 :
1470 :     /* We call this after the snapshot manifest has been loaded, which
1471 :        sets up the slot_bank needed in blockstore_init. */
1472 0 : init_after_snapshot( ctx );
1473 :
1474 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1475 0 : slot_info->identity_balance = get_identity_balance( ctx, xid );
1476 :
1477 0 : publish_slot_completed( ctx, stem, bank, 1, 0 /* is_leader */ );
1478 0 : publish_root_advanced( ctx, stem );
1479 :
1480 0 : fd_reasm_fec_t * fec = fd_reasm_insert( ctx->reasm, &manifest_block_id, NULL, snapshot_slot, 0, 0, 0, 0, 1, 0 ); /* FIXME manifest block_id */
1481 0 : fec->bank_idx = 0UL;
1482 :
1483 0 : ctx->cluster_type = fd_bank_cluster_type_get( bank );
1484 :
1485 0 : maybe_verify_cluster_type( ctx );
1486 :
1487 0 : return;
1488 0 : }
1489 :
1490 0 : switch( msg ) {
1491 0 : case FD_SSMSG_MANIFEST_FULL:
1492 0 : case FD_SSMSG_MANIFEST_INCREMENTAL: {
1493 : /* We may either receive a full snapshot manifest or an
1494 : incremental snapshot manifest. Note that this external message
1495 : id is only used temporarily because replay cannot yet receive
1496 : the firedancer-internal snapshot manifest message. */
1497 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
1498 0 : FD_LOG_ERR(( "chunk %lu from in %d corrupt, not in range [%lu,%lu]", chunk, ctx->in_kind[ in_idx ], ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
1499 :
1500 0 : fd_ssload_recover( fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ),
1501 0 : ctx->banks,
1502 0 : fd_banks_bank_query( ctx->banks, FD_REPLAY_BOOT_BANK_IDX ),
1503 0 : ctx->runtime_stack.stakes.vote_credits );
1504 :
1505 0 : fd_snapshot_manifest_t const * manifest = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
1506 0 : ctx->hard_forks_cnt = manifest->hard_forks_len;
1507 0 : for( ulong i=0UL; i<manifest->hard_forks_len; i++ ) ctx->hard_forks[ i ] = manifest->hard_forks[ i ];
1508 0 : break;
1509 0 : }
1510 0 : default: {
1511 0 : FD_LOG_ERR(( "Received unknown snapshot message with msg %lu", msg ));
1512 0 : return;
1513 0 : }
1514 0 : }
1515 :
1516 0 : return;
1517 0 : }
1518 :
1519 : static void
1520 : dispatch_task( fd_replay_tile_t * ctx,
1521 : fd_stem_context_t * stem,
1522 0 : fd_sched_task_t * task ) {
1523 :
1524 0 : switch( task->task_type ) {
1525 0 : case FD_SCHED_TT_TXN_EXEC: {
1526 0 : fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, task->txn_exec->txn_idx );
1527 :
1528 : /* FIXME: this should be done during txn parsing so that we don't
1529 : have to loop over all accounts a second time. */
1530 : /* Insert or reverify invoked programs for this epoch, if needed. */
1531 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, task->txn_exec->bank_idx );
1532 :
1533 0 : # if FD_HAS_FLATCC
1534 : /* Add the transaction to the block dumper if necessary. This
1535 : logic doesn't need to be fork-aware since it's only meant to
1536 : be used in backtest. */
1537 0 : if( FD_UNLIKELY( ctx->capture_ctx && ctx->capture_ctx->dump_block_to_pb ) ) {
1538 0 : fd_dump_block_to_protobuf_collect_tx( ctx->block_dump_ctx, txn_p );
1539 0 : }
1540 0 : # endif
1541 :
1542 0 : bank->refcnt++;
1543 :
1544 0 : if( FD_UNLIKELY( !bank->first_transaction_scheduled_nanos ) ) bank->first_transaction_scheduled_nanos = fd_log_wallclock();
1545 :
1546 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
1547 0 : fd_exec_txn_exec_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
1548 0 : memcpy( &exec_msg->txn, txn_p, sizeof(fd_txn_p_t) );
1549 0 : exec_msg->bank_idx = task->txn_exec->bank_idx;
1550 0 : exec_msg->txn_idx = task->txn_exec->txn_idx;
1551 0 : if( FD_UNLIKELY( ctx->capture_ctx ) ) {
1552 0 : exec_msg->capture_txn_idx = ctx->capture_ctx->current_txn_idx++;
1553 0 : }
1554 0 : fd_stem_publish( stem, exec_out->idx, (FD_EXEC_TT_TXN_EXEC<<32) | task->txn_exec->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
1555 0 : exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
1556 0 : break;
1557 0 : }
1558 0 : case FD_SCHED_TT_TXN_SIGVERIFY: {
1559 0 : fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, task->txn_sigverify->txn_idx );
1560 :
1561 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, task->txn_sigverify->bank_idx );
1562 0 : bank->refcnt++;
1563 :
1564 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
1565 0 : fd_exec_txn_sigverify_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
1566 0 : memcpy( &exec_msg->txn, txn_p, sizeof(fd_txn_p_t) );
1567 0 : exec_msg->bank_idx = task->txn_sigverify->bank_idx;
1568 0 : exec_msg->txn_idx = task->txn_sigverify->txn_idx;
1569 0 : fd_stem_publish( stem, exec_out->idx, (FD_EXEC_TT_TXN_SIGVERIFY<<32) | task->txn_sigverify->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, 0UL );
1570 0 : exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
1571 0 : break;
1572 0 :     }
1573 0 : default: {
1574 0 : FD_LOG_CRIT(( "unexpected task type %lu", task->task_type ));
1575 0 : }
1576 0 : }
1577 0 : }
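/* Added note: each dispatched task takes a reference on its bank
   (bank->refcnt++ above) so the bank cannot be pruned while an exec
   tile still holds the transaction; the matching decrement happens in
   process_exec_task_done when the completion message comes back. */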
1578 :
1579 : /* Returns 1 if charge_busy. */
1580 : static int
1581 : replay( fd_replay_tile_t * ctx,
1582 0 : fd_stem_context_t * stem ) {
1583 :
1584 0 : if( FD_UNLIKELY( !ctx->is_booted ) ) return 0;
1585 :
1586 0 : int charge_busy = 0;
1587 0 : fd_sched_task_t task[ 1 ];
1588 0 : if( FD_UNLIKELY( !fd_sched_task_next_ready( ctx->sched, task ) ) ) {
1589 0 : return charge_busy; /* Nothing to execute or do. */
1590 0 : }
1591 :
1592 0 : charge_busy = 1;
1593 :
1594 0 : switch( task->task_type ) {
1595 0 : case FD_SCHED_TT_BLOCK_START: {
1596 0 : replay_block_start( ctx, stem, task->block_start->bank_idx, task->block_start->parent_bank_idx, task->block_start->slot );
1597 0 : fd_sched_task_done( ctx->sched, FD_SCHED_TT_BLOCK_START, ULONG_MAX, ULONG_MAX );
1598 0 : break;
1599 0 : }
1600 0 : case FD_SCHED_TT_BLOCK_END: {
1601 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, task->block_end->bank_idx );
1602 0 : if( FD_LIKELY( !(bank->flags&FD_BANK_FLAGS_DEAD) ) ) replay_block_finalize( ctx, stem, bank );
1603 0 : fd_sched_task_done( ctx->sched, FD_SCHED_TT_BLOCK_END, ULONG_MAX, ULONG_MAX );
1604 0 : break;
1605 0 : }
1606 0 : case FD_SCHED_TT_TXN_EXEC:
1607 0 : case FD_SCHED_TT_TXN_SIGVERIFY: {
1608 : /* Likely/common case: we have a transaction we actually need to
1609 : execute. */
1610 0 : dispatch_task( ctx, stem, task );
1611 0 : break;
1612 0 : }
1613 0 : default: {
1614 0 : FD_LOG_CRIT(( "unexpected task type %lu", task->task_type ));
1615 0 : }
1616 0 : }
1617 :
1618 0 : return charge_busy;
1619 0 : }
1620 :
1621 : static int
1622 0 : can_process_fec( fd_replay_tile_t * ctx ) {
1623 0 : fd_reasm_fec_t * fec;
1624 0 : if( FD_UNLIKELY( !fd_sched_can_ingest( ctx->sched, 1UL ) ) ) {
1625 0 : ctx->metrics.sched_full++;
1626 0 : return 0;
1627 0 : }
1628 :
1629 0 : if( FD_UNLIKELY( (fec = fd_reasm_peek( ctx->reasm ))==NULL ) ) {
1630 0 : ctx->metrics.reasm_empty++;
1631 0 : return 0;
1632 0 : }
1633 :
1634 0 : ctx->metrics.reasm_latest_slot = fec->slot;
1635 0 : ctx->metrics.reasm_latest_fec_idx = fec->fec_set_idx;
1636 :
1637 0 : if( FD_UNLIKELY( ctx->is_leader && fec->fec_set_idx==0U && fd_reasm_parent( ctx->reasm, fec )->bank_idx==ctx->leader_bank->idx ) ) {
1638 : /* There's a race that's exceedingly rare, where we receive the
1639 : FEC set for the slot right after our leader rotation before we
1640 : freeze the bank for the last slot in our leader rotation.
1641 :      Leader slot freezing happens only after we've received the
1642 : final PoH hash from the poh tile as well as the final FEC set
1643 : for the leader slot. So the race happens when FEC sets are
1644 : delivered and processed sooner than the PoH hash, aka when the
1645 : poh=>shred=>replay path for the block id somehow beats the
1646 : poh=>replay path for the poh hash. To mitigate this race,
1647 :      we must hold off on ingesting the FEC set for the ensuing slot
1648 :      until the leader bank has frozen; ingesting it earlier would
1649 :      violate ordering invariants in banks and sched. */
1650 0 : FD_TEST( ctx->recv_block_id );
1651 0 : FD_TEST( !ctx->recv_poh );
1652 0 : ctx->metrics.leader_bid_wait++;
1653 0 : return 0;
1654 0 : }
1655 :
1656 : /* If fec_set_idx is 0, we need a new bank for a new slot. Banks must
1657 : not be full in this case. */
1658 0 : if( FD_UNLIKELY( fd_banks_is_full( ctx->banks ) && fec->fec_set_idx==0 ) ) {
1659 0 : ctx->metrics.banks_full++;
1660 0 : return 0;
1661 0 : }
1662 :
1663 :   /* Otherwise, either banks are not full, so we can create a new
1664 :      bank if needed, or banks are full but the FEC set at idx 0 of
1665 :      this slot has already created the bank for this slot. */
1666 0 : return 1;
1667 0 : }
1668 :
1669 : static void
1670 : process_fec_set( fd_replay_tile_t * ctx,
1671 : fd_stem_context_t * stem,
1672 0 : fd_reasm_fec_t * reasm_fec ) {
1673 0 : long now = fd_log_wallclock();
1674 :
1675 : /* Linking only requires a shared lock because the fields that are
1676 :      modified are only read on publish, which takes the exclusive lock. */
1677 :
1678 0 : long shacq_start, shacq_end, shrel_end;
1679 :
1680 0 : FD_STORE_SHARED_LOCK( ctx->store, shacq_start, shacq_end, shrel_end ) {
1681 0 : if( FD_UNLIKELY( !fd_store_link( ctx->store, &reasm_fec->key, &reasm_fec->cmr ) ) ) FD_LOG_WARNING(( "failed to link %s %s. slot %lu fec_set_idx %u", FD_BASE58_ENC_32_ALLOCA( &reasm_fec->key ), FD_BASE58_ENC_32_ALLOCA( &reasm_fec->cmr ), reasm_fec->slot, reasm_fec->fec_set_idx ));
1682 0 : } FD_STORE_SHARED_LOCK_END;
1683 0 : fd_histf_sample( ctx->metrics.store_link_wait, (ulong)fd_long_max( shacq_end - shacq_start, 0L ) );
1684 0 : fd_histf_sample( ctx->metrics.store_link_work, (ulong)fd_long_max( shrel_end - shacq_end, 0L ) );
1685 :
1686 : /* Update the reasm_fec with the correct bank index and parent bank
1687 : index. If the FEC belongs to a leader, we have already allocated
1688 : a bank index for the FEC and it just needs to be propagated to the
1689 : reasm_fec. */
1690 :
1691 0 : reasm_fec->parent_bank_idx = fd_reasm_parent( ctx->reasm, reasm_fec )->bank_idx;
1692 :
1693 0 : if( FD_UNLIKELY( reasm_fec->leader ) ) {
1694 : /* If we are the leader we just need to copy in the bank index that
1695 : the leader slot is using. */
1696 0 : FD_TEST( ctx->leader_bank!=NULL );
1697 0 : reasm_fec->bank_idx = ctx->leader_bank->idx;
1698 0 : } else if( FD_UNLIKELY( reasm_fec->fec_set_idx==0U ) ) {
1699 : /* If we are seeing a FEC with fec set idx 0, this means that we are
1700 : starting a new slot, and we need a new bank index. */
1701 0 : reasm_fec->bank_idx = fd_banks_new_bank( ctx->banks, reasm_fec->parent_bank_idx, now )->idx;
1702 0 : } else {
1703 : /* We are continuing to execute through a slot that we already have
1704 : a bank index for. */
1705 0 : reasm_fec->bank_idx = reasm_fec->parent_bank_idx;
1706 0 : }
1707 :
1708 0 : if( FD_UNLIKELY( reasm_fec->slot_complete ) ) {
1709 :     /* Once the block id for a block is known, it must be added to
1710 :        the block id mapping. */
1711 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ reasm_fec->bank_idx ];
1712 0 : FD_TEST( block_id_ele );
1713 :
1714 : /* If an entry already exists for this bank index in the block id
1715 : map, we can safely remove it and replace it with the new entry.
1716 : This is safe because we know that the old entry for this fork
1717 : index has already been pruned away. */
1718 0 : if( FD_LIKELY( block_id_ele->slot!=FD_SLOT_NULL && fd_block_id_map_ele_query( ctx->block_id_map, &block_id_ele->block_id, NULL, ctx->block_id_arr ) ) ) {
1719 0 : FD_TEST( fd_block_id_map_ele_remove( ctx->block_id_map, &block_id_ele->block_id, NULL, ctx->block_id_arr ) );
1720 0 : }
1721 :
1722 0 : block_id_ele->block_id = reasm_fec->key;
1723 0 : block_id_ele->slot = reasm_fec->slot;
1724 :
1725 0 : FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
1726 :
1727 0 : if( FD_UNLIKELY( reasm_fec->leader ) ) {
1728 0 : ctx->recv_block_id = 1;
1729 0 : }
1730 0 : }
1731 :
1732 0 : if( FD_UNLIKELY( reasm_fec->leader ) ) {
1733 0 : return;
1734 0 : }
1735 :
1736 : /* Forks form a partial ordering over FEC sets. The Repair tile
1737 : delivers FEC sets in-order per fork, but FEC set ordering across
1738 :      forks is arbitrary. */
1739 0 : fd_sched_fec_t sched_fec[ 1 ];
1740 :
1741 : # if DEBUG_LOGGING
1742 : FD_LOG_INFO(( "replay processing FEC set for slot %lu fec_set_idx %u, mr %s cmr %s", reasm_fec->slot, reasm_fec->fec_set_idx, FD_BASE58_ENC_32_ALLOCA( &reasm_fec->key ), FD_BASE58_ENC_32_ALLOCA( &reasm_fec->cmr ) ));
1743 : # endif
1744 :
1745 : /* Read FEC set from the store. This should happen before we try to
1746 : ingest the FEC set. This allows us to filter out frags that were
1747 : in-flight when we published away minority forks that the frags land
1748 : on. These frags would have no bank to execute against, because
1749 : their corresponding banks, or parent banks, have also been pruned
1750 : during publishing. A query against store will rightfully tell us
1751 : that the underlying data is not found, implying that this is for a
1752 : minority fork that we can safely ignore. */
1753 0 : FD_STORE_SHARED_LOCK( ctx->store, shacq_start, shacq_end, shrel_end ) {
1754 0 : fd_store_fec_t * store_fec = fd_store_query( ctx->store, &reasm_fec->key );
1755 0 : if( FD_UNLIKELY( !store_fec ) ) {
1756 : /* The only case in which a FEC is not found in the store after
1757 : repair has notified is if the FEC was on a minority fork that
1758 : has already been published away. In this case we abandon the
1759 : entire slice because it is no longer relevant. */
1760 0 : FD_LOG_WARNING(( "store fec for slot: %lu is on minority fork already pruned by publish. abandoning slice. root: %lu. pruned merkle: %s", reasm_fec->slot, ctx->consensus_root_slot, FD_BASE58_ENC_32_ALLOCA( &reasm_fec->key ) ));
1761 0 : return;
1762 0 : }
1763 0 : FD_TEST( store_fec );
1764 0 : sched_fec->fec = store_fec;
1765 0 : sched_fec->shred_cnt = reasm_fec->data_cnt;
1766 0 : } FD_STORE_SHARED_LOCK_END;
1767 :
1768 0 : fd_histf_sample( ctx->metrics.store_read_wait, (ulong)fd_long_max( shacq_end - shacq_start, 0UL ) );
1769 0 : fd_histf_sample( ctx->metrics.store_read_work, (ulong)fd_long_max( shrel_end - shacq_end, 0UL ) );
1770 :
1771 0 : sched_fec->is_last_in_batch = !!reasm_fec->data_complete;
1772 0 : sched_fec->is_last_in_block = !!reasm_fec->slot_complete;
1773 0 : sched_fec->bank_idx = reasm_fec->bank_idx;
1774 0 : sched_fec->parent_bank_idx = reasm_fec->parent_bank_idx;
1775 0 : sched_fec->slot = reasm_fec->slot;
1776 0 : sched_fec->parent_slot = reasm_fec->slot - reasm_fec->parent_off;
1777 0 : sched_fec->is_first_in_block = reasm_fec->fec_set_idx==0U;
1778 0 : fd_funk_txn_xid_copy( sched_fec->alut_ctx->xid, fd_funk_last_publish( ctx->accdb_admin->funk ) );
1779 0 : sched_fec->alut_ctx->accdb[0] = ctx->accdb[0];
1780 0 : sched_fec->alut_ctx->els = ctx->published_root_slot;
1781 :
1782 0 : if( FD_UNLIKELY( !fd_sched_fec_ingest( ctx->sched, sched_fec ) ) ) {
1783 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, sched_fec->bank_idx );
1784 0 : publish_slot_dead( ctx, stem, bank );
1785 0 : fd_banks_mark_bank_dead( ctx->banks, bank );
1786 0 : }
1787 0 : }
1788 :
1789 : static void
1790 : funk_publish( fd_replay_tile_t * ctx,
1791 : ulong slot,
1792 0 : ulong bank_idx ) {
1793 0 : fd_funk_txn_xid_t xid = { .ul[0] = slot, .ul[1] = bank_idx };
1794 0 : FD_LOG_DEBUG(( "publishing slot=%lu", slot ));
1795 :
1796 : /* This is the standard case. Publish all transactions up to and
1797 : including the watermark. This will publish any in-prep ancestors
1798 : of root_txn as well. */
1799 0 : fd_accdb_advance_root( ctx->accdb_admin, &xid );
1800 0 : fd_progcache_txn_advance_root( ctx->progcache_admin, &xid );
1801 0 : }
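/* Added note: the xid convention here ({ slot, bank_idx }) matches the
   xids under which each bank executed its transactions.  The account
   db and the program cache are advanced to the new root together so
   their rooted views stay consistent with each other. */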
1802 :
1803 : static int
1804 0 : advance_published_root( fd_replay_tile_t * ctx ) {
1805 :
1806 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &ctx->consensus_root, NULL, ctx->block_id_arr );
1807 0 : if( FD_UNLIKELY( !block_id_ele ) ) {
1808 0 : FD_LOG_CRIT(( "invariant violation: block id ele not found for consensus root %s", FD_BASE58_ENC_32_ALLOCA( &ctx->consensus_root ) ));
1809 0 : }
1810 0 : ulong target_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
1811 :
1812 0 : fd_sched_root_notify( ctx->sched, target_bank_idx );
1813 :
1814 : /* If the identity vote has been seen on a bank that should be rooted,
1815 : then we are now ready to produce blocks. */
1816 0 : if( FD_UNLIKELY( !ctx->has_identity_vote_rooted ) ) {
1817 0 : fd_bank_t * root_bank = fd_banks_bank_query( ctx->banks, target_bank_idx );
1818 0 : if( FD_UNLIKELY( !root_bank ) ) FD_LOG_CRIT(( "invariant violation: root bank not found for bank index %lu", target_bank_idx ));
1819 0 : if( FD_LIKELY( fd_bank_has_identity_vote_get( root_bank ) ) ) ctx->has_identity_vote_rooted = 1;
1820 0 : }
1821 :
1822 0 : ulong advanceable_root_idx = ULONG_MAX;
1823 0 : if( FD_UNLIKELY( !fd_banks_advance_root_prepare( ctx->banks, target_bank_idx, &advanceable_root_idx ) ) ) return 0;
1824 :
1825 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, advanceable_root_idx );
1826 0 : FD_TEST( bank );
1827 :
1828 0 : fd_block_id_ele_t * advanceable_root_ele = &ctx->block_id_arr[ advanceable_root_idx ];
1829 0 : if( FD_UNLIKELY( !advanceable_root_ele ) ) {
1830 0 : FD_LOG_CRIT(( "invariant violation: advanceable root ele not found for bank index %lu", advanceable_root_idx ));
1831 0 : }
1832 :
1833 0 : long exacq_start, exacq_end, exrel_end;
1834 0 : FD_STORE_EXCLUSIVE_LOCK( ctx->store, exacq_start, exacq_end, exrel_end ) {
1835 0 : fd_store_publish( ctx->store, &advanceable_root_ele->block_id );
1836 0 : } FD_STORE_EXCLUSIVE_LOCK_END;
1837 :
1838 0 : fd_histf_sample( ctx->metrics.store_publish_wait, (ulong)fd_long_max( exacq_end-exacq_start, 0UL ) );
1839 0 : fd_histf_sample( ctx->metrics.store_publish_work, (ulong)fd_long_max( exrel_end-exacq_end, 0UL ) );
1840 :
1841 0 : ulong advanceable_root_slot = fd_bank_slot_get( bank );
1842 0 : funk_publish( ctx, advanceable_root_slot, bank->idx );
1843 :
1844 0 : fd_txncache_advance_root( ctx->txncache, bank->txncache_fork_id );
1845 0 : fd_sched_advance_root( ctx->sched, advanceable_root_idx );
1846 0 : fd_banks_advance_root( ctx->banks, advanceable_root_idx );
1847 0 : fd_reasm_publish( ctx->reasm, &advanceable_root_ele->block_id );
1848 :
1849 0 : ctx->published_root_slot = advanceable_root_slot;
1850 0 : ctx->published_root_bank_idx = advanceable_root_idx;
1851 :
1852 0 : return 1;
1853 0 : }
1854 :
1855 : static void
1856 : after_credit( fd_replay_tile_t * ctx,
1857 : fd_stem_context_t * stem,
1858 : int * opt_poll_in,
1859 0 : int * charge_busy ) {
1860 0 : if( FD_UNLIKELY( !ctx->is_booted ) ) return;
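  /* Added summary of the priority order below: (1) become leader if our
     slot has arrived; (2) finish an in-flight leader slot once both the
     PoH hash and the block id are in; (3) advance the published root
     toward the consensus root; (4) ingest the next ready FEC set from
     reasm into the scheduler; (5) otherwise dispatch execution work via
     replay().  Whichever action fires first charges the loop as busy
     and skips polling input for this iteration. */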
1861 :
1862 0 : if( FD_UNLIKELY( maybe_become_leader( ctx, stem ) ) ) {
1863 0 : *charge_busy = 1;
1864 0 : *opt_poll_in = 0;
1865 0 : return;
1866 0 : }
1867 :
1868 :   /* If we are leader, we may only stop being leader once we have both
1869 :      the poh hash from the poh tile and the block id from reasm. */
1870 0 : if( FD_UNLIKELY( ctx->is_leader && ctx->recv_block_id && ctx->recv_poh ) ) {
1871 0 : fini_leader_bank( ctx, stem );
1872 0 : *charge_busy = 1;
1873 0 : *opt_poll_in = 0;
1874 0 : return;
1875 0 : }
1876 :
1877 : /* If the published_root is not caught up to the consensus root, then
1878 : we should try to advance the published root. */
1879 0 : if( FD_UNLIKELY( ctx->consensus_root_bank_idx!=ctx->published_root_bank_idx && advance_published_root( ctx ) ) ) {
1880 0 : *charge_busy = 1;
1881 0 : *opt_poll_in = 0;
1882 0 : return;
1883 0 : }
1884 :
1885 : /* If the reassembler has a fec that is ready, we should process it
1886 : and pass it to the scheduler. */
1887 :
1888 : /* FIXME: The reasm logic needs to get reworked to support
1889 : equivocation more robustly. */
1890 0 : if( FD_LIKELY( can_process_fec( ctx ) ) ) {
1891 0 : fd_reasm_fec_t * fec = fd_reasm_peek( ctx->reasm );
1892 :
1893 :     /* If fec->eqvoc is set, that means equivocation mid-block was
1894 : detected in fd_reasm_t. We need to replay up to and including
1895 : the equivocating FEC on a new bank. */
1896 :
1897 0 : if( FD_UNLIKELY( fec->eqvoc ) ) {
1898 0 : FD_LOG_WARNING(( "Block equivocation detected at slot %lu", fec->slot ));
1899 :
1900 : /* We need to figure out which and how many FECs we need to
1901 : (re)insert into the scheduler. We work backwards from the
1902 : equivocating FEC, querying for chained merkle roots until we
1903 : reach the first FEC in the slot.
1904 : TODO: replace the magic number with a constant for the max
1905 : number of fecs possible in a slot with fix-32. */
1906 0 : fd_reasm_fec_t * fecs[ 1024 ] = { [0] = fec };
1907 0 : ulong fec_cnt = 1UL;
1908 0 : while( fecs[ fec_cnt-1UL ]->fec_set_idx!=0UL ) {
1909 0 : fec = fd_reasm_query( ctx->reasm, &fecs[ fec_cnt-1UL ]->cmr );
1910 0 : fecs[ fec_cnt++ ] = fec;
1911 0 : }
1912 :
1913 : /* If we don't have enough space in the scheduler to ingest all of
1914 :          the FECs, we can't proceed yet. */
1915 0 : if( FD_UNLIKELY( !fd_sched_can_ingest( ctx->sched, fec_cnt ) ) ) return;
1916 :
1917 : /* Now that we have validated that sched can ingest all of the
1918 : required FECs, it is finally safe to remove the equivocating
1919 : fec from the reasm deque. */
1920 0 : fd_reasm_out( ctx->reasm );
1921 :
1922 : /* Now we can process all of the FECs. */
1923 0 : for( ulong i=fec_cnt; i>0UL; i-- ) {
1924 0 : process_fec_set( ctx, stem, fecs[i-1UL] );
1925 0 : }
1926 0 : } else {
1927 : /* Standard case. */
1928 0 : fec = fd_reasm_out( ctx->reasm );
1929 0 : process_fec_set( ctx, stem, fec );
1930 0 : }
1931 :
1932 0 : *charge_busy = 1;
1933 0 : *opt_poll_in = 0;
1934 0 : return;
1935 0 : }
1936 :
1937 0 : *charge_busy = replay( ctx, stem );
1938 0 : *opt_poll_in = !*charge_busy;
1939 0 : }
1940 :
1941 : static int
1942 : before_frag( fd_replay_tile_t * ctx,
1943 : ulong in_idx,
1944 : ulong seq FD_PARAM_UNUSED,
1945 0 : ulong sig FD_PARAM_UNUSED ) {
1946 :
1947 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_SHRED ) ) {
1948 :     /* If reasm is full, we cannot insert any more FEC sets. We must
1949 : not consume any frags from shred_out until reasm can process more
1950 : FEC sets. */
1951 :
1952 0 : if( FD_UNLIKELY( !fd_reasm_free( ctx->reasm ) ) ) {
1953 0 : return -1;
1954 0 : }
1955 0 : }
1956 :
1957 0 : return 0;
1958 0 : }
1959 :
1960 : static void
1961 : process_exec_task_done( fd_replay_tile_t * ctx,
1962 : fd_stem_context_t * stem,
1963 : fd_exec_task_done_msg_t * msg,
1964 0 : ulong sig ) {
1965 :
1966 0 : ulong exec_tile_idx = sig&0xFFFFFFFFUL;
1967 :
1968 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, msg->bank_idx );
1969 0 : bank->refcnt--;
1970 :
1971 0 : switch( sig>>32 ) {
1972 0 : case FD_EXEC_TT_TXN_EXEC: {
1973 0 : if( FD_UNLIKELY( !ctx->has_identity_vote_rooted ) ) {
1974 : /* Query the txn signature against our recently generated vote
1975 : txn signatures. If the query is successful, then we have
1976 : seen our own vote transaction land and this should be marked
1977 : in the bank. We go through this exercise until we've seen
1978 : our vote rooted. */
1979 0 : fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, msg->txn_exec->txn_idx );
1980 0 : if( fd_vote_tracker_query_sig( ctx->vote_tracker, fd_type_pun_const( txn_p->payload+TXN( txn_p )->signature_off ) ) ) {
1981 0 : *fd_bank_has_identity_vote_modify( bank ) += 1;
1982 0 : }
1983 0 : }
1984 0 : if( FD_UNLIKELY( msg->txn_exec->err && !(bank->flags&FD_BANK_FLAGS_DEAD) ) ) {
1985 : /* Every transaction in a valid block has to execute.
1986 : Otherwise, we should mark the block as dead. Also freeze the
1987 : bank if possible. */
1988 0 : publish_slot_dead( ctx, stem, bank );
1989 0 : fd_banks_mark_bank_dead( ctx->banks, bank );
1990 0 : fd_sched_block_abandon( ctx->sched, bank->idx );
1991 0 : }
1992 0 : if( FD_UNLIKELY( (bank->flags&FD_BANK_FLAGS_DEAD) && bank->refcnt==0UL ) ) {
1993 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
1994 0 : }
1995 0 : fd_sched_task_done( ctx->sched, FD_SCHED_TT_TXN_EXEC, msg->txn_exec->txn_idx, exec_tile_idx );
1996 0 : break;
1997 0 : }
1998 0 : case FD_EXEC_TT_TXN_SIGVERIFY: {
1999 0 : if( FD_UNLIKELY( msg->txn_sigverify->err && !(bank->flags&FD_BANK_FLAGS_DEAD) ) ) {
2000 : /* Every transaction in a valid block has to sigverify.
2001 : Otherwise, we should mark the block as dead. Also freeze the
2002 : bank if possible. */
2003 0 : publish_slot_dead( ctx, stem, bank );
2004 0 : fd_banks_mark_bank_dead( ctx->banks, bank );
2005 0 : fd_sched_block_abandon( ctx->sched, bank->idx );
2006 0 : }
2007 0 : if( FD_UNLIKELY( (bank->flags&FD_BANK_FLAGS_DEAD) && bank->refcnt==0UL ) ) {
2008 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
2009 0 : }
2010 0 : fd_sched_task_done( ctx->sched, FD_SCHED_TT_TXN_SIGVERIFY, msg->txn_sigverify->txn_idx, exec_tile_idx );
2011 0 : break;
2012 0 : }
2013 0 : default: FD_LOG_CRIT(( "unexpected sig 0x%lx", sig ));
2014 0 : }
2015 :
2016 : /* Reference counter just decreased, and an exec tile just got freed
2017 :      up.  If there's a need to prune more aggressively, we could
2018 : check here if more slots just became publishable and publish. Not
2019 : publishing here shouldn't bloat the fork tree too much though. We
2020 : mark minority forks dead as soon as we can, and execution dispatch
2021 : stops on dead blocks. So shortly afterwards, dead blocks should be
2022 : eligible for pruning as in-flight transactions retire from the
2023 : execution pipeline. */
2024 :
2025 0 : }
2026 :
2027 : static void
2028 : process_tower_slot_done( fd_replay_tile_t * ctx,
2029 : fd_stem_context_t * stem,
2030 0 : fd_tower_slot_done_t const * msg ) {
2031 0 : fd_bank_t * replay_bank = fd_banks_bank_query( ctx->banks, msg->replay_bank_idx );
2032 0 : if( FD_UNLIKELY( !replay_bank ) ) FD_LOG_CRIT(( "invariant violation: bank not found for bank index %lu", msg->replay_bank_idx ));
2033 0 : replay_bank->refcnt--;
2034 :
2035 0 : ctx->reset_block_id = msg->reset_block_id;
2036 0 : ctx->reset_slot = msg->reset_slot;
2037 0 : ctx->reset_timestamp_nanos = fd_log_wallclock();
2038 0 : ulong min_leader_slot = fd_ulong_max( msg->reset_slot+1UL, fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot+1UL ) );
2039 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, min_leader_slot, ctx->identity_pubkey );
2040 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
2041 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
2042 0 : } else {
2043 0 : ctx->next_leader_tickcount = LONG_MAX;
2044 0 : }
2045 :
2046 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->reset_block_id, NULL, ctx->block_id_arr );
2047 0 : if( FD_UNLIKELY( !block_id_ele ) ) {
2048 0 : FD_LOG_CRIT(( "invariant violation: block id ele doesn't exist for reset block id: %s, slot: %lu", FD_BASE58_ENC_32_ALLOCA( &msg->reset_block_id ), msg->reset_slot ));
2049 0 : }
2050 0 : ulong reset_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
2051 :
2052 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, reset_bank_idx );
2053 0 : if( FD_UNLIKELY( !bank ) ) {
2054 0 : FD_LOG_CRIT(( "invariant violation: bank not found for bank index %lu", reset_bank_idx ));
2055 0 : }
2056 :
2057 0 : if( FD_LIKELY( msg->root_slot!=ULONG_MAX ) ) FD_TEST( msg->root_slot<=msg->reset_slot );
2058 0 : ctx->reset_bank = bank;
2059 :
2060 0 : if( FD_LIKELY( ctx->replay_out->idx!=ULONG_MAX ) ) {
2061 0 : fd_poh_reset_t * reset = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
2062 :
2063 0 : reset->bank_idx = bank->idx;
2064 0 : reset->timestamp = ctx->reset_timestamp_nanos;
2065 0 : reset->completed_slot = ctx->reset_slot;
2066 0 : reset->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
2067 0 : reset->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
2068 0 : reset->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)reset->ticks_per_slot);
2069 :
2070 0 : fd_memcpy( reset->completed_block_id, &block_id_ele->block_id, sizeof(fd_hash_t) );
2071 :
2072 0 : fd_blockhashes_t const * block_hash_queue = fd_bank_block_hash_queue_query( bank );
2073 0 : fd_hash_t const * last_hash = fd_blockhashes_peek_last_hash( block_hash_queue );
2074 0 : FD_TEST( last_hash );
2075 0 : fd_memcpy( reset->completed_blockhash, last_hash->uc, sizeof(fd_hash_t) );
2076 :
2077 0 : ulong ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
2078 0 : if( FD_UNLIKELY( reset->hashcnt_per_tick==1UL ) ) {
2079 : /* Low power producer, maximum of one microblock per tick in the slot */
2080 0 : reset->max_microblocks_in_slot = ticks_per_slot;
2081 0 : } else {
2082 : /* See the long comment in after_credit for this limit */
2083 0 : reset->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ticks_per_slot*(reset->hashcnt_per_tick-1UL) );
2084 0 : }
2085 0 : reset->next_leader_slot = ctx->next_leader_slot;
2086 :
2087 0 : if( FD_LIKELY( ctx->rpc_enabled ) ) bank->refcnt++;
2088 :
2089 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_RESET, ctx->replay_out->chunk, sizeof(fd_poh_reset_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
2090 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_poh_reset_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
2091 0 : }
2092 :
2093 0 : FD_LOG_INFO(( "tower_slot_done(reset_slot=%lu, next_leader_slot=%lu, vote_slot=%lu)", msg->reset_slot, ctx->next_leader_slot, msg->vote_slot ));
2094 0 : maybe_become_leader( ctx, stem );
2095 :
2096 0 : if( FD_LIKELY( msg->root_slot!=ULONG_MAX ) ) {
2097 :
2098 0 : FD_TEST( msg->root_slot>=ctx->consensus_root_slot );
2099 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->root_block_id, NULL, ctx->block_id_arr );
2100 0 : FD_TEST( block_id_ele );
2101 :
2102 0 : ctx->consensus_root_slot = msg->root_slot;
2103 0 : ctx->consensus_root = msg->root_block_id;
2104 0 : ctx->consensus_root_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
2105 :
2106 0 : publish_root_advanced( ctx, stem );
2107 0 : }
2108 :
2109 0 : ulong distance = 0UL;
2110 0 : fd_bank_t * parent = bank;
2111 0 : while( parent ) {
2112 0 : if( FD_UNLIKELY( parent->idx==ctx->consensus_root_bank_idx ) ) break;
2113 0 : parent = fd_banks_get_parent( ctx->banks, parent );
2114 0 : distance++;
2115 0 : }
2116 :
2117 0 : FD_MGAUGE_SET( REPLAY, ROOT_DISTANCE, distance );
2118 0 : }
2119 :
2120 : static void
2121 : process_fec_complete( fd_replay_tile_t * ctx,
2122 0 : uchar const * shred_buf ) {
2123 0 : fd_shred_t const * shred = (fd_shred_t const *)fd_type_pun_const( shred_buf );
2124 :
2125 0 : fd_hash_t const * merkle_root = (fd_hash_t const *)fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ );
2126 0 : fd_hash_t const * chained_merkle_root = (fd_hash_t const *)fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) );
2127 0 : int is_leader_fec = *(int const *) fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) + sizeof(fd_hash_t) );
2128 :
2129 0 : int data_complete = !!( shred->data.flags & FD_SHRED_DATA_FLAG_DATA_COMPLETE );
2130 0 : int slot_complete = !!( shred->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE );
2131 :
2132 0 : FD_TEST( !fd_reasm_query( ctx->reasm, merkle_root ) );
2133 0 :   if( FD_UNLIKELY( shred->slot - shred->data.parent_off == fd_reasm_slot0( ctx->reasm ) && shred->fec_set_idx==0U ) ) {
2134 0 : chained_merkle_root = &fd_reasm_root( ctx->reasm )->key;
2135 0 : }
2136 :
2137 0 : FD_TEST( fd_reasm_free( ctx->reasm ) );
2138 :
2139 0 : FD_TEST( fd_reasm_insert( ctx->reasm, merkle_root, chained_merkle_root, shred->slot, shred->fec_set_idx, shred->data.parent_off, (ushort)(shred->idx - shred->fec_set_idx + 1), data_complete, slot_complete, is_leader_fec ) );
2140 0 : }
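/* Added note on the frag layout parsed above:
     [0, FD_SHRED_DATA_HEADER_SZ)   data shred header
     [hdr+ 0, hdr+32)               merkle root
     [hdr+32, hdr+64)               chained merkle root
     [hdr+64, hdr+68)               is_leader_fec flag (int)
   This matches the sz check in returnable_frag's IN_KIND_SHRED case
   below. */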
2141 :
2142 : static void
2143 0 : process_resolv_slot_completed( fd_replay_tile_t * ctx, ulong bank_idx ) {
2144 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, bank_idx );
2145 0 : FD_TEST( bank );
2146 :
2147 0 : bank->refcnt--;
2148 0 : }
2149 :
2150 : static void
2151 : process_vote_txn_sent( fd_replay_tile_t * ctx,
2152 0 : fd_txn_m_t * txnm ) {
2153 : /* The send tile has signed and sent a vote. Add this vote to the
2154 : vote tracker. We go through this exercise until we've seen our
2155 : vote rooted. */
2156 0 : if( FD_UNLIKELY( !ctx->has_identity_vote_rooted ) ) {
2157 0 : uchar * payload = ((uchar *)txnm) + sizeof(fd_txn_m_t);
2158 0 : uchar txn_mem[ FD_TXN_MAX_SZ ] __attribute__((aligned(alignof(fd_txn_t))));
2159 0 : fd_txn_t * txn = (fd_txn_t *)txn_mem;
2160 0 : if( FD_UNLIKELY( !fd_txn_parse( payload, txnm->payload_sz, txn_mem, NULL ) ) ) {
2161 0 : FD_LOG_CRIT(( "Could not parse txn from send tile" ));
2162 0 : }
2163 0 : fd_vote_tracker_insert( ctx->vote_tracker, fd_type_pun_const( payload+txn->signature_off ) );
2164 0 : }
2165 0 : }
2166 :
2167 : static inline void
2168 0 : maybe_verify_shred_version( fd_replay_tile_t * ctx ) {
2169 0 : if( FD_LIKELY( ctx->expected_shred_version && ctx->ipecho_shred_version ) ) {
2170 0 : if( FD_UNLIKELY( ctx->expected_shred_version!=ctx->ipecho_shred_version ) ) {
2171 0 :       FD_LOG_ERR(( "shred version mismatch: expected %u but got %u from ipecho", ctx->expected_shred_version, ctx->ipecho_shred_version ));
2172 0 : }
2173 0 : }
2174 :
2175 0 : if( FD_LIKELY( ctx->has_genesis_hash && ctx->hard_forks_cnt!=ULONG_MAX && (ctx->expected_shred_version || ctx->ipecho_shred_version) ) ) {
2176 0 : ushort expected_shred_version = ctx->expected_shred_version ? ctx->expected_shred_version : ctx->ipecho_shred_version;
2177 :
2178 0 : union {
2179 0 : uchar c[ 32 ];
2180 0 : ushort s[ 16 ];
2181 0 : } running_hash;
2182 0 : fd_memcpy( running_hash.c, ctx->genesis_hash, sizeof(fd_hash_t) );
2183 :
2184 0 : ulong processed = 0UL;
2185 0 : ulong min_value = 0UL;
2186 0 : while( processed<ctx->hard_forks_cnt ) {
2187 0 : ulong min_index = ULONG_MAX;
2188 0 : for( ulong i=0UL; i<ctx->hard_forks_cnt; i++ ) {
2189 0 : if( ctx->hard_forks[ i ]>=min_value && (min_index==ULONG_MAX || ctx->hard_forks[ i ]<ctx->hard_forks[ min_index ] ) ) {
2190 0 : min_index = i;
2191 0 : }
2192 0 : }
2193 :
2194 0 : FD_TEST( min_index!=ULONG_MAX );
2195 0 : min_value = ctx->hard_forks[ min_index ];
2196 0 : ulong min_count = 0UL;
2197 0 : for( ulong i=0UL; i<ctx->hard_forks_cnt; i++ ) {
2198 0 : if( ctx->hard_forks[ i ]==min_value ) min_count++;
2199 0 : }
2200 :
2201 0 : uchar data[ 48UL ];
2202 0 : fd_memcpy( data, running_hash.c, sizeof(fd_hash_t) );
2203 0 : fd_memcpy( data+32UL, &min_value, sizeof(ulong) );
2204 0 : fd_memcpy( data+40UL, &min_count, sizeof(ulong) );
2205 :
2206 0 : FD_TEST( fd_sha256_hash( data, 48UL, running_hash.c ) );
2207 0 : processed += min_count;
2208 0 : min_value += 1UL;
2209 0 : }
2210 :
2211 0 : ushort xor = 0;
2212 0 : for( ulong i=0UL; i<16UL; i++ ) xor ^= running_hash.s[ i ];
2213 :
2214 0 : xor = fd_ushort_bswap( xor );
2215 0 : xor = fd_ushort_if( xor<USHORT_MAX, (ushort)(xor + 1), USHORT_MAX );
2216 :
2217 0 : if( FD_UNLIKELY( expected_shred_version!=xor ) ) {
2218 0 : FD_LOG_ERR(( "shred version mismatch: expected %u but got %u from genesis hash %s and hard forks", expected_shred_version, xor, FD_BASE58_ENC_32_ALLOCA( &ctx->genesis_hash ) ));
2219 0 : }
2220 0 : }
2221 0 : }
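/* Added sketch of the derivation above (values illustrative): hard
   forks are folded into the genesis hash in ascending slot order.  For
   each distinct hard-fork slot, the running 32-byte hash h is advanced
   as

     h = sha256( h || slot (8 bytes) || count (8 bytes) )

   where count is how many hard forks share that slot.  The final hash
   is then XOR-folded into 16 bits, byte-swapped, and mapped from
   0..USHORT_MAX-1 onto 1..USHORT_MAX, so shred version 0 is never
   produced. */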
2222 :
2223 : static inline int
2224 : returnable_frag( fd_replay_tile_t * ctx,
2225 : ulong in_idx,
2226 : ulong seq,
2227 : ulong sig,
2228 : ulong chunk,
2229 : ulong sz,
2230 : ulong ctl,
2231 : ulong tsorig,
2232 : ulong tspub,
2233 0 : fd_stem_context_t * stem ) {
2234 0 : (void)seq;
2235 0 : (void)ctl;
2236 0 : (void)tsorig;
2237 0 : (void)tspub;
2238 :
2239 0 : if( FD_UNLIKELY( sz!=0UL && (chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>ctx->in[ in_idx ].mtu ) ) )
2240 0 : FD_LOG_ERR(( "chunk %lu %lu from in %d corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in_kind[ in_idx ], ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
2241 :
2242 0 : switch( ctx->in_kind[in_idx] ) {
2243 0 : case IN_KIND_GENESIS: {
2244 0 : uchar const * src = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2245 0 : ctx->has_genesis_hash = 1;
2246 0 : if( FD_LIKELY( sig==GENESI_SIG_BOOTSTRAP_COMPLETED ) ) {
2247 0 : boot_genesis( ctx, stem, in_idx, chunk );
2248 0 : fd_memcpy( ctx->genesis_hash, src+sizeof(fd_lthash_value_t), sizeof(fd_hash_t) );
2249 0 : } else {
2250 0 : fd_memcpy( ctx->genesis_hash, src, sizeof(fd_hash_t) );
2251 0 : }
2252 :
2253 0 : maybe_verify_cluster_type( ctx );
2254 0 : maybe_verify_shred_version( ctx );
2255 0 : break;
2256 0 : }
2257 0 : case IN_KIND_IPECHO: {
2258 0 : FD_TEST( sig && sig<=USHORT_MAX );
2259 0 : ctx->ipecho_shred_version = (ushort)sig;
2260 0 : maybe_verify_shred_version( ctx );
2261 0 : break;
2262 0 : }
2263 0 : case IN_KIND_SNAP:
2264 0 : on_snapshot_message( ctx, stem, in_idx, chunk, sig );
2265 0 : maybe_verify_shred_version( ctx );
2266 0 : break;
2267 0 : case IN_KIND_EXEC: {
2268 0 : process_exec_task_done( ctx, stem, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ), sig );
2269 0 : break;
2270 0 : }
2271 0 : case IN_KIND_POH: {
2272 0 : process_poh_message( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2273 0 : break;
2274 0 : }
2275 0 : case IN_KIND_RESOLV: {
2276 0 : fd_resolv_slot_exchanged_t * exchanged_slot = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2277 0 : process_resolv_slot_completed( ctx, exchanged_slot->bank_idx );
2278 0 : break;
2279 0 : }
2280 0 : case IN_KIND_TOWER: {
2281 0 :       if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_DONE ) ) process_tower_slot_done( ctx, stem, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2282 0 : else if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_CONFIRMED ) ) {
2283 0 : fd_tower_slot_confirmed_t const * msg = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2284 :
2285 : /* Implement replay plugin API here */
2286 :
2287 0 : switch( msg->kind ) {
2288 0 : case FD_TOWER_SLOT_CONFIRMED_OPTIMISTIC: break;
2289 0 : case FD_TOWER_SLOT_CONFIRMED_ROOTED: break;
2290 0 : }
2291 0 :       }
2292 0 : break;
2293 0 : }
2294 0 : case IN_KIND_SHRED: {
2295 : /* TODO: This message/sz should be defined. */
2296 0 : if( sz==FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) + sizeof(fd_hash_t) + sizeof(int) ) {
2297 :         /* We received a FEC complete message. */
2298 0 : process_fec_complete( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2299 0 : }
2300 0 : break;
2301 0 : }
2302 0 : case IN_KIND_VTXN: {
2303 0 : process_vote_txn_sent( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2304 0 : break;
2305 0 : }
2306 0 : case IN_KIND_RPC:
2307 0 : case IN_KIND_GUI: {
2308 0 : fd_bank_t * bank = fd_banks_bank_query( ctx->banks, sig );
2309 0 : FD_TEST( bank );
2310 0 : bank->refcnt--;
2311 0 : break;
2312 0 : }
2313 0 : default:
2314 0 : FD_LOG_ERR(( "unhandled kind %d", ctx->in_kind[ in_idx ] ));
2315 0 : }
2316 :
2317 0 : return 0;
2318 0 : }
2319 :
2320 : static inline fd_replay_out_link_t
2321 : out1( fd_topo_t const * topo,
2322 : fd_topo_tile_t const * tile,
2323 0 : char const * name ) {
2324 0 : ulong idx = ULONG_MAX;
2325 :
2326 0 : for( ulong i=0UL; i<tile->out_cnt; i++ ) {
2327 0 : fd_topo_link_t const * link = &topo->links[ tile->out_link_id[ i ] ];
2328 0 : if( !strcmp( link->name, name ) ) {
2329 0 : if( FD_UNLIKELY( idx!=ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had multiple output links named %s but expected one", tile->name, tile->kind_id, name ));
2330 0 : idx = i;
2331 0 : }
2332 0 : }
2333 :
2334 0 : if( FD_UNLIKELY( idx==ULONG_MAX ) ) return (fd_replay_out_link_t){ .idx = ULONG_MAX, .mem = NULL, .chunk0 = 0, .wmark = 0, .chunk = 0 };
2335 :
2336 0 : void * mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ idx ] ].dcache_obj_id ].wksp_id ].wksp;
2337 0 : ulong chunk0 = fd_dcache_compact_chunk0( mem, topo->links[ tile->out_link_id[ idx ] ].dcache );
2338 0 : ulong wmark = fd_dcache_compact_wmark ( mem, topo->links[ tile->out_link_id[ idx ] ].dcache, topo->links[ tile->out_link_id[ idx ] ].mtu );
2339 :
2340 0 : return (fd_replay_out_link_t){ .idx = idx, .mem = mem, .chunk0 = chunk0, .wmark = wmark, .chunk = chunk0 };
2341 0 : }
2342 :
2343 : static void
2344 : privileged_init( fd_topo_t * topo,
2345 0 : fd_topo_tile_t * tile ) {
2346 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
2347 :
2348 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
2349 0 : fd_replay_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_replay_tile_t), sizeof(fd_replay_tile_t) );
2350 :
2351 0 : if( FD_UNLIKELY( !strcmp( tile->replay.identity_key_path, "" ) ) ) FD_LOG_ERR(( "identity_key_path not set" ));
2352 :
2353 0 : ctx->identity_pubkey[ 0 ] = *(fd_pubkey_t const *)fd_type_pun_const( fd_keyload_load( tile->replay.identity_key_path, /* pubkey only: */ 1 ) );
2354 :
2355 0 : if( FD_UNLIKELY( !tile->replay.bundle.vote_account_path[0] ) ) {
2356 0 : tile->replay.bundle.enabled = 0;
2357 0 : }
2358 :
2359 0 : if( FD_UNLIKELY( tile->replay.bundle.enabled ) ) {
2360 0 : if( FD_UNLIKELY( !fd_base58_decode_32( tile->replay.bundle.vote_account_path, ctx->bundle.vote_account.uc ) ) ) {
2361 0 : const uchar * vote_key = fd_keyload_load( tile->replay.bundle.vote_account_path, /* pubkey only: */ 1 );
2362 0 : fd_memcpy( ctx->bundle.vote_account.uc, vote_key, 32UL );
2363 0 : }
2364 0 : }
2365 :
2366 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->reasm_seed, sizeof(ulong) ) ) ) {
2367 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2368 0 : }
2369 :
2370 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->vote_tracker_seed, sizeof(ulong) ) ) ) {
2371 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2372 0 : }
2373 :
2374 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->block_id_map_seed, sizeof(ulong) ) ) ) {
2375 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2376 0 : }
2377 0 : }
2378 :
2379 : static void
2380 : unprivileged_init( fd_topo_t * topo,
2381 0 : fd_topo_tile_t * tile ) {
2382 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
2383 :
2384 0 : ulong chain_cnt = fd_block_id_map_chain_cnt_est( tile->replay.max_live_slots );
2385 :
2386 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
2387 0 : fd_replay_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_replay_tile_t), sizeof(fd_replay_tile_t) );
2388 0 : void * block_id_arr_mem = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_block_id_ele_t), sizeof(fd_block_id_ele_t) * tile->replay.max_live_slots );
2389 0 : void * block_id_map_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_block_id_map_align(), fd_block_id_map_footprint( chain_cnt ) );
2390 0 : void * _txncache = FD_SCRATCH_ALLOC_APPEND( l, fd_txncache_align(), fd_txncache_footprint( tile->replay.max_live_slots ) );
2391 0 : void * reasm_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_reasm_align(), fd_reasm_footprint( 1 << 20 ) );
2392 0 : void * sched_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_sched_align(), fd_sched_footprint( tile->replay.max_live_slots ) );
2393 0 : void * vote_tracker_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_vote_tracker_align(), fd_vote_tracker_footprint() );
2394 0 : void * _capture_ctx = FD_SCRATCH_ALLOC_APPEND( l, fd_capture_ctx_align(), fd_capture_ctx_footprint() );
2395 0 : # if FD_HAS_FLATCC
2396 0 : void * block_dump_ctx = NULL;
2397 0 : if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
2398 0 : block_dump_ctx = FD_SCRATCH_ALLOC_APPEND( l, fd_block_dump_context_align(), fd_block_dump_context_footprint() );
2399 0 : }
2400 0 : # endif
2401 :
2402 0 : ulong store_obj_id = fd_pod_query_ulong( topo->props, "store", ULONG_MAX );
2403 0 : FD_TEST( store_obj_id!=ULONG_MAX );
2404 0 : ctx->store = fd_store_join( fd_topo_obj_laddr( topo, store_obj_id ) );
2405 0 : FD_TEST( ctx->store );
2406 :
2407 0 : ulong banks_obj_id = fd_pod_query_ulong( topo->props, "banks", ULONG_MAX );
2408 0 : FD_TEST( banks_obj_id!=ULONG_MAX );
2409 0 : ctx->banks = fd_banks_join( fd_topo_obj_laddr( topo, banks_obj_id ) );
2410 0 : FD_TEST( ctx->banks );
2411 :
2412 0 : fd_bank_t * bank_pool = fd_banks_get_bank_pool( ctx->banks );
2413 0 : FD_MGAUGE_SET( REPLAY, MAX_LIVE_BANKS, fd_banks_pool_max( bank_pool ) );
2414 :
2415 0 : fd_bank_t * bank = fd_banks_init_bank( ctx->banks );
2416 0 : FD_TEST( bank );
2417 0 : FD_TEST( bank->idx==FD_REPLAY_BOOT_BANK_IDX );
2418 0 : fd_bank_slot_set( bank, 0UL );
2419 :
2420 0 : ctx->consensus_root_slot = ULONG_MAX;
2421 0 : ctx->consensus_root = (fd_hash_t){ .ul[0] = FD_RUNTIME_INITIAL_BLOCK_ID };
2422 0 : ctx->published_root_slot = ULONG_MAX;
2423 :
2424 0 : ctx->expected_shred_version = tile->replay.expected_shred_version;
2425 0 : ctx->ipecho_shred_version = 0;
2426 0 : fd_memcpy( ctx->genesis_path, tile->replay.genesis_path, sizeof(ctx->genesis_path) );
2427 0 : ctx->has_genesis_hash = 0;
2428 0 : ctx->cluster_type = FD_CLUSTER_UNKNOWN;
2429 0 : ctx->hard_forks_cnt = ULONG_MAX;
2430 :
2431 0 : if( FD_UNLIKELY( tile->replay.bundle.enabled ) ) {
2432 0 : ctx->bundle.enabled = 1;
2433 0 : if( FD_UNLIKELY( !fd_bundle_crank_gen_init( ctx->bundle.gen,
2434 0 : (fd_acct_addr_t const *)tile->replay.bundle.tip_distribution_program_addr,
2435 0 : (fd_acct_addr_t const *)tile->replay.bundle.tip_payment_program_addr,
2436 0 : (fd_acct_addr_t const *)ctx->bundle.vote_account.uc,
2437 0 : (fd_acct_addr_t const *)ctx->bundle.vote_account.uc, "NAN", 0UL ) ) ) {
2438 0 : FD_LOG_ERR(( "failed to initialize bundle crank gen" ));
2439 0 : }
2440 0 : } else {
2441 0 : ctx->bundle.enabled = 0;
2442 0 : }
2443 :
2444 0 : fd_features_t * features = fd_bank_features_modify( bank );
2445 0 : fd_features_enable_cleaned_up( features, &FD_RUNTIME_CLUSTER_VERSION );
2446 :
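      : /* Layer operator-requested one-off feature activations on top of
      :    the base feature set derived from the cluster version above. */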
2447 0 : char const * one_off_features[ 16UL ];
2448 0 : FD_TEST( tile->replay.enable_features_cnt<=sizeof(one_off_features)/sizeof(one_off_features[0]) );
2449 0 : for( ulong i=0UL; i<tile->replay.enable_features_cnt; i++ ) one_off_features[ i ] = tile->replay.enable_features[i];
2450 0 : fd_features_enable_one_offs( features, one_off_features, (uint)tile->replay.enable_features_cnt, 0UL );
2451 :
2452 0 : FD_TEST( fd_accdb_admin_join ( ctx->accdb_admin, fd_topo_obj_laddr( topo, tile->replay.funk_obj_id ) ) );
2453 0 : FD_TEST( fd_accdb_user_v1_init ( ctx->accdb, fd_topo_obj_laddr( topo, tile->replay.funk_obj_id ) ) );
2454 0 : FD_TEST( fd_progcache_admin_join( ctx->progcache_admin, fd_topo_obj_laddr( topo, tile->replay.progcache_obj_id ) ) );
2455 :
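      : /* The txncache is split into a shared-memory portion (the shmem
      :    object from the topology, presumably shared with other tiles)
      :    and a per-tile local join carved from scratch above;
      :    fd_txncache_new binds the local handle to the shared state. */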
2456 0 : void * _txncache_shmem = fd_topo_obj_laddr( topo, tile->replay.txncache_obj_id );
2457 0 : fd_txncache_shmem_t * txncache_shmem = fd_txncache_shmem_join( _txncache_shmem );
2458 0 : FD_TEST( txncache_shmem );
2459 0 : ctx->txncache = fd_txncache_join( fd_txncache_new( _txncache, txncache_shmem ) );
2460 0 : FD_TEST( ctx->txncache );
2461 :
2462 0 : ctx->capture_ctx = NULL;
2463 0 : if( FD_UNLIKELY( strcmp( "", tile->replay.solcap_capture ) || strcmp( "", tile->replay.dump_proto_dir ) ) ) {
2464 0 : ctx->capture_ctx = fd_capture_ctx_join( fd_capture_ctx_new( _capture_ctx ) );
2465 0 : ctx->capture_ctx->solcap_start_slot = tile->replay.capture_start_slot;
2466 0 : }
2467 :
2468 0 : if( FD_UNLIKELY( strcmp( "", tile->replay.dump_proto_dir ) ) ) {
2469 0 : ctx->capture_ctx->dump_proto_output_dir = tile->replay.dump_proto_dir;
2470 0 : if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) ctx->capture_ctx->dump_block_to_pb = tile->replay.dump_block_to_pb;
2471 0 : }
2472 :
2473 0 : # if FD_HAS_FLATCC
2474 0 : if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
2475 0 : ctx->block_dump_ctx = fd_block_dump_context_join( fd_block_dump_context_new( block_dump_ctx ) );
2476 0 : } else {
2477 0 : ctx->block_dump_ctx = NULL;
2478 0 : }
2479 0 : # endif
2480 :
2481 0 : ctx->exec_cnt = fd_topo_tile_name_cnt( topo, "exec" );
2482 :
2483 0 : ctx->is_booted = 0;
2484 :
2485 0 : ctx->larger_max_cost_per_block = tile->replay.larger_max_cost_per_block;
2486 :
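      : /* Note: the 1<<20 element capacity passed to fd_reasm_new below
      :    must match the fd_reasm_footprint( 1 << 20 ) used in the
      :    scratch layout above. */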
2487 0 : ctx->reasm = fd_reasm_join( fd_reasm_new( reasm_mem, 1 << 20, ctx->reasm_seed ) );
2488 0 : FD_TEST( ctx->reasm );
2489 :
2490 0 : ctx->sched = fd_sched_join( fd_sched_new( sched_mem, tile->replay.max_live_slots, ctx->exec_cnt ), tile->replay.max_live_slots );
2491 0 : FD_TEST( ctx->sched );
2492 :
2493 0 : ctx->vote_tracker = fd_vote_tracker_join( fd_vote_tracker_new( vote_tracker_mem, ctx->vote_tracker_seed ) );
2494 0 : FD_TEST( ctx->vote_tracker );
2495 :
2496 0 : ctx->has_identity_vote_rooted = 0;
2497 :
2498 0 : ctx->mleaders = fd_multi_epoch_leaders_join( fd_multi_epoch_leaders_new( ctx->mleaders_mem ) );
2499 0 : FD_TEST( ctx->mleaders );
2500 :
2501 0 : ctx->is_leader = 0;
2502 0 : ctx->reset_slot = 0UL;
2503 0 : ctx->reset_bank = NULL;
2504 0 : ctx->reset_block_id = (fd_hash_t){ .ul[0] = FD_RUNTIME_INITIAL_BLOCK_ID };
2505 0 : ctx->reset_timestamp_nanos = 0UL;
2506 0 : ctx->next_leader_slot = ULONG_MAX;
2507 0 : ctx->next_leader_tickcount = LONG_MAX;
2508 0 : ctx->highwater_leader_slot = ULONG_MAX;
2509 0 : ctx->slot_duration_nanos = 350L*1000L*1000L; /* TODO: do not hardcode; the slot duration is not always 350ms */
2510 0 : ctx->slot_duration_ticks = (double)ctx->slot_duration_nanos*fd_tempo_tick_per_ns( NULL );
2511 0 : ctx->leader_bank = NULL;
2512 :
2513 0 : ctx->block_id_len = tile->replay.max_live_slots;
2514 0 : ctx->block_id_arr = (fd_block_id_ele_t *)block_id_arr_mem;
2515 0 : ctx->block_id_map = fd_block_id_map_join( fd_block_id_map_new( block_id_map_mem, chain_cnt, ctx->block_id_map_seed ) );
2516 0 : FD_TEST( ctx->block_id_map );
2517 :
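      : /* Initialize every block id element to the FD_SLOT_NULL sentinel,
      :    which presumably is how lookups distinguish free entries from
      :    live ones. */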
2518 0 : for( ulong i=0UL; i<tile->replay.max_live_slots; i++ ) {
2519 0 : ctx->block_id_arr[ i ].slot = FD_SLOT_NULL;
2520 0 : }
2521 :
2522 0 : ctx->resolv_tile_cnt = fd_topo_tile_name_cnt( topo, "resolv" );
2523 :
2524 0 : FD_TEST( tile->in_cnt<=sizeof(ctx->in)/sizeof(ctx->in[0]) );
2525 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
2526 0 : fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
2527 0 : fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
2528 :
2529 0 : if( FD_LIKELY( link->dcache ) ) {
2530 0 : ctx->in[ i ].mem = link_wksp->wksp;
2531 0 : ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
2532 0 : ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
2533 0 : ctx->in[ i ].mtu = link->mtu;
2534 0 : }
2535 :
2536 0 : if( !strcmp( link->name, "genesi_out" ) ) ctx->in_kind[ i ] = IN_KIND_GENESIS;
2537 0 : else if( !strcmp( link->name, "ipecho_out" ) ) ctx->in_kind[ i ] = IN_KIND_IPECHO;
2538 0 : else if( !strcmp( link->name, "snapin_manif" ) ) ctx->in_kind[ i ] = IN_KIND_SNAP;
2539 0 : else if( !strcmp( link->name, "exec_replay" ) ) ctx->in_kind[ i ] = IN_KIND_EXEC;
2540 0 : else if( !strcmp( link->name, "tower_out" ) ) ctx->in_kind[ i ] = IN_KIND_TOWER;
2541 0 : else if( !strcmp( link->name, "poh_replay" ) ) ctx->in_kind[ i ] = IN_KIND_POH;
2542 0 : else if( !strcmp( link->name, "resolv_repla" ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
2543 0 : else if( !strcmp( link->name, "shred_out" ) ) ctx->in_kind[ i ] = IN_KIND_SHRED;
2544 0 : else if( !strcmp( link->name, "send_out" ) ) ctx->in_kind[ i ] = IN_KIND_VTXN;
2545 0 : else if( !strcmp( link->name, "gui_replay" ) ) ctx->in_kind[ i ] = IN_KIND_GUI;
2546 0 : else if( !strcmp( link->name, "rpc_replay" ) ) ctx->in_kind[ i ] = IN_KIND_RPC;
2547 0 : else FD_LOG_ERR(( "unexpected input link name %s", link->name ));
2548 0 : }
2549 :
2550 0 : *ctx->stake_out = out1( topo, tile, "replay_stake" ); FD_TEST( ctx->stake_out->idx!=ULONG_MAX );
2551 0 : *ctx->replay_out = out1( topo, tile, "replay_out" ); FD_TEST( ctx->replay_out->idx!=ULONG_MAX );
2552 :
2553 0 : ulong idx = fd_topo_find_tile_out_link( topo, tile, "replay_exec", 0UL );
2554 0 : FD_TEST( idx!=ULONG_MAX );
2555 0 : fd_topo_link_t * link = &topo->links[ tile->out_link_id[ idx ] ];
2556 :
2557 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
2558 0 : exec_out->idx = idx;
2559 0 : exec_out->mem = topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ].wksp;
2560 0 : exec_out->chunk0 = fd_dcache_compact_chunk0( exec_out->mem, link->dcache );
2561 0 : exec_out->wmark = fd_dcache_compact_wmark( exec_out->mem, link->dcache, link->mtu );
2562 0 : exec_out->chunk = exec_out->chunk0;
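      : /* Publishing on this link later follows the usual compact-dcache
      :    pattern: copy the payload into the current chunk, publish the
      :    frag, then advance the chunk pointer.  A minimal sketch (not
      :    verbatim from this file):
      :
      :      fd_memcpy( fd_chunk_to_laddr( exec_out->mem, exec_out->chunk ), msg, sz );
      :      fd_stem_publish( stem, exec_out->idx, sig, exec_out->chunk, sz, 0UL, tsorig, tspub );
      :      exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sz, exec_out->chunk0, exec_out->wmark );
      :
      :    chunk0 is the lowest addressable chunk and wmark the highest
      :    chunk at which a maximally sized (mtu) frag still fits. */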
2563 :
2564 0 : ctx->gui_enabled = fd_topo_find_tile( topo, "gui", 0UL )!=ULONG_MAX;
2565 0 : ctx->rpc_enabled = fd_topo_find_tile( topo, "rpc", 0UL )!=ULONG_MAX;
2566 :
2567 0 : if( FD_UNLIKELY( strcmp( "", tile->replay.solcap_capture ) ) ) {
2568 0 : idx = fd_topo_find_tile_out_link( topo, tile, "cap_repl", 0UL );
2569 0 : FD_TEST( idx!=ULONG_MAX );
2570 0 : link = &topo->links[ tile->out_link_id[ idx ] ];
2571 :
2572 :
2573 0 : fd_capture_link_buf_t * cap_repl_out = ctx->cap_repl_out;
2574 0 : cap_repl_out->base.vt = &fd_capture_link_buf_vt;
2575 0 : cap_repl_out->idx = idx;
2576 0 : cap_repl_out->mem = topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ].wksp;
2577 0 : cap_repl_out->chunk0 = fd_dcache_compact_chunk0( cap_repl_out->mem, link->dcache );
2578 0 : cap_repl_out->wmark = fd_dcache_compact_wmark( cap_repl_out->mem, link->dcache, link->mtu );
2579 0 : cap_repl_out->chunk = cap_repl_out->chunk0;
2580 0 : cap_repl_out->mcache = link->mcache;
2581 0 : cap_repl_out->depth = fd_mcache_depth( link->mcache );
2582 0 : cap_repl_out->seq = 0UL;
2583 :
2584 0 : ctx->capture_ctx->capctx_type.buf = cap_repl_out;
2585 0 : ctx->capture_ctx->capture_link = &cap_repl_out->base;
2586 0 : ctx->capture_ctx->current_txn_idx = 0UL;
2587 :
2588 :
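      : /* Locate the flow-control fseq for this link by scanning the
      :    solcap consumer tile's in-links for the one whose id matches;
      :    joining its fseq lets the producer side observe consumer
      :    progress. */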
2589 0 : ulong consumer_tile_idx = fd_topo_find_tile( topo, "solcap", 0UL );
      : FD_TEST( consumer_tile_idx!=ULONG_MAX );
2590 0 : fd_topo_tile_t * consumer_tile = &topo->tiles[ consumer_tile_idx ];
2591 0 : cap_repl_out->fseq = NULL;
2592 0 : for( ulong j = 0UL; j < consumer_tile->in_cnt; j++ ) {
2593 0 : if( FD_UNLIKELY( consumer_tile->in_link_id[ j ] == link->id ) ) {
2594 0 : cap_repl_out->fseq = fd_fseq_join( fd_topo_obj_laddr( topo, consumer_tile->in_link_fseq_obj_id[ j ] ) );
2595 0 : FD_TEST( cap_repl_out->fseq );
2596 0 : break;
2597 0 : }
2598 0 : }
2599 0 : }
2600 :
2601 0 : fd_memset( &ctx->metrics, 0, sizeof(ctx->metrics) );
2602 :
2603 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_link_wait, FD_MHIST_SECONDS_MIN( REPLAY, STORE_LINK_WAIT ),
2604 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_LINK_WAIT ) ) );
2605 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_link_work, FD_MHIST_SECONDS_MIN( REPLAY, STORE_LINK_WORK ),
2606 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_LINK_WORK ) ) );
2607 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_read_wait, FD_MHIST_SECONDS_MIN( REPLAY, STORE_READ_WAIT ),
2608 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_READ_WAIT ) ) );
2609 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_read_work, FD_MHIST_SECONDS_MIN( REPLAY, STORE_READ_WORK ),
2610 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_READ_WORK ) ) );
2611 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_publish_wait, FD_MHIST_SECONDS_MIN( REPLAY, STORE_PUBLISH_WAIT ),
2612 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_PUBLISH_WAIT ) ) );
2613 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_publish_work, FD_MHIST_SECONDS_MIN( REPLAY, STORE_PUBLISH_WORK ),
2614 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_PUBLISH_WORK ) ) );
2615 :
2616 0 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
2617 0 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
2618 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
2619 0 : }
2620 :
2621 : static ulong
2622 : populate_allowed_seccomp( fd_topo_t const * topo FD_FN_UNUSED,
2623 : fd_topo_tile_t const * tile FD_FN_UNUSED,
2624 : ulong out_cnt,
2625 0 : struct sock_filter * out ) {
2626 :
2627 0 : populate_sock_filter_policy_fd_replay_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
2628 0 : return sock_filter_policy_fd_replay_tile_instr_cnt;
2629 0 : }
2630 :
2631 : static ulong
2632 : populate_allowed_fds( fd_topo_t const * topo FD_FN_UNUSED,
2633 : fd_topo_tile_t const * tile FD_FN_UNUSED,
2634 : ulong out_fds_cnt,
2635 0 : int * out_fds ) {
2636 :
2637 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
2638 :
2639 0 : ulong out_cnt = 0UL;
2640 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
2641 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
2642 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
2643 0 : return out_cnt;
2644 0 : }
2645 :
2646 : #undef DEBUG_LOGGING
2647 :
2648 : /* counting carefully, after_credit can generate at most 7 frags and
2649 : returnable_frag boot_genesis can also generate at most 7 frags, so 14
2650 : is a conservative bound. */
2651 0 : #define STEM_BURST (14UL)
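      : /* Stem only invokes a callback when at least STEM_BURST credits
      :    are available on the out links, so a single callback may safely
      :    publish up to that many frags; undercounting here risks
      :    overrunning a downstream consumer. */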
2652 :
2653 : /* TODO: calculate this properly / fix stem to work with larger
2654 :    numbers of links.  The current value was chosen empirically, as
2655 :    anything larger slowed down replay times. */
2656 0 : #define STEM_LAZY ((long)10e3)
2657 :
2658 0 : #define STEM_CALLBACK_CONTEXT_TYPE fd_replay_tile_t
2659 0 : #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_replay_tile_t)
2660 :
2661 0 : #define STEM_CALLBACK_METRICS_WRITE metrics_write
2662 0 : #define STEM_CALLBACK_AFTER_CREDIT after_credit
2663 0 : #define STEM_CALLBACK_BEFORE_FRAG before_frag
2664 0 : #define STEM_CALLBACK_RETURNABLE_FRAG returnable_frag
2665 :
2666 : #include "../../disco/stem/fd_stem.c"
2667 :
2668 : fd_topo_run_tile_t fd_tile_replay = {
2669 : .name = "replay",
2670 : .populate_allowed_seccomp = populate_allowed_seccomp,
2671 : .populate_allowed_fds = populate_allowed_fds,
2672 : .scratch_align = scratch_align,
2673 : .scratch_footprint = scratch_footprint,
2674 : .privileged_init = privileged_init,
2675 : .unprivileged_init = unprivileged_init,
2676 : .run = stem_run,
2677 : };