Line data Source code
1 : #include "fd_replay_tile.h"
2 : #include "fd_sched.h"
3 : #include "fd_execrp.h"
4 : #include "fd_vote_tracker.h"
5 : #include "generated/fd_replay_tile_seccomp.h"
6 :
7 : #include "../genesis/fd_genesi_tile.h"
8 : #include "../poh/fd_poh.h"
9 : #include "../poh/fd_poh_tile.h"
10 : #include "../tower/fd_tower_tile.h"
11 : #include "../resolv/fd_resolv_tile.h"
12 : #include "../restore/utils/fd_ssload.h"
13 :
14 : #include "../../disco/tiles.h"
15 : #include "../../disco/fd_txn_m.h"
16 : #include "../../disco/store/fd_store.h"
17 : #include "../../disco/pack/fd_pack.h"
18 : #include "../../discof/fd_accdb_topo.h"
19 : #include "../../discof/reasm/fd_reasm.h"
20 : #include "../../disco/keyguard/fd_keyload.h"
21 : #include "../../disco/keyguard/fd_keyswitch.h"
22 : #include "../../disco/genesis/fd_genesis_cluster.h"
23 : #include "../../discof/genesis/genesis_hash.h"
24 : #include "../../util/pod/fd_pod.h"
25 : #include "../../flamenco/accdb/fd_accdb_admin_v1.h"
26 : #include "../../flamenco/accdb/fd_accdb_admin_v2.h"
27 : #include "../../flamenco/accdb/fd_accdb_impl_v1.h"
28 : #include "../../flamenco/accdb/fd_accdb_sync.h"
29 : #include "../../flamenco/accdb/fd_vinyl_req_pool.h"
30 : #include "../../flamenco/rewards/fd_rewards.h"
31 : #include "../../flamenco/leaders/fd_multi_epoch_leaders.h"
32 : #include "../../flamenco/progcache/fd_progcache_admin.h"
33 : #include "../../disco/metrics/fd_metrics.h"
34 :
35 : #include "../../flamenco/runtime/fd_runtime.h"
36 : #include "../../flamenco/runtime/fd_runtime_stack.h"
37 : #include "../../flamenco/runtime/fd_genesis_parse.h"
38 : #include "../../flamenco/fd_flamenco_base.h"
39 : #include "../../flamenco/runtime/sysvar/fd_sysvar_epoch_schedule.h"
40 : #include "../../flamenco/runtime/program/fd_precompiles.h"
41 : #include "../../flamenco/runtime/tests/fd_dump_pb.h"
42 :
43 : #include <stdio.h>
44 :
45 : /* Replay concepts:
46 :
47 :    - Blocks are aggregations of entries (aka microblocks), which are
48 : groupings of txns and are constructed by the block producer (see
49 : fd_pack).
50 :
51 : - Entries are grouped into entry batches by the block producer (see
52 : fd_pack / fd_shredder).
53 :
54 : - Entry batches are divided into chunks known as shreds by the block
55 : producer (see fd_shredder).
56 :
57 : - Shreds are grouped into forward-error-correction sets (FEC sets) by
58 : the block producer (see fd_shredder).
59 :
60 : - Shreds are transmitted to the rest of the cluster via the Turbine
61 : protocol (see fd_shredder / fd_shred).
62 :
63 : - Once enough shreds within a FEC set are received to recover the
64 : entirety of the shred data encoded by that FEC set, the receiver
65 : can "complete" the FEC set (see fd_fec_resolver).
66 :
67 : - If shreds in the FEC set are missing such that it can't complete,
68 : the receiver can use the Repair protocol to request missing shreds
69 :      in the FEC set (see fd_repair).
70 :
71 : - The current Repair protocol does not support requesting coding
72 :      shreds. As a result, some FEC sets might actually be complete
73 : (contain all data shreds). Repair currently hacks around this by
74 : forcing completion but the long-term solution is to add support for
75 :      repairing coding shreds via Repair.
76 :
77 :    - FEC sets are delivered in partial order to the Replay tile by the
78 :      Repair tile. Currently Replay only supports replaying entry
79 :      batches, so FEC sets need to be reassembled into an entry batch
80 :      before they can be replayed. The new Dispatcher will change this
81 :      by taking a FEC set as input instead. */
82 :
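
/* Illustrative sketch only (hypothetical example_* types, not used by
   this tile): the aggregation hierarchy described above. A block is a
   sequence of entry batches, an entry batch is a sequence of entries
   (microblocks), and an entry is a sequence of txns. On the wire, the
   serialized entry batch data is chunked into data shreds, which are
   grouped together with coding shreds into FEC sets. */

struct example_entry       { ulong txn_cnt;         uchar const *                      txn_payloads; }; /* aka microblock */
struct example_entry_batch { ulong entry_cnt;       struct example_entry const *       entries;       };
struct example_block       { ulong entry_batch_cnt; struct example_entry_batch const * entry_batches; };
struct example_fec_set     { ulong data_shred_cnt;  ulong coding_shred_cnt;                           }; /* covers a chunk of serialized entry batch data */
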
83 0 : #define IN_KIND_SNAP ( 0)
84 0 : #define IN_KIND_GENESIS ( 1)
85 0 : #define IN_KIND_IPECHO ( 2)
86 0 : #define IN_KIND_TOWER ( 3)
87 0 : #define IN_KIND_RESOLV ( 4)
88 0 : #define IN_KIND_POH ( 5)
89 0 : #define IN_KIND_EXECRP ( 6)
90 0 : #define IN_KIND_SHRED ( 7)
91 0 : #define IN_KIND_TXSEND ( 8)
92 0 : #define IN_KIND_GUI ( 9)
93 0 : #define IN_KIND_RPC (10)
94 0 : #define IN_KIND_GOSSIP_OUT (11)
95 :
96 : #define DEBUG_LOGGING 0
97 :
98 : /* The first bank that the replay tile produces, either from genesis
99 :    or from snapshot boot, will always be at bank index 0. */
100 0 : #define FD_REPLAY_BOOT_BANK_IDX (0UL)
101 :
102 : struct fd_replay_in_link {
103 : fd_wksp_t * mem;
104 : ulong chunk0;
105 : ulong wmark;
106 : ulong mtu;
107 : };
108 :
109 : typedef struct fd_replay_in_link fd_replay_in_link_t;
110 :
111 : struct fd_replay_out_link {
112 : ulong idx;
113 : fd_wksp_t * mem;
114 : ulong chunk0;
115 : ulong wmark;
116 : ulong chunk;
117 : };
118 :
119 : typedef struct fd_replay_out_link fd_replay_out_link_t;
120 :
121 : /* fd_block_id_map is a simple map of block-ids to bank indices. The
122 : map sits on top of an array of fd_block_id_ele_t. This serves as a
123 : translation layer between block ids to bank indices. The data
124 : array is indexed by bank index and the latest observed merkle root
125 : for the bank index is stored in the array. Once the block id has
126 : been observed, the entry is keyed by the latest merkle root (aka the
127 : block id). */
128 :
129 : struct fd_block_id_ele {
130 : fd_hash_t latest_mr;
131 : uint latest_fec_idx;
132 : int block_id_seen;
133 : ulong slot;
134 : ulong next_;
135 : };
136 : typedef struct fd_block_id_ele fd_block_id_ele_t;
137 :
138 : #define MAP_NAME fd_block_id_map
139 : #define MAP_ELE_T fd_block_id_ele_t
140 : #define MAP_KEY_T fd_hash_t
141 0 : #define MAP_KEY latest_mr
142 0 : #define MAP_NEXT next_
143 0 : #define MAP_KEY_EQ(k0,k1) (!memcmp((k0),(k1), sizeof(fd_hash_t)))
144 0 : #define MAP_KEY_HASH(key,seed) (fd_hash((seed),(key),sizeof(fd_hash_t)))
145 : #include "../../util/tmpl/fd_map_chain.c"
146 :
147 : static inline ulong
148 0 : fd_block_id_ele_get_idx( fd_block_id_ele_t * ele_arr, fd_block_id_ele_t * ele ) {
149 0 : return (ulong)(ele - ele_arr);
150 0 : }
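
/* A minimal usage sketch of the two-way translation described above
   (illustration only; `map` and `ele_arr` stand for the joined map and
   element array owned by the tile, mirroring how these structures are
   used later in this file). */

static inline ulong
example_block_id_to_bank_idx( fd_block_id_map_t * map,
                              fd_block_id_ele_t * ele_arr,
                              fd_hash_t const *   block_id ) {
  fd_block_id_ele_t * ele = fd_block_id_map_ele_query( map, block_id, NULL, ele_arr );
  if( FD_UNLIKELY( !ele ) ) return ULONG_MAX;     /* block id not (yet) observed */
  return fd_block_id_ele_get_idx( ele_arr, ele ); /* element position is the bank index */
}

static inline fd_hash_t const *
example_bank_idx_to_block_id( fd_block_id_ele_t const * ele_arr,
                              ulong                     bank_idx ) {
  return &ele_arr[ bank_idx ].latest_mr;          /* latest observed merkle root aka block id */
}
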
151 :
152 : struct fd_replay_tile {
153 : fd_wksp_t * wksp;
154 :
155 : fd_accdb_admin_t accdb_admin[1];
156 : fd_accdb_user_t accdb[1];
157 : fd_progcache_admin_t progcache_admin[1];
158 :
159 : fd_txncache_t * txncache;
160 : fd_store_t * store;
161 : fd_banks_t banks[1];
162 :
163 :   /* This flag is 1 if we have seen a vote signature that our node
164 :      sent out get rooted at least once. The value is 0 otherwise.
165 : We can't become leader and pack blocks until this flag has been
166 : set. This parallels the Agave 'has_new_vote_been_rooted'. */
167 : int identity_vote_rooted;
168 : int wait_for_vote_to_start_leader;
169 :
170 : /* wfs_enabled is 1 if the validator is booted in
171 : wait_for_supermajority mode. In this mode replay (and, by extension,
172 : downstream consumers) is not allowed to make progress until 80% of
173 : the cluster has published their ContactInfo in Gossip with a
174 : shred version matching expected_shred_version. When this happens,
175 : wfs_complete will be set to 1. */
176 : int wfs_enabled;
177 : int wfs_complete;
178 :
179 : fd_hash_t expected_bank_hash;
180 :
181 : ulong reasm_seed;
182 : fd_reasm_t * reasm;
183 :
184 : fd_sched_t * sched;
185 :
186 : ulong vote_tracker_seed;
187 : fd_vote_tracker_t * vote_tracker;
188 :
189 : int has_genesis_hash;
190 : char genesis_path[ PATH_MAX ];
191 : fd_hash_t genesis_hash[1];
192 : fd_genesis_t genesis[1];
193 : ulong cluster_type;
194 :
195 : int has_genesis_timestamp;
196 : ulong genesis_timestamp;
197 : int has_expected_genesis_timestamp;
198 : ulong expected_genesis_timestamp;
199 :
200 : #define FD_REPLAY_HARD_FORKS_MAX (64UL)
201 : ulong hard_forks_cnt;
202 : ulong hard_forks[ FD_REPLAY_HARD_FORKS_MAX ];
203 : ulong hard_forks_cnts[ FD_REPLAY_HARD_FORKS_MAX ];
204 :
205 : ushort expected_shred_version;
206 : ushort ipecho_shred_version;
207 :
208 : /* A note on publishing ...
209 :
210 : The watermarks are used to publish our fork-aware structures. For
211 : example, store, banks, and txncache need to be published to release
212 : resources occupied by rooted or dead blocks. In general,
213 : publishing has the effect of pruning forks in those structures,
214 : indicating that it is ok to release the memory being occupied by
215 : the blocks on said forks. Tower is responsible for informing us of
216 : the latest block on the consensus rooted fork. As soon as we can,
217 : we should move the published root as close as possible to the
218 : latest consensus root, publishing/pruning everything on the fork
219 : tree along the way. That is, all the blocks that directly descend
220 : from the current published root (inclusive) to the new published
221 : root (exclusive) on the rooted fork, as well as all the minority
222 : forks that branch from said blocks.
223 :
224 : Ideally, we'd move the published root to the consensus root
225 : immediately upon receiving a new consensus root. However, that's
226 : not always safe to do. One thing we need to be careful about is
227 : making sure that there are no more users/consumers of
228 : soon-to-be-pruned blocks, lest a use-after-free occurs. This can
229 : be done by using a reference counter for each block. Any
230 : concurrent activity, such as transaction execution in the exec
231 : tiles, should retain a refcnt on the block for as
232 : long as it needs access to the shared fork-aware structures related
233 : to that block. Eventually, refcnt on a given block will drop down
234 : to 0 as the block either finishes replaying or gets marked as dead,
235 : and any other tile that has retained a refcnt on the block releases
236 : it. At that point, it becomes a candidate for pruning. The key to
237 : safe publishing then becomes figuring out how far we could advance
238 : the published root, such that every minority fork branching off of
239 : blocks in between the current published root (inclusive) and the
240 : new published root (exclusive) is safe to be pruned. This is a
241 : straightforward tree traversal, where if a block B on the rooted
242 : fork has refcnt 0, and all minority forks branching off of B also
243 : have refcnt 0, then B is safe to be pruned. We advance the
244 : published root to the farthest consecutively prunable block on the
245 : rooted fork. Note that reasm presents the replay tile with a clean
246 : view of the world where every block is chained off of a parent
247 :      block. So there are no orphaned/dangling tree nodes to worry
248 : about. The world is a nice single tree as far as replay is
249 : concerned.
250 :
251 : In the following fork tree, every node is a block and the number in
252 : parentheses is the refcnt on the block. The chain marked with
253 : double slashes is the rooted fork. Suppose the published root is
254 : at block P, and consensus root is at block T. We can't publish
255 : past block P because Q has refcnt 1.
256 :
257 :
258 : P(0)
259 : / \\
260 : Q(1) A(0)
261 : / || \
262 : X(0) B(0) C(0)
263 : / || \
264 : Y(0) M(0) R(0)
265 : / || / \
266 : D(2) T(0) J(0) L(0)
267 : ||
268 : ..
269 : ..
270 : ..
271 : ||
272 : blocks we might be actively replaying
273 :
274 :
275 : When refcnt on Q drops to 0, we would be able to advance the
276 : published root to block M, because blocks P, A, and B, as well as
277 : all subtrees branching off of them, have refcnt 0, and therefore
278 : can be pruned. Block M itself cannot be pruned yet because its
279 : child block D has refcnt 2. After publishing/pruning, the fork
280 : tree would be:
281 :
282 :
283 : M(0)
284 : / ||
285 : D(2) T(0)
286 : ||
287 : ..
288 : ..
289 : ..
290 : ||
291 : blocks we might be actively replaying
292 :
293 :
294 : As a result, the shared fork-aware structures can free resources
295 : for blocks P, A, B, and all subtrees branching off of them.
296 :
297 : For the reference counting part, the replay tile is the sole entity
298 : that can update the refcnt. This ensures that all refcnt increment
299 : and decrement attempts are serialized at the replay tile, and that
300 :      there is no racy resurrection of a soon-to-be-pruned block. If a
301 : refcnt increment request arrives after a block has been pruned,
302 : replay simply rejects the request.
303 :
304 : A note on the implementation of the above ...
305 :
306 : Upon receiving a new consensus root, we descend down the rooted
307 : fork from the current published root to the new consensus root. On
308 : each node/block of the rooted fork, we do a summation of the refcnt
309 : on the block and all the minority fork blocks branching from the
310 : block. If the summation is 0, the block is safe for pruning. We
311 : advance the published root to the far end of the consecutive run of
312 : 0 refcnt sums originating from the current published root. On our
313 : descent down the minority forks, we also mark any block that hasn't
314 : finished replaying as dead, so we don't waste time executing them.
315 : No more transactions shall be dispatched for execution from dead
316 : blocks.
317 :
318 : Blocks start out with a refcnt of 0. Other tiles may send a
319 : request to the replay tile for a reference on a block. The
320 : transaction dispatcher is another source of refcnt updates. On
321 : every dispatch of a transaction for block B, we increment the
322 : refcnt for B. And on every transaction finalization, we decrement
323 : the refcnt for B. This means that whenever the refcnt on a block
324 : is 0, there is no more reference on that block from the execution
325 : pipeline. While it might be tempting to simply increment the
326 : refcnt once when we start replaying a block, and decrement the
327 : refcnt once when we finish a block, this more fine-grained refcnt
328 : update strategy allows for aborting and potentially immediate
329 : pruning of blocks under interleaved block replay. Upon receiving a
330 : new consensus root, we can simply look at the refcnt on minority
331 : fork blocks, and a refcnt of 0 would imply that the block is safe
332 : for pruning, even if we haven't finished replaying it. Without the
333 : fine-grained refcnt, we would need to first stop dispatching from
334 : the aborted block, and then wait for a full drain of the execution
335 : pipeline to know for sure that there are no more in-flight
336 : transactions executing on the aborted block. Note that this will
337 : allow the refcnt on any block to transiently drop down to 0. We
338 : will not mistakenly prune an actively replaying block, aka a leaf
339 : node, that is chaining off of the rooted fork, because the
340 : consensus root is always an ancestor of the actively replaying tip.
341 : */
342 : fd_hash_t consensus_root; /* The most recent block to have reached max lockout in the tower. */
343 : ulong consensus_root_slot; /* slot number of the above. */
344 : ulong consensus_root_bank_idx; /* bank index of the above. */
345 : ulong published_root_slot; /* slot number of the published root. */
346 : ulong published_root_bank_idx; /* bank index of the published root. */
347 :
348 : /* Randomly generated block id for the initial genesis/snapshot slot.
349 : To be replaced with block id in the snapshot manifest when SIMD-333
350 : is activated. */
351 :
352 : fd_hash_t initial_block_id;
353 :
354 : /* We need to maintain a tile-local mapping of block-ids to bank index
355 : and vice versa. This translation layer is needed for conversion
356 : since tower operates on block-ids and downstream consumers of FEC
357 : sets operate on bank indices. This mapping must happen both ways:
358 : 1. tower sends us block ids and we must map them to bank indices.
359 : 2. when a block is completed, we must map the bank index to a block
360 : id to send a slot complete message to tower. */
361 : ulong block_id_len;
362 : fd_block_id_ele_t * block_id_arr;
363 : ulong block_id_map_seed;
364 : fd_block_id_map_t * block_id_map;
365 :
366 : /* Capture-related configs */
367 : fd_capture_ctx_t * capture_ctx;
368 : FILE * capture_file;
369 : fd_capture_link_buf_t cap_repl_out[1];
370 :
371 : /* Protobuf dumping context for debugging runtime execution and
372 : collecting seed corpora. */
373 : fd_dump_proto_ctx_t * dump_proto_ctx;
374 :
375 : /* Whether the runtime has been booted either from snapshot loading
376 : or from genesis. */
377 : int is_booted;
378 :
379 : /* Buffer to store vote towers that need to be published to the Tower
380 : tile. */
381 :
382 : fd_multi_epoch_leaders_t * mleaders;
383 :
384 : int larger_max_cost_per_block;
385 :
386 : /* When we transition to becoming leader, we can only unbecome the
387 : leader if we have received a block id from the FEC reassembler, and
388 : a message from PoH that the leader slot has ended. After both of
389 :      these conditions are met, we are free to unbecome the leader.
390 : */
391 : uint is_leader : 1;
392 : uint supports_leader : 1;
393 : int recv_poh;
394 : ulong next_leader_slot;
395 : long next_leader_tickcount;
396 : ulong highwater_leader_slot;
397 : ulong reset_slot;
398 : fd_bank_t reset_bank[1];
399 : fd_hash_t reset_block_id;
400 : long reset_timestamp_nanos;
401 : double slot_duration_nanos;
402 : double slot_duration_ticks;
403 : fd_bank_t leader_bank[1];
404 :
405 : fd_pubkey_t identity_pubkey[1];
406 : ulong identity_idx;
407 :
408 : fd_keyswitch_t * keyswitch;
409 : int halt_leader;
410 :
411 : ulong resolv_tile_cnt;
412 :
413 : int in_kind[ 128 ];
414 : fd_replay_in_link_t in[ 128 ];
415 :
416 : fd_replay_out_link_t exec_out[ 1 ];
417 :
418 : fd_replay_out_link_t replay_out[1];
419 :
420 : fd_replay_out_link_t epoch_out[1];
421 :
422 : /* The gui tile needs to reliably own a reference to the most recent
423 : completed active bank. Replay needs to know if the gui as a
424 : consumer is enabled so it can increment the bank's refcnt before
425 : publishing the bank_idx to the gui. */
426 : int gui_enabled;
427 : int rpc_enabled;
428 :
429 : # if FD_HAS_FLATCC
430 : /* For dumping blocks to protobuf. For backtest only. */
431 : fd_block_dump_ctx_t * block_dump_ctx;
432 : # endif
433 :
434 : /* We need a few pieces of information to compute the right addresses
435 : for bundle crank information that we need to send to pack. */
436 : struct {
437 : int enabled;
438 : fd_pubkey_t vote_account;
439 : fd_bundle_crank_gen_t gen[1];
440 : } bundle;
441 :
442 : struct {
443 : ulong store_query_acquire;
444 : ulong store_query_release;
445 : fd_histf_t store_query_wait[1];
446 : fd_histf_t store_query_work[1];
447 : ulong store_query_cnt;
448 : ulong store_query_missing_cnt;
449 : ulong store_query_mr;
450 : ulong store_query_missing_mr;
451 :
452 : ulong slots_total;
453 : ulong transactions_total;
454 :
455 : ulong reasm_latest_slot;
456 : ulong reasm_latest_fec_idx;
457 :
458 : ulong sched_full;
459 : ulong reasm_empty;
460 : ulong leader_bid_wait;
461 : ulong banks_full;
462 : ulong storage_root_behind;
463 :
464 : fd_histf_t root_slot_dur[1];
465 : fd_histf_t root_account_dur[1];
466 : } metrics;
467 :
468 : uchar __attribute__((aligned(FD_MULTI_EPOCH_LEADERS_ALIGN))) mleaders_mem[ FD_MULTI_EPOCH_LEADERS_FOOTPRINT ];
469 :
470 : ulong runtime_stack_seed;
471 : fd_runtime_stack_t runtime_stack;
472 : };
473 :
474 : typedef struct fd_replay_tile fd_replay_tile_t;
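
/* A minimal sketch of the publishing rule described in the "note on
   publishing" comment inside fd_replay_tile above (illustration only;
   the real pruning lives in fd_banks and the root-advance handling
   elsewhere in this tile). Starting from the current published root,
   walk down the rooted fork towards the consensus root, and advance
   the published root across every block whose refcnt, summed with the
   refcnt of every minority-fork block branching off of it, is zero.
   Assumes parent/child/sibling indices terminate with ULONG_MAX, as in
   the child iteration used by maybe_become_leader below. */

static inline ulong
example_subtree_refcnt_sum( fd_banks_t * banks,
                            ulong        idx ) {
  fd_bank_t bank[1];
  if( FD_UNLIKELY( !fd_banks_bank_query( bank, banks, idx ) ) ) return 0UL;
  ulong sum = bank->data->refcnt;
  for( ulong child=bank->data->child_idx; child!=ULONG_MAX; ) {
    fd_bank_t child_bank[1];
    if( FD_UNLIKELY( !fd_banks_bank_query( child_bank, banks, child ) ) ) break;
    sum  += example_subtree_refcnt_sum( banks, child );
    child = child_bank->data->sibling_idx;
  }
  return sum;
}

static inline int
example_is_ancestor( fd_banks_t * banks,
                     ulong        ancestor,
                     ulong        descendant ) {
  for( ulong idx=descendant; idx!=ULONG_MAX; ) {
    if( idx==ancestor ) return 1;
    fd_bank_t bank[1];
    if( FD_UNLIKELY( !fd_banks_bank_query( bank, banks, idx ) ) ) break;
    idx = bank->data->parent_idx;
  }
  return 0;
}

static inline ulong
example_advance_published_root( fd_banks_t * banks,
                                ulong        published_root_idx,
                                ulong        consensus_root_idx ) {
  ulong cur = published_root_idx;
  while( cur!=consensus_root_idx ) {
    fd_bank_t bank[1];
    if( FD_UNLIKELY( !fd_banks_bank_query( bank, banks, cur ) ) ) break;

    /* Sum the refcnt of cur and of all minority forks branching off of
       cur, i.e. every child subtree except the one that continues
       along the rooted fork. */
    ulong rooted_child = ULONG_MAX;
    ulong sum          = bank->data->refcnt;
    for( ulong child=bank->data->child_idx; child!=ULONG_MAX; ) {
      fd_bank_t child_bank[1];
      if( FD_UNLIKELY( !fd_banks_bank_query( child_bank, banks, child ) ) ) break;
      if( FD_LIKELY( example_is_ancestor( banks, child, consensus_root_idx ) ) ) rooted_child = child;
      else sum += example_subtree_refcnt_sum( banks, child );
      child = child_bank->data->sibling_idx;
    }

    if( sum || rooted_child==ULONG_MAX ) break; /* cur is not safe to prune */
    cur = rooted_child;                         /* cur and its minority forks can be pruned */
  }
  return cur; /* new published root; never advances past the consensus root */
}
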
475 :
476 : FD_FN_CONST static inline ulong
477 0 : scratch_align( void ) {
478 0 : return 128UL;
479 0 : }
480 : FD_FN_PURE static inline ulong
481 0 : scratch_footprint( fd_topo_tile_t const * tile ) {
482 0 : ulong chain_cnt = fd_block_id_map_chain_cnt_est( tile->replay.max_live_slots );
483 :
484 0 : ulong l = FD_LAYOUT_INIT;
485 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_replay_tile_t), sizeof(fd_replay_tile_t) );
486 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_block_id_ele_t), sizeof(fd_block_id_ele_t) * tile->replay.max_live_slots );
487 0 : l = FD_LAYOUT_APPEND( l, fd_block_id_map_align(), fd_block_id_map_footprint( chain_cnt ) );
488 0 : l = FD_LAYOUT_APPEND( l, fd_txncache_align(), fd_txncache_footprint( tile->replay.max_live_slots ) );
489 0 : l = FD_LAYOUT_APPEND( l, fd_reasm_align(), fd_reasm_footprint( tile->replay.fec_max ) );
490 0 : l = FD_LAYOUT_APPEND( l, fd_sched_align(), fd_sched_footprint( tile->replay.sched_depth, tile->replay.max_live_slots ) );
491 0 : l = FD_LAYOUT_APPEND( l, fd_vinyl_req_pool_align(), fd_vinyl_req_pool_footprint( 1UL, 1UL ) );
492 0 : l = FD_LAYOUT_APPEND( l, fd_vote_tracker_align(), fd_vote_tracker_footprint() );
493 0 : l = FD_LAYOUT_APPEND( l, fd_capture_ctx_align(), fd_capture_ctx_footprint() );
494 0 : l = FD_LAYOUT_APPEND( l, alignof(fd_dump_proto_ctx_t), sizeof(fd_dump_proto_ctx_t) );
495 :
496 0 : # if FD_HAS_FLATCC
497 0 : if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
498 0 : l = FD_LAYOUT_APPEND( l, fd_block_dump_context_align(), fd_block_dump_context_footprint() );
499 0 : }
500 0 : # endif
501 :
502 0 : l = FD_LAYOUT_FINI( l, scratch_align() );
503 :
504 0 : return l;
505 0 : }
506 :
507 : static inline void
508 0 : metrics_write( fd_replay_tile_t * ctx ) {
509 0 : FD_MCNT_SET ( REPLAY, STORE_QUERY_ACQUIRE, ctx->metrics.store_query_acquire );
510 0 : FD_MCNT_SET ( REPLAY, STORE_QUERY_RELEASE, ctx->metrics.store_query_release );
511 0 : FD_MHIST_COPY( REPLAY, STORE_QUERY_WAIT, ctx->metrics.store_query_wait );
512 0 : FD_MHIST_COPY( REPLAY, STORE_QUERY_WORK, ctx->metrics.store_query_work );
513 0 : FD_MCNT_SET ( REPLAY, STORE_QUERY_CNT, ctx->metrics.store_query_cnt );
514 0 : FD_MCNT_SET ( REPLAY, STORE_QUERY_MISSING_CNT, ctx->metrics.store_query_missing_cnt );
515 0 : FD_MGAUGE_SET( REPLAY, STORE_QUERY_MR, ctx->metrics.store_query_mr );
516 0 : FD_MGAUGE_SET( REPLAY, STORE_QUERY_MISSING_MR, ctx->metrics.store_query_missing_mr );
517 :
518 0 : FD_MGAUGE_SET( REPLAY, ROOT_SLOT, ctx->consensus_root_slot==ULONG_MAX ? 0UL : ctx->consensus_root_slot );
519 0 : ulong leader_slot = ctx->leader_bank->data ? fd_bank_slot_get( ctx->leader_bank ) : 0UL;
520 0 : FD_MGAUGE_SET( REPLAY, LEADER_SLOT, leader_slot );
521 :
522 0 : if( FD_LIKELY( ctx->leader_bank->data ) ) {
523 0 : FD_MGAUGE_SET( REPLAY, NEXT_LEADER_SLOT, leader_slot );
524 0 : FD_MGAUGE_SET( REPLAY, LEADER_SLOT, leader_slot );
525 0 : } else {
526 0 : FD_MGAUGE_SET( REPLAY, NEXT_LEADER_SLOT, ctx->next_leader_slot==ULONG_MAX ? 0UL : ctx->next_leader_slot );
527 0 : FD_MGAUGE_SET( REPLAY, LEADER_SLOT, 0UL );
528 0 : }
529 0 : FD_MGAUGE_SET( REPLAY, RESET_SLOT, ctx->reset_slot==ULONG_MAX ? 0UL : ctx->reset_slot );
530 :
531 0 : fd_bank_data_t * bank_pool = fd_banks_get_bank_pool( ctx->banks->data );
532 0 : ulong live_banks = fd_banks_pool_max( bank_pool ) - fd_banks_pool_free( bank_pool );
533 0 : FD_MGAUGE_SET( REPLAY, LIVE_BANKS, live_banks );
534 :
535 0 : ulong reasm_free = fd_reasm_free( ctx->reasm );
536 0 : FD_MGAUGE_SET( REPLAY, REASM_FREE, reasm_free );
537 :
538 0 : FD_MCNT_SET( REPLAY, SLOTS_TOTAL, ctx->metrics.slots_total );
539 0 : FD_MCNT_SET( REPLAY, TRANSACTIONS_TOTAL, ctx->metrics.transactions_total );
540 :
541 0 : FD_MGAUGE_SET( REPLAY, REASM_LATEST_SLOT, ctx->metrics.reasm_latest_slot );
542 0 : FD_MGAUGE_SET( REPLAY, REASM_LATEST_FEC_IDX, ctx->metrics.reasm_latest_fec_idx );
543 :
544 0 : FD_MCNT_SET( REPLAY, SCHED_FULL, ctx->metrics.sched_full );
545 0 : FD_MCNT_SET( REPLAY, REASM_EMPTY, ctx->metrics.reasm_empty );
546 0 : FD_MCNT_SET( REPLAY, LEADER_BID_WAIT, ctx->metrics.leader_bid_wait );
547 0 : FD_MCNT_SET( REPLAY, BANKS_FULL, ctx->metrics.banks_full );
548 0 : FD_MCNT_SET( REPLAY, STORAGE_ROOT_BEHIND, ctx->metrics.storage_root_behind );
549 :
550 0 : FD_MCNT_SET( REPLAY, PROGCACHE_ROOTED, ctx->progcache_admin->metrics.root_cnt );
551 0 : FD_MCNT_SET( REPLAY, PROGCACHE_GC_ROOT, ctx->progcache_admin->metrics.gc_root_cnt );
552 :
553 0 : FD_MCNT_SET( REPLAY, ACCDB_CREATED, ctx->accdb->base.created_cnt );
554 0 : FD_MCNT_SET( REPLAY, ACCDB_REVERTED, ctx->accdb_admin->base.revert_cnt );
555 0 : FD_MCNT_SET( REPLAY, ACCDB_ROOTED, ctx->accdb_admin->base.root_cnt );
556 0 : FD_MCNT_SET( REPLAY, ACCDB_ROOTED_BYTES, ctx->accdb_admin->base.root_tot_sz );
557 0 : FD_MCNT_SET( REPLAY, ACCDB_GC_ROOT, ctx->accdb_admin->base.gc_root_cnt );
558 0 : FD_MCNT_SET( REPLAY, ACCDB_RECLAIMED, ctx->accdb_admin->base.reclaim_cnt );
559 0 : FD_MHIST_COPY( REPLAY, ROOT_SLOT_DURATION_SECONDS, ctx->metrics.root_slot_dur );
560 0 : FD_MHIST_COPY( REPLAY, ROOT_ACCOUNT_DURATION_SECONDS, ctx->metrics.root_account_dur );
561 0 : FD_MCNT_SET( REPLAY, ROOT_ELAPSED_SECONDS_DB, (ulong)ctx->accdb_admin->base.dt_vinyl );
562 0 : FD_MCNT_SET( REPLAY, ROOT_ELAPSED_SECONDS_COPY, (ulong)ctx->accdb_admin->base.dt_copy );
563 0 : FD_MCNT_SET( REPLAY, ROOT_ELAPSED_SECONDS_GC, (ulong)ctx->accdb_admin->base.dt_gc );
564 0 : }
565 :
566 : static inline ulong
567 : generate_epoch_info_msg( ulong epoch,
568 : fd_epoch_schedule_t const * epoch_schedule,
569 : fd_vote_stakes_t * vote_stakes,
570 : ushort vote_stakes_fork_idx,
571 : fd_features_t const * features,
572 : fd_epoch_info_msg_t * epoch_info_msg,
573 0 : int current_epoch ) {
574 0 : fd_vote_stake_weight_t * stake_weights = epoch_info_msg->weights;
575 :
576 0 : epoch_info_msg->epoch = epoch;
577 0 : epoch_info_msg->start_slot = fd_epoch_slot0( epoch_schedule, epoch );
578 0 : epoch_info_msg->slot_cnt = fd_epoch_slot_cnt( epoch_schedule, epoch );
579 0 : epoch_info_msg->excluded_stake = 0UL;
580 0 : epoch_info_msg->vote_keyed_lsched = 1UL;
581 :
582 : /* FIXME: SIMD-0180 - hack to (de)activate in testnet vs mainnet.
583 : This code can be removed once the feature is active. */
584 0 : if( (1==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_TESTNET) ||
585 0 : (0==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_MAINNET) ) {
586 0 : epoch_info_msg->vote_keyed_lsched = 0UL;
587 0 : }
588 :
589 : /* epoch_stakes from manifest are already filtered (stake>0), but not sorted */
590 0 : ulong idx = 0UL;
591 0 : uchar __attribute__((aligned(FD_VOTE_STAKES_ITER_ALIGN))) iter_mem[ FD_VOTE_STAKES_ITER_FOOTPRINT ];
592 0 : for( fd_vote_stakes_iter_t * iter = fd_vote_stakes_fork_iter_init( vote_stakes, vote_stakes_fork_idx, iter_mem );
593 0 : !fd_vote_stakes_fork_iter_done( vote_stakes, vote_stakes_fork_idx, iter );
594 0 : fd_vote_stakes_fork_iter_next( vote_stakes, vote_stakes_fork_idx, iter ) ) {
595 :
596 0 : fd_pubkey_t pubkey;
597 0 : ulong stake_t_1;
598 0 : ulong stake_t_2;
599 0 : fd_pubkey_t node_account_t_1;
600 0 : fd_pubkey_t node_account_t_2;
601 0 : fd_vote_stakes_fork_iter_ele( vote_stakes, vote_stakes_fork_idx, iter, &pubkey, &stake_t_1, &stake_t_2, &node_account_t_1, &node_account_t_2 );
602 :
603 0 : ulong stake = current_epoch ? stake_t_1 : stake_t_2;
604 0 : fd_pubkey_t node_account = current_epoch ? node_account_t_1 : node_account_t_2;
605 0 : if( FD_UNLIKELY( !stake ) ) continue;
606 :
607 0 : stake_weights[ idx ].stake = stake;
608 0 : memcpy( stake_weights[ idx ].id_key.uc, &node_account, sizeof(fd_pubkey_t) );
609 0 : memcpy( stake_weights[ idx ].vote_key.uc, &pubkey, sizeof(fd_pubkey_t) );
610 0 : idx++;
611 0 : }
612 0 : epoch_info_msg->staked_cnt = idx;
613 0 : sort_vote_weights_by_stake_vote_inplace( stake_weights, idx );
614 :
615 0 : epoch_info_msg->epoch_schedule = *epoch_schedule;
616 0 : epoch_info_msg->features = *features;
617 :
618 0 : return fd_epoch_info_msg_sz( epoch_info_msg->staked_cnt );
619 0 : }
620 :
621 : static void
622 : publish_epoch_info( fd_replay_tile_t * ctx,
623 : fd_stem_context_t * stem,
624 : fd_bank_t * bank,
625 0 : int current_epoch ) {
626 0 : fd_epoch_schedule_t const * schedule = fd_bank_epoch_schedule_query( bank );
627 0 : ulong epoch = fd_slot_to_epoch( schedule, fd_bank_slot_get( bank ), NULL );
628 :
629 0 : fd_features_t const * features = fd_bank_features_query( bank );
630 :
631 0 : fd_epoch_info_msg_t * epoch_info_msg = fd_chunk_to_laddr( ctx->epoch_out->mem, ctx->epoch_out->chunk );
632 :
633 0 : fd_vote_stakes_t * vote_stakes = fd_bank_vote_stakes_locking_modify( bank );
634 0 : ulong epoch_info_sz = generate_epoch_info_msg( epoch+fd_ulong_if( current_epoch, 1UL, 0UL), schedule, vote_stakes, bank->data->vote_stakes_fork_id, features, epoch_info_msg, current_epoch );
635 0 : fd_bank_vote_stakes_end_locking_modify( bank );
636 :
637 0 : ulong epoch_info_sig = 4UL;
638 0 : fd_stem_publish( stem, ctx->epoch_out->idx, epoch_info_sig, ctx->epoch_out->chunk, epoch_info_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
639 0 : ctx->epoch_out->chunk = fd_dcache_compact_next( ctx->epoch_out->chunk, epoch_info_sz, ctx->epoch_out->chunk0, ctx->epoch_out->wmark );
640 :
641 0 : fd_multi_epoch_leaders_epoch_msg_init( ctx->mleaders, epoch_info_msg );
642 0 : fd_multi_epoch_leaders_epoch_msg_fini( ctx->mleaders );
643 :
644 0 : }
645 :
646 : /**********************************************************************/
647 : /* Transaction execution state machine helpers */
648 : /**********************************************************************/
649 :
650 : static void
651 : replay_block_start( fd_replay_tile_t * ctx,
652 : fd_stem_context_t * stem,
653 : ulong bank_idx,
654 : ulong parent_bank_idx,
655 0 : ulong slot ) {
656 0 : long before = fd_log_wallclock();
657 :
658 0 : fd_bank_t bank[1];
659 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, bank_idx ) ) ) {
660 0 : FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", bank_idx ));
661 0 : }
662 0 : if( FD_UNLIKELY( bank->data->flags!=FD_BANK_FLAGS_INIT ) ) {
663 0 : FD_LOG_CRIT(( "invariant violation: bank is not in correct state for bank index %lu", bank_idx ));
664 0 : }
665 :
666 0 : bank->data->preparation_begin_nanos = before;
667 :
668 0 : fd_bank_t parent_bank[1];
669 0 : if( FD_UNLIKELY( !fd_banks_bank_query( parent_bank, ctx->banks, parent_bank_idx ) ) ) {
670 0 : FD_LOG_CRIT(( "invariant violation: parent bank is NULL for bank index %lu", parent_bank_idx ));
671 0 : }
672 0 : ulong parent_slot = fd_bank_slot_get( parent_bank );
673 :
674 : /* Clone the bank from the parent. We must special case the first
675 : slot that is executed as the snapshot does not provide a parent
676 : block id. */
677 :
678 0 : if( FD_UNLIKELY( !fd_banks_clone_from_parent( bank, ctx->banks, bank_idx ) ) ) {
679 0 : FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", bank_idx ));
680 0 : }
681 0 : fd_bank_slot_set( bank, slot );
682 0 : fd_bank_parent_slot_set( bank, parent_slot );
683 0 : bank->data->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, parent_bank->data->txncache_fork_id );
684 :
685 : /* Create a new funk txn for the block. */
686 :
687 0 : fd_funk_txn_xid_t xid = { .ul = { slot, bank_idx } };
688 0 : fd_funk_txn_xid_t parent_xid = { .ul = { parent_slot, parent_bank_idx } };
689 0 : fd_accdb_attach_child( ctx->accdb_admin, &parent_xid, &xid );
690 0 : fd_progcache_txn_attach_child( ctx->progcache_admin, &parent_xid, &xid );
691 :
692 : /* Update required runtime state and handle potential boundary. */
693 :
694 0 : fd_bank_shred_cnt_set( bank, 0UL );
695 0 : fd_bank_execution_fees_set( bank, 0UL );
696 0 : fd_bank_priority_fees_set( bank, 0UL );
697 0 : fd_bank_tips_set( bank, 0UL );
698 0 : fd_bank_identity_vote_idx_set( bank, ULONG_MAX );
699 :
700 0 : fd_bank_block_height_set( bank, fd_bank_block_height_get( bank ) + 1UL );
701 :
702 0 : int is_epoch_boundary = 0;
703 0 : fd_runtime_block_execute_prepare( ctx->banks, bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
704 0 : if( FD_UNLIKELY( is_epoch_boundary ) ) publish_epoch_info( ctx, stem, bank, 1 );
705 :
706 0 : ulong max_tick_height;
707 0 : if( FD_UNLIKELY( FD_RUNTIME_EXECUTE_SUCCESS!=fd_runtime_compute_max_tick_height( fd_bank_ticks_per_slot_get( parent_bank ), slot, &max_tick_height ) ) ) {
708 0 : FD_LOG_CRIT(( "couldn't compute tick height/max tick height slot %lu ticks_per_slot %lu", slot, fd_bank_ticks_per_slot_get( parent_bank ) ));
709 0 : }
710 0 : fd_bank_max_tick_height_set( bank, max_tick_height );
711 0 : fd_bank_tick_height_set( bank, fd_bank_max_tick_height_get( parent_bank ) ); /* The parent's max tick height is our starting tick height. */
712 0 : fd_sched_set_poh_params( ctx->sched, bank->data->idx, fd_bank_tick_height_get( bank ), fd_bank_max_tick_height_get( bank ), fd_bank_hashes_per_tick_get( bank ), fd_bank_poh_query( parent_bank ) );
713 :
714 0 : FD_LOG_DEBUG(( "replay_block_start: bank_idx=%lu slot=%lu parent_bank_idx=%lu", bank_idx, slot, parent_bank_idx ));
715 0 : }
716 :
717 : static void
718 0 : cost_tracker_snap( fd_bank_t * bank, fd_replay_slot_completed_t * slot_info ) {
719 0 : if( bank->data->cost_tracker_pool_idx!=fd_bank_cost_tracker_pool_idx_null( fd_bank_get_cost_tracker_pool( bank->data ) ) ) {
720 0 : fd_cost_tracker_t const * cost_tracker = fd_bank_cost_tracker_locking_query( bank );
721 0 : slot_info->cost_tracker.block_cost = cost_tracker->block_cost;
722 0 : slot_info->cost_tracker.vote_cost = cost_tracker->vote_cost;
723 0 : slot_info->cost_tracker.allocated_accounts_data_size = cost_tracker->allocated_accounts_data_size;
724 0 : slot_info->cost_tracker.block_cost_limit = cost_tracker->block_cost_limit;
725 0 : slot_info->cost_tracker.vote_cost_limit = cost_tracker->vote_cost_limit;
726 0 : slot_info->cost_tracker.account_cost_limit = cost_tracker->account_cost_limit;
727 0 : fd_bank_cost_tracker_end_locking_query( bank );
728 0 : } else {
729 0 : memset( &slot_info->cost_tracker, 0, sizeof(slot_info->cost_tracker) );
730 0 : }
731 0 : }
732 :
733 : static ulong
734 0 : get_identity_balance( fd_replay_tile_t * ctx, fd_funk_txn_xid_t xid ) {
735 0 : ulong identity_balance = ULONG_MAX;
736 0 : fd_accdb_ro_t identity_acc[1];
737 0 : if( FD_LIKELY( fd_accdb_open_ro( ctx->accdb, identity_acc, &xid, ctx->identity_pubkey ) ) ) {
738 0 : identity_balance = identity_acc->meta->lamports;
739 0 : fd_accdb_close_ro( ctx->accdb, identity_acc );
740 0 : }
741 0 : return identity_balance;
742 0 : }
743 :
744 : static void
745 : publish_slot_completed( fd_replay_tile_t * ctx,
746 : fd_stem_context_t * stem,
747 : fd_bank_t * bank,
748 : int is_initial,
749 0 : int is_leader ) {
750 :
751 0 : ulong slot = fd_bank_slot_get( bank );
752 :
753 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ bank->data->idx ];
754 :
755 : /* HACKY: hacky way of checking if we should send a null parent block
756 : id */
757 0 : fd_hash_t parent_block_id = {0};
758 0 : if( FD_UNLIKELY( !is_initial ) ) {
759 0 : parent_block_id = ctx->block_id_arr[ bank->data->parent_idx ].latest_mr;
760 0 : }
761 :
762 0 : fd_hash_t const * bank_hash = fd_bank_bank_hash_query( bank );
763 0 : fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
764 0 : FD_TEST( bank_hash );
765 0 : FD_TEST( block_hash );
766 :
767 0 : if( FD_LIKELY( !is_initial ) ) fd_txncache_finalize_fork( ctx->txncache, bank->data->txncache_fork_id, 0UL, block_hash->uc );
768 :
769 0 : fd_epoch_schedule_t const * epoch_schedule = fd_bank_epoch_schedule_query( bank );
770 0 : ulong slot_idx;
771 0 : ulong epoch = fd_slot_to_epoch( epoch_schedule, slot, &slot_idx );
772 :
773 0 : ctx->metrics.slots_total++;
774 0 : ctx->metrics.transactions_total = fd_bank_txn_count_get( bank );
775 :
776 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
777 0 : slot_info->slot = slot;
778 0 : slot_info->root_slot = ctx->consensus_root_slot;
779 0 : slot_info->storage_slot = ctx->published_root_slot;
780 0 : slot_info->epoch = epoch;
781 0 : slot_info->slot_in_epoch = slot_idx;
782 0 : slot_info->slots_per_epoch = fd_epoch_slot_cnt( epoch_schedule, epoch );
783 0 : slot_info->block_height = fd_bank_block_height_get( bank );
784 0 : slot_info->parent_slot = fd_bank_parent_slot_get( bank );
785 0 : slot_info->block_id = block_id_ele->latest_mr;
786 0 : slot_info->parent_block_id = parent_block_id;
787 0 : slot_info->bank_hash = *bank_hash;
788 0 : slot_info->block_hash = *block_hash;
789 0 : slot_info->transaction_count = fd_bank_txn_count_get( bank );
790 :
791 0 : fd_inflation_t inflation = fd_bank_inflation_get( bank );
792 0 : slot_info->inflation.foundation = inflation.foundation;
793 0 : slot_info->inflation.foundation_term = inflation.foundation_term;
794 0 : slot_info->inflation.terminal = inflation.terminal;
795 0 : slot_info->inflation.initial = inflation.initial;
796 0 : slot_info->inflation.taper = inflation.taper;
797 :
798 0 : fd_rent_t rent = fd_bank_rent_get( bank );
799 0 : slot_info->rent.burn_percent = rent.burn_percent;
800 0 : slot_info->rent.lamports_per_uint8_year = rent.lamports_per_uint8_year;
801 0 : slot_info->rent.exemption_threshold = rent.exemption_threshold;
802 :
803 0 : slot_info->first_fec_set_received_nanos = bank->data->first_fec_set_received_nanos;
804 0 : slot_info->preparation_begin_nanos = bank->data->preparation_begin_nanos;
805 0 : slot_info->first_transaction_scheduled_nanos = bank->data->first_transaction_scheduled_nanos;
806 0 : slot_info->last_transaction_finished_nanos = bank->data->last_transaction_finished_nanos;
807 0 : slot_info->completion_time_nanos = fd_log_wallclock();
808 :
809 : /* refcnt should be incremented by 1 for each consumer that uses
810 : `bank_idx`. Each consumer should decrement the bank's refcnt once
811 : they are done using the bank. */
812 0 : bank->data->refcnt++; /* tower_tile */
813 0 : if( FD_LIKELY( ctx->rpc_enabled ) ) bank->data->refcnt++; /* rpc tile */
814 0 : if( FD_LIKELY( ctx->gui_enabled ) ) bank->data->refcnt++; /* gui tile */
815 0 : slot_info->bank_idx = bank->data->idx;
816 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for tower, rpc, gui", bank->data->idx, slot, bank->data->refcnt ));
817 :
818 0 : slot_info->parent_bank_idx = ULONG_MAX;
819 0 : fd_bank_t parent_bank[1];
820 0 : if( FD_LIKELY( fd_banks_get_parent( parent_bank, ctx->banks, bank ) && ctx->gui_enabled ) ) {
821 0 : parent_bank->data->refcnt++;
822 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for gui", parent_bank->data->idx, fd_bank_slot_get( parent_bank ), parent_bank->data->refcnt ));
823 0 : slot_info->parent_bank_idx = parent_bank->data->idx;
824 0 : }
825 :
826 0 : slot_info->is_leader = is_leader;
827 :
828 0 : FD_BASE58_ENCODE_32_BYTES( ctx->block_id_arr[ bank->data->idx ].latest_mr.uc, block_id_cstr );
829 0 : FD_BASE58_ENCODE_32_BYTES( fd_bank_bank_hash_query( bank )->uc, bank_hash_cstr );
830 0 : FD_LOG_DEBUG(( "publish_slot_completed: bank_idx=%lu slot=%lu bank_hash=%s block_id=%s", bank->data->idx, slot, bank_hash_cstr, block_id_cstr ));
831 :
832 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_SLOT_COMPLETED, ctx->replay_out->chunk, sizeof(fd_replay_slot_completed_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
833 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_slot_completed_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
834 0 : }
835 :
836 : static void
837 : publish_slot_dead( fd_replay_tile_t * ctx,
838 : fd_stem_context_t * stem,
839 : ulong slot,
840 0 : fd_hash_t const * block_id ) {
841 0 : fd_replay_slot_dead_t * slot_dead = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
842 0 : slot_dead->slot = slot;
843 0 : slot_dead->block_id = *block_id;
844 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_SLOT_DEAD, ctx->replay_out->chunk, sizeof(fd_replay_slot_dead_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
845 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_slot_dead_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
846 0 : }
847 :
848 : static void
849 : publish_txn_executed( fd_replay_tile_t * ctx,
850 : fd_stem_context_t * stem,
851 0 : ulong txn_idx ) {
852 0 : fd_sched_txn_info_t * txn_info = fd_sched_get_txn_info( ctx->sched, txn_idx );
853 0 : fd_replay_txn_executed_t * txn_executed = fd_type_pun( fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk ) );
854 0 : *txn_executed->txn = *fd_sched_get_txn( ctx->sched, txn_idx );
855 0 : txn_executed->txn_err = txn_info->txn_err;
856 0 : txn_executed->is_committable = !!(txn_info->flags&FD_SCHED_TXN_IS_COMMITTABLE);
857 0 : txn_executed->is_fees_only = !!(txn_info->flags&FD_SCHED_TXN_IS_FEES_ONLY);
858 0 : txn_executed->tick_parsed = txn_info->tick_parsed;
859 0 : txn_executed->tick_sigverify_disp = txn_info->tick_sigverify_disp;
860 0 : txn_executed->tick_sigverify_done = txn_info->tick_sigverify_done;
861 0 : txn_executed->tick_exec_disp = txn_info->tick_exec_disp;
862 0 : txn_executed->tick_exec_done = txn_info->tick_exec_done;
863 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_TXN_EXECUTED, ctx->replay_out->chunk, sizeof(*txn_executed), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
864 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(*txn_executed), ctx->replay_out->chunk0, ctx->replay_out->wmark );
865 0 : }
866 :
867 : static void
868 : replay_block_finalize( fd_replay_tile_t * ctx,
869 : fd_stem_context_t * stem,
870 0 : fd_bank_t * bank ) {
871 0 : bank->data->last_transaction_finished_nanos = fd_log_wallclock();
872 :
873 0 : FD_TEST( !(bank->data->flags&FD_BANK_FLAGS_FROZEN) );
874 :
875 : /* Set poh hash in bank. */
876 0 : fd_hash_t * poh = fd_sched_get_poh( ctx->sched, bank->data->idx );
877 0 : fd_bank_poh_set( bank, *poh );
878 :
879 : /* Set shred count in bank. */
880 0 : fd_bank_shred_cnt_set( bank, fd_sched_get_shred_cnt( ctx->sched, bank->data->idx ) );
881 :
882 : /* Do hashing and other end-of-block processing. */
883 0 : fd_runtime_block_execute_finalize( bank, ctx->accdb, ctx->capture_ctx );
884 :
885 : /* Copy out cost tracker fields before freezing */
886 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
887 0 : cost_tracker_snap( bank, slot_info );
888 :
889 : /* fetch identity / vote balance updates infrequently */
890 0 : ulong slot = fd_bank_slot_get( bank );
891 0 : fd_funk_txn_xid_t xid = { .ul = { slot, bank->data->idx } };
892 0 : slot_info->identity_balance = FD_UNLIKELY( slot%4096==0UL ) ? get_identity_balance( ctx, xid ) : ULONG_MAX;
893 :
894 : /* Mark the bank as frozen. */
895 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
896 :
897 : /**********************************************************************/
898 : /* Bank hash comparison, and halt if there's a mismatch after replay */
899 : /**********************************************************************/
900 :
901 0 : fd_hash_t const * bank_hash = fd_bank_bank_hash_query( bank );
902 0 : FD_TEST( bank_hash );
903 :
904 : /* Must be last so we can measure completion time correctly, even
905 : though we could technically do this before the hash cmp and vote
906 : tower stuff. */
907 0 : publish_slot_completed( ctx, stem, bank, 0, 0 /* is_leader */ );
908 :
909 0 : # if FD_HAS_FLATCC
910 : /* If enabled, dump the block to a file and reset the dumping
911 : context state */
912 0 : if( FD_UNLIKELY( ctx->dump_proto_ctx && ctx->dump_proto_ctx->dump_block_to_pb ) ) {
913 0 : fd_dump_block_to_protobuf( ctx->block_dump_ctx, ctx->banks, bank, ctx->accdb, ctx->dump_proto_ctx );
914 0 : fd_block_dump_context_reset( ctx->block_dump_ctx );
915 0 : }
916 0 : # endif
917 0 : }
918 :
919 : /**********************************************************************/
920 : /* Leader bank management */
921 : /**********************************************************************/
922 :
923 : static fd_bank_t *
924 : prepare_leader_bank( fd_replay_tile_t * ctx,
925 : ulong slot,
926 : long now,
927 : fd_hash_t const * parent_block_id,
928 0 : fd_stem_context_t * stem ) {
929 0 : long before = fd_log_wallclock();
930 :
931 : /* Make sure that we are not already leader. */
932 0 : FD_TEST( ctx->leader_bank->data==NULL );
933 :
934 0 : fd_block_id_ele_t * parent_ele = fd_block_id_map_ele_query( ctx->block_id_map, parent_block_id, NULL, ctx->block_id_arr );
935 0 : if( FD_UNLIKELY( !parent_ele ) ) {
936 0 : FD_BASE58_ENCODE_32_BYTES( parent_block_id->key, parent_block_id_b58 );
937 0 : FD_LOG_CRIT(( "invariant violation: parent bank index not found for merkle root %s", parent_block_id_b58 ));
938 0 : }
939 0 : ulong parent_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, parent_ele );
940 :
941 0 : fd_bank_t parent_bank[1];
942 0 : if( FD_UNLIKELY( !fd_banks_bank_query( parent_bank, ctx->banks, parent_bank_idx ) ) ) {
943 0 : FD_LOG_CRIT(( "invariant violation: parent bank not found for bank index %lu", parent_bank_idx ));
944 0 : }
945 0 : ulong parent_slot = fd_bank_slot_get( parent_bank );
946 :
947 0 : if( FD_UNLIKELY( !fd_banks_new_bank( ctx->leader_bank, ctx->banks, parent_bank_idx, now ) ) ) {
948 0 : FD_LOG_CRIT(( "invariant violation: leader bank is NULL for slot %lu", slot ));
949 0 : }
950 :
951 0 : if( FD_UNLIKELY( !fd_banks_clone_from_parent( ctx->leader_bank, ctx->banks, ctx->leader_bank->data->idx ) ) ) {
952 0 : FD_LOG_CRIT(( "invariant violation: bank is NULL for slot %lu", slot ));
953 0 : }
954 :
955 0 : ctx->leader_bank->data->preparation_begin_nanos = before;
956 :
957 0 : fd_bank_slot_set( ctx->leader_bank, slot );
958 0 : fd_bank_parent_slot_set( ctx->leader_bank, parent_slot );
959 0 : ctx->leader_bank->data->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, parent_bank->data->txncache_fork_id );
960 : /* prepare the funk transaction for the leader bank */
961 0 : fd_funk_txn_xid_t xid = { .ul = { slot, ctx->leader_bank->data->idx } };
962 0 : fd_funk_txn_xid_t parent_xid = { .ul = { parent_slot, parent_bank_idx } };
963 0 : fd_accdb_attach_child( ctx->accdb_admin, &parent_xid, &xid );
964 0 : fd_progcache_txn_attach_child( ctx->progcache_admin, &parent_xid, &xid );
965 :
966 0 : fd_bank_execution_fees_set( ctx->leader_bank, 0UL );
967 0 : fd_bank_priority_fees_set( ctx->leader_bank, 0UL );
968 0 : fd_bank_shred_cnt_set( ctx->leader_bank, 0UL );
969 0 : fd_bank_tips_set( ctx->leader_bank, 0UL );
970 0 : fd_bank_identity_vote_idx_set( ctx->leader_bank, ULONG_MAX );
971 :
972 : /* Update block height. */
973 0 : fd_bank_block_height_set( ctx->leader_bank, fd_bank_block_height_get( ctx->leader_bank ) + 1UL );
974 :
975 0 : int is_epoch_boundary = 0;
976 0 : fd_runtime_block_execute_prepare( ctx->banks, ctx->leader_bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
977 0 : if( FD_UNLIKELY( is_epoch_boundary ) ) publish_epoch_info( ctx, stem, ctx->leader_bank, 1 );
978 :
979 0 : ulong max_tick_height;
980 0 : if( FD_UNLIKELY( FD_RUNTIME_EXECUTE_SUCCESS!=fd_runtime_compute_max_tick_height( fd_bank_ticks_per_slot_get( parent_bank ), slot, &max_tick_height ) ) ) {
981 0 : FD_LOG_CRIT(( "couldn't compute tick height/max tick height slot %lu ticks_per_slot %lu", slot, fd_bank_ticks_per_slot_get( parent_bank ) ));
982 0 : }
983 0 : fd_bank_max_tick_height_set( ctx->leader_bank, max_tick_height );
984 0 : fd_bank_tick_height_set( ctx->leader_bank, fd_bank_max_tick_height_get( parent_bank ) ); /* The parent's max tick height is our starting tick height. */
985 :
986 : /* Now that a bank has been created for the leader slot, increment the
987 : reference count until we are done with the leader slot. */
988 0 : ctx->leader_bank->data->refcnt++;
989 :
990 0 : return ctx->leader_bank;
991 0 : }
992 :
993 : static inline void
994 0 : maybe_switch_identity( fd_replay_tile_t * ctx ) {
995 :
996 0 : if( FD_LIKELY( fd_keyswitch_state_query( ctx->keyswitch )!=FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) return;
997 :
998 : /* Switch identity */
999 :
1000 0 : FD_LOG_DEBUG(( "keyswitch: switching identity" ));
1001 :
1002 0 : memcpy( ctx->identity_pubkey, ctx->keyswitch->bytes, 32UL );
1003 0 : fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
1004 :
1005 : /* The next leader slot will be incorrect now that the identity has
1006 : switched. The next leader slot normally gets updated based on the
1007 : reset slot returned by tower. */
1008 0 : ulong min_leader_slot = fd_ulong_max( ctx->reset_slot+1UL, fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot+1UL ) );
1009 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, min_leader_slot, ctx->identity_pubkey );
1010 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
1011 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
1012 0 : } else {
1013 0 : ctx->next_leader_tickcount = LONG_MAX;
1014 0 : }
1015 :
1016 0 : ctx->identity_vote_rooted = 0;
1017 0 : ctx->identity_idx++;
1018 0 : fd_vote_tracker_reset( ctx->vote_tracker );
1019 0 : }
1020 :
1021 : static void
1022 : fini_leader_bank( fd_replay_tile_t * ctx,
1023 0 : fd_stem_context_t * stem ) {
1024 :
1025 0 : FD_TEST( ctx->leader_bank->data!=NULL );
1026 0 : FD_TEST( ctx->is_leader );
1027 0 : FD_TEST( ctx->block_id_arr[ ctx->leader_bank->data->idx ].block_id_seen );
1028 0 : FD_TEST( ctx->recv_poh );
1029 :
1030 0 : ctx->leader_bank->data->last_transaction_finished_nanos = fd_log_wallclock();
1031 :
1032 0 : ulong curr_slot = fd_bank_slot_get( ctx->leader_bank );
1033 :
1034 0 : fd_sched_block_add_done( ctx->sched, ctx->leader_bank->data->idx, ctx->leader_bank->data->parent_idx, curr_slot );
1035 :
1036 0 : fd_runtime_block_execute_finalize( ctx->leader_bank, ctx->accdb, ctx->capture_ctx );
1037 :
1038 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1039 0 : cost_tracker_snap( ctx->leader_bank, slot_info );
1040 0 : fd_funk_txn_xid_t xid = { .ul = { curr_slot, ctx->leader_bank->data->idx } };
1041 0 : slot_info->identity_balance = FD_UNLIKELY( curr_slot%4096==0UL ) ? get_identity_balance( ctx, xid ) : ULONG_MAX;
1042 :
1043 0 : fd_banks_mark_bank_frozen( ctx->banks, ctx->leader_bank );
1044 :
1045 0 : fd_hash_t const * bank_hash = fd_bank_bank_hash_query( ctx->leader_bank );
1046 0 : FD_TEST( bank_hash );
1047 :
1048 0 : publish_slot_completed( ctx, stem, ctx->leader_bank, 0, 1 /* is_leader */ );
1049 :
1050 : /* The reference on the bank is finally no longer needed. */
1051 0 : ctx->leader_bank->data->refcnt--;
1052 :
1053 : /* We are no longer leader so we can clear the bank index we use for
1054 : being the leader. */
1055 0 : ctx->leader_bank->data = NULL;
1056 0 : ctx->recv_poh = 0;
1057 0 : ctx->is_leader = 0;
1058 :
1059 0 : maybe_switch_identity( ctx );
1060 0 : }
1061 :
1062 : static void
1063 : publish_root_advanced( fd_replay_tile_t * ctx,
1064 0 : fd_stem_context_t * stem ) {
1065 :
1066 0 : fd_bank_t bank[1];
1067 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, ctx->consensus_root_bank_idx ) ) ) {
1068 0 : FD_LOG_CRIT(( "invariant violation: consensus root bank is NULL at bank index %lu", ctx->consensus_root_bank_idx ));
1069 0 : }
1070 :
1071 0 : if( ctx->rpc_enabled ) {
1072 0 : bank->data->refcnt++;
1073 0 :     FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for rpc", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
1074 0 : }
1075 :
1076 : /* Increment the reference count on the consensus root bank to account
1077 : for the number of resolv tiles that are waiting on it. */
1078 0 : bank->data->refcnt += ctx->resolv_tile_cnt;
1079 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for resolv", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
1080 :
1081 0 : fd_replay_root_advanced_t * msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1082 0 : msg->bank_idx = bank->data->idx;
1083 :
1084 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_ROOT_ADVANCED, ctx->replay_out->chunk, sizeof(fd_replay_root_advanced_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
1085 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_root_advanced_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
1086 0 : }
1087 :
1088 : /* init_funk performs pre-flight checks for the account database and
1089 : program cache. Ensures that the account database was set up
1090 : correctly by bootstrap components (e.g. genesis or snapshot loader).
1091 : Mirrors the account database's fork tree down to the program cache. */
1092 :
1093 : static void
1094 : init_funk( fd_replay_tile_t * ctx,
1095 0 : ulong bank_slot ) {
1096 : /* Ensure that the loaded bank root corresponds to the account
1097 : database's root. */
1098 0 : fd_funk_t * funk = fd_accdb_user_v1_funk( ctx->accdb );
1099 0 : if( FD_UNLIKELY( !funk->shmem ) ) {
1100 0 : FD_LOG_CRIT(( "failed to initialize account database: replay tile is not joined to database shared memory objects" ));
1101 0 : }
1102 0 : fd_funk_txn_xid_t const * accdb_pub = fd_funk_last_publish( funk );
1103 0 : if( FD_UNLIKELY( accdb_pub->ul[0]!=bank_slot ) ) {
1104 0 : FD_LOG_CRIT(( "failed to initialize account database: accdb is at slot %lu, but chain state is at slot %lu\n"
1105 0 : "This is a bug in startup components.",
1106 0 : accdb_pub->ul[0], bank_slot ));
1107 0 : }
1108 0 : if( FD_UNLIKELY( fd_funk_last_publish_is_frozen( funk ) ) ) {
1109 0 : FD_LOG_CRIT(( "failed to initialize account database: accdb fork graph is not clean.\n"
1110 0 : "The account database should only contain state for the root slot at this point,\n"
1111 0 : "but there are incomplete database transactions leftover.\n"
1112 0 : "This is a bug in startup components." ));
1113 0 : }
1114 :
1115 : /* The program cache tracks the account database's fork graph at all
1116 : times. Perform initial synchronization: pivot from funk 'root' (a
1117 : sentinel XID) to 'last publish' (the bootstrap root slot). */
1118 0 : if( FD_UNLIKELY( !ctx->progcache_admin->funk->shmem ) ) {
1119 0 : FD_LOG_CRIT(( "failed to initialize account database: replay tile is not joined to program cache" ));
1120 0 : }
1121 0 : fd_progcache_clear( ctx->progcache_admin );
1122 :
1123 0 : fd_funk_txn_xid_t last_publish = fd_accdb_root_get( ctx->accdb_admin );
1124 0 : fd_progcache_txn_attach_child( ctx->progcache_admin, fd_funk_root( ctx->progcache_admin->funk ), &last_publish );
1125 0 : fd_progcache_txn_advance_root( ctx->progcache_admin, &last_publish );
1126 0 : }
1127 :
1128 : static void
1129 0 : init_after_snapshot( fd_replay_tile_t * ctx ) {
1130 : /* Now that the snapshot has been loaded in, we have to refresh the
1131 : stake delegations since the manifest does not contain the full set
1132 : of data required for the stake delegations. See
1133 : fd_stake_delegations.h for why this is required. */
1134 :
1135 0 : fd_bank_t bank[1];
1136 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ) ) ) {
1137 0 : FD_LOG_CRIT(( "invariant violation: replay bank is NULL at bank index %lu", FD_REPLAY_BOOT_BANK_IDX ));
1138 0 : }
1139 :
1140 0 : fd_funk_txn_xid_t xid = { .ul = { fd_bank_slot_get( bank ), bank->data->idx } };
1141 0 : init_funk( ctx, fd_bank_slot_get( bank ) );
1142 :
1143 0 : fd_stake_delegations_t * root_delegations = fd_banks_stake_delegations_root_query( ctx->banks );
1144 :
1145 0 : fd_stake_delegations_refresh( root_delegations, ctx->accdb, &xid );
1146 :
1147 0 : fd_vote_stakes_t * vote_stakes = fd_bank_vote_stakes_locking_modify( bank );
1148 0 : ushort fork_idx = bank->data->vote_stakes_fork_id;
1149 :
1150 0 : ulong stale_accs = 0UL;
1151 0 : uchar __attribute__((aligned(FD_VOTE_STAKES_ITER_ALIGN))) iter_mem[ FD_VOTE_STAKES_ITER_FOOTPRINT ];
1152 0 : for( fd_vote_stakes_iter_t * iter = fd_vote_stakes_fork_iter_init( vote_stakes, fork_idx, iter_mem );
1153 0 : !fd_vote_stakes_fork_iter_done( vote_stakes, fork_idx, iter );
1154 0 : fd_vote_stakes_fork_iter_next( vote_stakes, fork_idx, iter ) ) {
1155 0 : fd_pubkey_t pubkey;
1156 0 : fd_vote_stakes_fork_iter_ele( vote_stakes, fork_idx, iter, &pubkey, NULL, NULL, NULL, NULL );
1157 :
1158 0 : fd_accdb_ro_t acc[1];
1159 0 : if( FD_UNLIKELY( !fd_accdb_open_ro( ctx->accdb, acc, &xid, &pubkey ) ) ) {
1160 0 : ctx->runtime_stack.vote_accounts.stale_accs[stale_accs++] = pubkey;
1161 0 : continue;
1162 0 : }
1163 0 : fd_accdb_close_ro( ctx->accdb, acc );
1164 0 : }
1165 :
1166 0 : for( ulong i=0UL; i<stale_accs; i++ ) {
1167 0 : fd_vote_stakes_root_purge_key( vote_stakes, &ctx->runtime_stack.vote_accounts.stale_accs[i] );
1168 0 : }
1169 :
1170 0 : fd_bank_vote_stakes_end_locking_modify( bank );
1171 :
1172 :
1173 :
1174 : /* After both snapshots have been loaded in, we can determine if we should
1175 : start distributing rewards. */
1176 :
1177 0 : fd_rewards_recalculate_partitioned_rewards( ctx->banks, bank, ctx->accdb, &xid, &ctx->runtime_stack, ctx->capture_ctx );
1178 :
1179 0 : ulong snapshot_slot = fd_bank_slot_get( bank );
1180 0 : if( FD_UNLIKELY( !snapshot_slot ) ) {
1181 : /* Genesis-specific setup. */
1182 : /* FIXME: This branch does not set up a new block exec ctx
1183 : properly. Needs to do whatever prepare_new_block_execution
1184 : does, but just hacking that in breaks stuff. */
1185 0 : fd_runtime_update_leaders( bank, &ctx->runtime_stack );
1186 :
1187 0 : ulong hashcnt_per_slot = fd_bank_hashes_per_tick_get( bank ) * fd_bank_ticks_per_slot_get( bank );
1188 0 : fd_hash_t * poh = fd_bank_poh_modify( bank );
1189 0 : while( hashcnt_per_slot-- ) {
1190 0 : fd_sha256_hash( poh->hash, 32UL, poh->hash );
1191 0 : }
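/* Worked example for the spin above (illustrative numbers; the real
   values come from the bank): with mainnet-like parameters of 12500
   hashes per tick and 64 ticks per slot, the loop performs
   12500*64 = 800000 chained SHA-256 self-hashes to advance the PoH
   hash through the genesis slot:

     poh <- sha256( poh ), repeated hashcnt_per_slot times. */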
1192 :
1193 0 : int is_epoch_boundary = 0;
1194 0 : fd_runtime_block_execute_prepare( ctx->banks, bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
1195 0 : FD_TEST( !is_epoch_boundary );
1196 0 : fd_runtime_block_execute_finalize( bank, ctx->accdb, ctx->capture_ctx );
1197 :
1198 0 : snapshot_slot = 0UL;
1199 0 : }
1200 0 : }
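/* A note on the funk transaction XID convention used throughout this
   tile (a sketch inferred from the constructions above, not a formal
   spec): an XID is the pair (slot, bank index), which uniquely names
   a block's account state even when multiple forks contain a block
   at the same slot:

     fd_funk_txn_xid_t xid_a = { .ul = { slot, bank_a_idx } };
     fd_funk_txn_xid_t xid_b = { .ul = { slot, bank_b_idx } };

   xid_a and xid_b address two different forks' versions of the same
   slot, while { LONG_MAX, LONG_MAX } serves as the sentinel root
   (see boot_genesis below). */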
1201 :
1202 : static inline int
1203 : maybe_become_leader( fd_replay_tile_t * ctx,
1204 0 : fd_stem_context_t * stem ) {
1205 0 : FD_TEST( ctx->is_booted );
1206 0 : if( FD_LIKELY( ctx->next_leader_slot==ULONG_MAX || ctx->is_leader || (!ctx->identity_vote_rooted && ctx->wait_for_vote_to_start_leader) || ctx->replay_out->idx==ULONG_MAX || !ctx->wfs_complete ) ) return 0;
1207 0 : if( FD_UNLIKELY( ctx->halt_leader ) ) return 0;
1208 0 : if( !ctx->supports_leader ) return 0;
1209 :
1210 0 : FD_TEST( ctx->next_leader_slot>ctx->reset_slot );
1211 0 : long now = fd_tickcount();
1212 0 : if( FD_LIKELY( now<ctx->next_leader_tickcount ) ) return 0;
1213 :
1214 : /* If a prior leader is still in the process of publishing their slot,
1215 : delay ours to let them finish ... unless they are so delayed that
1216 : we risk getting skipped by the leader following us. 3 slot
1217 : durations (1.2 seconds at the standard 400 ms slot time) is a
1218 : reasonable default here, although any value between 0 and 1.6
1219 : seconds could be considered reasonable. This is arbitrary and chosen by intuition. */
1220 0 : if( FD_UNLIKELY( now<ctx->next_leader_tickcount+(long)(3.0*ctx->slot_duration_ticks) ) ) {
1221 0 : FD_TEST( ctx->reset_bank->data );
1222 :
1223 : /* TODO: Make the max_active_descendant calculation more efficient
1224 : by caching it in the bank structure and updating it as banks are
1225 : created and completed. */
1226 0 : ulong max_active_descendant = 0UL;
1227 0 : ulong child_idx = ctx->reset_bank->data->child_idx;
1228 0 : while( child_idx!=ULONG_MAX ) {
1229 0 : fd_bank_t child_bank[1];
1230 0 : fd_banks_bank_query( child_bank, ctx->banks, child_idx );
1231 0 : max_active_descendant = fd_ulong_max( max_active_descendant, fd_bank_slot_get( child_bank ) );
1232 0 : child_idx = child_bank->data->sibling_idx;
1233 0 : }
1234 :
1235 : /* If the max_active_descendant is >= next_leader_slot, we waited
1236 : too long and a leader after us started publishing to try and skip
1237 : us. Just start our leader slot immediately, we might win ... */
1238 0 : if( FD_LIKELY( max_active_descendant>=ctx->reset_slot && max_active_descendant<ctx->next_leader_slot ) ) {
1239 : /* If one of the leaders between the reset slot and our leader
1240 : slot is in the process of publishing (they have a descendant
1241 : bank that is in progress of being replayed), then keep waiting.
1242 : We probably wouldn't get a leader slot out before they
1243 : finished.
1244 :
1245 : Unless... we are past the deadline to start our slot by more
1246 : than 1.2 seconds, in which case we should probably start it to
1247 : avoid getting skipped by the leader behind us. */
1248 0 : return 0;
1249 0 : }
1250 0 : }
1251 :
1252 0 : long now_nanos = fd_log_wallclock();
1253 :
1254 0 : ctx->is_leader = 1;
1255 0 : ctx->recv_poh = 0;
1256 :
1257 0 : FD_TEST( ctx->highwater_leader_slot==ULONG_MAX || ctx->highwater_leader_slot<ctx->next_leader_slot );
1258 0 : ctx->highwater_leader_slot = ctx->next_leader_slot;
1259 :
1260 0 : FD_LOG_INFO(( "becoming leader for slot %lu, parent slot is %lu", ctx->next_leader_slot, ctx->reset_slot ));
1261 :
1262 : /* Acquires bank, sets up initial state, and refcnts it. */
1263 0 : fd_bank_t * bank = prepare_leader_bank( ctx, ctx->next_leader_slot, now_nanos, &ctx->reset_block_id, stem );
1264 0 : fd_funk_txn_xid_t xid = { .ul = { ctx->next_leader_slot, ctx->leader_bank->data->idx } };
1265 :
1266 0 : fd_bundle_crank_tip_payment_config_t config[1] = { 0 };
1267 0 : fd_pubkey_t tip_receiver_owner = {0};
1268 :
1269 0 : if( FD_UNLIKELY( ctx->bundle.enabled ) ) {
1270 0 : fd_acct_addr_t tip_payment_config[1];
1271 0 : fd_acct_addr_t tip_receiver[1];
1272 0 : fd_bundle_crank_get_addresses( ctx->bundle.gen, fd_bank_epoch_get( bank ), tip_payment_config, tip_receiver );
1273 :
1274 0 : fd_accdb_ro_t tip_config_acc[1];
1275 0 : if( FD_UNLIKELY( !fd_accdb_open_ro( ctx->accdb, tip_config_acc, &xid, tip_payment_config ) ) ) {
1276 : /* FIXME This should not crash the validator */
1277 0 : FD_BASE58_ENCODE_32_BYTES( tip_payment_config->b, tip_config_acc_b58 );
1278 0 : FD_LOG_CRIT(( "tip payment config account %s does not exist", tip_config_acc_b58 ));
1279 0 : }
1280 0 : ulong tip_cfg_sz = fd_accdb_ref_data_sz( tip_config_acc );
1281 0 : if( FD_UNLIKELY( tip_cfg_sz < sizeof(fd_bundle_crank_tip_payment_config_t) ) ) {
1282 : /* FIXME This should not crash the validator */
1283 0 : FD_LOG_HEXDUMP_CRIT(( "invalid tip payment config account data", fd_accdb_ref_data_const( tip_config_acc ), tip_cfg_sz ));
1284 0 : }
1285 0 : memcpy( config, fd_accdb_ref_data_const( tip_config_acc ), sizeof(fd_bundle_crank_tip_payment_config_t) );
1286 0 : fd_accdb_close_ro( ctx->accdb, tip_config_acc );
1287 :
1288 : /* It is possible that the tip receiver account does not exist yet
1289 : if this is the first time it is used in an epoch. */
1290 0 : fd_accdb_ro_t tip_receiver_acc[1];
1291 0 : if( FD_LIKELY( fd_accdb_open_ro( ctx->accdb, tip_receiver_acc, &xid, tip_receiver ) ) ) {
1292 0 : tip_receiver_owner = FD_LOAD( fd_pubkey_t, fd_accdb_ref_owner( tip_receiver_acc ) );
1293 0 : fd_accdb_close_ro( ctx->accdb, tip_receiver_acc );
1294 0 : }
1295 0 : }
1296 :
1297 :
1298 0 : fd_became_leader_t * msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1299 0 : msg->slot = ctx->next_leader_slot;
1300 0 : msg->slot_start_ns = now_nanos;
1301 0 : msg->slot_end_ns = now_nanos+(long)ctx->slot_duration_nanos;
1302 0 : msg->bank = NULL;
1303 0 : msg->bank_idx = bank->data->idx;
1304 0 : msg->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
1305 0 : msg->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
1306 0 : msg->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)msg->ticks_per_slot);
1307 0 : msg->bundle->config[0] = config[0];
1308 0 : memcpy( msg->bundle->last_blockhash, fd_bank_poh_query( bank )->hash, sizeof(fd_hash_t) );
1309 0 : memcpy( msg->bundle->tip_receiver_owner, tip_receiver_owner.uc, sizeof(fd_pubkey_t) );
1310 :
1311 0 : if( FD_UNLIKELY( msg->hashcnt_per_tick==1UL ) ) {
1312 : /* Low power producer, maximum of one microblock per tick in the slot */
1313 0 : msg->max_microblocks_in_slot = msg->ticks_per_slot;
1314 0 : } else {
1315 : /* See the long comment in after_credit for this limit */
1316 0 : msg->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, msg->ticks_per_slot*(msg->hashcnt_per_tick-1UL) );
1317 0 : }
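/* Worked example for the cap above (illustrative numbers; actual
   values come from the bank): with ticks_per_slot = 64 and
   hashcnt_per_tick = 12500,

     max_microblocks_in_slot = min( MAX_MICROBLOCKS_PER_SLOT,
                                    64*(12500-1UL) )    64*12499 = 799936

   so in practice whatever MAX_MICROBLOCKS_PER_SLOT is defined as
   elsewhere dominates. In the low-power case (hashcnt_per_tick == 1)
   at most one microblock fits per tick, giving ticks_per_slot = 64. */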
1318 :
1319 0 : msg->total_skipped_ticks = msg->ticks_per_slot*(ctx->next_leader_slot-ctx->reset_slot);
1320 0 : msg->epoch = fd_slot_to_epoch( fd_bank_epoch_schedule_query( bank ), ctx->next_leader_slot, NULL );
1321 :
1322 0 : fd_cost_tracker_t const * cost_tracker = fd_bank_cost_tracker_locking_query( bank );
1323 :
1324 0 : msg->limits.slot_max_cost = ctx->larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : cost_tracker->block_cost_limit;
1325 0 : msg->limits.slot_max_vote_cost = cost_tracker->vote_cost_limit;
1326 0 : msg->limits.slot_max_write_cost_per_acct = cost_tracker->account_cost_limit;
1327 :
1328 0 : fd_bank_cost_tracker_end_locking_query( bank );
1329 :
1330 0 : if( FD_UNLIKELY( msg->ticks_per_slot+msg->total_skipped_ticks>USHORT_MAX ) ) {
1331 : /* There can be at most USHORT_MAX skipped ticks, because the
1332 : parent_offset field in the shred data is only 2 bytes wide. */
1333 0 : FD_LOG_ERR(( "too many skipped ticks %lu for slot %lu, chain must halt", msg->ticks_per_slot+msg->total_skipped_ticks, ctx->next_leader_slot ));
1334 0 : }
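/* Worked bound for the check above: with the typical 64 ticks per
   slot, the total tick count exceeds USHORT_MAX after roughly
   65535/64 ~= 1023 consecutively skipped slots, at which point the
   2-byte parent_offset can no longer encode the distance back to the
   parent block. */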
1335 :
1336 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_BECAME_LEADER, ctx->replay_out->chunk, sizeof(fd_became_leader_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
1337 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_became_leader_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
1338 :
1339 0 : ctx->next_leader_slot = ULONG_MAX;
1340 0 : ctx->next_leader_tickcount = LONG_MAX;
1341 :
1342 0 : return 1;
1343 0 : }
1344 :
1345 : static void
1346 : process_poh_message( fd_replay_tile_t * ctx,
1347 0 : fd_poh_leader_slot_ended_t const * slot_ended ) {
1348 :
1349 0 : FD_TEST( ctx->is_booted );
1350 0 : FD_TEST( ctx->is_leader );
1351 0 : FD_TEST( ctx->leader_bank->data!=NULL );
1352 :
1353 0 : FD_TEST( ctx->highwater_leader_slot>=slot_ended->slot );
1354 0 : FD_TEST( ctx->next_leader_slot>ctx->highwater_leader_slot );
1355 :
1356 : /* Update the poh hash in the bank. We will want to maintain a refcnt
1357 : on the bank until we have received the block id for the block after
1358 : it has been shredded. */
1359 :
1360 0 : memcpy( fd_bank_poh_modify( ctx->leader_bank ), slot_ended->blockhash, sizeof(fd_hash_t) );
1361 :
1362 0 : ctx->recv_poh = 1;
1363 0 : }
1364 :
1365 : static void
1366 : publish_reset( fd_replay_tile_t * ctx,
1367 : fd_stem_context_t * stem,
1368 0 : fd_bank_t * bank ) {
1369 0 : if( FD_UNLIKELY( ctx->replay_out->idx==ULONG_MAX ) ) return;
1370 :
1371 0 : fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
1372 0 : FD_TEST( block_hash );
1373 :
1374 0 : fd_poh_reset_t * reset = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1375 :
1376 0 : reset->bank_idx = bank->data->idx;
1377 0 : reset->timestamp = fd_log_wallclock();
1378 0 : reset->completed_slot = fd_bank_slot_get( bank );
1379 0 : reset->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
1380 0 : reset->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
1381 0 : reset->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)reset->ticks_per_slot);
1382 0 : fd_memcpy( reset->completed_block_id, ctx->reset_block_id.uc, sizeof(fd_hash_t) );
1383 0 : fd_memcpy( reset->completed_blockhash, block_hash->uc, sizeof(fd_hash_t) );
1384 :
1385 0 : ulong ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
1386 0 : if( FD_UNLIKELY( reset->hashcnt_per_tick==1UL ) ) {
1387 : /* Low power producer, maximum of one microblock per tick in the slot */
1388 0 : reset->max_microblocks_in_slot = ticks_per_slot;
1389 0 : } else {
1390 : /* See the long comment in after_credit for this limit */
1391 0 : reset->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ticks_per_slot*(reset->hashcnt_per_tick-1UL) );
1392 0 : }
1393 0 : reset->next_leader_slot = ctx->next_leader_slot;
1394 :
1395 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_RESET, ctx->replay_out->chunk, sizeof(fd_poh_reset_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
1396 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_poh_reset_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
1397 0 : }
1398 :
1399 : static void
1400 : store_xinsert( fd_store_t * store,
1401 0 : fd_hash_t const * merkle_root ) {
1402 0 : fd_store_pool_t pool = {
1403 0 : .pool = fd_wksp_laddr_fast( fd_store_wksp( store ), store->pool_mem_gaddr ),
1404 0 : .ele = fd_wksp_laddr_fast( fd_store_wksp( store ), store->pool_ele_gaddr ),
1405 0 : .ele_max = store->fec_max
1406 0 : };
1407 0 : int err; fd_store_fec_t * fec = fd_store_pool_acquire( &pool, NULL, 1 /* blocking */, &err );
1408 0 : if( FD_UNLIKELY( err!=FD_POOL_SUCCESS ) ) FD_LOG_CRIT(( "store pool: %s", fd_store_pool_strerror( err ) ));
1409 0 : fec->key.merkle_root = *merkle_root;
1410 0 : fec->key.part_idx = 0;
1411 0 : fec->cmr = (fd_hash_t){ 0 };
1412 0 : fec->next = fd_store_pool_idx_null();
1413 0 : fec->data_sz = 0UL;
1414 :
1415 0 : FD_STORE_XLOCK_BEGIN( store ) {
1416 0 : fd_store_map_ele_insert( fd_wksp_laddr_fast( fd_store_wksp( store ), store->map_gaddr ), fec, pool.ele );
1417 0 : } FD_STORE_XLOCK_END;
1418 0 : }
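/* The reader side of this lock pairing appears in insert_fec_set
   below: writers insert under the exclusive lock as above, while
   queries take the shared lock. A minimal sketch:

     FD_STORE_SLOCK_BEGIN( store ) {
       fd_store_fec_t * fec = fd_store_query( store, &merkle_root );
       ... copy out or ingest while the shared lock is held ...
     } FD_STORE_SLOCK_END;
*/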
1419 :
1420 : static void
1421 : boot_genesis( fd_replay_tile_t * ctx,
1422 : fd_stem_context_t * stem,
1423 0 : fd_genesis_meta_t const * meta ) {
1424 : /* If we are bootstrapping, we can't wait for our identity vote
1425 : to be rooted, as this creates a circular dependency. */
1426 0 : ctx->identity_vote_rooted = 1;
1427 :
1428 0 : uchar const * genesis_blob = (uchar const *)( meta+1 );
1429 0 : FD_TEST( meta->bootstrap && meta->has_lthash );
1430 0 : FD_TEST( fd_genesis_parse( ctx->genesis, genesis_blob, meta->blob_sz ) );
1431 :
1432 0 : fd_bank_t bank[1];
1433 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ) );
1434 0 : fd_funk_txn_xid_t xid = { .ul = { 0UL, FD_REPLAY_BOOT_BANK_IDX } };
1435 :
1436 : /* Do genesis-related processing in a non-rooted transaction */
1437 0 : fd_funk_txn_xid_t root_xid = { .ul = { LONG_MAX, LONG_MAX } };
1438 0 : fd_funk_txn_xid_t target_xid = { .ul = { 0UL, 0UL } };
1439 0 : fd_accdb_attach_child( ctx->accdb_admin, &root_xid, &target_xid );
1440 0 : fd_runtime_read_genesis( ctx->banks, bank, ctx->accdb, &xid, NULL, &meta->genesis_hash, &meta->lthash, ctx->genesis, genesis_blob, &ctx->runtime_stack );
1441 0 : fd_accdb_advance_root( ctx->accdb_admin, &target_xid );
1442 :
1443 0 : static const fd_txncache_fork_id_t txncache_root = { .val = USHORT_MAX };
1444 0 : bank->data->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, txncache_root );
1445 :
1446 0 : fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
1447 0 : fd_txncache_finalize_fork( ctx->txncache, bank->data->txncache_fork_id, 0UL, block_hash->uc );
1448 :
1449 0 : publish_epoch_info( ctx, stem, bank, 0 );
1450 0 : publish_epoch_info( ctx, stem, bank, 1 );
1451 :
1452 : /* We call this after fd_runtime_read_genesis, which sets up the
1453 : slot_bank needed in blockstore_init. */
1454 0 : init_after_snapshot( ctx );
1455 :
1456 0 : ctx->published_root_slot = 0UL;
1457 0 : fd_sched_block_add_done( ctx->sched, bank->data->idx, ULONG_MAX, 0UL );
1458 :
1459 0 : fd_bank_block_height_set( bank, 1UL );
1460 :
1461 0 : ctx->consensus_root = ctx->initial_block_id;
1462 0 : ctx->consensus_root_slot = 0UL;
1463 0 : ctx->consensus_root_bank_idx = 0UL;
1464 0 : ctx->published_root_slot = 0UL;
1465 0 : ctx->published_root_bank_idx = 0UL;
1466 :
1467 0 : ctx->reset_slot = 0UL;
1468 0 : fd_memcpy( ctx->reset_bank, bank, sizeof(fd_bank_t) );
1469 0 : ctx->reset_timestamp_nanos = fd_log_wallclock();
1470 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, 1UL, ctx->identity_pubkey );
1471 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
1472 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
1473 0 : } else {
1474 0 : ctx->next_leader_tickcount = LONG_MAX;
1475 0 : }
1476 :
1477 0 : ctx->is_booted = 1;
1478 0 : maybe_become_leader( ctx, stem );
1479 :
1480 0 : fd_hash_t initial_block_id = ctx->initial_block_id;
1481 0 : fd_reasm_fec_t * fec = fd_reasm_insert( ctx->reasm, &initial_block_id, NULL, 0 /* genesis slot */, 0, 0, 0, 0, 1, 0 ); /* FIXME manifest block_id */
1482 0 : fec->bank_idx = bank->data->idx;
1483 0 : fec->bank_seq = bank->data->bank_seq;
1484 0 : store_xinsert( ctx->store, &initial_block_id );
1485 :
1486 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ 0 ];
1487 0 : block_id_ele->latest_mr = initial_block_id;
1488 0 : block_id_ele->slot = 0UL;
1489 :
1490 0 : FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
1491 :
1492 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1493 0 : slot_info->identity_balance = get_identity_balance( ctx, xid );
1494 :
1495 0 : publish_slot_completed( ctx, stem, bank, 1, 0 /* is_leader */ );
1496 0 : publish_root_advanced( ctx, stem );
1497 0 : publish_reset( ctx, stem, bank );
1498 0 : }
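/* Worked example for the leader deadline arithmetic above: if
   reset_slot is 100 and next_leader_slot is 105, the deadline is
   armed (105-100-1) = 4 slot durations from now, i.e. at the expected
   start of slot 105 (slots 101..104 each occupy one slot duration):

     next_leader_tickcount = fd_tickcount() + (long)(4.0*slot_duration_ticks)
*/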
1499 :
1500 : static inline void
1501 0 : maybe_verify_cluster_type( fd_replay_tile_t * ctx ) {
1502 0 : if( FD_UNLIKELY( !ctx->is_booted || !ctx->has_genesis_hash ) ) {
1503 0 : return;
1504 0 : }
1505 :
1506 0 : FD_BASE58_ENCODE_32_BYTES( ctx->genesis_hash->uc, hash_cstr );
1507 0 : ulong cluster = fd_genesis_cluster_identify( hash_cstr );
1508 : /* Map pyth-related clusters to unknown. */
1509 0 : switch( cluster ) {
1510 0 : case FD_CLUSTER_PYTHNET:
1511 0 : case FD_CLUSTER_PYTHTEST:
1512 0 : cluster = FD_CLUSTER_UNKNOWN;
1513 0 : }
1514 :
1515 0 : if( FD_UNLIKELY( cluster!=ctx->cluster_type ) ) {
1516 0 : FD_LOG_ERR(( "Your genesis.bin file at `%s` has a genesis hash of `%s` which means the cluster is %s "
1517 0 : "but the snapshot you loaded is for a different cluster %s. If you are trying to join the "
1518 0 : "%s cluster, you can delete the genesis.bin file and restart the node to download the correct "
1519 0 : "genesis file automatically.",
1520 0 : ctx->genesis_path,
1521 0 : hash_cstr,
1522 0 : fd_genesis_cluster_name( cluster ),
1523 0 : fd_genesis_cluster_name( ctx->cluster_type ),
1524 0 : fd_genesis_cluster_name( ctx->cluster_type ) ));
1525 0 : }
1526 0 : }
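/* For example (the exact enum spelling lives in
   fd_genesis_cluster.h), the well-known mainnet genesis hash
   5eykt4UsFv8P8NJdTREpY1vzqKqZKvdpKuc147dw2N9d identifies the
   mainnet-beta cluster, so booting with a mainnet genesis.bin
   against a testnet snapshot trips the FD_LOG_ERR above. */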
1527 :
1528 : static void
1529 : on_snapshot_message( fd_replay_tile_t * ctx,
1530 : fd_stem_context_t * stem,
1531 : ulong in_idx,
1532 : ulong chunk,
1533 0 : ulong sig ) {
1534 0 : ulong msg = fd_ssmsg_sig_message( sig );
1535 0 : if( FD_LIKELY( msg==FD_SSMSG_DONE ) ) {
1536 : /* An end of message notification indicates the snapshot is loaded.
1537 : Replay is able to start executing from this point onwards. */
1538 : /* TODO: replay should finish booting. Could make replay a
1539 : state machine and set the state here accordingly. */
1540 0 : ctx->is_booted = 1;
1541 :
1542 0 : fd_bank_t bank[1];
1543 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ) ) ) {
1544 0 : FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", FD_REPLAY_BOOT_BANK_IDX ));
1545 0 : }
1546 :
1547 0 : ulong snapshot_slot = fd_bank_slot_get( bank );
1548 :
1549 0 : fd_hash_t bank_hash = fd_bank_bank_hash_get( bank );
1550 0 : if( FD_UNLIKELY( ctx->wfs_enabled && memcmp( ctx->expected_bank_hash.uc, bank_hash.uc, sizeof(fd_hash_t) ) ) ) {
1551 0 : FD_BASE58_ENCODE_32_BYTES( ctx->expected_bank_hash.uc, expected_bank_hash_cstr );
1552 0 : FD_BASE58_ENCODE_32_BYTES( bank_hash.uc, actual_bank_hash_cstr );
1553 0 : FD_LOG_ERR(( "[consensus.wait_for_supermajority_with_bank_hash] expected_bank_hash=%s does not match snapshot slot"
1554 0 : "=%lu bank_hash=%s. If you are loading a snapshot from the network, check that the slot matches the "
1555 0 : "cluster restart slot. ", expected_bank_hash_cstr, snapshot_slot, actual_bank_hash_cstr ));
1556 0 : }
1557 0 : if( FD_UNLIKELY( ctx->wfs_enabled ) ) {
1558 0 : FD_LOG_NOTICE(( "waiting for supermajority at snapshot slot %lu", snapshot_slot ));
1559 0 : }
1560 :
1561 : /* FIXME: This is a hack because the block id of the snapshot slot
1562 : is not provided in the snapshot. A possible solution is to get
1563 : the block id of the snapshot slot from repair. */
1564 0 : fd_hash_t manifest_block_id = ctx->initial_block_id;
1565 :
1566 0 : fd_funk_txn_xid_t xid = { .ul = { snapshot_slot, FD_REPLAY_BOOT_BANK_IDX } };
1567 0 : fd_features_restore( bank, ctx->accdb, &xid );
1568 :
1569 : /* Typically, when we cross an epoch boundary during normal
1570 : operation, we publish the stake weights for the new epoch. But
1571 : since we are starting from a snapshot, we need to publish two
1572 : epochs worth of stake weights: the previous epoch (which is
1573 : needed for voting on the current epoch), and the current epoch
1574 : (which is needed for voting on the next epoch). */
1575 0 : publish_epoch_info( ctx, stem, bank, 0 );
1576 0 : publish_epoch_info( ctx, stem, bank, 1 );
1577 :
1578 0 : ctx->consensus_root = manifest_block_id;
1579 0 : ctx->consensus_root_slot = snapshot_slot;
1580 0 : ctx->consensus_root_bank_idx = 0UL;
1581 0 : ctx->published_root_slot = ctx->consensus_root_slot;
1582 0 : ctx->published_root_bank_idx = 0UL;
1583 :
1584 0 : ctx->reset_slot = snapshot_slot;
1585 0 : fd_memcpy( ctx->reset_bank, bank, sizeof(fd_bank_t) );
1586 0 : ctx->reset_timestamp_nanos = fd_log_wallclock();
1587 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, 1UL, ctx->identity_pubkey );
1588 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
1589 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
1590 0 : } else {
1591 0 : ctx->next_leader_tickcount = LONG_MAX;
1592 0 : }
1593 :
1594 0 : fd_sched_block_add_done( ctx->sched, bank->data->idx, ULONG_MAX, snapshot_slot );
1595 0 : FD_TEST( bank->data->idx==0UL );
1596 :
1597 0 : fd_runtime_update_leaders( bank, &ctx->runtime_stack );
1598 :
1599 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ 0 ];
1600 0 : block_id_ele->latest_mr = manifest_block_id;
1601 0 : block_id_ele->slot = snapshot_slot;
1602 0 : block_id_ele->block_id_seen = 1;
1603 0 : block_id_ele->latest_fec_idx = 0U;
1604 0 : FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
1605 :
1606 : /* We call this after the snapshot manifest has been loaded in,
1607 : which sets up the slot_bank needed in blockstore_init. */
1608 0 : init_after_snapshot( ctx );
1609 :
1610 0 : fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
1611 0 : slot_info->identity_balance = get_identity_balance( ctx, xid );
1612 :
1613 0 : publish_slot_completed( ctx, stem, bank, 1, 0 /* is_leader */ );
1614 0 : publish_root_advanced( ctx, stem );
1615 :
1616 0 : fd_reasm_fec_t * fec = fd_reasm_insert( ctx->reasm, &manifest_block_id, NULL, snapshot_slot, 0, 0, 0, 0, 1, 0 ); /* FIXME manifest block_id */
1617 0 : fec->bank_idx = bank->data->idx;
1618 0 : fec->bank_seq = bank->data->bank_seq;
1619 0 : store_xinsert( ctx->store, &manifest_block_id );
1620 :
1621 0 : ctx->cluster_type = fd_bank_cluster_type_get( bank );
1622 :
1623 0 : maybe_verify_cluster_type( ctx );
1624 :
1625 0 : return;
1626 0 : }
1627 :
1628 0 : switch( msg ) {
1629 0 : case FD_SSMSG_MANIFEST_FULL:
1630 0 : case FD_SSMSG_MANIFEST_INCREMENTAL: {
1631 : /* We may either receive a full snapshot manifest or an
1632 : incremental snapshot manifest. Note that this external message
1633 : id is only used temporarily because replay cannot yet receive
1634 : the firedancer-internal snapshot manifest message. */
1635 0 : if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
1636 0 : FD_LOG_ERR(( "chunk %lu from in %d corrupt, not in range [%lu,%lu]", chunk, ctx->in_kind[ in_idx ], ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
1637 :
1638 0 : fd_bank_t bank[1];
1639 0 : fd_ssload_recover( fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ),
1640 0 : ctx->banks,
1641 0 : fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ),
1642 0 : &ctx->runtime_stack,
1643 0 : msg==FD_SSMSG_MANIFEST_INCREMENTAL );
1644 :
1645 0 : fd_snapshot_manifest_t const * manifest = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
1646 0 : ctx->hard_forks_cnt = manifest->hard_forks_len;
1647 0 : for( ulong i=0UL; i<manifest->hard_forks_len; i++ ) {
1648 0 : ctx->hard_forks[ i ] = manifest->hard_forks[ i ];
1649 0 : ctx->hard_forks_cnts[ i ] = manifest->hard_forks_cnts[ i ];
1650 0 : }
1651 0 : ctx->has_expected_genesis_timestamp = 1;
1652 0 : ctx->expected_genesis_timestamp = manifest->creation_time_millis;
1653 0 : break;
1654 0 : }
1655 0 : default: {
1656 0 : FD_LOG_ERR(( "Received unknown snapshot message with msg %lu", msg ));
1657 0 : return;
1658 0 : }
1659 0 : }
1660 :
1661 0 : return;
1662 0 : }
1663 :
1664 : static void
1665 : dispatch_task( fd_replay_tile_t * ctx,
1666 : fd_stem_context_t * stem,
1667 0 : fd_sched_task_t * task ) {
1668 :
1669 0 : switch( task->task_type ) {
1670 0 : case FD_SCHED_TT_TXN_EXEC: {
1671 0 : fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, task->txn_exec->txn_idx );
1672 :
1673 0 : fd_bank_t bank[1];
1674 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->txn_exec->bank_idx ) );
1675 :
1676 0 : # if FD_HAS_FLATCC
1677 : /* Add the transaction to the block dumper if necessary. This
1678 : logic doesn't need to be fork-aware since it's only meant to
1679 : be used in backtest. */
1680 0 : if( FD_UNLIKELY( ctx->dump_proto_ctx && ctx->dump_proto_ctx->dump_block_to_pb ) ) {
1681 0 : fd_dump_block_to_protobuf_collect_tx( ctx->block_dump_ctx, txn_p );
1682 0 : }
1683 0 : # endif
1684 :
1685 0 : bank->data->refcnt++;
1686 :
1687 0 : if( FD_UNLIKELY( !bank->data->first_transaction_scheduled_nanos ) ) bank->data->first_transaction_scheduled_nanos = fd_log_wallclock();
1688 :
1689 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
1690 0 : fd_execrp_txn_exec_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
1691 0 : memcpy( exec_msg->txn, txn_p, sizeof(fd_txn_p_t) );
1692 0 : exec_msg->bank_idx = task->txn_exec->bank_idx;
1693 0 : exec_msg->txn_idx = task->txn_exec->txn_idx;
1694 0 : if( FD_UNLIKELY( ctx->capture_ctx ) ) {
1695 0 : exec_msg->capture_txn_idx = ctx->capture_ctx->current_txn_idx++;
1696 0 : }
1697 0 : fd_stem_publish( stem, exec_out->idx, (FD_EXECRP_TT_TXN_EXEC<<32) | task->txn_exec->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
1698 0 : exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
1699 0 : break;
1700 0 : }
1701 0 : case FD_SCHED_TT_TXN_SIGVERIFY: {
1702 0 : fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, task->txn_sigverify->txn_idx );
1703 :
1704 0 : fd_bank_t bank[1];
1705 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->txn_sigverify->bank_idx ) );
1706 0 : bank->data->refcnt++;
1707 :
1708 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
1709 0 : fd_execrp_txn_sigverify_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
1710 0 : memcpy( exec_msg->txn, txn_p, sizeof(fd_txn_p_t) );
1711 0 : exec_msg->bank_idx = task->txn_sigverify->bank_idx;
1712 0 : exec_msg->txn_idx = task->txn_sigverify->txn_idx;
1713 0 : fd_stem_publish( stem, exec_out->idx, (FD_EXECRP_TT_TXN_SIGVERIFY<<32) | task->txn_sigverify->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, 0UL );
1714 0 : exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
1715 0 : break;
1716 0 : }
1717 0 : case FD_SCHED_TT_POH_HASH: {
1718 0 : fd_bank_t bank[ 1 ];
1719 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->poh_hash->bank_idx ) );
1720 0 : bank->data->refcnt++;
1721 :
1722 0 : fd_replay_out_link_t * exec_out = ctx->exec_out;
1723 0 : fd_execrp_poh_hash_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
1724 0 : exec_msg->bank_idx = task->poh_hash->bank_idx;
1725 0 : exec_msg->mblk_idx = task->poh_hash->mblk_idx;
1726 0 : exec_msg->hashcnt = task->poh_hash->hashcnt;
1727 0 : memcpy( exec_msg->hash, task->poh_hash->hash, sizeof(fd_hash_t) );
1728 0 : fd_stem_publish( stem, exec_out->idx, (FD_EXECRP_TT_POH_HASH<<32) | task->poh_hash->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, 0UL );
1729 0 : exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
1730 0 : break;
1731 0 : }
1732 0 : default: {
1733 0 : FD_LOG_CRIT(( "unexpected task type %lu", task->task_type ));
1734 0 : }
1735 0 : }
1736 0 : }
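/* Sketch of the exec message signature layout used above and decoded
   again in process_exec_task_done below: the task type rides in the
   high 32 bits and the destination exec tile index in the low 32
   bits.

     ulong sig  = (FD_EXECRP_TT_TXN_EXEC<<32) | exec_idx;  producer side
     ulong tile = sig & 0xFFFFFFFFUL;   consumer side: exec tile idx
     ulong type = sig >> 32;            consumer side: task type
*/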
1737 :
1738 : static void
1739 : mark_bank_dead( fd_replay_tile_t * ctx,
1740 : fd_stem_context_t * stem,
1741 0 : ulong bank_idx ) {
1742 0 : fd_bank_t bank[1];
1743 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, bank_idx ) );
1744 0 : fd_banks_mark_bank_dead( ctx->banks, bank_idx );
1745 :
1746 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ bank_idx ];
1747 0 : if( block_id_ele->block_id_seen ) publish_slot_dead( ctx, stem, block_id_ele->slot, &block_id_ele->latest_mr );
1748 :
1749 0 : fd_reasm_fec_t * fec = fd_reasm_query( ctx->reasm, &block_id_ele->latest_mr );
1750 0 : if( FD_UNLIKELY( !fec ) ) return;
1751 0 : fec->bank_dead = 1;
1752 :
1753 0 : }
1754 :
1755 : /* Returns 1 if charge_busy. */
1756 : static int
1757 : replay( fd_replay_tile_t * ctx,
1758 0 : fd_stem_context_t * stem ) {
1759 :
1760 0 : if( FD_UNLIKELY( !ctx->is_booted ) ) return 0;
1761 :
1762 0 : int charge_busy = 0;
1763 0 : fd_sched_task_t task[ 1 ];
1764 0 : if( FD_UNLIKELY( !fd_sched_task_next_ready( ctx->sched, task ) ) ) {
1765 0 : return charge_busy; /* Nothing to execute or do. */
1766 0 : }
1767 :
1768 0 : charge_busy = 1;
1769 :
1770 0 : switch( task->task_type ) {
1771 0 : case FD_SCHED_TT_BLOCK_START: {
1772 0 : replay_block_start( ctx, stem, task->block_start->bank_idx, task->block_start->parent_bank_idx, task->block_start->slot );
1773 0 : fd_sched_task_done( ctx->sched, FD_SCHED_TT_BLOCK_START, ULONG_MAX, ULONG_MAX, NULL );
1774 0 : break;
1775 0 : }
1776 0 : case FD_SCHED_TT_BLOCK_END: {
1777 0 : fd_bank_t bank[1];
1778 0 : fd_banks_bank_query( bank, ctx->banks, task->block_end->bank_idx );
1779 0 : if( FD_LIKELY( !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) replay_block_finalize( ctx, stem, bank );
1780 0 : fd_sched_task_done( ctx->sched, FD_SCHED_TT_BLOCK_END, ULONG_MAX, ULONG_MAX, NULL );
1781 0 : break;
1782 0 : }
1783 0 : case FD_SCHED_TT_TXN_EXEC:
1784 0 : case FD_SCHED_TT_TXN_SIGVERIFY:
1785 0 : case FD_SCHED_TT_POH_HASH: {
1786 : /* Common case: we have a transaction we need to execute. */
1787 0 : dispatch_task( ctx, stem, task );
1788 0 : break;
1789 0 : }
1790 0 : case FD_SCHED_TT_MARK_DEAD: {
1791 0 : fd_bank_t bank[ 1 ];
1792 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->mark_dead->bank_idx ) );
1793 0 : mark_bank_dead( ctx, stem, task->mark_dead->bank_idx );
1794 0 : break;
1795 0 : }
1796 0 : default: {
1797 0 : FD_LOG_CRIT(( "unexpected task type %lu", task->task_type ));
1798 0 : }
1799 0 : }
1800 :
1801 0 : return charge_busy;
1802 0 : }
1803 :
1804 : static int
1805 : can_process_fec( fd_replay_tile_t * ctx,
1806 0 : int * evict_banks_out ) {
1807 0 : fd_reasm_fec_t * fec;
1808 0 : if( FD_UNLIKELY( fd_sched_can_ingest_cnt( ctx->sched )==0UL ) ) {
1809 0 : ctx->metrics.sched_full++;
1810 0 : return 0;
1811 0 : }
1812 :
1813 0 : if( FD_UNLIKELY( (fec = fd_reasm_peek( ctx->reasm ))==NULL ) ) {
1814 0 : ctx->metrics.reasm_empty++;
1815 0 : return 0;
1816 0 : }
1817 :
1818 0 : ctx->metrics.reasm_latest_slot = fec->slot;
1819 0 : ctx->metrics.reasm_latest_fec_idx = fec->fec_set_idx;
1820 :
1821 0 : if( FD_UNLIKELY( ctx->is_leader && fec->fec_set_idx==0U && fd_reasm_parent( ctx->reasm, fec )->bank_idx==ctx->leader_bank->data->idx ) ) {
1822 : /* This guards against a rare race where we receive the FEC set for
1823 : the slot right after our leader rotation before we freeze the
1824 : bank for the last slot in our leader rotation. Leader slot
1825 : freezing happens only after if we've received the final PoH hash
1826 : from the poh tile as well as the final FEC set for the leader
1827 : slot. So the race happens when FEC sets are delivered and
1828 : processed sooner than the PoH hash, aka when the
1829 : poh=>shred=>replay path for the block id beats the poh=>replay
1830 : path for the poh hash. To mitigate this race, we must hold off
1831 : on ingesting the FEC set for the ensuing slot until the leader
1832 : bank freezes, because ingesting it earlier would violate
1833 : ordering invariants in banks and sched. */
1834 0 : FD_TEST( ctx->block_id_arr[ ctx->leader_bank->data->idx ].block_id_seen );
1835 0 : FD_TEST( !ctx->recv_poh );
1836 0 : ctx->metrics.leader_bid_wait++;
1837 0 : return 0;
1838 0 : }
1839 :
1840 : /* If fec_set_idx is 0, we need a new bank for a new slot. Banks must
1841 : not be full in this case. */
1842 0 : if( FD_UNLIKELY( fd_banks_is_full( ctx->banks ) && fec->fec_set_idx==0 ) ) {
1843 0 : ctx->metrics.banks_full++;
1844 : /* We only want to evict banks if sched is drained and banks is no
1845 : longer making progress. Otherwise, sched might not release
1846 : refcnts on the frontier/leaf banks immediately, and the eviction
1847 : will have to wait for sched to drain anyway. */
1848 0 : if( FD_UNLIKELY( fd_sched_is_drained( ctx->sched ) ) ) *evict_banks_out = 1;
1849 0 : return 0;
1850 0 : }
1851 :
1852 : /* Otherwise, either banks are not full (so we can create a new
1853 : bank if needed), or banks are full but the current FEC set's
1854 : ancestor (fec_set_idx 0) already created a bank for this slot. */
1855 0 : return 1;
1856 0 : }
1857 :
1858 : static void
1859 : insert_fec_set( fd_replay_tile_t * ctx,
1860 : fd_stem_context_t * stem,
1861 0 : fd_reasm_fec_t * reasm_fec ) {
1862 :
1863 0 : long now = fd_log_wallclock();
1864 :
1865 0 : reasm_fec->parent_bank_idx = fd_reasm_parent( ctx->reasm, reasm_fec )->bank_idx;
1866 :
1867 0 : fd_bank_t parent_bank[1];
1868 0 : FD_TEST( fd_banks_bank_query( parent_bank, ctx->banks, reasm_fec->parent_bank_idx ) );
1869 0 : reasm_fec->parent_bank_seq = parent_bank->data->bank_seq;
1870 :
1871 0 : if( FD_UNLIKELY( reasm_fec->fec_set_idx==0U ) ) {
1872 : /* If the first FEC set for a slot is observed, provision a new bank
1873 : if we are not the leader. Remove any stale block id map entry
1874 : and update the block id entry. */
1875 0 : fd_bank_t bank_[1];
1876 0 : fd_bank_t * bank = NULL;
1877 0 : if( FD_UNLIKELY( reasm_fec->is_leader ) ) {
1878 0 : bank = ctx->leader_bank;
1879 0 : } else {
1880 0 : bank = fd_banks_new_bank( bank_, ctx->banks, reasm_fec->parent_bank_idx, now );
1881 0 : }
1882 :
1883 0 : reasm_fec->bank_idx = bank->data->idx;
1884 0 : reasm_fec->bank_seq = bank->data->bank_seq;
1885 :
1886 : /* At this point remove any stale entry in the block id map if it
1887 : exists and set the block id as not having been seen yet. This is
1888 : safe because we know that the old entry for this bank index has
1889 : already been pruned away. */
1890 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ reasm_fec->bank_idx ];
1891 0 : if( FD_LIKELY( fd_block_id_map_ele_query( ctx->block_id_map, &block_id_ele->latest_mr, NULL, ctx->block_id_arr )==block_id_ele ) ) {
1892 0 : FD_TEST( fd_block_id_map_ele_remove( ctx->block_id_map, &block_id_ele->latest_mr, NULL, ctx->block_id_arr ) );
1893 0 : }
1894 0 : block_id_ele->block_id_seen = 0;
1895 0 : block_id_ele->slot = reasm_fec->slot;
1896 0 : block_id_ele->latest_fec_idx = 0U;
1897 0 : block_id_ele->latest_mr = reasm_fec->key;
1898 0 : } else {
1899 : /* We are continuing to execute through a slot that we already have
1900 : a bank index for. */
1901 0 : reasm_fec->bank_idx = reasm_fec->parent_bank_idx;
1902 0 : reasm_fec->bank_seq = reasm_fec->parent_bank_seq;
1903 :
1904 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ reasm_fec->bank_idx ];
1905 0 : if( FD_UNLIKELY( block_id_ele->latest_fec_idx>reasm_fec->fec_set_idx ) ) {
1906 0 : FD_LOG_WARNING(( "dropping FEC set (slot=%lu, fec_set_idx=%u) because it is older than the latest FEC set (slot=%lu, fec_set_idx=%u)", reasm_fec->slot, reasm_fec->fec_set_idx, block_id_ele->slot, block_id_ele->latest_fec_idx ));
1907 0 : return;
1908 0 : }
1909 0 : block_id_ele->latest_fec_idx = reasm_fec->fec_set_idx;
1910 0 : block_id_ele->latest_mr = reasm_fec->key;
1911 0 : }
1912 :
1913 0 : if( FD_UNLIKELY( reasm_fec->slot_complete ) ) {
1914 0 : fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ reasm_fec->bank_idx ];
1915 0 : block_id_ele->block_id_seen = 1;
1916 0 : block_id_ele->latest_mr = reasm_fec->key;
1917 0 : block_id_ele->latest_fec_idx = reasm_fec->fec_set_idx;
1918 0 : FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
1919 0 : }
1920 :
1921 : /* If we are the leader, we don't need to process the FEC set. */
1922 0 : if( FD_UNLIKELY( reasm_fec->is_leader ) ) return;
1923 :
1924 : /* Forks form a partial ordering over FEC sets. The Repair tile
1925 : delivers FEC sets in-order per fork, but FEC set ordering across
1926 : forks is arbitrary. */
1927 0 : fd_sched_fec_t sched_fec[ 1 ];
1928 :
1929 : # if DEBUG_LOGGING
1930 : FD_BASE58_ENCODE_32_BYTES( reasm_fec->key.key, key_b58 );
1931 : FD_BASE58_ENCODE_32_BYTES( reasm_fec->cmr.key, cmr_b58 );
1932 : FD_LOG_INFO(( "replay processing FEC set for slot %lu fec_set_idx %u, mr %s cmr %s", reasm_fec->slot, reasm_fec->fec_set_idx, key_b58, cmr_b58 ));
1933 : # endif
1934 :
1935 0 : sched_fec->shred_cnt = reasm_fec->data_cnt;
1936 0 : sched_fec->is_last_in_batch = !!reasm_fec->data_complete;
1937 0 : sched_fec->is_last_in_block = !!reasm_fec->slot_complete;
1938 0 : sched_fec->bank_idx = reasm_fec->bank_idx;
1939 0 : sched_fec->parent_bank_idx = reasm_fec->parent_bank_idx;
1940 0 : sched_fec->slot = reasm_fec->slot;
1941 0 : sched_fec->parent_slot = reasm_fec->slot - reasm_fec->parent_off;
1942 0 : sched_fec->is_first_in_block = reasm_fec->fec_set_idx==0U;
1943 0 : fd_funk_txn_xid_t const root = fd_accdb_root_get( ctx->accdb_admin );
1944 0 : fd_funk_txn_xid_copy( sched_fec->alut_ctx->xid, &root );
1945 0 : sched_fec->alut_ctx->accdb[0] = ctx->accdb[0];
1946 0 : sched_fec->alut_ctx->els = ctx->published_root_slot;
1947 :
1948 0 : fd_bank_t bank[1];
1949 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, sched_fec->bank_idx ) );
1950 0 : if( sched_fec->is_first_in_block ) {
1951 0 : bank->data->refcnt++;
1952 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for sched", bank->data->idx, sched_fec->slot, bank->data->refcnt ));
1953 0 : }
1954 :
1955 : /* Read FEC set from the store. This should happen before we try to
1956 : ingest the FEC set. This allows us to filter out frags that were
1957 : in-flight when we published away minority forks that the frags land
1958 : on. These frags would have no bank to execute against, because
1959 : their corresponding banks, or parent banks, have also been pruned
1960 : during publishing. A query against store will rightfully tell us
1961 : that the underlying data is not found, implying that this is for a
1962 : minority fork that we can safely ignore. */
1963 :
1964 0 : ulong wait = (ulong)fd_log_wallclock();
1965 0 : ulong work = wait;
1966 0 : FD_STORE_SLOCK_BEGIN( ctx->store ) {
1967 0 : ctx->metrics.store_query_acquire++;
1968 0 : work = (ulong)fd_log_wallclock();
1969 0 : fd_histf_sample( ctx->metrics.store_query_wait, work - wait );
1970 :
1971 0 : fd_store_fec_t * store_fec = fd_store_query( ctx->store, &reasm_fec->key );
1972 0 : ctx->metrics.store_query_cnt++;
1973 0 : if( FD_UNLIKELY( !store_fec ) ) {
1974 :
1975 : /* The only case in which a FEC is not found in the store after
1976 : repair has notified is if the FEC was on a minority fork that
1977 : has already been published away. In this case we abandon the
1978 : entire slice because it is no longer relevant. */
1979 :
1980 0 : ctx->metrics.store_query_missing_cnt++;
1981 0 : ctx->metrics.store_query_missing_mr = reasm_fec->key.ul[0];
1982 0 : FD_BASE58_ENCODE_32_BYTES( reasm_fec->key.key, key_b58 );
1983 0 : FD_LOG_WARNING(( "store fec for slot: %lu is on minority fork already pruned by publish. abandoning slice. root: %lu. pruned merkle: %s", reasm_fec->slot, ctx->consensus_root_slot, key_b58 ));
1984 0 : return;
1985 0 : }
1986 0 : sched_fec->fec = store_fec;
1987 0 : if( FD_UNLIKELY( !fd_sched_fec_ingest( ctx->sched, sched_fec ) ) ) { /* FIXME this critical section is unnecessarily complex. should refactor to just be held for the memcpy and block_offs. */
1988 0 : mark_bank_dead( ctx, stem, sched_fec->bank_idx );
1989 0 : }
1990 0 : } FD_STORE_SLOCK_END;
1991 :
1992 0 : ctx->metrics.store_query_release++;
1993 0 : fd_histf_sample( ctx->metrics.store_query_work, (ulong)fd_log_wallclock() - work );
1994 0 : }
1995 :
1996 : static void
1997 : process_fec_set( fd_replay_tile_t * ctx,
1998 : fd_stem_context_t * stem,
1999 0 : fd_reasm_fec_t * reasm_fec ) {
2000 :
2001 0 : fd_reasm_fec_t * parent = fd_reasm_parent( ctx->reasm, reasm_fec );
2002 0 : if( FD_UNLIKELY( !parent ) ) {
2003 0 : FD_LOG_WARNING(( "dropping FEC set (slot=%lu, fec_set_idx=%u) because it is unconnected in reasm", reasm_fec->slot, reasm_fec->fec_set_idx ));
2004 0 : return;
2005 0 : }
2006 :
2007 0 : if( FD_UNLIKELY( parent->bank_dead ) ) {
2008 : /* Inherit the dead flag from the parent. If a dead slot is
2009 : completed, we publish the slot as dead. Don't insert FECs for
2010 : dead slots. */
2011 0 : reasm_fec->bank_dead = 1;
2012 0 : if( FD_UNLIKELY( reasm_fec->slot_complete ) ) publish_slot_dead( ctx, stem, reasm_fec->slot, &reasm_fec->key );
2013 0 : FD_LOG_DEBUG(( "dropping FEC set (slot=%lu, fec_set_idx=%u) because parent bank is marked dead", reasm_fec->slot, reasm_fec->fec_set_idx ));
2014 0 : return;
2015 0 : }
2016 :
2017 : /* Standard case, the parent FEC has a valid corresponding bank. */
2018 0 : fd_bank_t parent_fec_bank[1];
2019 0 : if( FD_LIKELY( fd_banks_bank_query( parent_fec_bank, ctx->banks, parent->bank_idx ) &&
2020 0 : parent_fec_bank->data->bank_seq==parent->bank_seq ) ) {
2021 0 : insert_fec_set( ctx, stem, reasm_fec );
2022 0 : return;
2023 0 : }
2024 :
2025 : /* If the FEC doesn't directly connect, iterate up the reasm tree
2026 : to find the closest slot-complete FEC that corresponds to a
2027 : valid bank. */
2028 :
2029 : /* First, record the FEC we want to insert, plus every slot-complete
2030 : FEC on the path up to and including the closest banked ancestor. */
2031 0 : fd_reasm_fec_t * path[ FD_BANKS_MAX_BANKS ];
2032 0 : ulong path_cnt = 0UL;
2033 0 : path[ path_cnt++ ] = reasm_fec;
2034 :
2035 0 : for( fd_reasm_fec_t * curr = reasm_fec;; ) {
2036 0 : curr = fd_reasm_parent( ctx->reasm, curr );
2037 0 : if( FD_UNLIKELY( !curr ) ) return; /* If can't connect, drop the FEC. */
2038 0 : if( FD_LIKELY( !curr->slot_complete ) ) continue;
2039 :
2040 0 : FD_TEST( path_cnt<FD_BANKS_MAX_BANKS );
2041 0 : path[ path_cnt++ ] = curr;
2042 :
2043 0 : fd_bank_t curr_bank[1];
2044 0 : if( FD_LIKELY( fd_banks_bank_query( curr_bank, ctx->banks, curr->bank_idx ) && curr_bank->data->bank_seq==curr->bank_seq ) ) break;
2045 0 : }
2046 :
2047 0 : for( ulong i=path_cnt; i>0UL; i-- ) {
2048 0 : fd_reasm_fec_t * slot_fecs[ FD_SHRED_BLK_MAX/32 ]; /* TODO: replace with fix-32 macro. */
2049 0 : fd_reasm_fec_t * leaf = path[ i-1 ];
2050 :
2051 : /* If there isn't capacity in the sched or banks, return early and
2052 : drop the FEC. We have inserted as much as we can for now. */
2053 0 : if( FD_UNLIKELY( fd_sched_can_ingest_cnt( ctx->sched )<leaf->fec_set_idx ) ) return;
2054 0 : if( FD_UNLIKELY( fd_banks_is_full( ctx->banks ) ) ) return;
2055 :
2056 : /* Gather all FECs for this slot. */
2057 0 : fd_reasm_fec_t * curr = leaf;
2058 0 : for(;;) {
2059 0 : slot_fecs[ curr->fec_set_idx ] = curr;
2060 0 : if( curr->fec_set_idx==0U ) break;
2061 0 : curr = fd_reasm_parent( ctx->reasm, curr );
2062 0 : FD_TEST( curr );
2063 0 : }
2064 :
2065 0 : for( ulong j=0UL; j<=leaf->fec_set_idx; j++ ) {
2066 0 : insert_fec_set( ctx, stem, slot_fecs[ j ] );
2067 0 : }
2068 0 : }
2069 0 : }
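/* Worked example of the reconnect walk above (a sketch): suppose the
   incoming FEC belongs to slot 13, slots 11 and 12 are complete in
   reasm but not yet banked, and slot 10 is the closest slot-complete
   ancestor with a valid bank. The first loop collects

     path = { fec(13), slot_complete(12), slot_complete(11),
              slot_complete(10) }

   and the second loop then walks path back-to-front (slot 10 first),
   gathering each slot's FECs from fec_set_idx 0 up to its leaf and
   inserting them oldest-first, creating each missing bank in order. */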
2070 :
2071 : /* accdb_advance_root moves account records from the unrooted to the
2072 : rooted database. */
2073 :
2074 : static inline ulong
2075 0 : accdb_root_op_total( fd_replay_tile_t const * ctx ) {
2076 0 : return ctx->accdb_admin->base.root_cnt +
2077 0 : ctx->accdb_admin->base.reclaim_cnt;
2078 0 : }
2079 :
2080 : static void
2081 : accdb_advance_root( fd_replay_tile_t * ctx,
2082 : ulong slot,
2083 0 : ulong bank_idx ) {
2084 0 : fd_funk_txn_xid_t xid = { .ul[0] = slot, .ul[1] = bank_idx };
2085 0 : FD_LOG_DEBUG(( "advancing root to slot=%lu", slot ));
2086 :
2087 0 : long rooted_accounts = -(long)accdb_root_op_total( ctx );
2088 0 : long root_accounts_dt = -fd_tickcount();
2089 0 : fd_accdb_advance_root( ctx->accdb_admin, &xid );
2090 0 : rooted_accounts += (long)accdb_root_op_total( ctx );
2091 0 : root_accounts_dt += fd_tickcount();
2092 0 : fd_histf_sample( ctx->metrics.root_slot_dur, (ulong)root_accounts_dt );
2093 0 : fd_histf_sample( ctx->metrics.root_account_dur, (ulong)root_accounts_dt / (ulong)fd_long_max( rooted_accounts, 1L ) );
2094 :
2095 0 : fd_progcache_txn_advance_root( ctx->progcache_admin, &xid );
2096 0 : }
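/* The metrics pattern above is a before/after delta: seed the
   accumulator with the negated pre-call counter, then add the
   post-call counter. A minimal sketch of the idiom:

     long rooted  = -(long)accdb_root_op_total( ctx );   -pre
     fd_accdb_advance_root( ctx->accdb_admin, &xid );
     rooted      += (long)accdb_root_op_total( ctx );    post-pre == ops performed
*/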
2097 :
2098 : static int
2099 0 : advance_published_root( fd_replay_tile_t * ctx ) {
2100 :
2101 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &ctx->consensus_root, NULL, ctx->block_id_arr );
2102 0 : if( FD_UNLIKELY( !block_id_ele ) ) {
2103 0 : FD_BASE58_ENCODE_32_BYTES( ctx->consensus_root.key, consensus_root_b58 );
2104 0 : FD_LOG_CRIT(( "invariant violation: block id ele not found for consensus root %s", consensus_root_b58 ));
2105 0 : }
2106 0 : ulong target_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
2107 :
2108 : /* If the identity vote has been seen on a bank that should be rooted,
2109 : then we are now ready to produce blocks. */
2110 0 : if( FD_UNLIKELY( !ctx->identity_vote_rooted ) ) {
2111 0 : fd_bank_t root_bank[1];
2112 0 : if( FD_UNLIKELY( !fd_banks_bank_query( root_bank, ctx->banks, target_bank_idx ) ) ) FD_LOG_CRIT(( "invariant violation: root bank not found for bank index %lu", target_bank_idx ));
2113 0 : if( fd_bank_identity_vote_idx_get( root_bank )==ctx->identity_idx ) ctx->identity_vote_rooted = 1;
2114 0 : }
2115 :
2116 0 : ulong advanceable_root_idx = ULONG_MAX;
2117 0 : if( FD_UNLIKELY( !fd_banks_advance_root_prepare( ctx->banks, target_bank_idx, &advanceable_root_idx ) ) ) {
2118 0 : ctx->metrics.storage_root_behind++;
2119 0 : return 0;
2120 0 : }
2121 :
2122 0 : fd_bank_t bank[1];
2123 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, advanceable_root_idx ) );
2124 :
2125 0 : if( FD_UNLIKELY( advanceable_root_idx >= ctx->block_id_len ) ) {
2126 0 : FD_LOG_CRIT(( "invariant violation: advanceable root ele out of bounds [0, %lu) index %lu", ctx->block_id_len, advanceable_root_idx ));
2127 0 : }
2128 0 : fd_block_id_ele_t * advanceable_root_ele = &ctx->block_id_arr[ advanceable_root_idx ];
2129 :
2130 0 : ulong advanceable_root_slot = fd_bank_slot_get( bank );
2131 0 : accdb_advance_root( ctx, advanceable_root_slot, bank->data->idx );
2132 :
2133 0 : fd_txncache_advance_root( ctx->txncache, bank->data->txncache_fork_id );
2134 0 : fd_sched_advance_root( ctx->sched, advanceable_root_idx );
2135 0 : fd_banks_advance_root( ctx->banks, advanceable_root_idx );
2136 :
2137 :
2138 :
2139 :
2140 : /* Reasm also prunes from the store during its publish. */
2141 :
2142 0 : fd_reasm_publish( ctx->reasm, &advanceable_root_ele->latest_mr, ctx->store );
2143 :
2144 0 : ctx->published_root_slot = advanceable_root_slot;
2145 0 : ctx->published_root_bank_idx = advanceable_root_idx;
2146 :
2147 0 : return 1;
2148 0 : }
2149 :
2150 : static void
2151 : after_credit( fd_replay_tile_t * ctx,
2152 : fd_stem_context_t * stem,
2153 : int * opt_poll_in,
2154 0 : int * charge_busy ) {
2155 0 : if( FD_UNLIKELY( !ctx->is_booted || !ctx->wfs_complete ) ) return;
2156 :
2157 0 : if( FD_UNLIKELY( maybe_become_leader( ctx, stem ) ) ) {
2158 0 : *charge_busy = 1;
2159 0 : *opt_poll_in = 0;
2160 0 : return;
2161 0 : }
2162 :
2163 : /* If we are leader, we can only unbecome the leader once we have
2164 : received the poh hash from the poh tile and block id from reasm.
2165 : We have to do an additional check against the slot of the leader
2166 : bank because we lazily remove entries from the block id arr. */
2167 0 : if( FD_UNLIKELY( ctx->is_leader &&
2168 0 : ctx->recv_poh &&
2169 0 : ctx->block_id_arr[ ctx->leader_bank->data->idx ].block_id_seen &&
2170 0 : ctx->block_id_arr[ ctx->leader_bank->data->idx ].slot==fd_bank_slot_get( ctx->leader_bank ) ) ) {
2171 :
2172 0 : fini_leader_bank( ctx, stem );
2173 0 : *charge_busy = 1;
2174 0 : *opt_poll_in = 0;
2175 0 : return;
2176 0 : }
2177 :
2178 0 : ulong bank_idx;
2179 0 : while( (bank_idx=fd_sched_pruned_block_next( ctx->sched ))!=ULONG_MAX ) {
2180 0 : fd_bank_t bank[1];
2181 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, bank_idx ) );
2182 0 : bank->data->refcnt--;
2183 0 : FD_LOG_DEBUG(( "bank (idx=%lu) refcnt decremented to %lu for sched", bank->data->idx, bank->data->refcnt ));
2184 0 : }
2185 :
2186 : /* If the published_root is not caught up to the consensus root, then
2187 : we should try to advance the published root. */
2188 0 : if( FD_UNLIKELY( ctx->consensus_root_bank_idx!=ctx->published_root_bank_idx && advance_published_root( ctx ) ) ) {
2189 0 : *charge_busy = 1;
2190 0 : *opt_poll_in = 0;
2191 0 : return;
2192 0 : }
2193 :
2194 0 : if( FD_UNLIKELY( fd_banks_prune_dead_banks( ctx->banks ) ) ) {
2195 : // FIXME: anything pruned from banks should also be pruned from txncache and accdb
2196 0 : *charge_busy = 1;
2197 0 : *opt_poll_in = 0;
2198 0 : return;
2199 0 : }
2200 :
2201 : /* If the reassembler has a fec that is ready, we should process it
2202 : and pass it to the scheduler. */
2203 0 : int evict_banks = 0;
2204 0 : if( FD_LIKELY( can_process_fec( ctx, &evict_banks ) ) ) {
2205 0 : fd_reasm_fec_t * fec = fd_reasm_pop( ctx->reasm );
2206 0 : process_fec_set( ctx, stem, fec );
2207 0 : *charge_busy = 1;
2208 0 : *opt_poll_in = 0;
2209 0 : return;
2210 0 : }
2211 :
2212 0 : if( FD_UNLIKELY( evict_banks ) ) {
2213 0 : FD_LOG_WARNING(( "banks are full and partially executed frontier banks are being evicted" ));
2214 0 : ulong frontier_cnt = 0UL;
2215 0 : ulong frontier_indices[ FD_BANKS_MAX_BANKS ];
2216 0 : fd_banks_get_frontier( ctx->banks, frontier_indices, &frontier_cnt );
2217 :
2218 : /* Mark all frontier banks as dead. As refcnts on said banks are
2219 : drained, they will be pruned away. */
2220 0 : for( ulong i=0UL; i<frontier_cnt; i++ ) {
2221 0 : fd_bank_t bank[1];
2222 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, frontier_indices[i] ) );
2223 0 : if( FD_UNLIKELY( ctx->is_leader && frontier_indices[i]==ctx->leader_bank->data->idx ) ) continue;
2224 0 : mark_bank_dead( ctx, stem, bank->data->idx );
2225 0 : fd_sched_block_abandon( ctx->sched, bank->data->idx );
2226 0 : }
2227 0 : }
2228 :
2229 0 : *charge_busy = replay( ctx, stem );
2230 0 : *opt_poll_in = !*charge_busy;
2231 0 : }
2232 :
2233 : static int
2234 : before_frag( fd_replay_tile_t * ctx,
2235 : ulong in_idx,
2236 : ulong seq FD_PARAM_UNUSED,
2237 0 : ulong sig ) {
2238 :
2239 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_SHRED ) ) {
2240 : /* If reasm is full, we cannot insert any more FEC sets. We must
2241 : not consume any frags from shred_out until reasm can process more
2242 : FEC sets. */
2243 :
2244 0 : if( FD_UNLIKELY( !fd_reasm_free( ctx->reasm ) ) ) {
2245 0 : return -1;
2246 0 : }
2247 0 : }
2248 :
2249 0 : if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP_OUT && sig!=FD_GOSSIP_UPDATE_TAG_WFS_DONE ) ) return 1;
2250 0 : return 0;
2251 0 : }
2252 :
2253 : static void
2254 : process_exec_task_done( fd_replay_tile_t * ctx,
2255 : fd_stem_context_t * stem,
2256 : fd_execrp_task_done_msg_t * msg,
2257 0 : ulong sig ) {
2258 :
2259 0 : ulong exec_tile_idx = sig&0xFFFFFFFFUL;
2260 :
2261 0 : fd_bank_t bank[1];
2262 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, msg->bank_idx ) );
2263 0 : FD_TEST( bank->data );
2264 0 : bank->data->refcnt--;
2265 :
2266 0 : switch( sig>>32 ) {
2267 0 : case FD_EXECRP_TT_TXN_EXEC: {
2268 0 : if( FD_UNLIKELY( !ctx->identity_vote_rooted ) ) {
2269 : /* Query the txn signature against our recently generated vote
2270 : txn signatures. If the query is successful, then we have
2271 : seen our own vote transaction land and this should be marked
2272 : in the bank. We go through this exercise until we've seen
2273 : our vote rooted. */
2274 0 : fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, msg->txn_exec->txn_idx );
2275 :
2276 0 : fd_pubkey_t * identity_pubkey_out = NULL;
2277 0 : if( fd_vote_tracker_query_sig( ctx->vote_tracker, fd_type_pun_const( txn_p->payload+TXN( txn_p )->signature_off ), &identity_pubkey_out ) && fd_pubkey_eq( identity_pubkey_out, ctx->identity_pubkey ) ) {
2278 0 : fd_bank_identity_vote_idx_set( bank, ctx->identity_idx );
2279 0 : }
2280 0 : }
2281 0 : if( FD_UNLIKELY( !msg->txn_exec->is_committable && !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) {
2282 : /* Every transaction in a valid block has to execute.
2283 : Otherwise, we should mark the block as dead. */
2284 0 : mark_bank_dead( ctx, stem, bank->data->idx );
2285 0 : fd_sched_block_abandon( ctx->sched, bank->data->idx );
2286 0 : }
2287 0 : if( FD_UNLIKELY( (bank->data->flags&FD_BANK_FLAGS_DEAD) && bank->data->refcnt==0UL ) ) {
2288 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
2289 0 : }
2290 0 : int res = fd_sched_task_done( ctx->sched, FD_SCHED_TT_TXN_EXEC, msg->txn_exec->txn_idx, exec_tile_idx, NULL );
2291 0 : FD_TEST( res==0 );
2292 0 : fd_sched_txn_info_t * txn_info = fd_sched_get_txn_info( ctx->sched, msg->txn_exec->txn_idx );
2293 0 : txn_info->flags |= FD_SCHED_TXN_EXEC_DONE;
2294 0 : if( FD_LIKELY( !(txn_info->flags&FD_SCHED_TXN_SIGVERIFY_DONE)||!txn_info->txn_err ) ) { /* Set execution status if sigverify hasn't happened yet or if sigverify was a success. */
2295 0 : txn_info->txn_err = msg->txn_exec->txn_err;
2296 0 : txn_info->flags |= fd_ulong_if( msg->txn_exec->is_committable, FD_SCHED_TXN_IS_COMMITTABLE, 0UL );
2297 0 : txn_info->flags |= fd_ulong_if( msg->txn_exec->is_fees_only, FD_SCHED_TXN_IS_FEES_ONLY, 0UL );
2298 0 : }
2299 0 : if( FD_UNLIKELY( (txn_info->flags&FD_SCHED_TXN_REPLAY_DONE)==FD_SCHED_TXN_REPLAY_DONE ) ) { /* UNLIKELY because generally exec happens before sigverify. */
2300 0 : publish_txn_executed( ctx, stem, msg->txn_exec->txn_idx );
2301 0 : }
2302 0 : break;
2303 0 : }
2304 0 : case FD_EXECRP_TT_TXN_SIGVERIFY: {
2305 0 : fd_sched_txn_info_t * txn_info = fd_sched_get_txn_info( ctx->sched, msg->txn_sigverify->txn_idx );
2306 0 : txn_info->flags |= FD_SCHED_TXN_SIGVERIFY_DONE;
2307 0 : if( FD_UNLIKELY( msg->txn_sigverify->err ) ) {
2308 0 : txn_info->txn_err = FD_RUNTIME_TXN_ERR_SIGNATURE_FAILURE;
2309 0 : txn_info->flags &= ~FD_SCHED_TXN_IS_COMMITTABLE;
2310 0 : txn_info->flags &= ~FD_SCHED_TXN_IS_FEES_ONLY;
2311 0 : }
2312 0 : if( FD_UNLIKELY( msg->txn_sigverify->err && !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) {
2313 : /* Every transaction in a valid block has to sigverify.
2314 : Otherwise, we should mark the block as dead. Also freeze the
2315 : bank if possible. */
2316 0 : mark_bank_dead( ctx, stem, bank->data->idx );
2317 0 : fd_sched_block_abandon( ctx->sched, bank->data->idx );
2318 0 : }
2319 0 : if( FD_UNLIKELY( (bank->data->flags&FD_BANK_FLAGS_DEAD) && bank->data->refcnt==0UL ) ) {
2320 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
2321 0 : }
2322 0 : int res = fd_sched_task_done( ctx->sched, FD_SCHED_TT_TXN_SIGVERIFY, msg->txn_sigverify->txn_idx, exec_tile_idx, NULL );
2323 0 : FD_TEST( res==0 );
2324 0 : if( FD_LIKELY( (txn_info->flags&FD_SCHED_TXN_REPLAY_DONE)==FD_SCHED_TXN_REPLAY_DONE ) ) {
2325 0 : publish_txn_executed( ctx, stem, msg->txn_sigverify->txn_idx );
2326 0 : }
2327 0 : break;
2328 0 : }
2329 0 : case FD_EXECRP_TT_POH_HASH: {
2330 0 : int res = fd_sched_task_done( ctx->sched, FD_SCHED_TT_POH_HASH, ULONG_MAX, exec_tile_idx, msg->poh_hash );
2331 0 : if( FD_UNLIKELY( res<0 && !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) {
2332 0 : mark_bank_dead( ctx, stem, bank->data->idx );
2333 0 : }
2334 0 : if( FD_UNLIKELY( (bank->data->flags&FD_BANK_FLAGS_DEAD) && bank->data->refcnt==0UL ) ) {
2335 0 : fd_banks_mark_bank_frozen( ctx->banks, bank );
2336 0 : }
2337 0 : break;
2338 0 : }
2339 0 : default: FD_LOG_CRIT(( "unexpected sig 0x%lx", sig ));
2340 0 : }
2341 :
2342 : /* Reference counter just decreased, and an exec tile just got freed
2343 : up. If there's a need to be more aggressively pruning, we could
2344 : check here if more slots just became publishable and publish. Not
2345 : publishing here shouldn't bloat the fork tree too much though. We
2346 : mark minority forks dead as soon as we can, and execution dispatch
2347 : stops on dead blocks. So shortly afterwards, dead blocks should be
2348 : eligible for pruning as in-flight transactions retire from the
2349 : execution pipeline. */
2350 :
2351 0 : }
2352 :
2353 : static void
2354 : process_tower_slot_done( fd_replay_tile_t * ctx,
2355 : fd_stem_context_t * stem,
2356 : fd_tower_slot_done_t const * msg,
2357 0 : ulong seq ) {
2358 0 : fd_bank_t replay_bank[1];
2359 0 : if( FD_UNLIKELY( !fd_banks_bank_query( replay_bank, ctx->banks, msg->replay_bank_idx ) ) ) FD_LOG_CRIT(( "invariant violation: bank not found for bank index %lu", msg->replay_bank_idx ));
2360 0 : replay_bank->data->refcnt--;
2361 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt decremented to %lu for tower", replay_bank->data->idx, msg->replay_slot, replay_bank->data->refcnt ));
2362 :
2363 0 : ctx->reset_block_id = msg->reset_block_id;
2364 0 : ctx->reset_slot = msg->reset_slot;
2365 0 : ctx->reset_timestamp_nanos = fd_log_wallclock();
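: /* The earliest slot we could be leader for is the one right after
: the reset slot, and never at or below a slot we have already been
: leader for (the highwater leader slot). */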
2366 0 : ulong min_leader_slot = fd_ulong_max( msg->reset_slot+1UL, fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot+1UL ) );
2367 0 : ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, min_leader_slot, ctx->identity_pubkey );
2368 0 : if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
2369 0 : ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
2370 0 : } else {
2371 0 : ctx->next_leader_tickcount = LONG_MAX;
2372 0 : }
2373 :
2374 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->reset_block_id, NULL, ctx->block_id_arr );
2375 0 : if( FD_UNLIKELY( !block_id_ele ) ) {
2376 0 : FD_BASE58_ENCODE_32_BYTES( msg->reset_block_id.key, reset_block_id_b58 );
2377 0 : FD_LOG_CRIT(( "invariant violation: block id ele doesn't exist for reset block id: %s, slot: %lu", reset_block_id_b58, msg->reset_slot ));
2378 0 : }
2379 0 : ulong reset_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
2380 :
2381 0 : fd_bank_t bank[1];
2382 0 : if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, reset_bank_idx ) ) ) {
2383 0 : FD_LOG_CRIT(( "invariant violation: bank not found for bank index %lu", reset_bank_idx ));
2384 0 : }
2385 :
2386 0 : if( FD_LIKELY( msg->root_slot!=ULONG_MAX ) ) FD_TEST( msg->root_slot<=msg->reset_slot );
2387 0 : fd_memcpy( ctx->reset_bank, bank, sizeof(fd_bank_t) );
2388 :
2389 0 : if( FD_LIKELY( ctx->replay_out->idx!=ULONG_MAX ) ) {
2390 0 : fd_poh_reset_t * reset = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
2391 :
2392 0 : reset->bank_idx = bank->data->idx;
2393 0 : reset->timestamp = ctx->reset_timestamp_nanos;
2394 0 : reset->completed_slot = ctx->reset_slot;
2395 0 : reset->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
2396 0 : reset->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
2397 0 : reset->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)reset->ticks_per_slot);
2398 :
2399 0 : fd_memcpy( reset->completed_block_id, &block_id_ele->latest_mr, sizeof(fd_hash_t) );
2400 :
2401 0 : fd_blockhashes_t const * block_hash_queue = fd_bank_block_hash_queue_query( bank );
2402 0 : fd_hash_t const * last_hash = fd_blockhashes_peek_last_hash( block_hash_queue );
2403 0 : FD_TEST( last_hash );
2404 0 : fd_memcpy( reset->completed_blockhash, last_hash->uc, sizeof(fd_hash_t) );
2405 :
2406 0 : ulong ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
2407 0 : if( FD_UNLIKELY( reset->hashcnt_per_tick==1UL ) ) {
2408 : /* Low power producer, maximum of one microblock per tick in the slot */
2409 0 : reset->max_microblocks_in_slot = ticks_per_slot;
2410 0 : } else {
2411 : /* See the long comment in after_credit for this limit */
2412 0 : reset->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ticks_per_slot*(reset->hashcnt_per_tick-1UL) );
2413 0 : }
2414 0 : reset->next_leader_slot = ctx->next_leader_slot;
2415 :
2416 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_RESET, ctx->replay_out->chunk, sizeof(fd_poh_reset_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
2417 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_poh_reset_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
2418 0 : }
2419 :
2420 0 : FD_LOG_INFO(( "tower_slot_done(reset_slot=%lu, next_leader_slot=%lu, vote_slot=%lu, replay_slot=%lu, root_slot=%lu, seqno=%lu)", msg->reset_slot, ctx->next_leader_slot, msg->vote_slot, msg->replay_slot, msg->root_slot, seq ));
2421 0 : maybe_become_leader( ctx, stem );
2422 :
2423 0 : if( FD_LIKELY( msg->root_slot!=ULONG_MAX ) ) {
2424 :
2425 0 : FD_TEST( msg->root_slot>=ctx->consensus_root_slot );
2426 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->root_block_id, NULL, ctx->block_id_arr );
2427 0 : FD_TEST( block_id_ele );
2428 0 : ctx->consensus_root_slot = msg->root_slot;
2429 0 : ctx->consensus_root = msg->root_block_id;
2430 0 : ctx->consensus_root_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
2431 :
2432 0 : publish_root_advanced( ctx, stem );
2433 :
2434 0 : fd_sched_root_notify( ctx->sched, ctx->consensus_root_bank_idx );
2435 0 : }
2436 :
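: /* Walk from the reset bank up the fork toward the consensus root,
: counting hops for the ROOT_DISTANCE metric. */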
2437 0 : ulong distance = 0UL;
2438 0 : fd_bank_t * parent = bank;
2439 0 : while( parent ) {
2440 0 : if( FD_UNLIKELY( parent->data->idx==ctx->consensus_root_bank_idx ) ) break;
2441 0 : parent = fd_banks_get_parent( bank, ctx->banks, parent );
2442 0 : distance++;
2443 0 : }
2444 :
2445 0 : FD_MGAUGE_SET( REPLAY, ROOT_DISTANCE, distance );
2446 0 : }
2447 :
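: /* process_fec_complete ingests a FEC-set-complete notification from
: the shred tile. As parsed below, the frag carries a data shred
: header followed by a trailer:
:
: [0, FD_SHRED_DATA_HEADER_SZ) data shred header
: [+0, +32) merkle root of the FEC set
: [+32, +64) chained (parent) merkle root
: [+64, +68) int flag: set if we produced this FEC set as leader
:
: The FEC set is then recorded in reasm; duplicates (merkle roots
: already seen) are ignored. */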
2448 : static void
2449 : process_fec_complete( fd_replay_tile_t * ctx,
2450 0 : uchar const * shred_buf ) {
2451 0 : fd_shred_t const * shred = (fd_shred_t const *)fd_type_pun_const( shred_buf );
2452 :
2453 0 : fd_hash_t const * merkle_root = (fd_hash_t const *)fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ );
2454 0 : fd_hash_t const * chained_merkle_root = (fd_hash_t const *)fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) );
2455 0 : int is_leader_fec = *(int const *) fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) + sizeof(fd_hash_t) );
2456 :
2457 0 : int data_complete = !!( shred->data.flags & FD_SHRED_DATA_FLAG_DATA_COMPLETE );
2458 0 : int slot_complete = !!( shred->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE );
2459 :
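: /* The first FEC set of a block that builds directly off reasm's
: anchor slot chains off a merkle root we never received over the
: wire; substitute reasm's root for it. */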
2460 0 : if( FD_UNLIKELY( shred->slot - shred->data.parent_off==fd_reasm_slot0( ctx->reasm ) && shred->fec_set_idx==0 ) ) {
2461 0 : chained_merkle_root = &fd_reasm_root( ctx->reasm )->key;
2462 0 : }
2463 :
2464 0 : if( FD_UNLIKELY( !fd_reasm_free( ctx->reasm ) ) ) {
2465 0 : FD_LOG_CRIT(( "unimplemented" )); /* TODO reasm eviction */
2466 0 : }
2467 0 : if( FD_UNLIKELY( fd_reasm_query( ctx->reasm, merkle_root ) ) ) return;
2468 0 : FD_TEST( fd_reasm_insert( ctx->reasm, merkle_root, chained_merkle_root, shred->slot, shred->fec_set_idx, shred->data.parent_off, (ushort)(shred->idx - shred->fec_set_idx + 1), data_complete, slot_complete, is_leader_fec ) );
2469 0 : }
2470 :
2471 : static void
2472 0 : process_resolv_slot_completed( fd_replay_tile_t * ctx, ulong bank_idx ) {
2473 0 : fd_bank_t bank[1];
2474 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, bank_idx ) );
2475 0 : bank->data->refcnt--;
2476 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt decremented to %lu for resolv", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
2477 0 : }
2478 :
2479 : static void
2480 : process_vote_txn_sent( fd_replay_tile_t * ctx,
2481 0 : fd_txn_m_t * txnm ) {
2482 : /* The send tile has signed and sent a vote. Add this vote to the
2483 : vote tracker. We keep doing this until we have observed a vote
2484 : from the current identity become rooted. */
2485 0 : if( FD_UNLIKELY( !ctx->identity_vote_rooted ) ) {
2486 0 : uchar * payload = (uchar *)txnm + sizeof(fd_txn_m_t);
2487 0 : uchar txn_mem[ FD_TXN_MAX_SZ ] __attribute__((aligned(alignof(fd_txn_t))));
2488 0 : fd_txn_t * txn = (fd_txn_t *)txn_mem;
2489 0 : if( FD_UNLIKELY( !fd_txn_parse( payload, txnm->payload_sz, txn_mem, NULL ) ) ) {
2490 0 : FD_LOG_CRIT(( "Could not parse txn from send tile" ));
2491 0 : }
2492 : /* The identity of the validator that signed the vote will always
2493 : be the first signer in the vote transaction. */
2494 0 : fd_pubkey_t * vote_identity = fd_type_pun( payload+txn->acct_addr_off );
2495 0 : fd_vote_tracker_insert( ctx->vote_tracker, vote_identity, fd_type_pun_const( payload+txn->signature_off ) );
2496 0 : }
2497 0 : }
2498 :
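: /* Cross-check the shred version from every source available so far:
: the configured expected version, the version reported by ipecho,
: and the version computed from the genesis hash plus the snapshot's
: hard forks. Any disagreement is fatal. */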
2499 : static inline void
2500 0 : maybe_verify_shred_version( fd_replay_tile_t * ctx ) {
2501 0 : if( FD_LIKELY( ctx->expected_shred_version && ctx->ipecho_shred_version ) ) {
2502 0 : if( FD_UNLIKELY( ctx->expected_shred_version!=ctx->ipecho_shred_version ) ) {
2503 0 : FD_LOG_ERR(( "shred version mismatch: expected %u but got %u from ipecho", ctx->expected_shred_version, ctx->ipecho_shred_version ));
2504 0 : }
2505 0 : }
2506 :
2507 0 : if( FD_LIKELY( ctx->has_genesis_hash && ctx->hard_forks_cnt!=ULONG_MAX && (ctx->expected_shred_version || ctx->ipecho_shred_version) ) ) {
2508 0 : ushort expected_shred_version = ctx->expected_shred_version ? ctx->expected_shred_version : ctx->ipecho_shred_version;
2509 :
2510 0 : ushort actual_shred_version = compute_shred_version( ctx->genesis_hash->uc, ctx->hard_forks, ctx->hard_forks_cnts, ctx->hard_forks_cnt );
2511 :
2512 0 : if( FD_UNLIKELY( expected_shred_version!=actual_shred_version ) ) {
2513 0 : FD_BASE58_ENCODE_32_BYTES( ctx->genesis_hash->uc, genesis_hash_b58 );
2514 0 : FD_LOG_ERR(( "Your genesis.bin file at `%s` combined with the hard_forks from the loaded snapshot have produced "
2515 0 : "a shred version of %hu but the entrypoint you connected to on boot reported a shred version of %hu. "
2516 0 : "This likely means that the genesis.bin file you have is for a different cluster than the one you "
2517 0 : "are trying to connect to, you can delete it and restart the node to download the correct genesis "
2518 0 : "file automatically.", ctx->genesis_path, actual_shred_version, expected_shred_version ));
2519 0 : }
2520 0 : }
2521 0 : }
2522 :
2523 : static inline void
2524 0 : maybe_verify_genesis_timestamp( fd_replay_tile_t * ctx ) {
2525 0 : if( FD_LIKELY( !ctx->has_expected_genesis_timestamp || !ctx->has_genesis_timestamp ) ) return;
2526 0 : if( FD_LIKELY( ctx->genesis_timestamp==ctx->expected_genesis_timestamp ) ) return;
2527 :
2528 0 : FD_LOG_ERR(( "Your genesis.bin file at `%s` has a genesis timestamp of %lu but the snapshot you loaded has a genesis "
2529 0 : "timestamp of %lu. This either means that the genesis.bin file you have is for a different cluster than "
2530 0 : "the one you are trying to connect to, or you have loaded a snapshot for the wrong cluster. In either "
2531 0 : "case, you can delete the problematic file and restart the node to download the correct one automatically.",
2532 0 : ctx->genesis_path, ctx->genesis_timestamp, ctx->expected_genesis_timestamp ));
2533 0 : }
2534 :
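: /* process_tower_optimistic_confirmed forwards an optimistically
: confirmed slot downstream, taking an extra reference on the bank
: when RPC is enabled so the bank stays queryable. */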
2535 : static void
2536 : process_tower_optimistic_confirmed( fd_replay_tile_t * ctx,
2537 : fd_stem_context_t * stem,
2538 0 : fd_tower_slot_confirmed_t const * msg ) {
2539 :
2540 0 : fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->block_id, NULL, ctx->block_id_arr );
2541 0 : if( FD_UNLIKELY( !block_id_ele ) ) {
2542 0 : FD_BASE58_ENCODE_32_BYTES( msg->block_id.key, block_id_b58 );
2543 0 : FD_LOG_WARNING(( "no bank index known for confirmed block_id: %s level %d", block_id_b58, msg->level ));
2544 0 : return;
2545 0 : }
2546 :
2547 0 : ulong bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
2548 0 : fd_bank_t bank_[1]; fd_bank_t * bank = fd_banks_bank_query( bank_, ctx->banks, bank_idx );
2549 :
2551 0 : if( FD_UNLIKELY( !bank ) ) {
2552 0 : FD_BASE58_ENCODE_32_BYTES( msg->block_id.key, block_id_cstr );
2553 0 : FD_LOG_WARNING(( "failed to query optimistically confirmed bank for block id %s", block_id_cstr ));
2554 0 : return;
2555 0 : }
2556 :
2557 0 : if( ctx->rpc_enabled ) {
2558 0 : bank->data->refcnt++;
2559 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for rpc", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
2560 0 : }
2561 :
2562 0 : fd_replay_oc_advanced_t * replay_msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
2563 0 : replay_msg->bank_idx = bank_idx;
2564 0 : replay_msg->slot = msg->slot;
2565 :
2566 0 : fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_OC_ADVANCED, ctx->replay_out->chunk, sizeof(fd_replay_oc_advanced_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
2567 0 : ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_oc_advanced_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
2568 0 : }
2569 :
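: /* returnable_frag dispatches one incoming frag according to the kind
: of input link it arrived on. It always returns 0 here: every frag
: is fully consumed on the first attempt. */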
2570 : static inline int
2571 : returnable_frag( fd_replay_tile_t * ctx,
2572 : ulong in_idx,
2573 : ulong seq,
2574 : ulong sig,
2575 : ulong chunk,
2576 : ulong sz,
2577 : ulong ctl,
2578 : ulong tsorig,
2579 : ulong tspub,
2580 0 : fd_stem_context_t * stem ) {
2581 0 : (void)seq;
2582 0 : (void)ctl;
2583 0 : (void)tsorig;
2584 0 : (void)tspub;
2585 :
2586 0 : if( FD_UNLIKELY( sz!=0UL && (chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>ctx->in[ in_idx ].mtu ) ) )
2587 0 : FD_LOG_ERR(( "chunk %lu %lu from in %d corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in_kind[ in_idx ], ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
2588 :
2589 0 : switch( ctx->in_kind[in_idx] ) {
2590 0 : case IN_KIND_GENESIS: {
2591 0 : fd_genesis_meta_t const * meta = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2592 0 : ctx->has_genesis_hash = 1;
2593 0 : ctx->has_genesis_timestamp = 1;
2594 0 : ctx->genesis_timestamp = meta->creation_time_millis;
2595 0 : *ctx->genesis_hash = meta->genesis_hash;
2596 0 : if( FD_LIKELY( meta->bootstrap ) ) {
2597 0 : boot_genesis( ctx, stem, meta );
2598 0 : } else {
2599 0 : uchar const * genesis_blob = (uchar const *)( meta+1 );
2600 0 : FD_TEST( fd_genesis_parse( ctx->genesis, genesis_blob, meta->blob_sz ) );
2601 0 : }
2603 0 : ctx->genesis_timestamp = ctx->genesis->creation_time;
2604 :
2605 0 : maybe_verify_cluster_type( ctx );
2606 0 : maybe_verify_shred_version( ctx );
2607 0 : maybe_verify_genesis_timestamp( ctx );
2608 0 : break;
2609 0 : }
2610 0 : case IN_KIND_IPECHO: {
2611 0 : FD_TEST( sig && sig<=USHORT_MAX );
2612 0 : ctx->ipecho_shred_version = (ushort)sig;
2613 0 : maybe_verify_shred_version( ctx );
2614 0 : break;
2615 0 : }
2616 0 : case IN_KIND_SNAP: {
2617 0 : on_snapshot_message( ctx, stem, in_idx, chunk, sig );
2618 0 : maybe_verify_shred_version( ctx );
2619 0 : maybe_verify_genesis_timestamp( ctx );
2620 0 : break;
2621 0 : }
2622 0 : case IN_KIND_EXECRP: {
2623 0 : process_exec_task_done( ctx, stem, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ), sig );
2624 0 : break;
2625 0 : }
2626 0 : case IN_KIND_POH: {
2627 0 : process_poh_message( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2628 0 : break;
2629 0 : }
2630 0 : case IN_KIND_RESOLV: {
2631 0 : fd_resolv_slot_exchanged_t * exchanged_slot = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2632 0 : process_resolv_slot_completed( ctx, exchanged_slot->bank_idx );
2633 0 : break;
2634 0 : }
2635 0 : case IN_KIND_TOWER: {
2636 0 : if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_DONE ) ) {
2637 0 : process_tower_slot_done( ctx, stem, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ), seq );
2638 0 : } else if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_CONFIRMED ) ) {
2639 0 : fd_tower_slot_confirmed_t const * msg = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2640 0 : if( msg->level==FD_TOWER_SLOT_CONFIRMED_OPTIMISTIC && !msg->fwd ) process_tower_optimistic_confirmed( ctx, stem, msg );
2641 0 : if( msg->level==FD_TOWER_SLOT_CONFIRMED_DUPLICATE ) fd_reasm_confirm( ctx->reasm, &msg->block_id );
2642 0 : } else if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_IGNORED ) ) {
2643 0 : fd_tower_slot_ignored_t const * msg = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
2644 0 : fd_tower_slot_done_t ignored = {
2645 0 : .replay_slot = msg->slot,
2646 0 : .replay_bank_idx = msg->bank_idx,
2647 0 : .vote_slot = ULONG_MAX,
2648 0 : .reset_slot = ctx->reset_slot, /* Use most recent reset slot */
2649 0 : .reset_block_id = ctx->reset_block_id,
2650 0 : .root_slot = ULONG_MAX
2651 0 : };
2652 0 : process_tower_slot_done( ctx, stem, &ignored, seq );
2653 0 : }
2654 0 : break;
2655 0 : }
2656 0 : case IN_KIND_SHRED: {
2657 : /* TODO: The exact message format and size for this link should be formally defined. */
2658 0 : if( sz!=0 && fd_disco_shred_out_msg_type( sig )==FD_SHRED_OUT_MSG_TYPE_FEC ) {
2659 : /* Handle a FEC-set-complete message. */
2660 0 : process_fec_complete( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2661 0 : }
2662 0 : break;
2663 0 : }
2664 0 : case IN_KIND_TXSEND: {
2665 0 : process_vote_txn_sent( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
2666 0 : break;
2667 0 : }
2668 0 : case IN_KIND_GOSSIP_OUT: {
2669 0 : FD_TEST( sig==FD_GOSSIP_UPDATE_TAG_WFS_DONE );
2670 0 : ctx->wfs_complete = 1;
2671 0 : FD_LOG_NOTICE(( "Done waiting for supermajority. More than 80 percent of cluster stake has joined." ));
2672 0 : break;
2673 0 : }
2674 0 : case IN_KIND_RPC:
2675 0 : case IN_KIND_GUI: {
2676 0 : fd_bank_t bank[1];
2677 0 : FD_TEST( fd_banks_bank_query( bank, ctx->banks, sig ) );
2678 0 : bank->data->refcnt--;
2679 0 : FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt decremented to %lu for %s", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt, ctx->in_kind[ in_idx ]==IN_KIND_RPC ? "rpc" : "gui" ));
2680 0 : break;
2681 0 : }
2682 0 : default:
2683 0 : FD_LOG_ERR(( "unhandled kind %d", ctx->in_kind[ in_idx ] ));
2684 0 : }
2685 :
2686 0 : return 0;
2687 0 : }
2688 :
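: /* out1 locates the tile's single output link named `name` and returns
: its publishing state. If no such link exists, idx is ULONG_MAX and
: callers treat the output as disabled; multiple links with the same
: name are a topology error. */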
2689 : static inline fd_replay_out_link_t
2690 : out1( fd_topo_t const * topo,
2691 : fd_topo_tile_t const * tile,
2692 0 : char const * name ) {
2693 0 : ulong idx = ULONG_MAX;
2694 :
2695 0 : for( ulong i=0UL; i<tile->out_cnt; i++ ) {
2696 0 : fd_topo_link_t const * link = &topo->links[ tile->out_link_id[ i ] ];
2697 0 : if( !strcmp( link->name, name ) ) {
2698 0 : if( FD_UNLIKELY( idx!=ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had multiple output links named %s but expected one", tile->name, tile->kind_id, name ));
2699 0 : idx = i;
2700 0 : }
2701 0 : }
2702 :
2703 0 : if( FD_UNLIKELY( idx==ULONG_MAX ) ) return (fd_replay_out_link_t){ .idx = ULONG_MAX, .mem = NULL, .chunk0 = 0, .wmark = 0, .chunk = 0 };
2704 :
2705 0 : void * mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ idx ] ].dcache_obj_id ].wksp_id ].wksp;
2706 0 : ulong chunk0 = fd_dcache_compact_chunk0( mem, topo->links[ tile->out_link_id[ idx ] ].dcache );
2707 0 : ulong wmark = fd_dcache_compact_wmark ( mem, topo->links[ tile->out_link_id[ idx ] ].dcache, topo->links[ tile->out_link_id[ idx ] ].mtu );
2708 :
2709 0 : return (fd_replay_out_link_t){ .idx = idx, .mem = mem, .chunk0 = chunk0, .wmark = wmark, .chunk = chunk0 };
2710 0 : }
2711 :
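: /* privileged_init runs before the tile is sandboxed: load the
: identity (and, for bundles, the vote account) public keys from disk
: and draw the secure RNG seeds the rest of initialization needs. */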
2712 : static void
2713 : privileged_init( fd_topo_t * topo,
2714 0 : fd_topo_tile_t * tile ) {
2715 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
2716 :
2717 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
2718 0 : fd_replay_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_replay_tile_t), sizeof(fd_replay_tile_t) );
2719 :
2720 0 : if( FD_UNLIKELY( !strcmp( tile->replay.identity_key_path, "" ) ) ) FD_LOG_ERR(( "identity_key_path not set" ));
2721 :
2722 0 : ctx->identity_pubkey[ 0 ] = *(fd_pubkey_t const *)fd_type_pun_const( fd_keyload_load( tile->replay.identity_key_path, /* pubkey only: */ 1 ) );
2723 0 : ctx->identity_idx = 0UL;
2724 :
2725 0 : if( FD_UNLIKELY( !tile->replay.bundle.vote_account_path[0] ) ) {
2726 0 : tile->replay.bundle.enabled = 0;
2727 0 : }
2728 :
2729 0 : if( FD_UNLIKELY( tile->replay.bundle.enabled ) ) {
2730 0 : if( FD_UNLIKELY( !fd_base58_decode_32( tile->replay.bundle.vote_account_path, ctx->bundle.vote_account.uc ) ) ) {
2731 0 : const uchar * vote_key = fd_keyload_load( tile->replay.bundle.vote_account_path, /* pubkey only: */ 1 );
2732 0 : fd_memcpy( ctx->bundle.vote_account.uc, vote_key, 32UL );
2733 0 : }
2734 0 : }
2735 :
2736 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->reasm_seed, sizeof(ulong) ) ) ) {
2737 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2738 0 : }
2739 :
2740 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->vote_tracker_seed, sizeof(ulong) ) ) ) {
2741 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2742 0 : }
2743 :
2744 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->block_id_map_seed, sizeof(ulong) ) ) ) {
2745 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2746 0 : }
2747 :
2748 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->initial_block_id, sizeof(fd_hash_t) ) ) ) {
2749 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2750 0 : }
2751 :
2752 0 : if( FD_UNLIKELY( !fd_rng_secure( &ctx->runtime_stack_seed, sizeof(ulong) ) ) ) {
2753 0 : FD_LOG_CRIT(( "fd_rng_secure failed" ));
2754 0 : }
2755 0 : }
2756 :
2757 : static void
2758 : unprivileged_init( fd_topo_t * topo,
2759 0 : fd_topo_tile_t * tile ) {
2760 0 : void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
2761 :
2762 0 : ulong chain_cnt = fd_block_id_map_chain_cnt_est( tile->replay.max_live_slots );
2763 :
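: /* Carve up the tile's scratch region. The order and sizes here are
: expected to mirror scratch_footprint; the FD_SCRATCH_ALLOC_FINI
: check at the bottom of this function catches any overflow. */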
2764 0 : FD_SCRATCH_ALLOC_INIT( l, scratch );
2765 0 : fd_replay_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_replay_tile_t), sizeof(fd_replay_tile_t) );
2766 0 : void * block_id_arr_mem = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_block_id_ele_t), sizeof(fd_block_id_ele_t) * tile->replay.max_live_slots );
2767 0 : void * block_id_map_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_block_id_map_align(), fd_block_id_map_footprint( chain_cnt ) );
2768 0 : void * _txncache = FD_SCRATCH_ALLOC_APPEND( l, fd_txncache_align(), fd_txncache_footprint( tile->replay.max_live_slots ) );
2769 0 : void * reasm_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_reasm_align(), fd_reasm_footprint( tile->replay.fec_max ) );
2770 0 : void * sched_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_sched_align(), fd_sched_footprint( tile->replay.sched_depth, tile->replay.max_live_slots ) );
2771 0 : void * vinyl_req_pool_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_vinyl_req_pool_align(), fd_vinyl_req_pool_footprint( 1UL, 1UL ) );
2772 0 : void * vote_tracker_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_vote_tracker_align(), fd_vote_tracker_footprint() );
2773 0 : void * _capture_ctx = FD_SCRATCH_ALLOC_APPEND( l, fd_capture_ctx_align(), fd_capture_ctx_footprint() );
2774 0 : void * dump_proto_ctx_mem = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_dump_proto_ctx_t), sizeof(fd_dump_proto_ctx_t) );
2775 0 : # if FD_HAS_FLATCC
2776 0 : void * block_dump_ctx = NULL;
2777 0 : if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
2778 0 : block_dump_ctx = FD_SCRATCH_ALLOC_APPEND( l, fd_block_dump_context_align(), fd_block_dump_context_footprint() );
2779 0 : }
2780 0 : # endif
2781 :
2782 0 : FD_TEST( fd_vote_rewards_map_join( fd_vote_rewards_map_new( ctx->runtime_stack.stakes.vote_map_mem, FD_RUNTIME_EXPECTED_VOTE_ACCOUNTS, ctx->runtime_stack_seed ) ) );
2783 :
2784 0 : ctx->wksp = topo->workspaces[ topo->objs[ tile->tile_obj_id ].wksp_id ].wksp;
2785 :
2786 0 : ulong store_obj_id = fd_pod_query_ulong( topo->props, "store", ULONG_MAX );
2787 0 : FD_TEST( store_obj_id!=ULONG_MAX );
2788 0 : ctx->store = fd_store_join( fd_topo_obj_laddr( topo, store_obj_id ) );
2789 0 : FD_TEST( ctx->store );
2790 :
2791 0 : ulong banks_obj_id = fd_pod_query_ulong( topo->props, "banks", ULONG_MAX );
2792 0 : FD_TEST( banks_obj_id!=ULONG_MAX );
2793 0 : ulong banks_locks_obj_id = fd_pod_query_ulong( topo->props, "banks_locks", ULONG_MAX );
2794 0 : FD_TEST( banks_locks_obj_id!=ULONG_MAX );
2795 :
2796 0 : FD_TEST( fd_banks_join( ctx->banks, fd_topo_obj_laddr( topo, banks_obj_id ), fd_topo_obj_laddr( topo, banks_locks_obj_id ) ) );
2797 :
2798 0 : fd_bank_data_t * bank_pool = fd_banks_get_bank_pool( ctx->banks->data );
2799 0 : FD_MGAUGE_SET( REPLAY, MAX_LIVE_BANKS, fd_banks_pool_max( bank_pool ) );
2800 :
2801 0 : fd_bank_t bank[1];
2802 0 : FD_TEST( fd_banks_init_bank( bank, ctx->banks ) );
2803 0 : fd_bank_slot_set( bank, 0UL );
2804 0 : FD_TEST( bank->data->idx==FD_REPLAY_BOOT_BANK_IDX );
2805 :
2806 0 : ctx->consensus_root_slot = ULONG_MAX;
2807 0 : ctx->consensus_root = ctx->initial_block_id;
2808 0 : ctx->published_root_slot = ULONG_MAX;
2809 :
2810 0 : ctx->expected_shred_version = tile->replay.expected_shred_version;
2811 0 : ctx->ipecho_shred_version = 0;
2812 0 : fd_memcpy( ctx->genesis_path, tile->replay.genesis_path, sizeof(ctx->genesis_path) );
2813 0 : ctx->has_genesis_hash = 0;
2814 0 : ctx->has_genesis_timestamp = 0;
2815 0 : ctx->has_expected_genesis_timestamp = 0;
2816 0 : ctx->cluster_type = FD_CLUSTER_UNKNOWN;
2817 0 : ctx->hard_forks_cnt = ULONG_MAX;
2818 :
2819 0 : if( FD_UNLIKELY( tile->replay.bundle.enabled ) ) {
2820 0 : ctx->bundle.enabled = 1;
2821 0 : if( FD_UNLIKELY( !fd_bundle_crank_gen_init( ctx->bundle.gen,
2822 0 : (fd_acct_addr_t const *)tile->replay.bundle.tip_distribution_program_addr,
2823 0 : (fd_acct_addr_t const *)tile->replay.bundle.tip_payment_program_addr,
2824 0 : (fd_acct_addr_t const *)ctx->bundle.vote_account.uc,
2825 0 : (fd_acct_addr_t const *)ctx->bundle.vote_account.uc, "NAN", 0UL ) ) ) {
2826 0 : FD_LOG_ERR(( "failed to initialize bundle crank gen" ));
2827 0 : }
2828 0 : } else {
2829 0 : ctx->bundle.enabled = 0;
2830 0 : }
2831 :
2832 0 : fd_features_t * features = fd_bank_features_modify( bank );
2833 0 : fd_features_enable_cleaned_up( features );
2834 :
2835 0 : char const * one_off_features[ 16UL ];
2836 0 : FD_TEST( tile->replay.enable_features_cnt<=sizeof(one_off_features)/sizeof(one_off_features[0]) );
2837 0 : for( ulong i=0UL; i<tile->replay.enable_features_cnt; i++ ) one_off_features[ i ] = tile->replay.enable_features[i];
2838 0 : fd_features_enable_one_offs( features, one_off_features, (uint)tile->replay.enable_features_cnt, 0UL );
2839 :
2840 0 : fd_topo_obj_t const * vinyl_data = fd_topo_find_tile_obj( topo, tile, "vinyl_data" );
2841 :
2842 0 : ulong progcache_obj_id; FD_TEST( (progcache_obj_id = fd_pod_query_ulong( topo->props, "progcache", ULONG_MAX ) )!=ULONG_MAX );
2843 0 : ulong progcache_locks_obj_id; FD_TEST( (progcache_locks_obj_id = fd_pod_query_ulong( topo->props, "progcache_locks", ULONG_MAX ) )!=ULONG_MAX );
2844 0 : FD_TEST( fd_progcache_admin_join( ctx->progcache_admin,
2845 0 : fd_topo_obj_laddr( topo, progcache_obj_id ),
2846 0 : fd_topo_obj_laddr( topo, progcache_locks_obj_id ) ) );
2847 :
2848 0 : ulong funk_obj_id; FD_TEST( (funk_obj_id = fd_pod_query_ulong( topo->props, "funk", ULONG_MAX ) )!=ULONG_MAX );
2849 0 : ulong funk_locks_obj_id; FD_TEST( (funk_locks_obj_id = fd_pod_query_ulong( topo->props, "funk_locks", ULONG_MAX ) )!=ULONG_MAX );
2850 0 : ulong max_depth = tile->replay.max_live_slots + tile->replay.write_delay_slots;
2851 0 : if( !vinyl_data ) {
2852 0 : FD_TEST( fd_accdb_admin_v1_init( ctx->accdb_admin,
2853 0 : fd_topo_obj_laddr( topo, funk_obj_id ),
2854 0 : fd_topo_obj_laddr( topo, funk_locks_obj_id ) ) );
2855 0 : } else {
2856 0 : fd_topo_obj_t const * vinyl_rq = fd_topo_find_tile_obj( topo, tile, "vinyl_rq" );
2857 0 : fd_topo_obj_t const * vinyl_req_pool = fd_topo_find_tile_obj( topo, tile, "vinyl_rpool" );
2858 0 : FD_TEST( fd_accdb_admin_v2_init( ctx->accdb_admin,
2859 0 : fd_topo_obj_laddr( topo, funk_obj_id ),
2860 0 : fd_topo_obj_laddr( topo, funk_locks_obj_id ),
2861 0 : fd_topo_obj_laddr( topo, vinyl_rq->id ),
2862 0 : topo->workspaces[ vinyl_data->wksp_id ].wksp,
2863 0 : fd_topo_obj_laddr( topo, vinyl_req_pool->id ),
2864 0 : vinyl_rq->id,
2865 0 : max_depth ) );
2866 0 : fd_accdb_admin_v2_delay_set( ctx->accdb_admin, tile->replay.write_delay_slots );
2867 0 : }
2868 0 : fd_accdb_init_from_topo( ctx->accdb, topo, tile, max_depth );
2869 :
2870 0 : void * _txncache_shmem = fd_topo_obj_laddr( topo, tile->replay.txncache_obj_id );
2871 0 : fd_txncache_shmem_t * txncache_shmem = fd_txncache_shmem_join( _txncache_shmem );
2872 0 : FD_TEST( txncache_shmem );
2873 0 : ctx->txncache = fd_txncache_join( fd_txncache_new( _txncache, txncache_shmem ) );
2874 0 : FD_TEST( ctx->txncache );
2875 :
2876 0 : ctx->capture_ctx = NULL;
2877 0 : if( FD_UNLIKELY( strcmp( "", tile->replay.solcap_capture ) ) ) {
2878 0 : ctx->capture_ctx = fd_capture_ctx_join( fd_capture_ctx_new( _capture_ctx ) );
2879 0 : ctx->capture_ctx->solcap_start_slot = tile->replay.capture_start_slot;
2880 0 : ctx->capture_ctx->capture_solcap = 1;
2881 0 : }
2882 :
2883 0 : ctx->dump_proto_ctx = NULL;
2884 0 : if( FD_UNLIKELY( strcmp( "", tile->replay.dump_proto_dir ) ) ) {
2885 0 : ctx->dump_proto_ctx = dump_proto_ctx_mem;
2886 0 : ctx->dump_proto_ctx->dump_proto_output_dir = tile->replay.dump_proto_dir;
2887 0 : if( FD_LIKELY( tile->replay.dump_block_to_pb ) ) {
2888 0 : ctx->dump_proto_ctx->dump_block_to_pb = !!tile->replay.dump_block_to_pb;
2889 0 : }
2890 0 : }
2891 :
2892 0 : # if FD_HAS_FLATCC
2893 0 : if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
2894 0 : ctx->block_dump_ctx = fd_block_dump_context_join( fd_block_dump_context_new( block_dump_ctx ) );
2895 0 : } else {
2896 0 : ctx->block_dump_ctx = NULL;
2897 0 : }
2898 0 : # endif
2899 :
2900 0 : ctx->is_booted = 0;
2901 :
2902 0 : ctx->larger_max_cost_per_block = tile->replay.larger_max_cost_per_block;
2903 :
2904 0 : ctx->reasm = fd_reasm_join( fd_reasm_new( reasm_mem, tile->replay.fec_max, ctx->reasm_seed ) );
2905 0 : FD_TEST( ctx->reasm );
2906 :
2907 0 : ctx->sched = fd_sched_join( fd_sched_new( sched_mem, tile->replay.sched_depth, tile->replay.max_live_slots, fd_topo_tile_name_cnt( topo, "execrp" ) ) );
2908 0 : FD_TEST( ctx->sched );
2909 :
2910 0 : FD_TEST( fd_vinyl_req_pool_new( vinyl_req_pool_mem, 1UL, 1UL ) );
2911 :
2912 0 : ctx->vote_tracker = fd_vote_tracker_join( fd_vote_tracker_new( vote_tracker_mem, ctx->vote_tracker_seed ) );
2913 0 : FD_TEST( ctx->vote_tracker );
2914 :
2915 0 : ctx->identity_vote_rooted = 0;
2916 :
2917 0 : ctx->wait_for_vote_to_start_leader = tile->replay.wait_for_vote_to_start_leader;
2918 :
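: /* Wait-for-supermajority is enabled iff a nonzero expected bank hash
: was configured; when disabled, it is treated as already complete. */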
2919 0 : ctx->wfs_enabled = memcmp( tile->replay.wait_for_supermajority_with_bank_hash.uc, ((fd_pubkey_t){ 0 }).uc, sizeof(fd_pubkey_t) );
2920 0 : ctx->expected_bank_hash = tile->replay.wait_for_supermajority_with_bank_hash;
2921 0 : ctx->wfs_complete = !ctx->wfs_enabled;
2922 :
2923 0 : ctx->mleaders = fd_multi_epoch_leaders_join( fd_multi_epoch_leaders_new( ctx->mleaders_mem ) );
2924 0 : FD_TEST( ctx->mleaders );
2925 :
2926 0 : ctx->is_leader = 0;
2927 0 : ctx->supports_leader = fd_topo_find_tile( topo, "pack", 0UL )!=ULONG_MAX;
2928 0 : ctx->reset_slot = 0UL;
2929 0 : fd_memset( ctx->reset_bank, 0, sizeof(fd_bank_t) );
2930 0 : ctx->reset_block_id = ctx->initial_block_id;
2931 0 : ctx->reset_timestamp_nanos = 0UL;
2932 0 : ctx->next_leader_slot = ULONG_MAX;
2933 0 : ctx->next_leader_tickcount = LONG_MAX;
2934 0 : ctx->highwater_leader_slot = ULONG_MAX;
2935 0 : ctx->slot_duration_nanos = 350L*1000L*1000L; /* TODO: Not actually fixed; the slot duration is not always 350ms. */
2936 0 : ctx->slot_duration_ticks = (double)ctx->slot_duration_nanos*fd_tempo_tick_per_ns( NULL );
2937 0 : ctx->leader_bank->data = NULL;
2938 :
2939 0 : ctx->block_id_len = tile->replay.max_live_slots;
2940 0 : ctx->block_id_arr = (fd_block_id_ele_t *)block_id_arr_mem;
2941 0 : ctx->block_id_map = fd_block_id_map_join( fd_block_id_map_new( block_id_map_mem, chain_cnt, ctx->block_id_map_seed ) );
2942 0 : FD_TEST( ctx->block_id_map );
2943 0 : for( ulong i=0UL; i<tile->replay.max_live_slots; i++ ) ctx->block_id_arr[ i ].block_id_seen = 0;
2944 :
2945 0 : ctx->resolv_tile_cnt = fd_topo_tile_name_cnt( topo, "resolv" );
2946 :
2947 0 : ctx->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->id_keyswitch_obj_id ) );
2948 0 : FD_TEST( ctx->keyswitch );
2949 0 : ctx->halt_leader = 0;
2950 :
2951 0 : FD_TEST( tile->in_cnt<=sizeof(ctx->in)/sizeof(ctx->in[0]) );
2952 0 : for( ulong i=0UL; i<tile->in_cnt; i++ ) {
2953 0 : fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
2954 0 : fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
2955 :
2956 0 : if( FD_LIKELY( link->dcache ) ) {
2957 0 : ctx->in[ i ].mem = link_wksp->wksp;
2958 0 : ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
2959 0 : ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
2960 0 : ctx->in[ i ].mtu = link->mtu;
2961 0 : }
2962 :
2963 0 : if( !strcmp( link->name, "genesi_out" ) ) ctx->in_kind[ i ] = IN_KIND_GENESIS;
2964 0 : else if( !strcmp( link->name, "ipecho_out" ) ) ctx->in_kind[ i ] = IN_KIND_IPECHO;
2965 0 : else if( !strcmp( link->name, "snapin_manif" ) ) ctx->in_kind[ i ] = IN_KIND_SNAP;
2966 0 : else if( !strcmp( link->name, "execrp_replay" ) ) ctx->in_kind[ i ] = IN_KIND_EXECRP;
2967 0 : else if( !strcmp( link->name, "tower_out" ) ) ctx->in_kind[ i ] = IN_KIND_TOWER;
2968 0 : else if( !strcmp( link->name, "poh_replay" ) ) ctx->in_kind[ i ] = IN_KIND_POH;
2969 0 : else if( !strcmp( link->name, "resolv_replay" ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
2970 0 : else if( !strcmp( link->name, "shred_out" ) ) ctx->in_kind[ i ] = IN_KIND_SHRED;
2971 0 : else if( !strcmp( link->name, "txsend_out" ) ) ctx->in_kind[ i ] = IN_KIND_TXSEND;
2972 0 : else if( !strcmp( link->name, "gui_replay" ) ) ctx->in_kind[ i ] = IN_KIND_GUI;
2973 0 : else if( !strcmp( link->name, "rpc_replay" ) ) ctx->in_kind[ i ] = IN_KIND_RPC;
2974 0 : else if( !strcmp( link->name, "gossip_out" ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP_OUT;
2975 0 : else FD_LOG_ERR(( "unexpected input link name %s", link->name ));
2976 0 : }
2977 :
2978 0 : *ctx->epoch_out = out1( topo, tile, "replay_epoch" ); FD_TEST( ctx->epoch_out->idx!=ULONG_MAX );
2979 0 : *ctx->replay_out = out1( topo, tile, "replay_out" ); FD_TEST( ctx->replay_out->idx!=ULONG_MAX );
2980 0 : *ctx->exec_out = out1( topo, tile, "replay_execrp" ); FD_TEST( ctx->exec_out->idx!=ULONG_MAX );
2981 :
2982 0 : ctx->gui_enabled = fd_topo_find_tile( topo, "gui", 0UL )!=ULONG_MAX;
2983 0 : ctx->rpc_enabled = fd_topo_find_tile( topo, "rpc", 0UL )!=ULONG_MAX;
2984 :
2985 0 : if( FD_UNLIKELY( strcmp( "", tile->replay.solcap_capture ) ) ) {
2986 0 : ulong idx = fd_topo_find_tile_out_link( topo, tile, "cap_repl", 0UL );
2987 0 : FD_TEST( idx!=ULONG_MAX );
2988 0 : fd_topo_link_t * link = &topo->links[ tile->out_link_id[ idx ] ];
2989 :
2991 0 : fd_capture_link_buf_t * cap_repl_out = ctx->cap_repl_out;
2992 0 : cap_repl_out->base.vt = &fd_capture_link_buf_vt;
2993 0 : cap_repl_out->idx = idx;
2994 0 : cap_repl_out->mem = topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ].wksp;
2995 0 : cap_repl_out->chunk0 = fd_dcache_compact_chunk0( cap_repl_out->mem, link->dcache );
2996 0 : cap_repl_out->wmark = fd_dcache_compact_wmark( cap_repl_out->mem, link->dcache, link->mtu );
2997 0 : cap_repl_out->chunk = cap_repl_out->chunk0;
2998 0 : cap_repl_out->mcache = link->mcache;
2999 0 : cap_repl_out->depth = fd_mcache_depth( link->mcache );
3000 0 : cap_repl_out->seq = 0UL;
3001 :
3002 0 : ctx->capture_ctx->capctx_type.buf = cap_repl_out;
3003 0 : ctx->capture_ctx->capture_link = &cap_repl_out->base;
3004 0 : ctx->capture_ctx->current_txn_idx = 0UL;
3005 :
3007 0 : ulong consumer_tile_idx = fd_topo_find_tile( topo, "solcap", 0UL );
3008 0 : fd_topo_tile_t * consumer_tile = &topo->tiles[ consumer_tile_idx ];
3009 0 : cap_repl_out->fseq = NULL;
3010 0 : for( ulong j = 0UL; j < consumer_tile->in_cnt; j++ ) {
3011 0 : if( FD_UNLIKELY( consumer_tile->in_link_id[ j ] == link->id ) ) {
3012 0 : cap_repl_out->fseq = fd_fseq_join( fd_topo_obj_laddr( topo, consumer_tile->in_link_fseq_obj_id[ j ] ) );
3013 0 : FD_TEST( cap_repl_out->fseq );
3014 0 : break;
3015 0 : }
3016 0 : }
3017 0 : }
3018 :
3019 0 : fd_memset( &ctx->metrics, 0, sizeof(ctx->metrics) );
3020 :
3021 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_query_wait, FD_MHIST_SECONDS_MIN( REPLAY, STORE_QUERY_WAIT ),
3022 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_QUERY_WAIT ) ) );
3023 0 : fd_histf_join( fd_histf_new( ctx->metrics.store_query_work, FD_MHIST_SECONDS_MIN( REPLAY, STORE_QUERY_WORK ),
3024 0 : FD_MHIST_SECONDS_MAX( REPLAY, STORE_QUERY_WORK ) ) );
3025 :
3026 0 : fd_histf_join( fd_histf_new( ctx->metrics.root_slot_dur, FD_MHIST_SECONDS_MIN( REPLAY, ROOT_SLOT_DURATION_SECONDS ),
3027 0 : FD_MHIST_SECONDS_MAX( REPLAY, ROOT_SLOT_DURATION_SECONDS ) ) );
3028 0 : fd_histf_join( fd_histf_new( ctx->metrics.root_account_dur, FD_MHIST_SECONDS_MIN( REPLAY, ROOT_ACCOUNT_DURATION_SECONDS ),
3029 0 : FD_MHIST_SECONDS_MAX( REPLAY, ROOT_ACCOUNT_DURATION_SECONDS ) ) );
3030 :
3031 : /* Ensure precompiles are available, crash fast otherwise */
3032 0 : fd_precompiles();
3033 :
3034 0 : ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
3035 0 : if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
3036 0 : FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
3037 0 : }
3038 :
3039 : static ulong
3040 : populate_allowed_seccomp( fd_topo_t const * topo FD_FN_UNUSED,
3041 : fd_topo_tile_t const * tile FD_FN_UNUSED,
3042 : ulong out_cnt,
3043 0 : struct sock_filter * out ) {
3044 :
3045 0 : populate_sock_filter_policy_fd_replay_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
3046 0 : return sock_filter_policy_fd_replay_tile_instr_cnt;
3047 0 : }
3048 :
3049 : static ulong
3050 : populate_allowed_fds( fd_topo_t const * topo FD_FN_UNUSED,
3051 : fd_topo_tile_t const * tile FD_FN_UNUSED,
3052 : ulong out_fds_cnt,
3053 0 : int * out_fds ) {
3054 :
3055 0 : if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
3056 :
3057 0 : ulong out_cnt = 0UL;
3058 0 : out_fds[ out_cnt++ ] = 2; /* stderr */
3059 0 : if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
3060 0 : out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
3061 0 : return out_cnt;
3062 0 : }
3063 :
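: /* Service identity keyswitch requests. A pending switch first halts
: leadering and is applied once we are not leader; an unhalt resumes
: leadering. */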
3064 : static inline void
3065 0 : during_housekeeping( fd_replay_tile_t * ctx ) {
3066 0 : if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_UNHALT_PENDING ) ) {
3067 0 : FD_CRIT( ctx->halt_leader, "state machine corruption" );
3068 0 : FD_LOG_DEBUG(( "keyswitch: unhalting leader" ));
3069 0 : ctx->halt_leader = 0;
3070 0 : fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
3071 0 : }
3072 :
3073 0 : if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
3074 0 : FD_LOG_DEBUG(( "keyswitch: halting leader" ));
3075 0 : ctx->halt_leader = 1;
3076 0 : if( !ctx->is_leader ) maybe_switch_identity( ctx );
3077 0 : }
3078 0 : }
3079 :
3080 : #undef DEBUG_LOGGING
3081 :
3082 : /* counting carefully, after_credit can generate at most 7 frags and
3083 : returnable_frag boot_genesis can also generate at most 7 frags, so 14
3084 : is a conservative bound. */
3085 0 : #define STEM_BURST (14UL)
3086 :
3087 : /* TODO: Calculate this properly / fix stem to work with larger
3088 : numbers of links. The current value was chosen empirically, as
3089 : anything larger slowed down replay times. */
3090 0 : #define STEM_LAZY ((long)10e3)
3091 :
3092 0 : #define STEM_CALLBACK_CONTEXT_TYPE fd_replay_tile_t
3093 0 : #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_replay_tile_t)
3094 :
3095 0 : #define STEM_CALLBACK_METRICS_WRITE metrics_write
3096 0 : #define STEM_CALLBACK_AFTER_CREDIT after_credit
3097 0 : #define STEM_CALLBACK_BEFORE_FRAG before_frag
3098 0 : #define STEM_CALLBACK_RETURNABLE_FRAG returnable_frag
3099 0 : #define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
3100 :
3101 : #include "../../disco/stem/fd_stem.c"
3102 :
3103 : fd_topo_run_tile_t fd_tile_replay = {
3104 : .name = "replay",
3105 : .populate_allowed_seccomp = populate_allowed_seccomp,
3106 : .populate_allowed_fds = populate_allowed_fds,
3107 : .scratch_align = scratch_align,
3108 : .scratch_footprint = scratch_footprint,
3109 : .privileged_init = privileged_init,
3110 : .unprivileged_init = unprivileged_init,
3111 : .run = stem_run,
3112 : };
|