LCOV - code coverage report
Current view: top level - discof/replay - fd_replay_tile.c (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 0 1871 0.0 %
Date: 2026-03-11 05:41:05 Functions: 0 104 0.0 %

          Line data    Source code
       1             : #include "fd_replay_tile.h"
       2             : #include "fd_sched.h"
       3             : #include "fd_execrp.h"
       4             : #include "fd_vote_tracker.h"
       5             : #include "generated/fd_replay_tile_seccomp.h"
       6             : 
       7             : #include "../genesis/fd_genesi_tile.h"
       8             : #include "../poh/fd_poh.h"
       9             : #include "../poh/fd_poh_tile.h"
      10             : #include "../tower/fd_tower_tile.h"
      11             : #include "../resolv/fd_resolv_tile.h"
      12             : #include "../restore/utils/fd_ssload.h"
      13             : 
      14             : #include "../../disco/tiles.h"
      15             : #include "../../disco/fd_txn_m.h"
      16             : #include "../../disco/store/fd_store.h"
      17             : #include "../../disco/shred/fd_fec_set.h"
      18             : #include "../../disco/pack/fd_pack.h"
      19             : #include "../../discof/fd_accdb_topo.h"
      20             : #include "../../discof/reasm/fd_reasm.h"
      21             : #include "../../disco/keyguard/fd_keyload.h"
      22             : #include "../../disco/keyguard/fd_keyswitch.h"
      23             : #include "../../disco/genesis/fd_genesis_cluster.h"
      24             : #include "../../discof/genesis/genesis_hash.h"
      25             : #include "../../util/pod/fd_pod.h"
      26             : #include "../../flamenco/accdb/fd_accdb_admin_v1.h"
      27             : #include "../../flamenco/accdb/fd_accdb_admin_v2.h"
      28             : #include "../../flamenco/accdb/fd_accdb_impl_v1.h"
      29             : #include "../../flamenco/accdb/fd_accdb_sync.h"
      30             : #include "../../flamenco/accdb/fd_vinyl_req_pool.h"
      31             : #include "../../flamenco/rewards/fd_rewards.h"
      32             : #include "../../flamenco/leaders/fd_multi_epoch_leaders.h"
      33             : #include "../../flamenco/progcache/fd_progcache_admin.h"
      34             : #include "../../disco/metrics/fd_metrics.h"
      35             : 
      36             : #include "../../flamenco/fd_flamenco_base.h"
      37             : #include "../../flamenco/runtime/fd_runtime.h"
      38             : #include "../../flamenco/runtime/fd_runtime_stack.h"
      39             : #include "../../flamenco/runtime/fd_genesis_parse.h"
      40             : #include "../../flamenco/runtime/sysvar/fd_sysvar_epoch_schedule.h"
      41             : #include "../../flamenco/runtime/program/fd_precompiles.h"
      42             : #include "../../flamenco/runtime/program/vote/fd_vote_state_versioned.h"
      43             : #include "../../flamenco/runtime/tests/fd_dump_pb.h"
      44             : 
      45             : #include <stdio.h>
      46             : 
      47             : /* Replay concepts:
      48             : 
      49             :    - Blocks are aggregations of entries aka. microblocks which are
      50             :      groupings of txns and are constructed by the block producer (see
      51             :      fd_pack).
      52             : 
      53             :    - Entries are grouped into entry batches by the block producer (see
      54             :      fd_pack / fd_shredder).
      55             : 
      56             :    - Entry batches are divided into chunks known as shreds by the block
      57             :      producer (see fd_shredder).
      58             : 
      59             :    - Shreds are grouped into forward-error-correction sets (FEC sets) by
      60             :      the block producer (see fd_shredder).
      61             : 
      62             :    - Shreds are transmitted to the rest of the cluster via the Turbine
      63             :      protocol (see fd_shredder / fd_shred).
      64             : 
      65             :    - Once enough shreds within a FEC set are received to recover the
      66             :      entirety of the shred data encoded by that FEC set, the receiver
      67             :      can "complete" the FEC set (see fd_fec_resolver).
      68             : 
      69             :    - If shreds in the FEC set are missing such that it can't complete,
      70             :      the receiver can use the Repair protocol to request missing shreds
      71             :      in FEC set (see fd_repair).
      72             : 
      73             :   -  The current Repair protocol does not support requesting coding
      74             :      shreds.  As a result, some FEC sets might be actually complete
      75             :      (contain all data shreds).  Repair currently hacks around this by
      76             :      forcing completion but the long-term solution is to add support for
      77             :      fec_repairing coding shreds via Repair.
      78             : 
  - FEC sets are delivered in partial-order to the Replay tile by the
    Repair tile.  Currently Replay only supports replaying entry batches
    so FEC sets need to be reassembled into an entry batch before they
    can be replayed.  The new Dispatcher will change this by taking a
    FEC set as input instead. */
      84             : 
/* Input link kinds.  Each of this tile's in-links is tagged with one of
   these values (see ctx->in_kind) so that incoming frags can be
   demultiplexed by producer. */
#define IN_KIND_SNAP       ( 0)
#define IN_KIND_GENESIS    ( 1)
#define IN_KIND_IPECHO     ( 2)
#define IN_KIND_TOWER      ( 3)
#define IN_KIND_RESOLV     ( 4)
#define IN_KIND_POH        ( 5)
#define IN_KIND_EXECRP     ( 6)
#define IN_KIND_REPAIR     ( 7)
#define IN_KIND_TXSEND     ( 8)
#define IN_KIND_RPC        ( 9)
#define IN_KIND_GOSSIP_OUT (10)

/* Set to 1 to enable extra debug logging in this tile. */
#define DEBUG_LOGGING 0

/* The first bank that the replay tile produces either for genesis
   or the snapshot boot will always be at bank index 0. */
#define FD_REPLAY_BOOT_BANK_IDX (0UL)
     102             : 
/* fd_replay_in_link describes the shared-memory data region backing one
   of this tile's input links. */
struct fd_replay_in_link {
  fd_wksp_t * mem;    /* workspace containing the link's data region */
  ulong       chunk0; /* lowest valid chunk index into mem */
  ulong       wmark;  /* highest valid chunk index (watermark) */
  ulong       mtu;    /* maximum frag payload size accepted on this link */
};

typedef struct fd_replay_in_link fd_replay_in_link_t;
     111             : 
/* fd_replay_out_link describes one of this tile's output links,
   including the cursor (chunk) for the next frag to publish. */
struct fd_replay_out_link {
  ulong       idx;    /* index of this link in the tile's out-link set */
  fd_wksp_t * mem;    /* workspace containing the link's data region */
  ulong       chunk0; /* lowest valid chunk index into mem */
  ulong       wmark;  /* highest valid chunk index (watermark) */
  ulong       chunk;  /* chunk to use for the next published frag */
};

typedef struct fd_replay_out_link fd_replay_out_link_t;
     121             : 
     122             : /* fd_block_id_map is a simple map of block-ids to bank indices.  The
     123             :    map sits on top of an array of fd_block_id_ele_t.  This serves as a
     124             :    translation layer between block ids to bank indices.  The data
     125             :    array is indexed by bank index and the latest observed merkle root
     126             :    for the bank index is stored in the array.  Once the block id has
     127             :    been observed, the entry is keyed by the latest merkle root (aka the
     128             :    block id). */
     129             : 
/* One entry of the block-id <-> bank-index translation table.  The
   entry's position in the backing array is the bank index (see
   fd_block_id_ele_get_idx). */
struct fd_block_id_ele {
  fd_hash_t latest_mr;      /* latest observed merkle root for this bank index; map key (see MAP_KEY below) */
  uint      latest_fec_idx; /* presumably the index of the latest FEC set observed for this bank -- TODO confirm */
  int       block_id_seen;  /* NOTE(review): looks like a flag set once the final block id is known -- confirm */
  ulong     slot;           /* slot number currently associated with this bank index */
  ulong     next_;          /* internal chain linkage for fd_block_id_map (see MAP_NEXT below) */
};
typedef struct fd_block_id_ele fd_block_id_ele_t;
     138             : 
/* Instantiate a chained hash map (fd_map_chain template) keyed by the
   latest merkle root (aka the block id), with elements stored in the
   external fd_block_id_ele_t array whose position encodes the bank
   index. */
#define MAP_NAME               fd_block_id_map
#define MAP_ELE_T              fd_block_id_ele_t
#define MAP_KEY_T              fd_hash_t
#define MAP_KEY                latest_mr
#define MAP_NEXT               next_
#define MAP_KEY_EQ(k0,k1)      (!memcmp((k0),(k1), sizeof(fd_hash_t)))
#define MAP_KEY_HASH(key,seed) (fd_hash((seed),(key),sizeof(fd_hash_t)))
#include "../../util/tmpl/fd_map_chain.c"
     147             : 
     148             : static inline ulong
     149           0 : fd_block_id_ele_get_idx( fd_block_id_ele_t * ele_arr, fd_block_id_ele_t * ele ) {
     150           0 :   return (ulong)(ele - ele_arr);
     151           0 : }
     152             : 
/* fd_replay_tile is the tile-local context for the replay tile. */
struct fd_replay_tile {
  fd_wksp_t * wksp; /* local workspace backing this tile's scratch memory */

  /* Account database admin/user handles and program cache admin. */
  fd_accdb_admin_t     accdb_admin[1];
  fd_accdb_user_t      accdb[1];
  fd_progcache_admin_t progcache_admin[1];

  fd_txncache_t * txncache;
  fd_store_t *    store;
  fd_banks_t      banks[1]; /* fork-aware bank state (see fd_banks_get_bank_pool usage below) */

  /* This flag is 1 if we have seen a vote signature that our node has
     sent out get rooted at least one time.  The value is 0 otherwise.
     We can't become leader and pack blocks until this flag has been
     set.  This parallels the Agave 'has_new_vote_been_rooted'. */
  int identity_vote_rooted;
  int wait_for_vote_to_start_leader;

  /* wfs_enabled is 1 if the validator is booted in
     wait_for_supermajority mode. In this mode replay (and, by extension,
     downstream consumers) is not allowed to make progress until 80% of
     the cluster has published their ContactInfo in Gossip with a
     shred version matching expected_shred_version. When this happens,
     wfs_complete will be set to 1. */
  int   wfs_enabled;
  int   wfs_complete;

  fd_hash_t expected_bank_hash;

  ulong            reasm_seed;
  fd_reasm_t     * reasm;
  fd_reasm_fec_t * reasm_evicted;       /* evicted FEC by reasm_insert must be stored in returnable_frag, and then drained in after_credit */

  fd_sched_t * sched; /* transaction scheduler/dispatcher */

  ulong                vote_tracker_seed;
  fd_vote_tracker_t *  vote_tracker;

  /* Genesis configuration (path, hash, parsed contents, cluster). */
  int          has_genesis_hash;
  char         genesis_path[ PATH_MAX ];
  fd_hash_t    genesis_hash[1];
  fd_genesis_t genesis[1];
  ulong        cluster_type;

  int   has_genesis_timestamp;
  ulong genesis_timestamp;
  int   has_expected_genesis_timestamp;
  ulong expected_genesis_timestamp;

#define FD_REPLAY_HARD_FORKS_MAX (64UL)
  ulong hard_forks_cnt;
  ulong hard_forks[ FD_REPLAY_HARD_FORKS_MAX ];
  ulong hard_forks_cnts[ FD_REPLAY_HARD_FORKS_MAX ];

  ushort expected_shred_version;
  ushort ipecho_shred_version;

  /* A note on publishing ...

     The watermarks are used to publish our fork-aware structures.  For
     example, store, banks, and txncache need to be published to release
     resources occupied by rooted or dead blocks.  In general,
     publishing has the effect of pruning forks in those structures,
     indicating that it is ok to release the memory being occupied by
     the blocks on said forks.  Tower is responsible for informing us of
     the latest block on the consensus rooted fork.  As soon as we can,
     we should move the published root as close as possible to the
     latest consensus root, publishing/pruning everything on the fork
     tree along the way.  That is, all the blocks that directly descend
     from the current published root (inclusive) to the new published
     root (exclusive) on the rooted fork, as well as all the minority
     forks that branch from said blocks.

     Ideally, we'd move the published root to the consensus root
     immediately upon receiving a new consensus root.  However, that's
     not always safe to do.  One thing we need to be careful about is
     making sure that there are no more users/consumers of
     soon-to-be-pruned blocks, lest a use-after-free occurs.  This can
     be done by using a reference counter for each block.  Any
     concurrent activity, such as transaction execution in the exec
     tiles, should retain a refcnt on the block for as
     long as it needs access to the shared fork-aware structures related
     to that block.  Eventually, refcnt on a given block will drop down
     to 0 as the block either finishes replaying or gets marked as dead,
     and any other tile that has retained a refcnt on the block releases
     it.  At that point, it becomes a candidate for pruning.  The key to
     safe publishing then becomes figuring out how far we could advance
     the published root, such that every minority fork branching off of
     blocks in between the current published root (inclusive) and the
     new published root (exclusive) is safe to be pruned.  This is a
     straightforward tree traversal, where if a block B on the rooted
     fork has refcnt 0, and all minority forks branching off of B also
     have refcnt 0, then B is safe to be pruned.  We advance the
     published root to the farthest consecutively prunable block on the
     rooted fork.  Note that reasm presents the replay tile with a clean
     view of the world where every block is chained off of a parent
     block.  So there are no orphaned/dangling tree nodes to worry
     about.  The world is a nice single tree as far as replay is
     concerned.

     In the following fork tree, every node is a block and the number in
     parentheses is the refcnt on the block.  The chain marked with
     double slashes is the rooted fork.  Suppose the published root is
     at block P, and consensus root is at block T.  We can't publish
     past block P because Q has refcnt 1.


          P(0)
        /    \\
      Q(1)    A(0)
            / ||  \
        X(0) B(0)  C(0)
       /      || \
      Y(0)   M(0) R(0)
            / ||   /  \
        D(2) T(0) J(0) L(0)
              ||
              ..
              ..
              ..
              ||
      blocks we might be actively replaying


     When refcnt on Q drops to 0, we would be able to advance the
     published root to block M, because blocks P, A, and B, as well as
     all subtrees branching off of them, have refcnt 0, and therefore
     can be pruned.  Block M itself cannot be pruned yet because its
     child block D has refcnt 2.  After publishing/pruning, the fork
     tree would be:


             M(0)
            / ||
        D(2) T(0)
              ||
              ..
              ..
              ..
              ||
      blocks we might be actively replaying


     As a result, the shared fork-aware structures can free resources
     for blocks P, A, B, and all subtrees branching off of them.

     For the reference counting part, the replay tile is the sole entity
     that can update the refcnt.  This ensures that all refcnt increment
     and decrement attempts are serialized at the replay tile, and that
     there are no racy resurrection of a soon-to-be-pruned block.  If a
     refcnt increment request arrives after a block has been pruned,
     replay simply rejects the request.

     A note on the implementation of the above ...

     Upon receiving a new consensus root, we descend down the rooted
     fork from the current published root to the new consensus root.  On
     each node/block of the rooted fork, we do a summation of the refcnt
     on the block and all the minority fork blocks branching from the
     block.  If the summation is 0, the block is safe for pruning.  We
     advance the published root to the far end of the consecutive run of
     0 refcnt sums originating from the current published root.  On our
     descent down the minority forks, we also mark any block that hasn't
     finished replaying as dead, so we don't waste time executing them.
     No more transactions shall be dispatched for execution from dead
     blocks.

     Blocks start out with a refcnt of 0.  Other tiles may send a
     request to the replay tile for a reference on a block.  The
     transaction dispatcher is another source of refcnt updates.  On
     every dispatch of a transaction for block B, we increment the
     refcnt for B.  And on every transaction finalization, we decrement
     the refcnt for B.  This means that whenever the refcnt on a block
     is 0, there is no more reference on that block from the execution
     pipeline.  While it might be tempting to simply increment the
     refcnt once when we start replaying a block, and decrement the
     refcnt once when we finish a block, this more fine-grained refcnt
     update strategy allows for aborting and potentially immediate
     pruning of blocks under interleaved block replay.  Upon receiving a
     new consensus root, we can simply look at the refcnt on minority
     fork blocks, and a refcnt of 0 would imply that the block is safe
     for pruning, even if we haven't finished replaying it.  Without the
     fine-grained refcnt, we would need to first stop dispatching from
     the aborted block, and then wait for a full drain of the execution
     pipeline to know for sure that there are no more in-flight
     transactions executing on the aborted block.  Note that this will
     allow the refcnt on any block to transiently drop down to 0.  We
     will not mistakenly prune an actively replaying block, aka a leaf
     node, that is chaining off of the rooted fork, because the
     consensus root is always an ancestor of the actively replaying tip.
     */
  fd_hash_t consensus_root;          /* The most recent block to have reached max lockout in the tower. */
  ulong     consensus_root_slot;     /* slot number of the above. */
  ulong     consensus_root_bank_idx; /* bank index of the above. */
  ulong     published_root_slot;     /* slot number of the published root. */
  ulong     published_root_bank_idx; /* bank index of the published root. */

  /* Randomly generated block id for the initial genesis/snapshot slot.
     To be replaced with block id in the snapshot manifest when SIMD-333
     is activated. */

  fd_hash_t initial_block_id;

  /* We need to maintain a tile-local mapping of block-ids to bank index
     and vice versa.  This translation layer is needed for conversion
     since tower operates on block-ids and downstream consumers of FEC
     sets operate on bank indices.  This mapping must happen both ways:
     1. tower sends us block ids and we must map them to bank indices.
     2. when a block is completed, we must map the bank index to a block
        id to send a slot complete message to tower. */
  ulong               block_id_len;
  fd_block_id_ele_t * block_id_arr;
  ulong               block_id_map_seed;
  fd_block_id_map_t * block_id_map;

  /* Capture-related configs */
  fd_capture_ctx_t *     capture_ctx;
  FILE *                 capture_file;
  fd_capture_link_buf_t  cap_repl_out[1];

  /* Protobuf dumping context for debugging runtime execution and
     collecting seed corpora. */
  fd_dump_proto_ctx_t * dump_proto_ctx;

  /* Whether the runtime has been booted either from snapshot loading
     or from genesis. */
  int is_booted;

  /* Buffer to store vote towers that need to be published to the Tower
     tile.  (NOTE(review): this comment looks stale -- no such buffer
     field follows; confirm and remove or restore the field.) */

  fd_multi_epoch_leaders_t * mleaders;

  int larger_max_cost_per_block;

  /* When we transition to becoming leader, we can only unbecome the
     leader if we have received a block id from the FEC reassembler, and
     a message from PoH that the leader slot has ended.  After both of
     these conditions are met, then we are free to unbecome the leader.
  */
  uint        is_leader : 1;
  uint        supports_leader : 1;
  int         recv_poh;
  ulong       next_leader_slot;
  long        next_leader_tickcount;
  ulong       highwater_leader_slot;
  ulong       reset_slot;
  fd_bank_t   reset_bank[1];
  fd_hash_t   reset_block_id;
  long        reset_timestamp_nanos;
  double      slot_duration_nanos;
  double      slot_duration_ticks;
  fd_bank_t   leader_bank[1];

  fd_pubkey_t      identity_pubkey[1];
  ulong            identity_idx;

  fd_keyswitch_t * keyswitch;
  int              halt_leader;

  ulong  resolv_tile_cnt;

  /* Input links, tagged by IN_KIND_* so frags can be demultiplexed. */
  int in_kind[ 128 ];
  fd_replay_in_link_t in[ 128 ];

  fd_replay_out_link_t exec_out[ 1 ];

  fd_replay_out_link_t replay_out[1];

  fd_replay_out_link_t epoch_out[1];

  /* The rpc tile needs to occasionally own a reference to a live bank.
     Replay needs to know if the rpc as a consumer is enabled so it can
     increment the bank's refcnt before publishing bank_idx. */
  int rpc_enabled;

# if FD_HAS_FLATCC
  /* For dumping blocks to protobuf. For backtest only. */
  fd_block_dump_ctx_t * block_dump_ctx;
# endif

  /* We need a few pieces of information to compute the right addresses
     for bundle crank information that we need to send to pack. */
  struct {
    int                   enabled;
    fd_pubkey_t           vote_account;
    fd_bundle_crank_gen_t gen[1];
  } bundle;

  /* Tile metrics, reported via metrics_write. */
  struct {
    ulong      store_query_acquire;
    ulong      store_query_release;
    fd_histf_t store_query_wait[1];
    fd_histf_t store_query_work[1];
    ulong      store_query_cnt;
    ulong      store_query_missing_cnt;
    ulong      store_query_mr;
    ulong      store_query_missing_mr;

    ulong slots_total;
    ulong transactions_total;

    ulong reasm_latest_slot;
    ulong reasm_latest_fec_idx;

    ulong sched_full;
    ulong reasm_empty;
    ulong leader_bid_wait;
    ulong banks_full;
    ulong storage_root_behind;

    fd_histf_t root_slot_dur[1];
    fd_histf_t root_account_dur[1];
  } metrics;

  uchar __attribute__((aligned(FD_MULTI_EPOCH_LEADERS_ALIGN))) mleaders_mem[ FD_MULTI_EPOCH_LEADERS_FOOTPRINT ];

  ulong              runtime_stack_seed;
  fd_runtime_stack_t runtime_stack;
};

typedef struct fd_replay_tile fd_replay_tile_t;
     475             : 
     476             : FD_FN_CONST static inline ulong
     477           0 : scratch_align( void ) {
     478           0 :   return 128UL;
     479           0 : }
/* scratch_footprint returns the number of scratch bytes the replay tile
   requires for the given tile config.  The FD_LAYOUT_APPEND sequence
   below defines the scratch memory layout and must match the order in
   which the corresponding objects are carved out at init time. */
FD_FN_PURE static inline ulong
scratch_footprint( fd_topo_tile_t const * tile ) {
  /* Number of hash-map chains sized for the max number of live slots. */
  ulong chain_cnt = fd_block_id_map_chain_cnt_est( tile->replay.max_live_slots );

  ulong l = FD_LAYOUT_INIT;
  l = FD_LAYOUT_APPEND( l, alignof(fd_replay_tile_t),    sizeof(fd_replay_tile_t) );
  l = FD_LAYOUT_APPEND( l, alignof(fd_block_id_ele_t),   sizeof(fd_block_id_ele_t) * tile->replay.max_live_slots );
  l = FD_LAYOUT_APPEND( l, fd_block_id_map_align(),      fd_block_id_map_footprint( chain_cnt ) );
  l = FD_LAYOUT_APPEND( l, fd_txncache_align(),          fd_txncache_footprint( tile->replay.max_live_slots ) );
  l = FD_LAYOUT_APPEND( l, fd_reasm_align(),             fd_reasm_footprint( tile->replay.fec_max ) );
  l = FD_LAYOUT_APPEND( l, fd_sched_align(),             fd_sched_footprint( tile->replay.sched_depth, tile->replay.max_live_slots ) );
  l = FD_LAYOUT_APPEND( l, fd_vinyl_req_pool_align(),    fd_vinyl_req_pool_footprint( 1UL, 1UL ) );
  l = FD_LAYOUT_APPEND( l, fd_vote_tracker_align(),      fd_vote_tracker_footprint() );
  l = FD_LAYOUT_APPEND( l, fd_capture_ctx_align(),       fd_capture_ctx_footprint() );
  l = FD_LAYOUT_APPEND( l, alignof(fd_dump_proto_ctx_t), sizeof(fd_dump_proto_ctx_t) );

# if FD_HAS_FLATCC
  /* Block-dump context is only reserved when protobuf block dumping is
     enabled (backtest only, see block_dump_ctx). */
  if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
    l = FD_LAYOUT_APPEND( l, fd_block_dump_context_align(), fd_block_dump_context_footprint() );
  }
# endif

  l = FD_LAYOUT_FINI( l, scratch_align() );

  return l;
}
     506             : 
     507             : static inline void
     508           0 : metrics_write( fd_replay_tile_t * ctx ) {
     509           0 :   FD_MCNT_SET  ( REPLAY, STORE_QUERY_ACQUIRE,      ctx->metrics.store_query_acquire      );
     510           0 :   FD_MCNT_SET  ( REPLAY, STORE_QUERY_RELEASE,      ctx->metrics.store_query_release      );
     511           0 :   FD_MHIST_COPY( REPLAY, STORE_QUERY_WAIT,         ctx->metrics.store_query_wait         );
     512           0 :   FD_MHIST_COPY( REPLAY, STORE_QUERY_WORK,         ctx->metrics.store_query_work         );
     513           0 :   FD_MCNT_SET  ( REPLAY, STORE_QUERY_CNT,          ctx->metrics.store_query_cnt          );
     514           0 :   FD_MCNT_SET  ( REPLAY, STORE_QUERY_MISSING_CNT,  ctx->metrics.store_query_missing_cnt  );
     515           0 :   FD_MGAUGE_SET( REPLAY, STORE_QUERY_MR,           ctx->metrics.store_query_mr           );
     516           0 :   FD_MGAUGE_SET( REPLAY, STORE_QUERY_MISSING_MR,   ctx->metrics.store_query_missing_mr   );
     517             : 
     518           0 :   FD_MGAUGE_SET( REPLAY, ROOT_SLOT, ctx->consensus_root_slot==ULONG_MAX ? 0UL : ctx->consensus_root_slot );
     519           0 :   ulong leader_slot = ctx->leader_bank->data ? fd_bank_slot_get( ctx->leader_bank ) : 0UL;
     520           0 :   FD_MGAUGE_SET( REPLAY, LEADER_SLOT, leader_slot );
     521             : 
     522           0 :   if( FD_LIKELY( ctx->leader_bank->data ) ) {
     523           0 :     FD_MGAUGE_SET( REPLAY, NEXT_LEADER_SLOT, leader_slot );
     524           0 :     FD_MGAUGE_SET( REPLAY, LEADER_SLOT, leader_slot );
     525           0 :   } else {
     526           0 :     FD_MGAUGE_SET( REPLAY, NEXT_LEADER_SLOT, ctx->next_leader_slot==ULONG_MAX ? 0UL : ctx->next_leader_slot );
     527           0 :     FD_MGAUGE_SET( REPLAY, LEADER_SLOT, 0UL );
     528           0 :   }
     529           0 :   FD_MGAUGE_SET( REPLAY, RESET_SLOT, ctx->reset_slot==ULONG_MAX ? 0UL : ctx->reset_slot );
     530             : 
     531           0 :   fd_bank_data_t * bank_pool = fd_banks_get_bank_pool( ctx->banks->data );
     532           0 :   ulong live_banks = fd_banks_pool_max( bank_pool ) - fd_banks_pool_free( bank_pool );
     533           0 :   FD_MGAUGE_SET( REPLAY, LIVE_BANKS, live_banks );
     534             : 
     535           0 :   ulong reasm_free = fd_reasm_free( ctx->reasm );
     536           0 :   FD_MGAUGE_SET( REPLAY, REASM_FREE, reasm_free );
     537             : 
     538           0 :   FD_MCNT_SET( REPLAY, SLOTS_TOTAL, ctx->metrics.slots_total );
     539           0 :   FD_MCNT_SET( REPLAY, TRANSACTIONS_TOTAL, ctx->metrics.transactions_total );
     540             : 
     541           0 :   FD_MGAUGE_SET( REPLAY, REASM_LATEST_SLOT,    ctx->metrics.reasm_latest_slot );
     542           0 :   FD_MGAUGE_SET( REPLAY, REASM_LATEST_FEC_IDX, ctx->metrics.reasm_latest_fec_idx );
     543             : 
     544           0 :   FD_MCNT_SET( REPLAY, SCHED_FULL,          ctx->metrics.sched_full );
     545           0 :   FD_MCNT_SET( REPLAY, REASM_EMPTY,         ctx->metrics.reasm_empty );
     546           0 :   FD_MCNT_SET( REPLAY, LEADER_BID_WAIT,     ctx->metrics.leader_bid_wait );
     547           0 :   FD_MCNT_SET( REPLAY, BANKS_FULL,          ctx->metrics.banks_full );
     548           0 :   FD_MCNT_SET( REPLAY, STORAGE_ROOT_BEHIND, ctx->metrics.storage_root_behind );
     549             : 
     550           0 :   FD_MCNT_SET( REPLAY, PROGCACHE_ROOTED,  ctx->progcache_admin->metrics.root_cnt );
     551           0 :   FD_MCNT_SET( REPLAY, PROGCACHE_GC_ROOT, ctx->progcache_admin->metrics.gc_root_cnt );
     552             : 
     553           0 :   FD_MCNT_SET( REPLAY, ACCDB_CREATED,      ctx->accdb->base.created_cnt       );
     554           0 :   FD_MCNT_SET( REPLAY, ACCDB_REVERTED,     ctx->accdb_admin->base.revert_cnt  );
     555           0 :   FD_MCNT_SET( REPLAY, ACCDB_ROOTED,       ctx->accdb_admin->base.root_cnt    );
     556           0 :   FD_MCNT_SET( REPLAY, ACCDB_ROOTED_BYTES, ctx->accdb_admin->base.root_tot_sz );
     557           0 :   FD_MCNT_SET( REPLAY, ACCDB_GC_ROOT,      ctx->accdb_admin->base.gc_root_cnt );
     558           0 :   FD_MCNT_SET( REPLAY, ACCDB_RECLAIMED,    ctx->accdb_admin->base.reclaim_cnt );
     559           0 :   FD_MHIST_COPY( REPLAY, ROOT_SLOT_DURATION_SECONDS,    ctx->metrics.root_slot_dur    );
     560           0 :   FD_MHIST_COPY( REPLAY, ROOT_ACCOUNT_DURATION_SECONDS, ctx->metrics.root_account_dur );
     561           0 :   FD_MCNT_SET( REPLAY, ROOT_ELAPSED_SECONDS_DB,   (ulong)ctx->accdb_admin->base.dt_vinyl );
     562           0 :   FD_MCNT_SET( REPLAY, ROOT_ELAPSED_SECONDS_COPY, (ulong)ctx->accdb_admin->base.dt_copy  );
     563           0 :   FD_MCNT_SET( REPLAY, ROOT_ELAPSED_SECONDS_GC,   (ulong)ctx->accdb_admin->base.dt_gc    );
     564           0 : }
     565             : 
/* generate_epoch_info_msg fills epoch_info_msg with everything a
   downstream consumer needs to derive the leader schedule for `epoch`:
   epoch bounds from epoch_schedule, the feature set, and one
   (stake, identity key, vote key) entry per staked vote account drawn
   from vote_stakes at fork index vote_stakes_fork_idx.  current_epoch
   selects which of the two tracked stake snapshots is read (the t_1
   snapshot when non-zero, t_2 otherwise).  Returns the size in bytes
   of the populated message. */
static inline ulong
generate_epoch_info_msg( ulong                       slot,
                         ulong                       epoch,
                         fd_epoch_schedule_t const * epoch_schedule,
                         fd_top_votes_t const *      top_votes,
                         fd_vote_stakes_t *          vote_stakes,
                         ushort                      vote_stakes_fork_idx,
                         fd_features_t const *       features,
                         fd_epoch_info_msg_t *       epoch_info_msg,
                         int                         current_epoch ) {
  fd_vote_stake_weight_t * stake_weights = epoch_info_msg->weights;

  epoch_info_msg->epoch             = epoch;
  epoch_info_msg->start_slot        = fd_epoch_slot0( epoch_schedule, epoch );
  epoch_info_msg->slot_cnt          = fd_epoch_slot_cnt( epoch_schedule, epoch );
  epoch_info_msg->excluded_stake    = 0UL;
  epoch_info_msg->vote_keyed_lsched = 1UL;

  /* FIXME: SIMD-0180 - hack to (de)activate in testnet vs mainnet.
     This code can be removed once the feature is active.
     NOTE(review): warmup==1 appears to be used here as a proxy for
     "testnet-style epoch schedule" -- confirm against cluster
     genesis configuration. */
  if( (1==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_TESTNET) ||
      (0==epoch_schedule->warmup && epoch<FD_SIMD0180_ACTIVE_EPOCH_MAINNET) ) {
    epoch_info_msg->vote_keyed_lsched = 0UL;
  }

  ulong idx = 0UL; /* count of stake weight entries written so far */

  if( FD_FEATURE_ACTIVE( slot, features, validator_admission_ticket ) ) {

    /* Admission-ticket path: iterate only the top voted accounts and
       look up each one's stake in the selected snapshot. */
    uchar __attribute__((aligned(FD_TOP_VOTES_ITER_ALIGN))) iter_mem[ FD_TOP_VOTES_ITER_FOOTPRINT ];
    for( fd_top_votes_iter_t * iter = fd_top_votes_iter_init( top_votes, iter_mem );
         !fd_top_votes_iter_done( top_votes, iter );
         fd_top_votes_iter_next( top_votes, iter ) ) {
      fd_pubkey_t pubkey;
      fd_top_votes_iter_ele( top_votes, iter, &pubkey, NULL, NULL, NULL, NULL );

      ulong       stake;
      fd_pubkey_t node_account;
      int         found;
      if( current_epoch ) {
        found = fd_vote_stakes_query_t_1( vote_stakes, vote_stakes_fork_idx, &pubkey, &stake, &node_account );
      } else {
        found = fd_vote_stakes_query_t_2( vote_stakes, vote_stakes_fork_idx, &pubkey, &stake, &node_account );
      }
      /* A top-voted account may not be present in this snapshot. */
      if( FD_UNLIKELY( !found ) ) continue;

      stake_weights[ idx ].stake = stake;
      memcpy( stake_weights[ idx ].id_key.uc, &node_account, sizeof(fd_pubkey_t) );
      memcpy( stake_weights[ idx ].vote_key.uc, &pubkey, sizeof(fd_pubkey_t) );
      idx++;
    }
  } else {
    /* Legacy path: iterate every vote account in the fork's stake
       snapshot, skipping zero-stake entries. */
    uchar __attribute__((aligned(FD_VOTE_STAKES_ITER_ALIGN))) iter_mem[ FD_VOTE_STAKES_ITER_FOOTPRINT ];
    for( fd_vote_stakes_iter_t * iter = fd_vote_stakes_fork_iter_init( vote_stakes, vote_stakes_fork_idx, iter_mem );
         !fd_vote_stakes_fork_iter_done( vote_stakes, vote_stakes_fork_idx, iter );
         fd_vote_stakes_fork_iter_next( vote_stakes, vote_stakes_fork_idx, iter ) ) {

      fd_pubkey_t pubkey;
      ulong       stake_t_1;
      ulong       stake_t_2;
      fd_pubkey_t node_account_t_1;
      fd_pubkey_t node_account_t_2;
      fd_vote_stakes_fork_iter_ele( vote_stakes, vote_stakes_fork_idx, iter, &pubkey, &stake_t_1, &stake_t_2, &node_account_t_1, &node_account_t_2 );

      /* Pick the snapshot requested by the caller. */
      ulong       stake        = current_epoch ? stake_t_1 : stake_t_2;
      fd_pubkey_t node_account = current_epoch ? node_account_t_1 : node_account_t_2;
      if( FD_UNLIKELY( !stake ) ) continue;

      stake_weights[ idx ].stake = stake;
      memcpy( stake_weights[ idx ].id_key.uc, &node_account, sizeof(fd_pubkey_t) );
      memcpy( stake_weights[ idx ].vote_key.uc, &pubkey, sizeof(fd_pubkey_t) );
      idx++;
    }
  }

  epoch_info_msg->staked_cnt = idx;
  /* Weights are sorted in place by (stake, vote key) before
     publishing. */
  sort_vote_weights_by_stake_vote_inplace( stake_weights, idx );

  epoch_info_msg->epoch_schedule = *epoch_schedule;
  epoch_info_msg->features       = *features;

  return fd_epoch_info_msg_sz( epoch_info_msg->staked_cnt );
}
     649             : 
     650             : static void
     651             : publish_epoch_info( fd_replay_tile_t *   ctx,
     652             :                     fd_stem_context_t *  stem,
     653             :                     fd_bank_t *          bank,
     654           0 :                     int                  current_epoch ) {
     655           0 :   fd_epoch_schedule_t const * schedule = fd_bank_epoch_schedule_query( bank );
     656           0 :   ulong epoch = fd_slot_to_epoch( schedule, fd_bank_slot_get( bank ), NULL );
     657             : 
     658           0 :   fd_features_t const * features = fd_bank_features_query( bank );
     659             : 
     660           0 :   fd_epoch_info_msg_t * epoch_info_msg = fd_chunk_to_laddr( ctx->epoch_out->mem, ctx->epoch_out->chunk );
     661             : 
     662           0 :   fd_vote_stakes_t * vote_stakes = fd_bank_vote_stakes_locking_modify( bank );
     663           0 :   fd_top_votes_t const * top_votes = fd_bank_top_votes_query( bank );
     664           0 :   ulong epoch_info_sz = generate_epoch_info_msg( fd_bank_slot_get( bank ), epoch+fd_ulong_if( current_epoch, 1UL, 0UL), schedule, top_votes, vote_stakes, bank->data->vote_stakes_fork_id, features, epoch_info_msg, current_epoch );
     665           0 :   fd_bank_vote_stakes_end_locking_modify( bank );
     666             : 
     667           0 :   ulong epoch_info_sig = 4UL;
     668           0 :   fd_stem_publish( stem, ctx->epoch_out->idx, epoch_info_sig, ctx->epoch_out->chunk, epoch_info_sz, 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
     669           0 :   ctx->epoch_out->chunk = fd_dcache_compact_next( ctx->epoch_out->chunk, epoch_info_sz, ctx->epoch_out->chunk0, ctx->epoch_out->wmark );
     670             : 
     671           0 :   fd_multi_epoch_leaders_epoch_msg_init( ctx->mleaders, epoch_info_msg );
     672           0 :   fd_multi_epoch_leaders_epoch_msg_fini( ctx->mleaders );
     673             : 
     674           0 : }
     675             : 
     676             : /**********************************************************************/
     677             : /* Transaction execution state machine helpers                        */
     678             : /**********************************************************************/
     679             : 
/* replay_block_start prepares a fresh bank for replaying `slot`:
   clones state from the parent bank, attaches child transactions in
   the txncache, account database and program cache, resets per-block
   counters, runs start-of-block (and possibly epoch-boundary)
   processing, and configures the scheduler's PoH parameters for the
   block.  Any invariant violation (missing bank, bank not in INIT
   state) is fatal. */
static void
replay_block_start( fd_replay_tile_t *  ctx,
                    fd_stem_context_t * stem,
                    ulong               bank_idx,
                    ulong               parent_bank_idx,
                    ulong               slot ) {
  long before = fd_log_wallclock();

  fd_bank_t bank[1];
  if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, bank_idx ) ) ) {
    FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", bank_idx ));
  }
  if( FD_UNLIKELY( bank->data->flags!=FD_BANK_FLAGS_INIT ) ) {
    FD_LOG_CRIT(( "invariant violation: bank is not in correct state for bank index %lu", bank_idx ));
  }

  /* Timestamp used later for per-slot timing metrics. */
  bank->data->preparation_begin_nanos = before;

  fd_bank_t parent_bank[1];
  if( FD_UNLIKELY( !fd_banks_bank_query( parent_bank, ctx->banks, parent_bank_idx ) ) ) {
    FD_LOG_CRIT(( "invariant violation: parent bank is NULL for bank index %lu", parent_bank_idx ));
  }
  ulong parent_slot = fd_bank_slot_get( parent_bank );

  /* Clone the bank from the parent.  We must special case the first
     slot that is executed as the snapshot does not provide a parent
     block id. */

  if( FD_UNLIKELY( !fd_banks_clone_from_parent( bank, ctx->banks, bank_idx ) ) ) {
    FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", bank_idx ));
  }
  fd_bank_slot_set( bank, slot );
  fd_bank_parent_slot_set( bank, parent_slot );
  /* Chain the txncache fork off the parent's fork. */
  bank->data->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, parent_bank->data->txncache_fork_id );

  /* Create a new funk txn for the block.  The xid encodes both the
     slot and the bank index. */

  fd_funk_txn_xid_t xid        = { .ul = { slot, bank_idx } };
  fd_funk_txn_xid_t parent_xid = { .ul = { parent_slot, parent_bank_idx } };
  fd_accdb_attach_child( ctx->accdb_admin, &parent_xid, &xid );
  fd_progcache_txn_attach_child( ctx->progcache_admin, &parent_xid, &xid );

  /* Update required runtime state and handle potential boundary.
     Per-block accumulators start at zero. */

  fd_bank_shred_cnt_set( bank, 0UL );
  fd_bank_execution_fees_set( bank, 0UL );
  fd_bank_priority_fees_set( bank, 0UL );
  fd_bank_tips_set( bank, 0UL );
  fd_bank_identity_vote_idx_set( bank, ULONG_MAX );

  fd_bank_block_height_set( bank, fd_bank_block_height_get( bank ) + 1UL );

  int is_epoch_boundary = 0;
  fd_runtime_block_execute_prepare( ctx->banks, bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
  /* Crossing an epoch boundary requires publishing the new epoch's
     stake weights downstream. */
  if( FD_UNLIKELY( is_epoch_boundary ) ) publish_epoch_info( ctx, stem, bank, 1 );

  ulong max_tick_height;
  if( FD_UNLIKELY( FD_RUNTIME_EXECUTE_SUCCESS!=fd_runtime_compute_max_tick_height( fd_bank_ticks_per_slot_get( parent_bank ), slot, &max_tick_height ) ) ) {
    FD_LOG_CRIT(( "couldn't compute tick height/max tick height slot %lu ticks_per_slot %lu", slot, fd_bank_ticks_per_slot_get( parent_bank ) ));
  }
  fd_bank_max_tick_height_set( bank, max_tick_height );
  fd_bank_tick_height_set( bank, fd_bank_max_tick_height_get( parent_bank ) ); /* The parent's max tick height is our starting tick height. */
  fd_sched_set_poh_params( ctx->sched, bank->data->idx, fd_bank_tick_height_get( bank ), fd_bank_max_tick_height_get( bank ), fd_bank_hashes_per_tick_get( bank ), fd_bank_poh_query( parent_bank ) );

  FD_LOG_DEBUG(( "replay_block_start: bank_idx=%lu slot=%lu parent_bank_idx=%lu", bank_idx, slot, parent_bank_idx ));
}
     746             : 
     747             : static void
     748           0 : cost_tracker_snap( fd_bank_t * bank, fd_replay_slot_completed_t * slot_info ) {
     749           0 :   if( bank->data->cost_tracker_pool_idx!=fd_bank_cost_tracker_pool_idx_null( fd_bank_get_cost_tracker_pool( bank->data ) ) ) {
     750           0 :     fd_cost_tracker_t const * cost_tracker = fd_bank_cost_tracker_locking_query( bank );
     751           0 :     if( FD_UNLIKELY( cost_tracker->block_cost_limit==0UL ) ) {
     752           0 :       memset( &slot_info->cost_tracker, -1 /* ULONG_MAX */, sizeof(slot_info->cost_tracker) );
     753           0 :     } else {
     754           0 :       slot_info->cost_tracker.block_cost                   = cost_tracker->block_cost;
     755           0 :       slot_info->cost_tracker.vote_cost                    = cost_tracker->vote_cost;
     756           0 :       slot_info->cost_tracker.allocated_accounts_data_size = cost_tracker->allocated_accounts_data_size;
     757           0 :       slot_info->cost_tracker.block_cost_limit             = cost_tracker->block_cost_limit;
     758           0 :       slot_info->cost_tracker.vote_cost_limit              = cost_tracker->vote_cost_limit;
     759           0 :       slot_info->cost_tracker.account_cost_limit           = cost_tracker->account_cost_limit;
     760           0 :     }
     761           0 :     fd_bank_cost_tracker_end_locking_query( bank );
     762           0 :   } else {
     763           0 :     memset( &slot_info->cost_tracker, -1 /* ULONG_MAX */, sizeof(slot_info->cost_tracker) );
     764           0 :   }
     765           0 : }
     766             : 
     767             : static ulong
     768           0 : get_identity_balance( fd_replay_tile_t * ctx, fd_funk_txn_xid_t xid ) {
     769           0 :   ulong identity_balance = ULONG_MAX;
     770           0 :   fd_accdb_ro_t identity_acc[1];
     771           0 :   if( FD_LIKELY( fd_accdb_open_ro( ctx->accdb, identity_acc, &xid, ctx->identity_pubkey ) ) ) {
     772           0 :     identity_balance = identity_acc->meta->lamports;
     773           0 :     fd_accdb_close_ro( ctx->accdb, identity_acc );
     774           0 :   }
     775           0 :   return identity_balance;
     776           0 : }
     777             : 
/* publish_slot_completed assembles an fd_replay_slot_completed_t
   message for the given frozen bank and publishes it on the replay
   output link.  is_initial marks the snapshot-loaded first slot
   (which has no parent block id and whose txncache fork is not
   finalized here); is_leader marks slots we produced ourselves.
   Side effect: the bank's refcnt is incremented for each downstream
   consumer (tower, and rpc when enabled) -- those consumers must
   decrement it when done with the bank. */
static void
publish_slot_completed( fd_replay_tile_t *  ctx,
                        fd_stem_context_t * stem,
                        fd_bank_t *         bank,
                        int                 is_initial,
                        int                 is_leader ) {

  ulong slot = fd_bank_slot_get( bank );

  fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ bank->data->idx ];

  /* HACKY: hacky way of checking if we should send a null parent block
     id.  The initial (snapshot) slot has no parent block id, so it is
     left all-zero. */
  fd_hash_t parent_block_id = {0};
  if( FD_UNLIKELY( !is_initial ) ) {
    parent_block_id = ctx->block_id_arr[ bank->data->parent_idx ].latest_mr;
  }

  fd_hash_t const * bank_hash  = fd_bank_bank_hash_query( bank );
  fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
  FD_TEST( bank_hash  );
  FD_TEST( block_hash );

  /* Seal the txncache fork with the block hash (skipped for the
     snapshot slot). */
  if( FD_LIKELY( !is_initial ) ) fd_txncache_finalize_fork( ctx->txncache, bank->data->txncache_fork_id, 0UL, block_hash->uc );

  fd_epoch_schedule_t const * epoch_schedule = fd_bank_epoch_schedule_query( bank );
  ulong slot_idx;
  ulong epoch = fd_slot_to_epoch( epoch_schedule, slot, &slot_idx );

  ctx->metrics.slots_total++;
  /* Note: transactions_total is a cumulative count taken from the
     bank, not an increment. */
  ctx->metrics.transactions_total = fd_bank_txn_count_get( bank );

  /* Build the outgoing message in place in the dcache. */
  fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  slot_info->slot                  = slot;
  slot_info->root_slot             = ctx->consensus_root_slot;
  slot_info->storage_slot          = ctx->published_root_slot;
  slot_info->epoch                 = epoch;
  slot_info->slot_in_epoch         = slot_idx;
  slot_info->slots_per_epoch       = fd_epoch_slot_cnt( epoch_schedule, epoch );
  slot_info->block_height          = fd_bank_block_height_get( bank );
  slot_info->parent_slot           = fd_bank_parent_slot_get( bank );
  slot_info->block_id              = block_id_ele->latest_mr;
  slot_info->parent_block_id       = parent_block_id;
  slot_info->bank_hash             = *bank_hash;
  slot_info->block_hash            = *block_hash;
  slot_info->transaction_count     = fd_bank_txn_count_get( bank );

  fd_inflation_t inflation = fd_bank_inflation_get( bank );
  slot_info->inflation.foundation      = inflation.foundation;
  slot_info->inflation.foundation_term = inflation.foundation_term;
  slot_info->inflation.terminal        = inflation.terminal;
  slot_info->inflation.initial         = inflation.initial;
  slot_info->inflation.taper           = inflation.taper;

  fd_rent_t rent = fd_bank_rent_get( bank );
  slot_info->rent.burn_percent            = rent.burn_percent;
  slot_info->rent.lamports_per_uint8_year = rent.lamports_per_uint8_year;
  slot_info->rent.exemption_threshold     = rent.exemption_threshold;

  /* Per-slot timing breadcrumbs for downstream observability. */
  slot_info->first_fec_set_received_nanos      = bank->data->first_fec_set_received_nanos;
  slot_info->preparation_begin_nanos           = bank->data->preparation_begin_nanos;
  slot_info->first_transaction_scheduled_nanos = bank->data->first_transaction_scheduled_nanos;
  slot_info->last_transaction_finished_nanos   = bank->data->last_transaction_finished_nanos;
  slot_info->completion_time_nanos             = fd_log_wallclock();

  /* refcnt should be incremented by 1 for each consumer that uses
     `bank_idx`.  Each consumer should decrement the bank's refcnt once
     they are done using the bank. */
  bank->data->refcnt++; /* tower_tile */
  if( FD_LIKELY( ctx->rpc_enabled ) ) bank->data->refcnt++; /* rpc tile */
  slot_info->bank_idx = bank->data->idx;
  FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for tower, rpc", bank->data->idx, slot, bank->data->refcnt ));

  /* Transaction counters are cumulative in the bank; per-slot figures
     are the delta against the parent.  Without a parent (e.g. the
     snapshot slot) the fields are ULONG_MAX. */
  fd_bank_t parent_bank[1];
  if( FD_LIKELY( fd_banks_get_parent( parent_bank, ctx->banks, bank ) ) ) {
    ulong total_txn_cnt          = fd_bank_txn_count_get( bank )                 - fd_bank_txn_count_get( parent_bank );
    ulong nonvote_txn_cnt        = fd_bank_nonvote_txn_count_get( bank ) - fd_bank_nonvote_txn_count_get( parent_bank );
    ulong failed_txn_cnt         = fd_bank_failed_txn_count_get( bank )          - fd_bank_failed_txn_count_get( parent_bank );
    ulong nonvote_failed_txn_cnt = fd_bank_nonvote_failed_txn_count_get( bank )  - fd_bank_nonvote_failed_txn_count_get( parent_bank );

    slot_info->nonvote_success = nonvote_txn_cnt - nonvote_failed_txn_cnt;
    slot_info->nonvote_failed  = nonvote_failed_txn_cnt;
    slot_info->vote_failed     = failed_txn_cnt - nonvote_failed_txn_cnt;
    slot_info->vote_success    = total_txn_cnt - nonvote_txn_cnt - slot_info->vote_failed;
  } else {
    slot_info->vote_failed     = ULONG_MAX;
    slot_info->vote_success    = ULONG_MAX;
    slot_info->nonvote_success = ULONG_MAX;
    slot_info->nonvote_failed  = ULONG_MAX;
  }

  slot_info->is_leader = is_leader;
  slot_info->transaction_fee = fd_bank_execution_fees_get( bank );
  slot_info->transaction_fee -= (slot_info->transaction_fee>>1); /* burn: keep fees minus the burned (floor) half */
  slot_info->priority_fee = fd_bank_priority_fees_get( bank );
  slot_info->tips = fd_bank_tips_get( bank );
  slot_info->shred_cnt = fd_bank_shred_cnt_get( bank );

  FD_BASE58_ENCODE_32_BYTES( ctx->block_id_arr[ bank->data->idx ].latest_mr.uc, block_id_cstr );
  FD_BASE58_ENCODE_32_BYTES( fd_bank_bank_hash_query( bank )->uc, bank_hash_cstr );
  FD_LOG_DEBUG(( "publish_slot_completed: bank_idx=%lu slot=%lu bank_hash=%s block_id=%s", bank->data->idx, slot, bank_hash_cstr, block_id_cstr ));

  fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_SLOT_COMPLETED, ctx->replay_out->chunk, sizeof(fd_replay_slot_completed_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
  ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_slot_completed_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
}
     883             : 
     884             : static void
     885             : publish_slot_dead( fd_replay_tile_t *  ctx,
     886             :                    fd_stem_context_t * stem,
     887             :                    ulong               slot,
     888           0 :                    fd_hash_t const *   block_id ) {
     889           0 :   fd_replay_slot_dead_t * slot_dead = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
     890           0 :   slot_dead->slot                   = slot;
     891           0 :   slot_dead->block_id               = *block_id;
     892           0 :   fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_SLOT_DEAD, ctx->replay_out->chunk, sizeof(fd_replay_slot_dead_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
     893           0 :   ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_slot_dead_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
     894           0 : }
     895             : 
     896             : static void
     897             : publish_txn_executed( fd_replay_tile_t *  ctx,
     898             :                       fd_stem_context_t * stem,
     899           0 :                       ulong               txn_idx ) {
     900           0 :   fd_sched_txn_info_t * txn_info = fd_sched_get_txn_info( ctx->sched, txn_idx );
     901           0 :   fd_replay_txn_executed_t * txn_executed = fd_type_pun( fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk ) );
     902           0 :   *txn_executed->txn = *fd_sched_get_txn( ctx->sched, txn_idx );
     903           0 :   txn_executed->txn_err = txn_info->txn_err;
     904           0 :   txn_executed->is_committable = !!(txn_info->flags&FD_SCHED_TXN_IS_COMMITTABLE);
     905           0 :   txn_executed->is_fees_only = !!(txn_info->flags&FD_SCHED_TXN_IS_FEES_ONLY);
     906           0 :   txn_executed->tick_parsed = txn_info->tick_parsed;
     907           0 :   txn_executed->tick_sigverify_disp = txn_info->tick_sigverify_disp;
     908           0 :   txn_executed->tick_sigverify_done = txn_info->tick_sigverify_done;
     909           0 :   txn_executed->tick_exec_disp = txn_info->tick_exec_disp;
     910           0 :   txn_executed->tick_exec_done = txn_info->tick_exec_done;
     911           0 :   fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_TXN_EXECUTED, ctx->replay_out->chunk, sizeof(*txn_executed), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
     912           0 :   ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(*txn_executed), ctx->replay_out->chunk0, ctx->replay_out->wmark );
     913           0 : }
     914             : 
/* replay_block_finalize performs end-of-block processing for a replayed
   (non-leader) block: it seals the PoH hash and shred count into the
   bank, runs the runtime's block finalize step, snapshots cost-tracker
   stats into the outgoing slot-completed message, freezes the bank, and
   publishes the slot-completed notification downstream.  Must be called
   exactly once per bank, before the bank is frozen. */
static void
replay_block_finalize( fd_replay_tile_t *  ctx,
                       fd_stem_context_t * stem,
                       fd_bank_t *         bank ) {
  /* Timestamp used downstream for replay timing metrics. */
  bank->data->last_transaction_finished_nanos = fd_log_wallclock();

  /* A bank must not be finalized twice. */
  FD_TEST( !(bank->data->flags&FD_BANK_FLAGS_FROZEN) );

  /* Set poh hash in bank. */
  fd_hash_t * poh = fd_sched_get_poh( ctx->sched, bank->data->idx );
  fd_bank_poh_set( bank, *poh );

  /* Set shred count in bank. */
  fd_bank_shred_cnt_set( bank, fd_sched_get_shred_cnt( ctx->sched, bank->data->idx ) );

  /* Do hashing and other end-of-block processing. */
  fd_runtime_block_execute_finalize( bank, ctx->accdb, ctx->capture_ctx );

  /* Copy out cost tracker fields before freezing.  NOTE(review):
     slot_info points into the outgoing dcache chunk; it is presumably
     published by publish_slot_completed below — confirm that helper
     publishes this same chunk. */
  fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  cost_tracker_snap( bank, slot_info );

  /* fetch identity / vote balance updates infrequently */
  ulong slot = fd_bank_slot_get( bank );
  fd_funk_txn_xid_t xid = { .ul = { slot, bank->data->idx } };
  /* ULONG_MAX is the "not sampled this slot" sentinel. */
  slot_info->identity_balance = FD_UNLIKELY( slot%4096==0UL ) ? get_identity_balance( ctx, xid ) : ULONG_MAX;

  /* Mark the bank as frozen. */
  fd_banks_mark_bank_frozen( ctx->banks, bank );

  /**********************************************************************/
  /* Bank hash comparison, and halt if there's a mismatch after replay  */
  /**********************************************************************/

  /* Freezing must have produced a bank hash. */
  fd_hash_t const * bank_hash = fd_bank_bank_hash_query( bank );
  FD_TEST( bank_hash );

  /* Must be last so we can measure completion time correctly, even
     though we could technically do this before the hash cmp and vote
     tower stuff. */
  publish_slot_completed( ctx, stem, bank, 0, 0 /* is_leader */ );

# if FD_HAS_FLATCC
  /* If enabled, dump the block to a file and reset the dumping
     context state */
  if( FD_UNLIKELY( ctx->dump_proto_ctx && ctx->dump_proto_ctx->dump_block_to_pb ) ) {
    fd_dump_block_to_protobuf( ctx->block_dump_ctx, ctx->banks, bank, ctx->accdb, ctx->dump_proto_ctx );
    fd_block_dump_context_reset( ctx->block_dump_ctx );
  }
# endif
}
     966             : 
     967             : /**********************************************************************/
     968             : /* Leader bank management                                             */
     969             : /**********************************************************************/
     970             : 
/* prepare_leader_bank sets up ctx->leader_bank as a fresh child of the
   bank identified by parent_block_id, ready for the validator's own
   leader slot `slot`.  It resolves the parent bank index from the block
   id map, creates and clones a child bank, attaches matching child
   transactions in the txncache, account database and program cache,
   zeroes per-slot fee/shred counters, runs the runtime's block prepare
   step (publishing epoch info if this slot crosses an epoch boundary),
   derives tick heights from the parent, and takes a reference on the
   bank that is held until the leader slot completes (see
   fini_leader_bank).  Returns ctx->leader_bank.  Crashes on any
   invariant violation (missing parent, bank pool exhaustion, etc.). */
static fd_bank_t *
prepare_leader_bank( fd_replay_tile_t *  ctx,
                     ulong               slot,
                     long                now,
                     fd_hash_t const *   parent_block_id,
                     fd_stem_context_t * stem ) {
  long before = fd_log_wallclock();

  /* Make sure that we are not already leader. */
  FD_TEST( ctx->leader_bank->data==NULL );

  /* Map the parent's merkle root (block id) to its bank index. */
  fd_block_id_ele_t * parent_ele = fd_block_id_map_ele_query( ctx->block_id_map, parent_block_id, NULL, ctx->block_id_arr );
  if( FD_UNLIKELY( !parent_ele ) ) {
    FD_BASE58_ENCODE_32_BYTES( parent_block_id->key, parent_block_id_b58 );
    FD_LOG_CRIT(( "invariant violation: parent bank index not found for merkle root %s", parent_block_id_b58 ));
  }
  ulong parent_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, parent_ele );

  fd_bank_t parent_bank[1];
  if( FD_UNLIKELY( !fd_banks_bank_query( parent_bank, ctx->banks, parent_bank_idx ) ) ) {
    FD_LOG_CRIT(( "invariant violation: parent bank not found for bank index %lu", parent_bank_idx ));
  }
  ulong parent_slot = fd_bank_slot_get( parent_bank );

  /* Allocate the leader bank as a child of the parent, then copy the
     parent's state into it. */
  if( FD_UNLIKELY( !fd_banks_new_bank( ctx->leader_bank, ctx->banks, parent_bank_idx, now ) ) ) {
    FD_LOG_CRIT(( "invariant violation: leader bank is NULL for slot %lu", slot ));
  }

  if( FD_UNLIKELY( !fd_banks_clone_from_parent( ctx->leader_bank, ctx->banks, ctx->leader_bank->data->idx ) ) ) {
    FD_LOG_CRIT(( "invariant violation: bank is NULL for slot %lu", slot ));
  }

  ctx->leader_bank->data->preparation_begin_nanos = before;

  fd_bank_slot_set( ctx->leader_bank, slot );
  fd_bank_parent_slot_set( ctx->leader_bank, parent_slot );
  ctx->leader_bank->data->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, parent_bank->data->txncache_fork_id );
  /* prepare the funk transaction for the leader bank */
  fd_funk_txn_xid_t xid        = { .ul = { slot, ctx->leader_bank->data->idx } };
  fd_funk_txn_xid_t parent_xid = { .ul = { parent_slot, parent_bank_idx } };
  fd_accdb_attach_child( ctx->accdb_admin, &parent_xid, &xid );
  fd_progcache_txn_attach_child( ctx->progcache_admin, &parent_xid, &xid );

  /* Reset per-slot accumulators inherited from the cloned parent. */
  fd_bank_execution_fees_set( ctx->leader_bank, 0UL );
  fd_bank_priority_fees_set( ctx->leader_bank, 0UL );
  fd_bank_shred_cnt_set( ctx->leader_bank, 0UL );
  fd_bank_tips_set( ctx->leader_bank, 0UL );
  fd_bank_identity_vote_idx_set( ctx->leader_bank, ULONG_MAX );

  /* Update block height. */
  fd_bank_block_height_set( ctx->leader_bank, fd_bank_block_height_get( ctx->leader_bank ) + 1UL );

  int is_epoch_boundary = 0;
  fd_runtime_block_execute_prepare( ctx->banks, ctx->leader_bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
  if( FD_UNLIKELY( is_epoch_boundary ) ) publish_epoch_info( ctx, stem, ctx->leader_bank, 1 );

  ulong max_tick_height;
  if( FD_UNLIKELY( FD_RUNTIME_EXECUTE_SUCCESS!=fd_runtime_compute_max_tick_height( fd_bank_ticks_per_slot_get( parent_bank ), slot, &max_tick_height ) ) ) {
    FD_LOG_CRIT(( "couldn't compute tick height/max tick height slot %lu ticks_per_slot %lu", slot, fd_bank_ticks_per_slot_get( parent_bank ) ));
  }
  fd_bank_max_tick_height_set( ctx->leader_bank, max_tick_height );
  fd_bank_tick_height_set( ctx->leader_bank, fd_bank_max_tick_height_get( parent_bank ) ); /* The parent's max tick height is our starting tick height. */

  /* Now that a bank has been created for the leader slot, increment the
     reference count until we are done with the leader slot. */
  ctx->leader_bank->data->refcnt++;

  return ctx->leader_bank;
}
    1040             : 
    1041             : static inline void
    1042           0 : maybe_switch_identity( fd_replay_tile_t * ctx ) {
    1043             : 
    1044           0 :   if( FD_LIKELY( fd_keyswitch_state_query( ctx->keyswitch )!=FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) return;
    1045             : 
    1046             :   /* Switch identity */
    1047             : 
    1048           0 :   FD_LOG_DEBUG(( "keyswitch: switching identity" ));
    1049             : 
    1050           0 :   memcpy( ctx->identity_pubkey, ctx->keyswitch->bytes, 32UL );
    1051           0 :   fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
    1052             : 
    1053             :   /* The next leader slot will be incorrect now that the identity has
    1054             :      switched.  The next leader slot normally gets updated based on the
    1055             :      reset slot returned by tower. */
    1056           0 :   ulong min_leader_slot = fd_ulong_max( ctx->reset_slot+1UL, fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot+1UL ) );
    1057           0 :   ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, min_leader_slot, ctx->identity_pubkey );
    1058           0 :   if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
    1059           0 :     ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
    1060           0 :   } else {
    1061           0 :     ctx->next_leader_tickcount = LONG_MAX;
    1062           0 :   }
    1063             : 
    1064           0 :   ctx->identity_vote_rooted = 0;
    1065           0 :   ctx->identity_idx++;
    1066           0 :   fd_vote_tracker_reset( ctx->vote_tracker );
    1067           0 : }
    1068             : 
/* fini_leader_bank completes the validator's own leader slot: runs the
   runtime's block finalize step, snapshots cost-tracker stats into the
   outgoing slot-completed message, freezes the leader bank, publishes
   the slot-completed notification, releases the reference taken in
   prepare_leader_bank, and clears leader state.  Preconditions: we are
   leader, the leader bank exists, its block id has been observed, and
   the PoH slot-ended message has been received. */
static void
fini_leader_bank( fd_replay_tile_t *  ctx,
                  fd_stem_context_t * stem ) {

  FD_TEST( ctx->leader_bank->data!=NULL );
  FD_TEST( ctx->is_leader );
  FD_TEST( ctx->block_id_arr[ ctx->leader_bank->data->idx ].block_id_seen );
  FD_TEST( ctx->recv_poh );

  /* Timestamp used downstream for leader-slot timing metrics. */
  ctx->leader_bank->data->last_transaction_finished_nanos = fd_log_wallclock();

  ulong curr_slot = fd_bank_slot_get( ctx->leader_bank );

  /* Tell the scheduler this block is done being built. */
  fd_sched_block_add_done( ctx->sched, ctx->leader_bank->data->idx, ctx->leader_bank->data->parent_idx, curr_slot );

  fd_runtime_block_execute_finalize( ctx->leader_bank, ctx->accdb, ctx->capture_ctx );

  /* Copy out cost tracker fields into the outgoing dcache chunk before
     freezing; identity balance is only sampled every 4096 slots
     (ULONG_MAX is the "not sampled" sentinel). */
  fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  cost_tracker_snap( ctx->leader_bank, slot_info );
  fd_funk_txn_xid_t xid = { .ul = { curr_slot, ctx->leader_bank->data->idx } };
  slot_info->identity_balance = FD_UNLIKELY( curr_slot%4096==0UL ) ? get_identity_balance( ctx, xid ) : ULONG_MAX;

  fd_banks_mark_bank_frozen( ctx->banks, ctx->leader_bank );

  /* Freezing must have produced a bank hash. */
  fd_hash_t const * bank_hash  = fd_bank_bank_hash_query( ctx->leader_bank );
  FD_TEST( bank_hash );

  publish_slot_completed( ctx, stem, ctx->leader_bank, 0, 1 /* is_leader */ );

  /* The reference on the bank is finally no longer needed. */
  ctx->leader_bank->data->refcnt--;

  /* We are no longer leader so we can clear the bank index we use for
     being the leader. */
  ctx->leader_bank->data = NULL;
  ctx->recv_poh          = 0;
  ctx->is_leader         = 0;

  /* A pending identity switch is only safe to apply between leader
     slots, so check for one now. */
  maybe_switch_identity( ctx );
}
    1109             : 
    1110             : static void
    1111             : publish_root_advanced( fd_replay_tile_t *  ctx,
    1112           0 :                        fd_stem_context_t * stem ) {
    1113             : 
    1114           0 :   fd_bank_t bank[1];
    1115           0 :   if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, ctx->consensus_root_bank_idx ) ) ) {
    1116           0 :     FD_LOG_CRIT(( "invariant violation: consensus root bank is NULL at bank index %lu", ctx->consensus_root_bank_idx ));
    1117           0 :   }
    1118             : 
    1119           0 :   if( ctx->rpc_enabled ) {
    1120           0 :     bank->data->refcnt++;
    1121           0 :     FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for rpc", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
    1122           0 :   }
    1123             : 
    1124             :   /* Increment the reference count on the consensus root bank to account
    1125             :      for the number of resolv tiles that are waiting on it. */
    1126           0 :   bank->data->refcnt += ctx->resolv_tile_cnt;
    1127           0 :   FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for resolv", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
    1128             : 
    1129           0 :   fd_replay_root_advanced_t * msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
    1130           0 :   msg->bank_idx = bank->data->idx;
    1131             : 
    1132           0 :   fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_ROOT_ADVANCED, ctx->replay_out->chunk, sizeof(fd_replay_root_advanced_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
    1133           0 :   ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_root_advanced_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
    1134           0 : }
    1135             : 
    1136             : /* init_funk performs pre-flight checks for the account database and
    1137             :    program cache.  Ensures that the account database was set up
    1138             :    correctly by bootstrap components (e.g. genesis or snapshot loader).
    1139             :    Mirrors the account database's fork tree down to the program cache. */
    1140             : 
    1141             : static void
    1142             : init_funk( fd_replay_tile_t * ctx,
    1143           0 :            ulong              bank_slot ) {
    1144             :   /* Ensure that the loaded bank root corresponds to the account
    1145             :      database's root. */
    1146           0 :   fd_funk_t * funk = fd_accdb_user_v1_funk( ctx->accdb );
    1147           0 :   if( FD_UNLIKELY( !funk->shmem ) ) {
    1148           0 :     FD_LOG_CRIT(( "failed to initialize account database: replay tile is not joined to database shared memory objects" ));
    1149           0 :   }
    1150           0 :   fd_funk_txn_xid_t const * accdb_pub = fd_funk_last_publish( funk );
    1151           0 :   if( FD_UNLIKELY( accdb_pub->ul[0]!=bank_slot ) ) {
    1152           0 :     FD_LOG_CRIT(( "failed to initialize account database: accdb is at slot %lu, but chain state is at slot %lu\n"
    1153           0 :                   "This is a bug in startup components.",
    1154           0 :                   accdb_pub->ul[0], bank_slot ));
    1155           0 :   }
    1156           0 :   if( FD_UNLIKELY( fd_funk_last_publish_is_frozen( funk ) ) ) {
    1157           0 :     FD_LOG_CRIT(( "failed to initialize account database: accdb fork graph is not clean.\n"
    1158           0 :                   "The account database should only contain state for the root slot at this point,\n"
    1159           0 :                   "but there are incomplete database transactions leftover.\n"
    1160           0 :                   "This is a bug in startup components."  ));
    1161           0 :   }
    1162             : 
    1163             :   /* The program cache tracks the account database's fork graph at all
    1164             :      times.  Perform initial synchronization: pivot from funk 'root' (a
    1165             :      sentinel XID) to 'last publish' (the bootstrap root slot). */
    1166           0 :   if( FD_UNLIKELY( !ctx->progcache_admin->funk->shmem ) ) {
    1167           0 :     FD_LOG_CRIT(( "failed to initialize account database: replay tile is not joined to program cache" ));
    1168           0 :   }
    1169           0 :   fd_progcache_clear( ctx->progcache_admin );
    1170             : 
    1171           0 :   fd_funk_txn_xid_t last_publish = fd_accdb_root_get( ctx->accdb_admin );
    1172           0 :   fd_progcache_txn_attach_child( ctx->progcache_admin, fd_funk_root( ctx->progcache_admin->funk ), &last_publish );
    1173           0 :   fd_progcache_txn_advance_root( ctx->progcache_admin,                                             &last_publish );
    1174           0 : }
    1175             : 
    1176             : static void
    1177           0 : init_after_snapshot( fd_replay_tile_t * ctx ) {
    1178             :   /* Now that the snapshot has been loaded in, we have to refresh the
    1179             :      stake delegations since the manifest does not contain the full set
    1180             :      of data required for the stake delegations. See
    1181             :      fd_stake_delegations.h for why this is required. */
    1182             : 
    1183           0 :   fd_bank_t bank[1];
    1184           0 :   if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ) ) ) {
    1185           0 :     FD_LOG_CRIT(( "invariant violation: replay bank is NULL at bank index %lu", FD_REPLAY_BOOT_BANK_IDX ));
    1186           0 :   }
    1187             : 
    1188           0 :   fd_funk_txn_xid_t xid = { .ul = { fd_bank_slot_get( bank ), bank->data->idx } };
    1189           0 :   init_funk( ctx, fd_bank_slot_get( bank ) );
    1190             : 
    1191           0 :   fd_stake_delegations_t * root_delegations = fd_banks_stake_delegations_root_query( ctx->banks );
    1192             : 
    1193           0 :   fd_stake_delegations_refresh( root_delegations, ctx->accdb, &xid );
    1194             : 
    1195           0 :   fd_top_votes_t * top_votes = fd_bank_top_votes_modify( bank );
    1196             : 
    1197           0 :   fd_vote_stakes_t * vote_stakes = fd_bank_vote_stakes_locking_modify( bank );
    1198           0 :   ushort fork_idx = bank->data->vote_stakes_fork_id;
    1199             : 
    1200           0 :   uchar __attribute__((aligned(FD_VOTE_STAKES_ITER_ALIGN))) iter_mem[ FD_VOTE_STAKES_ITER_FOOTPRINT ];
    1201           0 :   for( fd_vote_stakes_iter_t * iter = fd_vote_stakes_fork_iter_init( vote_stakes, fork_idx, iter_mem );
    1202           0 :        !fd_vote_stakes_fork_iter_done( vote_stakes, fork_idx, iter );
    1203           0 :        fd_vote_stakes_fork_iter_next( vote_stakes, fork_idx, iter ) ) {
    1204           0 :     fd_pubkey_t pubkey;
    1205           0 :     fd_pubkey_t node_account_t_2;
    1206           0 :     ulong       stake_t_2;
    1207           0 :     fd_vote_stakes_fork_iter_ele( vote_stakes, fork_idx, iter, &pubkey, NULL, &stake_t_2, NULL, &node_account_t_2 );
    1208             : 
    1209           0 :     int is_valid = 1;
    1210           0 :     fd_accdb_ro_t acc[1];
    1211           0 :     if( FD_UNLIKELY( !fd_accdb_open_ro( ctx->accdb, acc, &xid, &pubkey ) ) ) {
    1212           0 :       is_valid = 0;
    1213           0 :     } else if( FD_UNLIKELY( !fd_vsv_is_correct_size_and_initialized( acc->meta ) ) ) {
    1214           0 :       fd_accdb_close_ro( ctx->accdb, acc );
    1215           0 :       is_valid = 0;
    1216           0 :     }
    1217             : 
    1218           0 :     if( FD_LIKELY( is_valid ) ) {
    1219           0 :       fd_vote_block_timestamp_t last_vote = fd_vsv_get_vote_block_timestamp( fd_account_data( acc->meta ), acc->meta->dlen );
    1220           0 :       fd_top_votes_insert( top_votes, &pubkey, &node_account_t_2, stake_t_2, last_vote.slot, last_vote.timestamp );
    1221           0 :       fd_accdb_close_ro( ctx->accdb, acc );
    1222           0 :     } else {
    1223           0 :       fd_top_votes_insert( top_votes, &pubkey, &node_account_t_2, stake_t_2, 0UL, 0L );
    1224           0 :       fd_top_votes_invalidate( top_votes, &pubkey );
    1225           0 :     }
    1226           0 :   }
    1227             : 
    1228           0 :   fd_bank_vote_stakes_end_locking_modify( bank );
    1229             : 
    1230             :   /* After both snapshots have been loaded in, we can determine if we should
    1231             :      start distributing rewards. */
    1232             : 
    1233           0 :   fd_rewards_recalculate_partitioned_rewards( ctx->banks, bank, ctx->accdb, &xid, &ctx->runtime_stack, ctx->capture_ctx );
    1234             : 
    1235           0 :   ulong snapshot_slot = fd_bank_slot_get( bank );
    1236           0 :   if( FD_UNLIKELY( !snapshot_slot ) ) {
    1237             :     /* Genesis-specific setup. */
    1238             :     /* FIXME: This branch does not set up a new block exec ctx
    1239             :        properly. Needs to do whatever prepare_new_block_execution
    1240             :        does, but just hacking that in breaks stuff. */
    1241           0 :     fd_runtime_update_leaders( bank, &ctx->runtime_stack );
    1242             : 
    1243           0 :     ulong hashcnt_per_slot = fd_bank_hashes_per_tick_get( bank ) * fd_bank_ticks_per_slot_get( bank );
    1244           0 :     fd_hash_t * poh = fd_bank_poh_modify( bank );
    1245           0 :     while( hashcnt_per_slot-- ) {
    1246           0 :       fd_sha256_hash( poh->hash, 32UL, poh->hash );
    1247           0 :     }
    1248             : 
    1249           0 :     int is_epoch_boundary = 0;
    1250           0 :     fd_runtime_block_execute_prepare( ctx->banks, bank, ctx->accdb, &ctx->runtime_stack, ctx->capture_ctx, &is_epoch_boundary );
    1251           0 :     FD_TEST( !is_epoch_boundary );
    1252           0 :     fd_runtime_block_execute_finalize( bank, ctx->accdb, ctx->capture_ctx );
    1253             : 
    1254           0 :     snapshot_slot = 0UL;
    1255           0 :   }
    1256           0 : }
    1257             : 
/* maybe_become_leader checks whether it is time to start our next
   leader slot and, if so, transitions into the leader state: prepares
   the leader bank, reads the bundle tip-payment accounts (when bundles
   are enabled), and publishes a became-leader message downstream.
   Returns 1 if we became leader, 0 otherwise.  Must only be called
   after boot. */
static inline int
maybe_become_leader( fd_replay_tile_t *  ctx,
                     fd_stem_context_t * stem ) {
  FD_TEST( ctx->is_booted );
  /* Bail out early when there is no upcoming leader slot, we are
     already leader, the (possibly new) identity's vote is not yet
     rooted and we are configured to wait for it, there is no replay
     output link, or startup snapshot work is incomplete. */
  if( FD_LIKELY( ctx->next_leader_slot==ULONG_MAX || ctx->is_leader || (!ctx->identity_vote_rooted && ctx->wait_for_vote_to_start_leader) || ctx->replay_out->idx==ULONG_MAX || !ctx->wfs_complete ) ) return 0;
  if( FD_UNLIKELY( ctx->halt_leader ) ) return 0;
  if( !ctx->supports_leader ) return 0;

  FD_TEST( ctx->next_leader_slot>ctx->reset_slot );
  long now = fd_tickcount();
  /* Not yet time to start the slot. */
  if( FD_LIKELY( now<ctx->next_leader_tickcount ) ) return 0;

  /* If a prior leader is still in the process of publishing their slot,
     delay ours to let them finish ... unless they are so delayed that
     we risk getting skipped by the leader following us.  1.2 seconds
     is a reasonable default here, although any value between 0 and 1.6
     seconds could be considered reasonable.  This is arbitrary and
     chosen due to intuition. */
  if( FD_UNLIKELY( now<ctx->next_leader_tickcount+(long)(3.0*ctx->slot_duration_ticks) ) ) {
    FD_TEST( ctx->reset_bank->data );

    /* TODO: Make the max_active_descendant calculation more efficient
       by caching it in the bank structure and updating it as banks are
       created and completed. */
    /* Walk the reset bank's direct children to find the highest slot
       currently being replayed on top of it. */
    ulong max_active_descendant = 0UL;
    ulong child_idx = ctx->reset_bank->data->child_idx;
    while( child_idx!=ULONG_MAX ) {
      fd_bank_t child_bank[1];
      fd_banks_bank_query( child_bank, ctx->banks, child_idx );
      max_active_descendant = fd_ulong_max( max_active_descendant, fd_bank_slot_get( child_bank ) );
      child_idx = child_bank->data->sibling_idx;
    }

    /* If the max_active_descendant is >= next_leader_slot, we waited
       too long and a leader after us started publishing to try and skip
       us.  Just start our leader slot immediately, we might win ... */
    if( FD_LIKELY( max_active_descendant>=ctx->reset_slot && max_active_descendant<ctx->next_leader_slot ) ) {
      /* If one of the leaders between the reset slot and our leader
         slot is in the process of publishing (they have a descendant
         bank that is in progress of being replayed), then keep waiting.
         We probably wouldn't get a leader slot out before they
         finished.

         Unless... we are past the deadline to start our slot by more
         than 1.2 seconds, in which case we should probably start it to
         avoid getting skipped by the leader behind us. */
      return 0;
    }
  }

  long now_nanos = fd_log_wallclock();

  ctx->is_leader = 1;
  ctx->recv_poh  = 0;

  /* We must never lead the same slot twice. */
  FD_TEST( ctx->highwater_leader_slot==ULONG_MAX || ctx->highwater_leader_slot<ctx->next_leader_slot );
  ctx->highwater_leader_slot = ctx->next_leader_slot;

  FD_LOG_INFO(( "becoming leader for slot %lu, parent slot is %lu", ctx->next_leader_slot, ctx->reset_slot ));

  /* Acquires bank, sets up initial state, and refcnts it. */
  fd_bank_t *       bank = prepare_leader_bank( ctx, ctx->next_leader_slot, now_nanos, &ctx->reset_block_id, stem );
  fd_funk_txn_xid_t xid  = { .ul = { ctx->next_leader_slot, ctx->leader_bank->data->idx } };

  fd_bundle_crank_tip_payment_config_t config[1] = { 0 };
  fd_pubkey_t tip_receiver_owner = {0};

  /* When bundles are enabled, snapshot the tip payment config and tip
     receiver owner for the bundle crank. */
  if( FD_UNLIKELY( ctx->bundle.enabled ) ) {
    fd_acct_addr_t tip_payment_config[1];
    fd_acct_addr_t tip_receiver[1];
    fd_bundle_crank_get_addresses( ctx->bundle.gen, fd_bank_epoch_get( bank ), tip_payment_config, tip_receiver );

    fd_accdb_ro_t tip_config_acc[1];
    if( FD_UNLIKELY( !fd_accdb_open_ro( ctx->accdb, tip_config_acc, &xid, tip_payment_config ) ) ) {
      /* FIXME This should not crash the validator */
      FD_BASE58_ENCODE_32_BYTES( tip_payment_config->b, tip_config_acc_b58 );
      FD_LOG_CRIT(( "tip payment config account %s does not exist", tip_config_acc_b58 ));
    }
    ulong tip_cfg_sz = fd_accdb_ref_data_sz( tip_config_acc );
    if( FD_UNLIKELY( tip_cfg_sz < sizeof(fd_bundle_crank_tip_payment_config_t) ) ) {
      /* FIXME This should not crash the validator */
      FD_LOG_HEXDUMP_CRIT(( "invalid tip payment config account data", fd_accdb_ref_data_const( tip_config_acc ), tip_cfg_sz ));
    }
    memcpy( config, fd_accdb_ref_data_const( tip_config_acc ), sizeof(fd_bundle_crank_tip_payment_config_t) );
    fd_accdb_close_ro( ctx->accdb, tip_config_acc );

    /* It is possible that the tip receiver account does not exist yet
       if it is the first time in an epoch. */
    fd_accdb_ro_t tip_receiver_acc[1];
    if( FD_LIKELY( fd_accdb_open_ro( ctx->accdb, tip_receiver_acc, &xid, tip_receiver ) ) ) {
      tip_receiver_owner = FD_LOAD( fd_pubkey_t, fd_accdb_ref_owner( tip_receiver_acc ) );
      fd_accdb_close_ro( ctx->accdb, tip_receiver_acc );
    }
  }

  /* Fill the became-leader message for downstream consumers (pack/poh). */
  fd_became_leader_t * msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  msg->slot = ctx->next_leader_slot;
  msg->slot_start_ns = now_nanos;
  msg->slot_end_ns   = now_nanos+(long)ctx->slot_duration_nanos;
  msg->bank = NULL;
  msg->bank_idx = bank->data->idx;
  msg->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
  msg->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
  msg->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)msg->ticks_per_slot);
  msg->bundle->config[0]       = config[0];
  memcpy( msg->bundle->last_blockhash,     fd_bank_poh_query( bank )->hash, sizeof(fd_hash_t)   );
  memcpy( msg->bundle->tip_receiver_owner, tip_receiver_owner.uc,           sizeof(fd_pubkey_t) );

  if( FD_UNLIKELY( msg->hashcnt_per_tick==1UL ) ) {
    /* Low power producer, maximum of one microblock per tick in the slot */
    msg->max_microblocks_in_slot = msg->ticks_per_slot;
  } else {
    /* See the long comment in after_credit for this limit */
    msg->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, msg->ticks_per_slot*(msg->hashcnt_per_tick-1UL) );
  }

  msg->total_skipped_ticks = msg->ticks_per_slot*(ctx->next_leader_slot-ctx->reset_slot);
  msg->epoch = fd_slot_to_epoch( fd_bank_epoch_schedule_query( bank ), ctx->next_leader_slot, NULL );

  fd_cost_tracker_t const * cost_tracker = fd_bank_cost_tracker_locking_query( bank );

  msg->limits.slot_max_cost = ctx->larger_max_cost_per_block ? LARGER_MAX_COST_PER_BLOCK : cost_tracker->block_cost_limit;
  msg->limits.slot_max_vote_cost = cost_tracker->vote_cost_limit;
  msg->limits.slot_max_write_cost_per_acct = cost_tracker->account_cost_limit;

  fd_bank_cost_tracker_end_locking_query( bank );

  if( FD_UNLIKELY( msg->ticks_per_slot+msg->total_skipped_ticks>USHORT_MAX ) ) {
    /* There can be at most USHORT_MAX skipped ticks, because the
       parent_offset field in the shred data is only 2 bytes wide. */
    FD_LOG_ERR(( "too many skipped ticks %lu for slot %lu, chain must halt", msg->ticks_per_slot+msg->total_skipped_ticks, ctx->next_leader_slot ));
  }

  fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_BECAME_LEADER, ctx->replay_out->chunk, sizeof(fd_became_leader_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
  ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_became_leader_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );

  /* Consumed this leader slot; maybe_switch_identity / tower resets
     will compute the next one. */
  ctx->next_leader_slot      = ULONG_MAX;
  ctx->next_leader_tickcount = LONG_MAX;

  return 1;
}
    1400             : 
/* process_poh_message ingests the final PoH state for one of our leader
   slots, as reported by the poh tile when the slot ends.  The final
   poh hash is written into the leader bank, and a flag is raised so the
   leader-slot completion path knows the hash has arrived. */
static void
process_poh_message( fd_replay_tile_t *                 ctx,
                     fd_poh_leader_slot_ended_t const * slot_ended ) {

  /* Only valid once booted, while we are leader, with a live leader
     bank, and for a slot we have actually published as leader. */
  FD_TEST( ctx->is_booted );
  FD_TEST( ctx->is_leader );
  FD_TEST( ctx->leader_bank->data!=NULL );

  FD_TEST( ctx->highwater_leader_slot>=slot_ended->slot );
  FD_TEST( ctx->next_leader_slot>ctx->highwater_leader_slot );

  /* Update the poh hash in the bank.  We will want to maintain a refcnt
     on the bank until we have received the block id for the block after
     it has been shredded. */

  memcpy( fd_bank_poh_modify( ctx->leader_bank ), slot_ended->blockhash, sizeof(fd_hash_t) );

  /* Consumed by the leader-slot finalization path. */
  ctx->recv_poh = 1;
}
    1420             : 
    1421             : static void
    1422             : publish_reset( fd_replay_tile_t *  ctx,
    1423             :                fd_stem_context_t * stem,
    1424           0 :                fd_bank_t *         bank ) {
    1425           0 :   if( FD_UNLIKELY( ctx->replay_out->idx==ULONG_MAX ) ) return;
    1426             : 
    1427           0 :   fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
    1428           0 :   FD_TEST( block_hash );
    1429             : 
    1430           0 :   fd_poh_reset_t * reset = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
    1431             : 
    1432           0 :   reset->bank_idx         = bank->data->idx;
    1433           0 :   reset->timestamp        = fd_log_wallclock();
    1434           0 :   reset->completed_slot   = fd_bank_slot_get( bank );
    1435           0 :   reset->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
    1436           0 :   reset->ticks_per_slot   = fd_bank_ticks_per_slot_get( bank );
    1437           0 :   reset->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)reset->ticks_per_slot);
    1438           0 :   fd_memcpy( reset->completed_block_id, ctx->reset_block_id.uc, sizeof(fd_hash_t) );
    1439           0 :   fd_memcpy( reset->completed_blockhash, block_hash->uc, sizeof(fd_hash_t) );
    1440             : 
    1441           0 :   ulong ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
    1442           0 :   if( FD_UNLIKELY( reset->hashcnt_per_tick==1UL ) ) {
    1443             :     /* Low power producer, maximum of one microblock per tick in the slot */
    1444           0 :     reset->max_microblocks_in_slot = ticks_per_slot;
    1445           0 :   } else {
    1446             :     /* See the long comment in after_credit for this limit */
    1447           0 :     reset->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ticks_per_slot*(reset->hashcnt_per_tick-1UL) );
    1448           0 :   }
    1449           0 :   reset->next_leader_slot = ctx->next_leader_slot;
    1450             : 
    1451           0 :   fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_RESET, ctx->replay_out->chunk, sizeof(fd_poh_reset_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
    1452           0 :   ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_poh_reset_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
    1453           0 : }
    1454             : 
    1455             : static void
    1456             : store_xinsert( fd_store_t      * store,
    1457           0 :                fd_hash_t const * merkle_root ) {
    1458           0 :   fd_store_pool_t pool = {
    1459           0 :       .pool    = fd_wksp_laddr_fast( fd_store_wksp( store ), store->pool_mem_gaddr ),
    1460           0 :       .ele     = fd_wksp_laddr_fast( fd_store_wksp( store ), store->pool_ele_gaddr ),
    1461           0 :       .ele_max = store->fec_max
    1462           0 :   };
    1463           0 :   int err; fd_store_fec_t * fec = fd_store_pool_acquire( &pool, NULL, 1 /* blocking */, &err );
    1464           0 :   if( FD_UNLIKELY( err!=FD_POOL_SUCCESS ) ) FD_LOG_CRIT(( "store pool: %s", fd_store_pool_strerror( err ) ));
    1465           0 :   fec->key.merkle_root = *merkle_root;
    1466           0 :   fec->key.part_idx    = 0;
    1467           0 :   fec->cmr             = (fd_hash_t){ 0 };
    1468           0 :   fec->next            = fd_store_pool_idx_null();
    1469           0 :   fec->data_sz         = 0UL;
    1470             : 
    1471           0 :   FD_STORE_XLOCK_BEGIN( store ) {
    1472           0 :     fd_store_map_ele_insert( fd_wksp_laddr_fast( fd_store_wksp( store ), store->map_gaddr ), fec, pool.ele );
    1473           0 :   } FD_STORE_XLOCK_END;
    1474           0 : }
    1475             : 
    1476             : static void
    1477             : boot_genesis( fd_replay_tile_t *        ctx,
    1478             :               fd_stem_context_t *       stem,
    1479           0 :               fd_genesis_meta_t const * meta ) {
    1480             :   /* If we are bootstrapping, we can't wait to wait for our identity
    1481             :      vote to be rooted as this creates a circular dependency. */
    1482           0 :   ctx->identity_vote_rooted = 1;
    1483             : 
    1484           0 :   uchar const * genesis_blob = (uchar const *)( meta+1 );
    1485           0 :   FD_TEST( meta->bootstrap && meta->has_lthash );
    1486           0 :   FD_TEST( fd_genesis_parse( ctx->genesis, genesis_blob, meta->blob_sz ) );
    1487             : 
    1488           0 :   fd_bank_t bank[1];
    1489           0 :   FD_TEST( fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ) );
    1490           0 :   fd_funk_txn_xid_t xid = { .ul = { 0UL, FD_REPLAY_BOOT_BANK_IDX } };
    1491             : 
    1492             :   /* Do genesis-related processing in a non-rooted transaction */
    1493           0 :   fd_funk_txn_xid_t root_xid = { .ul = { LONG_MAX, LONG_MAX } };
    1494           0 :   fd_funk_txn_xid_t target_xid = { .ul = { 0UL, 0UL } };
    1495           0 :   fd_accdb_attach_child( ctx->accdb_admin, &root_xid, &target_xid );
    1496           0 :   fd_runtime_read_genesis( ctx->banks, bank, ctx->accdb, &xid, NULL, &meta->genesis_hash, &meta->lthash, ctx->genesis, genesis_blob, &ctx->runtime_stack );
    1497           0 :   fd_accdb_advance_root( ctx->accdb_admin, &target_xid );
    1498             : 
    1499           0 :   static const fd_txncache_fork_id_t txncache_root = { .val = USHORT_MAX };
    1500           0 :   bank->data->txncache_fork_id = fd_txncache_attach_child( ctx->txncache, txncache_root );
    1501             : 
    1502           0 :   fd_hash_t const * block_hash = fd_blockhashes_peek_last_hash( fd_bank_block_hash_queue_query( bank ) );
    1503           0 :   fd_txncache_finalize_fork( ctx->txncache, bank->data->txncache_fork_id, 0UL, block_hash->uc );
    1504             : 
    1505           0 :   publish_epoch_info( ctx, stem, bank, 0 );
    1506           0 :   publish_epoch_info( ctx, stem, bank, 1 );
    1507             : 
    1508             :   /* We call this after fd_runtime_read_genesis, which sets up the
    1509             :      slot_bank needed in blockstore_init. */
    1510           0 :   init_after_snapshot( ctx );
    1511             : 
    1512           0 :   ctx->published_root_slot = 0UL;
    1513           0 :   fd_sched_block_add_done( ctx->sched, bank->data->idx, ULONG_MAX, 0UL );
    1514             : 
    1515           0 :   fd_bank_block_height_set( bank, 1UL );
    1516             : 
    1517           0 :   ctx->consensus_root          = ctx->initial_block_id;
    1518           0 :   ctx->consensus_root_slot     = 0UL;
    1519           0 :   ctx->consensus_root_bank_idx = 0UL;
    1520           0 :   ctx->published_root_slot     = 0UL;
    1521           0 :   ctx->published_root_bank_idx = 0UL;
    1522             : 
    1523           0 :   ctx->reset_slot            = 0UL;
    1524           0 :   fd_memcpy( ctx->reset_bank, bank, sizeof(fd_bank_t) );
    1525           0 :   ctx->reset_timestamp_nanos = fd_log_wallclock();
    1526           0 :   ctx->next_leader_slot      = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, 1UL, ctx->identity_pubkey );
    1527           0 :   if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
    1528           0 :     ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
    1529           0 :   } else {
    1530           0 :     ctx->next_leader_tickcount = LONG_MAX;
    1531           0 :   }
    1532             : 
    1533           0 :   ctx->is_booted = 1;
    1534           0 :   maybe_become_leader( ctx, stem );
    1535             : 
    1536           0 :   fd_hash_t initial_block_id = ctx->initial_block_id;
    1537           0 :   fd_reasm_fec_t * fec       = fd_reasm_insert( ctx->reasm, &initial_block_id, NULL, 0 /* genesis slot */, 0, 0, 0, 0, 1, 0, ctx->store, &ctx->reasm_evicted ); /* FIXME manifest block_id */
    1538           0 :   fec->bank_idx              = bank->data->idx;
    1539           0 :   fec->bank_seq              = bank->data->bank_seq;
    1540           0 :   store_xinsert( ctx->store, &initial_block_id );
    1541             : 
    1542           0 :   fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ 0 ];
    1543           0 :   block_id_ele->latest_mr = initial_block_id;
    1544           0 :   block_id_ele->slot      = 0UL;
    1545             : 
    1546           0 :   FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
    1547             : 
    1548           0 :   fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
    1549           0 :   cost_tracker_snap( bank, slot_info );
    1550           0 :   slot_info->identity_balance = get_identity_balance( ctx, xid );
    1551             : 
    1552           0 :   publish_slot_completed( ctx, stem, bank, 1, 0 /* is_leader */ );
    1553           0 :   publish_root_advanced( ctx, stem );
    1554           0 :   publish_reset( ctx, stem, bank );
    1555           0 : }
    1556             : 
    1557             : static inline void
    1558           0 : maybe_verify_cluster_type( fd_replay_tile_t * ctx ) {
    1559           0 :   if( FD_UNLIKELY( !ctx->is_booted || !ctx->has_genesis_hash ) ) {
    1560           0 :     return;
    1561           0 :   }
    1562             : 
    1563           0 :   FD_BASE58_ENCODE_32_BYTES( ctx->genesis_hash->uc, hash_cstr );
    1564           0 :   ulong cluster = fd_genesis_cluster_identify( hash_cstr );
    1565             :   /* Map pyth-related clusters to unkwown. */
    1566           0 :   switch( cluster ) {
    1567           0 :     case FD_CLUSTER_PYTHNET:
    1568           0 :     case FD_CLUSTER_PYTHTEST:
    1569           0 :       cluster = FD_CLUSTER_UNKNOWN;
    1570           0 :   }
    1571             : 
    1572           0 :   if( FD_UNLIKELY( cluster!=ctx->cluster_type ) ) {
    1573           0 :     FD_LOG_ERR(( "Your genesis.bin file at `%s` has a genesis hash of `%s` which means the cluster is %s "
    1574           0 :                  "but the snapshot you loaded is for a different cluster %s. If you are trying to join the "
    1575           0 :                  "%s cluster, you can delete the genesis.bin file and restart the node to download the correct "
    1576           0 :                  "genesis file automatically.",
    1577           0 :                  ctx->genesis_path,
    1578           0 :                  hash_cstr,
    1579           0 :                  fd_genesis_cluster_name( cluster ),
    1580           0 :                  fd_genesis_cluster_name( ctx->cluster_type ),
    1581           0 :                  fd_genesis_cluster_name( cluster ) ));
    1582           0 :   }
    1583           0 : }
    1584             : 
/* on_snapshot_message handles frags from the snapshot loading path.
   Manifest messages (full or incremental) recover bank state into the
   boot bank; the DONE message finishes booting replay: it seeds the
   scheduler, reasm, store, and block id map with the snapshot slot,
   publishes epoch info and root/slot notifications, and verifies the
   cluster type against the genesis hash. */
static void
on_snapshot_message( fd_replay_tile_t *  ctx,
                     fd_stem_context_t * stem,
                     ulong               in_idx,
                     ulong               chunk,
                     ulong               sig ) {
  ulong msg = fd_ssmsg_sig_message( sig );
  if( FD_LIKELY( msg==FD_SSMSG_DONE ) ) {
    /* An end of message notification indicates the snapshot is loaded.
       Replay is able to start executing from this point onwards. */
    /* TODO: replay should finish booting. Could make replay a
       state machine and set the state here accordingly. */
    ctx->is_booted = 1;

    fd_bank_t bank[1];
    if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ) ) ) {
      FD_LOG_CRIT(( "invariant violation: bank is NULL for bank index %lu", FD_REPLAY_BOOT_BANK_IDX ));
    }

    ulong snapshot_slot = fd_bank_slot_get( bank );

    /* If wait-for-supermajority is configured with an expected bank
       hash, refuse to proceed on a mismatching snapshot. */
    fd_hash_t bank_hash = fd_bank_bank_hash_get( bank );
    if( FD_UNLIKELY( ctx->wfs_enabled && memcmp( ctx->expected_bank_hash.uc, bank_hash.uc, sizeof(fd_hash_t) ) ) ) {
      FD_BASE58_ENCODE_32_BYTES( ctx->expected_bank_hash.uc, expected_bank_hash_cstr );
      FD_BASE58_ENCODE_32_BYTES( bank_hash.uc,                 actual_bank_hash_cstr );
      FD_LOG_ERR(( "[consensus.wait_for_supermajority_with_bank_hash] expected_bank_hash=%s does not match snapshot slot"
                   "=%lu bank_hash=%s. If you are loading a snapshot from the network, check that the slot matches the "
                   "cluster restart slot. ", expected_bank_hash_cstr, snapshot_slot, actual_bank_hash_cstr ));
    }
    if( FD_UNLIKELY( ctx->wfs_enabled ) ) {
      FD_LOG_NOTICE(( "waiting for supermajority at snapshot slot %lu", snapshot_slot ));
    }

    /* FIXME: This is a hack because the block id of the snapshot slot
       is not provided in the snapshot.  A possible solution is to get
       the block id of the snapshot slot from repair. */
    fd_hash_t manifest_block_id = ctx->initial_block_id;

    fd_funk_txn_xid_t xid = { .ul = { snapshot_slot, FD_REPLAY_BOOT_BANK_IDX } };
    fd_features_restore( bank, ctx->accdb, &xid );

    /* Typically, when we cross an epoch boundary during normal
       operation, we publish the stake weights for the new epoch.  But
       since we are starting from a snapshot, we need to publish two
       epochs worth of stake weights: the previous epoch (which is
       needed for voting on the current epoch), and the current epoch
       (which is needed for voting on the next epoch). */
    publish_epoch_info( ctx, stem, bank, 0 );
    publish_epoch_info( ctx, stem, bank, 1 );

    /* Both consensus and published roots start at the snapshot slot. */
    ctx->consensus_root          = manifest_block_id;
    ctx->consensus_root_slot     = snapshot_slot;
    ctx->consensus_root_bank_idx = 0UL;
    ctx->published_root_slot     = ctx->consensus_root_slot;
    ctx->published_root_bank_idx = 0UL;

    ctx->reset_slot            = snapshot_slot;
    fd_memcpy( ctx->reset_bank, bank, sizeof(fd_bank_t) );
    ctx->reset_timestamp_nanos = fd_log_wallclock();
    ctx->next_leader_slot      = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, 1UL, ctx->identity_pubkey );
    if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
      /* Estimate when our leader slot begins relative to the reset
         slot, in ticks. */
      ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
    } else {
      ctx->next_leader_tickcount = LONG_MAX;
    }

    fd_sched_block_add_done( ctx->sched, bank->data->idx, ULONG_MAX, snapshot_slot );
    FD_TEST( bank->data->idx==0UL );

    fd_runtime_update_leaders( bank, &ctx->runtime_stack );

    /* Register the snapshot slot in the block id map at index 0. */
    fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ 0 ];
    block_id_ele->latest_mr      = manifest_block_id;
    block_id_ele->slot           = snapshot_slot;
    block_id_ele->block_id_seen  = 1;
    block_id_ele->latest_fec_idx = 0U;
    FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );

    /* We call this after fd_runtime_read_genesis, which sets up the
       slot_bank needed in blockstore_init. */
    init_after_snapshot( ctx );

    fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
    cost_tracker_snap( bank, slot_info );
    slot_info->identity_balance = get_identity_balance( ctx, xid );

    publish_slot_completed( ctx, stem, bank, 1, 0 /* is_leader */ );
    publish_root_advanced( ctx, stem );

    /* Seed reasm and the store with the snapshot block id. */
    fd_reasm_fec_t * fec = fd_reasm_insert( ctx->reasm, &manifest_block_id, NULL, snapshot_slot, 0, 0, 0, 0, 1, 0, ctx->store, &ctx->reasm_evicted ); /* FIXME manifest block_id */
    fec->bank_idx        = bank->data->idx;
    fec->bank_seq        = bank->data->bank_seq;
    store_xinsert( ctx->store, &manifest_block_id );

    ctx->cluster_type = fd_bank_cluster_type_get( bank );

    maybe_verify_cluster_type( ctx );

    return;
  }

  switch( msg ) {
    case FD_SSMSG_MANIFEST_FULL:
    case FD_SSMSG_MANIFEST_INCREMENTAL: {
      /* We may either receive a full snapshot manifest or an
         incremental snapshot manifest.  Note that this external message
         id is only used temporarily because replay cannot yet receive
         the firedancer-internal snapshot manifest message. */
      if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
        FD_LOG_ERR(( "chunk %lu from in %d corrupt, not in range [%lu,%lu]", chunk, ctx->in_kind[ in_idx ], ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

      fd_bank_t bank[1];
      fd_ssload_recover( fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ),
                         ctx->banks,
                         fd_banks_bank_query( bank, ctx->banks, FD_REPLAY_BOOT_BANK_IDX ),
                         &ctx->runtime_stack,
                         msg==FD_SSMSG_MANIFEST_INCREMENTAL );

      /* Stash the manifest's hard fork schedule and creation time for
         later use. */
      fd_snapshot_manifest_t const * manifest = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
      ctx->hard_forks_cnt = manifest->hard_forks_len;
      for( ulong i=0UL; i<manifest->hard_forks_len; i++ ) {
        ctx->hard_forks[ i ] = manifest->hard_forks[ i ];
        ctx->hard_forks_cnts[ i ] = manifest->hard_forks_cnts[ i ];
      }
      ctx->has_expected_genesis_timestamp = 1;
      ctx->expected_genesis_timestamp     = manifest->creation_time_millis;
      break;
    }
    default: {
      FD_LOG_ERR(( "Received unknown snapshot message with msg %lu", msg ));
      return;
    }
  }

  return;
}
    1721             : 
    1722             : static void
    1723             : dispatch_task( fd_replay_tile_t *  ctx,
    1724             :                fd_stem_context_t * stem,
    1725           0 :                fd_sched_task_t *   task ) {
    1726             : 
    1727           0 :   switch( task->task_type ) {
    1728           0 :     case FD_SCHED_TT_TXN_EXEC: {
    1729           0 :       fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, task->txn_exec->txn_idx );
    1730             : 
    1731           0 :       fd_bank_t bank[1];
    1732           0 :       FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->txn_exec->bank_idx ) );
    1733             : 
    1734           0 : #     if FD_HAS_FLATCC
    1735             :       /* Add the transaction to the block dumper if necessary. This
    1736             :          logic doesn't need to be fork-aware since it's only meant to
    1737             :          be used in backtest. */
    1738           0 :       if( FD_UNLIKELY( ctx->dump_proto_ctx && ctx->dump_proto_ctx->dump_block_to_pb ) ) {
    1739           0 :         fd_dump_block_to_protobuf_collect_tx( ctx->block_dump_ctx, txn_p );
    1740           0 :       }
    1741           0 : #     endif
    1742             : 
    1743           0 :       bank->data->refcnt++;
    1744             : 
    1745           0 :       if( FD_UNLIKELY( !bank->data->first_transaction_scheduled_nanos ) ) bank->data->first_transaction_scheduled_nanos = fd_log_wallclock();
    1746             : 
    1747           0 :       fd_replay_out_link_t *   exec_out = ctx->exec_out;
    1748           0 :       fd_execrp_txn_exec_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
    1749           0 :       memcpy( exec_msg->txn, txn_p, sizeof(fd_txn_p_t) );
    1750           0 :       exec_msg->bank_idx = task->txn_exec->bank_idx;
    1751           0 :       exec_msg->txn_idx  = task->txn_exec->txn_idx;
    1752           0 :       if( FD_UNLIKELY( ctx->capture_ctx ) ) {
    1753           0 :         exec_msg->capture_txn_idx = ctx->capture_ctx->current_txn_idx++;
    1754           0 :       }
    1755           0 :       fd_stem_publish( stem, exec_out->idx, (FD_EXECRP_TT_TXN_EXEC<<32) | task->txn_exec->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
    1756           0 :       exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
    1757           0 :       break;
    1758           0 :     }
    1759           0 :     case FD_SCHED_TT_TXN_SIGVERIFY: {
    1760           0 :       fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, task->txn_sigverify->txn_idx );
    1761             : 
    1762           0 :       fd_bank_t bank[1];
    1763           0 :       FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->txn_sigverify->bank_idx ) );
    1764           0 :       bank->data->refcnt++;
    1765             : 
    1766           0 :       fd_replay_out_link_t *        exec_out = ctx->exec_out;
    1767           0 :       fd_execrp_txn_sigverify_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
    1768           0 :       memcpy( exec_msg->txn, txn_p, sizeof(fd_txn_p_t) );
    1769           0 :       exec_msg->bank_idx = task->txn_sigverify->bank_idx;
    1770           0 :       exec_msg->txn_idx  = task->txn_sigverify->txn_idx;
    1771           0 :       fd_stem_publish( stem, exec_out->idx, (FD_EXECRP_TT_TXN_SIGVERIFY<<32) | task->txn_sigverify->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, 0UL );
    1772           0 :       exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
    1773           0 :       break;
    1774           0 :     };
    1775           0 :     case FD_SCHED_TT_POH_HASH: {
    1776           0 :       fd_bank_t bank[ 1 ];
    1777           0 :       FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->poh_hash->bank_idx ) );
    1778           0 :       bank->data->refcnt++;
    1779             : 
    1780           0 :       fd_replay_out_link_t *   exec_out = ctx->exec_out;
    1781           0 :       fd_execrp_poh_hash_msg_t * exec_msg = fd_chunk_to_laddr( exec_out->mem, exec_out->chunk );
    1782           0 :       exec_msg->bank_idx = task->poh_hash->bank_idx;
    1783           0 :       exec_msg->mblk_idx = task->poh_hash->mblk_idx;
    1784           0 :       exec_msg->hashcnt  = task->poh_hash->hashcnt;
    1785           0 :       memcpy( exec_msg->hash, task->poh_hash->hash, sizeof(fd_hash_t) );
    1786           0 :       fd_stem_publish( stem, exec_out->idx, (FD_EXECRP_TT_POH_HASH<<32) | task->poh_hash->exec_idx, exec_out->chunk, sizeof(*exec_msg), 0UL, 0UL, 0UL );
    1787           0 :       exec_out->chunk = fd_dcache_compact_next( exec_out->chunk, sizeof(*exec_msg), exec_out->chunk0, exec_out->wmark );
    1788           0 :       break;
    1789           0 :     };
    1790           0 :     default: {
    1791           0 :       FD_LOG_CRIT(( "unexpected task type %lu", task->task_type ));
    1792           0 :     }
    1793           0 :   }
    1794           0 : }
    1795             : 
    1796             : static void
    1797             : mark_bank_dead( fd_replay_tile_t *  ctx,
    1798             :                 fd_stem_context_t * stem,
    1799           0 :                 ulong               bank_idx ) {
    1800           0 :   fd_bank_t bank[1];
    1801           0 :   FD_TEST( fd_banks_bank_query( bank, ctx->banks, bank_idx ) );
    1802           0 :   fd_banks_mark_bank_dead( ctx->banks, bank_idx );
    1803             : 
    1804           0 :   fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ bank_idx ];
    1805           0 :   if( block_id_ele->block_id_seen ) publish_slot_dead( ctx, stem, block_id_ele->slot, &block_id_ele->latest_mr );
    1806             : 
    1807           0 :   fd_reasm_fec_t * fec = fd_reasm_query( ctx->reasm, &block_id_ele->latest_mr );
    1808           0 :   if( FD_UNLIKELY( !fec ) ) return;
    1809           0 :   fec->bank_dead = 1;
    1810             : 
    1811           0 : }
    1812             : 
    1813             : /* Returns 1 if charge_busy. */
    1814             : static int
    1815             : replay( fd_replay_tile_t *  ctx,
    1816           0 :         fd_stem_context_t * stem ) {
    1817             : 
    1818           0 :   if( FD_UNLIKELY( !ctx->is_booted ) ) return 0;
    1819             : 
    1820           0 :   int charge_busy = 0;
    1821           0 :   fd_sched_task_t task[ 1 ];
    1822           0 :   if( FD_UNLIKELY( !fd_sched_task_next_ready( ctx->sched, task ) ) ) {
    1823           0 :     return charge_busy; /* Nothing to execute or do. */
    1824           0 :   }
    1825             : 
    1826           0 :   charge_busy = 1;
    1827             : 
    1828           0 :   switch( task->task_type ) {
    1829           0 :     case FD_SCHED_TT_BLOCK_START: {
    1830           0 :       replay_block_start( ctx, stem, task->block_start->bank_idx, task->block_start->parent_bank_idx, task->block_start->slot );
    1831           0 :       fd_sched_task_done( ctx->sched, FD_SCHED_TT_BLOCK_START, ULONG_MAX, ULONG_MAX, NULL );
    1832           0 :       break;
    1833           0 :     }
    1834           0 :     case FD_SCHED_TT_BLOCK_END: {
    1835           0 :       fd_bank_t bank[1];
    1836           0 :       fd_banks_bank_query( bank, ctx->banks, task->block_end->bank_idx );
    1837           0 :       if( FD_LIKELY( !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) replay_block_finalize( ctx, stem, bank );
    1838           0 :       fd_sched_task_done( ctx->sched, FD_SCHED_TT_BLOCK_END, ULONG_MAX, ULONG_MAX, NULL );
    1839           0 :       break;
    1840           0 :     }
    1841           0 :     case FD_SCHED_TT_TXN_EXEC:
    1842           0 :     case FD_SCHED_TT_TXN_SIGVERIFY:
    1843           0 :     case FD_SCHED_TT_POH_HASH: {
    1844             :       /* Common case: we have a transaction we need to execute. */
    1845           0 :       dispatch_task( ctx, stem, task );
    1846           0 :       break;
    1847           0 :     }
    1848           0 :     case FD_SCHED_TT_MARK_DEAD: {
    1849           0 :       fd_bank_t bank[ 1 ];
    1850           0 :       FD_TEST( fd_banks_bank_query( bank, ctx->banks, task->mark_dead->bank_idx ) );
    1851           0 :       mark_bank_dead( ctx, stem, task->mark_dead->bank_idx );
    1852           0 :       break;
    1853           0 :     }
    1854           0 :     default: {
    1855           0 :       FD_LOG_CRIT(( "unexpected task type %lu", task->task_type ));
    1856           0 :     }
    1857           0 :   }
    1858             : 
    1859           0 :   return charge_busy;
    1860           0 : }
    1861             : 
    1862             : static int
    1863             : can_process_fec( fd_replay_tile_t * ctx,
    1864           0 :                  int *              evict_banks_out ) {
    1865           0 :   fd_reasm_fec_t * fec;
    1866           0 :   if( FD_UNLIKELY( fd_sched_can_ingest_cnt( ctx->sched )==0UL ) ) {
    1867           0 :     ctx->metrics.sched_full++;
    1868           0 :     return 0;
    1869           0 :   }
    1870             : 
    1871           0 :   if( FD_UNLIKELY( (fec = fd_reasm_peek( ctx->reasm ))==NULL ) ) {
    1872           0 :     ctx->metrics.reasm_empty++;
    1873           0 :     return 0;
    1874           0 :   }
    1875             : 
    1876           0 :   ctx->metrics.reasm_latest_slot    = fec->slot;
    1877           0 :   ctx->metrics.reasm_latest_fec_idx = fec->fec_set_idx;
    1878             : 
    1879           0 :   if( FD_UNLIKELY( ctx->is_leader && fec->fec_set_idx==0U && fd_reasm_parent( ctx->reasm, fec )->bank_idx==ctx->leader_bank->data->idx ) ) {
    1880             :     /* This guards against a rare race where we receive the FEC set for
    1881             :        the slot right after our leader rotation before we freeze the
    1882             :        bank for the last slot in our leader rotation.  Leader slot
    1883             :        freezing happens only after if we've received the final PoH hash
    1884             :        from the poh tile as well as the final FEC set for the leader
    1885             :        slot.  So the race happens when FEC sets are delivered and
    1886             :        processed sooner than the PoH hash, aka when the
    1887             :        poh=>shred=>replay path for the block id beats the poh=>replay
    1888             :        path for the poh hash.  To mitigate this race, we must block on
    1889             :        ingesting the FEC set for the ensuing slot before the leader
    1890             :        bank freezes, because that would violate ordering invariants in
    1891             :        banks and sched. */
    1892           0 :     FD_TEST( ctx->block_id_arr[ ctx->leader_bank->data->idx ].block_id_seen );
    1893           0 :     FD_TEST( !ctx->recv_poh );
    1894           0 :     ctx->metrics.leader_bid_wait++;
    1895           0 :     return 0;
    1896           0 :   }
    1897             : 
    1898             :   /* If fec_set_idx is 0, we need a new bank for a new slot.  Banks must
    1899             :      not be full in this case. */
    1900           0 :   if( FD_UNLIKELY( fd_banks_is_full( ctx->banks ) && fec->fec_set_idx==0 ) ) {
    1901           0 :     ctx->metrics.banks_full++;
    1902             :     /* We only want to evict banks if sched is drained and banks is no
    1903             :        longer making progress.  Otherwise, sched might not release
    1904             :        refcnts on the frontier/leaf banks immediately, and the eviction
    1905             :        will have to wait for sched to drain anyways. */
    1906           0 :     if( FD_UNLIKELY( fd_sched_is_drained( ctx->sched ) ) ) *evict_banks_out = 1;
    1907           0 :     return 0;
    1908           0 :   }
    1909             : 
    1910             :   /* Otherwise, banks may not be full, so we can always create a new
    1911             :      bank if needed.  Or, if banks are full, the current fec set's
    1912             :      ancestor (idx 0) already created a bank for this slot.*/
    1913           0 :   return 1;
    1914           0 : }
    1915             : 
    1916             : static void
    1917             : insert_fec_set( fd_replay_tile_t *  ctx,
    1918             :                 fd_stem_context_t * stem,
    1919           0 :                 fd_reasm_fec_t *    reasm_fec ) {
    1920             : 
    1921           0 :   long now = fd_log_wallclock();
    1922             : 
    1923           0 :   reasm_fec->parent_bank_idx = fd_reasm_parent( ctx->reasm, reasm_fec )->bank_idx;
    1924             : 
    1925           0 :   fd_bank_t parent_bank[1];
    1926           0 :   FD_TEST( fd_banks_bank_query( parent_bank, ctx->banks, reasm_fec->parent_bank_idx ) );
    1927           0 :   reasm_fec->parent_bank_seq = parent_bank->data->bank_seq;
    1928             : 
    1929           0 :   if( FD_UNLIKELY( reasm_fec->fec_set_idx==0U ) ) {
    1930             :     /* If the first FEC set for a slot is observed, provision a new bank
    1931             :        if you are not the leader.  Remove any stale block id map entry
    1932             :        and update the block id entry. */
    1933           0 :     fd_bank_t   bank_[1];
    1934           0 :     fd_bank_t * bank = NULL;
    1935           0 :     if( FD_UNLIKELY( reasm_fec->is_leader ) ) {
    1936           0 :       bank = ctx->leader_bank;
    1937           0 :     } else {
    1938           0 :       bank = fd_banks_new_bank( bank_, ctx->banks, reasm_fec->parent_bank_idx, now );
    1939           0 :     }
    1940             : 
    1941           0 :     reasm_fec->bank_idx = bank->data->idx;
    1942           0 :     reasm_fec->bank_seq = bank->data->bank_seq;
    1943             : 
    1944             :     /* At this point remove any stale entry in the block id map if it
    1945             :        exists and set the block id as not having been seen yet.  This is
    1946             :        safe because we know that the old entry for this bank index has
    1947             :        already been pruned away. */
    1948           0 :     fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ reasm_fec->bank_idx ];
    1949           0 :     if( FD_LIKELY( fd_block_id_map_ele_query( ctx->block_id_map, &block_id_ele->latest_mr, NULL, ctx->block_id_arr )==block_id_ele ) ) {
    1950           0 :       FD_TEST( fd_block_id_map_ele_remove( ctx->block_id_map, &block_id_ele->latest_mr, NULL, ctx->block_id_arr ) );
    1951           0 :     }
    1952           0 :     block_id_ele->block_id_seen  = 0;
    1953           0 :     block_id_ele->slot           = reasm_fec->slot;
    1954           0 :     block_id_ele->latest_fec_idx = 0U;
    1955           0 :     block_id_ele->latest_mr      = reasm_fec->key;
    1956           0 :   } else {
    1957             :     /* We are continuing to execute through a slot that we already have
    1958             :        a bank index for. */
    1959           0 :     reasm_fec->bank_idx = reasm_fec->parent_bank_idx;
    1960           0 :     reasm_fec->bank_seq = reasm_fec->parent_bank_seq;
    1961             : 
    1962           0 :     FD_TEST( reasm_fec->bank_idx!=ULONG_MAX );
    1963             : 
    1964           0 :     fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ reasm_fec->bank_idx ];
    1965           0 :     if( FD_UNLIKELY( block_id_ele->latest_fec_idx>=reasm_fec->fec_set_idx ) ) {
    1966           0 :       FD_LOG_WARNING(( "dropping FEC set (slot=%lu, fec_set_idx=%u) because it is at least as old as the latest FEC set (slot=%lu, fec_set_idx=%u)", reasm_fec->slot, reasm_fec->fec_set_idx, block_id_ele->slot, block_id_ele->latest_fec_idx ));
    1967           0 :       return;
    1968           0 :     }
    1969           0 :     block_id_ele->latest_fec_idx = reasm_fec->fec_set_idx;
    1970           0 :     block_id_ele->latest_mr      = reasm_fec->key;
    1971           0 :   }
    1972             : 
    1973           0 :   if( FD_UNLIKELY( reasm_fec->slot_complete ) ) {
    1974           0 :     fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ reasm_fec->bank_idx ];
    1975           0 :     block_id_ele->block_id_seen  = 1;
    1976           0 :     block_id_ele->latest_mr      = reasm_fec->key;
    1977           0 :     block_id_ele->latest_fec_idx = reasm_fec->fec_set_idx;
    1978           0 :     FD_TEST( fd_block_id_map_ele_insert( ctx->block_id_map, block_id_ele, ctx->block_id_arr ) );
    1979           0 :   }
    1980             : 
    1981             :   /* If we are the leader, we don't need to process the FEC set. */
    1982           0 :   if( FD_UNLIKELY( reasm_fec->is_leader ) ) return;
    1983             : 
    1984             :   /* Forks form a partial ordering over FEC sets. The Repair tile
    1985             :       delivers FEC sets in-order per fork, but FEC set ordering across
    1986             :       forks is arbitrary */
    1987           0 :   fd_sched_fec_t sched_fec[ 1 ];
    1988             : 
    1989             : # if DEBUG_LOGGING
    1990             :   FD_BASE58_ENCODE_32_BYTES( reasm_fec->key.key, key_b58 );
    1991             :   FD_BASE58_ENCODE_32_BYTES( reasm_fec->cmr.key, cmr_b58 );
    1992             :   FD_LOG_INFO(( "replay processing FEC set for slot %lu fec_set_idx %u, mr %s cmr %s", reasm_fec->slot, reasm_fec->fec_set_idx, key_b58, cmr_b58 ));
    1993             : # endif
    1994             : 
    1995           0 :   sched_fec->shred_cnt              = reasm_fec->data_cnt;
    1996           0 :   sched_fec->is_last_in_batch       = !!reasm_fec->data_complete;
    1997           0 :   sched_fec->is_last_in_block       = !!reasm_fec->slot_complete;
    1998           0 :   sched_fec->bank_idx               = reasm_fec->bank_idx;
    1999           0 :   sched_fec->parent_bank_idx        = reasm_fec->parent_bank_idx;
    2000           0 :   sched_fec->slot                   = reasm_fec->slot;
    2001           0 :   sched_fec->parent_slot            = reasm_fec->slot - reasm_fec->parent_off;
    2002           0 :   sched_fec->is_first_in_block      = reasm_fec->fec_set_idx==0U;
    2003           0 :   fd_funk_txn_xid_t const root = fd_accdb_root_get( ctx->accdb_admin );
    2004           0 :   fd_funk_txn_xid_copy( sched_fec->alut_ctx->xid, &root );
    2005           0 :   sched_fec->alut_ctx->accdb[0]     = ctx->accdb[0];
    2006           0 :   sched_fec->alut_ctx->els          = ctx->published_root_slot;
    2007             : 
    2008           0 :   fd_bank_t bank[1];
    2009           0 :   FD_TEST( fd_banks_bank_query( bank, ctx->banks, sched_fec->bank_idx ) );
    2010           0 :   if( sched_fec->is_first_in_block ) {
    2011           0 :     bank->data->refcnt++;
    2012           0 :     FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for sched", bank->data->idx, sched_fec->slot, bank->data->refcnt ));
    2013           0 :   }
    2014             : 
    2015             :   /* Read FEC set from the store.  This should happen before we try to
    2016             :      ingest the FEC set.  This allows us to filter out frags that were
    2017             :      in-flight when we published away minority forks that the frags land
    2018             :      on.  These frags would have no bank to execute against, because
    2019             :      their corresponding banks, or parent banks, have also been pruned
    2020             :      during publishing.  A query against store will rightfully tell us
    2021             :      that the underlying data is not found, implying that this is for a
    2022             :      minority fork that we can safeljy ignore. */
    2023             : 
    2024           0 :   ulong wait = (ulong)fd_log_wallclock();
    2025           0 :   ulong work = wait;
    2026           0 :   FD_STORE_SLOCK_BEGIN( ctx->store ) {
    2027           0 :     ctx->metrics.store_query_acquire++;
    2028           0 :     work = (ulong)fd_log_wallclock();
    2029           0 :     fd_histf_sample( ctx->metrics.store_query_wait, work - wait );
    2030             : 
    2031           0 :     fd_store_fec_t * store_fec = fd_store_query( ctx->store, &reasm_fec->key );
    2032           0 :     ctx->metrics.store_query_cnt++;
    2033           0 :     if( FD_UNLIKELY( !store_fec ) ) {
    2034             : 
    2035             :       /* The only case in which a FEC is not found in the store after
    2036             :          repair has notified is if the FEC was on a minority fork that
    2037             :          has already been published away.  In this case we abandon the
    2038             :          entire slice because it is no longer relevant.  */
    2039             : 
    2040           0 :       ctx->metrics.store_query_missing_cnt++;
    2041           0 :       ctx->metrics.store_query_missing_mr = reasm_fec->key.ul[0];
    2042           0 :       FD_BASE58_ENCODE_32_BYTES( reasm_fec->key.key, key_b58 );
    2043           0 :       FD_LOG_WARNING(( "store fec for slot: %lu is on minority fork already pruned by publish. abandoning slice. root: %lu. pruned merkle: %s", reasm_fec->slot, ctx->consensus_root_slot, key_b58 ));
    2044           0 :       return;
    2045           0 :     }
    2046           0 :     sched_fec->fec = store_fec;
    2047           0 :     if( FD_UNLIKELY( !fd_sched_fec_ingest( ctx->sched, sched_fec ) ) ) { /* FIXME this critical section is unnecessarily complex. should refactor to just be held for the memcpy and block_offs. */
    2048           0 :       mark_bank_dead( ctx, stem, sched_fec->bank_idx );
    2049           0 :     }
    2050           0 :   } FD_STORE_SLOCK_END;
    2051             : 
    2052           0 :   ctx->metrics.store_query_release++;
    2053           0 :   fd_histf_sample( ctx->metrics.store_query_work, (ulong)fd_log_wallclock() - work );
    2054           0 : }
    2055             : 
    2056             : static void
    2057             : process_fec_set( fd_replay_tile_t *  ctx,
    2058             :                  fd_stem_context_t * stem,
    2059           0 :                  fd_reasm_fec_t *    reasm_fec ) {
    2060             : 
    2061           0 :   fd_reasm_fec_t * parent = fd_reasm_parent( ctx->reasm, reasm_fec );
    2062           0 :   if( FD_UNLIKELY( !parent ) ) {
    2063           0 :     FD_LOG_WARNING(( "dropping FEC set (slot=%lu, fec_set_idx=%u) because it is unconnected in reasm", reasm_fec->slot, reasm_fec->fec_set_idx ));
    2064           0 :     return;
    2065           0 :   }
    2066             : 
    2067           0 :   if( FD_UNLIKELY( parent->bank_dead ) ) {
    2068             :     /* Inherit the dead flag from the parent.  If a dead slot is
    2069             :        completed, we publish the slot as dead.  Don't insert FECs for
    2070             :        dead slots. */
    2071           0 :     reasm_fec->bank_dead = 1;
    2072           0 :     if( FD_UNLIKELY( reasm_fec->slot_complete ) ) publish_slot_dead( ctx, stem, reasm_fec->slot, &reasm_fec->key );
    2073           0 :     FD_LOG_DEBUG(( "dropping FEC set (slot=%lu, fec_set_idx=%u) because parent bank is marked dead", reasm_fec->slot, reasm_fec->fec_set_idx ));
    2074           0 :     return;
    2075           0 :   }
    2076             : 
    2077             :   /* Standard case, the parent FEC has a valid corresponding bank. */
    2078           0 :   fd_bank_t parent_fec_bank[1];
    2079           0 :   if( FD_LIKELY( fd_banks_bank_query( parent_fec_bank, ctx->banks, parent->bank_idx ) &&
    2080           0 :                  parent_fec_bank->data->bank_seq==parent->bank_seq ) ) {
    2081           0 :     insert_fec_set( ctx, stem, reasm_fec );
    2082           0 :     return;
    2083           0 :   }
    2084             : 
    2085             :   /* In the case the FEC doesn't directly connect, iterate up the reasm
    2086             :      tree to find the closest valid slot complete that corresponds to a
    2087             :      valid bank. */
    2088             : 
    2089             :   /* First keep track of all of the slot completes up to and including
    2090             :      the fec we want to insert off of. */
    2091           0 :   fd_reasm_fec_t * path[ FD_BANKS_MAX_BANKS ];
    2092           0 :   ulong            path_cnt = 0UL;
    2093           0 :   path[ path_cnt++ ] = reasm_fec;
    2094             : 
    2095           0 :   for( fd_reasm_fec_t * curr = reasm_fec;; ) {
    2096           0 :     curr = fd_reasm_parent( ctx->reasm, curr );
    2097           0 :     FD_TEST( curr );
    2098           0 :     if( FD_LIKELY( !curr->slot_complete ) ) continue;
    2099             : 
    2100           0 :     fd_bank_t curr_bank[1];
    2101           0 :     if( FD_LIKELY( fd_banks_bank_query( curr_bank, ctx->banks, curr->bank_idx ) && curr_bank->data->bank_seq==curr->bank_seq ) ) break;
    2102             : 
    2103           0 :     FD_TEST( path_cnt<=FD_BANKS_MAX_BANKS );
    2104           0 :     path[ path_cnt++ ] = curr;
    2105           0 :   }
    2106             : 
    2107           0 :   for( ulong i=path_cnt; i>0UL; i-- ) {
    2108           0 :     fd_reasm_fec_t * leaf = path[ i-1 ];
    2109             : 
    2110             :     /* If there's not capacity in the sched or banks, return early and
    2111             :        drop the FEC.  We have inserted as much as we can for now. */
    2112           0 :     if( FD_UNLIKELY( fd_sched_can_ingest_cnt( ctx->sched ) < (leaf->fec_set_idx/FD_FEC_SHRED_CNT + 1) ) ) return;
    2113           0 :     if( FD_UNLIKELY( fd_banks_is_full( ctx->banks ) ) ) return;
    2114             : 
    2115             :     /* Gather all FECs for this slot; */
    2116           0 :     fd_reasm_fec_t * slot_fecs[ FD_FEC_BLK_MAX ];
    2117           0 :     fd_reasm_fec_t * curr = leaf;
    2118           0 :     for(;;) {
    2119           0 :       slot_fecs[ curr->fec_set_idx/FD_FEC_SHRED_CNT ] = curr;
    2120           0 :       if( curr->fec_set_idx==0U ) break;
    2121           0 :       curr = fd_reasm_parent( ctx->reasm, curr );
    2122           0 :       FD_TEST( curr );
    2123           0 :     }
    2124           0 :     FD_LOG_NOTICE(( "backfilling FEC sets for slot %lu from fec_set_idx %u to fec_set_idx %u", leaf->slot, leaf->fec_set_idx, curr->fec_set_idx ));
    2125             : 
    2126           0 :     for( ulong j=0UL; j<=leaf->fec_set_idx/FD_FEC_SHRED_CNT; j++ ) {
    2127           0 :       insert_fec_set( ctx, stem, slot_fecs[ j ] );
    2128           0 :     }
    2129           0 :   }
    2130           0 : }
    2131             : 
    2132             : /* accdb_advance_root moves account records from the unrooted to the
    2133             :    rooted database. */
    2134             : 
    2135             : static inline ulong
    2136           0 : accdb_root_op_total( fd_replay_tile_t const * ctx ) {
    2137           0 :   return ctx->accdb_admin->base.root_cnt +
    2138           0 :          ctx->accdb_admin->base.reclaim_cnt;
    2139           0 : }
    2140             : 
    2141             : static void
    2142             : accdb_advance_root( fd_replay_tile_t * ctx,
    2143             :                     ulong              slot,
    2144           0 :                     ulong              bank_idx ) {
    2145           0 :   fd_funk_txn_xid_t xid = { .ul[0] = slot, .ul[1] = bank_idx };
    2146           0 :   FD_LOG_DEBUG(( "advancing root to slot=%lu", slot ));
    2147             : 
    2148           0 :   long rooted_accounts   = -(long)accdb_root_op_total( ctx );
    2149           0 :   long root_accounts_dt  = -fd_tickcount();
    2150           0 :   fd_accdb_advance_root( ctx->accdb_admin, &xid );
    2151           0 :   rooted_accounts       += (long)accdb_root_op_total( ctx );
    2152           0 :   root_accounts_dt      += fd_tickcount();
    2153           0 :   fd_histf_sample( ctx->metrics.root_slot_dur,    (ulong)root_accounts_dt );
    2154           0 :   fd_histf_sample( ctx->metrics.root_account_dur, (ulong)root_accounts_dt / (ulong)fd_long_max( rooted_accounts, 1L ) );
    2155             : 
    2156           0 :   fd_progcache_txn_advance_root( ctx->progcache_admin, &xid );
    2157           0 : }
    2158             : 
    2159             : static int
    2160           0 : advance_published_root( fd_replay_tile_t * ctx ) {
    2161             : 
    2162           0 :   fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &ctx->consensus_root, NULL, ctx->block_id_arr );
    2163           0 :   if( FD_UNLIKELY( !block_id_ele ) ) {
    2164           0 :     FD_BASE58_ENCODE_32_BYTES( ctx->consensus_root.key, consensus_root_b58 );
    2165           0 :     FD_LOG_CRIT(( "invariant violation: block id ele not found for consensus root %s", consensus_root_b58 ));
    2166           0 :   }
    2167           0 :   ulong target_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
    2168             : 
    2169             :   /* If the identity vote has been seen on a bank that should be rooted,
    2170             :      then we are now ready to produce blocks. */
    2171           0 :   if( FD_UNLIKELY( !ctx->identity_vote_rooted ) ) {
    2172           0 :     fd_bank_t root_bank[1];
    2173           0 :     if( FD_UNLIKELY( !fd_banks_bank_query( root_bank, ctx->banks, target_bank_idx ) ) ) FD_LOG_CRIT(( "invariant violation: root bank not found for bank index %lu", target_bank_idx ));
    2174           0 :     if( fd_bank_identity_vote_idx_get( root_bank )==ctx->identity_idx ) ctx->identity_vote_rooted = 1;
    2175           0 :   }
    2176             : 
    2177           0 :   ulong advanceable_root_idx = ULONG_MAX;
    2178           0 :   if( FD_UNLIKELY( !fd_banks_advance_root_prepare( ctx->banks, target_bank_idx, &advanceable_root_idx ) ) ) {
    2179           0 :     ctx->metrics.storage_root_behind++;
    2180           0 :     return 0;
    2181           0 :   }
    2182             : 
    2183           0 :   fd_bank_t bank[1];
    2184           0 :   FD_TEST( fd_banks_bank_query( bank, ctx->banks, advanceable_root_idx ) );
    2185             : 
    2186           0 :   if( FD_UNLIKELY( advanceable_root_idx >= ctx->block_id_len ) ) {
    2187           0 :     FD_LOG_CRIT(( "invariant violation: advanceable root ele out of bounds [0, %lu) index %lu", ctx->block_id_len, advanceable_root_idx ));
    2188           0 :   }
    2189           0 :   fd_block_id_ele_t * advanceable_root_ele = &ctx->block_id_arr[ advanceable_root_idx ];
    2190             : 
    2191           0 :   ulong advanceable_root_slot = fd_bank_slot_get( bank );
    2192           0 :   accdb_advance_root( ctx, advanceable_root_slot, bank->data->idx );
    2193             : 
    2194           0 :   fd_txncache_advance_root( ctx->txncache, bank->data->txncache_fork_id );
    2195           0 :   fd_sched_advance_root( ctx->sched, advanceable_root_idx );
    2196           0 :   fd_banks_advance_root( ctx->banks, advanceable_root_idx );
    2197             : 
    2198             :   /* Set metrics pointers. */
    2199             : 
    2200             : 
    2201             :   /* Reasm also prunes from the store during its publish. */
    2202             : 
    2203           0 :   fd_reasm_publish( ctx->reasm, &advanceable_root_ele->latest_mr, ctx->store );
    2204             : 
    2205           0 :   ctx->published_root_slot     = advanceable_root_slot;
    2206           0 :   ctx->published_root_bank_idx = advanceable_root_idx;
    2207             : 
    2208           0 :   return 1;
    2209           0 : }
    2210             : 
    2211             : static void
    2212             : after_credit( fd_replay_tile_t *  ctx,
    2213             :               fd_stem_context_t * stem,
    2214             :               int *               opt_poll_in,
    2215           0 :               int *               charge_busy ) {
    2216           0 :   if( FD_UNLIKELY( !ctx->is_booted || !ctx->wfs_complete ) ) return;
    2217             : 
    2218           0 :   if( FD_UNLIKELY( maybe_become_leader( ctx, stem ) ) ) {
    2219           0 :     *charge_busy = 1;
    2220           0 :     *opt_poll_in = 0;
    2221           0 :     return;
    2222           0 :   }
    2223             : 
    2224             :   /* If we are leader, we can only unbecome the leader iff we have
    2225             :      received the poh hash from the poh tile and block id from reasm.
    2226             :      We have to do an additional check against the slot of the leader
    2227             :      bank because we lazily remove entries from the block id arr. */
    2228           0 :   if( FD_UNLIKELY( ctx->is_leader &&
    2229           0 :                    ctx->recv_poh &&
    2230           0 :                    ctx->block_id_arr[ ctx->leader_bank->data->idx ].block_id_seen &&
    2231           0 :                    ctx->block_id_arr[ ctx->leader_bank->data->idx ].slot==fd_bank_slot_get( ctx->leader_bank ) ) ) {
    2232             : 
    2233           0 :     fini_leader_bank( ctx, stem );
    2234           0 :     *charge_busy = 1;
    2235           0 :     *opt_poll_in = 0;
    2236           0 :     return;
    2237           0 :   }
    2238             : 
    2239           0 :   ulong bank_idx;
    2240           0 :   while( (bank_idx=fd_sched_pruned_block_next( ctx->sched ))!=ULONG_MAX ) {
    2241           0 :     fd_bank_t bank[1];
    2242           0 :     FD_TEST( fd_banks_bank_query( bank, ctx->banks, bank_idx ) );
    2243           0 :     bank->data->refcnt--;
    2244           0 :     FD_LOG_DEBUG(( "bank (idx=%lu) refcnt decremented to %lu for sched", bank->data->idx, bank->data->refcnt ));
    2245           0 :   }
    2246             : 
    2247             :   /* If the published_root is not caught up to the consensus root, then
    2248             :      we should try to advance the published root. */
    2249           0 :   if( FD_UNLIKELY( ctx->consensus_root_bank_idx!=ctx->published_root_bank_idx && advance_published_root( ctx ) ) ) {
    2250           0 :     *charge_busy = 1;
    2251           0 :     *opt_poll_in = 0;
    2252           0 :     return;
    2253           0 :   }
    2254             : 
    2255           0 :   if( FD_UNLIKELY( fd_banks_prune_dead_banks( ctx->banks ) ) ) {
    2256             :     // FIXME: anything pruned from banks should also be pruned from txncache and accdb
    2257           0 :     *charge_busy = 1;
    2258           0 :     *opt_poll_in = 0;
    2259           0 :     return;
    2260           0 :   }
    2261             : 
    2262             :   /* if reasm evicted is set, publish starting from reasm_evicted down
    2263             :      to the leaf node to repair so repair can re-request for it */
    2264             : 
    2265           0 :   if( FD_UNLIKELY( ctx->reasm_evicted ) ) {
    2266           0 :     fd_replay_fec_evicted_t evicted = (fd_replay_fec_evicted_t){ .mr = ctx->reasm_evicted->key, .slot = ctx->reasm_evicted->slot, .fec_set_idx = ctx->reasm_evicted->fec_set_idx, .bank_idx = ctx->reasm_evicted->bank_idx };
    2267           0 :     fd_memcpy( fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk ), &evicted, sizeof(fd_replay_fec_evicted_t) );
    2268           0 :     fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_REASM_EVICTED, ctx->replay_out->chunk,  sizeof(fd_replay_fec_evicted_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
    2269           0 :     ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_fec_evicted_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
    2270             : 
    2271             :     /* eviction policy only evicts chains of nodes until there is a
    2272             :        fork, so guaranteed that the evict path is always the left-child */
    2273           0 :     fd_reasm_pool_release( ctx->reasm, ctx->reasm_evicted );
    2274           0 :     ctx->reasm_evicted = fd_reasm_child( ctx->reasm, ctx->reasm_evicted ); /* indexes into pool, safe to use */
    2275             : 
    2276           0 :     *charge_busy = 1;
    2277           0 :     *opt_poll_in = 0;
    2278           0 :     return;
    2279           0 :   }
    2280             : 
    2281             :   /* If the reassembler has a fec that is ready, we should process it
    2282             :      and pass it to the scheduler. */
    2283           0 :   int evict_banks = 0;
    2284           0 :   if( FD_LIKELY( can_process_fec( ctx, &evict_banks ) ) ) {
    2285           0 :     fd_reasm_fec_t * fec = fd_reasm_pop( ctx->reasm );
    2286           0 :     process_fec_set( ctx, stem, fec );
    2287           0 :     *charge_busy = 1;
    2288           0 :     *opt_poll_in = 0;
    2289           0 :     return;
    2290           0 :   }
    2291             : 
    2292           0 :   if( FD_UNLIKELY( evict_banks ) ) {
    2293           0 :     FD_LOG_WARNING(( "banks are full and partially executed frontier banks are being evicted" ));
    2294           0 :     ulong frontier_cnt = 0UL;
    2295           0 :     ulong frontier_indices[ FD_BANKS_MAX_BANKS ];
    2296           0 :     fd_banks_get_frontier( ctx->banks, frontier_indices, &frontier_cnt );
    2297             : 
    2298             :     /* Mark all frontier banks as dead.  As refcnts on said banks are
    2299             :        drained, they will be pruned away. */
    2300           0 :     for( ulong i=0UL; i<frontier_cnt; i++ ) {
    2301           0 :       fd_bank_t bank[1];
    2302           0 :       FD_TEST( fd_banks_bank_query( bank, ctx->banks, frontier_indices[i] ) );
    2303           0 :       if( FD_UNLIKELY( ctx->is_leader && frontier_indices[i]==ctx->leader_bank->data->idx ) ) continue;
    2304           0 :       mark_bank_dead( ctx, stem, bank->data->idx );
    2305           0 :       fd_sched_block_abandon( ctx->sched, bank->data->idx );
    2306             : 
    2307             :       /* evict it from reasm - we can guarantee this is a leaf because
    2308             :          no new bank is allocated until a slot boundary.  If a fec has
    2309             :          children that are of a different slot, then it would never be
    2310             :          evicted from banks because that means the bank finished
    2311             :          executing on that slot. */
    2312           0 :       fd_block_id_ele_t * block_id_ele = &ctx->block_id_arr[ bank->data->idx ];
    2313           0 :       fd_reasm_fec_t * fec = fd_reasm_query( ctx->reasm, &block_id_ele->latest_mr );
    2314           0 :       FD_TEST( fec && fec->child == ULONG_MAX );
    2315           0 :       ctx->reasm_evicted = fd_reasm_remove( ctx->reasm, fec, ctx->store );
    2316           0 :     }
    2317           0 :   }
    2318             : 
    2319           0 :   *charge_busy = replay( ctx, stem );
    2320           0 :   *opt_poll_in = !*charge_busy;
    2321           0 : }
    2322             : 
    2323             : static int
    2324             : before_frag( fd_replay_tile_t * ctx,
    2325             :              ulong              in_idx,
    2326             :              ulong              seq FD_PARAM_UNUSED,
    2327           0 :              ulong              sig ) {
    2328             : 
    2329           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_GOSSIP_OUT && sig!=FD_GOSSIP_UPDATE_TAG_WFS_DONE ) ) return 1;
    2330           0 :   return 0;
    2331           0 : }
    2332             : 
    2333             : static void
    2334             : process_exec_task_done( fd_replay_tile_t *          ctx,
    2335             :                         fd_stem_context_t *         stem,
    2336             :                         fd_execrp_task_done_msg_t * msg,
    2337           0 :                         ulong                       sig ) {
    2338             : 
    2339           0 :   ulong exec_tile_idx = sig&0xFFFFFFFFUL;
    2340             : 
    2341           0 :   fd_bank_t bank[1];
    2342           0 :   FD_TEST( fd_banks_bank_query( bank, ctx->banks, msg->bank_idx ) );
    2343           0 :   FD_TEST( bank->data );
    2344           0 :   bank->data->refcnt--;
    2345             : 
    2346           0 :   switch( sig>>32 ) {
    2347           0 :     case FD_EXECRP_TT_TXN_EXEC: {
    2348           0 :       if( FD_UNLIKELY( !ctx->identity_vote_rooted ) ) {
    2349             :         /* Query the txn signature against our recently generated vote
    2350             :            txn signatures.  If the query is successful, then we have
    2351             :            seen our own vote transaction land and this should be marked
    2352             :            in the bank.  We go through this exercise until we've seen
    2353             :            our vote rooted. */
    2354           0 :         fd_txn_p_t * txn_p = fd_sched_get_txn( ctx->sched, msg->txn_exec->txn_idx );
    2355             : 
    2356           0 :         fd_pubkey_t * identity_pubkey_out = NULL;
    2357           0 :         if( fd_vote_tracker_query_sig( ctx->vote_tracker, fd_type_pun_const( txn_p->payload+TXN( txn_p )->signature_off ), &identity_pubkey_out ) && fd_pubkey_eq( identity_pubkey_out, ctx->identity_pubkey ) ) {
    2358           0 :           fd_bank_identity_vote_idx_set( bank, ctx->identity_idx );
    2359           0 :         }
    2360           0 :       }
    2361           0 :       if( FD_UNLIKELY( !msg->txn_exec->is_committable && !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) {
    2362             :         /* Every transaction in a valid block has to execute.
    2363             :            Otherwise, we should mark the block as dead. */
    2364           0 :         mark_bank_dead( ctx, stem, bank->data->idx );
    2365           0 :         fd_sched_block_abandon( ctx->sched, bank->data->idx );
    2366           0 :       }
    2367           0 :       if( FD_UNLIKELY( (bank->data->flags&FD_BANK_FLAGS_DEAD) && bank->data->refcnt==0UL ) ) {
    2368           0 :         fd_banks_mark_bank_frozen( ctx->banks, bank );
    2369           0 :       }
    2370           0 :       int res = fd_sched_task_done( ctx->sched, FD_SCHED_TT_TXN_EXEC, msg->txn_exec->txn_idx, exec_tile_idx, NULL );
    2371           0 :       FD_TEST( res==0 );
    2372           0 :       fd_sched_txn_info_t * txn_info = fd_sched_get_txn_info( ctx->sched, msg->txn_exec->txn_idx );
    2373           0 :       txn_info->flags |= FD_SCHED_TXN_EXEC_DONE;
    2374           0 :       if( FD_LIKELY( !(txn_info->flags&FD_SCHED_TXN_SIGVERIFY_DONE)||!txn_info->txn_err ) ) { /* Set execution status if sigverify hasn't happened yet or if sigverify was a success. */
    2375           0 :         txn_info->txn_err = msg->txn_exec->txn_err;
    2376           0 :         txn_info->flags  |= fd_ulong_if( msg->txn_exec->is_committable, FD_SCHED_TXN_IS_COMMITTABLE, 0UL );
    2377           0 :         txn_info->flags  |= fd_ulong_if( msg->txn_exec->is_fees_only,   FD_SCHED_TXN_IS_FEES_ONLY,   0UL );
    2378           0 :       }
    2379           0 :       if( FD_UNLIKELY( (txn_info->flags&FD_SCHED_TXN_REPLAY_DONE)==FD_SCHED_TXN_REPLAY_DONE ) ) { /* UNLIKELY because generally exec happens before sigverify. */
    2380           0 :         publish_txn_executed( ctx, stem, msg->txn_exec->txn_idx );
    2381           0 :       }
    2382           0 :       break;
    2383           0 :     }
    2384           0 :     case FD_EXECRP_TT_TXN_SIGVERIFY: {
    2385           0 :       fd_sched_txn_info_t * txn_info = fd_sched_get_txn_info( ctx->sched, msg->txn_sigverify->txn_idx );
    2386           0 :       txn_info->flags |= FD_SCHED_TXN_SIGVERIFY_DONE;
    2387           0 :       if( FD_UNLIKELY( msg->txn_sigverify->err ) ) {
    2388           0 :         txn_info->txn_err = FD_RUNTIME_TXN_ERR_SIGNATURE_FAILURE;
    2389           0 :         txn_info->flags  &= ~FD_SCHED_TXN_IS_COMMITTABLE;
    2390           0 :         txn_info->flags  &= ~FD_SCHED_TXN_IS_FEES_ONLY;
    2391           0 :       }
    2392           0 :       if( FD_UNLIKELY( msg->txn_sigverify->err && !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) {
    2393             :         /* Every transaction in a valid block has to sigverify.
    2394             :            Otherwise, we should mark the block as dead.  Also freeze the
    2395             :            bank if possible. */
    2396           0 :         mark_bank_dead( ctx, stem, bank->data->idx );
    2397           0 :         fd_sched_block_abandon( ctx->sched, bank->data->idx );
    2398           0 :       }
    2399           0 :       if( FD_UNLIKELY( (bank->data->flags&FD_BANK_FLAGS_DEAD) && bank->data->refcnt==0UL ) ) {
    2400           0 :         fd_banks_mark_bank_frozen( ctx->banks, bank );
    2401           0 :       }
    2402           0 :       int res = fd_sched_task_done( ctx->sched, FD_SCHED_TT_TXN_SIGVERIFY, msg->txn_sigverify->txn_idx, exec_tile_idx, NULL );
    2403           0 :       FD_TEST( res==0 );
    2404           0 :       if( FD_LIKELY( (txn_info->flags&FD_SCHED_TXN_REPLAY_DONE)==FD_SCHED_TXN_REPLAY_DONE ) ) {
    2405           0 :         publish_txn_executed( ctx, stem, msg->txn_exec->txn_idx );
    2406           0 :       }
    2407           0 :       break;
    2408           0 :     }
    2409           0 :     case FD_EXECRP_TT_POH_HASH: {
    2410           0 :       int res = fd_sched_task_done( ctx->sched, FD_SCHED_TT_POH_HASH, ULONG_MAX, exec_tile_idx, msg->poh_hash );
    2411           0 :       if( FD_UNLIKELY( res<0 && !(bank->data->flags&FD_BANK_FLAGS_DEAD) ) ) {
    2412           0 :         mark_bank_dead( ctx, stem, bank->data->idx );
    2413           0 :       }
    2414           0 :       if( FD_UNLIKELY( (bank->data->flags&FD_BANK_FLAGS_DEAD) && bank->data->refcnt==0UL ) ) {
    2415           0 :         fd_banks_mark_bank_frozen( ctx->banks, bank );
    2416           0 :       }
    2417           0 :       break;
    2418           0 :     }
    2419           0 :     default: FD_LOG_CRIT(( "unexpected sig 0x%lx", sig ));
    2420           0 :   }
    2421             : 
    2422             :   /* Reference counter just decreased, and an exec tile just got freed
    2423             :      up.  If there's a need to be more aggressively pruning, we could
    2424             :      check here if more slots just became publishable and publish.  Not
    2425             :      publishing here shouldn't bloat the fork tree too much though.  We
    2426             :      mark minority forks dead as soon as we can, and execution dispatch
    2427             :      stops on dead blocks.  So shortly afterwards, dead blocks should be
    2428             :      eligible for pruning as in-flight transactions retire from the
    2429             :      execution pipeline. */
    2430             : 
    2431           0 : }
    2432             : 
static void
process_tower_slot_done( fd_replay_tile_t *           ctx,
                         fd_stem_context_t *          stem,
                         fd_tower_slot_done_t const * msg,
                         ulong                        seq ) {
  /* Tower finished processing a replayed slot.  Releases the bank
     reference tower held, adopts the new reset target, recomputes our
     next leader slot, notifies PoH of the reset, advances the
     consensus root if tower rooted a new slot, and reports the
     reset-to-root distance metric. */
  fd_bank_t replay_bank[1];
  if( FD_UNLIKELY( !fd_banks_bank_query( replay_bank, ctx->banks, msg->replay_bank_idx ) ) ) FD_LOG_CRIT(( "invariant violation: bank not found for bank index %lu", msg->replay_bank_idx ));
  replay_bank->data->refcnt--;
  FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt decremented to %lu for tower", replay_bank->data->idx, msg->replay_slot, replay_bank->data->refcnt ));

  /* Adopt tower's reset target and recompute the next slot we lead.
     The next leader slot must be strictly after both the reset slot
     and any slot we have already begun leading. */
  ctx->reset_block_id = msg->reset_block_id;
  ctx->reset_slot     = msg->reset_slot;
  ctx->reset_timestamp_nanos = fd_log_wallclock();
  ulong min_leader_slot = fd_ulong_max( msg->reset_slot+1UL, fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot+1UL ) );
  ctx->next_leader_slot = fd_multi_epoch_leaders_get_next_slot( ctx->mleaders, min_leader_slot, ctx->identity_pubkey );
  if( FD_LIKELY( ctx->next_leader_slot != ULONG_MAX ) ) {
    /* Estimate the tickcount at which our leader slot should begin. */
    ctx->next_leader_tickcount = (long)((double)(ctx->next_leader_slot-ctx->reset_slot-1UL)*ctx->slot_duration_ticks) + fd_tickcount();
  } else {
    ctx->next_leader_tickcount = LONG_MAX;
  }

  /* Resolve the reset block id back to its bank index. */
  fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->reset_block_id, NULL, ctx->block_id_arr );
  if( FD_UNLIKELY( !block_id_ele ) ) {
    FD_BASE58_ENCODE_32_BYTES( msg->reset_block_id.key, reset_block_id_b58 );
    FD_LOG_CRIT(( "invariant violation: block id ele doesn't exist for reset block id: %s, slot: %lu", reset_block_id_b58, msg->reset_slot ));
  }
  ulong reset_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );

  fd_bank_t bank[1];
  if( FD_UNLIKELY( !fd_banks_bank_query( bank, ctx->banks, reset_bank_idx ) ) ) {
    FD_LOG_CRIT(( "invariant violation: bank not found for bank index %lu", reset_bank_idx ));
  }

  /* A newly rooted slot can never be ahead of the reset slot. */
  if( FD_LIKELY( msg->root_slot!=ULONG_MAX ) ) FD_TEST( msg->root_slot<=msg->reset_slot );
  fd_memcpy( ctx->reset_bank, bank, sizeof(fd_bank_t) );

  if( FD_LIKELY( ctx->replay_out->idx!=ULONG_MAX ) ) {
    /* Tell PoH where to resume hashing from. */
    fd_poh_reset_t * reset = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );

    reset->bank_idx = bank->data->idx;
    reset->timestamp = ctx->reset_timestamp_nanos;
    reset->completed_slot = ctx->reset_slot;
    reset->hashcnt_per_tick = fd_bank_hashes_per_tick_get( bank );
    reset->ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
    reset->tick_duration_ns = (ulong)(ctx->slot_duration_nanos/(double)reset->ticks_per_slot);

    fd_memcpy( reset->completed_block_id, &block_id_ele->latest_mr, sizeof(fd_hash_t) );

    /* The blockhash PoH chains from is the most recent entry in the
       reset bank's block hash queue. */
    fd_blockhashes_t const * block_hash_queue = fd_bank_block_hash_queue_query( bank );
    fd_hash_t const * last_hash = fd_blockhashes_peek_last_hash( block_hash_queue );
    FD_TEST( last_hash );
    fd_memcpy( reset->completed_blockhash, last_hash->uc, sizeof(fd_hash_t) );

    ulong ticks_per_slot = fd_bank_ticks_per_slot_get( bank );
    if( FD_UNLIKELY( reset->hashcnt_per_tick==1UL ) ) {
      /* Low power producer, maximum of one microblock per tick in the slot */
      reset->max_microblocks_in_slot = ticks_per_slot;
    } else {
      /* See the long comment in after_credit for this limit */
      reset->max_microblocks_in_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ticks_per_slot*(reset->hashcnt_per_tick-1UL) );
    }
    reset->next_leader_slot = ctx->next_leader_slot;

    fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_RESET, ctx->replay_out->chunk, sizeof(fd_poh_reset_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
    ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_poh_reset_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
  }

  FD_LOG_INFO(( "tower_slot_done(reset_slot=%lu, next_leader_slot=%lu, vote_slot=%lu, replay_slot=%lu, root_slot=%lu, seqno=%lu)", msg->reset_slot, ctx->next_leader_slot, msg->vote_slot, msg->replay_slot, msg->root_slot, seq ));
  maybe_become_leader( ctx, stem );

  if( FD_LIKELY( msg->root_slot!=ULONG_MAX ) ) {
    /* Tower rooted a new slot: the consensus root only moves forward. */
    FD_TEST( msg->root_slot>=ctx->consensus_root_slot );
    fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->root_block_id, NULL, ctx->block_id_arr );
    FD_TEST( block_id_ele );
    ctx->consensus_root_slot     = msg->root_slot;
    ctx->consensus_root          = msg->root_block_id;
    ctx->consensus_root_bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );

    publish_root_advanced( ctx, stem );

    fd_sched_root_notify( ctx->sched, ctx->consensus_root_bank_idx );
  }

  /* Walk from the reset bank up the fork tree to the consensus root to
     measure how far off the root we are resetting. */
  ulong distance = 0UL;
  fd_bank_t * parent = bank;
  while( parent ) {
    if( FD_UNLIKELY( parent->data->idx==ctx->consensus_root_bank_idx ) ) break;
    /* NOTE(review): bank[1] is reused as the output buffer here while
       parent may alias it on the first iteration — assumes
       fd_banks_get_parent tolerates in-place queries; confirm. */
    parent = fd_banks_get_parent( bank, ctx->banks, parent );
    distance++;
  }

  FD_MGAUGE_SET( REPLAY, ROOT_DISTANCE, distance );
}
    2527             : 
static void
process_fec_complete( fd_replay_tile_t *  ctx,
                      fd_stem_context_t * stem,
                      uchar const *       shred_buf ) {
  /* Handle a FEC-set-complete notification from repair.  shred_buf
     carries a data shred header followed by a trailer laid out as:
     merkle root (32 bytes), chained merkle root (32 bytes), then an
     int flagging leader-produced FEC sets. */
  fd_shred_t const * shred = (fd_shred_t const *)fd_type_pun_const( shred_buf );

  fd_hash_t const * merkle_root         = (fd_hash_t const *)fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ );
  fd_hash_t const * chained_merkle_root = (fd_hash_t const *)fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) );
  int               is_leader_fec       = *(int const *)     fd_type_pun_const( shred_buf + FD_SHRED_DATA_HEADER_SZ + sizeof(fd_hash_t) + sizeof(fd_hash_t) );

  int data_complete = !!( shred->data.flags & FD_SHRED_DATA_FLAG_DATA_COMPLETE );
  int slot_complete = !!( shred->data.flags & FD_SHRED_DATA_FLAG_SLOT_COMPLETE );

  /* The first FEC set of a slot whose parent is the reasm anchor slot
     chains directly off the reasm root. */
  if( FD_UNLIKELY( shred->slot - shred->data.parent_off == fd_reasm_slot0( ctx->reasm ) && shred->fec_set_idx == 0) ) {
    chained_merkle_root = &fd_reasm_root( ctx->reasm )->key;
  }

  /* Already known to reasm: nothing to do. */
  if( FD_UNLIKELY( fd_reasm_query( ctx->reasm, merkle_root ) ) ) return;
  fd_reasm_fec_t * fec = fd_reasm_insert( ctx->reasm, merkle_root, chained_merkle_root, shred->slot, shred->fec_set_idx, shred->data.parent_off, (ushort)(shred->idx - shred->fec_set_idx + 1), data_complete, slot_complete, is_leader_fec, ctx->store, &ctx->reasm_evicted );

  if( FD_UNLIKELY( !fec ) ) {
    /* reasm failed to insert.  We don't want to just put this back on
       the returnable_frag queue because it's unclear whether this FEC
       is truly something we want to process.  Therefore our best option
       is to punt it and "go around."  reasm_insert populates its last
       pool element with the data of the failed insert, so we make sure
       to publish the failed insert data to repair in after_credit. */
    return;
  }

  /* Insertion may have evicted a FEC belonging to a block that is
     still being replayed; that block can no longer complete, so mark
     its bank dead and stop dispatching work for it. */
  if( FD_UNLIKELY( ctx->reasm_evicted && ctx->reasm_evicted->bank_idx != ULONG_MAX ) ) {
    mark_bank_dead( ctx, stem, ctx->reasm_evicted->bank_idx );
    fd_sched_block_abandon( ctx->sched, ctx->reasm_evicted->bank_idx );
  }
}
    2563             : 
    2564             : static void
    2565           0 : process_resolv_slot_completed( fd_replay_tile_t * ctx, ulong bank_idx ) {
    2566           0 :   fd_bank_t bank[1];
    2567           0 :   FD_TEST( fd_banks_bank_query( bank, ctx->banks, bank_idx ) );
    2568           0 :   bank->data->refcnt--;
    2569           0 :   FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt decremented to %lu for resolv", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
    2570           0 : }
    2571             : 
    2572             : static void
    2573             : process_vote_txn_sent( fd_replay_tile_t *  ctx,
    2574           0 :                        fd_txn_m_t *        txnm ) {
    2575             :   /* The send tile has signed and sent a vote.  Add this vote to the
    2576             :      vote tracker.  We go through this exercise until the client has
    2577             :      seen a vote corresponding to the current identity rooted. */
    2578           0 :   if( FD_UNLIKELY( !ctx->identity_vote_rooted ) ) {
    2579           0 :     uchar *    payload = (uchar *)txnm + sizeof(fd_txn_m_t);
    2580           0 :     uchar      txn_mem[ FD_TXN_MAX_SZ ] __attribute__((aligned(alignof(fd_txn_t))));
    2581           0 :     fd_txn_t * txn = (fd_txn_t *)txn_mem;
    2582           0 :     if( FD_UNLIKELY( !fd_txn_parse( payload, txnm->payload_sz, txn_mem, NULL ) ) ) {
    2583           0 :       FD_LOG_CRIT(( "Could not parse txn from send tile" ));
    2584           0 :     }
    2585             :     /* The identity of the validator that the signed the vote will
    2586             :        always be the first signer in the vote transaction. */
    2587           0 :     fd_pubkey_t * vote_identity = fd_type_pun( payload+txn->acct_addr_off );
    2588           0 :     fd_vote_tracker_insert( ctx->vote_tracker, vote_identity, fd_type_pun_const( payload+txn->signature_off ) );
    2589           0 :   }
    2590           0 : }
    2591             : 
static inline void
maybe_verify_shred_version( fd_replay_tile_t * ctx ) {
  /* Cross-check the shred version whenever a new source of truth
     becomes available (configured value, ipecho, genesis hash,
     snapshot hard forks).  Any mismatch is fatal. */
  if( FD_LIKELY( ctx->expected_shred_version && ctx->ipecho_shred_version ) ) {
    if( FD_UNLIKELY( ctx->expected_shred_version!=ctx->ipecho_shred_version ) ) {
      FD_LOG_ERR(( "shred version mismatch: expected %u but got %u from ipecho", ctx->expected_shred_version, ctx->ipecho_shred_version ) );
    }
  }

  /* Once the genesis hash and the snapshot's hard forks are both
     known, recompute the shred version and compare it against the
     trusted value (the configured version wins over ipecho). */
  if( FD_LIKELY( ctx->has_genesis_hash && ctx->hard_forks_cnt!=ULONG_MAX && (ctx->expected_shred_version || ctx->ipecho_shred_version) ) ) {
    ushort expected_shred_version = ctx->expected_shred_version ? ctx->expected_shred_version : ctx->ipecho_shred_version;

    /* NOTE(review): both hard_forks_cnts (plural) and hard_forks_cnt
       are passed — presumably per-fork counts plus the number of
       forks; confirm the field semantics match compute_shred_version's
       signature. */
    ushort actual_shred_version = compute_shred_version( ctx->genesis_hash->uc, ctx->hard_forks, ctx->hard_forks_cnts, ctx->hard_forks_cnt );

    if( FD_UNLIKELY( expected_shred_version!=actual_shred_version ) ) {
      FD_BASE58_ENCODE_32_BYTES( ctx->genesis_hash->uc, genesis_hash_b58 );
      FD_LOG_ERR(( "Your genesis.bin file at `%s` combined with the hard_forks from the loaded snapshot have produced "
                   "a shred version of %hu but the entrypoint you connected to on boot reported a shred version of %hu. "
                   "This likely means that the genesis.bin file you have is for a different cluster than the one you "
                   "are trying to connect to, you can delete it and restart the node to download the correct genesis "
                   "file automatically.", ctx->genesis_path, actual_shred_version, expected_shred_version ));
    }
  }
}
    2615             : 
    2616             : static inline void
    2617           0 : maybe_verify_genesis_timestamp( fd_replay_tile_t * ctx ) {
    2618           0 :   if( FD_LIKELY( !ctx->has_expected_genesis_timestamp || !ctx->has_genesis_timestamp ) ) return;
    2619           0 :   if( FD_LIKELY( ctx->genesis_timestamp==ctx->expected_genesis_timestamp ) ) return;
    2620             : 
    2621           0 :   FD_LOG_ERR(( "Your genesis.bin file at `%s` has a genesis timestamp of %lu but the snapshot you loaded has a genesis "
    2622           0 :                "timestamp of %lu. This either means that the genesis.bin file you have is for a different cluster than "
    2623           0 :                "the one you are trying to connect to, or you have loaded a snapshot for the wrong cluster. In either "
    2624           0 :                "case, you can delete the problematic file and restart the node to download the correct one automatically.",
    2625           0 :                ctx->genesis_path, ctx->genesis_timestamp, ctx->expected_genesis_timestamp ));
    2626           0 : }
    2627             : 
static void
process_tower_optimistic_confirmed( fd_replay_tile_t *                ctx,
                                    fd_stem_context_t *               stem,
                                    fd_tower_slot_confirmed_t const * msg ) {
  /* Tower reports a slot as optimistically confirmed.  Resolves the
     block id to its bank, takes an extra reference for RPC if enabled,
     and publishes an OC-advanced message downstream. */
  fd_block_id_ele_t * block_id_ele = fd_block_id_map_ele_query( ctx->block_id_map, &msg->block_id, NULL, ctx->block_id_arr );
  if( FD_UNLIKELY( !block_id_ele ) ) {
    FD_BASE58_ENCODE_32_BYTES( msg->block_id.key, block_id_b58 );
    FD_LOG_WARNING(( "missing bank for confirmed block_id: %s level %d", block_id_b58, msg->level ));
    return;
  }

  ulong bank_idx = fd_block_id_ele_get_idx( ctx->block_id_arr, block_id_ele );
  fd_bank_t bank_[1]; fd_bank_t * bank = fd_banks_bank_query( bank_, ctx->banks, bank_idx );


  if( FD_UNLIKELY( !bank ) ) {
    FD_BASE58_ENCODE_32_BYTES( msg->block_id.key, block_id_cstr );
    FD_LOG_WARNING(( "failed to query optimistically confirmed bank for block id %s", block_id_cstr ));
    return;
  }

  if( ctx->rpc_enabled ) {
    /* RPC holds the bank alive until it releases via IN_KIND_RPC. */
    bank->data->refcnt++;
    FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt incremented to %lu for rpc", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt ));
  }

  /* NOTE(review): unlike process_tower_slot_done, this publishes
     without checking ctx->replay_out->idx!=ULONG_MAX — confirm the
     replay out link is always wired by the time confirmations can
     arrive. */
  fd_replay_oc_advanced_t * replay_msg = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
  replay_msg->bank_idx = bank_idx;
  replay_msg->slot = msg->slot;

  fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_OC_ADVANCED, ctx->replay_out->chunk, sizeof(fd_replay_oc_advanced_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
  ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_replay_oc_advanced_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );
}
    2662             : 
static inline int
returnable_frag( fd_replay_tile_t *  ctx,
                 ulong               in_idx,
                 ulong               seq,
                 ulong               sig,
                 ulong               chunk,
                 ulong               sz,
                 ulong               ctl,
                 ulong               tsorig,
                 ulong               tspub,
                 fd_stem_context_t * stem ) {
  /* Main inbound-fragment dispatcher: validates the chunk bounds and
     routes the frag to the handler for its input link kind. */
  (void)seq;
  (void)ctl;
  (void)tsorig;
  (void)tspub;

  /* Non-empty frags must lie within the in-link's dcache region. */
  if( FD_UNLIKELY( sz!=0UL && (chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>ctx->in[ in_idx ].mtu ) ) )
    FD_LOG_ERR(( "chunk %lu %lu from in %d corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in_kind[ in_idx ], ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));

  switch( ctx->in_kind[in_idx] ) {
    case IN_KIND_GENESIS: {
      /* Genesis metadata (and, for non-bootstrap, the serialized
         genesis blob) arrived; record hash/timestamp and run whatever
         cross-checks are now possible. */
      fd_genesis_meta_t const * meta = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
      ctx->has_genesis_hash = 1;
      ctx->has_genesis_timestamp = 1;
      ctx->genesis_timestamp = meta->creation_time_millis;
      *ctx->genesis_hash = meta->genesis_hash;
      if( FD_LIKELY( meta->bootstrap ) ) {
        boot_genesis( ctx, stem, meta );
      } else {
        uchar const * genesis_blob = (uchar const *)( meta+1 );
        FD_TEST( fd_genesis_parse( ctx->genesis, genesis_blob, meta->blob_sz ) );
      }
      /* NOTE(review): genesis_timestamp was already set above from
         meta->creation_time_millis and is overwritten here from the
         parsed genesis (creation_time) — confirm both sources use the
         same unit and that ctx->genesis is populated on the bootstrap
         path too. */
      ctx->has_genesis_timestamp = 1;
      ctx->genesis_timestamp     = ctx->genesis->creation_time;

      maybe_verify_cluster_type( ctx );
      maybe_verify_shred_version( ctx );
      maybe_verify_genesis_timestamp( ctx );
      break;
    }
    case IN_KIND_IPECHO: {
      /* ipecho reports the cluster shred version in sig. */
      FD_TEST( sig && sig<=USHORT_MAX );
      ctx->ipecho_shred_version = (ushort)sig;
      maybe_verify_shred_version( ctx );
      break;
    }
    case IN_KIND_SNAP: {
      on_snapshot_message( ctx, stem, in_idx, chunk, sig );
      maybe_verify_shred_version( ctx );
      maybe_verify_genesis_timestamp( ctx );
      break;
    }
    case IN_KIND_EXECRP: {
      /* Exec tile finished a task (txn exec / sigverify / poh hash). */
      process_exec_task_done( ctx, stem, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ), sig );
      break;
    }
    case IN_KIND_POH: {
      process_poh_message( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
      break;
    }
    case IN_KIND_RESOLV: {
      /* resolv released its reference on a bank. */
      fd_resolv_slot_exchanged_t * exchanged_slot = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
      process_resolv_slot_completed( ctx, exchanged_slot->bank_idx );
      break;
    }
    case IN_KIND_TOWER: {
      if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_DONE ) ) {
        process_tower_slot_done( ctx, stem, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ), seq );
      } else if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_CONFIRMED ) ) {
        fd_tower_slot_confirmed_t const * msg = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
        if( msg->level==FD_TOWER_SLOT_CONFIRMED_OPTIMISTIC && !msg->fwd ) process_tower_optimistic_confirmed( ctx, stem, msg );
        if( msg->level==FD_TOWER_SLOT_CONFIRMED_DUPLICATE )               fd_reasm_confirm( ctx->reasm, &msg->block_id );
      } else if( FD_LIKELY( sig==FD_TOWER_SIG_SLOT_IGNORED ) ) {
        /* Tower ignored the slot: synthesize a slot-done with no vote
           and no new root so the shared bookkeeping path still runs. */
        fd_tower_slot_ignored_t const * msg = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
        fd_tower_slot_done_t ignored = {
          .replay_slot     = msg->slot,
          .replay_bank_idx = msg->bank_idx,
          .vote_slot       = ULONG_MAX,
          .reset_slot      = ctx->reset_slot,     /* Use most recent reset slot */
          .reset_block_id  = ctx->reset_block_id,
          .root_slot       = ULONG_MAX
        };
        process_tower_slot_done( ctx, stem, &ignored, seq );
      }
      break;
    }
    case IN_KIND_REPAIR: {
      /* TODO: This message/sz should be defined. */
      if( sz!=0 && fd_disco_shred_out_msg_type( sig )==FD_SHRED_OUT_MSG_TYPE_FEC ) {
        /* If receive a FEC complete message. */
        process_fec_complete( ctx, stem, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
      }
      break;
    }
    case IN_KIND_TXSEND: {
      /* Outgoing (signed) vote txn observed from the send tile. */
      process_vote_txn_sent( ctx, fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk ) );
      break;
    }
    case IN_KIND_GOSSIP_OUT: {
      /* before_frag already filtered everything but the WFS marker. */
      FD_TEST( sig==FD_GOSSIP_UPDATE_TAG_WFS_DONE );
      ctx->wfs_complete = 1;
      FD_LOG_NOTICE(( "Done waiting for supermajority. More than 80 percent of cluster stake has joined." ));
      break;
    }
    case IN_KIND_RPC: {
      /* RPC released its bank reference; sig carries the bank index. */
      fd_bank_t bank[1];
      FD_TEST( fd_banks_bank_query( bank, ctx->banks, sig ) );
      bank->data->refcnt--;
      FD_LOG_DEBUG(( "bank (idx=%lu, slot=%lu) refcnt decremented to %lu for %s", bank->data->idx, fd_bank_slot_get( bank ), bank->data->refcnt, ctx->in_kind[ in_idx ]==IN_KIND_RPC ? "rpc" : "gui" ));
      break;
    }
    default:
      FD_LOG_ERR(( "unhandled kind %d", ctx->in_kind[ in_idx ] ));
  }

  return 0;
}
    2780             : 
    2781             : static inline fd_replay_out_link_t
    2782             : out1( fd_topo_t const *      topo,
    2783             :       fd_topo_tile_t const * tile,
    2784           0 :       char const *           name ) {
    2785           0 :   ulong idx = ULONG_MAX;
    2786             : 
    2787           0 :   for( ulong i=0UL; i<tile->out_cnt; i++ ) {
    2788           0 :     fd_topo_link_t const * link = &topo->links[ tile->out_link_id[ i ] ];
    2789           0 :     if( !strcmp( link->name, name ) ) {
    2790           0 :       if( FD_UNLIKELY( idx!=ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had multiple output links named %s but expected one", tile->name, tile->kind_id, name ));
    2791           0 :       idx = i;
    2792           0 :     }
    2793           0 :   }
    2794             : 
    2795           0 :   if( FD_UNLIKELY( idx==ULONG_MAX ) ) return (fd_replay_out_link_t){ .idx = ULONG_MAX, .mem = NULL, .chunk0 = 0, .wmark = 0, .chunk = 0 };
    2796             : 
    2797           0 :   void * mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ idx ] ].dcache_obj_id ].wksp_id ].wksp;
    2798           0 :   ulong chunk0 = fd_dcache_compact_chunk0( mem, topo->links[ tile->out_link_id[ idx ] ].dcache );
    2799           0 :   ulong wmark  = fd_dcache_compact_wmark ( mem, topo->links[ tile->out_link_id[ idx ] ].dcache, topo->links[ tile->out_link_id[ idx ] ].mtu );
    2800             : 
    2801           0 :   return (fd_replay_out_link_t){ .idx = idx, .mem = mem, .chunk0 = chunk0, .wmark = wmark, .chunk = chunk0 };
    2802           0 : }
    2803             : 
    2804             : static void
    2805             : privileged_init( fd_topo_t *      topo,
    2806           0 :                  fd_topo_tile_t * tile ) {
    2807           0 :   void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
    2808             : 
    2809           0 :   FD_SCRATCH_ALLOC_INIT( l, scratch );
    2810           0 :   fd_replay_tile_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_replay_tile_t), sizeof(fd_replay_tile_t) );
    2811             : 
    2812           0 :   if( FD_UNLIKELY( !strcmp( tile->replay.identity_key_path, "" ) ) ) FD_LOG_ERR(( "identity_key_path not set" ));
    2813             : 
    2814           0 :   ctx->identity_pubkey[ 0 ] = *(fd_pubkey_t const *)fd_type_pun_const( fd_keyload_load( tile->replay.identity_key_path, /* pubkey only: */ 1 ) );
    2815           0 :   ctx->identity_idx         = 0UL;
    2816             : 
    2817           0 :   if( FD_UNLIKELY( !tile->replay.bundle.vote_account_path[0] ) ) {
    2818           0 :     tile->replay.bundle.enabled = 0;
    2819           0 :   }
    2820             : 
    2821           0 :   if( FD_UNLIKELY( tile->replay.bundle.enabled ) ) {
    2822           0 :     if( FD_UNLIKELY( !fd_base58_decode_32( tile->replay.bundle.vote_account_path, ctx->bundle.vote_account.uc ) ) ) {
    2823           0 :       const uchar * vote_key = fd_keyload_load( tile->replay.bundle.vote_account_path, /* pubkey only: */ 1 );
    2824           0 :       fd_memcpy( ctx->bundle.vote_account.uc, vote_key, 32UL );
    2825           0 :     }
    2826           0 :   }
    2827             : 
    2828           0 :   if( FD_UNLIKELY( !fd_rng_secure( &ctx->reasm_seed, sizeof(ulong) ) ) ) {
    2829           0 :     FD_LOG_CRIT(( "fd_rng_secure failed" ));
    2830           0 :   }
    2831             : 
    2832           0 :   if( FD_UNLIKELY( !fd_rng_secure( &ctx->vote_tracker_seed, sizeof(ulong) ) ) ) {
    2833           0 :     FD_LOG_CRIT(( "fd_rng_secure failed" ));
    2834           0 :   }
    2835             : 
    2836           0 :   if( FD_UNLIKELY( !fd_rng_secure( &ctx->block_id_map_seed, sizeof(ulong) ) ) ) {
    2837           0 :     FD_LOG_CRIT(( "fd_rng_secure failed" ));
    2838           0 :   }
    2839             : 
    2840           0 :   if( FD_UNLIKELY( !fd_rng_secure( &ctx->initial_block_id, sizeof(fd_hash_t) ) ) ) {
    2841           0 :     FD_LOG_CRIT(( "fd_rng_secure failed" ));
    2842           0 :   }
    2843             : 
    2844           0 :   if( FD_UNLIKELY( !fd_rng_secure( &ctx->runtime_stack_seed, sizeof(ulong) ) ) ) {
    2845           0 :     FD_LOG_CRIT(( "fd_rng_secure failed" ));
    2846           0 :   }
    2847           0 : }
    2848             : 
/* unprivileged_init performs the bulk of tile setup after privileges
   have been dropped: it carves the tile's scratch region into the
   sub-objects declared below, joins the shared topology objects (store,
   banks, account database, transaction cache, ...), configures optional
   capture/dump facilities, classifies input links, resolves output
   links, and initializes metrics.

   NOTE: the FD_SCRATCH_ALLOC_APPEND sequence here must stay in sync
   with scratch_footprint() (defined elsewhere in this file); the
   overflow check at the bottom enforces this at boot. */

static void
unprivileged_init( fd_topo_t *      topo,
                   fd_topo_tile_t * tile ) {
  void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );

  ulong chain_cnt = fd_block_id_map_chain_cnt_est( tile->replay.max_live_slots );

  /* Carve the scratch region.  Order and sizes must mirror
     scratch_footprint(). */
  FD_SCRATCH_ALLOC_INIT( l, scratch );
  fd_replay_tile_t * ctx    = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_replay_tile_t),   sizeof(fd_replay_tile_t) );
  void * block_id_arr_mem   = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_block_id_ele_t),  sizeof(fd_block_id_ele_t) * tile->replay.max_live_slots );
  void * block_id_map_mem   = FD_SCRATCH_ALLOC_APPEND( l, fd_block_id_map_align(),     fd_block_id_map_footprint( chain_cnt ) );
  void * _txncache          = FD_SCRATCH_ALLOC_APPEND( l, fd_txncache_align(),         fd_txncache_footprint( tile->replay.max_live_slots ) );
  void * reasm_mem          = FD_SCRATCH_ALLOC_APPEND( l, fd_reasm_align(),            fd_reasm_footprint( tile->replay.fec_max ) );
  void * sched_mem          = FD_SCRATCH_ALLOC_APPEND( l, fd_sched_align(),            fd_sched_footprint( tile->replay.sched_depth, tile->replay.max_live_slots ) );
  void * vinyl_req_pool_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_vinyl_req_pool_align(),   fd_vinyl_req_pool_footprint( 1UL, 1UL ) );
  void * vote_tracker_mem   = FD_SCRATCH_ALLOC_APPEND( l, fd_vote_tracker_align(),     fd_vote_tracker_footprint() );
  void * _capture_ctx       = FD_SCRATCH_ALLOC_APPEND( l, fd_capture_ctx_align(),      fd_capture_ctx_footprint() );
  void * dump_proto_ctx_mem = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_dump_proto_ctx_t), sizeof(fd_dump_proto_ctx_t) );
# if FD_HAS_FLATCC
  /* Block dump memory is only reserved when block dumping is enabled. */
  void * block_dump_ctx     = NULL;
  if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
    block_dump_ctx = FD_SCRATCH_ALLOC_APPEND( l, fd_block_dump_context_align(), fd_block_dump_context_footprint() );
  }
# endif

  FD_TEST( fd_vote_rewards_map_join( fd_vote_rewards_map_new( ctx->runtime_stack.stakes.vote_map_mem, FD_RUNTIME_EXPECTED_VOTE_ACCOUNTS, ctx->runtime_stack_seed ) ) );

  ctx->wksp = topo->workspaces[ topo->objs[ tile->tile_obj_id ].wksp_id ].wksp;

  /* Join the shared shred store. */
  ulong store_obj_id = fd_pod_query_ulong( topo->props, "store", ULONG_MAX );
  FD_TEST( store_obj_id!=ULONG_MAX );
  ctx->store = fd_store_join( fd_topo_obj_laddr( topo, store_obj_id ) );
  FD_TEST( ctx->store );

  /* Join the shared banks (and their locks). */
  ulong banks_obj_id = fd_pod_query_ulong( topo->props, "banks", ULONG_MAX );
  FD_TEST( banks_obj_id!=ULONG_MAX );
  ulong banks_locks_obj_id = fd_pod_query_ulong( topo->props, "banks_locks", ULONG_MAX );
  FD_TEST( banks_locks_obj_id!=ULONG_MAX );

  FD_TEST( fd_banks_join( ctx->banks, fd_topo_obj_laddr( topo, banks_obj_id ), fd_topo_obj_laddr( topo, banks_locks_obj_id ) ) );

  fd_bank_data_t * bank_pool = fd_banks_get_bank_pool( ctx->banks->data );
  FD_MGAUGE_SET( REPLAY, MAX_LIVE_BANKS, fd_banks_pool_max( bank_pool ) );

  /* Create the boot bank at slot 0. */
  fd_bank_t bank[1];
  FD_TEST( fd_banks_init_bank( bank, ctx->banks ) );
  fd_bank_slot_set( bank, 0UL );
  FD_TEST( bank->data->idx==FD_REPLAY_BOOT_BANK_IDX );

  /* No root is known yet; initial_block_id was drawn from the CSPRNG in
     privileged_init. */
  ctx->consensus_root_slot = ULONG_MAX;
  ctx->consensus_root      = ctx->initial_block_id;
  ctx->published_root_slot = ULONG_MAX;

  ctx->expected_shred_version = tile->replay.expected_shred_version;
  ctx->ipecho_shred_version = 0;
  fd_memcpy( ctx->genesis_path, tile->replay.genesis_path, sizeof(ctx->genesis_path) );
  ctx->has_genesis_hash = 0;
  ctx->has_genesis_timestamp          = 0;
  ctx->has_expected_genesis_timestamp = 0;
  ctx->cluster_type = FD_CLUSTER_UNKNOWN;
  ctx->hard_forks_cnt = ULONG_MAX;

  /* Bundle crank generation setup (vote account was resolved in
     privileged_init). */
  if( FD_UNLIKELY( tile->replay.bundle.enabled ) ) {
    ctx->bundle.enabled = 1;
    if( FD_UNLIKELY( !fd_bundle_crank_gen_init( ctx->bundle.gen,
             (fd_acct_addr_t const *)tile->replay.bundle.tip_distribution_program_addr,
             (fd_acct_addr_t const *)tile->replay.bundle.tip_payment_program_addr,
             (fd_acct_addr_t const *)ctx->bundle.vote_account.uc,
             (fd_acct_addr_t const *)ctx->bundle.vote_account.uc, "NAN", 0UL ) ) ) {
      FD_LOG_ERR(( "failed to initialize bundle crank gen" ));
    }
  } else {
    ctx->bundle.enabled = 0;
  }

  /* Feature set: baseline cleaned-up features plus any one-off feature
     pubkeys from config (bounded by the local array size). */
  fd_features_t * features = fd_bank_features_modify( bank );
  fd_features_enable_cleaned_up( features );

  char const * one_off_features[ 16UL ];
  FD_TEST( tile->replay.enable_features_cnt<=sizeof(one_off_features)/sizeof(one_off_features[0]) );
  for( ulong i=0UL; i<tile->replay.enable_features_cnt; i++ ) one_off_features[ i ] = tile->replay.enable_features[i];
  fd_features_enable_one_offs( features, one_off_features, (uint)tile->replay.enable_features_cnt, 0UL );

  /* Account database: presence of the "vinyl_data" object selects the
     v2 (vinyl-backed) implementation over v1 (funk-only). */
  fd_topo_obj_t const * vinyl_data = fd_topo_find_tile_obj( topo, tile, "vinyl_data" );

  ulong progcache_obj_id;       FD_TEST( (progcache_obj_id       = fd_pod_query_ulong( topo->props, "progcache",       ULONG_MAX ) )!=ULONG_MAX );
  ulong progcache_locks_obj_id; FD_TEST( (progcache_locks_obj_id = fd_pod_query_ulong( topo->props, "progcache_locks", ULONG_MAX ) )!=ULONG_MAX );
  FD_TEST( fd_progcache_admin_join( ctx->progcache_admin,
      fd_topo_obj_laddr( topo, progcache_obj_id       ),
      fd_topo_obj_laddr( topo, progcache_locks_obj_id ) ) );

  ulong funk_obj_id;       FD_TEST( (funk_obj_id       = fd_pod_query_ulong( topo->props, "funk",       ULONG_MAX ) )!=ULONG_MAX );
  ulong funk_locks_obj_id; FD_TEST( (funk_locks_obj_id = fd_pod_query_ulong( topo->props, "funk_locks", ULONG_MAX ) )!=ULONG_MAX );
  ulong max_depth = tile->replay.max_live_slots + tile->replay.write_delay_slots;
  if( !vinyl_data ) {
    FD_TEST( fd_accdb_admin_v1_init( ctx->accdb_admin,
        fd_topo_obj_laddr( topo, funk_obj_id       ),
        fd_topo_obj_laddr( topo, funk_locks_obj_id ) ) );
  } else {
    fd_topo_obj_t const * vinyl_rq       = fd_topo_find_tile_obj( topo, tile, "vinyl_rq" );
    fd_topo_obj_t const * vinyl_req_pool = fd_topo_find_tile_obj( topo, tile, "vinyl_rpool" );
    FD_TEST( fd_accdb_admin_v2_init( ctx->accdb_admin,
        fd_topo_obj_laddr( topo, funk_obj_id       ),
        fd_topo_obj_laddr( topo, funk_locks_obj_id ),
        fd_topo_obj_laddr( topo, vinyl_rq->id      ),
        topo->workspaces[ vinyl_data->wksp_id ].wksp,
        fd_topo_obj_laddr( topo, vinyl_req_pool->id ),
        vinyl_rq->id,
        max_depth ) );
    fd_accdb_admin_v2_delay_set( ctx->accdb_admin, tile->replay.write_delay_slots );
  }
  fd_accdb_init_from_topo( ctx->accdb, topo, tile, max_depth );

  /* Transaction cache: shared shmem region plus a local join. */
  void * _txncache_shmem = fd_topo_obj_laddr( topo, tile->replay.txncache_obj_id );
  fd_txncache_shmem_t * txncache_shmem = fd_txncache_shmem_join( _txncache_shmem );
  FD_TEST( txncache_shmem );
  ctx->txncache = fd_txncache_join( fd_txncache_new( _txncache, txncache_shmem ) );
  FD_TEST( ctx->txncache );

  /* Optional solcap capture, enabled by a non-empty capture path. */
  ctx->capture_ctx = NULL;
  if( FD_UNLIKELY( strcmp( "", tile->replay.solcap_capture ) ) ) {
    ctx->capture_ctx = fd_capture_ctx_join( fd_capture_ctx_new( _capture_ctx ) );
    ctx->capture_ctx->solcap_start_slot = tile->replay.capture_start_slot;
    ctx->capture_ctx->capture_solcap = 1;
  }

  /* Optional protobuf dumping, enabled by a non-empty output dir. */
  ctx->dump_proto_ctx = NULL;
  if( FD_UNLIKELY( strcmp( "", tile->replay.dump_proto_dir ) ) ) {
    ctx->dump_proto_ctx                        = dump_proto_ctx_mem;
    ctx->dump_proto_ctx->dump_proto_output_dir = tile->replay.dump_proto_dir;
    if( FD_LIKELY( tile->replay.dump_block_to_pb ) ) {
      ctx->dump_proto_ctx->dump_block_to_pb = !!tile->replay.dump_block_to_pb;
    }
  }

# if FD_HAS_FLATCC
  if( FD_UNLIKELY( tile->replay.dump_block_to_pb ) ) {
    ctx->block_dump_ctx = fd_block_dump_context_join( fd_block_dump_context_new( block_dump_ctx ) );
  } else {
    ctx->block_dump_ctx = NULL;
  }
# endif

  ctx->is_booted = 0;

  ctx->larger_max_cost_per_block = tile->replay.larger_max_cost_per_block;

  /* FEC reassembler and transaction scheduler. */
  ctx->reasm = fd_reasm_join( fd_reasm_new( reasm_mem, tile->replay.fec_max, ctx->reasm_seed ) );
  FD_TEST( ctx->reasm );

  ctx->sched = fd_sched_join( fd_sched_new( sched_mem, tile->replay.sched_depth, tile->replay.max_live_slots, fd_topo_tile_name_cnt( topo, "execrp" ) ) );
  FD_TEST( ctx->sched );

  FD_TEST( fd_vinyl_req_pool_new( vinyl_req_pool_mem, 1UL, 1UL ) );

  ctx->vote_tracker = fd_vote_tracker_join( fd_vote_tracker_new( vote_tracker_mem, ctx->vote_tracker_seed ) );
  FD_TEST( ctx->vote_tracker );

  ctx->identity_vote_rooted = 0;

  ctx->wait_for_vote_to_start_leader = tile->replay.wait_for_vote_to_start_leader;

  /* Wait-for-supermajority is enabled iff the configured bank hash is
     non-zero; if disabled, treat it as already complete. */
  ctx->wfs_enabled = memcmp( tile->replay.wait_for_supermajority_with_bank_hash.uc, ((fd_pubkey_t){ 0 }).uc, sizeof(fd_pubkey_t) );
  ctx->expected_bank_hash = tile->replay.wait_for_supermajority_with_bank_hash;
  ctx->wfs_complete = !ctx->wfs_enabled;

  ctx->mleaders = fd_multi_epoch_leaders_join( fd_multi_epoch_leaders_new( ctx->mleaders_mem ) );
  FD_TEST( ctx->mleaders );

  /* Leader state.  This node can only become leader when a pack tile
     exists in the topology. */
  ctx->is_leader             = 0;
  ctx->supports_leader       = fd_topo_find_tile( topo, "pack", 0UL )!=ULONG_MAX;
  ctx->reset_slot            = 0UL;
  fd_memset( ctx->reset_bank, 0, sizeof(fd_bank_t) );
  ctx->reset_block_id        = ctx->initial_block_id;
  ctx->reset_timestamp_nanos = 0UL;
  ctx->next_leader_slot      = ULONG_MAX;
  ctx->next_leader_tickcount = LONG_MAX;
  ctx->highwater_leader_slot = ULONG_MAX;
  ctx->slot_duration_nanos   = 350L*1000L*1000L; /* TODO: Not fixed ... not always 350ms ... */
  ctx->slot_duration_ticks   = (double)ctx->slot_duration_nanos*fd_tempo_tick_per_ns( NULL );
  ctx->leader_bank->data     = NULL;

  /* Block id tracking structures. */
  ctx->block_id_len = tile->replay.max_live_slots;
  ctx->block_id_arr = (fd_block_id_ele_t *)block_id_arr_mem;
  ctx->block_id_map = fd_block_id_map_join( fd_block_id_map_new( block_id_map_mem, chain_cnt, ctx->block_id_map_seed ) );
  FD_TEST( ctx->block_id_map );
  for( ulong i=0UL; i<tile->replay.max_live_slots; i++ ) ctx->block_id_arr[ i ].block_id_seen = 0;

  ctx->resolv_tile_cnt = fd_topo_tile_name_cnt( topo, "resolv" );

  ctx->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->id_keyswitch_obj_id ) );
  FD_TEST( ctx->keyswitch );
  ctx->halt_leader = 0;

  /* Classify each input link by name and record its dcache bounds. */
  FD_TEST( tile->in_cnt<=sizeof(ctx->in)/sizeof(ctx->in[0]) );
  for( ulong i=0UL; i<tile->in_cnt; i++ ) {
    fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
    fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];

    if( FD_LIKELY( link->dcache ) ) {
      ctx->in[ i ].mem    = link_wksp->wksp;
      ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
      ctx->in[ i ].wmark  = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
      ctx->in[ i ].mtu    = link->mtu;
    }

    if(      !strcmp( link->name, "genesi_out"    ) ) ctx->in_kind[ i ] = IN_KIND_GENESIS;
    else if( !strcmp( link->name, "ipecho_out"    ) ) ctx->in_kind[ i ] = IN_KIND_IPECHO;
    else if( !strcmp( link->name, "snapin_manif"  ) ) ctx->in_kind[ i ] = IN_KIND_SNAP;
    else if( !strcmp( link->name, "execrp_replay" ) ) ctx->in_kind[ i ] = IN_KIND_EXECRP;
    else if( !strcmp( link->name, "tower_out"     ) ) ctx->in_kind[ i ] = IN_KIND_TOWER;
    else if( !strcmp( link->name, "poh_replay"    ) ) ctx->in_kind[ i ] = IN_KIND_POH;
    else if( !strcmp( link->name, "resolv_replay" ) ) ctx->in_kind[ i ] = IN_KIND_RESOLV;
    else if( !strcmp( link->name, "repair_out"    ) ) ctx->in_kind[ i ] = IN_KIND_REPAIR;
    else if( !strcmp( link->name, "txsend_out"    ) ) ctx->in_kind[ i ] = IN_KIND_TXSEND;
    else if( !strcmp( link->name, "rpc_replay"    ) ) ctx->in_kind[ i ] = IN_KIND_RPC;
    else if( !strcmp( link->name, "gossip_out"    ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP_OUT;
    else FD_LOG_ERR(( "unexpected input link name %s", link->name ));
  }

  /* Required output links. */
  *ctx->epoch_out  = out1( topo, tile, "replay_epoch" ); FD_TEST( ctx->epoch_out->idx!=ULONG_MAX );
  *ctx->replay_out = out1( topo, tile, "replay_out"   ); FD_TEST( ctx->replay_out->idx!=ULONG_MAX );
  *ctx->exec_out   = out1( topo, tile, "replay_execrp"  ); FD_TEST( ctx->exec_out->idx!=ULONG_MAX );

  ctx->rpc_enabled = fd_topo_find_tile( topo, "rpc", 0UL )!=ULONG_MAX;

  /* Wire up the solcap capture output link when capture is enabled. */
  if( FD_UNLIKELY( strcmp( "", tile->replay.solcap_capture ) ) ) {
    ulong idx = fd_topo_find_tile_out_link( topo, tile, "cap_repl", 0UL );
    FD_TEST( idx!=ULONG_MAX );
    fd_topo_link_t * link = &topo->links[ tile->out_link_id[ idx ] ];


    fd_capture_link_buf_t * cap_repl_out = ctx->cap_repl_out;
    cap_repl_out->base.vt = &fd_capture_link_buf_vt;
    cap_repl_out->idx     = idx;
    cap_repl_out->mem     = topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ].wksp;
    cap_repl_out->chunk0  = fd_dcache_compact_chunk0( cap_repl_out->mem, link->dcache );
    cap_repl_out->wmark   = fd_dcache_compact_wmark( cap_repl_out->mem, link->dcache, link->mtu );
    cap_repl_out->chunk   = cap_repl_out->chunk0;
    cap_repl_out->mcache  = link->mcache;
    cap_repl_out->depth   = fd_mcache_depth( link->mcache );
    cap_repl_out->seq     = 0UL;

    ctx->capture_ctx->capctx_type.buf  = cap_repl_out;
    ctx->capture_ctx->capture_link    = &cap_repl_out->base;
    ctx->capture_ctx->current_txn_idx = 0UL;


    /* Find the solcap consumer's fseq for this link so flow control
       works.  NOTE(review): consumer_tile_idx is not checked against
       ULONG_MAX here -- presumably a solcap tile always exists when
       solcap_capture is configured; verify against topology setup. */
    ulong consumer_tile_idx = fd_topo_find_tile( topo, "solcap", 0UL );
    fd_topo_tile_t * consumer_tile = &topo->tiles[ consumer_tile_idx ];
    cap_repl_out->fseq = NULL;
    for( ulong j = 0UL; j < consumer_tile->in_cnt; j++ ) {
      if( FD_UNLIKELY( consumer_tile->in_link_id[ j ]  == link->id ) ) {
        cap_repl_out->fseq = fd_fseq_join( fd_topo_obj_laddr( topo, consumer_tile->in_link_fseq_obj_id[ j ] ) );
        FD_TEST( cap_repl_out->fseq );
        break;
      }
    }
  }

  /* Metrics: zero counters and set up histogram buckets. */
  fd_memset( &ctx->metrics, 0, sizeof(ctx->metrics) );

  fd_histf_join( fd_histf_new( ctx->metrics.store_query_wait,   FD_MHIST_SECONDS_MIN( REPLAY, STORE_QUERY_WAIT ),
                                                                FD_MHIST_SECONDS_MAX( REPLAY, STORE_QUERY_WAIT ) ) );
  fd_histf_join( fd_histf_new( ctx->metrics.store_query_work,   FD_MHIST_SECONDS_MIN( REPLAY, STORE_QUERY_WORK ),
                                                                FD_MHIST_SECONDS_MAX( REPLAY, STORE_QUERY_WORK ) ) );

  fd_histf_join( fd_histf_new( ctx->metrics.root_slot_dur,      FD_MHIST_SECONDS_MIN( REPLAY, ROOT_SLOT_DURATION_SECONDS ),
                                                                FD_MHIST_SECONDS_MAX( REPLAY, ROOT_SLOT_DURATION_SECONDS ) ) );
  fd_histf_join( fd_histf_new( ctx->metrics.root_account_dur,   FD_MHIST_SECONDS_MIN( REPLAY, ROOT_ACCOUNT_DURATION_SECONDS ),
                                                                FD_MHIST_SECONDS_MAX( REPLAY, ROOT_ACCOUNT_DURATION_SECONDS ) ) );

  /* Ensure precompiles are available, crash fast otherwise */
  fd_precompiles();

  /* Verify the scratch allocation above fit within the declared
     footprint. */
  ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
  if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
    FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
}
    3128             : 
    3129             : static ulong
    3130             : populate_allowed_seccomp( fd_topo_t const *      topo FD_FN_UNUSED,
    3131             :                           fd_topo_tile_t const * tile FD_FN_UNUSED,
    3132             :                           ulong                  out_cnt,
    3133           0 :                           struct sock_filter *   out ) {
    3134             : 
    3135           0 :   populate_sock_filter_policy_fd_replay_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
    3136           0 :   return sock_filter_policy_fd_replay_tile_instr_cnt;
    3137           0 : }
    3138             : 
    3139             : static ulong
    3140             : populate_allowed_fds( fd_topo_t const *      topo FD_FN_UNUSED,
    3141             :                       fd_topo_tile_t const * tile FD_FN_UNUSED,
    3142             :                       ulong                  out_fds_cnt,
    3143           0 :                       int *                  out_fds ) {
    3144             : 
    3145           0 :   if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
    3146             : 
    3147           0 :   ulong out_cnt = 0UL;
    3148           0 :   out_fds[ out_cnt++ ] = 2; /* stderr */
    3149           0 :   if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
    3150           0 :     out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
    3151           0 :   return out_cnt;
    3152           0 : }
    3153             : 
    3154             : static inline void
    3155           0 : during_housekeeping( fd_replay_tile_t * ctx ) {
    3156           0 :   if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_UNHALT_PENDING ) ) {
    3157           0 :     FD_CRIT( ctx->halt_leader, "state machine corruption" );
    3158           0 :     FD_LOG_DEBUG(( "keyswitch: unhalting leader" ));
    3159           0 :     ctx->halt_leader = 0;
    3160           0 :     fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
    3161           0 :   }
    3162             : 
    3163           0 :   if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
    3164           0 :     FD_LOG_DEBUG(( "keyswitch: halting leader" ));
    3165           0 :     ctx->halt_leader = 1;
    3166           0 :     if( !ctx->is_leader ) maybe_switch_identity( ctx );
    3167           0 :   }
    3168           0 : }
    3169             : 
#undef DEBUG_LOGGING

/* Configuration for the generic stem run loop included below. */

/* counting carefully, after_credit can generate at most 7 frags and
   returnable_frag boot_genesis can also generate at most 7 frags, so 14
   is a conservative bound. */
#define STEM_BURST (14UL)

/* fd_tempo_lazy_default( 16384 ) where 16384 is the minimum out-link
   depth (i.e. cr_max) but excludes replay_epoch, which is so infrequent
   credit availability is a non-issue.   */
#define STEM_LAZY ((long)36865)

#define STEM_CALLBACK_CONTEXT_TYPE  fd_replay_tile_t
#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_replay_tile_t)

/* Wire this tile's callbacks into the stem template. */
#define STEM_CALLBACK_METRICS_WRITE       metrics_write
#define STEM_CALLBACK_AFTER_CREDIT        after_credit
#define STEM_CALLBACK_BEFORE_FRAG         before_frag
#define STEM_CALLBACK_RETURNABLE_FRAG     returnable_frag
#define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping

/* Instantiates stem_run() (and friends) specialized with the macros
   above. */
#include "../../disco/stem/fd_stem.c"
    3192             : 
/* Tile descriptor exported to the topology runner: names the tile and
   binds its sandboxing, scratch sizing, init, and run entry points. */
fd_topo_run_tile_t fd_tile_replay = {
  .name                     = "replay",
  .populate_allowed_seccomp = populate_allowed_seccomp,
  .populate_allowed_fds     = populate_allowed_fds,
  .scratch_align            = scratch_align,
  .scratch_footprint        = scratch_footprint,
  .privileged_init          = privileged_init,
  .unprivileged_init        = unprivileged_init,
  .run                      = stem_run,
};

Generated by: LCOV version 1.14