LCOV - code coverage report
Current view: top level - discof/replay - fd_sched.c (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 0 1140 0.0 %
Date: 2025-11-29 04:46:19 Functions: 0 49 0.0 %

          Line data    Source code
       1             : #include <stdio.h> /* for vsnprintf */
       2             : #include <stdarg.h> /* for va_list */
       3             : 
       4             : #include "fd_sched.h"
       5             : #include "../../util/math/fd_stat.h" /* for sorted search */
       6             : #include "../../disco/fd_disco_base.h" /* for FD_MAX_TXN_PER_SLOT */
       7             : #include "../../flamenco/accdb/fd_accdb_impl_v1.h"
       8             : #include "../../flamenco/runtime/fd_runtime.h" /* for fd_runtime_load_txn_address_lookup_tables */
       9             : #include "../../flamenco/runtime/sysvar/fd_sysvar_slot_hashes.h" /* for ALUTs */
      10             : 
/* Sizing and status constants for the scheduler. */

#define FD_SCHED_MAX_DEPTH                 (FD_RDISP_MAX_DEPTH>>2) /* Max in-flight+queued transactions tracked at once. */
#define FD_SCHED_MAX_STAGING_LANES_LOG     (2)
#define FD_SCHED_MAX_STAGING_LANES         (1UL<<FD_SCHED_MAX_STAGING_LANES_LOG) /* 4 dispatcher staging lanes. */
#define FD_SCHED_MAX_EXEC_TILE_CNT         (64UL) /* Bounded by the single-word ready bitsets below. */
#define FD_SCHED_MAX_PRINT_BUF_SZ          (2UL<<20) /* Diagnostic print buffer: 2 MiB. */

/* 64 ticks per slot, and a single gigantic microblock containing min
   size transactions. */
FD_STATIC_ASSERT( FD_MAX_TXN_PER_SLOT_SHRED==((FD_SHRED_DATA_PAYLOAD_MAX_PER_SLOT-65UL*sizeof(fd_microblock_hdr_t))/FD_TXN_MIN_SERIALIZED_SZ), max_txn_per_slot_shred );

/* We size the buffer to be able to hold residual data from the previous
   FEC set that only becomes parseable after the next FEC set is
   ingested, as well as the incoming FEC set.  The largest minimally
   parseable unit of data is a transaction.  So that much data may
   straddle FEC set boundaries.  Other minimally parseable units of data
   include the microblock header and the microblock count within a
   batch. */
#define FD_SCHED_MAX_PAYLOAD_PER_FEC       (FD_STORE_DATA_MAX)
#define FD_SCHED_MAX_FEC_BUF_SZ            (FD_SCHED_MAX_PAYLOAD_PER_FEC+FD_TXN_MTU)
FD_STATIC_ASSERT( FD_TXN_MTU>=sizeof(fd_microblock_hdr_t), resize buffer for residual data );
FD_STATIC_ASSERT( FD_TXN_MTU>=sizeof(ulong),               resize buffer for residual data );

#define FD_SCHED_MAX_TXN_PER_FEC           ((FD_SCHED_MAX_PAYLOAD_PER_FEC-1UL)/FD_TXN_MIN_SERIALIZED_SZ+1UL) /* 478 */

#define FD_SCHED_MAGIC (0xace8a79c181f89b6UL) /* echo -n "fd_sched_v0" | sha512sum | head -c 16 */

/* Parser return codes (see fd_sched_parse / fd_sched_parse_txn). */
#define FD_SCHED_PARSER_OK          (0) /* Made forward progress. */
#define FD_SCHED_PARSER_AGAIN_LATER (1) /* Need more data or resources; retry later. */
#define FD_SCHED_PARSER_BAD_BLOCK   (2) /* Block data is malformed. */
      40             : 
      41             : 
      42             : /* Structs. */
      43             : 
      44             : #define SET_NAME txn_bitset
      45             : #define SET_MAX  FD_SCHED_MAX_DEPTH
      46             : #include "../../util/tmpl/fd_set.c"
      47             : 
/* fd_sched_block_t tracks the per-block state the scheduler needs:
   position in the fork tree, progress counters for parsing / execution
   / sigverify, raw-FEC-set parser state, and a set of status bits that
   drive staging/activation decisions.  One element lives in the block
   pool per tracked block (indexed by bank idx). */
struct fd_sched_block {
  ulong               slot;
  ulong               parent_slot;
  ulong               parent_idx;  /* Index of the parent in the pool. */
  ulong               child_idx;   /* Index of the left-child in the pool. */
  ulong               sibling_idx; /* Index of the right-sibling in the pool. */

  /* Counters. */
  uint                txn_parsed_cnt;
  /*                  txn_queued_cnt = txn_parsed_cnt-txn_in_flight_cnt-txn_done_cnt */
  uint                txn_exec_in_flight_cnt;
  uint                txn_exec_done_cnt;
  uint                txn_sigverify_in_flight_cnt;
  uint                txn_sigverify_done_cnt;
  uint                txn_done_cnt; /* A transaction is considered done when all types of tasks associated with it are done. */
  ulong               txn_pool_max_popcnt;   /* Peak transaction pool occupancy during the time this block was replaying. */
  ulong               block_pool_max_popcnt; /* Peak block pool occupancy. */
  uint                shred_cnt;
  uint                fec_cnt;
  ulong               txn_idx[ FD_MAX_TXN_PER_SLOT ]; /* Indexed by parse order. */

  /* Parser state. */
  uchar               txn[ FD_TXN_MAX_SZ ] __attribute__((aligned(alignof(fd_txn_t)))); /* Scratch for the transaction currently being parsed. */
  fd_hash_t           poh;          /* Latest PoH hash we've seen from the ingested FEC sets. */
  ulong               mblks_rem;    /* Number of microblocks remaining in the current batch. */
  ulong               txns_rem;     /* Number of transactions remaining in the current microblock. */
  fd_acct_addr_t      aluts[ 256 ]; /* Resolve ALUT accounts into this buffer for more parallelism. */
  uint                fec_buf_sz;   /* Size of the fec_buf in bytes. */
  uint                fec_buf_soff; /* Starting offset into fec_buf for unparsed transactions. */
  uint                fec_buf_boff; /* Byte offset into raw block data of the first byte currently in fec_buf */
  uint                fec_eob:1;    /* FEC end-of-batch: set if the last FEC set in the batch is being
                                       ingested. */
  uint                fec_sob:1;    /* FEC start-of-batch: set if the parser expects to be receiving a new
                                       batch. */

  /* Block state. */
  uint                fec_eos:1;                          /* FEC end-of-stream: set if the last FEC set in the block has been
                                                             ingested. */
  uint                rooted:1;                           /* Set if the block is rooted. */
  uint                dying:1;                            /* Set if the block has been abandoned and no transactions should be
                                                             scheduled from it. */
  uint                in_sched:1;                         /* Set if the block is being tracked by the scheduler. */
  uint                in_rdisp:1;                         /* Set if the block is being tracked by the dispatcher, either as staged
                                                             or unstaged. */
  uint                block_start_signaled:1;             /* Set if the start-of-block sentinel has been dispatched. */
  uint                block_end_signaled:1;               /* Set if the end-of-block sentinel has been dispatched. */
  uint                block_start_done:1;                 /* Set if the start-of-block processing has been completed. */
  uint                block_end_done:1;                   /* Set if the end-of-block processing has been completed. */
  uint                staged:1;                           /* Set if the block is in a dispatcher staging lane; a staged block is
                                                             tracked by the dispatcher. */
  ulong               staging_lane;                       /* Ignored if staged==0. */
  ulong               luf_depth;                          /* Depth of longest unstaged fork starting from this node; only
                                                             stageable unstaged descendants are counted. */
  uchar               fec_buf[ FD_SCHED_MAX_FEC_BUF_SZ ]; /* The previous FEC set could have some residual data that only becomes
                                                             parseable after the next FEC set is ingested. */
  uint                shred_blk_offs[ FD_SHRED_BLK_MAX ]; /* The byte offsets into block data of ingested shreds */
};
typedef struct fd_sched_block fd_sched_block_t;
     106             : 
     107             : FD_STATIC_ASSERT( sizeof(fd_hash_t)==sizeof(((fd_microblock_hdr_t *)0)->hash), unexpected poh hash size );
     108             : 
     109             : 
/* fd_sched_metrics_t holds monotonically increasing diagnostic
   counters, printed by print_metrics().  Counter semantics are inferred
   from their names at update sites elsewhere in the file. */
struct fd_sched_metrics {
  /* Block lifecycle counters. */
  uint  block_added_cnt;
  uint  block_added_staged_cnt;
  uint  block_added_unstaged_cnt;
  uint  block_added_dead_ood_cnt;
  uint  block_removed_cnt;
  uint  block_abandoned_cnt;
  uint  block_bad_cnt;
  uint  block_promoted_cnt;
  uint  block_demoted_cnt;
  /* Active-block deactivation reasons. */
  uint  deactivate_no_child_cnt;
  uint  deactivate_no_txn_cnt;
  uint  deactivate_pruned_cnt;
  uint  deactivate_abandoned_cnt;
  /* Staging lane churn. */
  uint  lane_switch_cnt;
  uint  lane_promoted_cnt;
  uint  lane_demoted_cnt;
  /* Address lookup table resolution outcomes. */
  uint  alut_success_cnt;
  uint  alut_serializing_cnt;
  /* Transactions dropped when a block was abandoned, by progress stage. */
  uint  txn_abandoned_parsed_cnt;
  uint  txn_abandoned_exec_done_cnt;
  uint  txn_abandoned_done_cnt;
  /* In-flight transaction load tracking. */
  uint  txn_max_in_flight_cnt;
  ulong txn_weighted_in_flight_cnt;
  ulong txn_weighted_in_flight_tickcount;
  ulong txn_none_in_flight_tickcount;
  /* Transaction progress totals. */
  ulong txn_parsed_cnt;
  ulong txn_exec_done_cnt;
  ulong txn_sigverify_done_cnt;
  ulong txn_done_cnt;
  /* Ingest byte accounting. */
  ulong bytes_ingested_cnt;
  ulong bytes_ingested_unparsed_cnt;
  ulong bytes_dropped_cnt;
  ulong fec_cnt;
};
typedef struct fd_sched_metrics fd_sched_metrics_t;
     146             : 
/* fd_sched_t is the top-level scheduler object.  It owns the block
   pool and transaction pool, wraps the rdisp dispatcher, and tracks
   which block is actively being replayed plus which staging lanes are
   occupied.  Laid out by fd_sched_new() via FD_SCRATCH_ALLOC. */
struct fd_sched {
  char                print_buf[ FD_SCHED_MAX_PRINT_BUF_SZ ]; /* Accumulator for fd_sched_printf() diagnostics. */
  ulong               print_buf_sz;  /* Bytes currently used in print_buf (excluding terminating NUL). */
  fd_sched_metrics_t  metrics[ 1 ];
  ulong               canary; /* == FD_SCHED_MAGIC */
  ulong               block_cnt_max; /* Immutable. */
  ulong               exec_cnt;      /* Immutable. */
  long                txn_in_flight_last_tick;
  ulong               root_idx;
  fd_rdisp_t *        rdisp;
  ulong               txn_exec_ready_bitset[ 1 ];  /* Bit i set if exec tile i is free for transaction execution. */
  ulong               sigverify_ready_bitset[ 1 ]; /* Bit i set if exec tile i is free for sigverify. */
  ulong               active_bank_idx; /* Index of the actively replayed block, or ULONG_MAX if no block is
                                          actively replayed; has to have a transaction to dispatch; staged
                                          blocks that have no transactions to dispatch are not eligible for
                                          being active. */
  ulong               staged_bitset;    /* Bit i set if staging lane i is occupied. */
  ulong               staged_head_bank_idx[ FD_SCHED_MAX_STAGING_LANES ]; /* Head of the linear chain in each staging lane, ignored if bit i is
                                                                             not set in the bitset. */
  ulong               txn_pool_free_cnt;
  fd_txn_p_t          txn_pool[ FD_SCHED_MAX_DEPTH ];
  ulong               tile_to_bank_idx[ FD_SCHED_MAX_EXEC_TILE_CNT ]; /* Index of the bank that the exec tile is executing against. */
  txn_bitset_t        exec_done_set[ txn_bitset_word_cnt ];      /* Indexed by txn_idx. */
  txn_bitset_t        sigverify_done_set[ txn_bitset_word_cnt ]; /* Indexed by txn_idx. */
  fd_sched_block_t *  block_pool; /* Just a flat array. */
  ulong               block_pool_popcnt;
};
typedef struct fd_sched fd_sched_t;
     175             : 
     176             : 
     177             : /* Internal helpers. */
     178             : 
/* Insert a new block into the fork tree under parent_bank_idx. */
static void
add_block( fd_sched_t * sched,
           ulong        bank_idx,
           ulong        parent_bank_idx );

/* Parse buffered FEC data in block into transactions.  Returns one of
   FD_SCHED_PARSER_{OK,AGAIN_LATER,BAD_BLOCK}. */
FD_WARN_UNUSED static int
fd_sched_parse( fd_sched_t * sched, fd_sched_block_t * block, fd_sched_alut_ctx_t * alut_ctx );

/* Parse a single transaction out of the block's FEC buffer.  Same
   return convention as fd_sched_parse. */
FD_WARN_UNUSED static int
fd_sched_parse_txn( fd_sched_t * sched, fd_sched_block_t * block, fd_sched_alut_ctx_t * alut_ctx );

/* Pick a new active block, if one is eligible. */
static void
try_activate_block( fd_sched_t * sched );

/* Validate the current active block, replacing it if necessary. */
static void
check_or_set_active_block( fd_sched_t * sched );

/* Mark block and all its descendants as abandoned (dying). */
static void
subtree_abandon( fd_sched_t * sched, fd_sched_block_t * block );

/* Possibly switch the active block to bank_idx. */
static void
maybe_switch_block( fd_sched_t * sched, ulong bank_idx );

/* Locate the longest unstaged fork and stage it into lane_idx. */
FD_FN_UNUSED static ulong
find_and_stage_longest_unstaged_fork( fd_sched_t * sched, int lane_idx );

/* Compute luf_depth for the subtree rooted at bank_idx. */
static ulong
compute_longest_unstaged_fork( fd_sched_t * sched, ulong bank_idx );

/* Stage the longest unstaged fork starting at bank_idx into lane_idx. */
static ulong
stage_longest_unstaged_fork( fd_sched_t * sched, ulong bank_idx, int lane_idx );
     210             : 
     211             : static inline fd_sched_block_t *
     212           0 : block_pool_ele( fd_sched_t * sched, ulong idx ) {
     213           0 :   FD_TEST( idx<sched->block_cnt_max || idx==ULONG_MAX );
     214           0 :   return idx==ULONG_MAX ? NULL : sched->block_pool+idx;
     215           0 : }
     216             : 
     217             : FD_FN_UNUSED static inline int
     218           0 : block_is_void( fd_sched_block_t * block ) {
     219           0 :   /* We've seen everything in the block and no transaction got parsed
     220           0 :      out. */
     221           0 :   return block->fec_eos && block->txn_parsed_cnt==0;
     222           0 : }
     223             : 
     224             : static inline int
     225           0 : block_should_signal_end( fd_sched_block_t * block ) {
     226           0 :   return block->fec_eos && block->txn_parsed_cnt==block->txn_done_cnt && block->block_start_done && !block->block_end_signaled;
     227           0 : }
     228             : 
     229             : static inline int
     230           0 : block_will_signal_end( fd_sched_block_t * block ) {
     231           0 :   return block->fec_eos && !block->block_end_signaled;
     232           0 : }
     233             : 
     234             : /* Is there something known to be dispatchable in the block?  This is an
     235             :    important liveness property.  A block that doesn't contain any known
     236             :    dispatchable tasks will be deactivated or demoted. */
     237             : static inline int
     238           0 : block_is_dispatchable( fd_sched_block_t * block ) {
     239           0 :   ulong exec_queued_cnt      = block->txn_parsed_cnt-block->txn_exec_in_flight_cnt-block->txn_exec_done_cnt;
     240           0 :   ulong sigverify_queued_cnt = block->txn_parsed_cnt-block->txn_sigverify_in_flight_cnt-block->txn_sigverify_done_cnt;
     241           0 :   return exec_queued_cnt>0UL ||
     242           0 :          sigverify_queued_cnt>0UL ||
     243           0 :          !block->block_start_signaled ||
     244           0 :          block_will_signal_end( block );
     245           0 : }
     246             : 
     247             : static inline int
     248           0 : block_is_in_flight( fd_sched_block_t * block ) {
     249           0 :   return block->txn_exec_in_flight_cnt || block->txn_sigverify_in_flight_cnt || (block->block_end_signaled && !block->block_end_done);
     250           0 : }
     251             : 
     252             : static inline int
     253           0 : block_is_done( fd_sched_block_t * block ) {
     254           0 :   return block->fec_eos && block->txn_parsed_cnt==block->txn_done_cnt && block->block_start_done && block->block_end_done;
     255           0 : }
     256             : 
     257             : static inline int
     258           0 : block_is_stageable( fd_sched_block_t * block ) {
     259           0 :   int rv = !block_is_done( block ) && !block->dying;
     260           0 :   if( FD_UNLIKELY( rv && !block->in_rdisp ) ) {
     261             :     /* Invariant: stageable blocks may be currently staged or unstaged,
     262             :        but must be in the dispatcher either way.  When a block
     263             :        transitions to DONE, it will be immediately removed from the
     264             :        dispatcher.  When a block transitions to DYING, it will be
     265             :        eventually abandoned from the dispatcher. */
     266           0 :     FD_LOG_CRIT(( "invariant violation: stageable block->in_rdisp==0, txn_parsed_cnt %u, txn_done_cnt %u, fec_eos %u,, slot %lu, parent slot %lu",
     267           0 :                   block->txn_parsed_cnt, block->txn_done_cnt, (uint)block->fec_eos, block->slot, block->parent_slot ));
     268           0 :   }
     269           0 :   return rv;
     270           0 : }
     271             : 
     272             : static inline int
     273           0 : block_is_promotable( fd_sched_block_t * block ) {
     274           0 :   return block_is_stageable( block ) && block_is_dispatchable( block ) && !block->staged;
     275           0 : }
     276             : 
     277             : static inline int
     278           0 : block_is_activatable( fd_sched_block_t * block ) {
     279           0 :   return block_is_stageable( block ) && block_is_dispatchable( block ) && block->staged;
     280           0 : }
     281             : 
     282             : static inline int
     283           0 : block_should_deactivate( fd_sched_block_t * block ) {
     284             :   /* We allow a grace period, during which a block has nothing to
     285             :      dispatch, but has something in-flight.  The block is allowed to
     286             :      stay activated and ingest FEC sets during this time.  The block
     287             :      will be deactivated if there's still nothing to dispatch by the
     288             :      time all in-flight tasks are completed. */
     289           0 :   return !block_is_activatable( block ) && !block_is_in_flight( block );
     290           0 : }
     291             : 
     292             : static inline ulong
     293           0 : block_to_idx( fd_sched_t * sched, fd_sched_block_t * block ) { return (ulong)(block-sched->block_pool); }
     294             : 
     295             : __attribute__((format(printf,2,3)))
     296             : static void
     297             : fd_sched_printf( fd_sched_t * sched,
     298             :                  char const * fmt,
     299           0 :                  ... ) {
     300           0 :   va_list ap;
     301           0 :   ulong len;
     302           0 :   va_start( ap, fmt );
     303           0 :   int ret = vsnprintf( sched->print_buf+sched->print_buf_sz,
     304           0 :                        FD_SCHED_MAX_PRINT_BUF_SZ-sched->print_buf_sz,
     305           0 :                        fmt, ap );
     306           0 :   va_end( ap );
     307           0 :   len = fd_ulong_if( ret<0, 0UL, fd_ulong_min( (ulong)ret, FD_SCHED_MAX_PRINT_BUF_SZ-sched->print_buf_sz-1UL ) );
     308           0 :   sched->print_buf[ sched->print_buf_sz+len ] = '\0';
     309           0 :   sched->print_buf_sz += len;
     310           0 : }
     311             : 
/* Append a one-line summary of a block's progress counters and parser
   state to the scheduler print buffer.  Unlike print_block_debug, this
   omits the staging/status bits and in-flight counters. */
FD_FN_UNUSED static void
print_block_metrics( fd_sched_t * sched, fd_sched_block_t * block ) {
  fd_sched_printf( sched, "block idx %lu, block slot %lu, parent_slot %lu, fec_eos %d, rooted %d, txn_parsed_cnt %u, txn_exec_done_cnt %u, txn_sigverify_done_cnt %u, txn_done_cnt %u, shred_cnt %u, fec_cnt %u, txn_pool_max_popcnt %lu/%lu, block_pool_max_popcnt %lu/%lu, mblks_rem %lu, txns_rem %lu, fec_buf_sz %u, fec_buf_boff %u, fec_buf_soff %u, fec_eob %d, fec_sob %d\n",
                   block_to_idx( sched, block ), block->slot, block->parent_slot, block->fec_eos, block->rooted, block->txn_parsed_cnt, block->txn_exec_done_cnt, block->txn_sigverify_done_cnt, block->txn_done_cnt, block->shred_cnt, block->fec_cnt, block->txn_pool_max_popcnt, FD_SCHED_MAX_DEPTH, block->block_pool_max_popcnt, sched->block_cnt_max, block->mblks_rem, block->txns_rem, block->fec_buf_sz, block->fec_buf_boff, block->fec_buf_soff, block->fec_eob, block->fec_sob );
}
     317             : 
/* Append a full one-line debug dump of a block (all status bits,
   counters, and parser state) to the scheduler print buffer. */
FD_FN_UNUSED static void
print_block_debug( fd_sched_t * sched, fd_sched_block_t * block ) {
  fd_sched_printf( sched, "block idx %lu, block slot %lu, parent_slot %lu, staged %d (lane %lu), dying %d, in_rdisp %d, fec_eos %d, rooted %d, block_start_signaled %d, block_end_signaled %d, block_start_done %d, block_end_done %d, txn_parsed_cnt %u, txn_exec_in_flight_cnt %u, txn_exec_done_cnt %u, txn_sigverify_in_flight_cnt %u, txn_sigverify_done_cnt %u, txn_done_cnt %u, shred_cnt %u, fec_cnt %u, txn_pool_max_popcnt %lu/%lu, block_pool_max_popcnt %lu/%lu, mblks_rem %lu, txns_rem %lu, fec_buf_sz %u, fec_buf_boff %u, fec_buf_soff %u, fec_eob %d, fec_sob %d\n",
                   block_to_idx( sched, block ), block->slot, block->parent_slot, block->staged, block->staging_lane, block->dying, block->in_rdisp, block->fec_eos, block->rooted, block->block_start_signaled, block->block_end_signaled, block->block_start_done, block->block_end_done, block->txn_parsed_cnt, block->txn_exec_in_flight_cnt, block->txn_exec_done_cnt, block->txn_sigverify_in_flight_cnt, block->txn_sigverify_done_cnt, block->txn_done_cnt, block->shred_cnt, block->fec_cnt, block->txn_pool_max_popcnt, FD_SCHED_MAX_DEPTH, block->block_pool_max_popcnt, sched->block_cnt_max, block->mblks_rem, block->txns_rem, block->fec_buf_sz, block->fec_buf_boff, block->fec_buf_soff, block->fec_eob, block->fec_sob );
}
     323             : 
     324             : FD_FN_UNUSED static void
     325           0 : print_block_and_parent( fd_sched_t * sched, fd_sched_block_t * block ) {
     326           0 :   print_block_debug( sched, block );
     327           0 :   fd_sched_block_t * parent = block_pool_ele( sched, block->parent_idx );
     328           0 :   if( FD_LIKELY( parent ) ) print_block_debug( sched, parent );
     329           0 : }
     330             : 
/* Append a one-line dump of every scheduler metric counter to the
   scheduler print buffer. */
FD_FN_UNUSED static void
print_metrics( fd_sched_t * sched ) {
    fd_sched_printf( sched, "metrics: block_added_cnt %u, block_added_staged_cnt %u, block_added_unstaged_cnt %u, block_added_dead_ood_cnt %u, block_removed_cnt %u, block_abandoned_cnt %u, block_bad_cnt %u, block_promoted_cnt %u, block_demoted_cnt %u, deactivate_no_child_cnt %u, deactivate_no_txn_cnt %u, deactivate_pruned_cnt %u, deactivate_abandoned_cnt %u, lane_switch_cnt %u, lane_promoted_cnt %u, lane_demoted_cnt %u, alut_success_cnt %u, alut_serializing_cnt %u, txn_abandoned_parsed_cnt %u, txn_abandoned_exec_done_cnt %u, txn_abandoned_done_cnt %u, txn_max_in_flight_cnt %u, txn_weighted_in_flight_cnt %lu, txn_weighted_in_flight_tickcount %lu, txn_none_in_flight_tickcount %lu, txn_parsed_cnt %lu, txn_exec_done_cnt %lu, txn_sigverify_done_cnt %lu, txn_done_cnt %lu, bytes_ingested_cnt %lu, bytes_ingested_unparsed_cnt %lu, bytes_dropped_cnt %lu, fec_cnt %lu\n",
                   sched->metrics->block_added_cnt, sched->metrics->block_added_staged_cnt, sched->metrics->block_added_unstaged_cnt, sched->metrics->block_added_dead_ood_cnt, sched->metrics->block_removed_cnt, sched->metrics->block_abandoned_cnt, sched->metrics->block_bad_cnt, sched->metrics->block_promoted_cnt, sched->metrics->block_demoted_cnt, sched->metrics->deactivate_no_child_cnt, sched->metrics->deactivate_no_txn_cnt, sched->metrics->deactivate_pruned_cnt, sched->metrics->deactivate_abandoned_cnt, sched->metrics->lane_switch_cnt, sched->metrics->lane_promoted_cnt, sched->metrics->lane_demoted_cnt, sched->metrics->alut_success_cnt, sched->metrics->alut_serializing_cnt, sched->metrics->txn_abandoned_parsed_cnt, sched->metrics->txn_abandoned_exec_done_cnt, sched->metrics->txn_abandoned_done_cnt, sched->metrics->txn_max_in_flight_cnt, sched->metrics->txn_weighted_in_flight_cnt, sched->metrics->txn_weighted_in_flight_tickcount, sched->metrics->txn_none_in_flight_tickcount, sched->metrics->txn_parsed_cnt, sched->metrics->txn_exec_done_cnt, sched->metrics->txn_sigverify_done_cnt, sched->metrics->txn_done_cnt, sched->metrics->bytes_ingested_cnt, sched->metrics->bytes_ingested_unparsed_cnt, sched->metrics->bytes_dropped_cnt, sched->metrics->fec_cnt );

}
     337             : 
/* Append global scheduler state, then the active block (if any), then
   the head block of every occupied staging lane, to the print buffer.
   NOTE(review): only staged_head_bank_idx[0..3] are printed in the
   first line, which matches FD_SCHED_MAX_STAGING_LANES==4. */
FD_FN_UNUSED static void
print_sched( fd_sched_t * sched ) {
  fd_sched_printf( sched, "sched canary 0x%lx, exec_cnt %lu, root_idx %lu, txn_exec_ready_bitset[ 0 ] 0x%lx, sigverify_ready_bitset[ 0 ] 0x%lx, active_idx %lu, staged_bitset %lu, staged_head_idx[0] %lu, staged_head_idx[1] %lu, staged_head_idx[2] %lu, staged_head_idx[3] %lu, txn_pool_free_cnt %lu/%lu, block_pool_popcnt %lu/%lu\n",
                   sched->canary, sched->exec_cnt, sched->root_idx, sched->txn_exec_ready_bitset[ 0 ], sched->sigverify_ready_bitset[ 0 ], sched->active_bank_idx, sched->staged_bitset, sched->staged_head_bank_idx[ 0 ], sched->staged_head_bank_idx[ 1 ], sched->staged_head_bank_idx[ 2 ], sched->staged_head_bank_idx[ 3 ], sched->txn_pool_free_cnt, FD_SCHED_MAX_DEPTH, sched->block_pool_popcnt, sched->block_cnt_max );
  fd_sched_block_t * active_block = block_pool_ele( sched, sched->active_bank_idx );
  if( active_block ) print_block_debug( sched, active_block );
  for( int l=0; l<(int)FD_SCHED_MAX_STAGING_LANES; l++ ) {
    if( fd_ulong_extract_bit( sched->staged_bitset, l ) ) {
      fd_sched_block_t * block = block_pool_ele( sched, sched->staged_head_bank_idx[ l ] );
      print_block_debug( sched, block );
    }
  }
}
     351             : 
/* Full diagnostic dump in a fixed order: metrics, global scheduler
   state, then the given block and its parent. */
FD_FN_UNUSED static void
print_all( fd_sched_t * sched, fd_sched_block_t * block ) {
  print_metrics( sched );
  print_sched( sched );
  print_block_and_parent( sched, block );
}
     358             : 
     359             : 
     360             : /* Public functions. */
     361             : 
     362           0 : ulong fd_sched_align( void ) {
     363           0 :   return fd_ulong_max( alignof(fd_sched_t),
     364           0 :          fd_ulong_max( fd_rdisp_align(),
     365           0 :          fd_ulong_max( alignof(fd_sched_block_t), 64UL ))); /* Minimally cache line aligned. */
     366           0 : }
     367             : 
     368             : ulong
     369           0 : fd_sched_footprint( ulong block_cnt_max ) {
     370           0 :   ulong l = FD_LAYOUT_INIT;
     371           0 :   l = FD_LAYOUT_APPEND( l, fd_sched_align(),          sizeof(fd_sched_t)                                      );
     372           0 :   l = FD_LAYOUT_APPEND( l, fd_rdisp_align(),          fd_rdisp_footprint( FD_SCHED_MAX_DEPTH, block_cnt_max ) ); /* dispatcher */
     373           0 :   l = FD_LAYOUT_APPEND( l, alignof(fd_sched_block_t), block_cnt_max*sizeof(fd_sched_block_t)                  ); /* block pool */
     374           0 :   return FD_LAYOUT_FINI( l, fd_sched_align() );
     375           0 : }
     376             : 
/* Format a memory region (aligned/sized per fd_sched_align/footprint)
   as a new scheduler.  Lays out the struct, the rdisp dispatcher, and
   the block pool; initializes counters, sentinels, and the per-exec-
   tile ready bitsets.  exec_cnt must be in (0, FD_SCHED_MAX_EXEC_TILE_CNT].
   Returns mem on success (crashes via FD_TEST on bad exec_cnt).
   NOTE(review): print_buf_sz and the sched->rdisp / sched->block_pool
   pointers are not set here — presumably fd_sched_join (below) wires
   them up; confirm. */
void *
fd_sched_new( void * mem, ulong block_cnt_max, ulong exec_cnt ) {
  FD_TEST( exec_cnt && exec_cnt<=FD_SCHED_MAX_EXEC_TILE_CNT );

  /* Carve the region into struct, dispatcher, and block pool — must
     match fd_sched_footprint exactly. */
  FD_SCRATCH_ALLOC_INIT( l, mem );
  fd_sched_t * sched = FD_SCRATCH_ALLOC_APPEND( l, fd_sched_align(),          sizeof(fd_sched_t)                                      );
  void * _rdisp      = FD_SCRATCH_ALLOC_APPEND( l, fd_rdisp_align(),          fd_rdisp_footprint( FD_SCHED_MAX_DEPTH, block_cnt_max ) );
  void * _bpool      = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_sched_block_t), block_cnt_max*sizeof(fd_sched_block_t)                  );
  FD_SCRATCH_ALLOC_FINI( l, fd_sched_align() );

  /* Seed the dispatcher's internal hashing from the tick counter. */
  ulong seed = ((ulong)fd_tickcount()) ^ FD_SCHED_MAGIC;
  fd_rdisp_new( _rdisp, FD_SCHED_MAX_DEPTH, block_cnt_max, seed );

  /* Only in_sched needs clearing up front; the rest of each block
     element is initialized when the block is added. */
  fd_sched_block_t * bpool = (fd_sched_block_t *)_bpool;
  for( ulong i=0; i<block_cnt_max; i++ ) {
    bpool[ i ].in_sched = 0;
  }

  fd_memset( sched->metrics, 0, sizeof(fd_sched_metrics_t) );
  sched->txn_in_flight_last_tick = LONG_MAX;

  sched->canary           = FD_SCHED_MAGIC;
  sched->block_cnt_max    = block_cnt_max;
  sched->exec_cnt         = exec_cnt;
  sched->root_idx         = ULONG_MAX; /* No root yet. */
  sched->active_bank_idx  = ULONG_MAX; /* No active block yet. */
  sched->staged_bitset    = 0UL;       /* All staging lanes empty. */

  /* All exec tiles start out idle for both task types. */
  sched->txn_exec_ready_bitset[ 0 ]  = fd_ulong_mask_lsb( (int)exec_cnt );
  sched->sigverify_ready_bitset[ 0 ] = fd_ulong_mask_lsb( (int)exec_cnt );

  sched->txn_pool_free_cnt = FD_SCHED_MAX_DEPTH-1UL; /* -1 because index 0 is unusable as a sentinel reserved by the dispatcher */

  txn_bitset_new( sched->exec_done_set );
  txn_bitset_new( sched->sigverify_done_set );

  sched->block_pool_popcnt = 0UL;

  return sched;
}
     417             : 
     418             : fd_sched_t *
     419           0 : fd_sched_join( void * mem, ulong block_cnt_max ) {
     420           0 :   fd_sched_t * sched = (fd_sched_t *)mem;
     421             : 
     422           0 :   FD_TEST( sched->canary==FD_SCHED_MAGIC );
     423           0 :   FD_TEST( sched->block_cnt_max==block_cnt_max );
     424             : 
     425           0 :   FD_SCRATCH_ALLOC_INIT( l, mem );
     426           0 :   /*           */ FD_SCRATCH_ALLOC_APPEND( l, fd_sched_align(),          sizeof(fd_sched_t)                                      );
     427           0 :   void * _rdisp = FD_SCRATCH_ALLOC_APPEND( l, fd_rdisp_align(),          fd_rdisp_footprint( FD_SCHED_MAX_DEPTH, block_cnt_max ) );
     428           0 :   void * _bpool = FD_SCRATCH_ALLOC_APPEND( l, alignof(fd_sched_block_t), block_cnt_max*sizeof(fd_sched_block_t)                  );
     429           0 :   FD_SCRATCH_ALLOC_FINI( l, fd_sched_align() );
     430             : 
     431           0 :   sched->rdisp      = fd_rdisp_join( _rdisp );
     432           0 :   sched->block_pool = _bpool;
     433             : 
     434           0 :   txn_bitset_join( sched->exec_done_set );
     435           0 :   txn_bitset_join( sched->sigverify_done_set );
     436             : 
     437           0 :   return sched;
     438           0 : }
     439             : 
     440             : int
     441           0 : fd_sched_fec_can_ingest( fd_sched_t * sched, fd_sched_fec_t * fec ) {
     442           0 :   FD_TEST( sched->canary==FD_SCHED_MAGIC );
     443           0 :   FD_TEST( fec->bank_idx<sched->block_cnt_max );
     444           0 :   FD_TEST( fec->parent_bank_idx<sched->block_cnt_max );
     445             : 
     446           0 :   if( FD_UNLIKELY( fec->fec->data_sz>FD_SCHED_MAX_PAYLOAD_PER_FEC ) ) {
     447           0 :     sched->print_buf_sz = 0UL;
     448           0 :     print_metrics( sched );
     449           0 :     print_sched( sched );
     450           0 :     FD_LOG_NOTICE(( "%s", sched->print_buf ));
     451           0 :     FD_LOG_CRIT(( "invalid FEC set: fec->data_sz %lu, slot %lu, parent slot %lu", fec->fec->data_sz, fec->slot, fec->parent_slot ));
     452           0 :   }
     453             : 
     454           0 :   ulong fec_buf_sz = 0UL;
     455           0 :   fd_sched_block_t * block = block_pool_ele( sched, fec->bank_idx );
     456           0 :   if( FD_LIKELY( !fec->is_first_in_block ) ) {
     457           0 :     fec_buf_sz += block->fec_buf_sz-block->fec_buf_soff;
     458           0 :   } else {
     459             :     /* No residual data as this is a fresh new block. */
     460           0 :   }
     461             :   /* Addition is safe and won't overflow because we checked the FEC set
     462             :      size above. */
     463           0 :   fec_buf_sz += fec->fec->data_sz;
     464             :   /* Assuming every transaction is min size, do we have enough free
     465             :      entries in the txn pool?  For a more precise txn count, we would
     466             :      have to do some parsing. */
     467           0 :   return sched->txn_pool_free_cnt>=fec_buf_sz/FD_TXN_MIN_SERIALIZED_SZ;
     468           0 : }
     469             : 
     470             : int
     471             : fd_sched_can_ingest( fd_sched_t * sched,
     472           0 :                      ulong        fec_cnt ) {
     473           0 :   FD_TEST( sched->canary==FD_SCHED_MAGIC );
     474             :   /* Worst case, we need one byte from the incoming data to extract a
     475             :      transaction out of the residual data, and the rest of the incoming
     476             :      data contributes toward min sized transactions. */
     477           0 :   return sched->txn_pool_free_cnt>=(FD_SCHED_MAX_TXN_PER_FEC*fec_cnt);
     478           0 : }
     479             : 
/* Ingest one FEC set for a block.  On the first FEC set of a block,
   links the block into the fork tree and tries to place it on a
   dispatcher staging lane (inheriting the parent's lane when free,
   else allocating a fresh lane, else falling back to unstaged).
   Buffers the payload, tracks shred boundaries, and hands the buffered
   bytes to the parser.  Returns 1 on success or on a silently dropped
   FEC set (dead/dying block); returns 0 when the block was determined
   to be bad and its subtree abandoned.  NOTE(review): return-value
   contract inferred from the visible paths — confirm against the
   header's documentation. */
FD_WARN_UNUSED int
fd_sched_fec_ingest( fd_sched_t * sched, fd_sched_fec_t * fec ) {
  FD_TEST( sched->canary==FD_SCHED_MAGIC );
  FD_TEST( fec->bank_idx<sched->block_cnt_max );
  FD_TEST( fec->parent_bank_idx<sched->block_cnt_max );

  fd_sched_block_t * block = block_pool_ele( sched, fec->bank_idx );

  /* Oversized payloads indicate upstream corruption: dump state and
     crash rather than overflow the per-block FEC buffer. */
  if( FD_UNLIKELY( fec->fec->data_sz>FD_SCHED_MAX_PAYLOAD_PER_FEC ) ) {
    sched->print_buf_sz = 0UL;
    print_all( sched, block );
    FD_LOG_NOTICE(( "%s", sched->print_buf ));
    FD_LOG_CRIT(( "invalid FEC set: fec->data_sz %lu, slot %lu, parent slot %lu", fec->fec->data_sz, fec->slot, fec->parent_slot ));
  }

  if( FD_UNLIKELY( fec->is_first_in_block ) ) {
    /* This is a new block. */
    add_block( sched, fec->bank_idx, fec->parent_bank_idx );
    block->slot        = fec->slot;
    block->parent_slot = fec->parent_slot;

    if( FD_UNLIKELY( block->dying ) ) {
      /* The child of a dead block is also dead.  We added it to our
         fork tree just so we could track an entire lineage of dead
         children and propagate the dead property to the entire lineage,
         in case there were frags for more than one dead children
         in-flight at the time the parent was abandoned.  That being
         said, we shouldn't need to add the dead child to the
         dispatcher. */
      sched->metrics->block_added_dead_ood_cnt++;

      /* Ignore the FEC set for a dead block. */
      sched->metrics->bytes_dropped_cnt += fec->fec->data_sz;
      return 1;
    }

    /* Try to find a staging lane for this block. */
    int alloc_lane = 0;
    fd_sched_block_t * parent_block = block_pool_ele( sched, fec->parent_bank_idx );
    if( FD_LIKELY( parent_block->staged ) ) {
      /* Parent is staged.  So see if we can continue down the same
         staging lane. */
      ulong staging_lane = parent_block->staging_lane;
      ulong child_idx    = parent_block->child_idx;
      /* Walk the sibling list: only one child may occupy the parent's
         lane. */
      while( child_idx!=ULONG_MAX ) {
        fd_sched_block_t * child = block_pool_ele( sched, child_idx );
        if( child->staged && child->staging_lane==staging_lane ) {
          /* Found a child on the same lane.  So we're done. */
          staging_lane = FD_RDISP_UNSTAGED;
          break;
        }
        child_idx = child->sibling_idx;
      }
      /* No child is staged on the same lane as the parent.  So stage
         this block.  This is the common case. */
      if( FD_LIKELY( staging_lane!=FD_RDISP_UNSTAGED ) ) {
        block->in_rdisp     = 1;
        block->staged       = 1;
        block->staging_lane = staging_lane;
        fd_rdisp_add_block( sched->rdisp, fec->bank_idx, staging_lane );
        sched->metrics->block_added_cnt++;
        sched->metrics->block_added_staged_cnt++;
      } else {
        alloc_lane = 1;
      }
    } else {
      if( block_is_stageable( parent_block ) ) {
        /* Parent is unstaged but stageable.  So let's be unstaged too.
           This is a policy decision to be lazy and not promote parent
           at the moment. */
        block->in_rdisp = 1;
        block->staged   = 0;
        fd_rdisp_add_block( sched->rdisp, fec->bank_idx, FD_RDISP_UNSTAGED );
        sched->metrics->block_added_cnt++;
        sched->metrics->block_added_unstaged_cnt++;
      } else {
        alloc_lane = 1;
      }
    }
    if( FD_UNLIKELY( alloc_lane ) ) {
      /* We weren't able to inherit the parent's staging lane.  So try
         to find a new staging lane. */
      if( FD_LIKELY( sched->staged_bitset!=fd_ulong_mask_lsb( FD_SCHED_MAX_STAGING_LANES ) ) ) { /* Optimize for lane available. */
        int lane_idx = fd_ulong_find_lsb( ~sched->staged_bitset );
        if( FD_UNLIKELY( lane_idx>=(int)FD_SCHED_MAX_STAGING_LANES ) ) {
          FD_LOG_CRIT(( "invariant violation: lane_idx %d, sched->staged_bitset %lx",
                        lane_idx, sched->staged_bitset ));
        }
        sched->staged_bitset = fd_ulong_set_bit( sched->staged_bitset, lane_idx );
        sched->staged_head_bank_idx[ lane_idx ] = fec->bank_idx;
        block->in_rdisp     = 1;
        block->staged       = 1;
        block->staging_lane = (ulong)lane_idx;
        fd_rdisp_add_block( sched->rdisp, fec->bank_idx, block->staging_lane );
        sched->metrics->block_added_cnt++;
        sched->metrics->block_added_staged_cnt++;
      } else {
        /* No lanes available. */
        block->in_rdisp = 1;
        block->staged   = 0;
        fd_rdisp_add_block( sched->rdisp, fec->bank_idx, FD_RDISP_UNSTAGED );
        sched->metrics->block_added_cnt++;
        sched->metrics->block_added_unstaged_cnt++;
      }
    }
  }

  /* High-watermark bookkeeping for pool occupancy. */
  block->txn_pool_max_popcnt   = fd_ulong_max( block->txn_pool_max_popcnt, FD_SCHED_MAX_DEPTH-sched->txn_pool_free_cnt );
  block->block_pool_max_popcnt = fd_ulong_max( block->block_pool_max_popcnt, sched->block_pool_popcnt );

  if( FD_UNLIKELY( block->dying ) ) {
    /* Ignore the FEC set for a dead block. */
    sched->metrics->bytes_dropped_cnt += fec->fec->data_sz;
    return 1;
  }

  if( FD_UNLIKELY( !block->in_rdisp ) ) {
    /* Invariant: block must be in the dispatcher at this point. */
    sched->print_buf_sz = 0UL;
    print_all( sched, block );
    FD_LOG_NOTICE(( "%s", sched->print_buf ));
    FD_LOG_CRIT(( "invariant violation: block->in_rdisp==0, slot %lu, parent slot %lu",
                  block->slot, block->parent_slot ));
  }

  if( FD_UNLIKELY( block->fec_eos ) ) {
    /* This means something is wrong upstream.  We're getting more FEC
       sets for a block that has already ended, or so we were told. */
    sched->print_buf_sz = 0UL;
    print_all( sched, block );
    FD_LOG_NOTICE(( "%s", sched->print_buf ));
    FD_LOG_CRIT(( "invariant violation: block->fec_eos set but getting more FEC sets, slot %lu, parent slot %lu", fec->slot, fec->parent_slot ));
  }
  if( FD_UNLIKELY( block->fec_eob && fec->is_last_in_batch ) ) {
    /* If the previous FEC set ingestion and parse was successful,
       block->fec_eob should be cleared.  The fact that fec_eob is set
       means that the previous batch didn't parse properly.  So this is
       a bad block.  We should refuse to replay down the fork. */
    FD_LOG_INFO(( "bad block: failed to parse, slot %lu, parent slot %lu", fec->slot, fec->parent_slot ));
    sched->print_buf_sz = 0UL;
    print_all( sched, block );
    FD_LOG_DEBUG(( "%s", sched->print_buf ));
    subtree_abandon( sched, block );
    sched->metrics->bytes_dropped_cnt += fec->fec->data_sz;
    sched->metrics->block_bad_cnt++;
    check_or_set_active_block( sched );
    return 0;
  }
  if( FD_UNLIKELY( block->child_idx!=ULONG_MAX ) ) {
    /* This means something is wrong upstream.  FEC sets are not being
       delivered in replay order.  We got a child block FEC set before
       this block was completely delivered. */
    sched->print_buf_sz = 0UL;
    print_all( sched, block );
    fd_sched_block_t * child_block = block_pool_ele( sched, block->child_idx );
    print_block_debug( sched, child_block );
    FD_LOG_NOTICE(( "%s", sched->print_buf ));
    FD_LOG_CRIT(( "invariant violation: block->child_idx %lu, slot %lu, parent slot %lu", block->child_idx, fec->slot, fec->parent_slot ));
  }

  FD_TEST( block->fec_buf_sz>=block->fec_buf_soff );
  if( FD_LIKELY( block->fec_buf_sz>block->fec_buf_soff ) ) {
    /* If there is residual data from the previous FEC set within the
       same batch, we move it to the beginning of the buffer and append
       the new FEC set.  memmove because source and destination ranges
       may overlap. */
    memmove( block->fec_buf, block->fec_buf+block->fec_buf_soff, block->fec_buf_sz-block->fec_buf_soff );
  }
  /* Account for the consumed prefix: boff tracks the block-relative
     offset of the buffer start, soff resets now that the residual data
     sits at offset 0. */
  block->fec_buf_boff += block->fec_buf_soff;
  block->fec_buf_sz   -= block->fec_buf_soff;
  block->fec_buf_soff  = 0;
  /* Addition is safe and won't overflow because we checked the FEC
     set size above. */
  if( FD_UNLIKELY( block->fec_buf_sz+fec->fec->data_sz>FD_SCHED_MAX_FEC_BUF_SZ ) ) {
    /* In a conformant block, there shouldn't be more than a
       transaction's worth of residual data left over from the previous
       FEC set within the same batch.  So if this condition doesn't
       hold, it's a bad block.  Instead of crashing, we should refuse to
       replay down the fork. */
    FD_LOG_INFO(( "bad block: fec_buf_sz %u, fec->data_sz %lu, slot %lu, parent slot %lu", block->fec_buf_sz, fec->fec->data_sz, fec->slot, fec->parent_slot ));
    sched->print_buf_sz = 0UL;
    print_all( sched, block );
    FD_LOG_DEBUG(( "%s", sched->print_buf ));
    subtree_abandon( sched, block );
    sched->metrics->bytes_dropped_cnt += fec->fec->data_sz;
    sched->metrics->block_bad_cnt++;
    check_or_set_active_block( sched );
    return 0;
  }

  /* Append the new FEC set to the end of the buffer. */
  fd_memcpy( block->fec_buf+block->fec_buf_sz, fec->fec->data, fec->fec->data_sz );
  block->fec_buf_sz += (uint)fec->fec->data_sz;
  sched->metrics->bytes_ingested_cnt += fec->fec->data_sz;

  /* Remember batch/block end markers for the next ingest's checks. */
  block->fec_eob = fec->is_last_in_batch;
  block->fec_eos = fec->is_last_in_block;

  /* Record cumulative end-of-shred offsets.  Only the first 32 shreds
     of an FEC set have exact offsets; beyond that, boundaries are
     interpolated assuming uniformly sized shreds, and the final shred
     is pinned to the exact payload end. */
  ulong block_sz = block->shred_cnt>0 ? block->shred_blk_offs[ block->shred_cnt-1 ] : 0UL;
  for( ulong i=0; i<fec->shred_cnt; i++ ) {
    if( FD_LIKELY( i<32UL ) ) {
      block->shred_blk_offs[ block->shred_cnt++ ] = (uint)block_sz + fec->fec->block_offs[ i ];
    } else if( FD_UNLIKELY( i!=fec->shred_cnt-1UL ) ) {
      /* We don't track shred boundaries after 32 shreds, assume they're
         sized uniformly */
      ulong num_overflow_shreds = fec->shred_cnt-32UL;
      ulong overflow_idx        = i-32UL;
      ulong overflow_data_sz    = fec->fec->data_sz-fec->fec->block_offs[ 31 ];
      block->shred_blk_offs[ block->shred_cnt++ ] = (uint)block_sz + fec->fec->block_offs[ 31 ] + (uint)(overflow_data_sz / num_overflow_shreds * (overflow_idx + 1UL));
    } else {
      block->shred_blk_offs[ block->shred_cnt++ ] = (uint)block_sz + (uint)fec->fec->data_sz;
    }
  }

  /* Parse as many complete transactions as possible out of the
     buffered bytes. */
  int err = fd_sched_parse( sched, block, fec->alut_ctx );

  block->fec_cnt++;
  sched->metrics->fec_cnt++;

  if( FD_UNLIKELY( err==FD_SCHED_PARSER_BAD_BLOCK ) ) {
    /* Parser rejected the block: abandon the whole subtree and refuse
       to replay down this fork. */
    FD_LOG_INFO(( "bad block: slot %lu, parent slot %lu", block->slot, block->parent_slot ));
    sched->print_buf_sz = 0UL;
    print_all( sched, block );
    FD_LOG_DEBUG(( "%s", sched->print_buf ));
    subtree_abandon( sched, block );
    sched->metrics->bytes_dropped_cnt += block->fec_buf_sz-block->fec_buf_soff;
    sched->metrics->block_bad_cnt++;
    check_or_set_active_block( sched );
    return 0;
  }

  /* Check if we need to set the active block. */
  check_or_set_active_block( sched );

  return 1;
}
     715             : 
     716             : ulong
     717           0 : fd_sched_task_next_ready( fd_sched_t * sched, fd_sched_task_t * out ) {
     718           0 :   FD_TEST( sched->canary==FD_SCHED_MAGIC );
     719             : 
     720           0 :   ulong exec_ready_bitset0 = sched->txn_exec_ready_bitset[ 0 ];
     721           0 :   ulong exec_fully_ready_bitset = sched->sigverify_ready_bitset[ 0 ] & exec_ready_bitset0;
     722           0 :   if( FD_UNLIKELY( !exec_fully_ready_bitset ) ) {
     723             :     /* Early exit if no exec tiles available. */
     724           0 :     return 0UL;
     725           0 :   }
     726             : 
     727           0 :   if( FD_UNLIKELY( sched->active_bank_idx==ULONG_MAX ) ) {
     728             :     /* No need to try activating a block.  If we're in this state,
     729             :        there's truly nothing to execute.  We will activate something
     730             :        when we ingest a FEC set with transactions. */
     731           0 :     return 0UL;
     732           0 :   }
     733             : 
     734           0 :   out->task_type = FD_SCHED_TT_NULL;
     735             : 
     736             :   /* We could in theory reevaluate staging lane allocation here and do
     737             :      promotion/demotion as needed.  It's a policy decision to minimize
     738             :      fork churn for now and just execute down the same active fork. */
     739             : 
     740           0 :   ulong bank_idx = sched->active_bank_idx;
     741           0 :   fd_sched_block_t * block = block_pool_ele( sched, bank_idx );
     742           0 :   if( FD_UNLIKELY( block_should_deactivate( block ) ) ) {
     743           0 :     sched->print_buf_sz = 0UL;
     744           0 :     print_all( sched, block );
     745           0 :     FD_LOG_NOTICE(( "%s", sched->print_buf ));
     746           0 :     FD_LOG_CRIT(( "invariant violation: active_bank_idx %lu is not activatable nor has anything in-flight", sched->active_bank_idx ));
     747           0 :   }
     748             : 
     749           0 :   block->txn_pool_max_popcnt   = fd_ulong_max( block->txn_pool_max_popcnt, FD_SCHED_MAX_DEPTH-sched->txn_pool_free_cnt );
     750           0 :   block->block_pool_max_popcnt = fd_ulong_max( block->block_pool_max_popcnt, sched->block_pool_popcnt );
     751             : 
     752           0 :   if( FD_UNLIKELY( !block->block_start_signaled ) ) {
     753           0 :     out->task_type = FD_SCHED_TT_BLOCK_START;
     754           0 :     out->block_start->bank_idx        = bank_idx;
     755           0 :     out->block_start->parent_bank_idx = block->parent_idx;
     756           0 :     out->block_start->slot            = block->slot;
     757           0 :     block->block_start_signaled = 1;
     758           0 :     return 1UL;
     759           0 :   }
     760             : 
     761           0 :   ulong exec_tile_idx0 = fd_ulong_if( !!exec_fully_ready_bitset, (ulong)fd_ulong_find_lsb( exec_fully_ready_bitset ), ULONG_MAX );
     762           0 :   ulong exec_queued_cnt = block->txn_parsed_cnt-block->txn_exec_in_flight_cnt-block->txn_exec_done_cnt;
     763           0 :   if( FD_LIKELY( exec_queued_cnt>0UL && fd_ulong_popcnt( exec_fully_ready_bitset ) ) ) { /* Optimize for no fork switching. */
     764             :     /* Transaction execution has the highest priority.  Current mainnet
     765             :        block times are very much dominated by critical path transaction
     766             :        execution.  To achieve the fastest block replay speed, we can't
     767             :        afford to make any mistake in critical path dispatching.  Any
     768             :        deviation from perfect critical path dispatching is basically
     769             :        irrecoverable.  As such, we try to keep all the exec tiles busy
     770             :        with transaction execution, but we allow at most one transaction
     771             :        to be in-flight per exec tile.  This is to ensure that whenever a
     772             :        critical path transaction completes, we have at least one exec
     773             :        tile, e.g. the one that just completed said transaction, readily
     774             :        available to continue executing down the critical path. */
     775           0 :     out->txn_exec->txn_idx = fd_rdisp_get_next_ready( sched->rdisp, bank_idx );
     776           0 :     if( FD_UNLIKELY( out->txn_exec->txn_idx==0UL ) ) {
     777             :       /* There are transactions queued but none ready for execution.
     778             :          This implies that there must be in-flight transactions on whose
     779             :          completion the queued transactions depend. So we return and
     780             :          wait for those in-flight transactions to retire.  This is a
     781             :          policy decision to execute as much as we can down the current
     782             :          fork. */
     783           0 :       if( FD_UNLIKELY( !block->txn_exec_in_flight_cnt ) ) {
     784           0 :         sched->print_buf_sz = 0UL;
     785           0 :         print_all( sched, block );
     786           0 :         FD_LOG_NOTICE(( "%s", sched->print_buf ));
     787           0 :         FD_LOG_CRIT(( "invariant violation: no ready transaction found but block->txn_exec_in_flight_cnt==0" ));
     788           0 :       }
     789             : 
     790             :       /* Dispatch more sigverify tasks only if at least one exec tile is
     791             :          executing transactions or completely idle.  Allow at most one
     792             :          sigverify task in-flight per tile, and only dispatch to
     793             :          completely idle tiles. */
     794           0 :       ulong sigverify_ready_bitset = exec_fully_ready_bitset;
     795           0 :       ulong sigverify_queued_cnt = block->txn_parsed_cnt-block->txn_sigverify_in_flight_cnt-block->txn_sigverify_done_cnt;
     796           0 :       if( FD_LIKELY( sigverify_queued_cnt>0UL && fd_ulong_popcnt( sigverify_ready_bitset )>fd_int_if( block->txn_exec_in_flight_cnt>0U, 0, 1 ) ) ) {
     797             :         /* Dispatch transactions for sigverify in parse order. */
     798           0 :         int exec_tile_idx_sigverify = fd_ulong_find_lsb( sigverify_ready_bitset );
     799           0 :         out->task_type = FD_SCHED_TT_TXN_SIGVERIFY;
     800           0 :         out->txn_sigverify->bank_idx = bank_idx;
     801           0 :         out->txn_sigverify->txn_idx  = block->txn_idx[ block->txn_sigverify_done_cnt+block->txn_sigverify_in_flight_cnt ];
     802           0 :         out->txn_sigverify->exec_idx = (ulong)exec_tile_idx_sigverify;
     803           0 :         sched->sigverify_ready_bitset[ 0 ] = fd_ulong_clear_bit( sched->sigverify_ready_bitset[ 0 ], exec_tile_idx_sigverify );
     804           0 :         sched->tile_to_bank_idx[ exec_tile_idx_sigverify ] = bank_idx;
     805           0 :         block->txn_sigverify_in_flight_cnt++;
     806           0 :         if( FD_UNLIKELY( (~sched->txn_exec_ready_bitset[ 0 ])&(~sched->sigverify_ready_bitset[ 0 ])&fd_ulong_mask_lsb( (int)sched->exec_cnt ) ) ) FD_LOG_CRIT(( "invariant violation: txn_exec_ready_bitset 0x%lx sigverify_ready_bitset 0x%lx", sched->txn_exec_ready_bitset[ 0 ], sched->sigverify_ready_bitset[ 0 ] ));
     807           0 :         return 1UL;
     808           0 :       }
     809           0 :       return 0UL;
     810           0 :     }
     811           0 :     out->task_type = FD_SCHED_TT_TXN_EXEC;
     812           0 :     out->txn_exec->bank_idx = bank_idx;
     813           0 :     out->txn_exec->slot     = block->slot;
     814           0 :     out->txn_exec->exec_idx = exec_tile_idx0;
     815           0 :     FD_TEST( out->txn_exec->exec_idx!=ULONG_MAX );
     816             : 
     817           0 :     long now = fd_tickcount();
     818           0 :     ulong delta = (ulong)(now-sched->txn_in_flight_last_tick);
     819           0 :     ulong txn_exec_busy_cnt = sched->exec_cnt-(ulong)fd_ulong_popcnt( exec_ready_bitset0 );
     820           0 :     sched->metrics->txn_none_in_flight_tickcount     += fd_ulong_if( txn_exec_busy_cnt==0UL && sched->txn_in_flight_last_tick!=LONG_MAX, delta, 0UL );
     821           0 :     sched->metrics->txn_weighted_in_flight_tickcount += fd_ulong_if( txn_exec_busy_cnt!=0UL, delta, 0UL );
     822           0 :     sched->metrics->txn_weighted_in_flight_cnt       += delta*txn_exec_busy_cnt;
     823           0 :     sched->txn_in_flight_last_tick = now;
     824             : 
     825           0 :     sched->txn_exec_ready_bitset[ 0 ] = fd_ulong_clear_bit( exec_ready_bitset0, (int)exec_tile_idx0);
     826           0 :     sched->tile_to_bank_idx[ exec_tile_idx0 ] = bank_idx;
     827             : 
     828           0 :     block->txn_exec_in_flight_cnt++;
     829           0 :     sched->metrics->txn_max_in_flight_cnt = fd_uint_max( sched->metrics->txn_max_in_flight_cnt, block->txn_exec_in_flight_cnt );
     830             : 
     831           0 :     ulong total_exec_busy_cnt = sched->exec_cnt-(ulong)fd_ulong_popcnt( sched->txn_exec_ready_bitset[ 0 ]&sched->sigverify_ready_bitset[ 0 ] );
     832           0 :     if( FD_UNLIKELY( (~sched->txn_exec_ready_bitset[ 0 ])&(~sched->sigverify_ready_bitset[ 0 ])&fd_ulong_mask_lsb( (int)sched->exec_cnt ) ) ) FD_LOG_CRIT(( "invariant violation: txn_exec_ready_bitset 0x%lx sigverify_ready_bitset 0x%lx", sched->txn_exec_ready_bitset[ 0 ], sched->sigverify_ready_bitset[ 0 ] ));
     833           0 :     if( FD_UNLIKELY( block->txn_exec_in_flight_cnt+block->txn_sigverify_in_flight_cnt!=total_exec_busy_cnt ) ) {
     834             :       /* Ideally we'd simply assert that the two sides of the equation
     835             :          are equal.  But abandoned blocks throw a wrench into this.  We
     836             :          allow abandoned blocks to have in-flight transactions that are
     837             :          naturally drained while we try to dispatch from another block.
     838             :          In such cases, the total number of in-flight transactions
     839             :          should include the abandoned blocks too.  The contract is that
     840             :          blocks with in-flight transactions cannot be abandoned or
     841             :          demoted from rdisp.  So a dying block has to be the head of one
     842             :          of the staging lanes. */
     843           0 :       ulong total_in_flight = 0UL;
     844           0 :       for( int l=0; l<(int)FD_SCHED_MAX_STAGING_LANES; l++ ) {
     845           0 :         if( fd_ulong_extract_bit( sched->staged_bitset, l ) ) {
     846           0 :           fd_sched_block_t * staged_block = block_pool_ele( sched, sched->staged_head_bank_idx[ l ] );
     847           0 :           if( FD_UNLIKELY( block_is_in_flight( staged_block )&&!(staged_block==block||staged_block->dying) ) ) {
     848           0 :             sched->print_buf_sz = 0UL;
     849           0 :             print_all( sched, staged_block );
     850           0 :             FD_LOG_NOTICE(( "%s", sched->print_buf ));
     851           0 :             FD_LOG_CRIT(( "invariant violation: in-flight block is neither active nor dying" ));
     852           0 :           }
     853           0 :           total_in_flight += staged_block->txn_exec_in_flight_cnt;
     854           0 :           total_in_flight += staged_block->txn_sigverify_in_flight_cnt;
     855           0 :         }
     856           0 :       }
     857           0 :       if( FD_UNLIKELY( total_in_flight!=total_exec_busy_cnt ) ) {
     858           0 :         sched->print_buf_sz = 0UL;
     859           0 :         print_all( sched, block );
     860           0 :         FD_LOG_NOTICE(( "%s", sched->print_buf ));
     861           0 :         FD_LOG_CRIT(( "invariant violation: total_in_flight %lu != total_exec_busy_cnt %lu", total_in_flight, total_exec_busy_cnt ));
     862           0 :       }
     863           0 :       FD_LOG_DEBUG(( "exec_busy_cnt %lu checks out", total_exec_busy_cnt ));
     864           0 :     }
     865           0 :     return 1UL;
     866           0 :   }
     867             : 
     868             :   /* At this point txn_queued_cnt==0 */
     869             : 
     870             :   /* Try to dispatch a sigverify task, but leave one exec tile idle for
     871             :      critical path execution, unless there's not going to be any more
     872             :      real transactions for the critical path.  In the degenerate case of
     873             :      only one exec tile, keep it busy. */
     874           0 :   ulong sigverify_ready_bitset = exec_fully_ready_bitset;
     875           0 :   ulong sigverify_queued_cnt = block->txn_parsed_cnt-block->txn_sigverify_in_flight_cnt-block->txn_sigverify_done_cnt;
     876           0 :   if( FD_LIKELY( sigverify_queued_cnt>0UL && fd_ulong_popcnt( sigverify_ready_bitset )>fd_int_if( block->fec_eos||block->txn_exec_in_flight_cnt>0U||sched->exec_cnt==1UL, 0, 1 ) ) ) {
     877             :     /* Dispatch transactions for sigverify in parse order. */
     878           0 :     int exec_tile_idx_sigverify = fd_ulong_find_lsb( sigverify_ready_bitset );
     879           0 :     out->task_type = FD_SCHED_TT_TXN_SIGVERIFY;
     880           0 :     out->txn_sigverify->txn_idx  = block->txn_idx[ block->txn_sigverify_done_cnt+block->txn_sigverify_in_flight_cnt ];
     881           0 :     out->txn_sigverify->bank_idx = bank_idx;
     882           0 :     out->txn_sigverify->exec_idx = (ulong)exec_tile_idx_sigverify;
     883           0 :     sched->sigverify_ready_bitset[ 0 ] = fd_ulong_clear_bit( sched->sigverify_ready_bitset[ 0 ], exec_tile_idx_sigverify );
     884           0 :     sched->tile_to_bank_idx[ exec_tile_idx_sigverify ] = bank_idx;
     885           0 :     block->txn_sigverify_in_flight_cnt++;
     886           0 :     if( FD_UNLIKELY( (~sched->txn_exec_ready_bitset[ 0 ])&(~sched->sigverify_ready_bitset[ 0 ])&fd_ulong_mask_lsb( (int)sched->exec_cnt ) ) ) FD_LOG_CRIT(( "invariant violation: txn_exec_ready_bitset 0x%lx sigverify_ready_bitset 0x%lx", sched->txn_exec_ready_bitset[ 0 ], sched->sigverify_ready_bitset[ 0 ] ));
     887           0 :     return 1UL;
     888           0 :   }
     889             : 
     890           0 :   if( FD_UNLIKELY( block_should_signal_end( block ) ) ) {
     891           0 :     FD_TEST( block->block_start_signaled );
     892           0 :     out->task_type = FD_SCHED_TT_BLOCK_END;
     893           0 :     out->block_end->bank_idx = bank_idx;
     894           0 :     block->block_end_signaled = 1;
     895           0 :     return 1UL;
     896           0 :   }
     897             : 
     898             :   /* Nothing queued for the active block.  If we haven't received all
     899             :      the FEC sets for it, then return and wait for more FEC sets, while
     900             :      there are in-flight transactions.  This is a policy decision to
     901             :      minimize fork churn and allow for executing down the current fork
     902             :      as much as we can.  If we have received all the FEC sets for it,
     903             :      then we'd still like to return and wait for the in-flight
     904             :      transactions to retire, before switching to a different block.
     905             : 
     906             :      Either way, there should be in-flight transactions.  We deactivate
     907             :      the active block the moment we exhausted transactions from it. */
     908           0 :   if( FD_UNLIKELY( !block_is_in_flight( block ) ) ) {
     909           0 :     sched->print_buf_sz = 0UL;
     910           0 :     print_all( sched, block );
     911           0 :     FD_LOG_NOTICE(( "%s", sched->print_buf ));
     912           0 :     FD_LOG_CRIT(( "invariant violation: expected in-flight transactions but none" ));
     913           0 :   }
     914             : 
     915           0 :   return 0UL;
     916           0 : }
     917             : 
/* fd_sched_task_done retires a task previously handed out by the
   dispatch path.  task_type is one of FD_SCHED_TT_*.  For
   BLOCK_START/BLOCK_END, txn_idx is ignored and the task is attributed
   to the currently active block.  For TXN_EXEC/TXN_SIGVERIFY, the task
   is attributed to the block that exec tile exec_idx was last
   dispatched for (tile_to_bank_idx).  Crashes on invariant violations
   rather than returning an error. */
void
fd_sched_task_done( fd_sched_t * sched, ulong task_type, ulong txn_idx, ulong exec_idx ) {
  FD_TEST( sched->canary==FD_SCHED_MAGIC );

  /* Resolve which block this completion belongs to. */
  ulong bank_idx = ULONG_MAX;
  switch( task_type ) {
    case FD_SCHED_TT_BLOCK_START:
    case FD_SCHED_TT_BLOCK_END: {
      (void)txn_idx;
      bank_idx = sched->active_bank_idx;
      break;
    }
    case FD_SCHED_TT_TXN_EXEC:
    case FD_SCHED_TT_TXN_SIGVERIFY: {
      FD_TEST( txn_idx<FD_SCHED_MAX_DEPTH );
      bank_idx = sched->tile_to_bank_idx[ exec_idx ];
      break;
    }
    default: FD_LOG_CRIT(( "unsupported task_type %lu", task_type ));
  }
  fd_sched_block_t * block = block_pool_ele( sched, bank_idx );

  if( FD_UNLIKELY( !block->staged ) ) {
    /* Invariant: only staged blocks can have in-flight transactions. */
    FD_LOG_CRIT(( "invariant violation: block->staged==0, slot %lu, parent slot %lu",
                  block->slot, block->parent_slot ));
  }
  if( FD_UNLIKELY( !block->in_rdisp ) ) {
    /* Invariant: staged blocks must be in the dispatcher. */
    FD_LOG_CRIT(( "invariant violation: block->in_rdisp==0, slot %lu, parent slot %lu",
                  block->slot, block->parent_slot ));
  }

  /* Track high-water marks of pool occupancy for this block's metrics. */
  block->txn_pool_max_popcnt   = fd_ulong_max( block->txn_pool_max_popcnt, FD_SCHED_MAX_DEPTH-sched->txn_pool_free_cnt );
  block->block_pool_max_popcnt = fd_ulong_max( block->block_pool_max_popcnt, sched->block_pool_popcnt );

  int exec_tile_idx = (int)exec_idx;

  switch( task_type ) {
    case FD_SCHED_TT_BLOCK_START: {
      FD_TEST( !block->block_start_done );
      block->block_start_done = 1;
      break;
    }
    case FD_SCHED_TT_BLOCK_END: {
      /* It may seem redundant to be invoking task_done() on these
         somewhat fake tasks.  But these are necessary to drive state
         transition for empty blocks or slow blocks. */
      FD_TEST( !block->block_end_done );
      block->block_end_done = 1;
      break;
    }
    case FD_SCHED_TT_TXN_EXEC: {
      /* Accumulate busy-tile-weighted in-flight time: delta ticks since
         the last sample, weighted by how many exec tiles were busy. */
      long now = fd_tickcount();
      ulong delta = (ulong)(now-sched->txn_in_flight_last_tick);
      ulong txn_exec_busy_cnt = sched->exec_cnt-(ulong)fd_ulong_popcnt( sched->txn_exec_ready_bitset[ 0 ] );
      sched->metrics->txn_weighted_in_flight_tickcount += delta;
      sched->metrics->txn_weighted_in_flight_cnt       += delta*txn_exec_busy_cnt;
      sched->txn_in_flight_last_tick = now;

      block->txn_exec_done_cnt++;
      block->txn_exec_in_flight_cnt--;
      sched->metrics->txn_exec_done_cnt++;
      txn_bitset_insert( sched->exec_done_set, txn_idx );
      if( txn_bitset_test( sched->exec_done_set, txn_idx ) && txn_bitset_test( sched->sigverify_done_set, txn_idx ) ) {
        /* Release txn_idx if both exec and sigverify are done.  This is
           guaranteed to only happen once per transaction because
           whichever one completed first would not release. */
        fd_rdisp_complete_txn( sched->rdisp, txn_idx, 1 );
        sched->txn_pool_free_cnt++;
        block->txn_done_cnt++;
        sched->metrics->txn_done_cnt++;
      } else {
        /* Exec finished first: tell the dispatcher without releasing. */
        fd_rdisp_complete_txn( sched->rdisp, txn_idx, 0 );
      }

      /* Mark the exec tile ready again; it must have been busy. */
      FD_TEST( !fd_ulong_extract_bit( sched->txn_exec_ready_bitset[ 0 ], exec_tile_idx ) );
      sched->txn_exec_ready_bitset[ 0 ] = fd_ulong_set_bit( sched->txn_exec_ready_bitset[ 0 ], exec_tile_idx );
      break;
    }
    case FD_SCHED_TT_TXN_SIGVERIFY: {
      block->txn_sigverify_done_cnt++;
      block->txn_sigverify_in_flight_cnt--;
      sched->metrics->txn_sigverify_done_cnt++;
      txn_bitset_insert( sched->sigverify_done_set, txn_idx );
      if( txn_bitset_test( sched->exec_done_set, txn_idx ) && txn_bitset_test( sched->sigverify_done_set, txn_idx ) ) {
        /* Release txn_idx if both exec and sigverify are done.  This is
           guaranteed to only happen once per transaction because
           whichever one completed first would not release.  Note: if
           sigverify finishes first, the dispatcher is not notified here;
           the exec completion path handles that. */
        fd_rdisp_complete_txn( sched->rdisp, txn_idx, 1 );
        sched->txn_pool_free_cnt++;
        block->txn_done_cnt++;
        sched->metrics->txn_done_cnt++;
      }

      /* Mark the tile's sigverify slot ready again; it must have been busy. */
      FD_TEST( !fd_ulong_extract_bit( sched->sigverify_ready_bitset[ 0 ], exec_tile_idx ) );
      sched->sigverify_ready_bitset[ 0 ] = fd_ulong_set_bit( sched->sigverify_ready_bitset[ 0 ], exec_tile_idx );
      break;
    }
  }

  /* A dying block whose last in-flight task just drained can now be
     abandoned for real. */
  if( FD_UNLIKELY( block->dying && !block_is_in_flight( block ) ) ) {
    if( FD_UNLIKELY( sched->active_bank_idx==bank_idx ) ) {
      FD_LOG_CRIT(( "invariant violation: active block shouldn't be dying, bank_idx %lu, slot %lu, parent slot %lu",
                    bank_idx, block->slot, block->parent_slot ));
    }
    FD_LOG_DEBUG(( "dying block %lu drained", block->slot ));
    subtree_abandon( sched, block );
    return;
  }

  if( FD_UNLIKELY( !block->dying && sched->active_bank_idx!=bank_idx ) ) {
    /* Block is not dead.  So we should be actively replaying it. */
    fd_sched_block_t * active_block = block_pool_ele( sched, sched->active_bank_idx );
    FD_LOG_CRIT(( "invariant violation: sched->active_bank_idx %lu, slot %lu, parent slot %lu, bank_idx %lu, slot %lu, parent slot %lu",
                  sched->active_bank_idx, active_block->slot, active_block->parent_slot,
                  bank_idx, block->slot, block->parent_slot ));
  }

  maybe_switch_block( sched, bank_idx );
}
    1039             : 
/* fd_sched_block_abandon abandons the actively replayed block (and,
   via subtree_abandon, everything descending from it), then resets the
   active block and tries to activate another one.  Crashes if bank_idx
   is not the active block. */
void
fd_sched_block_abandon( fd_sched_t * sched, ulong bank_idx ) {
  FD_TEST( sched->canary==FD_SCHED_MAGIC );
  FD_TEST( bank_idx<sched->block_cnt_max );

  fd_sched_block_t * block = block_pool_ele( sched, bank_idx );
  if( FD_UNLIKELY( bank_idx!=sched->active_bank_idx ) ) {
    /* Invariant: abandoning should only be performed on actively
       replayed blocks.  We impose this requirement on the caller
       because the dispatcher expects blocks to be abandoned in the same
       order that they were added, and having this requirement makes it
       easier to please the dispatcher. */
    sched->print_buf_sz = 0UL;
    print_all( sched, block );
    FD_LOG_NOTICE(( "%s", sched->print_buf ));
    FD_LOG_CRIT(( "invariant violation: active_bank_idx %lu, bank_idx %lu, slot %lu, parent slot %lu",
                  sched->active_bank_idx, bank_idx, block->slot, block->parent_slot ));
  }

  subtree_abandon( sched, block );

  /* Reset the active block. */
  FD_LOG_DEBUG(( "reset active_bank_idx %lu", sched->active_bank_idx ));
  sched->active_bank_idx = ULONG_MAX;
  sched->metrics->deactivate_abandoned_cnt++;
  FD_LOG_INFO(( "block %lu abandoned", block->slot ));
  sched->print_buf_sz = 0UL;
  print_all( sched, block );
  FD_LOG_DEBUG(( "%s", sched->print_buf ));
  try_activate_block( sched );
}
    1071             : 
    1072             : void
    1073           0 : fd_sched_block_add_done( fd_sched_t * sched, ulong bank_idx, ulong parent_bank_idx, ulong slot ) {
    1074           0 :   FD_TEST( sched->canary==FD_SCHED_MAGIC );
    1075           0 :   FD_TEST( bank_idx<sched->block_cnt_max );
    1076             : 
    1077           0 :   fd_sched_block_t * block = block_pool_ele( sched, bank_idx );
    1078           0 :   add_block( sched, bank_idx, parent_bank_idx );
    1079           0 :   block->slot                   = slot;
    1080           0 :   block->txn_parsed_cnt         = UINT_MAX;
    1081           0 :   block->txn_exec_done_cnt      = UINT_MAX;
    1082           0 :   block->txn_sigverify_done_cnt = UINT_MAX;
    1083           0 :   block->txn_done_cnt           = UINT_MAX;
    1084           0 :   block->fec_eos                = 1;
    1085           0 :   block->block_start_signaled   = 1;
    1086           0 :   block->block_end_signaled     = 1;
    1087           0 :   block->block_start_done       = 1;
    1088           0 :   block->block_end_done         = 1;
    1089           0 :   if( FD_LIKELY( parent_bank_idx!=ULONG_MAX ) ) {
    1090           0 :     fd_sched_block_t * parent_block = block_pool_ele( sched, parent_bank_idx );
    1091           0 :     block->parent_slot = parent_block->slot;
    1092           0 :   }
    1093           0 :   if( FD_UNLIKELY( parent_bank_idx==ULONG_MAX ) ) {
    1094             :     /* Assumes that a NULL parent implies the snapshot slot. */
    1095           0 :     block->parent_slot = ULONG_MAX;
    1096           0 :     block->rooted      = 1;
    1097           0 :     sched->root_idx    = bank_idx;
    1098           0 :   }
    1099           0 : }
    1100             : 
/* fd_sched_advance_root publishes a new root: every block reachable
   from the old root EXCEPT the subtree under new root is pruned from
   the scheduler via a breadth-first walk.  All pruned blocks must have
   finished replaying (no in-flight transactions, not in the
   dispatcher); violations crash.  No-op if root_idx is already the
   root. */
void
fd_sched_advance_root( fd_sched_t * sched, ulong root_idx ) {
  FD_TEST( sched->canary==FD_SCHED_MAGIC );
  FD_TEST( root_idx<sched->block_cnt_max );
  FD_TEST( sched->root_idx<sched->block_cnt_max );

  fd_sched_block_t * new_root = block_pool_ele( sched, root_idx );
  fd_sched_block_t * old_root = block_pool_ele( sched, sched->root_idx );
  if( FD_UNLIKELY( !old_root->rooted ) ) {
    FD_LOG_CRIT(( "invariant violation: old_root is not rooted, slot %lu, parent slot %lu",
                  old_root->slot, old_root->parent_slot ));
  }

  /* Early exit if the new root is the same as the old root. */
  if( FD_UNLIKELY( root_idx==sched->root_idx ) ) {
    FD_LOG_INFO(( "new root is the same as the old root, slot %lu, parent slot %lu",
                  new_root->slot, new_root->parent_slot ));
    return;
  }

  /* BFS visit list threaded through the (repurposed) parent_idx field:
     head is the block being pruned, tail is where new work is appended. */
  fd_sched_block_t * head = old_root;
  head->parent_idx        = ULONG_MAX;
  fd_sched_block_t * tail = head;

  while( head ) {
    FD_TEST( head->in_sched );
    head->in_sched = 0;

    sched->print_buf_sz = 0UL;
    print_block_metrics( sched, head );
    FD_LOG_DEBUG(( "%s", sched->print_buf ));

    ulong child_idx = head->child_idx;
    while( child_idx!=ULONG_MAX ) {
      fd_sched_block_t * child = block_pool_ele( sched, child_idx );
      /* Add children to be visited.  We abuse the parent_idx field to
         link up the next block to visit.  The new root's subtree is
         excluded and survives the prune. */
      if( child!=new_root ) {
        tail->parent_idx = child_idx;
        tail             = child;
        tail->parent_idx = ULONG_MAX;
      }
      child_idx = child->sibling_idx;
    }

    /* Prune the current block.  We will never publish halfway into a
       staging lane, because anything on the rooted fork should have
       finished replaying gracefully and be out of the dispatcher.  In
       fact, anything that we are publishing away should be out of the
       dispatcher at this point.  And there should be no more in-flight
       transactions. */
    if( FD_UNLIKELY( block_is_in_flight( head ) ) ) {
      FD_LOG_CRIT(( "invariant violation: block has transactions in flight (%u exec %u sigverify), slot %lu, parent slot %lu",
                    head->txn_exec_in_flight_cnt, head->txn_sigverify_in_flight_cnt, head->slot, head->parent_slot ));
    }
    if( FD_UNLIKELY( head->in_rdisp ) ) {
      /* We should have removed it from the dispatcher when we were
         notified of the new root, or when in-flight transactions were
         drained. */
      FD_LOG_CRIT(( "invariant violation: block is in the dispatcher, slot %lu, parent slot %lu",
                    head->slot, head->parent_slot ));
    }
    sched->block_pool_popcnt--;
    fd_sched_block_t * next = block_pool_ele( sched, head->parent_idx );
    head = next;
  }

  /* The surviving new root becomes the top of the tree. */
  new_root->parent_idx = ULONG_MAX;
  sched->root_idx = root_idx;
}
    1171             : 
/* fd_sched_root_notify informs the scheduler that consensus has rooted
   the block at root_idx.  Pass 1 walks up from the new root to the old
   root marking everything on that path rooted (crashing if any such
   block is not done, dying, staged, or still in the dispatcher, or if
   the new root is not a descendant of the old root).  Pass 2 walks
   down from the old root along the rooted path and abandons all
   minority-fork siblings.  Does not prune; see
   fd_sched_advance_root. */
void
fd_sched_root_notify( fd_sched_t * sched, ulong root_idx ) {
  FD_TEST( sched->canary==FD_SCHED_MAGIC );
  FD_TEST( root_idx<sched->block_cnt_max );
  FD_TEST( sched->root_idx<sched->block_cnt_max );

  fd_sched_block_t * block    = block_pool_ele( sched, root_idx );
  fd_sched_block_t * old_root = block_pool_ele( sched, sched->root_idx );
  if( FD_UNLIKELY( !old_root->rooted ) ) {
    FD_LOG_CRIT(( "invariant violation: old_root is not rooted, slot %lu, parent slot %lu",
                  old_root->slot, old_root->parent_slot ));
  }

  /* Early exit if the new root is the same as the old root. */
  if( FD_UNLIKELY( root_idx==sched->root_idx ) ) {
    FD_LOG_INFO(( "new root is the same as the old root, slot %lu, parent slot %lu",
                  block->slot, block->parent_slot ));
    return;
  }

  /* Mark every node from the new root up through its parents to the
     old root as being rooted. */
  fd_sched_block_t * curr = block;
  fd_sched_block_t * prev = NULL;
  while( curr ) {
    if( FD_UNLIKELY( !block_is_done( curr ) ) ) {
      FD_LOG_CRIT(( "invariant violation: rooting a block that is not done, slot %lu, parent slot %lu",
                    curr->slot, curr->parent_slot ));
    }
    if( FD_UNLIKELY( curr->dying ) ) {
      FD_LOG_CRIT(( "invariant violation: rooting a block that is dying, slot %lu, parent slot %lu",
                    curr->slot, curr->parent_slot ));
    }
    if( FD_UNLIKELY( curr->staged ) ) {
      FD_LOG_CRIT(( "invariant violation: rooting a block that is staged, slot %lu, parent slot %lu",
                    curr->slot, curr->parent_slot ));
    }
    if( FD_UNLIKELY( curr->in_rdisp ) ) {
      FD_LOG_CRIT(( "invariant violation: rooting a block that is in the dispatcher, slot %lu, parent slot %lu",
                    curr->slot, curr->parent_slot ));
    }
    curr->rooted = 1;
    prev = curr;
    curr = block_pool_ele( sched, curr->parent_idx );
  }

  /* If we didn't reach the old root, the new root is not a descendant. */
  if( FD_UNLIKELY( prev!=old_root ) ) {
    FD_LOG_CRIT(( "invariant violation: new root is not a descendant of old root, new root slot %lu, parent slot %lu, old root slot %lu, parent slot %lu",
                  block->slot, block->parent_slot, old_root->slot, old_root->parent_slot ));
  }

  ulong old_active_bank_idx = sched->active_bank_idx;

  /* Now traverse from old root towards new root, and abandon all
     minority forks. */
  curr = old_root;
  while( curr && curr->rooted && curr!=block ) { /* curr!=block to avoid abandoning good forks. */
    fd_sched_block_t * rooted_child_block = NULL;
    ulong              child_idx          = curr->child_idx;
    while( child_idx!=ULONG_MAX ) {
      fd_sched_block_t * child = block_pool_ele( sched, child_idx );
      if( child->rooted ) {
        rooted_child_block = child;
      } else {
        /* This is a minority fork. */
        FD_LOG_DEBUG(( "abandoning minority fork on block %lu", child->slot ));
        subtree_abandon( sched, child );
      }
      child_idx = child->sibling_idx;
    }
    curr = rooted_child_block;
  }

  /* If the active block got abandoned, we need to reset it.
     subtree_abandon may have cleared active_bank_idx above. */
  if( sched->active_bank_idx==ULONG_MAX ) {
    sched->metrics->deactivate_pruned_cnt += fd_uint_if( old_active_bank_idx!=ULONG_MAX, 1U, 0U );
    try_activate_block( sched );
  }
}
    1252             : 
    1253             : fd_txn_p_t *
    1254           0 : fd_sched_get_txn( fd_sched_t * sched, ulong txn_idx ) {
    1255           0 :   FD_TEST( sched->canary==FD_SCHED_MAGIC );
    1256           0 :   if( FD_UNLIKELY( txn_idx>=FD_SCHED_MAX_DEPTH ) ) {
    1257           0 :     return NULL;
    1258           0 :   }
    1259           0 :   return sched->txn_pool+txn_idx;
    1260           0 : }
    1261             : 
    1262             : fd_hash_t *
    1263           0 : fd_sched_get_poh( fd_sched_t * sched, ulong bank_idx ) {
    1264           0 :   FD_TEST( sched->canary==FD_SCHED_MAGIC );
    1265           0 :   FD_TEST( bank_idx<sched->block_cnt_max );
    1266           0 :   fd_sched_block_t * block = block_pool_ele( sched, bank_idx );
    1267           0 :   return &block->poh;
    1268           0 : }
    1269             : 
    1270             : uint
    1271           0 : fd_sched_get_shred_cnt( fd_sched_t * sched, ulong bank_idx ) {
    1272           0 :   FD_TEST( sched->canary==FD_SCHED_MAGIC );
    1273           0 :   FD_TEST( bank_idx<sched->block_cnt_max );
    1274           0 :   fd_sched_block_t * block = block_pool_ele( sched, bank_idx );
    1275           0 :   return block->shred_cnt;
    1276           0 : }
    1277             : 
/* fd_sched_get_state_cstr renders the scheduler's metrics and state
   into the internal print buffer and returns it as a cstr.  The buffer
   is shared and reset on each call, so the returned pointer is only
   valid until the next print_* / get_state call. */
char *
fd_sched_get_state_cstr( fd_sched_t * sched ) {
  sched->print_buf_sz = 0UL;
  print_metrics( sched );
  print_sched( sched );
  return sched->print_buf;
}
    1285             : 
    1286           0 : void * fd_sched_leave ( fd_sched_t * sched ) { return sched; }
    1287           0 : void * fd_sched_delete( void * mem         ) { return   mem; }
    1288             : 
    1289             : 
    1290             : /* Internal helpers. */
    1291             : 
/* add_block initializes the block-pool element at bank_idx to a fresh
   leaf state and links it into the block tree under parent_bank_idx.
   A parent_bank_idx of ULONG_MAX leaves the block unlinked (tree
   root).  A dying parent marks the child dying too.  The element must
   not already be in the scheduler. */
static void
add_block( fd_sched_t * sched,
           ulong        bank_idx,
           ulong        parent_bank_idx ) {
  fd_sched_block_t * block = block_pool_ele( sched, bank_idx );
  FD_TEST( !block->in_sched );
  sched->block_pool_popcnt++;

  /* Reset all transaction progress counters and per-block metrics. */
  block->txn_parsed_cnt              = 0U;
  block->txn_exec_in_flight_cnt      = 0U;
  block->txn_exec_done_cnt           = 0U;
  block->txn_sigverify_in_flight_cnt = 0U;
  block->txn_sigverify_done_cnt      = 0U;
  block->txn_done_cnt                = 0U;
  block->txn_pool_max_popcnt         = FD_SCHED_MAX_DEPTH-sched->txn_pool_free_cnt;
  block->block_pool_max_popcnt       = sched->block_pool_popcnt;
  block->shred_cnt                   = 0U;
  block->fec_cnt                     = 0U;

  /* Reset FEC parsing state: empty buffer, start-of-block. */
  block->mblks_rem    = 0UL;
  block->txns_rem     = 0UL;
  block->fec_buf_sz   = 0U;
  block->fec_buf_boff = 0U;
  block->fec_buf_soff = 0U;
  block->fec_eob      = 0;
  block->fec_sob      = 1;

  /* Reset lifecycle flags; the block is now tracked by the scheduler
     but not yet staged in the dispatcher. */
  block->fec_eos              = 0;
  block->rooted               = 0;
  block->dying                = 0;
  block->in_sched             = 1;
  block->in_rdisp             = 0;
  block->block_start_signaled = 0;
  block->block_end_signaled   = 0;
  block->block_start_done     = 0;
  block->block_end_done       = 0;
  block->staged               = 0;

  block->luf_depth = 0UL;

  /* New leaf node, no child, no sibling. */
  block->child_idx   = ULONG_MAX;
  block->sibling_idx = ULONG_MAX;
  block->parent_idx  = ULONG_MAX;

  if( FD_UNLIKELY( parent_bank_idx==ULONG_MAX ) ) {
    return;
  }

  /* node->parent link */
  fd_sched_block_t * parent_block = block_pool_ele( sched, parent_bank_idx );
  block->parent_idx = parent_bank_idx;

  /* parent->node and sibling->node links: append to the end of the
     parent's singly linked child list. */
  ulong child_idx = bank_idx;
  if( FD_LIKELY( parent_block->child_idx==ULONG_MAX ) ) { /* Optimize for no forking. */
    parent_block->child_idx = child_idx;
  } else {
    fd_sched_block_t * curr_block = block_pool_ele( sched, parent_block->child_idx );
    while( curr_block->sibling_idx!=ULONG_MAX ) {
      curr_block = block_pool_ele( sched, curr_block->sibling_idx );
    }
    curr_block->sibling_idx = child_idx;
  }

  /* Dying is inherited: a child of a dying block can never survive. */
  if( FD_UNLIKELY( parent_block->dying ) ) {
    block->dying = 1;
  }
}
    1361             : 
/* CHECK( cond ): bail out of the enclosing parser function with
   FD_SCHED_PARSER_AGAIN_LATER when cond does not hold, i.e. when the
   buffered bytes do not (yet) contain enough data to make progress. */
#define CHECK( cond )  do {             \
  if( FD_UNLIKELY( !(cond) ) ) {        \
    return FD_SCHED_PARSER_AGAIN_LATER; \
  }                                     \
} while( 0 )

/* CHECK that it is safe to read at least n more bytes. */
#define CHECK_LEFT( n ) CHECK( (n)<=(block->fec_buf_sz-block->fec_buf_soff) )
    1370             : 
    1371             : /* Consume as much as possible from the buffer.  By the end of this
    1372             :    function, we will either have residual data that is unparseable only
    1373             :    because it is a batch that straddles FEC set boundaries, or we will
    1374             :    have reached the end of a batch.  In the former case, any remaining
    1375             :    bytes should be concatenated with the next FEC set for further
    1376             :    parsing.  In the latter case, any remaining bytes should be thrown
    1377             :    away. */
FD_WARN_UNUSED static int
fd_sched_parse( fd_sched_t * sched, fd_sched_block_t * block, fd_sched_alut_ctx_t * alut_ctx ) {
  while( 1 ) {
    /* Drain all transactions remaining in the current microblock. */
    while( block->txns_rem>0UL ) {
      int err;
      if( FD_UNLIKELY( (err=fd_sched_parse_txn( sched, block, alut_ctx ))!=FD_SCHED_PARSER_OK ) ) {
        return err;
      }
    }
    if( block->txns_rem==0UL && block->mblks_rem>0UL ) {
      /* Start of the next microblock: read its header for the txn count
         and the PoH hash. */
      CHECK_LEFT( sizeof(fd_microblock_hdr_t) );
      fd_microblock_hdr_t * hdr = (fd_microblock_hdr_t *)fd_type_pun( block->fec_buf+block->fec_buf_soff );
      block->fec_buf_soff      += (uint)sizeof(fd_microblock_hdr_t);

      memcpy( block->poh.hash, hdr->hash, sizeof(block->poh.hash) );
      block->txns_rem = hdr->txn_cnt;
      block->mblks_rem--;
      continue;
    }
    if( block->txns_rem==0UL && block->mblks_rem==0UL && block->fec_sob ) {
      /* Start of a new batch: the first 8 bytes are the microblock
         count.  A batch always starts at parse offset 0 because the
         end-of-batch handling below resets the buffer. */
      CHECK_LEFT( sizeof(ulong) );
      FD_TEST( block->fec_buf_soff==0U );
      block->mblks_rem     = FD_LOAD( ulong, block->fec_buf );
      block->fec_buf_soff += (uint)sizeof(ulong);
      /* FIXME what happens if someone sends us mblks_rem==0UL here? */

      block->fec_sob = 0;
      continue;
    }
    if( block->txns_rem==0UL && block->mblks_rem==0UL ) {
      /* Batch fully parsed and the next one hasn't started.  Done. */
      break;
    }
  }
  if( block->fec_eob ) {
    /* Ignore trailing bytes at the end of a batch. */
    sched->metrics->bytes_ingested_unparsed_cnt += block->fec_buf_sz-block->fec_buf_soff;
    block->fec_buf_boff += block->fec_buf_sz;
    block->fec_buf_soff = 0U;
    block->fec_buf_sz   = 0U;
    block->fec_sob      = 1;
    block->fec_eob      = 0;
  }
  return FD_SCHED_PARSER_OK;
}
    1422             : 
/* fd_sched_parse_txn parses one serialized transaction from the block's
   FEC buffer and inserts it into the dispatcher.  Returns
   FD_SCHED_PARSER_OK on success, FD_SCHED_PARSER_AGAIN_LATER when the
   buffered bytes don't yet contain a complete transaction, and
   FD_SCHED_PARSER_BAD_BLOCK when the block holds more transactions than
   any valid block could. */
FD_WARN_UNUSED static int
fd_sched_parse_txn( fd_sched_t * sched, fd_sched_block_t * block, fd_sched_alut_ctx_t * alut_ctx ) {
  fd_txn_t * txn = fd_type_pun( block->txn );

  /* FIXME: For the replay pipeline, we allow up to 128 instructions per
     transaction.  Note that we are not concomitantly bumping the size
     of fd_txn_t.  We allow this because transactions like that do get
     packed by other validators, so we have to replay them.  Those
     transactions will eventually fail in the runtime, which imposes a
     limit of 64 instructions, but unfortunately they are not tossed out
     at parse time and they land on chain.  static_instruction_limit is
     going to enforece this limit at parse time, and transactions like
     that would not land on chain.  Then this short term change should
     be rolled back. */
  ulong pay_sz = 0UL;
  ulong txn_sz = fd_txn_parse_core( block->fec_buf+block->fec_buf_soff,
                                    fd_ulong_min( FD_TXN_MTU, block->fec_buf_sz-block->fec_buf_soff ),
                                    txn,
                                    NULL,
                                    &pay_sz,
                                    FD_TXN_INSTR_MAX*2UL );

  if( FD_UNLIKELY( !pay_sz || !txn_sz ) ) {
    /* Can't parse out a full transaction. */
    return FD_SCHED_PARSER_AGAIN_LATER;
  }

  if( FD_UNLIKELY( block->txn_parsed_cnt>=FD_MAX_TXN_PER_SLOT ) ) {
    /* The block contains more transactions than a valid block would.
       Mark the block dead instead of keep processing it. */
    return FD_SCHED_PARSER_BAD_BLOCK;
  }

  /* Try to expand ALUTs.  If the slot hashes sysvar can't be read, or
     expanding the lookup tables fails, fall back to adding the txn in
     serializing mode (no resolved ALUT addresses). */
  int has_aluts   = txn->transaction_version==FD_TXN_V0 && txn->addr_table_adtl_cnt>0;
  int serializing = 0;
  if( has_aluts ) {
    fd_funk_t * funk = fd_accdb_user_v1_funk( alut_ctx->accdb );
    uchar __attribute__((aligned(FD_SLOT_HASHES_GLOBAL_ALIGN))) slot_hashes_mem[ FD_SYSVAR_SLOT_HASHES_FOOTPRINT ];
    fd_slot_hashes_global_t const * slot_hashes_global = fd_sysvar_slot_hashes_read( funk, alut_ctx->xid, slot_hashes_mem );
    if( FD_LIKELY( slot_hashes_global ) ) {
      fd_slot_hash_t * slot_hash = deq_fd_slot_hash_t_join( (uchar *)slot_hashes_global + slot_hashes_global->hashes_offset );
      serializing = !!fd_runtime_load_txn_address_lookup_tables( txn, block->fec_buf+block->fec_buf_soff, funk, alut_ctx->xid, alut_ctx->els, slot_hash, block->aluts );
      sched->metrics->alut_success_cnt += (uint)!serializing;
    } else {
      serializing = 1;
    }
  }

  /* Hand the parsed txn to the dispatcher and account for it. */
  ulong bank_idx = (ulong)(block-sched->block_pool);
  ulong txn_idx   = fd_rdisp_add_txn( sched->rdisp, bank_idx, txn, block->fec_buf+block->fec_buf_soff, serializing ? NULL : block->aluts, serializing );
  FD_TEST( txn_idx!=0UL );
  sched->metrics->txn_parsed_cnt++;
  sched->metrics->alut_serializing_cnt += (uint)serializing;
  sched->txn_pool_free_cnt--;
  fd_txn_p_t * txn_p = sched->txn_pool + txn_idx;
  txn_p->payload_sz  = pay_sz;

  /* Map the txn's block-relative byte range to the shred indices that
     cover it, via sorted search over shred_blk_offs. */
  txn_p->start_shred_idx = (ushort)fd_sort_up_uint_split( block->shred_blk_offs, block->shred_cnt, block->fec_buf_boff+block->fec_buf_soff );
  txn_p->start_shred_idx = fd_ushort_if( txn_p->start_shred_idx>0U, (ushort)(txn_p->start_shred_idx-1U), txn_p->start_shred_idx );
  txn_p->end_shred_idx = (ushort)fd_sort_up_uint_split( block->shred_blk_offs, block->shred_cnt, block->fec_buf_boff+block->fec_buf_soff+(uint)pay_sz );

  fd_memcpy( txn_p->payload, block->fec_buf+block->fec_buf_soff, pay_sz );
  fd_memcpy( TXN(txn_p),     txn,                                txn_sz );
  txn_bitset_remove( sched->exec_done_set, txn_idx );
  txn_bitset_remove( sched->sigverify_done_set, txn_idx );
  block->txn_idx[ block->txn_parsed_cnt ] = txn_idx;
  block->fec_buf_soff += (uint)pay_sz;
  block->txn_parsed_cnt++;
#if FD_SCHED_SKIP_SIGVERIFY
  txn_bitset_insert( sched->sigverify_done_set, txn_idx );
  block->txn_sigverify_done_cnt++;
#endif
  block->txns_rem--;
  return FD_SCHED_PARSER_OK;
}
    1499             : 
    1500             : #undef CHECK
    1501             : #undef CHECK_LEFT
    1502             : 
/* try_activate_block attempts to select a new active block for
   scheduling.  Preference order: (1) the head block of an allocated
   staging lane whose parent block has fully completed; (2) otherwise,
   stage the longest stageable unstaged fork onto a free lane and
   activate its head.  Leaves sched->active_bank_idx unmodified (i.e.
   possibly ULONG_MAX) if neither is possible. */
static void
try_activate_block( fd_sched_t * sched ) {

  /* See if there are any allocated staging lanes that we can activate
     for scheduling ... */
  ulong staged_bitset = sched->staged_bitset;
  while( staged_bitset ) {
    int lane_idx  = fd_ulong_find_lsb( staged_bitset );
    staged_bitset = fd_ulong_pop_lsb( staged_bitset );

    ulong              head_idx     = sched->staged_head_bank_idx[ lane_idx ];
    fd_sched_block_t * head_block   = block_pool_ele( sched, head_idx );
    fd_sched_block_t * parent_block = block_pool_ele( sched, head_block->parent_idx );
    if( FD_UNLIKELY( parent_block->dying ) ) {
      /* Invariant: no child of a dying block should be staged. */
      FD_LOG_CRIT(( "invariant violation: staged_head_bank_idx %lu, slot %lu, parent slot %lu on lane %d has parent_block->dying set, slot %lu, parent slot %lu",
                    head_idx, head_block->slot, head_block->parent_slot, lane_idx, parent_block->slot, parent_block->parent_slot ));
    }
    //FIXME: restore this invariant check when we have immediate demotion of dying blocks
    // if( FD_UNLIKELY( head_block->dying ) ) {
    //   /* Invariant: no dying block should be staged. */
    //   FD_LOG_CRIT(( "invariant violation: staged_head_bank_idx %lu, slot %lu, prime %lu on lane %u has head_block->dying set",
    //                 head_idx, (ulong)head_block->block_id.slot, (ulong)head_block->block_id.prime, lane_idx ));
    // }
    if( block_is_done( parent_block ) && block_is_activatable( head_block ) ) {
      /* ... Yes, on this staging lane the parent block is done.  So we
         can switch to the staged child. */
      sched->active_bank_idx = head_idx;
      sched->metrics->lane_switch_cnt++;
      return;
    }
  }

  /* ... No, promote unstaged blocks. */
  ulong root_idx = sched->root_idx;
  if( FD_UNLIKELY( root_idx==ULONG_MAX ) ) {
    FD_LOG_CRIT(( "invariant violation: root_idx==ULONG_MAX indicating fd_sched is unintialized" ));
  }
  /* Find and stage the longest stageable unstaged fork.  This is a
     policy decision. */
  ulong depth = compute_longest_unstaged_fork( sched, root_idx );
  if( FD_LIKELY( depth>0UL ) ) {
    if( FD_UNLIKELY( sched->staged_bitset==fd_ulong_mask_lsb( FD_SCHED_MAX_STAGING_LANES ) ) ) {
      /* No more staging lanes available.  All of them are occupied by
         slow squatters.  Demote one of them. */
      //FIXME implement this, note that only empty blocks can be
      //demoted, and so blocks with in-flight transactions, including
      //dying in-flight blocks, shouldn't be demoted
      FD_LOG_CRIT(( "unimplemented" ));
      sched->metrics->lane_demoted_cnt++;
      // sched->metrics->block_demoted_cnt++; for every demoted block
    }
    FD_TEST( sched->staged_bitset!=fd_ulong_mask_lsb( FD_SCHED_MAX_STAGING_LANES ) );
    int lane_idx = fd_ulong_find_lsb( ~sched->staged_bitset );
    if( FD_UNLIKELY( lane_idx>=(int)FD_SCHED_MAX_STAGING_LANES ) ) {
      FD_LOG_CRIT(( "invariant violation: lane_idx %d, sched->staged_bitset %lx",
                    lane_idx, sched->staged_bitset ));
    }
    ulong head_bank_idx = stage_longest_unstaged_fork( sched, root_idx, lane_idx );
    if( FD_UNLIKELY( head_bank_idx==ULONG_MAX ) ) {
      /* We found a promotable fork depth>0.  This should not happen. */
      FD_LOG_CRIT(( "invariant violation: head_bank_idx==ULONG_MAX" ));
    }
    /* We don't bother with promotion unless the block is immediately
       dispatchable.  So it's okay to set the active block here. */
    sched->active_bank_idx = head_bank_idx;
    return;
  }
  /* No unstaged blocks to promote.  So we're done.  Yay. */
}
    1573             : 
    1574             : static void
    1575           0 : check_or_set_active_block( fd_sched_t * sched ) {
    1576           0 :   if( FD_UNLIKELY( sched->active_bank_idx==ULONG_MAX ) ) {
    1577           0 :     try_activate_block( sched );
    1578           0 :   } else {
    1579           0 :     fd_sched_block_t * active_block = block_pool_ele( sched, sched->active_bank_idx );
    1580           0 :     if( FD_UNLIKELY( block_should_deactivate( active_block ) ) ) {
    1581           0 :       sched->print_buf_sz = 0UL;
    1582           0 :       print_all( sched, active_block );
    1583           0 :       FD_LOG_NOTICE(( "%s", sched->print_buf ));
    1584           0 :       FD_LOG_CRIT(( "invariant violation: should have been deactivated" ));
    1585           0 :     }
    1586           0 :   }
    1587           0 : }
    1588             : 
/* subtree_abandon marks block and every descendant as dying and, where
   the dispatcher's ordering constraints allow, removes them from the
   dispatcher and releases their staging lanes.  It's safe to call this
   function more than once on the same block. */
static void
subtree_abandon( fd_sched_t * sched, fd_sched_block_t * block ) {
  if( FD_UNLIKELY( block->rooted ) ) {
    FD_LOG_CRIT(( "invariant violation: rooted block should not be abandoned, slot %lu, parent slot %lu",
                  block->slot, block->parent_slot ));
  }
  /* All minority fork nodes pass through this function eventually.  So
     this is a good point to check per-node invariants for minority
     forks. */
  if( FD_UNLIKELY( block->staged && !block->in_rdisp ) ) {
    FD_LOG_CRIT(( "invariant violation: staged block is not in the dispatcher, slot %lu, parent slot %lu",
                  block->slot, block->parent_slot ));
  }

  /* Setting the flag is non-optional and can happen more than once. */
  block->dying = 1;

  /* Removal from dispatcher should only happen once. */
  if( block->in_rdisp ) {
    fd_sched_block_t * parent = block_pool_ele( sched, block->parent_idx );
    if( FD_UNLIKELY( !parent ) ) {
      /* Only the root has no parent.  Abandon should never be called on
         the root.  So any block we are trying to abandon should have a
         parent. */
      FD_LOG_CRIT(( "invariant violation: parent not found slot %lu, parent slot %lu",
                    block->slot, block->parent_slot ));
    }

    /* The dispatcher expects blocks to be abandoned in the same order
       that they were added on each lane.  There are no requirements on
       the order of abandoning if two blocks are not on the same lane,
       or if a block is unstaged.  This means that in general we
       shouldn't abandon a child block if the parent hasn't been
       abandoned yet, if and only if they are on the same lane.  So wait
       until we can abandon the parent, and then descend down the fork
       tree to ensure orderly abandoning. */
    int in_order = !parent->in_rdisp || /* parent is not in the dispatcher */
                   !parent->staged   || /* parent is in the dispatcher but not staged */
                   !block->staged    || /* parent is in the dispatcher and staged but this block is unstaged */
                   block->staging_lane!=parent->staging_lane; /* this block is on a different staging lane than its parent */

    /* If the block being abandoned is the currently active lane head,
       clear the active block so the scheduler doesn't keep dispatching
       from it. */
    if( FD_UNLIKELY( in_order && block->staged && sched->active_bank_idx==sched->staged_head_bank_idx[ block->staging_lane ] && sched->active_bank_idx!=ULONG_MAX ) ) {
      FD_TEST( block_pool_ele( sched, sched->active_bank_idx )==block );
      FD_LOG_DEBUG(( "reset active_bank_idx %lu", sched->active_bank_idx ));
      sched->active_bank_idx = ULONG_MAX;
    }

    /* We inform the dispatcher of an abandon only when there are no
       more in-flight transactions.  Otherwise, if the dispatcher
       recycles the same txn_id that was just abandoned, and we receive
       completion of an in-flight transaction whose txn_id was just
       recycled. */
    // FIXME The recycling might be fine now that we no longer use
    // txn_id to index into anything.  We might be able to just drop
    // txn_id on abandoned blocks.
    int abandon = in_order && block->txn_exec_in_flight_cnt==0 && block->txn_sigverify_in_flight_cnt==0;

    if( abandon ) {
      block->in_rdisp = 0;
      fd_rdisp_abandon_block( sched->rdisp, (ulong)(block-sched->block_pool) );
      sched->txn_pool_free_cnt += block->txn_parsed_cnt-block->txn_done_cnt; /* in_flight_cnt==0 */
      sched->metrics->block_abandoned_cnt++;
      sched->metrics->txn_abandoned_parsed_cnt    += block->txn_parsed_cnt;
      sched->metrics->txn_abandoned_exec_done_cnt += block->txn_exec_done_cnt;
      sched->metrics->txn_abandoned_done_cnt      += block->txn_done_cnt;

      /* Now release the staging lane. */
      //FIXME when demote supports non-empty blocks, we should demote
      //the block from the lane unconditionally and immediately,
      //regardles of whether it's safe to abandon or not.  So a block
      //would go immediately from staged to unstaged and eventually to
      //abandoned.
      if( FD_LIKELY( block->staged ) ) {
        block->staged = 0;
        sched->staged_bitset = fd_ulong_clear_bit( sched->staged_bitset, (int)block->staging_lane );
        sched->staged_head_bank_idx[ block->staging_lane ] = ULONG_MAX;
      }
    }
  }

  /* Abandon the entire fork chaining off of this block. */
  ulong child_idx = block->child_idx;
  while( child_idx!=ULONG_MAX ) {
    fd_sched_block_t * child = block_pool_ele( sched, child_idx );
    subtree_abandon( sched, child );
    child_idx = child->sibling_idx;
  }
}
    1678             : 
/* maybe_switch_block is called after progress on bank_idx.  If that
   block is fully done, remove it from the dispatcher and try to switch
   the active block to a staged child on the same lane; otherwise
   release the lane and activate other work.  If the block merely ran
   out of dispatchable transactions (data not ingested yet), deactivate
   it and look for other work. */
static void
maybe_switch_block( fd_sched_t * sched, ulong bank_idx ) {
  fd_sched_block_t * block = block_pool_ele( sched, bank_idx );
  if( FD_UNLIKELY( block_is_done( block ) ) ) {
    block->in_rdisp = 0;
    block->staged   = 0;
    fd_rdisp_remove_block( sched->rdisp, bank_idx );
    sched->metrics->block_removed_cnt++;

    /* See if there is a child block down the same staging lane.  This
       is a policy decision to minimize fork churn.  We could in theory
       reevaluate staging lane allocation here and do promotion/demotion
       as needed. */
    ulong child_idx = block->child_idx;
    while( child_idx!=ULONG_MAX ) {
      fd_sched_block_t * child = block_pool_ele( sched, child_idx );
      if( FD_LIKELY( child->staged && child->staging_lane==block->staging_lane ) ) {
        /* There is a child block down the same staging lane ... */
        if( FD_LIKELY( !child->dying ) ) {
          /* ... and the child isn't dead */
          if( FD_UNLIKELY( !block_is_activatable( child ) ) ) {
            /* ... but the child is not activatable, likely because
               there are no transactions available yet. */
            FD_LOG_DEBUG(( "reset active_bank_idx %lu", sched->active_bank_idx ));
            sched->active_bank_idx = ULONG_MAX;
            sched->metrics->deactivate_no_txn_cnt++;
            try_activate_block( sched );
            return;
          }
          /* ... and it's immediately dispatchable, so switch the active
             block to it, and have the child inherit the head status of
             the lane.  This is the common case. */
          sched->active_bank_idx = child_idx;
          sched->staged_head_bank_idx[ block->staging_lane ] = child_idx;
          if( FD_UNLIKELY( !fd_ulong_extract_bit( sched->staged_bitset, (int)block->staging_lane ) ) ) {
            FD_LOG_CRIT(( "invariant violation: staged_bitset 0x%lx bit %lu is not set, slot %lu, parent slot %lu, child slot %lu, parent slot %lu",
                          sched->staged_bitset, block->staging_lane, block->slot, block->parent_slot, child->slot, child->parent_slot ));
          }
          return;
        } else {
          /* ... but the child block is considered dead, likely because
             the parser considers it invalid. */
          FD_LOG_INFO(( "child block %lu is already dead", child->slot ));
          subtree_abandon( sched, child );
          break;
        }
      }
      child_idx = child->sibling_idx;
    }
    /* There isn't a child block down the same staging lane.  This is
       the last block in the staging lane.  Release the staging lane. */
    sched->staged_bitset = fd_ulong_clear_bit( sched->staged_bitset, (int)block->staging_lane );
    sched->staged_head_bank_idx[ block->staging_lane ] = ULONG_MAX;

    /* Reset the active block. */
    FD_LOG_DEBUG(( "reset active_bank_idx %lu", sched->active_bank_idx ));
    sched->active_bank_idx = ULONG_MAX;
    sched->metrics->deactivate_no_child_cnt++;
    try_activate_block( sched );
  } else if( block_should_deactivate( block ) ) {
    /* We exhausted the active block, but it's not fully done yet.  We
       are just not getting FEC sets for it fast enough.  This could
       happen when the network path is congested, or when the leader
       simply went down.  Reset the active block. */
    sched->active_bank_idx = ULONG_MAX;
    sched->metrics->deactivate_no_txn_cnt++;
    try_activate_block( sched );
  }
}
    1748             : 
    1749             : FD_FN_UNUSED static ulong
    1750           0 : find_and_stage_longest_unstaged_fork( fd_sched_t * sched, int lane_idx ) {
    1751           0 :   ulong root_idx = sched->root_idx;
    1752           0 : 
    1753           0 :   if( FD_UNLIKELY( root_idx==ULONG_MAX ) ) {
    1754           0 :     FD_LOG_CRIT(( "invariant violation: root_idx==ULONG_MAX indicating fd_sched is unintialized" ));
    1755           0 :   }
    1756           0 : 
    1757           0 :   /* First pass: compute the longest unstaged fork depth for each node
    1758           0 :      in the fork tree. */
    1759           0 :   ulong depth = compute_longest_unstaged_fork( sched, root_idx );
    1760           0 : 
    1761           0 :   /* Second pass: stage blocks on the longest unstaged fork. */
    1762           0 :   ulong head_bank_idx = stage_longest_unstaged_fork( sched, root_idx, lane_idx );
    1763           0 : 
    1764           0 :   if( FD_UNLIKELY( (depth>0UL && head_bank_idx==ULONG_MAX) || (depth==0UL && head_bank_idx!=ULONG_MAX) ) ) {
    1765           0 :     FD_LOG_CRIT(( "invariant violation: depth %lu, head_bank_idx %lu",
    1766           0 :                   depth, head_bank_idx ));
    1767           0 :   }
    1768           0 : 
    1769           0 :   return head_bank_idx;
    1770           0 : }
    1771             : 
    1772             : /* Returns length of the longest stageable unstaged fork, if there is
    1773             :    one, and 0 otherwise. */
    1774             : static ulong
    1775           0 : compute_longest_unstaged_fork( fd_sched_t * sched, ulong bank_idx ) {
    1776           0 :   if( FD_UNLIKELY( bank_idx==ULONG_MAX ) ) {
    1777           0 :     FD_LOG_CRIT(( "invariant violation: bank_idx==ULONG_MAX" ));
    1778           0 :   }
    1779             : 
    1780           0 :   fd_sched_block_t * block = block_pool_ele( sched, bank_idx );
    1781             : 
    1782           0 :   ulong max_child_depth = 0UL;
    1783           0 :   ulong child_idx       = block->child_idx;
    1784           0 :   while( child_idx!=ULONG_MAX ) {
    1785           0 :     ulong child_depth = compute_longest_unstaged_fork( sched, child_idx );
    1786           0 :     if( child_depth > max_child_depth ) {
    1787           0 :       max_child_depth = child_depth;
    1788           0 :     }
    1789           0 :     fd_sched_block_t * child = block_pool_ele( sched, child_idx );
    1790           0 :     child_idx = child->sibling_idx;
    1791           0 :   }
    1792             : 
    1793           0 :   block->luf_depth = max_child_depth + fd_ulong_if( block_is_promotable( block ), 1UL, 0UL );
    1794           0 :   return block->luf_depth;
    1795           0 : }
    1796             : 
    1797             : static ulong
    1798           0 : stage_longest_unstaged_fork_helper( fd_sched_t * sched, ulong bank_idx, int lane_idx ) {
    1799           0 :   if( FD_UNLIKELY( bank_idx==ULONG_MAX ) ) {
    1800           0 :     FD_LOG_CRIT(( "invariant violation: bank_idx==ULONG_MAX" ));
    1801           0 :   }
    1802             : 
    1803           0 :   fd_sched_block_t * block = block_pool_ele( sched, bank_idx );
    1804             : 
    1805           0 :   int   stage_it = fd_int_if( block_is_promotable( block ), 1, 0 );
    1806           0 :   ulong rv       = fd_ulong_if( stage_it, bank_idx, ULONG_MAX );
    1807           0 :   if( FD_LIKELY( stage_it ) ) {
    1808           0 :     block->staged = 1;
    1809           0 :     block->staging_lane = (ulong)lane_idx;
    1810           0 :     fd_rdisp_promote_block( sched->rdisp, bank_idx, block->staging_lane );
    1811           0 :     sched->metrics->block_promoted_cnt++;
    1812           0 :   }
    1813             : 
    1814             :   /* Base case: leaf node. */
    1815           0 :   if( block->child_idx==ULONG_MAX ) return rv;
    1816             : 
    1817           0 :   ulong max_depth      = 0UL;
    1818           0 :   ulong best_child_idx = ULONG_MAX;
    1819           0 :   ulong child_idx      = block->child_idx;
    1820           0 :   while( child_idx!=ULONG_MAX ) {
    1821           0 :     fd_sched_block_t * child = block_pool_ele( sched, child_idx );
    1822           0 :     if( child->luf_depth>max_depth ) {
    1823           0 :       max_depth      = child->luf_depth;
    1824           0 :       best_child_idx = child_idx;
    1825           0 :     }
    1826           0 :     child_idx = child->sibling_idx;
    1827           0 :   }
    1828             : 
    1829             :   /* Recursively stage descendants. */
    1830           0 :   if( best_child_idx!=ULONG_MAX ) {
    1831           0 :     ulong head_bank_idx = stage_longest_unstaged_fork_helper( sched, best_child_idx, lane_idx );
    1832           0 :     rv = fd_ulong_if( rv!=ULONG_MAX, rv, head_bank_idx );
    1833           0 :   }
    1834             : 
    1835           0 :   return rv;
    1836           0 : }
    1837             : 
    1838             : /* Returns idx of head block of staged lane on success, idx_null
    1839             :    otherwise. */
    1840             : static ulong
    1841           0 : stage_longest_unstaged_fork( fd_sched_t * sched, ulong bank_idx, int lane_idx ) {
    1842           0 :   ulong head_bank_idx = stage_longest_unstaged_fork_helper( sched, bank_idx, lane_idx );
    1843           0 :   if( FD_LIKELY( head_bank_idx!=ULONG_MAX ) ) {
    1844           0 :     sched->metrics->lane_promoted_cnt++;
    1845           0 :     sched->staged_bitset = fd_ulong_set_bit( sched->staged_bitset, lane_idx );
    1846           0 :     sched->staged_head_bank_idx[ lane_idx ] = head_bank_idx;
    1847           0 :   }
    1848           0 :   return head_bank_idx;
    1849           0 : }

Generated by: LCOV version 1.14