#include "../../../../disco/tiles.h"

#include "../../../../disco/plugin/fd_plugin.h"

/* Let's say there was a computer, the "leader" computer, that acted as
   a bank.  Users could send it messages saying they wanted to deposit
   money, or transfer it to someone else.

   That's how, for example, Bank of America works, but there are
   problems with it.  One simple problem is: the bank can set your
   balance to zero if they don't like you.

   You could try to fix this by having the bank periodically publish the
   list of all account balances and transactions.  If the customers add
   unforgeable signatures to their deposit slips and transfers, then
   the bank cannot zero a balance without it being obvious to everyone.

   There are still problems.  The bank can't lie about your balance now
   or take your money, but it can just not accept deposits on your
   behalf by ignoring you.

   You could fix this by getting a few independent banks together, let's
   say Bank of America, Bank of England, and Westpac, and having them
   rotate who operates the leader computer periodically.  If one bank
   ignores your deposits, you can just wait and send them to the next
   one.

   This is Solana.

   There are still problems of course, but they are largely technical.
   How do the banks agree who is leader?  How do you recover if a leader
   misbehaves?  How do customers verify the transactions aren't forged?
   How do banks receive, publish, and verify each other's work quickly?
   These are the main technical innovations that enable Solana to work
   well.

   What about Proof of History?

   One particular niche problem is about the leader schedule.  When the
   leader computer is moving from one bank to another, the new bank must
   wait for the old bank to say it's done and provide a final list of
   balances that it can start working off of.  But: what if the computer
   at the old bank crashes and never says it's done?

   Does the new leader just take over at some point?  What if the new
   leader is malicious, and says the past thousand leaders crashed, and
   there have been no transactions for days?  How do you check?

   This is what Proof of History solves.  Each bank in the network must
   constantly do a lot of busywork (compute hashes), even when it is not
   leader.

   If the prior thousand leaders crashed, and no transactions happened
   in an hour, the new leader would have to show they did about an hour
   of busywork for everyone else to believe them.

   A better name for this is proof of skipping.  If a leader is skipping
   slots (building off of a slot that is not the direct parent), it must
   prove that it waited a good amount of time to do so.
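
   As a sketch only (not the real implementation), the busywork is a
   chained SHA-256:

      for( ulong i=0UL; i<n; i++ ) fd_sha256_hash( hash, 32UL, hash );

   Proving that roughly an hour passed means showing a correspondingly
   large n of sequential hashes.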

   It's not a perfect solution.  For one thing, some banks have really
   fast computers and can compute a lot of busywork in a short amount of
   time, allowing them to skip prior slot(s) anyway.  But: there is a
   social component that prevents validators from skipping the prior
   leader slot.  It is easy to detect when this happens and the network
   could respond by ignoring their votes or stake.

   You could come up with other schemes: for example, the network could
   just use wall clock time.  If a new leader publishes a block without
   waiting 400 milliseconds for the prior slot to complete, then there
   is no "proof of skipping" and the nodes ignore the slot.

   These schemes have a problem in that they are not deterministic
   across the network (different computers have different clocks), and
   so they will cause frequent forks which are very expensive to
   resolve.  Even though the proof of history scheme is not perfect,
   it is better than any alternative which is not deterministic.

   With all that background, we can now describe at a high level what
   this PoH tile actually does:

    (1) Whenever any other leader in the network finishes a slot, and
        the slot is determined to be the best one to build off of, this
        tile gets "reset" onto that block, the so called "reset slot".

    (2) The tile is constantly doing busywork, hash(hash(hash(...))) on
        top of the last reset slot, even when it is not leader.

    (3) When the tile becomes leader, it continues hashing from where it
        was.  Typically, the prior leader finishes their slot, so the
        reset slot will be the parent one, and this tile only publishes
        hashes for its own slot.  But if prior slots were skipped, then
        there might be a whole chain already waiting.

    That's pretty much it.  When we are leader, in addition to doing
    busywork, we publish ticks and microblocks to the shred tile.  A
    microblock is a non-empty group of transactions whose hashes are
    mixed in to the chain, while a tick is a periodic stamp of the
    current hash, with no transactions (nothing mixed in).  We need
    to send both to the shred tile, as ticks are important for other
    validators to verify in parallel.
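
    As a rough sketch of the difference (names here are illustrative,
    not the real functions in this file), a tick just extends the
    chain,

       fd_sha256_hash( hash, 32UL, hash );

    while a mixin hashes the chain together with the microblock's
    hash,

       uchar data[ 64 ];
       memcpy( data,      hash,            32UL );
       memcpy( data+32UL, microblock_hash, 32UL );
       fd_sha256_hash( data, 64UL, hash );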

    As well, the tile should never become leader for a slot that it has
    published anything for, otherwise it may create a duplicate block.

    Some particularly common misunderstandings:

     - PoH is critical to security.

       This largely isn't true.  The target hash rate of the network is
       so slow (1 hash per 500 nanoseconds) that a malicious leader can
       easily catch up if they start from an old hash, and the only
       practical attack prevented is the proof of skipping.  Most of the
       long range attacks in the Solana whitepaper are not relevant.

     - PoH keeps passage of time.

       This is also not true.  The way the network keeps time so it can
       decide who is leader is that each leader uses their operating
       system clock to time 400 milliseconds and publishes their block
       when this timer expires.

       If a leader just hashed as fast as they could, they could publish
       a block in tens of milliseconds, and the rest of the network
       would happily accept it.  This is why the Solana "clock" as
       determined by PoH is not accurate and drifts over time.

     - PoH prevents transaction reordering by the leader.

       The leader can, in theory, wait until the very end of their
       leader slot to publish anything at all to the network.  They can,
       in particular, hold all received transactions for 400
       milliseconds and then reorder and publish some right at the end
       to advantage certain transactions.

    You might be wondering... if all the PoH chain is helping us do is
    prove that slots were skipped correctly, why do we need to "mix in"
    transactions to the hash value?  Or do anything at all for slots
    where we don't skip the prior slot?

    It's a good question, and the answer is that this behavior is not
    necessary.  An ideal implementation of PoH would have no concept of
    ticks or mixins, and would not be part of the TPU pipeline at all.
    Instead, there would be a simple field "skip_proof" on the last
    shred we send for a slot, the hash(hash(...)) value.  This field
    would only be filled in (and only verified by replayers) in cases
    where the slot actually skipped a parent.

    Then what is the "clock"?  In Solana, time is constructed as
    follows:

    HASHES

        The base unit of time is a hash.  Hereafter, any values whose
        units are in hashes are called a "hashcnt" to distinguish them
        from actual hashed values.

        Agave generally defines a constant duration for each tick
        (see below) and then varies the number of hashcnts per tick,
        but as we consider the hashcnt the base unit of time, Firedancer
        and this PoH implementation define everything in terms of
        hashcnt duration instead.

        In mainnet-beta, testnet, and devnet the hashcnt ticks over
        (increments) every 100 nanoseconds.  The hashcnt duration is
        specified as 500 nanoseconds in the genesis, but there are
        several features which increase the number of hashes per tick
        while keeping the tick duration constant, which make the time
        per hashcnt lower.  These features, up to and including the
        `update_hashes_per_tick6` feature, are activated on
        mainnet-beta, devnet, and testnet, and are described in the
        TICKS section below.

        Other chains and development environments might have a different
        hashcnt rate in the genesis, or they might not have activated
        the features which increase the rate yet, which we also support.

        In practice, although each validator follows a hashcnt duration
        of 100 nanoseconds, the overall observed hashcnt rate of the
        network is a little slower than once every 100 nanoseconds,
        mostly because there are gaps and clock synchronization issues
        during handoff between leaders.  This is referred to as clock
        drift.
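
        To make the arithmetic concrete: the tick duration is fixed at
        6.25 milliseconds (see TICKS below), so with the current
        62,500 hashes per tick,

           6,250,000 ns / 62,500 hashes = 100 ns per hashcnt

        and at the genesis value of 12,500 hashes per tick it was
        6,250,000 ns / 12,500 hashes = 500 ns, which is where the 500
        nanosecond figure above comes from.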

    TICKS

        The leader needs to periodically checkpoint the hash value
        associated with a given hashcnt so that they can publish it to
        other nodes for verification.

        On mainnet-beta, testnet, and devnet this occurs once every
        62,500 hashcnts, or once every 6.25 milliseconds.  This value
        is determined by the genesis and the features described below,
        and could be different in development environments or on other
        chains which we support.

        Due to protocol limitations, a mixin of transactions into the
        proof-of-history chain cannot occur on a tick boundary (but
        can occur at any other hashcnt).
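
        Concretely, a mixin at hashcnt h is allowed only if (a sketch
        using this file's naming),

           int mixin_ok = ( h % hashcnt_per_tick )!=0UL;

        i.e. h does not land exactly on a tick boundary.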

        Ticks exist mainly so that verification can happen in parallel.
        A verifier computer, rather than needing to do hash(hash(...))
        all in sequence to verify a proof-of-history chain, can do,

         Core 0: hash(hash(...))
         Core 1: hash(hash(...))
         Core 2: hash(hash(...))
         Core 3: hash(hash(...))
         ...

        between each pair of tick boundaries.
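
        For instance, one core's share of that work might look like
        (illustrative names, and ignoring any mixins in the range),

           uchar h[ 32 ];
           memcpy( h, tick_start_hash, 32UL );
           for( ulong i=0UL; i<hashcnt_per_tick; i++ )
             fd_sha256_hash( h, 32UL, h );
           FD_TEST( !memcmp( h, tick_end_hash, 32UL ) );

        with each core checking a disjoint range of ticks.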

        Solana sometimes calls the current tick the "tick height",
        although it makes more sense to think of it as a counter from
        zero; it's just the number of ticks since the genesis hash.

        There is a set of features which increase the number of hashcnts
        per tick.  These are all deployed on mainnet-beta, devnet, and
        testnet.

           name:             update_hashes_per_tick
           id:               3uFHb9oKdGfgZGJK9EHaAXN4USvnQtAFC13Fh5gGFS5B
           hashes per tick:  12,500
           hashcnt duration: 500 nanos

           name:             update_hashes_per_tick2
           id:               EWme9uFqfy1ikK1jhJs8fM5hxWnK336QJpbscNtizkTU
           hashes per tick:  17,500
           hashcnt duration: 357.142857143 nanos

           name:             update_hashes_per_tick3
           id:               8C8MCtsab5SsfammbzvYz65HHauuUYdbY2DZ4sznH6h5
           hashes per tick:  27,500
           hashcnt duration: 227.272727273 nanos

           name:             update_hashes_per_tick4
           id:               8We4E7DPwF2WfAN8tRTtWQNhi98B99Qpuj7JoZ3Aikgg
           hashes per tick:  47,500
           hashcnt duration: 131.578947368 nanos

           name:             update_hashes_per_tick5
           id:               BsKLKAn1WM4HVhPRDsjosmqSg2J8Tq5xP2s2daDS6Ni4
           hashes per tick:  57,500
           hashcnt duration: 108.695652174 nanos

           name:             update_hashes_per_tick6
           id:               FKu1qYwLQSiehz644H6Si65U5ZQ2cp9GxsyFUfYcuADv
           hashes per tick:  62,500
           hashcnt duration: 100 nanos

        In development environments, there is a way to configure the
        hashcnt per tick to be "none" during genesis, for a so-called
        "low power" tick producer.  The idea is not to spin cores during
        development.  This is equivalent to setting the hashcnt per tick
        to be 1, and increasing the hashcnt duration to the desired tick
        duration.

    SLOTS

        Each leader needs to be leader for a fixed amount of time, which
        is called a slot.  During a slot, a leader has an opportunity to
        receive transactions and produce a block for the network,
        although they may miss ("skip") the slot if they are offline or
        not behaving.

        In mainnet-beta, testnet, and devnet a slot is 64 ticks, or
        4,000,000 hashcnts, or approximately 400 milliseconds.
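
        That is, 64 ticks/slot * 62,500 hashcnts/tick = 4,000,000
        hashcnts/slot, and 4,000,000 hashcnts * 100 ns = 400
        milliseconds.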

        Due to the way the leader schedule is constructed, each leader
        is always given at least four (4) consecutive slots in the
        schedule.  This means when becoming leader you will be leader
        for at least 4 slots, or 1.6 seconds.

        It is rare, although it can happen, that a leader gets more than
        4 consecutive slots (e.g. 8, or 12), if they are lucky with the
        leader schedule generation.

        The number of ticks in a slot is fixed at genesis time, and
        could be different for development or other chains, which we
        support.  There is nothing special about 4 leader slots in a
        row, and this might be changed in the future; the proof of
        history makes no assumption that this is the case.

    EPOCHS

        Infrequently, the network needs to do certain housekeeping,
        mainly things like collecting rent and deciding on the leader
        schedule.  The length of an epoch is fixed on mainnet-beta,
        devnet and testnet at 432,000 slots, or almost exactly two (2)
        days.  This value is fixed at genesis time, and could be
        different for other chains including development, which we
        support.  Typically in development, epochs are every 8,192
        slots, or around ~1 hour (54.61 minutes), although it depends
        on the number of ticks per slot and the target hashcnt rate of
        the genesis as well.

        In development, epochs need not be a fixed length either.  There
        is a "warmup" option, where epochs start short and grow, which
        is useful for quickly warming up stake during development.

        The epoch is important because it is the only time the leader
        schedule is updated.  The leader schedule is a list of which
        leader is leader for which slot, and is generated by a special
        algorithm that is deterministic and known to all nodes.

        The leader schedule is computed one epoch in advance, so that
        at slot T, we always know who will be leader up until the end
        of slot T+EPOCH_LENGTH.  Specifically, the leader schedule for
        epoch N is computed during the epoch boundary crossing from
        N-2 to N-1.  For mainnet-beta, the slots per epoch is fixed and
        will always be 432,000. */

#include "../../../../ballet/pack/fd_pack.h"
#include "../../../../ballet/sha256/fd_sha256.h"
#include "../../../../disco/metrics/fd_metrics.h"
#include "../../../../disco/topo/fd_pod_format.h"
#include "../../../../disco/shred/fd_shredder.h"
#include "../../../../disco/shred/fd_stake_ci.h"
#include "../../../../disco/bank/fd_bank_abi.h"
#include "../../../../disco/keyguard/fd_keyload.h"
#include "../../../../disco/metrics/generated/fd_metrics_poh.h"
#include "../../../../flamenco/leaders/fd_leaders.h"

/* The maximum number of microblocks that pack is allowed to pack into a
   single slot.  This is not consensus critical, and pack could, if we
   let it, produce as many microblocks as it wants, and the slot would
   still be valid.

   We have this limit here instead so that PoH can estimate slot
   completion, and keep the hashcnt up to date as pack progresses
   through packing the slot.  If this upper bound was not enforced, PoH
   could tick to the last hash of the slot and have no hashes left to
   mix in incoming microblocks from pack, so this upper bound is a
   coordination mechanism so that PoH can progress hashcnts while the
   slot is active, and know that pack will not need those hashcnts
   later to do mixins. */
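
/* As a sanity check on the headroom: with mainnet-beta values there
   are 64*(62,500-1) = 3,999,936 non-tick hashcnts available for
   mixins in a slot, so the cap below is far below the consensus
   limit (see the fd_ulong_min in fd_ext_poh_initialize). */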
#define MAX_MICROBLOCKS_PER_SLOT (32768UL)

/* When we are hashing in the background in case a prior leader skips
   their slot, we need to store the result of each tick hash so we can
   publish them when we become leader.  The network requires at least
   one leader slot to publish in each epoch for the leader schedule to
   generate, so in the worst case we might need two full epochs of slots
   to store the hashes.  (Eg, if epoch T only had a published slot in
   position 0 and epoch T+1 only had a published slot right at the end).

   There is a tighter bound: the block data limit of mainnet-beta is
   currently FD_PACK_MAX_DATA_PER_BLOCK, or 27,332,342 bytes per slot.
   At 48 bytes per tick, it is not possible to publish a slot that
   carries 569,424 or more skipped ticks. */
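
/* Worked out: FD_PACK_MAX_DATA_PER_BLOCK/48 = 27,332,342/48 = 569,423
   ticks (rounded down) fit in a block, hence the 1UL+... bound below
   of 569,424. */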
#define MAX_SKIPPED_TICKS (1UL+(FD_PACK_MAX_DATA_PER_BLOCK/48UL))

#define IN_KIND_BANK  (0)
#define IN_KIND_PACK  (1)
#define IN_KIND_STAKE (2)

typedef struct {
  fd_wksp_t * mem;
  ulong       chunk0;
  ulong       wmark;
} fd_poh_in_ctx_t;

typedef struct {
  ulong       idx;
  fd_wksp_t * mem;
  ulong       chunk0;
  ulong       wmark;
  ulong       chunk;
} fd_poh_out_ctx_t;

typedef struct {
  fd_stem_context_t * stem;

  /* Static configuration determined at genesis creation time.  See
     long comment above for more information. */
  ulong  tick_duration_ns;
  ulong  hashcnt_per_tick;
  ulong  ticks_per_slot;

  /* Derived from the above configuration, but we precompute it. */
  double slot_duration_ns;
  double hashcnt_duration_ns;
  ulong  hashcnt_per_slot;
  /* Constant, fixed at initialization.  The maximum number of
     microblocks that the pack tile can publish in each slot. */
  ulong max_microblocks_per_slot;

  /* The current slot and hashcnt within that slot of the proof of
     history, including hashes we have been producing in the background
     while waiting for our next leader slot. */
  ulong slot;
  ulong hashcnt;
  ulong cus_used;

  /* When we send a microblock on to the shred tile, we need to tell
     it how many hashes there have been since the last microblock, so
     this tracks the hashcnt of the last published microblock.

     If we are skipping slots prior to our leader slot, the last_slot
     will be quite old, and potentially much larger than the number of
     hashcnts in one slot. */
  ulong last_slot;
  ulong last_hashcnt;

  /* If we have published a tick or a microblock for a particular slot
     to the shred tile, we should never become leader for that slot
     again, otherwise we could publish a duplicate block.

     This value tracks the max slot that we have published a tick or
     microblock for so we can prevent this. */
  ulong highwater_leader_slot;

  /* See how this field is used below.  If we have sequential leader
     slots, we don't reset the expected slot end time between the two,
     to prevent clock drift.  If we didn't do this, our 2nd slot would
     end 400ms + `time_for_replay_to_move_slot_and_reset_poh` after
     our 1st, rather than just strictly 400ms. */
  int  lagged_consecutive_leader_start;
  ulong expect_sequential_leader_slot;

  /* There's a race condition ... let's say two banks A and B; bank A
     processes some transactions, then releases the account locks, and
     sends the microblock to PoH to be stamped.  Pack now re-packs the
     same accounts into a new microblock, sends it to bank B, bank B
     executes and sends the microblock to PoH, and this all happens fast
     enough that PoH picks the 2nd microblock to stamp before the 1st.
     The accounts database changes are now misordered with respect to
     PoH, so replay could fail.

     To prevent this race, we order all microblocks and only process
     them in PoH in the order they are produced by pack.  This is a
     little over-strict; we just need to ensure that microblocks with
     conflicting accounts execute in order, but this is easiest to
     implement for now. */
  ulong expect_microblock_idx;

  /* The PoH tile must never drop microblocks that get committed by the
     bank, so it needs to always be able to mix in a microblock hash.
     Mixing in requires incrementing the hashcnt, so we need to ensure
     at all times that there are enough hashcnts left in the slot to
     mix in whatever future microblocks pack might produce for it.

     This value tracks that.  At any time, max_microblocks_per_slot
     - microblocks_lower_bound is an upper bound on the maximum number
     of microblocks that might still be received in this slot. */
  ulong microblocks_lower_bound;

  uchar __attribute__((aligned(32UL))) hash[ 32 ];

  /* When we are not leader, we need to save the hashes that were
     produced in case the prior leader skips.  If they skip, we will
     replay these skipped hashes into our next leader bank so that
     the slot hashes sysvar can be updated correctly, and also publish
     them to peer nodes as part of our outgoing shreds. */
  uchar skipped_tick_hashes[ MAX_SKIPPED_TICKS ][ 32 ];

  /* The timestamp in nanoseconds of when the reset slot was received.
     This is the timestamp we are building on top of to determine when
     our next leader slot starts. */
  long reset_slot_start_ns;

  /* The timestamp in nanoseconds of when we got the bank for the
     current leader slot. */
  long leader_bank_start_ns;

  /* The slot of the current reset, i.e. one above the last good
     (unskipped) slot we are building on top of. */
  ulong reset_slot;

  /* Our next leader slot, or ULONG_MAX if we have no known next
     leader slot. */
  ulong next_leader_slot;

  /* If an in progress frag should be skipped */
  int skip_frag;

  ulong max_active_descendant;

  /* If we currently are the leader according to the clock AND we have
     received the leader bank for the slot from the replay stage,
     this value will be non-NULL.

     Note that we might be inside our leader slot, but not have a bank
     yet, in which case this will still be NULL.

     It will be NULL for a brief race period between consecutive leader
     slots, as we ping-pong back to replay stage waiting for a new bank.

     Agave refers to this as the "working bank". */
  void const * current_leader_bank;

  fd_sha256_t * sha256;

  fd_stake_ci_t * stake_ci;

  fd_pubkey_t identity_key;

  /* The Agave client needs to be notified when the leader changes, so
     that they can resume the replay stage if it was suspended waiting. */
  void * signal_leader_change;

  /* These are temporarily set in during_frag so they can be used in
     after_frag once the frag has been validated as not overrun. */
  uchar _txns[ USHORT_MAX ];
  fd_microblock_trailer_t _microblock_trailer[ 1 ];

  int in_kind[ 64 ];
  fd_poh_in_ctx_t in[ 64 ];

  fd_poh_out_ctx_t shred_out[ 1 ];
  fd_poh_out_ctx_t pack_out[ 1 ];
  fd_poh_out_ctx_t plugin_out[ 1 ];

  fd_histf_t begin_leader_delay[ 1 ];
  fd_histf_t first_microblock_delay[ 1 ];
  fd_histf_t slot_done_delay[ 1 ];
} fd_poh_ctx_t;

/* The PoH recorder is implemented in Firedancer but for now needs to
   work with Agave, so we have a locking scheme for them to
   cooperate.

   This is because the PoH tile lives in the Agave memory address
   space and their version of concurrency is locking the PoH recorder
   and reading arbitrary fields.

   So we allow them to lock the PoH tile, although with a very bad (for
   them) locking scheme.  By default, the tile has full and exclusive
   access to the data.  If part of Agave wishes to read/write they
   can either,

     1. Rewrite their concurrency to message passing based on mcache
        (preferred, but not feasible).
     2. Signal to the tile they wish to acquire the lock, by setting
        fd_poh_waiting_lock to 1.

   During after_credit, the tile will check if the waiting lock is set
   to 1, and if so, set the returned lock to 1, indicating to the waiter
   that they may now proceed.

   When the waiter is done reading and writing, they restore the
   returned lock value back to zero, and the PoH tile continues with its
   day. */
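
/* A sketch of the tile side of this handshake as described above
   (illustrative only; the real check lives in after_credit, which is
   not shown here):

     if( FD_UNLIKELY( FD_VOLATILE_CONST( fd_poh_waiting_lock ) ) ) {
       FD_VOLATILE( fd_poh_waiting_lock  ) = 0UL;
       FD_VOLATILE( fd_poh_returned_lock ) = 1UL;
       FD_COMPILER_MFENCE();
       while( FD_VOLATILE_CONST( fd_poh_returned_lock ) ) FD_SPIN_PAUSE();
     }
*/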

static fd_poh_ctx_t * fd_poh_global_ctx;

static volatile ulong fd_poh_waiting_lock __attribute__((aligned(128UL)));
static volatile ulong fd_poh_returned_lock __attribute__((aligned(128UL)));

/* Agave also needs to write to some mcaches, so we trampoline
   that via the PoH tile as well. */

struct poh_link {
  fd_frag_meta_t * mcache;
  ulong            depth;
  ulong            tx_seq;

  void *           mem;
  void *           dcache;
  ulong            chunk0;
  ulong            wmark;
  ulong            chunk;

  ulong            cr_avail;
  ulong            rx_cnt;
  ulong *          rx_fseqs[ 32UL ];
};

typedef struct poh_link poh_link_t;

poh_link_t gossip_dedup;
poh_link_t stake_out;
poh_link_t crds_shred;
poh_link_t replay_resolv;

poh_link_t replay_plugin;
poh_link_t gossip_plugin;
poh_link_t start_progress_plugin;
poh_link_t vote_listener_plugin;

static void
poh_link_wait_credit( poh_link_t * link ) {
  if( FD_LIKELY( link->cr_avail ) ) return;

  while( 1 ) {
    ulong cr_query = ULONG_MAX;
    for( ulong i=0UL; i<link->rx_cnt; i++ ) {
      ulong const * _rx_seq = link->rx_fseqs[ i ];
      ulong rx_seq = FD_VOLATILE_CONST( *_rx_seq );
      /* Credits available from this consumer: the link depth, less
         however far our tx sequence number has run ahead of them. */
      ulong rx_cr_query = (ulong)fd_long_max( (long)link->depth - fd_long_max( fd_seq_diff( link->tx_seq, rx_seq ), 0L ), 0L );
      cr_query = fd_ulong_min( rx_cr_query, cr_query );
    }
    /* The slowest reliable consumer bounds how much we can publish. */
    if( FD_LIKELY( cr_query>0UL ) ) {
      link->cr_avail = cr_query;
      break;
    }
    FD_SPIN_PAUSE();
  }
}

static void
poh_link_publish( poh_link_t *  link,
                  ulong         sig,
                  uchar const * data,
                  ulong         data_sz ) {
  while( FD_UNLIKELY( !FD_VOLATILE_CONST( link->mcache ) ) ) FD_SPIN_PAUSE();
  if( FD_UNLIKELY( !link->mem ) ) return; /* link not enabled, don't publish */
  poh_link_wait_credit( link );

  uchar * dst = (uchar *)fd_chunk_to_laddr( link->mem, link->chunk );
  fd_memcpy( dst, data, data_sz );
  ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
  fd_mcache_publish( link->mcache, link->depth, link->tx_seq, sig, link->chunk, data_sz, 0UL, 0UL, tspub );
  link->chunk = fd_dcache_compact_next( link->chunk, data_sz, link->chunk0, link->wmark );
  link->cr_avail--;
  link->tx_seq++;
}

static void
poh_link_init( poh_link_t *     link,
               fd_topo_t *      topo,
               fd_topo_tile_t * tile,
               ulong            out_idx ) {
  fd_topo_link_t * topo_link = &topo->links[ tile->out_link_id[ out_idx ] ];
  fd_topo_wksp_t * wksp = &topo->workspaces[ topo->objs[ topo_link->dcache_obj_id ].wksp_id ];

  link->mem      = wksp->wksp;
  link->depth    = fd_mcache_depth( topo_link->mcache );
  link->tx_seq   = 0UL;
  link->dcache   = topo_link->dcache;
  link->chunk0   = fd_dcache_compact_chunk0( wksp->wksp, topo_link->dcache );
  link->wmark    = fd_dcache_compact_wmark ( wksp->wksp, topo_link->dcache, topo_link->mtu );
  link->chunk    = link->chunk0;
  link->cr_avail = 0UL;
  link->rx_cnt   = 0UL;
  for( ulong i=0UL; i<topo->tile_cnt; i++ ) {
    fd_topo_tile_t * _tile = &topo->tiles[ i ];
    for( ulong j=0UL; j<_tile->in_cnt; j++ ) {
      if( _tile->in_link_id[ j ]==topo_link->id && _tile->in_link_reliable[ j ] ) {
        FD_TEST( link->rx_cnt<32UL );
        link->rx_fseqs[ link->rx_cnt++ ] = _tile->in_link_fseq[ j ];
        break;
      }
    }
  }
  FD_COMPILER_MFENCE();
  link->mcache = topo_link->mcache;
  FD_COMPILER_MFENCE();
  FD_TEST( link->mcache );
}

/* To help show correctness, functions that might be called from
   Rust, either directly or indirectly, have this fake "attribute"
   CALLED_FROM_RUST, which is actually nothing.  Calls from Rust
   typically execute on threads that did not call fd_boot, so they do
   not have the typical FD_TL variables.  In particular, they cannot
   use normal metrics, and their log messages don't have full context.
   Additionally, C functions marked CALLED_FROM_RUST cannot call back
   into a Rust fd_ext function without causing a deadlock (although the
   other Rust fd_ext functions have a similar problem).

   To prevent the annotation from polluting the whole codebase, calls
   to functions outside this file are manually checked and marked as
   being safe at each call rather than annotated. */
#define CALLED_FROM_RUST

static CALLED_FROM_RUST fd_poh_ctx_t *
fd_ext_poh_write_lock( void ) {
  for(;;) {
    /* Acquire the waiter lock to make sure we are the first writer in the queue. */
    if( FD_LIKELY( !FD_ATOMIC_CAS( &fd_poh_waiting_lock, 0UL, 1UL) ) ) break;
    FD_SPIN_PAUSE();
  }
  FD_COMPILER_MFENCE();
  for(;;) {
    /* Now wait for the tile to tell us we can proceed. */
    if( FD_LIKELY( FD_VOLATILE_CONST( fd_poh_returned_lock ) ) ) break;
    FD_SPIN_PAUSE();
  }
  FD_COMPILER_MFENCE();
  return fd_poh_global_ctx;
}

static CALLED_FROM_RUST void
fd_ext_poh_write_unlock( void ) {
  FD_COMPILER_MFENCE();
  FD_VOLATILE( fd_poh_returned_lock ) = 0UL;
}

/* The PoH tile needs to interact with the Agave address space to
   do certain operations that Firedancer hasn't reimplemented yet,
   namely transaction execution.  We have Agave export some wrapper
   functions that we call into during regular tile execution.  These do
   not need any locking, since they are called serially from the single
   PoH tile. */

extern CALLED_FROM_RUST void fd_ext_bank_acquire( void const * bank );
extern CALLED_FROM_RUST void fd_ext_bank_release( void const * bank );
extern CALLED_FROM_RUST void fd_ext_poh_signal_leader_change( void * sender );
extern                  void fd_ext_poh_register_tick( void const * bank, uchar const * hash );

/* fd_ext_poh_initialize is called by Agave on startup to
   initialize the PoH tile with some static configuration, and the
   initial reset slot and hash which it retrieves from a snapshot.

   This function is called by some random Agave thread, but
   it blocks booting of the PoH tile.  The tile will spin until it
   determines that this initialization has happened.

   signal_leader_change is an opaque Rust object that is used to
   tell the replay stage that the leader has changed.  It is a
   Box::into_raw(Arc::increment_strong(crossbeam::Sender)), so it
   has infinite lifetime unless this C code releases the refcnt.

   It can be used with `fd_ext_poh_signal_leader_change` which
   will just issue a nonblocking send on the channel. */
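
/* For example, with the mainnet-beta clock described at the top of
   this file (6.25 ms ticks, 62,500 hashes per tick, 64 ticks per
   slot), a call would look like

     fd_ext_poh_initialize( 6250000UL, 62500UL, 64UL,
                            tick_height, last_entry_hash,
                            signal_leader_change );

   where the last three arguments come from the loaded snapshot and
   the replay setup. */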

CALLED_FROM_RUST void
fd_ext_poh_initialize( ulong         tick_duration_ns,    /* See clock comments above, will be 6.25 milliseconds for mainnet-beta. */
                       ulong         hashcnt_per_tick,    /* See clock comments above, will be 62,500 for mainnet-beta. */
                       ulong         ticks_per_slot,      /* See clock comments above, will almost always be 64. */
                       ulong         tick_height,         /* The counter (height) of the tick to start hashing on top of. */
                       uchar const * last_entry_hash,     /* Points to start of a 32 byte region of memory, the hash itself at the tick height. */
                       void *        signal_leader_change /* See comment above. */ ) {
  FD_COMPILER_MFENCE();
  for(;;) {
    /* Make sure the ctx is initialized before trying to take the lock. */
    if( FD_LIKELY( FD_VOLATILE_CONST( fd_poh_global_ctx ) ) ) break;
    FD_SPIN_PAUSE();
  }
  fd_poh_ctx_t * ctx = fd_ext_poh_write_lock();

  ctx->slot                = tick_height/ticks_per_slot;
  ctx->hashcnt             = 0UL;
  ctx->cus_used            = 0UL;
  ctx->last_slot           = ctx->slot;
  ctx->last_hashcnt        = 0UL;
  ctx->reset_slot          = ctx->slot;
  ctx->reset_slot_start_ns = fd_log_wallclock(); /* safe to call from Rust */

  memcpy( ctx->hash, last_entry_hash, 32UL );

  ctx->signal_leader_change = signal_leader_change;

  /* Static configuration about the clock. */
  ctx->tick_duration_ns = tick_duration_ns;
  ctx->hashcnt_per_tick = hashcnt_per_tick;
  ctx->ticks_per_slot   = ticks_per_slot;

  /* Recompute derived information about the clock. */
  ctx->slot_duration_ns    = (double)ticks_per_slot*(double)tick_duration_ns;
  ctx->hashcnt_duration_ns = (double)tick_duration_ns/(double)hashcnt_per_tick;
  ctx->hashcnt_per_slot    = ticks_per_slot*hashcnt_per_tick;

  if( FD_UNLIKELY( ctx->hashcnt_per_tick==1UL ) ) {
    /* Low power producer, maximum of one microblock per tick in the slot */
    ctx->max_microblocks_per_slot = ctx->ticks_per_slot;
  } else {
    /* See the long comment in after_credit for this limit */
    ctx->max_microblocks_per_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ctx->ticks_per_slot*(ctx->hashcnt_per_tick-1UL) );
  }

  fd_ext_poh_write_unlock();
}

/* fd_ext_poh_acquire_leader_bank gets the current leader bank if there
   is one currently active.  PoH might think we are leader without
   having a leader bank if the replay stage has not yet noticed we are
   leader.

   The bank that is returned is owned by the caller, and must be
   converted to an Arc<Bank> by calling Arc::from_raw() on it.  PoH
   increments the reference count before returning the bank, so that
   it can also keep its internal copy.

   If there is no leader bank, NULL is returned.  In this case, the
   caller should not call `Arc::from_raw()`. */

CALLED_FROM_RUST void const *
fd_ext_poh_acquire_leader_bank( void ) {
  fd_poh_ctx_t * ctx = fd_ext_poh_write_lock();
  void const * bank = NULL;
  if( FD_LIKELY( ctx->current_leader_bank ) ) {
    /* Clone refcount before we release the lock. */
    fd_ext_bank_acquire( ctx->current_leader_bank );
    bank = ctx->current_leader_bank;
  }
  fd_ext_poh_write_unlock();
  return bank;
}

/* fd_ext_poh_reset_slot returns the slot height one above the last good
   (unskipped) slot we are building on top of.  This is always a good
   known value, and will not be ULONG_MAX. */

CALLED_FROM_RUST ulong
fd_ext_poh_reset_slot( void ) {
  fd_poh_ctx_t * ctx = fd_ext_poh_write_lock();
  ulong reset_slot = ctx->reset_slot;
  fd_ext_poh_write_unlock();
  return reset_slot;
}

     800             : CALLED_FROM_RUST void
     801           0 : fd_ext_poh_update_active_descendant( ulong max_active_descendant ) {
     802           0 :   fd_poh_ctx_t * ctx = fd_ext_poh_write_lock();
     803           0 :   ctx->max_active_descendant = max_active_descendant;
     804           0 :   fd_ext_poh_write_unlock();
     805           0 : }
     806             : 
     807             : /* fd_ext_poh_reached_leader_slot returns 1 if we have reached a slot
     808             :    where we are leader.  This is used by the replay stage to determine
     809             :    if it should create a new leader bank descendant of the prior reset
     810             :    slot block.
     811             : 
     812             :    Sometimes, even when we reach our slot we do not return 1, as we are
     813             :    giving a grace period to the prior leader to finish publishing their
     814             :    block.
     815             : 
     816             :    out_leader_slot is the slot height of the leader slot we reached, and
     817             :    reset_slot is the slot height of the last good (unskipped) slot we
     818             :    are building on top of. */
     819             : 
     820             : CALLED_FROM_RUST int
     821             : fd_ext_poh_reached_leader_slot( ulong * out_leader_slot,
     822           0 :                                 ulong * out_reset_slot ) {
     823           0 :   fd_poh_ctx_t * ctx = fd_ext_poh_write_lock();
     824             : 
     825           0 :   *out_leader_slot = ctx->next_leader_slot;
     826           0 :   *out_reset_slot  = ctx->reset_slot;
     827             : 
     828           0 :   if( FD_UNLIKELY( ctx->next_leader_slot==ULONG_MAX ||
     829           0 :                    ctx->slot<ctx->next_leader_slot ) ) {
     830             :     /* Didn't reach our leader slot yet. */
     831           0 :     fd_ext_poh_write_unlock();
     832           0 :     return 0;
     833           0 :   }
     834             : 
     835           0 :   if( FD_LIKELY( ctx->reset_slot==ctx->next_leader_slot ) ) {
     836             :     /* We were reset onto our leader slot, because the prior leader
     837             :        completed theirs, so we should start immediately, no need for a
     838             :        grace period. */
     839           0 :     fd_ext_poh_write_unlock();
     840           0 :     return 1;
     841           0 :   }
     842             : 
     843           0 :   long now_ns = fd_log_wallclock();
     844           0 :   long expected_start_time_ns = ctx->reset_slot_start_ns + (long)((double)(ctx->next_leader_slot-ctx->reset_slot)*ctx->slot_duration_ns);
     845             : 
     846             :   /* If a prior leader is still in the process of publishing their slot,
     847             :      delay ours to let them finish ... unless they are so delayed that
     848             :      we risk getting skipped by the leader following us.  1.2 seconds
     849             :      is a reasonable default here, although any value between 0 and 1.6
     850             :      seconds could be considered reasonable.  This is arbitrary and
     851             :      chosen due to intuition. */
     852             : 
     853           0 :   if( FD_UNLIKELY( now_ns<expected_start_time_ns+(long)(3.0*ctx->slot_duration_ns) ) ) {
     854             :     /* If the max_active_descendant is >= next_leader_slot, we waited
     855             :        too long and a leader after us started publishing to try and skip
     856             :        us.  Just start our leader slot immediately, we mgiht win ... */
     857             : 
     858           0 :     if( FD_LIKELY( ctx->max_active_descendant>=ctx->reset_slot && ctx->max_active_descendant<ctx->next_leader_slot ) ) {
     859             :       /* If one of the leaders between the reset slot and our leader
     860             :          slot is in the process of publishing (they have a descendant
     861             :          bank that is in progress of being replayed), then keep waiting.
     862             :          We probably wouldn't get a leader slot out before they
     863             :          finished.
     864             : 
     865             :          Unless... we are past the deadline to start our slot by more
     866             :          than 1.2 seconds, in which case we should probably start it to
     867             :          avoid getting skipped by the leader behind us. */
     868           0 :       fd_ext_poh_write_unlock();
     869           0 :       return 0;
     870           0 :     }
     871           0 :   }
     872             : 
     873           0 :   fd_ext_poh_write_unlock();
     874           0 :   return 1;
     875           0 : }
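                      : 
                      : /* A standalone sketch of the grace-period arithmetic above, with
                      :    illustrative values (assumes the usual 400ms slot duration; the
                      :    names mirror the ctx fields but this is not part of the tile): */
                      : 
                      : static long
                      : example_grace_deadline_ns( void ) {
                      :   long   reset_slot_start_ns = 0L;     /* wallclock when we reset onto reset_slot */
                      :   ulong  reset_slot          = 100UL;
                      :   ulong  next_leader_slot    = 103UL;  /* our leader slot, 3 after the reset slot */
                      :   double slot_duration_ns    = 400e6;  /* 400ms */
                      : 
                      :   long expected_start_time_ns = reset_slot_start_ns
                      :       + (long)((double)(next_leader_slot-reset_slot)*slot_duration_ns); /* 1.2e9 ns */
                      : 
                      :   /* Keep deferring to the prior leader until 3 slots (1.2s) past the
                      :      expected start. */
                      :   return expected_start_time_ns + (long)(3.0*slot_duration_ns);       /* 2.4e9 ns */
                      : }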
     876             : 
     877             : CALLED_FROM_RUST static inline void
     878             : publish_plugin_slot_start( fd_poh_ctx_t * ctx,
     879             :                            ulong          slot,
     880           0 :                            ulong          parent_slot ) {
     881           0 :   if( FD_UNLIKELY( !ctx->plugin_out->mem ) ) return;
     882             : 
     883           0 :   fd_plugin_msg_slot_start_t * slot_start = (fd_plugin_msg_slot_start_t *)fd_chunk_to_laddr( ctx->plugin_out->mem, ctx->plugin_out->chunk );
     884           0 :   *slot_start = (fd_plugin_msg_slot_start_t){ .slot = slot, .parent_slot = parent_slot };
     885           0 :   fd_stem_publish( ctx->stem, ctx->plugin_out->idx, FD_PLUGIN_MSG_SLOT_START, ctx->plugin_out->chunk, sizeof(fd_plugin_msg_slot_start_t), 0UL, 0UL, 0UL );
     886           0 :   ctx->plugin_out->chunk = fd_dcache_compact_next( ctx->plugin_out->chunk, sizeof(fd_plugin_msg_slot_start_t), ctx->plugin_out->chunk0, ctx->plugin_out->wmark );
     887           0 : }
     888             : 
     889             : CALLED_FROM_RUST static inline void
     890             : publish_plugin_slot_end( fd_poh_ctx_t * ctx,
     891             :                          ulong          slot,
     892           0 :                          ulong          cus_used ) {
     893           0 :   if( FD_UNLIKELY( !ctx->plugin_out->mem ) ) return;
     894             : 
     895           0 :   fd_plugin_msg_slot_end_t * slot_end = (fd_plugin_msg_slot_end_t *)fd_chunk_to_laddr( ctx->plugin_out->mem, ctx->plugin_out->chunk );
     896           0 :   *slot_end = (fd_plugin_msg_slot_end_t){ .slot = slot, .cus_used = cus_used };
     897           0 :   fd_stem_publish( ctx->stem, ctx->plugin_out->idx, FD_PLUGIN_MSG_SLOT_END, ctx->plugin_out->chunk, sizeof(fd_plugin_msg_slot_end_t), 0UL, 0UL, 0UL );
     898           0 :   ctx->plugin_out->chunk = fd_dcache_compact_next( ctx->plugin_out->chunk, sizeof(fd_plugin_msg_slot_end_t), ctx->plugin_out->chunk0, ctx->plugin_out->wmark );
     899           0 : }
     900             : 
     901             : CALLED_FROM_RUST static void
     902             : publish_became_leader( fd_poh_ctx_t * ctx,
     903           0 :                        ulong          slot ) {
     904           0 :   double tick_per_ns = fd_tempo_tick_per_ns( NULL );
     905           0 :   fd_histf_sample( ctx->begin_leader_delay, (ulong)((double)(fd_log_wallclock()-ctx->reset_slot_start_ns)/tick_per_ns) );
     906             : 
     907           0 :   if( FD_UNLIKELY( ctx->lagged_consecutive_leader_start ) ) {
      908             :     /* If we are mirroring Agave behavior, the wall clock gets reset
      909             :        here so we don't count time spent waiting for a bank to freeze
      910             :        or the replay stage to actually start the slot towards our 400ms.
      911             : 
      912             :        See extended comments in the config file on this option. */
     913           0 :     ctx->reset_slot_start_ns = fd_log_wallclock() - (long)((double)(slot-ctx->reset_slot)*ctx->slot_duration_ns);
     914           0 :   }
     915             : 
     916           0 :   long slot_start_ns = ctx->reset_slot_start_ns + (long)((double)(slot-ctx->reset_slot)*ctx->slot_duration_ns);
     917             : 
      918             :   /* No need to check flow control: there are always credits available
      919             :      when we are leader, because we will not "become" leader again until
      920             :      we are done, so at most one frag is in flight at a time. */
     921             : 
     922           0 :   uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->pack_out->mem, ctx->pack_out->chunk );
     923             : 
     924           0 :   fd_became_leader_t * leader = (fd_became_leader_t *)dst;
     925           0 :   leader->slot_start_ns           = slot_start_ns;
     926           0 :   leader->slot_end_ns             = (long)((double)slot_start_ns + ctx->slot_duration_ns);
     927           0 :   leader->bank                    = ctx->current_leader_bank;
     928           0 :   leader->max_microblocks_in_slot = ctx->max_microblocks_per_slot;
     929           0 :   leader->ticks_per_slot          = ctx->ticks_per_slot;
     930           0 :   leader->total_skipped_ticks     = ctx->ticks_per_slot*(slot-ctx->reset_slot);
     931             : 
     932           0 :   if( FD_UNLIKELY( leader->ticks_per_slot+leader->total_skipped_ticks>=MAX_SKIPPED_TICKS ) )
     933           0 :     FD_LOG_ERR(( "Too many skipped ticks %lu for slot %lu, chain must halt", leader->ticks_per_slot+leader->total_skipped_ticks, slot ));
     934             : 
     935           0 :   ulong sig = fd_disco_poh_sig( slot, POH_PKT_TYPE_BECAME_LEADER, 0UL );
     936           0 :   fd_stem_publish( ctx->stem, ctx->pack_out->idx, sig, ctx->pack_out->chunk, sizeof(fd_became_leader_t), 0UL, 0UL, 0UL );
     937           0 :   ctx->pack_out->chunk = fd_dcache_compact_next( ctx->pack_out->chunk, sizeof(fd_became_leader_t), ctx->pack_out->chunk0, ctx->pack_out->wmark );
     938           0 : }
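                      : 
                      : /* A small worked example of the skipped-tick accounting above,
                      :    assuming 64 ticks per slot: if we reset onto slot 100 and our
                      :    leader slot is 103, slots 100-102 were skipped.  (Illustrative
                      :    only, not part of the tile.) */
                      : 
                      : static ulong
                      : example_total_skipped_ticks( void ) {
                      :   ulong ticks_per_slot = 64UL;
                      :   ulong reset_slot     = 100UL;
                      :   ulong slot           = 103UL;
                      :   return ticks_per_slot*(slot-reset_slot); /* 192 ticks to publish before our own 64 */
                      : }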
     939             : 
      940             : /* The PoH tile knows when it should become leader by waiting for its
      941             :    leader slot (with the operating system clock).  This function exists
      942             :    so that, once it becomes the leader, the replay stage can tell it
      943             :    what the leader bank is.  See the notes in the long comment above
      944             :    for more on how this works. */
     945             : 
     946             : CALLED_FROM_RUST void
     947             : fd_ext_poh_begin_leader( void const * bank,
     948             :                          ulong        slot,
     949           0 :                          ulong        hashcnt_per_tick ) {
     950           0 :   fd_poh_ctx_t * ctx = fd_ext_poh_write_lock();
     951             : 
     952           0 :   FD_TEST( !ctx->current_leader_bank );
     953             : 
     954           0 :   if( FD_UNLIKELY( slot!=ctx->slot ) )             FD_LOG_ERR(( "Trying to begin leader slot %lu but we are now on slot %lu", slot, ctx->slot ));
     955           0 :   if( FD_UNLIKELY( slot!=ctx->next_leader_slot ) ) FD_LOG_ERR(( "Trying to begin leader slot %lu but next leader slot is %lu", slot, ctx->next_leader_slot ));
     956             : 
     957           0 :   if( FD_UNLIKELY( ctx->hashcnt_per_tick!=hashcnt_per_tick ) ) {
     958           0 :     FD_LOG_WARNING(( "hashes per tick changed from %lu to %lu", ctx->hashcnt_per_tick, hashcnt_per_tick ));
     959             : 
     960             :     /* Recompute derived information about the clock. */
     961           0 :     ctx->hashcnt_duration_ns = (double)ctx->tick_duration_ns/(double)hashcnt_per_tick;
     962           0 :     ctx->hashcnt_per_slot = ctx->ticks_per_slot*hashcnt_per_tick;
     963           0 :     ctx->hashcnt_per_tick = hashcnt_per_tick;
     964             : 
     965           0 :     if( FD_UNLIKELY( ctx->hashcnt_per_tick==1UL ) ) {
     966             :       /* Low power producer, maximum of one microblock per tick in the slot */
     967           0 :       ctx->max_microblocks_per_slot = ctx->ticks_per_slot;
     968           0 :     } else {
     969             :       /* See the long comment in after_credit for this limit */
     970           0 :       ctx->max_microblocks_per_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ctx->ticks_per_slot*(ctx->hashcnt_per_tick-1UL) );
     971           0 :     }
     972             : 
     973             :     /* Discard any ticks we might have done in the interim.  They will
     974             :        have the wrong number of hashes per tick.  We can just catch back
     975             :        up quickly if not too many slots were skipped and hopefully
     976             :        publish on time.  Note that tick production and verification of
     977             :        skipped slots is done for the eventual bank that publishes a
     978             :        slot, for example:
     979             : 
     980             :         Reset Slot:            998
     981             :         Epoch Transition Slot: 1000
     982             :         Leader Slot:           1002
     983             : 
     984             :        In this case, if a feature changing the hashcnt_per_tick is
     985             :        activated in slot 1000, and we are publishing empty ticks for
     986             :        slots 998, 999, 1000, and 1001, they should all have the new
     987             :        hashes_per_tick number of hashes, rather than the older one, or
     988             :        some combination. */
     989             : 
     990           0 :     FD_TEST( ctx->last_slot==ctx->reset_slot );
     991           0 :     FD_TEST( !ctx->last_hashcnt );
     992           0 :     ctx->slot = ctx->reset_slot;
     993           0 :     ctx->hashcnt = 0UL;
     994           0 :   }
     995             : 
     996           0 :   ctx->current_leader_bank     = bank;
     997           0 :   ctx->microblocks_lower_bound = 0UL;
     998           0 :   ctx->cus_used                = 0UL;
     999           0 :   ctx->expect_microblock_idx   = 0UL;
    1000             : 
    1001             :   /* We are about to start publishing to the shred tile for this slot
    1002             :      so update the highwater mark so we never republish in this slot
    1003             :      again.  Also check that the leader slot is greater than the
    1004             :      highwater, which should have been ensured earlier. */
    1005             : 
    1006           0 :   FD_TEST( ctx->highwater_leader_slot==ULONG_MAX || slot>=ctx->highwater_leader_slot );
    1007           0 :   ctx->highwater_leader_slot = fd_ulong_max( fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot ), slot );
    1008             : 
    1009           0 :   publish_became_leader( ctx, slot );
    1010           0 :   FD_LOG_INFO(( "fd_ext_poh_begin_leader(slot=%lu, highwater_leader_slot=%lu, last_slot=%lu, last_hashcnt=%lu)", slot, ctx->highwater_leader_slot, ctx->last_slot, ctx->last_hashcnt ));
    1011             : 
    1012           0 :   fd_ext_poh_write_unlock();
    1013           0 : }
    1014             : 
     1015             : /* Determine the next slot in the leader schedule in which we are the
     1016             :    leader, including the current slot.  If we are not leader in what
     1017             :    remains of the current and next epoch, return ULONG_MAX. */
    1018             : 
    1019             : static inline CALLED_FROM_RUST ulong
    1020           0 : next_leader_slot( fd_poh_ctx_t * ctx ) {
    1021             :   /* If we have published anything in a particular slot, then we
    1022             :      should never become leader for that slot again. */
    1023           0 :   ulong min_leader_slot = fd_ulong_max( ctx->slot, fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot ) );
    1024             : 
    1025           0 :   for(;;) {
    1026           0 :     fd_epoch_leaders_t * leaders = fd_stake_ci_get_lsched_for_slot( ctx->stake_ci, min_leader_slot ); /* Safe to call from Rust */
    1027           0 :     if( FD_UNLIKELY( !leaders ) ) break;
    1028             : 
    1029           0 :     while( min_leader_slot<(leaders->slot0+leaders->slot_cnt) ) {
    1030           0 :       fd_pubkey_t const * leader = fd_epoch_leaders_get( leaders, min_leader_slot ); /* Safe to call from Rust */
    1031           0 :       if( FD_UNLIKELY( !memcmp( leader->key, ctx->identity_key.key, 32UL ) ) ) return min_leader_slot;
    1032           0 :       min_leader_slot++;
    1033           0 :     }
    1034           0 :   }
    1035             : 
    1036           0 :   return ULONG_MAX;
    1037           0 : }
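                      : 
                      : /* The scan above, restated over a toy schedule (a plain array of
                      :    32-byte leader keys indexed by slot) to make the control flow
                      :    concrete.  Hypothetical helper, not part of the tile: */
                      : 
                      : static ulong
                      : toy_next_leader_slot( uchar const schedule[][ 32 ],
                      :                       ulong       slot_cnt,
                      :                       uchar const identity_key[ 32 ],
                      :                       ulong       min_leader_slot ) {
                      :   for( ulong s=min_leader_slot; s<slot_cnt; s++ ) {
                      :     if( !memcmp( schedule[ s ], identity_key, 32UL ) ) return s;
                      :   }
                      :   return ULONG_MAX; /* not leader anywhere in the known schedule */
                      : }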
    1038             : 
    1039             : static CALLED_FROM_RUST void
    1040           0 : no_longer_leader( fd_poh_ctx_t * ctx ) {
    1041           0 :   if( FD_UNLIKELY( ctx->current_leader_bank ) ) fd_ext_bank_release( ctx->current_leader_bank );
     1042             :   /* If we stop being leader in a slot, we can never become leader in
     1043             :      that slot again, and all in-flight microblocks for that slot
     1044             :      should be dropped. */
    1045           0 :   ctx->highwater_leader_slot = fd_ulong_max( fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot ), ctx->slot );
    1046           0 :   ctx->current_leader_bank = NULL;
    1047           0 :   ctx->next_leader_slot = next_leader_slot( ctx );
    1048             : 
    1049           0 :   FD_COMPILER_MFENCE();
    1050           0 :   fd_ext_poh_signal_leader_change( ctx->signal_leader_change );
    1051           0 :   FD_LOG_INFO(( "no_longer_leader(next_leader_slot=%lu)", ctx->next_leader_slot ));
    1052           0 : }
    1053             : 
    1054             : /* fd_ext_poh_reset is called by the Agave client when a slot on
    1055             :    the active fork has finished a block and we need to reset our PoH to
    1056             :    be ticking on top of the block it produced. */
    1057             : 
    1058             : CALLED_FROM_RUST void
    1059             : fd_ext_poh_reset( ulong         completed_bank_slot, /* The slot that successfully produced a block */
    1060             :                   uchar const * reset_blockhash,     /* The hash of the last tick in the produced block */
    1061           0 :                   ulong         hashcnt_per_tick     /* The hashcnt per tick of the bank that completed */ ) {
    1062           0 :   fd_poh_ctx_t * ctx = fd_ext_poh_write_lock();
    1063             : 
    1064           0 :   ulong slot_before_reset = ctx->slot;
    1065           0 :   int leader_before_reset = ctx->slot>=ctx->next_leader_slot;
    1066           0 :   if( FD_UNLIKELY( leader_before_reset && ctx->current_leader_bank ) ) {
     1067             :     /* If we were in the middle of a leader slot that we notified pack
     1068             :        to start packing for, we can never publish into that slot again;
     1069             :        mark all in-flight microblocks to be dropped. */
    1070           0 :     ctx->highwater_leader_slot = fd_ulong_max( fd_ulong_if( ctx->highwater_leader_slot==ULONG_MAX, 0UL, ctx->highwater_leader_slot ), 1UL+ctx->slot );
    1071           0 :   }
    1072             : 
    1073           0 :   ctx->leader_bank_start_ns = fd_log_wallclock(); /* safe to call from Rust */
    1074           0 :   if( FD_UNLIKELY( ctx->expect_sequential_leader_slot==(completed_bank_slot+1UL) ) ) {
    1075             :     /* If we are being reset onto a slot, it means some block was fully
    1076             :        processed, so we reset to build on top of it.  Typically we want
    1077             :        to update the reset_slot_start_ns to the current time, because
    1078             :        the network will give the next leader 400ms to publish,
    1079             :        regardless of how long the prior leader took.
    1080             : 
    1081             :        But: if we were leader in the prior slot, and the block was our
     1082             :        own, we can do better.  We know that the next slot should start
    1083             :        exactly 400ms after the prior one started, so we can use that as
    1084             :        the reset slot start time instead. */
    1085           0 :     ctx->reset_slot_start_ns = ctx->reset_slot_start_ns + (long)((double)((completed_bank_slot+1UL)-ctx->reset_slot)*ctx->slot_duration_ns);
    1086           0 :   } else {
    1087           0 :     ctx->reset_slot_start_ns = ctx->leader_bank_start_ns;
    1088           0 :   }
    1089           0 :   ctx->expect_sequential_leader_slot = ULONG_MAX;
    1090             : 
    1091           0 :   memcpy( ctx->hash, reset_blockhash, 32UL );
    1092           0 :   ctx->slot         = completed_bank_slot+1UL;
    1093           0 :   ctx->hashcnt      = 0UL;
    1094           0 :   ctx->last_slot    = ctx->slot;
    1095           0 :   ctx->last_hashcnt = 0UL;
    1096           0 :   ctx->reset_slot   = ctx->slot;
    1097             : 
    1098           0 :   if( FD_UNLIKELY( ctx->hashcnt_per_tick!=hashcnt_per_tick ) ) {
    1099           0 :     FD_LOG_WARNING(( "hashes per tick changed from %lu to %lu", ctx->hashcnt_per_tick, hashcnt_per_tick ));
    1100             : 
    1101             :     /* Recompute derived information about the clock. */
    1102           0 :     ctx->hashcnt_duration_ns = (double)ctx->tick_duration_ns/(double)hashcnt_per_tick;
    1103           0 :     ctx->hashcnt_per_slot = ctx->ticks_per_slot*hashcnt_per_tick;
    1104           0 :     ctx->hashcnt_per_tick = hashcnt_per_tick;
    1105             : 
    1106           0 :     if( FD_UNLIKELY( ctx->hashcnt_per_tick==1UL ) ) {
    1107             :       /* Low power producer, maximum of one microblock per tick in the slot */
    1108           0 :       ctx->max_microblocks_per_slot = ctx->ticks_per_slot;
    1109           0 :     } else {
    1110             :       /* See the long comment in after_credit for this limit */
    1111           0 :       ctx->max_microblocks_per_slot = fd_ulong_min( MAX_MICROBLOCKS_PER_SLOT, ctx->ticks_per_slot*(ctx->hashcnt_per_tick-1UL) );
    1112           0 :     }
    1113           0 :   }
    1114             : 
    1115             :   /* When we reset, we need to allow PoH to tick freely again rather
    1116             :      than being constrained.  If we are leader after the reset, this
    1117             :      is OK because we won't tick until we get a bank, and the lower
    1118             :      bound will be reset with the value from the bank. */
    1119           0 :   ctx->microblocks_lower_bound = ctx->max_microblocks_per_slot;
    1120             : 
    1121           0 :   if( FD_UNLIKELY( leader_before_reset ) ) {
     1122             :     /* We no longer have a leader bank if we are reset.  The replay
     1123             :        stage will call back again to give us a new one if we should
     1124             :        become leader for the reset slot.
    1125             : 
    1126             :        The order is important here, ctx->hashcnt must be updated before
    1127             :        calling no_longer_leader. */
    1128           0 :     no_longer_leader( ctx );
    1129           0 :   }
    1130           0 :   ctx->next_leader_slot = next_leader_slot( ctx );
    1131           0 :   FD_LOG_INFO(( "fd_ext_poh_reset(slot=%lu,next_leader_slot=%lu)", ctx->reset_slot, ctx->next_leader_slot ));
    1132             : 
    1133           0 :   if( FD_UNLIKELY( ctx->slot>=ctx->next_leader_slot ) ) {
    1134             :     /* We are leader after the reset... two cases: */
    1135           0 :     if( FD_LIKELY( ctx->slot==slot_before_reset ) ) {
    1136             :       /* 1. We are reset onto the same slot we are already leader on.
    1137             :             This is a common case when we have two leader slots in a
    1138             :             row, replay stage will reset us to our own slot.  No need to
    1139             :             do anything here, we already sent a SLOT_START. */
    1140           0 :       FD_TEST( leader_before_reset );
    1141           0 :     } else {
    1142             :       /* 2. We are reset onto a different slot. If we were leader
    1143             :             before, we should first end that slot, then begin the new
    1144             :             one if we are newly leader now. */
    1145           0 :       if( FD_LIKELY( leader_before_reset ) ) publish_plugin_slot_end( ctx, slot_before_reset, ctx->cus_used );
    1146           0 :       else                                   publish_plugin_slot_start( ctx, ctx->next_leader_slot, ctx->reset_slot );
    1147           0 :     }
    1148           0 :   } else {
    1149           0 :     if( FD_UNLIKELY( leader_before_reset ) ) publish_plugin_slot_end( ctx, slot_before_reset, ctx->cus_used );
    1150           0 :   }
    1151             : 
    1152           0 :   fd_ext_poh_write_unlock();
    1153           0 : }
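                      : 
                      : /* The sequential-leader branch above, restated as standalone
                      :    arithmetic: if the completed block was our own and sequential, the
                      :    next reset start is pinned exactly one slot after the prior start
                      :    rather than set to "now".  (Illustrative values; assumes 400ms
                      :    slots.) */
                      : 
                      : static long
                      : example_sequential_reset_start_ns( void ) {
                      :   long   prior_reset_start_ns = 1000L;  /* wallclock of the prior reset */
                      :   ulong  reset_slot           = 10UL;
                      :   ulong  completed_bank_slot  = 10UL;   /* our own slot just completed */
                      :   double slot_duration_ns     = 400e6;
                      :   return prior_reset_start_ns
                      :       + (long)((double)((completed_bank_slot+1UL)-reset_slot)*slot_duration_ns);
                      :   /* == prior start + exactly one 400ms slot */
                      : }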
    1154             : 
     1155             : /* Since the C ABI can't easily return an Option<Pubkey>, return 1 for
     1156             :    Some and 0 for None. */
    1157             : CALLED_FROM_RUST int
    1158             : fd_ext_poh_get_leader_after_n_slots( ulong n,
    1159           0 :                                      uchar out_pubkey[ static 32 ] ) {
    1160           0 :   fd_poh_ctx_t * ctx = fd_ext_poh_write_lock();
    1161           0 :   ulong slot = ctx->slot + n;
    1162           0 :   fd_epoch_leaders_t * leaders = fd_stake_ci_get_lsched_for_slot( ctx->stake_ci, slot ); /* Safe to call from Rust */
    1163             : 
    1164           0 :   int copied = 0;
    1165           0 :   if( FD_LIKELY( leaders ) ) {
    1166           0 :     fd_pubkey_t const * leader = fd_epoch_leaders_get( leaders, slot ); /* Safe to call from Rust */
    1167           0 :     if( FD_LIKELY( leader ) ) {
    1168           0 :       memcpy( out_pubkey, leader, 32UL );
    1169           0 :       copied = 1;
    1170           0 :     }
    1171           0 :   }
    1172           0 :   fd_ext_poh_write_unlock();
    1173           0 :   return copied;
    1174           0 : }
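                      : 
                      : /* Hypothetical usage of the accessor above (the real caller lives on
                      :    the Rust side): */
                      : 
                      : static int
                      : example_query_leader( uchar out_pubkey[ static 32 ] ) {
                      :   /* Returns 1 if the leader 4 slots from now is known, 0 otherwise. */
                      :   return fd_ext_poh_get_leader_after_n_slots( 4UL, out_pubkey );
                      : }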
    1175             : 
    1176             : FD_FN_CONST static inline ulong
    1177           3 : scratch_align( void ) {
    1178           3 :   return 128UL;
    1179           3 : }
    1180             : 
    1181             : FD_FN_PURE static inline ulong
    1182           3 : scratch_footprint( fd_topo_tile_t const * tile ) {
    1183           3 :   (void)tile;
    1184           3 :   ulong l = FD_LAYOUT_INIT;
    1185           3 :   l = FD_LAYOUT_APPEND( l, alignof( fd_poh_ctx_t ), sizeof( fd_poh_ctx_t ) );
    1186           3 :   l = FD_LAYOUT_APPEND( l, fd_stake_ci_align(), fd_stake_ci_footprint() );
    1187           3 :   l = FD_LAYOUT_APPEND( l, FD_SHA256_ALIGN, FD_SHA256_FOOTPRINT );
    1188           3 :   return FD_LAYOUT_FINI( l, scratch_align() );
    1189           3 : }
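                      : 
                      : /* In plain C, the FD_LAYOUT bookkeeping above is roughly the
                      :    following bump-allocator arithmetic (a sketch of the idea, not the
                      :    real macro expansion): */
                      : 
                      : static ulong
                      : example_align_up( ulong x, ulong a ) { return (x+a-1UL) & ~(a-1UL); }
                      : 
                      : static ulong
                      : example_footprint( void ) {
                      :   ulong l = 0UL;
                      :   l = example_align_up( l, alignof(fd_poh_ctx_t) ) + sizeof(fd_poh_ctx_t);
                      :   l = example_align_up( l, fd_stake_ci_align() )  + fd_stake_ci_footprint();
                      :   l = example_align_up( l, FD_SHA256_ALIGN )      + FD_SHA256_FOOTPRINT;
                      :   return example_align_up( l, scratch_align() );
                      : }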
    1190             : 
    1191             : static void
    1192             : publish_tick( fd_poh_ctx_t *      ctx,
    1193             :               fd_stem_context_t * stem,
    1194             :               uchar               hash[ static 32 ],
    1195           0 :               int                 is_skipped ) {
    1196           0 :   ulong hashcnt = ctx->hashcnt_per_tick*(1UL+(ctx->last_hashcnt/ctx->hashcnt_per_tick));
    1197             : 
    1198           0 :   uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->shred_out->mem, ctx->shred_out->chunk );
    1199             : 
    1200           0 :   FD_TEST( ctx->last_slot>=ctx->reset_slot );
    1201           0 :   fd_entry_batch_meta_t * meta = (fd_entry_batch_meta_t *)dst;
    1202           0 :   if( FD_UNLIKELY( is_skipped ) ) {
     1203             :     /* We are publishing ticks for a skipped slot; the reference tick
    1204             :        and block complete flags should always be zero. */
    1205           0 :     meta->reference_tick = 0UL;
    1206           0 :     meta->block_complete = 0;
    1207           0 :   } else {
    1208           0 :     meta->reference_tick = hashcnt/ctx->hashcnt_per_tick;
    1209           0 :     meta->block_complete = hashcnt==ctx->hashcnt_per_slot;
    1210           0 :   }
    1211             : 
    1212           0 :   ulong slot = fd_ulong_if( meta->block_complete, ctx->slot-1UL, ctx->slot );
    1213           0 :   meta->parent_offset = 1UL+slot-ctx->reset_slot;
    1214             : 
    1215           0 :   FD_TEST( hashcnt>ctx->last_hashcnt );
    1216           0 :   ulong hash_delta = hashcnt-ctx->last_hashcnt;
    1217             : 
    1218           0 :   dst += sizeof(fd_entry_batch_meta_t);
    1219           0 :   fd_entry_batch_header_t * tick = (fd_entry_batch_header_t *)dst;
    1220           0 :   tick->hashcnt_delta = hash_delta;
    1221           0 :   fd_memcpy( tick->hash, hash, 32UL );
    1222           0 :   tick->txn_cnt = 0UL;
    1223             : 
    1224           0 :   ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
    1225           0 :   ulong sz = sizeof(fd_entry_batch_meta_t)+sizeof(fd_entry_batch_header_t);
    1226           0 :   ulong sig = fd_disco_poh_sig( slot, POH_PKT_TYPE_MICROBLOCK, 0UL );
    1227           0 :   fd_stem_publish( stem, ctx->shred_out->idx, sig, ctx->shred_out->chunk, sz, 0UL, 0UL, tspub );
    1228           0 :   ctx->shred_out->chunk = fd_dcache_compact_next( ctx->shred_out->chunk, sz, ctx->shred_out->chunk0, ctx->shred_out->wmark );
    1229             : 
    1230           0 :   if( FD_UNLIKELY( hashcnt==ctx->hashcnt_per_slot ) ) {
    1231           0 :     ctx->last_slot++;
    1232           0 :     ctx->last_hashcnt = 0UL;
    1233           0 :   } else {
    1234           0 :     ctx->last_hashcnt = hashcnt;
    1235           0 :   }
    1236           0 : }
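                      : 
                      : /* The first line of publish_tick rounds last_hashcnt up to the next
                      :    tick boundary, advancing a full tick when already exactly on one.
                      :    Illustrative arithmetic with T = hashcnt_per_tick = 62500: */
                      : 
                      : static ulong
                      : example_next_tick_hashcnt( void ) {
                      :   ulong T            = 62500UL;
                      :   ulong last_hashcnt = 125000UL;    /* exactly at tick 2 */
                      :   return T*(1UL+(last_hashcnt/T));  /* 187500: tick 3 */
                      : }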
    1237             : 
    1238             : static inline void
    1239             : after_credit( fd_poh_ctx_t *      ctx,
    1240             :               fd_stem_context_t * stem,
    1241             :               int *               opt_poll_in,
    1242           0 :               int *               charge_busy ) {
    1243           0 :   ctx->stem = stem;
    1244             : 
    1245           0 :   FD_COMPILER_MFENCE();
    1246           0 :   if( FD_UNLIKELY( fd_poh_waiting_lock ) )  {
    1247           0 :     FD_VOLATILE( fd_poh_returned_lock ) = 1UL;
    1248           0 :     FD_COMPILER_MFENCE();
    1249           0 :     for(;;) {
    1250           0 :       if( FD_UNLIKELY( !FD_VOLATILE_CONST( fd_poh_returned_lock ) ) ) break;
    1251           0 :       FD_SPIN_PAUSE();
    1252           0 :     }
    1253           0 :     FD_COMPILER_MFENCE();
    1254           0 :     FD_VOLATILE( fd_poh_waiting_lock ) = 0UL;
    1255           0 :     *opt_poll_in = 0;
    1256           0 :     *charge_busy = 1;
    1257           0 :     return;
    1258           0 :   }
    1259           0 :   FD_COMPILER_MFENCE();
    1260             : 
    1261           0 :   int is_leader = ctx->next_leader_slot!=ULONG_MAX && ctx->slot>=ctx->next_leader_slot;
    1262           0 :   if( FD_UNLIKELY( is_leader && !ctx->current_leader_bank ) ) {
    1263             :     /* If we are the leader, but we didn't yet learn what the leader
    1264             :        bank object is from the replay stage, do not do any hashing.
    1265             : 
    1266             :        This is not ideal, but greatly simplifies the control flow. */
    1267           0 :     return;
    1268           0 :   }
    1269             : 
    1270             :   /* If we have skipped ticks pending because we skipped some slots to
    1271             :      become leader, register them now one at a time. */
    1272           0 :   if( FD_UNLIKELY( is_leader && ctx->last_slot<ctx->slot ) ) {
    1273           0 :     ulong publish_hashcnt = ctx->last_hashcnt+ctx->hashcnt_per_tick;
    1274           0 :     ulong tick_idx = (ctx->last_slot*ctx->ticks_per_slot+publish_hashcnt/ctx->hashcnt_per_tick)%MAX_SKIPPED_TICKS;
    1275             : 
    1276           0 :     fd_ext_poh_register_tick( ctx->current_leader_bank, ctx->skipped_tick_hashes[ tick_idx ] );
    1277           0 :     publish_tick( ctx, stem, ctx->skipped_tick_hashes[ tick_idx ], 1 );
    1278             : 
    1279             :     /* If we are catching up now and publishing a bunch of skipped
    1280             :        ticks, we do not want to process any incoming microblocks until
    1281             :        all the skipped ticks have been published out; otherwise we would
    1282             :        intersperse skipped tick messages with microblocks. */
    1283           0 :     *opt_poll_in = 0;
    1284           0 :     *charge_busy = 1;
    1285           0 :     return;
    1286           0 :   }
    1287             : 
    1288           0 :   int low_power_mode = ctx->hashcnt_per_tick==1UL;
    1289             : 
    1290             :   /* If we are the leader, always leave enough capacity in the slot so
     1291             :      that we can mix in any potential microblocks still coming from the
    1292             :      pack tile for this slot. */
    1293           0 :   ulong max_remaining_microblocks = ctx->max_microblocks_per_slot - ctx->microblocks_lower_bound;
    1294             :   /* With hashcnt_per_tick hashes per tick, we actually get
    1295             :      hashcnt_per_tick-1 chances to mixin a microblock.  For each tick
     1296             :      span that we need to reserve, we also need to reserve the hashcnt
     1297             :      for the tick itself, hence the additional
     1298             :      ceil( max_remaining_microblocks/(hashcnt_per_tick-1) ) term.
    1299             : 
    1300             :      However, if hashcnt_per_tick is 1 because we're in low power mode,
    1301             :      this should probably just be max_remaining_microblocks. */
    1302           0 :   ulong max_remaining_ticks_or_microblocks = max_remaining_microblocks;
    1303           0 :   if( FD_LIKELY( !low_power_mode ) ) max_remaining_ticks_or_microblocks += (max_remaining_microblocks+ctx->hashcnt_per_tick-2UL)/(ctx->hashcnt_per_tick-1UL);
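                      : 
                      :   /* Worked example of the reservation above: with hashcnt_per_tick
                      :      = 12 and max_remaining_microblocks = 23, we reserve 23 mixin
                      :      hashes plus ceil(23/11) = 3 tick hashes, 26 total. */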
    1304             : 
    1305           0 :   ulong restricted_hashcnt = fd_ulong_if( ctx->hashcnt_per_slot>=max_remaining_ticks_or_microblocks, ctx->hashcnt_per_slot-max_remaining_ticks_or_microblocks, 0UL );
    1306             : 
    1307           0 :   ulong min_hashcnt = ctx->hashcnt;
    1308             : 
    1309           0 :   if( FD_LIKELY( !low_power_mode ) ) {
    1310             :     /* Recall that there are two kinds of events that will get published
    1311             :        to the shredder,
    1312             : 
    1313             :          (a) Ticks. These occur every 62,500 (hashcnt_per_tick) hashcnts,
    1314             :              and there will be 64 (ticks_per_slot) of them in each slot.
    1315             : 
    1316             :              Ticks must not have any transactions mixed into the hash.
    1317             :              This is not strictly needed in theory, but is required by the
    1318             :              current consensus protocol.  They get published here in
    1319             :              after_credit.
    1320             : 
    1321             :          (b) Microblocks.  These can occur at any other hashcnt, as long
    1322             :              as it is not a tick.  Microblocks cannot be empty, and must
     1323             :              have at least one transaction mixed in.  These get
    1324             :              published in after_frag.
    1325             : 
    1326             :        If hashcnt_per_tick is 1, then we are in low power mode and the
    1327             :        following does not apply, since we can mix in transactions at any
    1328             :        time.
    1329             : 
    1330             :        In the normal, non-low-power mode, though, we have to be careful
    1331             :        to make sure that we do not publish microblocks on tick
    1332             :        boundaries.  To do that, we need to obey two rules:
    1333             :          (i)  after_credit must not leave hashcnt one before a tick
    1334             :               boundary
    1335             :          (ii) if after_credit begins one before a tick boundary, it must
    1336             :               advance hashcnt and publish the tick
    1337             : 
    1338             :        There's some interplay between min_hashcnt and restricted_hashcnt
    1339             :        here, and we need to show that there's always a value of
    1340             :        target_hashcnt we can pick such that
    1341             :            min_hashcnt <= target_hashcnt <= restricted_hashcnt.
    1342             :        We'll prove this by induction for current_slot==0 and
    1343             :        is_leader==true, since all other slots should be the same.
    1344             : 
    1345             :        Let m_j and r_j be the min_hashcnt and restricted_hashcnt
    1346             :        (respectively) for the jth call to after_credit in a slot.  We
    1347             :        want to show that for all values of j, it's possible to pick a
    1348             :        value h_j, the value of target_hashcnt for the jth call to
    1349             :        after_credit (which is also the value of hashcnt after
    1350             :        after_credit has completed) such that m_j<=h_j<=r_j.
    1351             : 
    1352             :        Additionally, let T be hashcnt_per_tick and N be ticks_per_slot.
    1353             : 
    1354             :        Starting with the base case, j==0.  m_j=0, and
    1355             :          r_0 = N*T - max_microblocks_per_slot
    1356             :                    - ceil(max_microblocks_per_slot/(T-1)).
    1357             : 
    1358             :        This is monotonic decreasing in max_microblocks_per_slot, so it
    1359             :        achieves its minimum when max_microblocks_per_slot is its
    1360             :        maximum.
    1361             :            r_0 >= N*T - N*(T-1) - ceil( (N*(T-1))/(T-1))
    1362             :                 = N*T - N*(T-1)-N = 0.
    1363             :        Thus, m_0 <= r_0, as desired.
    1364             : 
    1365             : 
    1366             : 
    1367             :        Then, for the inductive step, assume there exists h_j such that
    1368             :        m_j<=h_j<=r_j, and we want to show that there exists h_{j+1},
    1369             :        which is the same as showing m_{j+1}<=r_{j+1}.
    1370             : 
    1371             :        Let a_j be 1 if we had a microblock immediately following the jth
    1372             :        call to after_credit, and 0 otherwise.  Then hashcnt at the start
    1373             :        of the (j+1)th call to after_frag is h_j+a_j.
    1374             :        Also, set b_{j+1}=1 if we are in the case covered by rule (ii)
    1375             :        above during the (j+1)th call to after_credit, i.e. if
    1376             :        (h_j+a_j)%T==T-1.  Thus, m_{j+1} = h_j + a_j + b_{j+1}.
    1377             : 
    1378             :        If we received an additional microblock, then
    1379             :        max_remaining_microblocks goes down by 1, and
    1380             :        max_remaining_ticks_or_microblocks goes down by either 1 or 2,
    1381             :        which means restricted_hashcnt goes up by either 1 or 2.  In
    1382             :        particular, it goes up by 2 if the new value of
    1383             :        max_remaining_microblocks (at the start of the (j+1)th call to
    1384             :        after_credit) is congruent to 0 mod T-1.  Let b'_{j+1} be 1 if
    1385             :        this condition is met and 0 otherwise.  If we receive a
    1386             :        done_packing message, restricted_hashcnt can go up by more, but
    1387             :        we can ignore that case, since it is less restrictive.
    1388             :        Thus, r_{j+1}=r_j+a_j+b'_{j+1}.
    1389             : 
    1390             :        If h_j < r_j (strictly less), then h_j+a_j < r_j+a_j.  And thus,
    1391             :        since b_{j+1}<=b'_{j+1}+1, just by virtue of them both being
    1392             :        binary,
    1393             :              h_j + a_j + b_{j+1} <  r_j + a_j + b'_{j+1} + 1,
    1394             :        which is the same (for integers) as
    1395             :              h_j + a_j + b_{j+1} <= r_j + a_j + b'_{j+1},
    1396             :                  m_{j+1}         <= r_{j+1}
    1397             : 
    1398             :        On the other hand, if h_j==r_j, this is easy unless b_{j+1}==1,
    1399             :        which can also only happen if a_j==1.  Then (h_j+a_j)%T==T-1,
    1400             :        which means there's an integer k such that
    1401             : 
     1402             :              h_j+a_j == (ticks_per_slot-k)*T - 1
     1403             :              h_j     == ticks_per_slot*T - k*(T-1) - 1 - (k+1)
     1404             :                      == ticks_per_slot*T - (k*(T-1)+1) - ceil( (k*(T-1)+1)/(T-1) )
    1405             : 
    1406             :        Since h_j==r_j in this case, and
    1407             :        r_j==(ticks_per_slot*T) - max_remaining_microblocks_j - ceil(max_remaining_microblocks_j/(T-1)),
    1408             :        we can see that the value of max_remaining_microblocks at the
    1409             :        start of the jth call to after_credit is k*(T-1)+1.  Again, since
    1410             :        a_j==1, then the value of max_remaining_microblocks at the start
    1411             :        of the j+1th call to after_credit decreases by 1 to k*(T-1),
    1412             :        which means b'_{j+1}=1.
    1413             : 
    1414             :        Thus, h_j + a_j + b_{j+1} == r_j + a_j + b'_{j+1}, so, in
    1415             :        particular, h_{j+1}<=r_{j+1} as desired. */
     1416           0 :     min_hashcnt += (ulong)(min_hashcnt%ctx->hashcnt_per_tick == (ctx->hashcnt_per_tick-1UL)); /* add b_{j+1}, enforcing rule (ii) */
    1417           0 :   }
    1418             :   /* Now figure out how many hashes are needed to "catch up" the hash
    1419             :      count to the current system clock, and clamp it to the allowed
    1420             :      range. */
    1421           0 :   long now = fd_log_wallclock();
    1422           0 :   ulong target_hashcnt;
    1423           0 :   if( FD_LIKELY( !is_leader ) ) {
    1424           0 :     target_hashcnt = (ulong)((double)(now - ctx->reset_slot_start_ns) / ctx->hashcnt_duration_ns) - (ctx->slot-ctx->reset_slot)*ctx->hashcnt_per_slot;
    1425           0 :   } else {
    1426             :     /* We might have gotten very behind on hashes, but if we are leader
    1427             :        we want to catch up gradually over the remainder of our leader
    1428             :        slot, not all at once right now.  This helps keep the tile from
    1429             :        being oversubscribed and taking a long time to process incoming
    1430             :        microblocks. */
    1431           0 :     long expected_slot_start_ns = ctx->reset_slot_start_ns + (long)((double)(ctx->slot-ctx->reset_slot)*ctx->slot_duration_ns);
    1432           0 :     double actual_slot_duration_ns = ctx->slot_duration_ns<(double)(ctx->leader_bank_start_ns - expected_slot_start_ns) ? 0.0 : ctx->slot_duration_ns - (double)(ctx->leader_bank_start_ns - expected_slot_start_ns);
    1433           0 :     double actual_hashcnt_duration_ns = actual_slot_duration_ns / (double)ctx->hashcnt_per_slot;
    1434           0 :     target_hashcnt = fd_ulong_if( actual_hashcnt_duration_ns==0.0, restricted_hashcnt, (ulong)((double)(now - ctx->leader_bank_start_ns) / actual_hashcnt_duration_ns) );
    1435           0 :   }
    1436             :   /* Clamp to [min_hashcnt, restricted_hashcnt] as above */
    1437           0 :   target_hashcnt = fd_ulong_max( fd_ulong_min( target_hashcnt, restricted_hashcnt ), min_hashcnt );
    1438             : 
    1439             :   /* The above proof showed that it was always possible to pick a value
    1440             :      of target_hashcnt, but we still have a lot of freedom in how to
    1441             :      pick it.  It simplifies the code a lot if we don't keep going after
    1442             :      a tick in this function.  In particular, we want to publish at most
    1443             :      1 tick in this call, since otherwise we could consume infinite
    1444             :      credits to publish here.  The credits are set so that we should
    1445             :      only ever publish one tick during this loop.  Also, all the extra
    1446             :      stuff (leader transitions, publishing ticks, etc.) we have to do
    1447             :      happens at tick boundaries, so this lets us consolidate all those
    1448             :      cases.
    1449             : 
    1450             :      Mathematically, since the current value of hashcnt is h_j+a_j, the
    1451             :      next tick (advancing a full tick if we're currently at a tick) is
    1452             :      t_{j+1} = T*(floor( (h_j+a_j)/T )+1).  We need to show that if we set
    1453             :      h'_{j+1} = min( h_{j+1}, t_{j+1} ), it is still valid.
    1454             : 
    1455             :      First, h'_{j+1} <= h_{j+1} <= r_{j+1}, so we're okay in that
    1456             :      direction.
    1457             : 
    1458             :      Next, observe that t_{j+1}>=h_j + a_j + 1, and recall that b_{j+1}
    1459             :      is 0 or 1. So then,
    1460             :                     t_{j+1} >= h_j+a_j+b_{j+1} = m_{j+1}.
    1461             : 
     1462             :      We know h_{j+1} >= m_{j+1} from before, so then h'_{j+1} >=
    1463             :      m_{j+1}, as desired. */
    1464             : 
    1465           0 :   ulong next_tick_hashcnt = ctx->hashcnt_per_tick * (1UL+(ctx->hashcnt/ctx->hashcnt_per_tick));
    1466           0 :   target_hashcnt = fd_ulong_min( target_hashcnt, next_tick_hashcnt );
    1467             : 
    1468             :   /* We still need to enforce rule (i). We know that min_hashcnt%T !=
    1469             :      T-1 because of rule (ii).  That means that if target_hashcnt%T ==
    1470             :      T-1 at this point, target_hashcnt > min_hashcnt (notice the
    1471             :      strict), so target_hashcnt-1 >= min_hashcnt and is thus still a
    1472             :      valid choice for target_hashcnt. */
    1473           0 :   target_hashcnt -= (ulong)( (!low_power_mode) & ((target_hashcnt%ctx->hashcnt_per_tick)==(ctx->hashcnt_per_tick-1UL)) );
    1474             : 
    1475           0 :   FD_TEST( target_hashcnt >= ctx->hashcnt       );
    1476           0 :   FD_TEST( target_hashcnt >= min_hashcnt        );
    1477           0 :   FD_TEST( target_hashcnt <= restricted_hashcnt );
    1478             : 
    1479           0 :   if( FD_UNLIKELY( ctx->hashcnt==target_hashcnt ) ) return; /* Nothing to do, don't publish a tick twice */
    1480             : 
    1481           0 :   *charge_busy = 1;
    1482             : 
    1483           0 :   while( ctx->hashcnt<target_hashcnt ) {
    1484           0 :     fd_sha256_hash( ctx->hash, 32UL, ctx->hash );
    1485           0 :     ctx->hashcnt++;
    1486           0 :   }
    1487             : 
    1488           0 :   if( FD_UNLIKELY( ctx->hashcnt==ctx->hashcnt_per_slot ) ) {
    1489           0 :     ctx->slot++;
    1490           0 :     ctx->hashcnt = 0UL;
    1491           0 :   }
    1492             : 
    1493           0 :   if( FD_UNLIKELY( !is_leader && !(ctx->hashcnt%ctx->hashcnt_per_tick ) ) ) {
    1494             :     /* We finished a tick while not leader... save the current hash so
    1495             :        it can be played back into the bank when we become the leader. */
    1496           0 :     ulong tick_idx = (ctx->slot*ctx->ticks_per_slot+ctx->hashcnt/ctx->hashcnt_per_tick)%MAX_SKIPPED_TICKS;
    1497           0 :     fd_memcpy( ctx->skipped_tick_hashes[ tick_idx ], ctx->hash, 32UL );
    1498             : 
    1499           0 :     ulong initial_tick_idx = (ctx->last_slot*ctx->ticks_per_slot+ctx->last_hashcnt/ctx->hashcnt_per_tick)%MAX_SKIPPED_TICKS;
    1500           0 :     if( FD_UNLIKELY( tick_idx==initial_tick_idx ) ) FD_LOG_ERR(( "Too many skipped ticks from slot %lu to slot %lu, chain must halt", ctx->last_slot, ctx->slot ));
    1501           0 :   }
    1502             : 
    1503           0 :   if( FD_UNLIKELY( is_leader && !(ctx->hashcnt%ctx->hashcnt_per_tick) ) ) {
    1504             :     /* We ticked while leader... tell the leader bank. */
    1505           0 :     fd_ext_poh_register_tick( ctx->current_leader_bank, ctx->hash );
    1506             : 
    1507             :     /* And send an empty microblock (a tick) to the shred tile. */
    1508           0 :     publish_tick( ctx, stem, ctx->hash, 0 );
    1509           0 :   }
    1510             : 
    1511           0 :   if( FD_UNLIKELY( !is_leader && ctx->slot>=ctx->next_leader_slot ) ) {
    1512             :     /* We ticked while not leader and are now leader... transition
    1513             :        the state machine. */
    1514           0 :     publish_plugin_slot_start( ctx, ctx->next_leader_slot, ctx->reset_slot );
    1515           0 :     FD_LOG_INFO(( "fd_poh_ticked_into_leader(slot=%lu, reset_slot=%lu)", ctx->next_leader_slot, ctx->reset_slot ));
    1516           0 :   }
    1517             : 
    1518           0 :   if( FD_UNLIKELY( is_leader && ctx->slot>ctx->next_leader_slot ) ) {
    1519             :     /* We ticked while leader and are no longer leader... transition
    1520             :        the state machine. */
    1521           0 :     FD_TEST( !max_remaining_microblocks );
    1522           0 :     publish_plugin_slot_end( ctx, ctx->next_leader_slot, ctx->cus_used );
    1523           0 :     FD_LOG_INFO(( "fd_poh_ticked_outof_leader(slot=%lu)", ctx->next_leader_slot ));
    1524             : 
    1525           0 :     no_longer_leader( ctx );
    1526           0 :     ctx->expect_sequential_leader_slot = ctx->slot;
    1527             : 
    1528           0 :     double tick_per_ns = fd_tempo_tick_per_ns( NULL );
    1529           0 :     fd_histf_sample( ctx->slot_done_delay, (ulong)((double)(fd_log_wallclock()-ctx->reset_slot_start_ns)/tick_per_ns) );
    1530           0 :     ctx->next_leader_slot = next_leader_slot( ctx );
    1531             : 
    1532           0 :     if( FD_UNLIKELY( ctx->slot>=ctx->next_leader_slot ) ) {
    1533             :       /* We finished a leader slot, and are immediately leader for the
    1534             :          following slot... transition. */
    1535           0 :       publish_plugin_slot_start( ctx, ctx->next_leader_slot, ctx->next_leader_slot-1UL );
    1536           0 :       FD_LOG_INFO(( "fd_poh_ticked_into_leader(slot=%lu, reset_slot=%lu)", ctx->next_leader_slot, ctx->next_leader_slot-1UL ));
    1537           0 :     }
    1538           0 :   }
    1539           0 : }
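                      : 
                      : /* The non-leader catch-up target above, restated as standalone
                      :    arithmetic.  Illustrative values: 62500 hashes/tick * 64 ticks =
                      :    4M hashes per 400ms slot, i.e. 100ns per hash.  Not part of the
                      :    tile: */
                      : 
                      : static ulong
                      : example_nonleader_target_hashcnt( void ) {
                      :   long   now_minus_reset_ns  = 1000000000L; /* 1s (2.5 slots) since the reset */
                      :   double hashcnt_duration_ns = 100.0;
                      :   ulong  hashcnt_per_slot    = 4000000UL;
                      :   ulong  slots_since_reset   = 2UL;         /* ctx->slot - ctx->reset_slot */
                      :   return (ulong)((double)now_minus_reset_ns/hashcnt_duration_ns)
                      :        - slots_since_reset*hashcnt_per_slot;  /* 2,000,000: mid-slot */
                      : }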
    1540             : 
    1541             : static inline void
    1542           0 : metrics_write( fd_poh_ctx_t * ctx ) {
    1543           0 :   FD_MHIST_COPY( POH, BEGIN_LEADER_DELAY_SECONDS,     ctx->begin_leader_delay );
    1544           0 :   FD_MHIST_COPY( POH, FIRST_MICROBLOCK_DELAY_SECONDS, ctx->first_microblock_delay );
    1545           0 :   FD_MHIST_COPY( POH, SLOT_DONE_DELAY_SECONDS,        ctx->slot_done_delay );
    1546           0 : }
    1547             : 
    1548             : static int
    1549             : before_frag( fd_poh_ctx_t * ctx,
    1550             :              ulong          in_idx,
    1551             :              ulong          seq,
    1552           0 :              ulong          sig ) {
    1553           0 :   (void)in_idx;
    1554           0 :   (void)seq;
    1555             : 
    1556           0 :   if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_BANK ) ) {
    1557           0 :     ulong microblock_idx = fd_disco_bank_sig_microblock_idx( sig );
    1558           0 :     FD_TEST( microblock_idx>=ctx->expect_microblock_idx );
    1559             : 
    1560             :     /* Return the fragment to stem so we can process it later, if it's
    1561             :        not next in the sequence. */
    1562           0 :     if( FD_UNLIKELY( microblock_idx>ctx->expect_microblock_idx ) ) return -1;
    1563             : 
    1564           0 :     ctx->expect_microblock_idx++;
    1565           0 :   }
    1566             : 
    1567           0 :   return 0;
    1568           0 : }
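                      : 
                      : /* The -1 return above implements in-order consumption over a
                      :    possibly out-of-order stream: if bank microblocks arrive as 0, 2,
                      :    1, index 2 is handed back to stem and redelivered later, after 1
                      :    has been consumed and expect_microblock_idx has advanced past it. */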
    1569             : 
    1570             : static inline void
    1571             : during_frag( fd_poh_ctx_t * ctx,
    1572             :              ulong          in_idx,
    1573             :              ulong          seq,
    1574             :              ulong          sig,
    1575             :              ulong          chunk,
    1576           0 :              ulong          sz  ) {
    1577           0 :   (void)seq;
    1578           0 :   (void)sig;
    1579             : 
    1580           0 :   ctx->skip_frag = 0;
    1581             : 
    1582           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_STAKE ) ) {
    1583           0 :     if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
    1584           0 :       FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
    1585           0 :             ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
    1586             : 
    1587           0 :     uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
    1588           0 :     fd_stake_ci_stake_msg_init( ctx->stake_ci, dcache_entry );
    1589           0 :     return;
    1590           0 :   }
    1591             : 
    1592           0 :   ulong pkt_type;
    1593           0 :   ulong slot;
    1594           0 :   switch( ctx->in_kind[ in_idx ] ) {
    1595           0 :     case IN_KIND_BANK: {
    1596           0 :       pkt_type = POH_PKT_TYPE_MICROBLOCK;
    1597           0 :       slot = fd_disco_bank_sig_slot( sig );
    1598           0 :       break;
    1599           0 :     }
    1600           0 :     case IN_KIND_PACK: {
    1601           0 :       pkt_type = fd_disco_poh_sig_pkt_type( sig );
    1602           0 :       slot = fd_disco_poh_sig_slot( sig );
    1603           0 :       break;
    1604           0 :     }
    1605           0 :     default:
    1606           0 :       FD_LOG_ERR(( "unexpected in_kind %d", ctx->in_kind[ in_idx ] ));
    1607           0 :   }
    1608             : 
    1609           0 :   int is_frag_for_prior_leader_slot = 0;
    1610           0 :   if( FD_LIKELY( pkt_type==POH_PKT_TYPE_DONE_PACKING || pkt_type==POH_PKT_TYPE_MICROBLOCK ) ) {
    1611             :     /* The following sequence is possible...
    1612             : 
    1613             :         1. We become leader in slot 10
    1614             :         2. While leader, we switch to a fork that is on slot 8, where
    1615             :             we are leader
    1616             :         3. We get the in-flight microblocks for slot 10
    1617             : 
    1618             :       These in-flight microblocks need to be dropped, so we check
    1619             :       against the high water mark (highwater_leader_slot) rather than
    1620             :       the current hashcnt here when determining what to drop.
    1621             : 
     1622             :       We know that if the slot is lower than the high water mark it is
     1623             :       from a stale leader slot, because we will not become leader for the
     1624             :       same slot twice even if we are reset back in time (to prevent duplicate blocks). */
    1625           0 :     is_frag_for_prior_leader_slot = slot<ctx->highwater_leader_slot;
    1626           0 :   }
    1627             : 
    1628           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_PACK ) ) {
     1629             :     /* We now know the real number of microblocks published, so set an
     1630             :        exact bound for once we receive them. */
    1631           0 :     ctx->skip_frag = 1;
    1632           0 :     if( pkt_type==POH_PKT_TYPE_DONE_PACKING ) {
    1633           0 :       if( FD_UNLIKELY( is_frag_for_prior_leader_slot ) ) return;
    1634             : 
    1635           0 :       FD_TEST( ctx->microblocks_lower_bound<=ctx->max_microblocks_per_slot );
    1636           0 :       fd_done_packing_t const * done_packing = fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
    1637           0 :       FD_LOG_INFO(( "done_packing(slot=%lu,seen_microblocks=%lu,microblocks_in_slot=%lu)",
    1638           0 :                     ctx->slot,
    1639           0 :                     ctx->microblocks_lower_bound,
    1640           0 :                     done_packing->microblocks_in_slot ));
    1641           0 :       ctx->microblocks_lower_bound += ctx->max_microblocks_per_slot - done_packing->microblocks_in_slot;
    1642           0 :     }
    1643           0 :     return;
    1644           0 :   } else {
    1645           0 :     if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>USHORT_MAX ) )
    1646           0 :       FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz, ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
    1647             : 
    1648           0 :     uchar * src = (uchar *)fd_chunk_to_laddr( ctx->in[ in_idx ].mem, chunk );
    1649             : 
    1650           0 :     fd_memcpy( ctx->_txns, src, sz-sizeof(fd_microblock_trailer_t) );
    1651           0 :     fd_memcpy( ctx->_microblock_trailer, src+sz-sizeof(fd_microblock_trailer_t), sizeof(fd_microblock_trailer_t) );
    1652             : 
    1653           0 :     ctx->skip_frag = is_frag_for_prior_leader_slot;
    1654           0 :   }
    1655           0 : }
    1656             : 
    1657             : static void
    1658             : publish_microblock( fd_poh_ctx_t *      ctx,
    1659             :                     fd_stem_context_t * stem,
    1660             :                     ulong               slot,
    1661             :                     ulong               hashcnt_delta,
    1662           0 :                     ulong               txn_cnt ) {
    1663           0 :   uchar * dst = (uchar *)fd_chunk_to_laddr( ctx->shred_out->mem, ctx->shred_out->chunk );
    1664           0 :   FD_TEST( slot>=ctx->reset_slot );
    1665           0 :   fd_entry_batch_meta_t * meta = (fd_entry_batch_meta_t *)dst;
    1666           0 :   meta->parent_offset = 1UL+slot-ctx->reset_slot;
    1667           0 :   meta->reference_tick = (ctx->hashcnt/ctx->hashcnt_per_tick) % ctx->ticks_per_slot;
    1668           0 :   meta->block_complete = !ctx->hashcnt;
    1669             : 
    1670           0 :   dst += sizeof(fd_entry_batch_meta_t);
    1671           0 :   fd_entry_batch_header_t * header = (fd_entry_batch_header_t *)dst;
    1672           0 :   header->hashcnt_delta = hashcnt_delta;
    1673           0 :   fd_memcpy( header->hash, ctx->hash, 32UL );
    1674             : 
    1675           0 :   dst += sizeof(fd_entry_batch_header_t);
    1676           0 :   ulong payload_sz = 0UL;
    1677           0 :   ulong included_txn_cnt = 0UL;
    1678           0 :   for( ulong i=0UL; i<txn_cnt; i++ ) {
    1679           0 :     fd_txn_p_t * txn = (fd_txn_p_t *)(ctx->_txns + i*sizeof(fd_txn_p_t));
    1680           0 :     if( FD_UNLIKELY( !(txn->flags & FD_TXN_P_FLAGS_EXECUTE_SUCCESS) ) ) continue;
    1681             : 
    1682           0 :     fd_memcpy( dst, txn->payload, txn->payload_sz );
    1683           0 :     payload_sz += txn->payload_sz;
    1684           0 :     dst        += txn->payload_sz;
    1685           0 :     included_txn_cnt++;
    1686           0 :   }
    1687           0 :   header->txn_cnt = included_txn_cnt;
    1688             : 
     1689             :   /* We always have credits to publish here: the burst value is 5
     1690             :      credits (see STEM_BURST below), and at most publish_tick() and
     1691             :      the leader/plugin slot notifications have consumed four of them,
     1692             :      leaving one credit here to publish the microblock. */
    1693           0 :   ulong tspub = (ulong)fd_frag_meta_ts_comp( fd_tickcount() );
    1694           0 :   ulong sz = sizeof(fd_entry_batch_meta_t)+sizeof(fd_entry_batch_header_t)+payload_sz;
    1695           0 :   ulong new_sig = fd_disco_poh_sig( slot, POH_PKT_TYPE_MICROBLOCK, 0UL );
    1696           0 :   fd_stem_publish( stem, ctx->shred_out->idx, new_sig, ctx->shred_out->chunk, sz, 0UL, 0UL, tspub );
    1697           0 :   ctx->shred_out->chunk = fd_dcache_compact_next( ctx->shred_out->chunk, sz, ctx->shred_out->chunk0, ctx->shred_out->wmark );
    1698           0 : }
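                     :
                     : /* For reference, a minimal sketch (hypothetical, not part of this
                     :    tile) of how a consumer of the shred link could walk the frag
                     :    published above, assuming the exact layout publish_microblock()
                     :    writes: meta, then header, then the concatenated payloads of the
                     :    successfully executed transactions. */
                     :
                     : static void
                     : entry_batch_parse_sketch( uchar const * buf,
                     :                           ulong         sz ) {
                     :   fd_entry_batch_meta_t   const * meta   = (fd_entry_batch_meta_t const *)buf;
                     :   fd_entry_batch_header_t const * header = (fd_entry_batch_header_t const *)(buf+sizeof(fd_entry_batch_meta_t));
                     :   uchar const * payload    = buf+sizeof(fd_entry_batch_meta_t)+sizeof(fd_entry_batch_header_t);
                     :   ulong         payload_sz = sz-sizeof(fd_entry_batch_meta_t)-sizeof(fd_entry_batch_header_t);
                     :   (void)payload;
                     :   FD_LOG_INFO(( "entry_batch(txn_cnt=%lu,block_complete=%d,payload_sz=%lu)",
                     :                 header->txn_cnt, (int)meta->block_complete, payload_sz ));
                     : }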
    1699             : 
    1700             : static inline void
    1701             : after_frag( fd_poh_ctx_t *      ctx,
    1702             :             ulong               in_idx,
    1703             :             ulong               seq,
    1704             :             ulong               sig,
    1705             :             ulong               sz,
    1706             :             ulong               tsorig,
    1707           0 :             fd_stem_context_t * stem ) {
    1708           0 :   (void)in_idx;
    1709           0 :   (void)seq;
    1710           0 :   (void)tsorig;
    1711             : 
    1712           0 :   if( FD_UNLIKELY( ctx->skip_frag ) ) return;
    1713             : 
    1714           0 :   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_STAKE ) ) {
    1715           0 :     fd_stake_ci_stake_msg_fini( ctx->stake_ci );
    1716             :     /* It might seem like we do not need to do state transitions in and
    1717             :        out of being the leader here, since leader schedule updates are
    1718             :        always one epoch in advance (whether we are leader or not would
     1719             :        never change for the currently executing slot), but this is not
    1720             :        true for new ledgers when the validator first boots.  We will
    1721             :        likely be the leader in slot 1, and get notified of the leader
    1722             :        schedule for that slot while we are still in it.
    1723             : 
    1724             :        For safety we just handle both transitions, in and out, although
    1725             :        the only one possible should be into leader. */
    1726           0 :     ulong next_leader_slot_after_frag = next_leader_slot( ctx );
    1727             : 
    1728           0 :     int currently_leader  = ctx->slot>=ctx->next_leader_slot;
    1729           0 :     int leader_after_frag = ctx->slot>=next_leader_slot_after_frag;
    1730             : 
    1731           0 :     FD_LOG_INFO(( "stake_update(before_leader=%lu,after_leader=%lu)",
    1732           0 :                   ctx->next_leader_slot,
    1733           0 :                   next_leader_slot_after_frag ));
    1734             : 
    1735           0 :     ctx->next_leader_slot = next_leader_slot_after_frag;
    1736           0 :     if( FD_UNLIKELY( currently_leader && !leader_after_frag ) ) {
     1737             :       /* Shouldn't ever happen; if it did, we would need to do a state
     1738             :          transition out of being leader. */
    1739           0 :       FD_LOG_ERR(( "stake update caused us to no longer be leader in an active slot" ));
    1740           0 :     }
    1741             : 
    1742             :     /* Nothing to do if we transition into being leader, since it
    1743             :        will just get picked up by the regular tick loop. */
    1744           0 :     if( FD_UNLIKELY( !currently_leader && leader_after_frag ) ) {
    1745           0 :       publish_plugin_slot_start( ctx, next_leader_slot_after_frag, ctx->reset_slot );
    1746           0 :     }
    1747             : 
    1748           0 :     return;
    1749           0 :   }
    1750             : 
    1751           0 :   if( FD_UNLIKELY( !ctx->microblocks_lower_bound ) ) {
    1752           0 :     double tick_per_ns = fd_tempo_tick_per_ns( NULL );
    1753           0 :     fd_histf_sample( ctx->first_microblock_delay, (ulong)((double)(fd_log_wallclock()-ctx->reset_slot_start_ns)/tick_per_ns) );
    1754           0 :   }
    1755             : 
    1756           0 :   ulong target_slot = fd_disco_bank_sig_slot( sig );
    1757             : 
    1758           0 :   if( FD_UNLIKELY( target_slot!=ctx->next_leader_slot || target_slot!=ctx->slot ) ) {
     1759           0 :     FD_LOG_ERR(( "packed too early or late: target_slot=%lu, current_slot=%lu, highwater_leader_slot=%lu",
    1760           0 :                  target_slot, ctx->slot, ctx->highwater_leader_slot ));
    1761           0 :   }
    1762             : 
    1763           0 :   FD_TEST( ctx->current_leader_bank );
    1764           0 :   FD_TEST( ctx->microblocks_lower_bound<ctx->max_microblocks_per_slot );
    1765           0 :   ctx->microblocks_lower_bound += 1UL;
    1766             : 
    1767           0 :   ulong txn_cnt = (sz-sizeof(fd_microblock_trailer_t))/sizeof(fd_txn_p_t);
    1768           0 :   fd_txn_p_t * txns = (fd_txn_p_t *)(ctx->_txns);
    1769           0 :   ulong executed_txn_cnt = 0UL;
    1770           0 :   ulong cus_used         = 0UL;
    1771           0 :   for( ulong i=0UL; i<txn_cnt; i++ ) {
    1772           0 :     if( FD_LIKELY( txns[ i ].flags & FD_TXN_P_FLAGS_EXECUTE_SUCCESS ) ) {
    1773           0 :       executed_txn_cnt++;
    1774           0 :       cus_used += txns[ i ].bank_cu.actual_consumed_cus;
    1775           0 :     }
    1776           0 :   }
    1777             : 
    1778             :   /* We don't publish transactions that fail to execute.  If all the
    1779             :      transactions failed to execute, the microblock would be empty,
     1780             :      causing Agave to think it's a tick and complain.  Instead, we just
    1781             :      skip the microblock and don't hash or update the hashcnt. */
    1782           0 :   if( FD_UNLIKELY( !executed_txn_cnt ) ) return;
    1783             : 
    1784           0 :   uchar data[ 64 ];
    1785           0 :   fd_memcpy( data, ctx->hash, 32UL );
    1786           0 :   fd_memcpy( data+32UL, ctx->_microblock_trailer->hash, 32UL );
    1787           0 :   fd_sha256_hash( data, 64UL, ctx->hash );
    1788             : 
    1789           0 :   ctx->hashcnt++;
    1790           0 :   FD_TEST( ctx->hashcnt>ctx->last_hashcnt );
    1791           0 :   ulong hashcnt_delta = ctx->hashcnt - ctx->last_hashcnt;
    1792             : 
    1793             :   /* The hashing loop above will never leave us exactly one away from
    1794             :      crossing a tick boundary, so this increment will never cause the
    1795             :      current tick (or the slot) to change, except in low power mode
    1796             :      for development, in which case we do need to register the tick
    1797             :      with the leader bank.  We don't need to publish the tick since
    1798             :      sending the microblock below is the publishing action. */
    1799           0 :   if( FD_UNLIKELY( !(ctx->hashcnt%ctx->hashcnt_per_slot ) ) ) {
    1800           0 :     ctx->slot++;
    1801           0 :     ctx->hashcnt = 0UL;
    1802           0 :   }
    1803             : 
    1804           0 :   ctx->last_slot    = ctx->slot;
    1805           0 :   ctx->last_hashcnt = ctx->hashcnt;
    1806             : 
    1807           0 :   ctx->cus_used += cus_used;
    1808             : 
    1809           0 :   if( FD_UNLIKELY( !(ctx->hashcnt%ctx->hashcnt_per_tick ) ) ) {
    1810           0 :     fd_ext_poh_register_tick( ctx->current_leader_bank, ctx->hash );
    1811           0 :     if( FD_UNLIKELY( ctx->slot>ctx->next_leader_slot ) ) {
    1812             :       /* We ticked while leader and are no longer leader... transition
    1813             :          the state machine. */
    1814           0 :       publish_plugin_slot_end( ctx, ctx->next_leader_slot, ctx->cus_used );
    1815             : 
    1816           0 :       no_longer_leader( ctx );
    1817             : 
    1818           0 :       if( FD_UNLIKELY( ctx->slot>=ctx->next_leader_slot ) ) {
    1819             :         /* We finished a leader slot, and are immediately leader for the
    1820             :            following slot... transition. */
    1821           0 :         publish_plugin_slot_start( ctx, ctx->next_leader_slot, ctx->next_leader_slot-1UL );
    1822           0 :       }
    1823           0 :     }
    1824           0 :   }
    1825             : 
    1826           0 :   publish_microblock( ctx, stem, target_slot, hashcnt_delta, txn_cnt );
    1827           0 : }
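                     :
                     : /* A self-contained sketch of the mix-in step performed above, for
                     :    clarity: the running PoH hash absorbs a microblock by hashing the
                     :    64-byte concatenation of the current hash and the microblock hash
                     :    (a plain tick, by contrast, re-hashes the 32-byte running hash
                     :    alone).  The helper name is illustrative only; the tile inlines
                     :    this logic directly in after_frag. */
                     :
                     : static inline void
                     : poh_mixin_sketch( uchar       hash [ 32 ],    /* in/out: running PoH hash */
                     :                   uchar const mixin[ 32 ] ) { /* in: microblock hash      */
                     :   uchar data[ 64 ];
                     :   fd_memcpy( data,      hash,  32UL );
                     :   fd_memcpy( data+32UL, mixin, 32UL );
                     :   fd_sha256_hash( data, 64UL, hash );
                     : }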
    1828             : 
    1829             : static void
    1830             : privileged_init( fd_topo_t *      topo,
    1831           0 :                  fd_topo_tile_t * tile ) {
    1832           0 :   void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
    1833             : 
    1834           0 :   FD_SCRATCH_ALLOC_INIT( l, scratch );
    1835           0 :   fd_poh_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_poh_ctx_t ), sizeof( fd_poh_ctx_t ) );
    1836             : 
    1837           0 :   if( FD_UNLIKELY( !strcmp( tile->poh.identity_key_path, "" ) ) )
    1838           0 :     FD_LOG_ERR(( "identity_key_path not set" ));
    1839             : 
    1840           0 :   const uchar * identity_key = fd_keyload_load( tile->poh.identity_key_path, /* pubkey only: */ 1 );
    1841           0 :   fd_memcpy( ctx->identity_key.uc, identity_key, 32UL );
    1842           0 : }
    1843             : 
     1844             : /* The Agave client needs to communicate the shred version to the
     1845             :    shred tile on boot, but the shred tile does not live in the same
     1846             :    address space, so the PoH tile passes the value through via a
     1847             :    shared memory ulong. */
    1848             : 
    1849             : static volatile ulong * fd_shred_version;
    1850             : 
    1851             : void
    1852           0 : fd_ext_shred_set_shred_version( ulong shred_version ) {
    1853           0 :   while( FD_UNLIKELY( !fd_shred_version ) ) FD_SPIN_PAUSE();
    1854           0 :   *fd_shred_version = shred_version;
    1855           0 : }
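                     :
                     : /* The consuming side is symmetric.  A hypothetical sketch (the real
                     :    shred tile code lives elsewhere) of waiting for the version to be
                     :    published, assuming zero means "not yet set": */
                     :
                     : static inline ulong
                     : shred_version_wait_sketch( volatile ulong const * shred_version ) {
                     :   ulong version;
                     :   while( FD_UNLIKELY( !(version=*shred_version) ) ) FD_SPIN_PAUSE();
                     :   return version;
                     : }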
    1856             : 
    1857             : void
    1858             : fd_ext_poh_publish_gossip_vote( uchar * data,
    1859           0 :                                 ulong   data_len ) {
    1860           0 :   poh_link_publish( &gossip_dedup, 1UL, data, data_len );
    1861           0 : }
    1862             : 
    1863             : void
    1864             : fd_ext_poh_publish_leader_schedule( uchar * data,
    1865           0 :                                     ulong   data_len ) {
    1866           0 :   poh_link_publish( &stake_out, 2UL, data, data_len );
    1867           0 : }
    1868             : 
    1869             : void
    1870             : fd_ext_poh_publish_cluster_info( uchar * data,
    1871           0 :                                  ulong   data_len ) {
    1872           0 :   poh_link_publish( &crds_shred, 2UL, data, data_len );
    1873           0 : }
    1874             : 
    1875             : void
    1876             : fd_ext_plugin_publish_replay_stage( ulong   sig,
    1877             :                                     uchar * data,
    1878           0 :                                     ulong   data_len ) {
    1879           0 :   poh_link_publish( &replay_plugin, sig, data, data_len );
    1880           0 : }
    1881             : 
    1882             : void
    1883             : fd_ext_plugin_publish_start_progress( ulong   sig,
     1884             :                                       uchar * data,
     1885           0 :                                       ulong   data_len ) {
    1886           0 :   poh_link_publish( &start_progress_plugin, sig, data, data_len );
    1887           0 : }
    1888             : 
    1889             : void
    1890             : fd_ext_plugin_publish_vote_listener( ulong   sig,
    1891             :                                      uchar * data,
    1892           0 :                                      ulong   data_len ) {
    1893           0 :   poh_link_publish( &vote_listener_plugin, sig, data, data_len );
    1894           0 : }
    1895             : 
    1896             : void
    1897             : fd_ext_plugin_publish_periodic( ulong   sig,
    1898             :                                 uchar * data,
    1899           0 :                                 ulong   data_len ) {
    1900           0 :   poh_link_publish( &gossip_plugin, sig, data, data_len );
    1901           0 : }
    1902             : 
    1903             : void
    1904             : fd_ext_resolv_publish_root_bank( uchar * data,
    1905           0 :                                  ulong   data_len ) {
    1906           0 :   poh_link_publish( &replay_resolv, 0UL, data, data_len );
    1907           0 : }
    1908             : 
    1909             : void
    1910             : fd_ext_resolv_publish_completed_blockhash( uchar * data,
    1911           0 :                                            ulong   data_len ) {
    1912           0 :   poh_link_publish( &replay_resolv, 1UL, data, data_len );
    1913           0 : }
    1914             : 
    1915             : static inline fd_poh_out_ctx_t
    1916             : out1( fd_topo_t const *      topo,
    1917             :       fd_topo_tile_t const * tile,
    1918           0 :       char const *           name ) {
    1919           0 :   ulong idx = ULONG_MAX;
    1920             : 
    1921           0 :   for( ulong i=0UL; i<tile->out_cnt; i++ ) {
    1922           0 :     fd_topo_link_t const * link = &topo->links[ tile->out_link_id[ i ] ];
    1923           0 :     if( !strcmp( link->name, name ) ) {
    1924           0 :       if( FD_UNLIKELY( idx!=ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had multiple output links named %s but expected one", tile->name, tile->kind_id, name ));
    1925           0 :       idx = i;
    1926           0 :     }
    1927           0 :   }
    1928             : 
    1929           0 :   if( FD_UNLIKELY( idx==ULONG_MAX ) ) FD_LOG_ERR(( "tile %s:%lu had no output link named %s", tile->name, tile->kind_id, name ));
    1930             : 
    1931           0 :   void * mem = topo->workspaces[ topo->objs[ topo->links[ tile->out_link_id[ idx ] ].dcache_obj_id ].wksp_id ].wksp;
    1932           0 :   ulong chunk0 = fd_dcache_compact_chunk0( mem, topo->links[ tile->out_link_id[ idx ] ].dcache );
    1933           0 :   ulong wmark  = fd_dcache_compact_wmark ( mem, topo->links[ tile->out_link_id[ idx ] ].dcache, topo->links[ tile->out_link_id[ idx ] ].mtu );
    1934             : 
    1935           0 :   return (fd_poh_out_ctx_t){ .idx = idx, .mem = mem, .chunk0 = chunk0, .wmark = wmark, .chunk = chunk0 };
    1936           0 : }
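                     :
                     : /* Used below in unprivileged_init to resolve each named output link,
                     :    e.g. out1( topo, tile, "poh_shred" ); it logs an error and exits
                     :    if the link is missing or appears more than once. */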
    1937             : 
    1938             : static void
    1939             : unprivileged_init( fd_topo_t *      topo,
    1940           0 :                    fd_topo_tile_t * tile ) {
    1941           0 :   void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
    1942             : 
    1943           0 :   FD_SCRATCH_ALLOC_INIT( l, scratch );
    1944           0 :   fd_poh_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_poh_ctx_t ), sizeof( fd_poh_ctx_t ) );
    1945           0 :   void * stake_ci = FD_SCRATCH_ALLOC_APPEND( l, fd_stake_ci_align(),              fd_stake_ci_footprint()            );
    1946           0 :   void * sha256   = FD_SCRATCH_ALLOC_APPEND( l, FD_SHA256_ALIGN,                  FD_SHA256_FOOTPRINT                );
    1947             : 
    1948           0 : #define NONNULL( x ) (__extension__({                                        \
    1949           0 :       __typeof__((x)) __x = (x);                                             \
    1950           0 :       if( FD_UNLIKELY( !__x ) ) FD_LOG_ERR(( #x " was unexpectedly NULL" )); \
    1951           0 :       __x; }))
    1952             : 
    1953           0 :   ctx->stake_ci = NONNULL( fd_stake_ci_join( fd_stake_ci_new( stake_ci, &ctx->identity_key ) ) );
    1954           0 :   ctx->sha256 = NONNULL( fd_sha256_join( fd_sha256_new( sha256 ) ) );
    1955           0 :   ctx->current_leader_bank = NULL;
    1956           0 :   ctx->signal_leader_change = NULL;
    1957             : 
    1958           0 :   ctx->slot                  = 0UL;
    1959           0 :   ctx->hashcnt               = 0UL;
    1960           0 :   ctx->last_hashcnt          = 0UL;
    1961           0 :   ctx->highwater_leader_slot = ULONG_MAX;
    1962           0 :   ctx->next_leader_slot      = ULONG_MAX;
    1963           0 :   ctx->reset_slot            = ULONG_MAX;
    1964             : 
    1965           0 :   ctx->lagged_consecutive_leader_start = tile->poh.lagged_consecutive_leader_start;
    1966           0 :   ctx->expect_sequential_leader_slot = ULONG_MAX;
    1967             : 
    1968           0 :   ctx->microblocks_lower_bound = 0UL;
    1969             : 
    1970           0 :   ctx->max_active_descendant = 0UL;
    1971             : 
    1972           0 :   ulong poh_shred_obj_id = fd_pod_query_ulong( topo->props, "poh_shred", ULONG_MAX );
    1973           0 :   FD_TEST( poh_shred_obj_id!=ULONG_MAX );
    1974             : 
    1975           0 :   fd_shred_version = fd_fseq_join( fd_topo_obj_laddr( topo, poh_shred_obj_id ) );
    1976           0 :   FD_TEST( fd_shred_version );
    1977             : 
    1978           0 :   poh_link_init( &gossip_dedup,          topo, tile, out1( topo, tile, "gossip_dedup" ).idx );
    1979           0 :   poh_link_init( &stake_out,             topo, tile, out1( topo, tile, "stake_out"    ).idx );
    1980           0 :   poh_link_init( &crds_shred,            topo, tile, out1( topo, tile, "crds_shred"   ).idx );
    1981           0 :   poh_link_init( &replay_resolv,         topo, tile, out1( topo, tile, "replay_resol" ).idx );
    1982             : 
    1983           0 :   if( FD_LIKELY( tile->poh.plugins_enabled ) ) {
    1984           0 :     poh_link_init( &replay_plugin,         topo, tile, out1( topo, tile, "replay_plugi" ).idx );
    1985           0 :     poh_link_init( &gossip_plugin,         topo, tile, out1( topo, tile, "gossip_plugi" ).idx );
    1986           0 :     poh_link_init( &start_progress_plugin, topo, tile, out1( topo, tile, "startp_plugi" ).idx );
    1987           0 :     poh_link_init( &vote_listener_plugin,  topo, tile, out1( topo, tile, "votel_plugin" ).idx );
    1988           0 :   } else {
     1989             :     /* Mark these mcaches as "available", so the system boots, but the
     1990             :        memory is not set so nothing will actually get published via
     1991             :        the links. */
    1992           0 :     FD_COMPILER_MFENCE();
    1993           0 :     replay_plugin.mcache = (fd_frag_meta_t*)1;
    1994           0 :     gossip_plugin.mcache = (fd_frag_meta_t*)1;
    1995           0 :     start_progress_plugin.mcache = (fd_frag_meta_t*)1;
    1996           0 :     vote_listener_plugin.mcache = (fd_frag_meta_t*)1;
    1997           0 :     FD_COMPILER_MFENCE();
    1998           0 :   }
    1999             : 
    2000           0 :   FD_LOG_INFO(( "PoH waiting to be initialized by Agave client... %lu %lu", fd_poh_waiting_lock, fd_poh_returned_lock ));
    2001           0 :   FD_VOLATILE( fd_poh_global_ctx ) = ctx;
    2002           0 :   FD_COMPILER_MFENCE();
    2003           0 :   for(;;) {
    2004           0 :     if( FD_LIKELY( FD_VOLATILE_CONST( fd_poh_waiting_lock ) ) ) break;
    2005           0 :     FD_SPIN_PAUSE();
    2006           0 :   }
    2007           0 :   FD_VOLATILE( fd_poh_waiting_lock ) = 0UL;
    2008           0 :   FD_VOLATILE( fd_poh_returned_lock ) = 1UL;
    2009           0 :   FD_COMPILER_MFENCE();
    2010           0 :   for(;;) {
    2011           0 :     if( FD_UNLIKELY( !FD_VOLATILE_CONST( fd_poh_returned_lock ) ) ) break;
    2012           0 :     FD_SPIN_PAUSE();
    2013           0 :   }
    2014           0 :   FD_COMPILER_MFENCE();
    2015             : 
    2016           0 :   if( FD_UNLIKELY( ctx->reset_slot==ULONG_MAX ) ) FD_LOG_ERR(( "PoH was not initialized by Agave client" ));
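                     :
                     :   /* For illustration, the other half of this handshake, performed by
                     :      the Agave client's initialization call (a rough sketch, using the
                     :      same lock names as above), looks like:
                     :
                     :        FD_VOLATILE( fd_poh_waiting_lock ) = 1UL;  // request the ctx
                     :        while( !FD_VOLATILE_CONST( fd_poh_returned_lock ) ) FD_SPIN_PAUSE();
                     :        // ... fill in fd_poh_global_ctx (reset_slot etc.) ...
                     :        FD_COMPILER_MFENCE();
                     :        FD_VOLATILE( fd_poh_returned_lock ) = 0UL;  // release the tile
                     :   */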
    2017             : 
    2018           0 :   fd_histf_join( fd_histf_new( ctx->begin_leader_delay, FD_MHIST_SECONDS_MIN( POH, BEGIN_LEADER_DELAY_SECONDS ),
    2019           0 :                                                         FD_MHIST_SECONDS_MAX( POH, BEGIN_LEADER_DELAY_SECONDS ) ) );
    2020           0 :   fd_histf_join( fd_histf_new( ctx->first_microblock_delay, FD_MHIST_SECONDS_MIN( POH, FIRST_MICROBLOCK_DELAY_SECONDS  ),
    2021           0 :                                                             FD_MHIST_SECONDS_MAX( POH, FIRST_MICROBLOCK_DELAY_SECONDS  ) ) );
    2022           0 :   fd_histf_join( fd_histf_new( ctx->slot_done_delay, FD_MHIST_SECONDS_MIN( POH, SLOT_DONE_DELAY_SECONDS  ),
    2023           0 :                                                      FD_MHIST_SECONDS_MAX( POH, SLOT_DONE_DELAY_SECONDS  ) ) );
    2024             : 
    2025           0 :   for( ulong i=0UL; i<tile->in_cnt; i++ ) {
    2026           0 :     fd_topo_link_t * link = &topo->links[ tile->in_link_id[ i ] ];
    2027           0 :     fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
    2028             : 
    2029           0 :     ctx->in[ i ].mem    = link_wksp->wksp;
    2030           0 :     ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
    2031           0 :     ctx->in[ i ].wmark  = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
    2032             : 
    2033           0 :     if( FD_UNLIKELY( !strcmp( link->name, "stake_out" ) ) ) {
    2034           0 :       ctx->in_kind[ i ] = IN_KIND_STAKE;
    2035           0 :     } else if( FD_UNLIKELY( !strcmp( link->name, "pack_bank" ) ) ) {
    2036           0 :       ctx->in_kind[ i ] = IN_KIND_PACK;
    2037           0 :     } else if( FD_LIKELY( !strcmp( link->name, "bank_poh" ) ) ) {
    2038           0 :       ctx->in_kind[ i ] = IN_KIND_BANK;
    2039           0 :     } else {
    2040           0 :       FD_LOG_ERR(( "unexpected input link name %s", link->name ));
    2041           0 :     }
    2042           0 :   }
    2043             : 
    2044           0 :   *ctx->shred_out = out1( topo, tile, "poh_shred" );
    2045           0 :   *ctx->pack_out  = out1( topo, tile, "poh_pack" );
    2046           0 :   ctx->plugin_out->mem = NULL;
    2047           0 :   if( FD_LIKELY( tile->poh.plugins_enabled ) ) {
    2048           0 :     *ctx->plugin_out = out1( topo, tile, "poh_plugin" );
    2049           0 :   }
    2050             : 
    2051           0 :   ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
    2052           0 :   if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
    2053           0 :     FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
    2054           0 : }
    2055             : 
    2056             : /* One tick, one microblock, one plugin slot end, one plugin slot start,
    2057             :    and one leader update. */
    2058           0 : #define STEM_BURST (5UL)
    2059             : 
    2060             : /* See explanation in fd_pack */
    2061           0 : #define STEM_LAZY  (128L*3000L)
    2062             : 
    2063           0 : #define STEM_CALLBACK_CONTEXT_TYPE  fd_poh_ctx_t
    2064           0 : #define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_poh_ctx_t)
    2065             : 
    2066           0 : #define STEM_CALLBACK_METRICS_WRITE metrics_write
    2067           0 : #define STEM_CALLBACK_AFTER_CREDIT  after_credit
    2068           0 : #define STEM_CALLBACK_BEFORE_FRAG   before_frag
    2069           0 : #define STEM_CALLBACK_DURING_FRAG   during_frag
    2070           0 : #define STEM_CALLBACK_AFTER_FRAG    after_frag
    2071             : 
    2072             : #include "../../../../disco/stem/fd_stem.c"
    2073             : 
    2074             : fd_topo_run_tile_t fd_tile_poh = {
    2075             :   .name                     = "poh",
    2076             :   .populate_allowed_seccomp = NULL,
    2077             :   .populate_allowed_fds     = NULL,
    2078             :   .scratch_align            = scratch_align,
    2079             :   .scratch_footprint        = scratch_footprint,
    2080             :   .privileged_init          = privileged_init,
    2081             :   .unprivileged_init        = unprivileged_init,
    2082             :   .run                      = stem_run,
    2083             : };

Generated by: LCOV version 1.14