#ifndef HEADER_fd_src_app_fdctl_run_tiles_fd_stake_ci_h
#define HEADER_fd_src_app_fdctl_run_tiles_fd_stake_ci_h

/* fd_stake_ci handles the thorny problem of keeping track of leader
   schedules and shred destinations, which are epoch specific.  Around
   epoch boundaries, we may need to query information from the epoch on
   either side of the boundary.

   When you make a stake delegation change during epoch N, it becomes
   active at the start of the first slot of epoch N+1, but it doesn't
   affect the leader schedule or the shred destinations until epoch N+2.
   For example, a delegation made during epoch 10 activates at the start
   of epoch 11 but is first reflected in the epoch 12 leader schedule
   and shred destinations.  These methods take care of all that
   complexity, so the caller does not need to do any adjustment. */

#include "fd_shred_dest.h"
#include "../../flamenco/leaders/fd_leaders.h"

#define MAX_SHRED_DESTS             MAX_STAKED_LEADERS
/* staked+unstaked <= MAX_SHRED_DESTS implies
   MAX_SHRED_DEST_FOOTPRINT>=fd_shred_dest_footprint( staked, unstaked ).
   This is asserted in the tests.  The size of fd_shred_dest_t varies
   based on FD_SHA256_BATCH_FOOTPRINT, which depends on the compiler
   settings. */
#define MAX_SHRED_DEST_FOOTPRINT (8386688UL + sizeof(fd_shred_dest_t))
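
/* A sketch of the bound the tests check (staked and unstaked are
   hypothetical counts satisfying staked+unstaked <= MAX_SHRED_DESTS):

     FD_TEST( fd_shred_dest_footprint( staked, unstaked )<=MAX_SHRED_DEST_FOOTPRINT );
*/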

struct fd_per_epoch_info_private {
  /* Epoch, and [start_slot, start_slot+slot_cnt) refer to the time
     period for which lsched and sdest are valid.  I.e. if you're
     interested in the leader or computing a shred destination for a
     slot s, this struct has the right data when s is in [start_slot,
     start_slot+slot_cnt). */
  ulong epoch;
  ulong start_slot;
  ulong slot_cnt;
  ulong excluded_stake;
  ulong vote_keyed_lsched;

  /* Invariant: These are always joined and use the memory below for
     their footprint. */
  fd_epoch_leaders_t * lsched;
  fd_shred_dest_t    * sdest;

  uchar __attribute__((aligned(FD_EPOCH_LEADERS_ALIGN))) _lsched[ FD_EPOCH_LEADERS_FOOTPRINT(MAX_SHRED_DESTS, MAX_SLOTS_PER_EPOCH) ];
  uchar __attribute__((aligned(FD_SHRED_DEST_ALIGN   ))) _sdest [ MAX_SHRED_DEST_FOOTPRINT ];
};
typedef struct fd_per_epoch_info_private fd_per_epoch_info_t;

struct fd_stake_ci {
  fd_pubkey_t identity_key[ 1 ];

  /* scratch and stake_weight are only relevant between stake_msg_init
     and stake_msg_fini.  shred_dest is only relevant between
     dest_add_init and dest_add_fini. */
  struct {
    ulong epoch;
    ulong start_slot;
    ulong slot_cnt;
    ulong staked_cnt;
    ulong excluded_stake;
    ulong vote_keyed_lsched;
  } scratch[1];

  fd_vote_stake_weight_t   vote_stake_weight[ MAX_SHRED_DESTS ];
  fd_stake_weight_t        stake_weight   [ MAX_SHRED_DESTS ];
  fd_shred_dest_weighted_t shred_dest     [ MAX_SHRED_DESTS ];

  fd_shred_dest_weighted_t shred_dest_temp[ MAX_SHRED_DESTS ];

  /* The information to be used for epoch i can be found at
     epoch_info[ i%2 ] if it is known. */
  fd_per_epoch_info_t epoch_info[ 2 ];
};
typedef struct fd_stake_ci fd_stake_ci_t;
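
/* For instance, the slot-to-entry mapping works along these lines (a
   simplified sketch of the idea, not the exact implementation; e is the
   epoch containing the slot of interest):

     fd_per_epoch_info_t const * ei = info->epoch_info + (e % 2UL);
     int known = (slot>=ei->start_slot) & (slot<ei->start_slot+ei->slot_cnt);
*/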

/* fd_stake_ci_{footprint, align} return the footprint and alignment
   required of a region of memory to be used as an fd_stake_ci_t.
   fd_stake_ci_t is statically sized, so it can just be declared
   outright if needed, but it's pretty large (~30 MB!), so you probably
   don't want it on the stack. */

FD_FN_CONST static inline ulong fd_stake_ci_footprint( void ) { return sizeof (fd_stake_ci_t); }
FD_FN_CONST static inline ulong fd_stake_ci_align    ( void ) { return alignof(fd_stake_ci_t); }
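
/* Given the size, one might back it with workspace memory rather than
   the stack, e.g. (a sketch: wksp is an assumed joined fd_wksp_t
   workspace, and the tag 1UL is arbitrary):

     void * mem = fd_wksp_alloc_laddr( wksp, fd_stake_ci_align(), fd_stake_ci_footprint(), 1UL );
*/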

/* fd_stake_ci_new formats a piece of memory as a valid stake contact
   information store.  `identity_key` is a pointer to the public key of
   the identity keypair of the local validator.  This is used by
   fd_shred_dest to know where in the Turbine tree it belongs.
   Does NOT retain a read interest in identity_key after the function
   returns. */
void          * fd_stake_ci_new ( void * mem, fd_pubkey_t const * identity_key );
fd_stake_ci_t * fd_stake_ci_join( void * mem );

void * fd_stake_ci_leave ( fd_stake_ci_t * info );
void * fd_stake_ci_delete( void          * mem  );
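
/* Typical lifecycle, continuing the allocation sketch above (identity
   is an assumed pointer to the validator's identity public key):

     fd_stake_ci_t * info = fd_stake_ci_join( fd_stake_ci_new( mem, identity ) );
     ... use info ...
     fd_wksp_free_laddr( fd_stake_ci_delete( fd_stake_ci_leave( info ) ) );
*/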

/* fd_stake_ci_stake_msg_{init, fini} are used to handle messages
   containing stake weight updates from the Rust side of the splice, and
   fd_stake_ci_dest_add_{init, fini} are used to handle messages
   containing contact info (potential shred destinations) updates from
   the Rust side of the splice.

   These are very specific to the current splices, but rather than parse
   the message in the pack and shred tiles, we parse it here.  Since
   these messages arrive on a dcache and can get overrun, both expose an
   init/fini model.

   Upon returning from a call to fd_stake_ci_{stake_msg, dest_add}_init,
   the stake contact info object will be in a stake-msg-pending or
   dest-add-pending mode, respectively, regardless of what mode it was
   in before.  In either of these modes, calls to the query functions
   (get_*_for_slot) are okay and will return the same values they
   returned prior to the _init call.

   In order to call fd_stake_ci_{stake_msg, dest_add}_fini, the stake
   contact info must be in stake-msg-pending / dest-add-pending mode,
   respectively.  This means, for example, you cannot call
   fd_stake_ci_stake_msg_init followed by fd_stake_ci_dest_add_fini
   without an intervening call to fd_stake_ci_dest_add_init.  There's no
   need to cancel an operation that began but didn't finish.  Calling
   init multiple times without calling fini will not leak any resources.

   msg should be a pointer to the first byte of the dcache entry
   containing the stakes update.  msg will be accessed as
   msg->weights[i] for i in [0, msg->staked_cnt).  msg must contain at
   least one staked pubkey, and the pubkeys must be sorted in the usual
   way (by stake descending, ties broken by pubkey ascending).

   fd_stake_ci_dest_add_init behaves slightly differently and returns a
   pointer to the first element of an array of size MAX_SHRED_DESTS-1 to
   be populated.  This allows the caller to augment the information in
   the message from Rust with additional information (e.g. MAC
   addresses).  The `cnt` argument to _dest_add_fini specifies the
   number of elements of the array returned by _init that were
   populated.  0<=cnt<MAX_SHRED_DESTS.  _fini will only read the first
   `cnt` elements of the array.  The stake_lamports field of the input
   is ignored.  The identity pubkey provided at initialization must not
   be one of the cnt values in the array.  The caller should not retain
   a read or write interest in the pointer returned by _init after fini
   has been called, or after the caller has determined that fini will
   not be called for that update, e.g. because the update was overrun.
   Calls to _fini may clobber the array.

   The list used for leader schedules is always just the staked nodes.
   The list used for shred destinations is the staked nodes along with
   any unstaked nodes for which we have contact info.  If a stake
   message doesn't have contact info for a staked node, the previous
   contact info will be preserved.  If a stake message doesn't have
   contact info for an unstaked node, on the other hand, that node will
   be deleted from the list. */
void                       fd_stake_ci_stake_msg_init( fd_stake_ci_t * info, fd_stake_weight_msg_t const * msg );
void                       fd_stake_ci_stake_msg_fini( fd_stake_ci_t * info                                    );
fd_shred_dest_weighted_t * fd_stake_ci_dest_add_init ( fd_stake_ci_t * info                                    );
void                       fd_stake_ci_dest_add_fini ( fd_stake_ci_t * info, ulong                         cnt );
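
/* For example, a consumer tile's handling of a contact info update
   might look like the following sketch, where msg_cnt, in_dests, and
   the overrun check stand in for the caller's own dcache frag handling
   (the caller could also augment each entry, e.g. with MAC addresses,
   before calling fini):

     fd_shred_dest_weighted_t * dests = fd_stake_ci_dest_add_init( info );
     ulong cnt = 0UL;
     for( ulong i=0UL; (i<msg_cnt) & (cnt<MAX_SHRED_DESTS-1UL); i++ ) dests[ cnt++ ] = in_dests[ i ];
     if( !overrun ) fd_stake_ci_dest_add_fini( info, cnt );

   A stake message update is analogous: fd_stake_ci_stake_msg_init( info, msg )
   followed, if not overrun, by fd_stake_ci_stake_msg_fini( info ). */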


/* fd_stake_ci_set_identity changes the identity of the locally running
   validator at runtime. */
void fd_stake_ci_set_identity( fd_stake_ci_t *     info,
                               fd_pubkey_t const * identity_key );

/* fd_stake_ci_get_{sdest, lsched}_for_slot respectively return a
   pointer to the fd_shred_dest_t and fd_epoch_leaders_t containing
   information about the specified slot, if it is available.  These
   functions are the primary query functions for fd_stake_ci.  They
   return NULL if we don't have information for that slot.

   The fact that these take a slot perhaps makes it more clear, but it's
   worth mentioning again that there's nothing like the adjustment
   performed by Solana's get_leader_schedule_epoch going on here.  If
   you want to know the leader in slot X, just pass slot X.  The
   returned leader schedule will not be based on the stake weights
   active during slot X, but rather the stake weights offset in time by
   an appropriate amount so they apply to slot X. */
fd_shred_dest_t *    fd_stake_ci_get_sdest_for_slot ( fd_stake_ci_t const * info, ulong slot );
fd_epoch_leaders_t * fd_stake_ci_get_lsched_for_slot( fd_stake_ci_t const * info, ulong slot );
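
/* E.g., to look up the leader for a slot, fetch the schedule, handle
   the NULL case, then query it (a sketch; fd_epoch_leaders_get is
   assumed to be the slot query from fd_leaders.h):

     fd_epoch_leaders_t * lsched = fd_stake_ci_get_lsched_for_slot( info, slot );
     if( FD_UNLIKELY( !lsched ) ) return;
     fd_pubkey_t const * leader = fd_epoch_leaders_get( lsched, slot );
*/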

/* compute_id_weights_from_vote_weights() translates vote-based
   stake weights into (older) identity-based stake weights.

   Before SIMD-0180, the leader schedule was generated starting from
   a list [(id, stake)] where `id` is the validator identity and
   `stake` its aggregated stake, and the same list was used to build
   the Turbine tree.

   After SIMD-0180, the leader schedule is generated from vote
   accounts, i.e. starting from a list [(vote, id, stake)] instead.
   This makes it easier to send rewards to the expected vote account.
   Notably, the Turbine tree doesn't change with SIMD-0180, so the old
   list [(id, stake)] is still necessary.

   Realistically, there should be a 1:1 relationship between id and
   vote, but unfortunately the on chain state allows for a 1:N
   relationship (1 id could be associated with N vote accounts).
   At the time of writing, testnet has one such example.
   id: DtSguGSHVrXdqZU1mKWKocsAjrXMhaC7YJic5xxN1Uom
   votes:
   - https://solscan.io/account/BbtyLT1ntMFbbXtsJRCZnYjpe7d7TUtyZeGKzod3eNsN?cluster=testnet
   - https://solscan.io/account/FFr8Gyjy3Wjeqv6oD4RjbwqD1mVfKycAFxQdASYAfR75?cluster=testnet

   Even when there is a 1:1 relationship, the order of the 2 lists
   can be different because validators with the same stake could
   be ordered differently by vote vs id.

   As a final consideration, this operation is done only once per
   epoch (and twice at startup).

   The current implementation sorts in place to avoid extra memory
   for a map or tree. */
ulong
compute_id_weights_from_vote_weights( fd_stake_weight_t *            stake_weight,
                                      fd_vote_stake_weight_t const * vote_stake_weight,
                                      ulong                          staked_cnt );
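
/* Usage sketch (staked_cnt vote-keyed entries are assumed to already
   be in info->vote_stake_weight, e.g. from a stake message; the return
   value is assumed here to be the resulting identity-keyed entry
   count):

     ulong id_cnt = compute_id_weights_from_vote_weights( info->stake_weight,
                                                          info->vote_stake_weight,
                                                          staked_cnt );

   id_cnt <= staked_cnt, since vote accounts sharing an identity
   collapse into a single identity-keyed entry with their aggregated
   stake. */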

#endif /* HEADER_fd_src_app_fdctl_run_tiles_fd_stake_ci_h */