LCOV - code coverage report
Current view: top level - discof/repair - fd_policy.h (source / functions) Hit Total Coverage
Test: cov.lcov Lines: 0 47 0.0 %
Date: 2026-05-15 07:18:56 Functions: 0 36 0.0 %

          Line data    Source code
       1             : #ifndef HEADER_fd_src_discof_repair_fd_policy_h
       2             : #define HEADER_fd_src_discof_repair_fd_policy_h
       3             : 
       4             : /* fd_policy implements the policy of the Repair agent.  It determines
       5             :    what next repair request the validator should make. It also
       6             :    determines which peer(s) the validator should make the request to.
       7             : 
       8             :    The default policy implementation is to prioritize discovering
       9             :    ancestry for orphaned slots first (making an orphan request), and
      10             :    then making forward progress on the main ancestry tree (making a
      11             :    regular request) when there are no orphan requests to make.
      12             : 
      13             :    Regular shred requests are made round-robin BFS with time-based
      14             :    dedup: round-robin through all the repair peers we know about, and
      15             :    BFS down the repair forest (see fd_forest.h).
      16             : 
      17             :    This policy dedups identical repair requests that occur within a
      18             :    specified amount of time window of each other. */
      19             : 
      20             : #include "../forest/fd_forest.h"
      21             : #include "../../util/net/fd_net_headers.h"
      22             : #include "fd_repair.h"
      23             : #include "../../disco/shred/fd_rnonce_ss.h"
      24             : 
      25             : /* fd_policy_dedup implements a dedup cache for already sent Repair
      26             :    requests.  It is backed by a map and linked list, in which the least
      27             :    recently used (oldest Repair request) in the map is evicted when the
      28             :    map is full. */
      29             : 
      30             : typedef struct fd_policy_dedup fd_policy_dedup_t; /* forward decl */
      31             : 
      32             : /* fd_policy_dedup_ele describes an element in the dedup cache.  The key
      33             :    compactly encodes an fd_repair_req_t.
      34             : 
      35             :    | kind (2 bits)       | slot (32 bits)  | shred_idx (15 bits) |
      36             :    | 0x0 (SHRED)         | slot            | shred_idx           |
      37             :    | 0x1 (HIGHEST_SHRED) | slot            | >=shred_idx         |
      38             :    | 0x2 (ORPHAN)        | orphan slot     | N/A                 |
      39             : 
      40             :    Note the common header (sig, from, to, ts, nonce) is not included. */
      41             : 
struct fd_policy_dedup_ele {
  ulong key;      /* compact encoding of fd_repair_req_t detailed above */
  ulong prev;     /* reserved by lru */
  ulong next;     /* reserved by lru */
  ulong hash;     /* reserved by pool and map_chain */
  long  req_ts;   /* timestamp when the request was sent */
};
typedef struct fd_policy_dedup_ele fd_policy_dedup_ele_t;
      50             : 
      51             : FD_FN_CONST static inline ulong
      52           0 : fd_policy_dedup_key( uint kind, ulong slot, uint shred_idx ) {
      53           0 :   slot = fd_ulong_extract_lsb( (ulong)slot, 32 );
      54           0 :   return (ulong)kind << 62 | slot << 30 | shred_idx << 15;
      55           0 : }
      56             : 
/* Field accessors for the compact dedup key (layout table above):
   kind occupies bits [62,63], shred_idx bits [15,29].  Presumably
   fd_ulong_extract( key, i, j ) returns bits [i,j] of key inclusive --
   confirm against fd_bits.h. */
FD_FN_CONST static inline uint  fd_policy_dedup_key_kind     ( ulong key ) { return (uint)fd_ulong_extract( key, 62, 63 ); }
FD_FN_CONST static inline uint  fd_policy_dedup_key_shred_idx( ulong key ) { return (uint)fd_ulong_extract( key, 15, 29  ); }
      59             : 
/* Memory pool of dedup elements; the hash field doubles as the pool
   free-list link. */
#define POOL_NAME fd_policy_dedup_pool
#define POOL_T    fd_policy_dedup_ele_t
#define POOL_NEXT hash
#include "../../util/tmpl/fd_pool.c"

/* Chained hash map of dedup elements keyed on the compact key.  The
   hash field is shared with the pool free-list link (presumably an
   element is never simultaneously free and mapped -- confirm against
   fd_policy.c). */
#define MAP_NAME  fd_policy_dedup_map
#define MAP_ELE_T fd_policy_dedup_ele_t
#define MAP_NEXT  hash
#include "../../util/tmpl/fd_map_chain.c"

/* Doubly-linked LRU list of dedup elements (next/prev links). */
#define DLIST_NAME   fd_policy_dedup_lru
#define DLIST_ELE_T  fd_policy_dedup_ele_t
#define DLIST_NEXT   next
#define DLIST_PREV   prev
#include "../../util/tmpl/fd_dlist.c"
struct fd_policy_dedup {
  fd_policy_dedup_map_t * map;  /* map of dedup elements */
  fd_policy_dedup_ele_t * pool; /* memory pool of dedup elements */
  fd_policy_dedup_lru_t * lru;  /* doubly-linked LRU list of dedup elements in insertion order (fd_dlist, see template above) */
};
      80             : 
      81             : /* fd_policy_peer_t describes a peer validator that serves repairs.
      82             :    Peers are discovered through gossip, via a "ContactInfo" message that
      83             :    shares the validator's ip and repair server port. */
      84             : 
struct fd_policy_peer {
  fd_pubkey_t key;     /* map key, pubkey of the validator */
  ulong       next;    /* reserved for map_chain, pool */
  uint        ip4;     /* ip4 addr of the peer */
  ushort      port;    /* repair server port of the peer */
  ulong       req_cnt; /* count of requests we've sent to this peer */
  ulong       res_cnt; /* count of responses we've received from this peer */

  struct {
    ulong next; /* reserved by fd_policy_peer_dlist (latency buckets) */
    ulong prev; /* reserved by fd_policy_peer_dlist (latency buckets) */
  } dlist;

  /* below are for measuring bandwidth usage */
  long  first_req_ts;  /* ts of the first request sent to this peer */
  long  last_req_ts;   /* ts of the most recent request sent */

  long  first_resp_ts; /* ts of the first response received */
  long  last_resp_ts;  /* ts of the most recent response received */

  long  total_lat; /* total RTT over all responses in ns */
  ulong stake;     /* peer's stake (units not shown here -- presumably lamports, confirm) */

  uint ping;  /* whether this peer currently has a ping in our sign queue */
};
typedef struct fd_policy_peer fd_policy_peer_t;
     111             : 
/* Chained hash map from validator pubkey to fd_policy_peer_t.  Key
   equality compares the full 32-byte pubkey; the hash mixes the seed
   with only the first 8 bytes (presumably sufficient because pubkeys
   are effectively uniformly random -- confirm). */
#define MAP_NAME                 fd_policy_peer_map
#define MAP_ELE_T                fd_policy_peer_t
#define MAP_KEY_T                fd_pubkey_t
#define MAP_KEY_EQ(k0,k1)        (!memcmp( (k0)->uc, (k1)->uc, 32UL ))
#define MAP_KEY_HASH(key,seed)   (seed^fd_ulong_load_8( (key)->uc ))
#include "../../util/tmpl/fd_map_chain.c"

/* Memory pool of peer records. */
#define POOL_NAME fd_policy_peer_pool
#define POOL_T    fd_policy_peer_t
#include "../../util/tmpl/fd_pool.c"

/* Doubly-linked list of peers, linked through the embedded dlist
   struct; used for the fast/slow latency buckets below. */
#define DLIST_NAME  fd_policy_peer_dlist
#define DLIST_ELE_T fd_policy_peer_t
#define DLIST_NEXT  dlist.next
#define DLIST_PREV  dlist.prev
#include "../../util/tmpl/fd_dlist.c"
     128             : 
/* fd_policy_peers implements the data structures and bookkeeping for
   selecting repair peers via round-robin. */

struct fd_policy_peers {
  fd_policy_peer_t * pool;        /* memory pool of peers */
  fd_policy_peer_dlist_t * fast;  /* [0, FD_POLICY_LATENCY_THRESH]   ns avg RTT, group FD_POLICY_LATENCY_FAST */
  fd_policy_peer_dlist_t * slow;  /* (FD_POLICY_LATENCY_THRESH, inf) ns avg RTT, group FD_POLICY_LATENCY_SLOW */
  fd_policy_peer_map_t * map;     /* map keyed by pubkey to peer data */
  struct {
     uint stage;                         /* index into bucket_stages.  NOTE(review): the bound is the
                                            entry count (7), not sizeof(bucket_stages) which is a byte
                                            count -- confirm the wraparound check in fd_policy.c */
     fd_policy_peer_dlist_iter_t iter;   /* round-robin index of next peer */
  } select;
};
typedef struct fd_policy_peers fd_policy_peers_t;
     143             : 
#define FD_POLICY_LATENCY_FAST 1 /* identifies the fast latency bucket (fd_policy_peers.fast) */
#define FD_POLICY_LATENCY_SLOW 3 /* identifies the slow latency bucket (fd_policy_peers.slow) */

/* Policy parameters start */
/* NOTE(review): 80e6L is a long double literal (L suffix on a floating
   constant), not a long.  The comparison in
   fd_policy_peer_latency_bucket promotes to long double and behaves as
   intended at these magnitudes, but confirm the type is deliberate. */
#define FD_POLICY_LATENCY_THRESH 80e6L /* ns; less than this is a BEST peer, otherwise a WORST peer */
#define FD_POLICY_DEDUP_TIMEOUT  80e6L /* how long to wait before re-requesting the same shred (presumably ns, matching the threshold above -- confirm) */

/* Round robins through ALL the worst peers once, then round robins
   through ALL the best peers once, then round robins through ALL the
   best peers again, etc. All peers are initially added to the worst
   bucket, and moved once round trip times have been recorded.
   NOTE(review): static const at file scope in a header gives every
   translation unit its own copy of this table -- harmless but confirm
   it is intentional. */

static const uint bucket_stages[7] = {
   FD_POLICY_LATENCY_SLOW, /* do a cycle through worst peers 1/7 times to see if any improvements are made */
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
   FD_POLICY_LATENCY_FAST,
};
/* Policy parameters end */
     166             : 
struct fd_policy {
  fd_policy_dedup_t dedup; /* dedup cache of already sent requests */
  fd_policy_peers_t peers; /* repair peers (strategy & data) */
  long              tsmax; /* maximum time for an iteration before resetting the DFS to root */
  long              tsref; /* reference timestamp for resetting DFS */

  fd_rnonce_ss_t    rnonce_ss[1]; /* repair nonce state, copied in by fd_policy_new (see fd_rnonce_ss.h) */

  ulong turbine_slot0; /* set via fd_policy_set_turbine_slot0; presumably the first slot received over turbine -- confirm */
};
typedef struct fd_policy fd_policy_t;
     178             : 
     179             : /* Constructors */
     180             : 
/* fd_policy_{align,footprint} return the required alignment and
   footprint of a memory region suitable for use as a policy with up to
   dedup_max dedup cache entries and peer_max repair peers. */
     184             : 
FD_FN_CONST static inline ulong
fd_policy_align( void ) {
  return 128UL; /* alignment of the policy region; matches the outer alignment used by fd_policy_footprint */
}
     189             : 
FD_FN_CONST static inline ulong
fd_policy_footprint( ulong dedup_max, ulong peer_max ) {
  ulong peer_chain_cnt = fd_policy_peer_map_chain_cnt_est( peer_max );
  /* Region layout (in order): the fd_policy_t struct, the dedup
     map/pool/lru, the peer map/pool, and the two peer latency dlists
     (fast then slow), each appended at its natural alignment. */
  return FD_LAYOUT_FINI(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_APPEND(
    FD_LAYOUT_INIT,
      fd_policy_align(),            sizeof(fd_policy_t)                             ),
      fd_policy_dedup_map_align(),  fd_policy_dedup_map_footprint ( dedup_max )     ),
      fd_policy_dedup_pool_align(), fd_policy_dedup_pool_footprint( dedup_max )     ),
      fd_policy_dedup_lru_align(),  fd_policy_dedup_lru_footprint()                 ),
      fd_policy_peer_map_align(),   fd_policy_peer_map_footprint ( peer_chain_cnt ) ),
      fd_policy_peer_pool_align(),  fd_policy_peer_pool_footprint( peer_max )       ),
      fd_policy_peer_dlist_align(), fd_policy_peer_dlist_footprint()                ), /* fast */
      fd_policy_peer_dlist_align(), fd_policy_peer_dlist_footprint()                ), /* slow */
    fd_policy_align() );
}
     213             : 
     214             : /* fd_policy_new formats an unused memory region for use as a policy.
     215             :    mem is a non-NULL pointer to this region in the local address space
     216             :    with the required footprint and alignment.  rnonce_ss is copied
     217             :    locally, so the read interest is not retained after this function
     218             :    returns. */
     219             : 
     220             : void *
     221             : fd_policy_new( void * shmem, ulong dedup_max, ulong peer_max, ulong seed, fd_rnonce_ss_t const * rnonce_ss );
     222             : 
     223             : /* fd_policy_join joins the caller to the policy.  policy points to the
     224             :    first byte of the memory region backing the policy in the caller's
     225             :    address space.  Returns a pointer in the local address space to
     226             :    policy on success. */
     227             : 
     228             : fd_policy_t *
     229             : fd_policy_join( void * policy );
     230             : 
     231             : /* fd_policy_leave leaves a current local join.  Returns a pointer to
     232             :    the underlying shared memory region on success and NULL on failure
     233             :    (logs details).  Reasons for failure include policy is NULL. */
     234             : 
     235             : void *
     236             : fd_policy_leave( fd_policy_t const * policy );
     237             : 
     238             : /* fd_policy_delete unformats a memory region used as a policy.  Assumes
     239             :    only the nobody is joined to the region.  Returns a pointer to the
     240             :    underlying shared memory region or NULL if used obviously in error
     241             :    (e.g. policy is obviously not a policy ... logs details).  The
     242             :    ownership of the memory region is transferred to the caller. */
     243             : 
     244             : void *
     245             : fd_policy_delete( void * policy );
     246             : 
/* fd_policy_next returns the next repair request that should be made.
   Currently implements the default round-robin DFS strategy.
   NOTE(review): the file header describes the traversal as round-robin
   BFS while this comment and the tsmax field say DFS -- confirm which
   is accurate and reconcile the two. */

fd_repair_msg_t const *
fd_policy_next( fd_policy_t * policy, fd_forest_t * forest, fd_repair_t * repair, long now, ulong highest_known_slot, int * charge_busy );
     252             : 
/* fd_policy_peer_upsert upserts a peer into the policy.  If the peer
   does not exist, it is created.  If the peer already exists, it is
   updated.  Returns a pointer to the peer if a new peer was created,
   otherwise NULL (including on updates). */
fd_policy_peer_t const *
fd_policy_peer_upsert( fd_policy_t * policy, fd_pubkey_t const * key, fd_ip4_port_t const * addr );

/* fd_policy_peer_query returns the peer keyed by pubkey, presumably
   NULL if not found -- confirm against fd_policy.c. */
fd_policy_peer_t *
fd_policy_peer_query( fd_policy_t * policy, fd_pubkey_t const * key );

/* fd_policy_peer_remove removes the peer keyed by pubkey.  Return
   value semantics are defined by the implementation in fd_policy.c. */
int
fd_policy_peer_remove( fd_policy_t * policy, fd_pubkey_t const * key );

/* fd_policy_peer_select returns the pubkey of the next peer to send a
   repair request to, per the round-robin bucket strategy described
   above. */
fd_pubkey_t const *
fd_policy_peer_select( fd_policy_t * policy );

/* fd_policy_peer_request_update records that a request was sent to
   peer `to` (presumably bumps req_cnt and the request timestamps --
   confirm against fd_policy.c). */
void
fd_policy_peer_request_update( fd_policy_t * policy, fd_pubkey_t const * to );
     271             : 
     272             : static inline fd_policy_peer_dlist_t *
     273           0 : fd_policy_peer_latency_bucket( fd_policy_t * policy, long total_rtt /* ns */, ulong res_cnt ) {
     274           0 :    if( res_cnt == 0 || (long)(total_rtt / (long)res_cnt) > FD_POLICY_LATENCY_THRESH ) return policy->peers.slow;
     275           0 :    return policy->peers.fast;
     276           0 : }
     277             : 
/* fd_policy_peer_response_update records a response from peer `to`
   with round-trip time rtt (ns, matching total_lat's units);
   presumably updates res_cnt, total_lat, the response timestamps and
   the peer's latency bucket -- confirm against fd_policy.c. */
void
fd_policy_peer_response_update( fd_policy_t * policy, fd_pubkey_t const * to, long rtt );

/* fd_policy_set_turbine_slot0 sets policy->turbine_slot0 to slot. */
void
fd_policy_set_turbine_slot0( fd_policy_t * policy, ulong slot );
     283             : 
#endif /* HEADER_fd_src_discof_repair_fd_policy_h */

Generated by: LCOV version 1.14