LCOV - code coverage report
Current view: top level - flamenco/progcache - fd_progcache_user.c (source / functions)
Test: cov.lcov
Date: 2025-12-28 05:17:03
              Hit    Total    Coverage
Lines:        339    443     76.5 %
Functions:    16     16      100.0 %

          Line data    Source code
       1             : #include "fd_prog_load.h"
       2             : #include "fd_progcache_user.h"
       3             : #include "fd_progcache_rec.h"
       4             : #include "../../util/racesan/fd_racesan_target.h"
       5             : 
       6             : FD_TL fd_progcache_metrics_t fd_progcache_metrics_default;
       7             : 
       8             : fd_progcache_t *
       9             : fd_progcache_join( fd_progcache_t * ljoin,
      10             :                    void *           shfunk,
      11             :                    uchar *          scratch,
      12          45 :                    ulong            scratch_sz ) {
      13          45 :   if( FD_UNLIKELY( !ljoin ) ) {
      14           0 :     FD_LOG_WARNING(( "NULL ljoin" ));
      15           0 :     return NULL;
      16           0 :   }
      17          45 :   if( FD_UNLIKELY( !shfunk ) ) {
      18           0 :     FD_LOG_WARNING(( "NULL shfunk" ));
      19           0 :     return NULL;
      20           0 :   }
      21          45 :   if( FD_LIKELY( scratch_sz ) ) {
      22          45 :     if( FD_UNLIKELY( !scratch ) ) {
      23           0 :       FD_LOG_WARNING(( "NULL scratch" ));
      24           0 :       return NULL;
      25           0 :     }
      26          45 :     if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)scratch, FD_PROGCACHE_SCRATCH_ALIGN ) ) ) {
      27           0 :       FD_LOG_WARNING(( "misaligned scratch" ));
      28           0 :       return NULL;
      29           0 :     }
      30          45 :   }
      31          45 :   memset( ljoin, 0, sizeof(fd_progcache_t) );
      32          45 :   if( FD_UNLIKELY( !fd_funk_join( ljoin->funk, shfunk ) ) ) return NULL;
      33             : 
      34          45 :   ljoin->metrics    = &fd_progcache_metrics_default;
      35          45 :   ljoin->scratch    = scratch;
      36          45 :   ljoin->scratch_sz = scratch_sz;
      37             : 
      38          45 :   return ljoin;
      39          45 : }
      40             : 
      41             : void *
      42             : fd_progcache_leave( fd_progcache_t * cache,
      43          42 :                     void **          opt_shfunk ) {
      44          42 :   if( FD_UNLIKELY( !cache ) ) {
      45           0 :     FD_LOG_WARNING(( "NULL cache" ));
      46           0 :     return NULL;
      47           0 :   }
      48          42 :   if( FD_UNLIKELY( !fd_funk_leave( cache->funk, opt_shfunk ) ) ) return NULL;
      49          42 :   cache->scratch    = NULL;
      50          42 :   cache->scratch_sz = 0UL;
      51          42 :   return cache;
      52          42 : }
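
A usage sketch of the join/leave pair above (hypothetical caller; shfunk,
scratch, and scratch_sz stand for a caller-provided funk shared memory
region and an optional loader scratch buffer):

  fd_progcache_t cache[1];
  if( FD_UNLIKELY( !fd_progcache_join( cache, shfunk, scratch, scratch_sz ) ) ) {
    FD_LOG_ERR(( "fd_progcache_join failed" ));
  }
  /* ... query / fill the cache ... */
  void * shfunk_out = NULL;
  if( FD_UNLIKELY( !fd_progcache_leave( cache, &shfunk_out ) ) ) {
    FD_LOG_ERR(( "fd_progcache_leave failed" ));
  }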
      53             : 
      54             : /* fd_progcache_load_fork pivots the progcache object to the selected
      55             :    fork (identified by tip XID).
      56             : 
      57             :    Populates cache->fork, which is an array-backed list of XIDs sorted
      58             :    newest to oldest.  Cache lookups only respect records with an XID
      59             :    present in that list.
      60             : 
      61             :    For any given xid, the epoch_slot0 is assumed to stay constant. */
      62             : 
      63             : static void
      64             : fd_progcache_load_fork_slow( fd_progcache_t *          cache,
      65             :                              fd_funk_txn_xid_t const * xid,
      66         105 :                              ulong                     epoch_slot0 ) {
      67         105 :   fd_funk_txn_xid_t next_xid = *xid;
      68         105 :   if( FD_UNLIKELY( next_xid.ul[0]<epoch_slot0 ) ) {
      69           0 :     FD_LOG_CRIT(( "fd_progcache_load_fork: attempted to load xid=%lu:%lu, which predates first slot of bank's epoch (epoch_slot0=%lu)",
      70           0 :                   next_xid.ul[0], next_xid.ul[1], epoch_slot0 ));
      71           0 :   }
      72             : 
      73             :   /* Walk transaction graph, recovering from overruns on-the-fly */
      74         105 :   cache->fork_depth = 0UL;
      75             : 
      76         105 :   ulong txn_max = fd_funk_txn_pool_ele_max( cache->funk->txn_pool );
      77         105 :   ulong i;
      78         159 :   for( i=0UL; i<FD_PROGCACHE_DEPTH_MAX; i++ ) {
      79         159 :     fd_funk_txn_map_query_t query[1];
      80         159 :     fd_funk_txn_t const *   candidate;
      81         159 :     fd_funk_txn_xid_t       found_xid;
      82         159 :     ulong                   parent_idx;
      83         159 :     fd_funk_txn_xid_t       parent_xid;
      84         159 : retry:
      85             :     /* Speculatively look up transaction from map */
      86         159 :     for(;;) {
      87         159 :       int query_err = fd_funk_txn_map_query_try( cache->funk->txn_map, &next_xid, NULL, query, 0 );
      88         159 :       if( FD_UNLIKELY( query_err==FD_MAP_ERR_AGAIN ) ) {
      89             :         /* FIXME random backoff */
      90           0 :         FD_SPIN_PAUSE();
      91           0 :         continue;
      92           0 :       }
      93         159 :       if( query_err==FD_MAP_ERR_KEY ) goto done;
      94         144 :       if( FD_UNLIKELY( query_err!=FD_MAP_SUCCESS ) ) {
      95           0 :         FD_LOG_CRIT(( "fd_funk_txn_map_query_try failed: %i-%s", query_err, fd_map_strerror( query_err ) ));
      96           0 :       }
      97         144 :       break;
      98         144 :     }
      99             : 
     100             :     /* Lookup parent transaction while recovering from overruns
     101             :        FIXME This would be a lot easier if transactions specified
     102             :              parent by XID instead of by pointer ... */
     103         144 :     candidate = fd_funk_txn_map_query_ele_const( query );
     104         144 :     FD_COMPILER_MFENCE();
     105         144 :     do {
     106         144 :       found_xid  = FD_VOLATILE_CONST( candidate->xid );
     107         144 :       parent_idx = fd_funk_txn_idx( FD_VOLATILE_CONST( candidate->parent_cidx ) );
     108         144 :       if( parent_idx<txn_max ) {
     109          54 :         FD_COMPILER_MFENCE();
     110          54 :         fd_funk_txn_t const * parent = &cache->funk->txn_pool->ele[ parent_idx ];
     111          54 :         parent_xid = FD_VOLATILE_CONST( parent->xid );
     112          54 :         FD_COMPILER_MFENCE();
     113          54 :       }
     114         144 :       parent_idx = fd_funk_txn_idx( FD_VOLATILE_CONST( candidate->parent_cidx ) );
     115         144 :     } while(0);
     116         144 :     FD_COMPILER_MFENCE();
     117             : 
     118             :     /* Verify speculative loads by ensuring txn still exists in map */
     119         144 :     if( FD_UNLIKELY( fd_funk_txn_map_query_test( query )!=FD_MAP_SUCCESS ) ) {
     120           0 :       FD_SPIN_PAUSE();
     121           0 :       goto retry;
     122           0 :     }
     123             : 
     124         144 :     if( FD_UNLIKELY( !fd_funk_txn_xid_eq( &found_xid, &next_xid ) ) ) {
     125           0 :       FD_LOG_CRIT(( "fd_progcache_load_fork_slow detected memory corruption: expected xid %lu:%lu at %p, found %lu:%lu",
     126           0 :                     next_xid.ul[0], next_xid.ul[1],
     127           0 :                     (void *)candidate,
     128           0 :                     found_xid.ul[0], found_xid.ul[1] ));
     129           0 :     }
     130             : 
     131         144 :     cache->fork[ i ] = next_xid;
     132         144 :     if( fd_funk_txn_idx_is_null( parent_idx ) ||
     133         144 :         next_xid.ul[0]<epoch_slot0 ) {
     134             :       /* Reached root or fork graph node is from previous epoch */
     135          90 :       i++;
     136          90 :       break;
     137          90 :     }
     138          54 :     next_xid = parent_xid;
     139          54 :   }
     140             : 
     141         105 : done:
     142         105 :   cache->fork_depth = i;
     143             : 
      144             :   /* Only include published/rooted records if the last published slot
      145             :      falls within the current epoch. */
     146         105 :   if( fd_funk_last_publish( cache->funk )->ul[0] >= epoch_slot0 &&
     147         105 :       cache->fork_depth < FD_PROGCACHE_DEPTH_MAX ) {
     148         105 :     fd_funk_txn_xid_set_root( &cache->fork[ cache->fork_depth++ ] );
     149         105 :   }
     150         105 : }
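
The walk above relies on fd_funk's speculative query protocol.  A minimal
sketch of that pattern in isolation (same API as used above; txn_map is a
transaction map join and xid a fd_funk_txn_xid_t const *, both assumed in
scope):

  fd_funk_txn_map_query_t query[1];
  fd_funk_txn_xid_t       xid_copy;
  for(;;) {
    int err = fd_funk_txn_map_query_try( txn_map, xid, NULL, query, 0 );
    if( err==FD_MAP_ERR_AGAIN ) { FD_SPIN_PAUSE(); continue; }       /* chain busy */
    if( err!=FD_MAP_SUCCESS  ) break;                                /* e.g. key not found */
    fd_funk_txn_t const * txn = fd_funk_txn_map_query_ele_const( query );
    xid_copy = FD_VOLATILE_CONST( txn->xid );                        /* speculative load */
    if( fd_funk_txn_map_query_test( query )==FD_MAP_SUCCESS ) break; /* loads confirmed */
    FD_SPIN_PAUSE();                                                 /* overrun; retry */
  }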
     151             : 
     152             : static inline void
     153             : fd_progcache_load_fork( fd_progcache_t *          cache,
     154             :                         fd_funk_txn_xid_t const * xid,
     155         207 :                         ulong                     epoch_slot0 ) {
     156             :   /* Skip if already on the correct fork */
     157         207 :   if( FD_LIKELY( (!!cache->fork_depth) & (!!fd_funk_txn_xid_eq( &cache->fork[ 0 ], xid ) ) ) ) return;
     158         105 :   cache->metrics->fork_switch_cnt++;
     159         105 :   fd_progcache_load_fork_slow( cache, xid, epoch_slot0 ); /* switch fork */
     160         105 : }
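
For debugging, the fork list populated here can be inspected directly.  A
hypothetical helper (not part of the API), using only fields referenced
above:

  static void
  fd_progcache_fork_dump( fd_progcache_t const * cache ) {
    for( ulong i=0UL; i<cache->fork_depth; i++ ) {
      FD_LOG_NOTICE(( "fork[%lu] = %lu:%lu", i,
                      cache->fork[ i ].ul[0], cache->fork[ i ].ul[1] ));
    }
  }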
     161             : 
     162             : /* fd_progcache_query searches for a program cache entry on the current
     163             :    fork.  Stops short of an epoch boundary. */
     164             : 
     165             : static int
     166             : fd_progcache_fork_has_xid( fd_progcache_t const *    cache,
     167         132 :                            fd_funk_txn_xid_t const * rec_xid ) {
     168             :   /* FIXME unroll this a little */
     169         132 :   ulong const fork_depth = cache->fork_depth;
     170         234 :   for( ulong i=0UL; i<fork_depth; i++ ) {
     171         198 :     if( fd_funk_txn_xid_eq( &cache->fork[i], rec_xid ) ) return 1;
     172         198 :   }
     173          36 :   return 0;
     174         132 : }
     175             : 
     176             : static int
     177             : fd_progcache_search_chain( fd_progcache_t const *    cache,
     178             :                            ulong                     chain_idx,
     179             :                            fd_funk_rec_key_t const * key,
     180             :                            ulong                     epoch_slot0,
     181         150 :                            fd_funk_rec_t **          out_rec ) {
     182         150 :   *out_rec = NULL;
     183             : 
     184         150 :   fd_funk_rec_map_shmem_t *                     shmap     = cache->funk->rec_map->map;
     185         150 :   fd_funk_rec_map_shmem_private_chain_t const * chain_tbl = fd_funk_rec_map_shmem_private_chain_const( shmap, 0UL );
     186         150 :   fd_funk_rec_map_shmem_private_chain_t const * chain     = chain_tbl + chain_idx;
     187         150 :   fd_funk_rec_t *                               rec_tbl   = cache->funk->rec_pool->ele;
     188         150 :   ulong                                         rec_max   = fd_funk_rec_pool_ele_max( cache->funk->rec_pool );
     189         150 :   ulong                                         ver_cnt   = FD_VOLATILE_CONST( chain->ver_cnt );
     190         150 :   ulong                                         root_slot = fd_funk_last_publish( cache->funk )->ul[0];
     191             : 
     192             :   /* Start a speculative transaction for the chain containing revisions
     193             :      of the program cache key we are looking for. */
     194         150 :   ulong cnt = fd_funk_rec_map_private_vcnt_cnt( ver_cnt );
     195         150 :   if( FD_UNLIKELY( fd_funk_rec_map_private_vcnt_ver( ver_cnt )&1 ) ) {
     196           0 :     return FD_MAP_ERR_AGAIN; /* chain is locked */
     197           0 :   }
     198         150 :   FD_COMPILER_MFENCE();
     199         150 :   uint ele_idx = chain->head_cidx;
     200             : 
     201             :   /* Walk the map chain, remember the best entry */
     202         150 :   fd_funk_rec_t * best      = NULL;
     203         150 :   long            best_slot = -1L;
     204         330 :   for( ulong i=0UL; i<cnt; i++, ele_idx=rec_tbl[ ele_idx ].map_next ) {
     205         180 :     fd_funk_rec_t * rec = &rec_tbl[ ele_idx ];
     206             : 
     207             :     /* Skip over unrelated records (hash collision) */
     208         180 :     if( FD_UNLIKELY( !fd_funk_rec_key_eq( rec->pair.key, key ) ) ) continue;
     209             : 
     210             :     /* Skip over records from an older epoch (FIXME could bail early
     211             :        here if the chain is ordered) */
     212         180 :     ulong found_slot = rec->pair.xid->ul[0];
     213         180 :     if( found_slot==ULONG_MAX ) found_slot = root_slot;
     214             : 
     215         180 :     if( FD_UNLIKELY( found_slot<epoch_slot0 ) ) continue;
     216             : 
     217             :     /* Skip over records that are older than what we already have */
     218         171 :     if( FD_UNLIKELY( (long)found_slot<best_slot ) ) continue;
     219             : 
     220             :     /* Confirm that record is part of the current fork
     221             :        FIXME this has bad performance / pointer-chasing */
     222         132 :     if( FD_UNLIKELY( !fd_progcache_fork_has_xid( cache, rec->pair.xid ) ) ) continue;
     223             : 
     224          96 :     best      = rec;
     225          96 :     best_slot = (long)found_slot;
     226          96 :     if( FD_UNLIKELY( rec->map_next==ele_idx ) ) {
     227           0 :       FD_LOG_CRIT(( "fd_progcache_search_chain detected cycle" ));
     228           0 :     }
     229          96 :     if( rec->map_next > rec_max ) {
     230          57 :       if( FD_UNLIKELY( !fd_funk_rec_map_private_idx_is_null( rec->map_next ) ) ) {
     231           0 :         FD_LOG_CRIT(( "fd_progcache_search_chain detected memory corruption: rec->map_next %u is out of bounds (rec_max %lu)",
     232           0 :                       rec->map_next, rec_max ));
     233           0 :       }
     234          57 :     }
     235          96 :   }
     236             : 
     237             :   /* Retry if we were overrun */
     238         150 :   if( FD_UNLIKELY( FD_VOLATILE_CONST( chain->ver_cnt )!=ver_cnt ) ) {
     239           0 :     return FD_MAP_ERR_AGAIN;
     240           0 :   }
     241             : 
     242         150 :   *out_rec = best;
     243         150 :   return FD_MAP_SUCCESS;
     244         150 : }
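
The chain walk above is a seqlock-style read: ver_cnt is sampled before the
walk (an odd version means a writer holds the chain) and re-sampled after it
(any change means the walk may have seen torn data).  A generic sketch of
that discipline, with a single data word standing in for the chain:

  static int
  seqlock_read( ulong volatile const * ver,
                ulong volatile const * data,
                ulong *                out ) {
    ulong v0 = *ver;
    if( v0 & 1UL ) return FD_MAP_ERR_AGAIN;  /* writer active */
    FD_COMPILER_MFENCE();
    ulong x = *data;                         /* speculative load */
    FD_COMPILER_MFENCE();
    if( *ver!=v0 ) return FD_MAP_ERR_AGAIN;  /* overrun; caller retries */
    *out = x;
    return FD_MAP_SUCCESS;
  }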
     245             : 
     246             : static fd_funk_rec_t *
     247             : fd_progcache_query( fd_progcache_t *          cache,
     248             :                     fd_funk_txn_xid_t const * xid,
     249             :                     fd_funk_rec_key_t const * key,
     250         150 :                     ulong                     epoch_slot0 ) {
     251             :   /* Hash key to chain */
     252         150 :   fd_funk_xid_key_pair_t pair[1];
     253         150 :   fd_funk_txn_xid_copy( pair->xid, xid );
     254         150 :   fd_funk_rec_key_copy( pair->key, key );
     255         150 :   fd_funk_rec_map_t const * rec_map = cache->funk->rec_map;
     256         150 :   ulong hash      = fd_funk_rec_map_key_hash( pair, rec_map->map->seed );
     257         150 :   ulong chain_idx = (hash & (rec_map->map->chain_cnt-1UL) );
     258             : 
     259             :   /* Traverse chain for candidate */
     260         150 :   fd_funk_rec_t * rec = NULL;
     261         150 :   for(;;) {
     262         150 :     int err = fd_progcache_search_chain( cache, chain_idx, key, epoch_slot0, &rec );
     263         150 :     if( FD_LIKELY( err==FD_MAP_SUCCESS ) ) break;
     264           0 :     FD_SPIN_PAUSE();
     265           0 :     fd_racesan_hook( "fd_progcache_query_wait" );
     266             :     /* FIXME backoff */
     267           0 :   }
     268             : 
     269         150 :   return rec;
     270         150 : }
     271             : 
     272             : fd_progcache_rec_t const *
     273             : fd_progcache_peek( fd_progcache_t *          cache,
     274             :                    fd_funk_txn_xid_t const * xid,
     275             :                    void const *              prog_addr,
     276         150 :                    ulong                     epoch_slot0 ) {
     277         150 :   if( FD_UNLIKELY( !cache || !cache->funk->shmem ) ) FD_LOG_CRIT(( "NULL progcache" ));
     278         150 :   fd_progcache_load_fork( cache, xid, epoch_slot0 );
     279         150 :   fd_funk_rec_key_t key[1]; memcpy( key->uc, prog_addr, 32UL );
     280         150 :   fd_funk_rec_t const * rec = fd_progcache_query( cache, xid, key, epoch_slot0 );
     281         150 :   if( FD_UNLIKELY( !rec ) ) return NULL;
     282             : 
     283          93 :   fd_progcache_rec_t const * entry = fd_funk_val_const( rec, fd_funk_wksp( cache->funk ) );
     284          93 :   if( entry->slot < epoch_slot0 ) entry = NULL;
     285             : 
     286          93 :   cache->metrics->hit_cnt += !!entry;
     287             : 
     288          93 :   return entry;
     289         150 : }
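
A usage sketch of this read-only path (hypothetical caller): peek never
fills the cache, so a NULL return simply means no entry is visible on this
fork for the current epoch.

  fd_progcache_rec_t const * rec = fd_progcache_peek( cache, xid, prog_addr, epoch_slot0 );
  if( rec ) { /* cached program or tombstone, visible on this fork */ }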
     290             : 
     291             : static void
     292             : fd_funk_rec_push_tail( fd_funk_rec_t * rec_pool,
     293             :                        fd_funk_rec_t * rec,
     294             :                        uint *          rec_head_idx,
     295          66 :                        uint *          rec_tail_idx ) {
     296          66 :   uint rec_idx = (uint)( rec - rec_pool );
     297          66 :   for(;;) {
     298             : 
     299             :     /* Doubly linked list append.  Robust in the event of concurrent
     300             :        publishes.  Iteration during publish not supported.  Sequence:
     301             :        - Identify tail element
     302             :        - Set new element's prev and next pointers
     303             :        - Set tail element's next pointer
     304             :        - Set tail pointer */
     305             : 
     306          66 :     uint rec_prev_idx = FD_VOLATILE_CONST( *rec_tail_idx );
     307          66 :     rec->prev_idx = rec_prev_idx;
     308          66 :     rec->next_idx = FD_FUNK_REC_IDX_NULL;
     309          66 :     FD_COMPILER_MFENCE();
     310             : 
     311          66 :     uint * next_idx_p;
     312          66 :     if( fd_funk_rec_idx_is_null( rec_prev_idx ) ) {
     313          66 :       next_idx_p = rec_head_idx;
     314          66 :     } else {
     315           0 :       next_idx_p = &rec_pool[ rec_prev_idx ].next_idx;
     316           0 :     }
     317             : 
     318          66 :     fd_racesan_hook( "fd_progcache_rec_push_tail_start" );
     319          66 :     if( FD_UNLIKELY( !__sync_bool_compare_and_swap( next_idx_p, FD_FUNK_REC_IDX_NULL, rec_idx ) ) ) {
     320             :       /* Another thread beat us to the punch */
     321           0 :       FD_SPIN_PAUSE();
     322           0 :       continue;
     323           0 :     }
     324             : 
     325          66 :     if( FD_UNLIKELY( !__sync_bool_compare_and_swap( rec_tail_idx, rec_prev_idx, rec_idx ) ) ) {
     326             :       /* This CAS is guaranteed to succeed if the previous CAS passed. */
     327           0 :       FD_LOG_CRIT(( "Irrecoverable data race encountered while appending to txn rec list (invariant violation?): cas(%p,%u,%u)",
     328           0 :                     (void *)rec_tail_idx, rec_prev_idx, rec_idx ));
     329           0 :     }
     330             : 
     331          66 :     break;
     332          66 :   }
     333          66 : }
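
A self-contained toy version of the append protocol above, using pointers
instead of pool indices and dropping the prev links (a sketch to illustrate
the two-CAS structure, not the production code):

  typedef struct toy_node toy_node_t;
  struct toy_node { toy_node_t * next; };

  static toy_node_t * toy_head = NULL;
  static toy_node_t * toy_tail = NULL;

  static void
  toy_push_tail( toy_node_t * n ) {
    for(;;) {
      toy_node_t *  prev   = FD_VOLATILE_CONST( toy_tail );
      toy_node_t ** next_p = prev ? &prev->next : &toy_head;
      n->next = NULL;
      FD_COMPILER_MFENCE();
      /* CAS 1: claim the predecessor's next pointer (or the list head) */
      if( !__sync_bool_compare_and_swap( next_p, NULL, n ) ) {
        FD_SPIN_PAUSE();  /* another thread appended first; retry */
        continue;
      }
      /* CAS 2: advance the tail.  Only the winner of CAS 1 gets here, so
         the tail still equals prev and this cannot fail. */
      __sync_bool_compare_and_swap( &toy_tail, prev, n );
      break;
    }
  }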
     334             : 
     335             : __attribute__((warn_unused_result))
     336             : static int
     337             : fd_progcache_push( fd_progcache_t * cache,
     338             :                    fd_funk_txn_t *  txn,
     339             :                    fd_funk_rec_t *  rec,
     340             :                    void const *     prog_addr,
     341          78 :                    fd_funk_rec_t ** dup_rec ) {
     342          78 :   fd_funk_t * funk = cache->funk;
     343          78 :   *dup_rec = NULL;
     344             : 
     345             :   /* Phase 1: Determine record's xid-key pair */
     346             : 
     347          78 :   rec->tag = 0;
     348          78 :   rec->prev_idx = FD_FUNK_REC_IDX_NULL;
     349          78 :   rec->next_idx = FD_FUNK_REC_IDX_NULL;
     350          78 :   memcpy( rec->pair.key, prog_addr, 32UL );
     351          78 :   if( FD_UNLIKELY( txn ) ) {
     352          69 :     fd_funk_txn_xid_copy( rec->pair.xid, &txn->xid );
     353          69 :   } else {
     354           9 :     fd_funk_txn_xid_set_root( rec->pair.xid );
     355           9 :   }
     356             : 
     357             :   /* Phase 2: Lock rec_map chain, entering critical section */
     358             : 
     359          78 :   struct {
     360          78 :     fd_funk_rec_map_txn_t txn[1];
     361          78 :     fd_funk_rec_map_txn_private_info_t info[1];
     362          78 :   } _map_txn;
     363          78 :   fd_funk_rec_map_txn_t * map_txn = fd_funk_rec_map_txn_init( _map_txn.txn, funk->rec_map, 1UL );
     364          78 :   fd_funk_rec_map_txn_add( map_txn, &rec->pair, 1 );
     365          78 :   int txn_err = fd_funk_rec_map_txn_try( map_txn, FD_MAP_FLAG_BLOCKING );
     366          78 :   if( FD_UNLIKELY( txn_err!=FD_MAP_SUCCESS ) ) {
      367           0 :     FD_LOG_CRIT(( "Failed to insert progcache record: cannot lock funk rec map chain: %i-%s", txn_err, fd_map_strerror( txn_err ) ));
     368           0 :   }
     369             : 
     370             :   /* Phase 3: Check if record exists */
     371             : 
     372          78 :   fd_funk_rec_map_query_t query[1];
     373          78 :   int query_err = fd_funk_rec_map_txn_query( funk->rec_map, &rec->pair, NULL, query, 0 );
     374          78 :   if( FD_UNLIKELY( query_err==FD_MAP_SUCCESS ) ) {
     375           3 :     fd_funk_rec_map_txn_test( map_txn );
     376           3 :     fd_funk_rec_map_txn_fini( map_txn );
     377           3 :     *dup_rec = query->ele;
     378           3 :     return 0; /* another thread was faster */
     379          75 :   } else if( FD_UNLIKELY( query_err!=FD_MAP_ERR_KEY ) ) {
     380           0 :     FD_LOG_CRIT(( "fd_funk_rec_map_txn_query failed: %i-%s", query_err, fd_map_strerror( query_err ) ));
     381           0 :   }
     382             : 
      383             :   /* Phase 4: Insert new record into rec_map */
     384             : 
     385          75 :   int insert_err = fd_funk_rec_map_txn_insert( funk->rec_map, rec );
     386          75 :   if( FD_UNLIKELY( insert_err!=FD_MAP_SUCCESS ) ) {
     387           0 :     FD_LOG_CRIT(( "fd_funk_rec_map_txn_insert failed: %i-%s", insert_err, fd_map_strerror( insert_err ) ));
     388           0 :   }
     389             : 
      390             :   /* At this point, another thread could aggressively evict this entry.
      391             :      But this entry is not yet linked into the txn's record list!  This
      392             :      is why we hold a lock on the rec_map chain -- the rec_map_remove
      393             :      executed by the eviction is sequenced after phase 5 completes. */
      394             : 
      395             :   /* Phase 5: Insert rec into the txn's record list */
     396             : 
     397          75 :   if( txn ) {
     398          66 :     fd_funk_rec_push_tail( funk->rec_pool->ele,
     399          66 :                            rec,
     400          66 :                            &txn->rec_head_idx,
     401          66 :                            &txn->rec_tail_idx );
     402          66 :   }
     403             : 
     404             :   /* Phase 6: Finish rec_map transaction */
     405             : 
     406          75 :   int test_err = fd_funk_rec_map_txn_test( map_txn );
     407          75 :   if( FD_UNLIKELY( test_err!=FD_MAP_SUCCESS ) ) FD_LOG_CRIT(( "fd_funk_rec_map_txn_test failed: %i-%s", test_err, fd_map_strerror( test_err ) ));
     408          75 :   fd_funk_rec_map_txn_fini( map_txn );
     409          75 :   return 1;
     410          75 : }
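
A sketch of the hazard the chain lock closes, assuming (as the comment
between phases 4 and 5 above does) that eviction locates victims through
rec_map and then unlinks them:

  /* Hypothetical interleaving without the chain lock:
       T1: phase 4  rec_map insert( rec )
       T2:          evict:  rec_map remove( rec )  -- rec not on txn list yet
       T1: phase 5  push_tail( txn, rec )          -- txn list now references
                                                      a record the map forgot
     With the lock held from phase 2 through phase 6, T2's remove cannot
     start until the phase 5 list insert is visible. */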
     411             : 
     412             : #if FD_HAS_ATOMIC
     413             : 
     414             : static int
     415          69 : fd_progcache_txn_try_lock( fd_funk_txn_t * txn ) {
     416          69 :   for(;;) {
     417          69 :     ushort * lock  = &txn->lock->value;
     418          69 :     ushort   value = FD_VOLATILE_CONST( *lock );
     419          69 :     if( FD_UNLIKELY( value>=0xFFFE ) ) return 0; /* txn is write-locked */
     420          69 :     if( FD_LIKELY( FD_ATOMIC_CAS( lock, value, value+1 )==value ) ) {
     421          69 :       return 1; /* transaction now read-locked */
     422          69 :     }
     423          69 :   }
     424          69 : }
     425             : 
     426             : #else
     427             : 
     428             : static int
     429             : fd_progcache_txn_try_lock( fd_funk_txn_t * txn ) {
     430             :   ushort * lock  = &txn->lock->value;
     431             :   ushort   value = FD_VOLATILE_CONST( *lock );
     432             :   if( FD_UNLIKELY( value>=0xFFFE ) ) return 0; /* txn is write-locked */
     433             :   *lock = value + 1;
     434             :   return 1; /* transaction now read-locked */
     435             : }
     436             : 
     437             : #endif
     438             : 
     439             : static void
     440          78 : fd_progcache_txn_unlock( fd_funk_txn_t * txn ) {
     441          78 :   if( !txn ) return;
     442          69 :   fd_rwlock_unread( txn->lock );
     443          69 : }
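
Usage sketch for the lock pair above (txn assumed in scope and non-NULL): a
successful try-lock takes a read lock, which keeps a concurrent publish
(which needs the write lock) from rooting the txn mid-insert, and is matched
by fd_rwlock_unread via fd_progcache_txn_unlock.

  if( fd_progcache_txn_try_lock( txn ) ) {
    /* ... txn cannot be published while the read lock is held ... */
    fd_progcache_txn_unlock( txn );
  }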
     444             : 
     445             : /* fd_progcache_lock_best_txn picks a fork graph node close to
     446             :    target_slot and write locks it for program cache entry insertion.
     447             : 
     448             :    The cache entry should be placed as far up the fork graph as
     449             :    possible (so it can be shared across more downstream forks), but not
     450             :    too early (or it would cause non-determinism).
     451             : 
     452             :    Influenced by a number of things:
     453             : 
     454             :    - Program modification time (cannot predate the program modification)
     455             :    - Epoch boundaries (cannot span across epochs)
     456             :    - Transaction publishing (cannot create a cache entry at a txn that
     457             :      is in the process of being published) */
     458             : 
     459             : static fd_funk_txn_t *
     460             : fd_progcache_lock_best_txn( fd_progcache_t * cache,
     461          45 :                             ulong            target_slot ) {
     462             : 
     463          45 :   fd_funk_txn_xid_t last_publish[1];
     464          45 :   fd_funk_txn_xid_ld_atomic( last_publish, fd_funk_last_publish( cache->funk ) );
     465          45 :   if( target_slot <= last_publish->ul[0] &&
     466          45 :       !fd_funk_txn_xid_eq_root( last_publish ) ) {
      467           9 :     return NULL; /* target slot already published; record goes to funk root */
     468           9 :   }
     469             : 
     470             :   /* Scan fork graph for oldest node (>= program update slot) */
     471          36 :   ulong target_xid_idx;
     472          36 :   ulong fork_depth = cache->fork_depth;
     473          36 :   for( target_xid_idx=0UL; target_xid_idx<fork_depth; target_xid_idx++ ) {
     474          36 :     if( cache->fork[ target_xid_idx ].ul[0]<=target_slot ) break;
     475          36 :   }
     476             : 
     477             :   /* Backtrack up to newer fork graph nodes (>= access slot)
     478             :      Very old slots could have been rooted at this point */
     479          36 :   do {
     480             :     /* Locate fork */
     481          36 :     fd_funk_txn_xid_t const * xid = &cache->fork[ target_xid_idx ];
     482             : 
     483             :     /* Speculatively query txn_map (recovering from ongoing rooting),
     484             :        and lock target transaction */
     485          36 :     for(;;) {
     486          36 :       fd_funk_txn_map_query_t query[1];
     487          36 :       int query_err = fd_funk_txn_map_query_try( cache->funk->txn_map, xid, NULL, query, 0 );
     488          36 :       if( FD_UNLIKELY( query_err==FD_MAP_ERR_AGAIN ) ) {
     489             :         /* FIXME random backoff */
     490           0 :         FD_SPIN_PAUSE();
     491           0 :         continue;
     492           0 :       }
     493          36 :       if( FD_LIKELY( query_err==FD_MAP_SUCCESS ) ) {
     494             :         /* Attempt to read-lock transaction */
     495          36 :         fd_funk_txn_t * txn = fd_funk_txn_map_query_ele( query );
     496          36 :         if( FD_LIKELY( fd_progcache_txn_try_lock( txn ) ) ) {
     497          36 :           FD_TEST( fd_funk_txn_map_query_test( query )==FD_MAP_SUCCESS );
     498          36 :           return txn;
     499          36 :         }
     500             :         /* currently being rooted */
     501          36 :       } else if( FD_UNLIKELY( query_err!=FD_MAP_ERR_KEY ) ) {
     502           0 :         FD_LOG_CRIT(( "fd_funk_txn_map_query_try failed (%i-%s)", query_err, fd_map_strerror( query_err ) ));
     503           0 :       }
     504           0 :       break;
     505          36 :     }
     506             : 
     507             :     /* Cannot insert at this fork graph node (already rooted, or
     508             :        currently being rooted), try one newer */
     509           0 :     target_xid_idx--;
     510           0 :   } while( target_xid_idx!=ULONG_MAX );
     511             : 
     512             :   /* There is no funk_txn in range [target_slot,load_slot] that we can
     513             :      create a cache entry at. */
     514           0 :   FD_LOG_CRIT(( "Could not find program cache fork graph node for target slot %lu", target_slot ));
     515           0 : }
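
A worked example of the selection above, with hypothetical slot numbers:

  /* cache->fork (newest to oldest): {105, 103, 101, 100, root}
     program last deployed at slot 101                 -> target_slot = 101
     scan: 105 > 101, 103 > 101, fork[2] = 101 <= 101  -> stop at slot 101
     try to read-lock txn 101; if it is mid-publish, backtrack to 103,
     then 105 */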
     516             : 
     517             : static fd_progcache_rec_t const *
     518             : fd_progcache_insert( fd_progcache_t *           cache,
     519             :                      fd_accdb_user_t *          accdb,
     520             :                      fd_funk_txn_xid_t const *  load_xid,
     521             :                      void const *               prog_addr,
     522             :                      fd_prog_load_env_t const * env,
     523          51 :                      long                       slot_min ) {
     524             : 
     525             :   /* XID overview:
     526             : 
     527             :      - load_xid:   tip of fork currently being executed
     528             :      - modify_xid: xid in which program was last modified / deployed
     529             :      - txn->xid:   xid in which program cache entry is inserted
     530             : 
      531             :      slot(load_xid) >= slot(txn->xid) >= slot(modify_xid) */
     532             : 
     533             :   /* Acquire reference to ELF binary data */
     534             : 
     535          51 :   fd_funk_txn_xid_t modify_xid;
     536          51 :   ulong progdata_sz;
     537          51 :   uchar const * progdata = fd_prog_load_elf( accdb, load_xid, prog_addr, &progdata_sz, &modify_xid );
     538          51 :   if( FD_UNLIKELY( !progdata ) ) return NULL;
     539          45 :   ulong target_slot = modify_xid.ul[0];
     540             : 
     541             :   /* Prevent cache entry from crossing epoch boundary */
     542             : 
     543          45 :   target_slot = fd_ulong_max( target_slot, env->epoch_slot0 );
     544             : 
     545             :   /* Prevent cache entry from shadowing invalidation */
     546             : 
     547          45 :   target_slot = (ulong)fd_long_max( (long)target_slot, slot_min );
     548             : 
     549             :   /* Allocate a funk_rec */
     550             : 
     551          45 :   fd_funk_t * funk = cache->funk;
     552          45 :   fd_funk_rec_t * funk_rec = fd_funk_rec_pool_acquire( funk->rec_pool, NULL, 0, NULL );
     553          45 :   if( FD_UNLIKELY( !funk_rec ) ) {
     554           0 :     FD_LOG_ERR(( "Program cache is out of memory: fd_funk_rec_pool_acquire failed (rec_max=%lu)",
     555           0 :                  fd_funk_rec_pool_ele_max( funk->rec_pool ) ));
     556           0 :   }
     557          45 :   memset( funk_rec, 0, sizeof(fd_funk_rec_t) );
     558          45 :   fd_funk_val_init( funk_rec );
     559             : 
     560             :   /* Pick and lock a txn in which cache entry is created at */
     561             : 
     562          45 :   fd_funk_txn_t * txn = fd_progcache_lock_best_txn( cache, target_slot );
     563             : 
     564             :   /* Load program */
     565             : 
     566          45 :   fd_features_t const * features  = env->features;
     567          45 :   ulong         const   load_slot = env->slot;
     568          45 :   fd_prog_versions_t versions = fd_prog_versions( features, load_slot );
     569          45 :   fd_sbpf_loader_config_t config = {
     570          45 :     .sbpf_min_version = versions.min_sbpf_version,
     571          45 :     .sbpf_max_version = versions.max_sbpf_version,
     572          45 :   };
     573          45 :   fd_sbpf_elf_info_t elf_info[1];
     574             : 
     575          45 :   fd_progcache_rec_t * rec = NULL;
     576          45 :   if( FD_LIKELY( fd_sbpf_elf_peek( elf_info, progdata, progdata_sz, &config )==FD_SBPF_ELF_SUCCESS ) ) {
     577             : 
     578          45 :     fd_funk_t * funk          = cache->funk;
     579          45 :     ulong       rec_align     = fd_progcache_rec_align();
     580          45 :     ulong       rec_footprint = fd_progcache_rec_footprint( elf_info );
     581             : 
     582          45 :     void * rec_mem = fd_funk_val_truncate( funk_rec, funk->alloc, funk->wksp, rec_align, rec_footprint, NULL );
     583          45 :     if( FD_UNLIKELY( !rec_mem ) ) {
     584           0 :       FD_LOG_ERR(( "Program cache is out of memory: fd_alloc_malloc failed (requested align=%lu sz=%lu)",
     585           0 :                   rec_align, rec_footprint ));
     586           0 :     }
     587             : 
     588          45 :     rec = fd_progcache_rec_new( rec_mem, elf_info, &config, load_slot, features, progdata, progdata_sz, cache->scratch, cache->scratch_sz );
     589          45 :     if( !rec ) {
     590           3 :       fd_funk_val_flush( funk_rec, funk->alloc, funk->wksp );
     591           3 :     }
     592             : 
     593          45 :   }
     594             : 
     595             :   /* Convert to tombstone if load failed */
     596             : 
     597          45 :   if( !rec ) {  /* load fail */
     598           3 :     void * rec_mem = fd_funk_val_truncate( funk_rec, funk->alloc, funk->wksp, fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ), NULL );
     599           3 :     if( FD_UNLIKELY( !rec_mem ) ) {
     600           0 :       FD_LOG_ERR(( "Program cache is out of memory: fd_alloc_malloc failed (requested align=%lu sz=%lu)",
     601           0 :                    fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ) ));
     602           0 :     }
     603           3 :     rec = fd_progcache_rec_new_nx( rec_mem, load_slot );
     604           3 :   }
     605             : 
     606             :   /* Publish cache entry to funk index */
     607             : 
     608          45 :   fd_funk_rec_t * dup_rec = NULL;
     609          45 :   int push_ok = fd_progcache_push( cache, txn, funk_rec, prog_addr, &dup_rec );
     610             : 
     611             :   /* Done modifying transaction */
     612             : 
     613          45 :   fd_progcache_txn_unlock( txn );
     614             : 
     615             :   /* If another thread was faster publishing the same record, use that
     616             :      one instead.  FIXME POSSIBLE RACE CONDITION WHERE THE OTHER REC IS
     617             :      EVICTED AFTER PEEK? */
     618             : 
     619          45 :   if( !push_ok ) {
     620           0 :     FD_TEST( dup_rec );
     621           0 :     fd_funk_val_flush( funk_rec, funk->alloc, funk->wksp );
     622           0 :     fd_funk_rec_pool_release( funk->rec_pool, funk_rec, 1 );
     623           0 :     cache->metrics->dup_insert_cnt++;
     624           0 :     return fd_funk_val_const( dup_rec, funk->wksp );
     625           0 :   }
     626             : 
     627          45 :   cache->metrics->fill_cnt++;
     628          45 :   cache->metrics->fill_tot_sz += rec->rodata_sz;
     629             : 
     630          45 :   return rec;
     631          45 : }
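
Worked numbers for the target_slot clamping above (hypothetical values):

  /* modify slot 120, epoch_slot0 140, slot_min -1  -> target_slot = 140
       (the entry may not cross back into the previous epoch)
     modify slot 150, epoch_slot0 140, slot_min 160 -> target_slot = 160
       (the entry may not shadow an invalidation recorded at slot 159) */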
     632             : 
     633             : fd_progcache_rec_t const *
     634             : fd_progcache_pull( fd_progcache_t *           cache,
     635             :                    fd_accdb_user_t *          accdb,
     636             :                    fd_funk_txn_xid_t const *  xid,
     637             :                    void const *               prog_addr,
     638          57 :                    fd_prog_load_env_t const * env ) {
     639          57 :   if( FD_UNLIKELY( !cache || !cache->funk->shmem ) ) FD_LOG_CRIT(( "NULL progcache" ));
     640          57 :   fd_progcache_load_fork( cache, xid, env->epoch_slot0 );
     641             : 
     642          57 :   fd_progcache_rec_t const * found_rec = fd_progcache_peek( cache, xid, prog_addr, env->epoch_slot0 );
     643          57 :   long slot_min = -1L;
     644          57 :   if( !found_rec ) goto miss;
     645             : 
      646             :   /* Entry was invalidated; any fresh entry must start after it */
     647          15 :   if( found_rec->invalidate ) {
     648           9 :     slot_min = (long)found_rec->slot+1L;
     649           9 :     if( FD_UNLIKELY( xid->ul[0] < (ulong)slot_min ) ) {
     650           0 :       FD_LOG_CRIT(( "Program cache entry %016lx%016lx%016lx%016lx invalidated at slot %lu but loaded at slot %lu",
     651           0 :                     fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr    ) ),
     652           0 :                     fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr+ 8 ) ),
     653           0 :                     fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr+16 ) ),
     654           0 :                     fd_ulong_bswap( FD_LOAD( ulong, (uchar const *)prog_addr+24 ) ),
     655           0 :                     found_rec->slot,
     656           0 :                     xid->ul[0] ));
     657           0 :     }
     658           9 :     goto miss;
     659           9 :   }
     660             : 
     661             :   /* Passed all checks */
     662           6 :   cache->metrics->hit_cnt++;
     663           6 :   cache->metrics->hit_tot_sz += found_rec->rodata_sz;
     664           6 :   return found_rec;
     665             : 
     666          51 : miss:
     667          51 :   cache->metrics->miss_cnt++;
     668          51 :   return fd_progcache_insert( cache, accdb, xid, prog_addr, env, slot_min );
     669          15 : }
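
A usage sketch of the main load path (hypothetical caller; assumes
fd_prog_load_env_t can be aggregate-initialized with the fields this file
reads from it):

  fd_prog_load_env_t env = {
    .slot        = slot,         /* slot currently being executed */
    .epoch_slot0 = epoch_slot0,  /* first slot of the bank's epoch */
    .features    = features,     /* active feature set */
  };
  fd_progcache_rec_t const * rec = fd_progcache_pull( cache, accdb, xid, prog_addr, &env );
  if( !rec ) { /* account does not hold a loadable ELF */ }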
     670             : 
     671             : fd_progcache_rec_t const *
     672             : fd_progcache_invalidate( fd_progcache_t *          cache,
     673             :                          fd_funk_txn_xid_t const * xid,
     674             :                          void const *              prog_addr,
     675          33 :                          ulong                     slot ) {
     676          33 :   fd_funk_t * funk = cache->funk;
     677             : 
     678          33 :   if( FD_UNLIKELY( !cache || !funk->shmem ) ) FD_LOG_CRIT(( "NULL progcache" ));
     679             : 
     680             :   /* Resolve the fork graph node at xid.  Due to (unrelated) ongoing
     681             :      root operations, recover from temporary lock issues. */
     682             : 
     683          33 :   fd_funk_txn_t * txn = NULL;
     684          33 :   for(;;) {
     685          33 :     fd_funk_txn_map_query_t query[1];
     686          33 :     int query_err = fd_funk_txn_map_query_try( funk->txn_map, xid, NULL, query, 0 );
     687          33 :     if( FD_UNLIKELY( query_err==FD_MAP_ERR_AGAIN ) ) {
     688             :       /* FIXME random backoff */
     689           0 :       FD_SPIN_PAUSE();
     690           0 :       continue;
     691           0 :     }
     692          33 :     if( FD_UNLIKELY( query_err ) ) {
     693           0 :       FD_LOG_CRIT(( "fd_progcache_invalidate(xid=%lu:%lu) failed: fd_funk_txn_map_query_try returned (%i-%s)",
     694           0 :                     xid->ul[0], xid->ul[1], query_err, fd_map_strerror( query_err ) ));
     695           0 :     }
     696          33 :     txn = fd_funk_txn_map_query_ele( query );
     697          33 :     break;
     698          33 :   }
     699             : 
     700             :   /* Select a fork node to create invalidate record in
     701             :      Do not create invalidation records at the funk root */
     702             : 
     703          33 :   if( FD_UNLIKELY( !fd_progcache_txn_try_lock( txn ) ) ) {
     704           0 :     FD_LOG_CRIT(( "fd_progcache_invalidate(xid=%lu,...) failed: txn is write-locked", xid->ul[0] ));
     705           0 :   }
     706             : 
     707             :   /* Allocate a funk_rec */
     708             : 
     709          33 :   fd_funk_rec_t * funk_rec = fd_funk_rec_pool_acquire( funk->rec_pool, NULL, 0, NULL );
     710          33 :   if( FD_UNLIKELY( !funk_rec ) ) {
     711           0 :     FD_LOG_ERR(( "Program cache is out of memory: fd_funk_rec_pool_acquire failed (rec_max=%lu)",
     712           0 :                  fd_funk_rec_pool_ele_max( funk->rec_pool ) ));
     713           0 :   }
     714          33 :   memset( funk_rec, 0, sizeof(fd_funk_rec_t) );
     715          33 :   fd_funk_val_init( funk_rec );
     716             : 
     717             :   /* Create a tombstone */
     718             : 
     719          33 :   void * rec_mem = fd_funk_val_truncate( funk_rec, funk->alloc, funk->wksp, fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ), NULL );
     720          33 :   if( FD_UNLIKELY( !rec_mem ) ) {
     721           0 :     FD_LOG_ERR(( "Program cache is out of memory: fd_alloc_malloc failed (requested align=%lu sz=%lu)",
     722           0 :                   fd_progcache_rec_align(), fd_progcache_rec_footprint( NULL ) ));
     723           0 :   }
     724          33 :   fd_progcache_rec_t * rec = fd_progcache_rec_new_nx( rec_mem, slot );
     725          33 :   rec->invalidate = 1;
     726             : 
     727             :   /* Publish cache entry to funk index */
     728             : 
     729          33 :   fd_funk_rec_t * dup_rec = NULL;
     730          33 :   int push_ok = fd_progcache_push( cache, txn, funk_rec, prog_addr, &dup_rec );
     731             : 
     732             :   /* Done modifying transaction */
     733             : 
     734          33 :   fd_progcache_txn_unlock( txn );
     735             : 
     736             :   /* If another thread was faster publishing the same record, use that
     737             :      one instead.  FIXME POSSIBLE RACE CONDITION WHERE THE OTHER REC IS
     738             :      EVICTED AFTER PEEK? */
     739             : 
     740          33 :   if( !push_ok ) {
     741           3 :     FD_TEST( dup_rec );
     742           3 :     fd_funk_val_flush( funk_rec, funk->alloc, funk->wksp );
     743           3 :     fd_funk_rec_pool_release( funk->rec_pool, funk_rec, 1 );
     744           3 :     cache->metrics->dup_insert_cnt++;
     745           3 :     return fd_funk_val_const( dup_rec, funk->wksp );
     746           3 :   }
     747             : 
     748          30 :   cache->metrics->invalidate_cnt++;
     749             : 
     750          30 :   return rec;
     751          33 : }
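
Usage sketch: after a program account is written at slot on the fork tipped
by xid, an invalidation record forces the next fd_progcache_pull on that
fork to miss and recompile.

  fd_progcache_invalidate( cache, xid, prog_addr, slot );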

Generated by: LCOV version 1.14